author		Mark Rutland <mark.rutland@arm.com>	2017-10-23 17:07:29 -0400
committer	Ingo Molnar <mingo@kernel.org>	2017-10-25 05:01:08 -0400
commit		6aa7de059173a986114ac43b8f50b297a86f09a8 (patch)
tree		77666afe795e022914ca26433d61686c694dc4fd /drivers
parent		b03a0fe0c5e4b46dcd400d27395b124499554a71 (diff)
locking/atomics: COCCINELLE/treewide: Convert trivial ACCESS_ONCE() patterns to READ_ONCE()/WRITE_ONCE()
Please do not apply this to mainline directly; instead, please re-run the
coccinelle script shown below and apply its output.
For several reasons, it is desirable to use {READ,WRITE}_ONCE() in
preference to ACCESS_ONCE(), and new code is expected to use one of the
former. So far, there's been no reason to change most existing uses of
ACCESS_ONCE(), as these aren't harmful, and changing them results in
churn.
However, for some features, the read/write distinction is critical to
correct operation. To distinguish these cases, separate read/write
accessors must be used. This patch migrates (most) remaining
ACCESS_ONCE() instances to {READ,WRITE}_ONCE(), using the following
coccinelle script:
----
// Convert trivial ACCESS_ONCE() uses to equivalent READ_ONCE() and
// WRITE_ONCE()
// $ make coccicheck COCCI=/home/mark/once.cocci SPFLAGS="--include-headers" MODE=patch
virtual patch
@ depends on patch @
expression E1, E2;
@@
- ACCESS_ONCE(E1) = E2
+ WRITE_ONCE(E1, E2)
@ depends on patch @
expression E;
@@
- ACCESS_ONCE(E)
+ READ_ONCE(E)
----
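As an illustration of what the script does, here is a minimal before/after
sketch. It is illustrative only: the function and variable names are invented
for this example, and only the macro usage mirrors what the script rewrites.
----
/* Illustrative sketch, not part of this patch. */
static int example_before(int *shared)
{
	int v = ACCESS_ONCE(*shared);	/* load */

	ACCESS_ONCE(*shared) = v + 1;	/* store */
	return v;
}

/* After the script runs, the read/write distinction is explicit: */
static int example_after(int *shared)
{
	int v = READ_ONCE(*shared);	/* load */

	WRITE_ONCE(*shared, v + 1);	/* store */
	return v;
}
----
Note the ordering of the two rules: the write rule runs first, so an
ACCESS_ONCE() on the left-hand side of an assignment becomes WRITE_ONCE();
every remaining occurrence then becomes READ_ONCE().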
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: davem@davemloft.net
Cc: linux-arch@vger.kernel.org
Cc: mpe@ellerman.id.au
Cc: shuah@kernel.org
Cc: snitzer@redhat.com
Cc: thor.thayer@linux.intel.com
Cc: tj@kernel.org
Cc: viro@zeniv.linux.org.uk
Cc: will.deacon@arm.com
Link: http://lkml.kernel.org/r/1508792849-3115-19-git-send-email-paulmck@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers')
85 files changed, 190 insertions, 192 deletions
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 12ebd055724c..4b8ba2a75a4d 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -668,7 +668,7 @@ const char *dev_driver_string(const struct device *dev)
 	 * so be careful about accessing it.  dev->bus and dev->class should
 	 * never change once they are set, so they don't need special care.
 	 */
-	drv = ACCESS_ONCE(dev->driver);
+	drv = READ_ONCE(dev->driver);
 	return drv ? drv->name :
 			(dev->bus ? dev->bus->name :
 			(dev->class ? dev->class->name : ""));
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 7bcf80fa9ada..41d7c2b99f69 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -134,11 +134,11 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
 	if (!dev->power.use_autosuspend)
 		goto out;
 
-	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
+	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
 	if (autosuspend_delay < 0)
 		goto out;
 
-	last_busy = ACCESS_ONCE(dev->power.last_busy);
+	last_busy = READ_ONCE(dev->power.last_busy);
 	elapsed = jiffies - last_busy;
 	if (elapsed < 0)
 		goto out;	/* jiffies has wrapped around. */
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 8ad92707e45f..6c7ccac2679e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -641,7 +641,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
 		return;
 
 retry:
-	entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+	entropy_count = orig = READ_ONCE(r->entropy_count);
 	if (nfrac < 0) {
 		/* Debit */
 		entropy_count += nfrac;
@@ -1265,7 +1265,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
 
 	/* Can we pull enough? */
 retry:
-	entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+	entropy_count = orig = READ_ONCE(r->entropy_count);
 	ibytes = nbytes;
 	/* never pull more than available */
 	have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 39e489a96ad7..60da2537bef9 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -71,7 +71,7 @@ static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id)
 	if (readl_relaxed(timer->control) & timer->match_mask) {
 		writel_relaxed(timer->match_mask, timer->control);
 
-		event_handler = ACCESS_ONCE(timer->evt.event_handler);
+		event_handler = READ_ONCE(timer->evt.event_handler);
 		if (event_handler)
 			event_handler(&timer->evt);
 		return IRQ_HANDLED;
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index d258953ff488..f4f258075b89 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -172,7 +172,7 @@ static void caam_jr_dequeue(unsigned long devarg)
 
 	while (rd_reg32(&jrp->rregs->outring_used)) {
 
-		head = ACCESS_ONCE(jrp->head);
+		head = READ_ONCE(jrp->head);
 
 		spin_lock(&jrp->outlock);
 
@@ -341,7 +341,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
 	spin_lock_bh(&jrp->inplock);
 
 	head = jrp->head;
-	tail = ACCESS_ONCE(jrp->tail);
+	tail = READ_ONCE(jrp->tail);
 
 	if (!rd_reg32(&jrp->rregs->inpring_avail) ||
 	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 874ddf5e9087..0f20f5ec9617 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -193,7 +193,7 @@ static int wait_for_csb(struct nx842_workmem *wmem,
 	ktime_t start = wmem->start, now = ktime_get();
 	ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX);
 
-	while (!(ACCESS_ONCE(csb->flags) & CSB_V)) {
+	while (!(READ_ONCE(csb->flags) & CSB_V)) {
 		cpu_relax();
 		now = ktime_get();
 		if (ktime_after(now, timeout))
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 8bf89267dc25..ccf52368a073 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -734,7 +734,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
 	__le16 res_count, next_res_count;
 
 	i = ar_first_buffer_index(ctx);
-	res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);
+	res_count = READ_ONCE(ctx->descriptors[i].res_count);
 
 	/* A buffer that is not yet completely filled must be the last one. */
 	while (i != last && res_count == 0) {
@@ -742,8 +742,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
 		/* Peek at the next descriptor. */
 		next_i = ar_next_buffer_index(i);
 		rmb(); /* read descriptors in order */
-		next_res_count = ACCESS_ONCE(
-				ctx->descriptors[next_i].res_count);
+		next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
 		/*
 		 * If the next descriptor is still empty, we must stop at this
 		 * descriptor.
@@ -759,8 +758,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
 		if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
 			next_i = ar_next_buffer_index(next_i);
 			rmb();
-			next_res_count = ACCESS_ONCE(
-					ctx->descriptors[next_i].res_count);
+			next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
 			if (next_res_count != cpu_to_le16(PAGE_SIZE))
 				goto next_buffer_is_active;
 		}
@@ -2812,7 +2810,7 @@ static int handle_ir_buffer_fill(struct context *context,
 	u32 buffer_dma;
 
 	req_count = le16_to_cpu(last->req_count);
-	res_count = le16_to_cpu(ACCESS_ONCE(last->res_count));
+	res_count = le16_to_cpu(READ_ONCE(last->res_count));
 	completed = req_count - res_count;
 	buffer_dma = le32_to_cpu(last->data_address);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 333bad749067..303b5e099a98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
  */
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 {
-	uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
+	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
 	struct dma_fence *fence, **ptr;
 	int r;
 
@@ -300,7 +300,7 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
 	amdgpu_fence_process(ring);
 	emitted = 0x100000000ull;
 	emitted -= atomic_read(&ring->fence_drv.last_seq);
-	emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
+	emitted += READ_ONCE(ring->fence_drv.sync_seq);
 	return lower_32_bits(emitted);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7171968f261e..6149a47fe63d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -788,11 +788,11 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
 	seq_printf(m, "\t0x%08x: %12ld byte %s",
 		   id, amdgpu_bo_size(bo), placement);
 
-	offset = ACCESS_ONCE(bo->tbo.mem.start);
+	offset = READ_ONCE(bo->tbo.mem.start);
 	if (offset != AMDGPU_BO_INVALID_OFFSET)
 		seq_printf(m, " @ 0x%010Lx", offset);
 
-	pin_count = ACCESS_ONCE(bo->pin_count);
+	pin_count = READ_ONCE(bo->pin_count);
 	if (pin_count)
 		seq_printf(m, " pin count %d", pin_count);
 	seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 38cea6fb25a8..a25f6c72f219 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -187,7 +187,7 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
 	if (kfifo_is_empty(&entity->job_queue))
 		return false;
 
-	if (ACCESS_ONCE(entity->dependency))
+	if (READ_ONCE(entity->dependency))
 		return false;
 
 	return true;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 3386452bd2f0..cf3deb283da5 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -451,7 +451,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 	else
 		r = 0;
 
-	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
 	args->domain = radeon_mem_type_to_domain(cur_placement);
 	drm_gem_object_put_unlocked(gobj);
 	return r;
@@ -481,7 +481,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 		r = ret;
 
 	/* Flush HDP cache via MMIO if necessary */
-	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
 	if (rdev->asic->mmio_hdp_flush &&
 	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
 		robj->rdev->asic->mmio_hdp_flush(rdev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index a552e4ea5440..6ac094ee8983 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -904,7 +904,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
 	if (unlikely(drm_is_render_client(file_priv)))
 		require_exist = true;
 
-	if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
+	if (READ_ONCE(vmw_fpriv(file_priv)->locked_master)) {
 		DRM_ERROR("Locked master refused legacy "
 			  "surface reference.\n");
 		return -EACCES;
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index d9a1e9893136..97bea2e1aa6a 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -380,7 +380,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
 		if (sc->flags & SCF_FROZEN) {
 			wait_event_interruptible_timeout(
 				dd->event_queue,
-				!(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
+				!(READ_ONCE(dd->flags) & HFI1_FROZEN),
 				msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
 			if (dd->flags & HFI1_FROZEN)
 				return -ENOLCK;
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 7108a4b5e94c..75e740780285 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -1423,14 +1423,14 @@ retry:
 		goto done;
 	}
 	/* copy from receiver cache line and recalculate */
-	sc->alloc_free = ACCESS_ONCE(sc->free);
+	sc->alloc_free = READ_ONCE(sc->free);
 	avail =
 		(unsigned long)sc->credits -
 		(sc->fill - sc->alloc_free);
 	if (blocks > avail) {
 		/* still no room, actively update */
 		sc_release_update(sc);
-		sc->alloc_free = ACCESS_ONCE(sc->free);
+		sc->alloc_free = READ_ONCE(sc->free);
 		trycount++;
 		goto retry;
 	}
@@ -1667,7 +1667,7 @@ void sc_release_update(struct send_context *sc)
 
 	/* call sent buffer callbacks */
 	code = -1; /* code not yet set */
-	head = ACCESS_ONCE(sc->sr_head); /* snapshot the head */
+	head = READ_ONCE(sc->sr_head); /* snapshot the head */
 	tail = sc->sr_tail;
 	while (head != tail) {
 		pbuf = &sc->sr[tail].pbuf;
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index b3291f0fde9a..a7fc664f0d4e 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -363,7 +363,7 @@ static void ruc_loopback(struct rvt_qp *sqp)
 
 again:
 	smp_read_barrier_depends(); /* see post_one_send() */
-	if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
+	if (sqp->s_last == READ_ONCE(sqp->s_head))
 		goto clr_busy;
 	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
 
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 6781bcdb10b3..08346d25441c 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1725,7 +1725,7 @@ retry:
 
 	swhead = sde->descq_head & sde->sdma_mask;
 	/* this code is really bad for cache line trading */
-	swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+	swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
 	cnt = sde->descq_cnt;
 
 	if (swhead < swtail)
@@ -1872,7 +1872,7 @@ retry:
 	if ((status & sde->idle_mask) && !idle_check_done) {
 		u16 swtail;
 
-		swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+		swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
 		if (swtail != hwhead) {
 			hwhead = (u16)read_sde_csr(sde, SD(HEAD));
 			idle_check_done = 1;
@@ -2222,7 +2222,7 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
 	u16 len;
 
 	head = sde->descq_head & sde->sdma_mask;
-	tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+	tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
 	seq_printf(s, SDE_FMT, sde->this_idx,
 		   sde->cpu,
 		   sdma_state_name(sde->state.current_state),
@@ -3305,7 +3305,7 @@ int sdma_ahg_alloc(struct sdma_engine *sde)
 		return -EINVAL;
 	}
 	while (1) {
-		nr = ffz(ACCESS_ONCE(sde->ahg_bits));
+		nr = ffz(READ_ONCE(sde->ahg_bits));
 		if (nr > 31) {
 			trace_hfi1_ahg_allocate(sde, -ENOSPC);
 			return -ENOSPC;
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 107011d8613b..374c59784950 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -445,7 +445,7 @@ static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
 {
 	return sde->descq_cnt -
 		(sde->descq_tail -
-		 ACCESS_ONCE(sde->descq_head)) - 1;
+		 READ_ONCE(sde->descq_head)) - 1;
 }
 
 static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 0b646173ca22..9a31c585427f 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -80,7 +80,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 			goto bail;
 		/* We are in the error state, flush the work request. */
 		smp_read_barrier_depends(); /* see post_one_send() */
-		if (qp->s_last == ACCESS_ONCE(qp->s_head))
+		if (qp->s_last == READ_ONCE(qp->s_head))
 			goto bail;
 		/* If DMAs are in progress, we can't flush immediately. */
 		if (iowait_sdma_pending(&priv->s_iowait)) {
@@ -121,7 +121,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 			goto bail;
 		/* Check if send work queue is empty. */
 		smp_read_barrier_depends(); /* see post_one_send() */
-		if (qp->s_cur == ACCESS_ONCE(qp->s_head)) {
+		if (qp->s_cur == READ_ONCE(qp->s_head)) {
 			clear_ahg(qp);
 			goto bail;
 		}
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 2ba74fdd6f15..7fec6b984e3e 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -487,7 +487,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 			goto bail;
 		/* We are in the error state, flush the work request. */
 		smp_read_barrier_depends(); /* see post_one_send */
-		if (qp->s_last == ACCESS_ONCE(qp->s_head))
+		if (qp->s_last == READ_ONCE(qp->s_head))
 			goto bail;
 		/* If DMAs are in progress, we can't flush immediately. */
 		if (iowait_sdma_pending(&priv->s_iowait)) {
@@ -501,7 +501,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 
 	/* see post_one_send() */
 	smp_read_barrier_depends();
-	if (qp->s_cur == ACCESS_ONCE(qp->s_head))
+	if (qp->s_cur == READ_ONCE(qp->s_head))
 		goto bail;
 
 	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index c0c0e0445cbf..8ec6e8a8d6f7 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -276,7 +276,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
 		/* Wait until all requests have been freed. */
 		wait_event_interruptible(
 			pq->wait,
-			(ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
+			(READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
 		kfree(pq->reqs);
 		kfree(pq->req_in_use);
 		kmem_cache_destroy(pq->txreq_cache);
@@ -591,7 +591,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
 		if (ret != -EBUSY) {
 			req->status = ret;
 			WRITE_ONCE(req->has_error, 1);
-			if (ACCESS_ONCE(req->seqcomp) ==
+			if (READ_ONCE(req->seqcomp) ==
 			    req->seqsubmitted - 1)
 				goto free_req;
 			return ret;
@@ -825,7 +825,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
 		 */
 		if (req->data_len) {
 			iovec = &req->iovs[req->iov_idx];
-			if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
+			if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
 				if (++req->iov_idx == req->data_iovs) {
 					ret = -EFAULT;
 					goto free_txreq;
@@ -1390,7 +1390,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
 	} else {
 		if (status != SDMA_TXREQ_S_OK)
 			req->status = status;
-		if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) &&
+		if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) &&
 		    (READ_ONCE(req->done) ||
 		     READ_ONCE(req->has_error))) {
 			user_sdma_free_request(req, false);
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 53efbb0b40c4..9a37e844d4c8 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -368,7 +368,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
 
 again:
 	smp_read_barrier_depends(); /* see post_one_send() */
-	if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
+	if (sqp->s_last == READ_ONCE(sqp->s_head))
 		goto clr_busy;
 	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
 
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 498e2202e72c..bddcc37ace44 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -61,7 +61,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
 			goto bail;
 		/* We are in the error state, flush the work request. */
 		smp_read_barrier_depends(); /* see post_one_send() */
-		if (qp->s_last == ACCESS_ONCE(qp->s_head))
+		if (qp->s_last == READ_ONCE(qp->s_head))
 			goto bail;
 		/* If DMAs are in progress, we can't flush immediately. */
 		if (atomic_read(&priv->s_dma_busy)) {
@@ -91,7 +91,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
 			goto bail;
 		/* Check if send work queue is empty. */
 		smp_read_barrier_depends(); /* see post_one_send() */
-		if (qp->s_cur == ACCESS_ONCE(qp->s_head))
+		if (qp->s_cur == READ_ONCE(qp->s_head))
 			goto bail;
 		/*
 		 * Start a new request.
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index be4907453ac4..15962ed193ce 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -253,7 +253,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
 			goto bail;
 		/* We are in the error state, flush the work request. */
 		smp_read_barrier_depends(); /* see post_one_send */
-		if (qp->s_last == ACCESS_ONCE(qp->s_head))
+		if (qp->s_last == READ_ONCE(qp->s_head))
 			goto bail;
 		/* If DMAs are in progress, we can't flush immediately. */
 		if (atomic_read(&priv->s_dma_busy)) {
@@ -267,7 +267,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
 
 	/* see post_one_send() */
 	smp_read_barrier_depends();
-	if (qp->s_cur == ACCESS_ONCE(qp->s_head))
+	if (qp->s_cur == READ_ONCE(qp->s_head))
 		goto bail;
 
 	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 22df09ae809e..b670cb9d2006 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1073,7 +1073,7 @@ int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
 		rdi->driver_f.notify_error_qp(qp);
 
 	/* Schedule the sending tasklet to drain the send work queue. */
-	if (ACCESS_ONCE(qp->s_last) != qp->s_head)
+	if (READ_ONCE(qp->s_last) != qp->s_head)
 		rdi->driver_f.schedule_send(qp);
 
 	rvt_clear_mr_refs(qp, 0);
@@ -1686,7 +1686,7 @@ static inline int rvt_qp_is_avail(
 	if (likely(qp->s_avail))
 		return 0;
 	smp_read_barrier_depends(); /* see rc.c */
-	slast = ACCESS_ONCE(qp->s_last);
+	slast = READ_ONCE(qp->s_last);
 	if (qp->s_head >= slast)
 		avail = qp->s_size - (qp->s_head - slast);
 	else
@@ -1917,7 +1917,7 @@ int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	 * ahead and kick the send engine into gear. Otherwise we will always
 	 * just schedule the send to happen later.
 	 */
-	call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;
+	call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;
 
 	for (; wr; wr = wr->next) {
 		err = rvt_post_one_wr(qp, wr, &call_send);
diff --git a/drivers/input/misc/regulator-haptic.c b/drivers/input/misc/regulator-haptic.c
index 2e8f801932be..a1db1e5040dc 100644
--- a/drivers/input/misc/regulator-haptic.c
+++ b/drivers/input/misc/regulator-haptic.c
@@ -233,7 +233,7 @@ static int __maybe_unused regulator_haptic_resume(struct device *dev)
 
 	haptic->suspended = false;
 
-	magnitude = ACCESS_ONCE(haptic->magnitude);
+	magnitude = READ_ONCE(haptic->magnitude);
 	if (magnitude)
 		regulator_haptic_set_voltage(haptic, magnitude);
 
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index d216a8f7bc22..33bb074d6941 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -347,7 +347,7 @@ static void __cache_size_refresh(void)
 	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
 	BUG_ON(dm_bufio_client_count < 0);
 
-	dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
+	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
 
 	/*
 	 * Use default if set to 0 and report the actual cache size used.
@@ -960,7 +960,7 @@ static void __get_memory_limit(struct dm_bufio_client *c,
 {
 	unsigned long buffers;
 
-	if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
+	if (unlikely(READ_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
 		if (mutex_trylock(&dm_bufio_clients_lock)) {
 			__cache_size_refresh();
 			mutex_unlock(&dm_bufio_clients_lock);
@@ -1600,7 +1600,7 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
 
 static unsigned long get_retain_buffers(struct dm_bufio_client *c)
 {
-	unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
+	unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
 	return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
 }
 
@@ -1647,7 +1647,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
 	struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
 
-	return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]);
+	return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]);
 }
 
 /*
@@ -1818,7 +1818,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
 
 static unsigned get_max_age_hz(void)
 {
-	unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);
+	unsigned max_age = READ_ONCE(dm_bufio_max_age);
 
 	if (max_age > UINT_MAX / HZ)
 		max_age = UINT_MAX / HZ;
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index cf2c67e35eaf..eb45cc3df31d 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -107,7 +107,7 @@ static void io_job_start(struct dm_kcopyd_throttle *t)
 try_again:
 	spin_lock_irq(&throttle_spinlock);
 
-	throttle = ACCESS_ONCE(t->throttle);
+	throttle = READ_ONCE(t->throttle);
 
 	if (likely(throttle >= 100))
 		goto skip_limit;
@@ -157,7 +157,7 @@ static void io_job_finish(struct dm_kcopyd_throttle *t)
 
 	t->num_io_jobs--;
 
-	if (likely(ACCESS_ONCE(t->throttle) >= 100))
+	if (likely(READ_ONCE(t->throttle) >= 100))
 		goto skip_limit;
 
 	if (!t->num_io_jobs) {
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 6028d8247f58..a1a5eec783cc 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -431,7 +431,7 @@ do_sync_free:
 		synchronize_rcu_expedited();
 		dm_stat_free(&s->rcu_head);
 	} else {
-		ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
+		WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
 		call_rcu(&s->rcu_head, dm_stat_free);
 	}
 	return 0;
@@ -639,12 +639,12 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
 		 */
 		last = raw_cpu_ptr(stats->last);
 		stats_aux->merged =
-			(bi_sector == (ACCESS_ONCE(last->last_sector) &&
+			(bi_sector == (READ_ONCE(last->last_sector) &&
 				       ((bi_rw == WRITE) ==
-					(ACCESS_ONCE(last->last_rw) == WRITE))
+					(READ_ONCE(last->last_rw) == WRITE))
 				       ));
-		ACCESS_ONCE(last->last_sector) = end_sector;
-		ACCESS_ONCE(last->last_rw) = bi_rw;
+		WRITE_ONCE(last->last_sector, end_sector);
+		WRITE_ONCE(last->last_rw, bi_rw);
 	}
 
 	rcu_read_lock();
@@ -693,22 +693,22 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared
 
 	for_each_possible_cpu(cpu) {
 		p = &s->stat_percpu[cpu][x];
-		shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
-		shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
-		shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
-		shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
-		shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
-		shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
-		shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
-		shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
-		shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
-		shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
-		shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
-		shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
+		shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
+		shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
+		shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
+		shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
+		shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
+		shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
+		shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
+		shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
+		shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
+		shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
+		shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
+		shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
 		if (s->n_histogram_entries) {
 			unsigned i;
 			for (i = 0; i < s->n_histogram_entries + 1; i++)
-				shared->tmp.histogram[i] += ACCESS_ONCE(p->histogram[i]);
+				shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
 		}
 	}
 }
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 4c8de1ff78ca..8d0ba879777e 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -144,7 +144,7 @@ static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long
 
 	switch_get_position(sctx, region_nr, &region_index, &bit);
 
-	return (ACCESS_ONCE(sctx->region_table[region_index]) >> bit) &
+	return (READ_ONCE(sctx->region_table[region_index]) >> bit) &
 		((1 << sctx->region_table_entry_bits) - 1);
 }
 
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 1e25705209c2..89e5dff9b4cf 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2431,7 +2431,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 	struct pool_c *pt = pool->ti->private;
 	bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
 	enum pool_mode old_mode = get_pool_mode(pool);
-	unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
+	unsigned long no_space_timeout = READ_ONCE(no_space_timeout_secs) * HZ;
 
 	/*
 	 * Never allow the pool to transition to PM_WRITE mode if user
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index bda3caca23ca..fba93237a780 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -589,7 +589,7 @@ static void verity_prefetch_io(struct work_struct *work)
 		verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
 		verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
 		if (!i) {
-			unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);
+			unsigned cluster = READ_ONCE(dm_verity_prefetch_cluster);
 
 			cluster >>= v->data_dev_block_bits;
 			if (unlikely(!cluster))
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4be85324f44d..8aaffa19b29a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -114,7 +114,7 @@ static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
 
 static int __dm_get_module_param_int(int *module_param, int min, int max)
 {
-	int param = ACCESS_ONCE(*module_param);
+	int param = READ_ONCE(*module_param);
 	int modified_param = 0;
 	bool modified = true;
 
@@ -136,7 +136,7 @@ static int __dm_get_module_param_int(int *module_param, int min, int max)
 unsigned __dm_get_module_param(unsigned *module_param,
 			       unsigned def, unsigned max)
 {
-	unsigned param = ACCESS_ONCE(*module_param);
+	unsigned param = READ_ONCE(*module_param);
 	unsigned modified_param = 0;
 
 	if (!param)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0ff1bbf6c90e..447ddcbc9566 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2651,7 +2651,7 @@ state_show(struct md_rdev *rdev, char *page)
 {
 	char *sep = ",";
 	size_t len = 0;
-	unsigned long flags = ACCESS_ONCE(rdev->flags);
+	unsigned long flags = READ_ONCE(rdev->flags);
 
 	if (test_bit(Faulty, &flags) ||
 	    (!test_bit(ExternalBbl, &flags) &&
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 928e24a07133..7d9a50eed9db 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6072,7 +6072,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
 		 */
 		rcu_read_lock();
 		for (i = 0; i < conf->raid_disks; i++) {
-			struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
+			struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev);
 
 			if (rdev == NULL || test_bit(Faulty, &rdev->flags))
 				still_degraded = 1;
diff --git a/drivers/misc/mic/scif/scif_rb.c b/drivers/misc/mic/scif/scif_rb.c
index 637cc4686742..b665757ca89a 100644
--- a/drivers/misc/mic/scif/scif_rb.c
+++ b/drivers/misc/mic/scif/scif_rb.c
@@ -138,7 +138,7 @@ void scif_rb_commit(struct scif_rb *rb)
 	 * the read barrier in scif_rb_count(..)
 	 */
 	wmb();
-	ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
+	WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
 #ifdef CONFIG_INTEL_MIC_CARD
 	/*
 	 * X100 Si bug: For the case where a Core is performing an EXT_WR
@@ -147,7 +147,7 @@ void scif_rb_commit(struct scif_rb *rb)
 	 * This way, if ordering is violated for the Interrupt Message, it will
 	 * fall just behind the first Posted associated with the first EXT_WR.
 	 */
-	ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
+	WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
 #endif
 }
 
@@ -210,7 +210,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb)
 	 * scif_rb_space(..)
 	 */
 	mb();
-	ACCESS_ONCE(*rb->read_ptr) = new_offset;
+	WRITE_ONCE(*rb->read_ptr, new_offset);
#ifdef CONFIG_INTEL_MIC_CARD
 	/*
 	 * X100 Si Bug: For the case where a Core is performing an EXT_WR
@@ -219,7 +219,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb)
 	 * This way, if ordering is violated for the Interrupt Message, it will
 	 * fall just behind the first Posted associated with the first EXT_WR.
 	 */
-	ACCESS_ONCE(*rb->read_ptr) = new_offset;
+	WRITE_ONCE(*rb->read_ptr, new_offset);
 #endif
 }
 
diff --git a/drivers/misc/mic/scif/scif_rma_list.c b/drivers/misc/mic/scif/scif_rma_list.c
index e1ef8daedd5a..a036dbb4101e 100644
--- a/drivers/misc/mic/scif/scif_rma_list.c
+++ b/drivers/misc/mic/scif/scif_rma_list.c
@@ -277,7 +277,7 @@ retry:
 		 * Need to restart list traversal if there has been
 		 * an asynchronous list entry deletion.
 		 */
-		if (ACCESS_ONCE(ep->rma_info.async_list_del))
+		if (READ_ONCE(ep->rma_info.async_list_del))
 			goto retry;
 	}
 	mutex_unlock(&ep->rma_info.rma_lock);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c02cc817a490..1ed9529e7bd1 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1378,7 +1378,7 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 		unsigned int count;
 
 		slaves = rcu_dereference(bond->slave_arr);
-		count = slaves ? ACCESS_ONCE(slaves->count) : 0;
+		count = slaves ? READ_ONCE(slaves->count) : 0;
 		if (likely(count))
 			tx_slave = slaves->arr[hash_index %
 					       count];
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c99dc59d729b..af51b90cecbb 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1167,7 +1167,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 	slave = bond_slave_get_rcu(skb->dev);
 	bond = slave->bond;
 
-	recv_probe = ACCESS_ONCE(bond->recv_probe);
+	recv_probe = READ_ONCE(bond->recv_probe);
 	if (recv_probe) {
 		ret = recv_probe(skb, bond, slave);
 		if (ret == RX_HANDLER_CONSUMED) {
@@ -3810,7 +3810,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
 		else
 			bond_xmit_slave_id(bond, skb, 0);
 	} else {
-		int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
+		int slave_cnt = READ_ONCE(bond->slave_cnt);
 
 		if (likely(slave_cnt)) {
 			slave_id = bond_rr_gen_slave_id(bond);
@@ -3972,7 +3972,7 @@ static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int count;
 
 	slaves = rcu_dereference(bond->slave_arr);
-	count = slaves ? ACCESS_ONCE(slaves->count) : 0;
+	count = slaves ? READ_ONCE(slaves->count) : 0;
 	if (likely(count)) {
 		slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
 		bond_dev_queue_xmit(bond, skb, slave->dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 4ef68f69b58c..43f52a8fe708 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -405,7 +405,7 @@ void free_tx_desc(struct adapter *adap, struct sge_txq *q,
  */
 static inline int reclaimable(const struct sge_txq *q)
 {
-	int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
+	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
 	hw_cidx -= q->cidx;
 	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
 }
@@ -1375,7 +1375,7 @@ out_free:	dev_kfree_skb_any(skb);
  */
 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
 {
-	int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
+	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
 	int reclaim = hw_cidx - q->cidx;
 
 	if (reclaim < 0)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 0e3d9f39a807..c6e859a27ee6 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -605,7 +605,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
 
 	if (wrapped)
 		newacc += 65536;
-	ACCESS_ONCE(*acc) = newacc;
+	WRITE_ONCE(*acc, newacc);
 }
 
 static void populate_erx_stats(struct be_adapter *adapter,
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 0cec06bec63e..340e28211135 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -373,7 +373,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
 	unsigned int count;
 
 	smp_rmb();
-	count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail);
+	count = tx_count(READ_ONCE(priv->tx_head), tx_tail);
 	if (count == 0)
 		goto out;
 
@@ -431,7 +431,7 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	dma_addr_t phys;
 
 	smp_rmb();
-	count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail));
+	count = tx_count(tx_head, READ_ONCE(priv->tx_tail));
 	if (count == (TX_DESC_NUM - 1)) {
 		netif_stop_queue(ndev);
 		return NETDEV_TX_BUSY;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 8f326f87a815..2cb9539c931e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c | |||
@@ -264,7 +264,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) | |||
264 | vsi->rx_buf_failed, vsi->rx_page_failed); | 264 | vsi->rx_buf_failed, vsi->rx_page_failed); |
265 | rcu_read_lock(); | 265 | rcu_read_lock(); |
266 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 266 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
267 | struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]); | 267 | struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]); |
268 | 268 | ||
269 | if (!rx_ring) | 269 | if (!rx_ring) |
270 | continue; | 270 | continue; |
@@ -320,7 +320,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) | |||
320 | ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? "dynamic" : "fixed"); | 320 | ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? "dynamic" : "fixed"); |
321 | } | 321 | } |
322 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 322 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
323 | struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); | 323 | struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]); |
324 | 324 | ||
325 | if (!tx_ring) | 325 | if (!tx_ring) |
326 | continue; | 326 | continue; |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 05e89864f781..e9e04a485e0a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c | |||
@@ -1570,7 +1570,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, | |||
1570 | } | 1570 | } |
1571 | rcu_read_lock(); | 1571 | rcu_read_lock(); |
1572 | for (j = 0; j < vsi->num_queue_pairs; j++) { | 1572 | for (j = 0; j < vsi->num_queue_pairs; j++) { |
1573 | tx_ring = ACCESS_ONCE(vsi->tx_rings[j]); | 1573 | tx_ring = READ_ONCE(vsi->tx_rings[j]); |
1574 | 1574 | ||
1575 | if (!tx_ring) | 1575 | if (!tx_ring) |
1576 | continue; | 1576 | continue; |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 6498da8806cb..de1fcac7834d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -455,7 +455,7 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, | |||
455 | u64 bytes, packets; | 455 | u64 bytes, packets; |
456 | unsigned int start; | 456 | unsigned int start; |
457 | 457 | ||
458 | tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); | 458 | tx_ring = READ_ONCE(vsi->tx_rings[i]); |
459 | if (!tx_ring) | 459 | if (!tx_ring) |
460 | continue; | 460 | continue; |
461 | i40e_get_netdev_stats_struct_tx(tx_ring, stats); | 461 | i40e_get_netdev_stats_struct_tx(tx_ring, stats); |
@@ -791,7 +791,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) | |||
791 | rcu_read_lock(); | 791 | rcu_read_lock(); |
792 | for (q = 0; q < vsi->num_queue_pairs; q++) { | 792 | for (q = 0; q < vsi->num_queue_pairs; q++) { |
793 | /* locate Tx ring */ | 793 | /* locate Tx ring */ |
794 | p = ACCESS_ONCE(vsi->tx_rings[q]); | 794 | p = READ_ONCE(vsi->tx_rings[q]); |
795 | 795 | ||
796 | do { | 796 | do { |
797 | start = u64_stats_fetch_begin_irq(&p->syncp); | 797 | start = u64_stats_fetch_begin_irq(&p->syncp); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index d8456c381c99..97381238eb7c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c | |||
@@ -130,7 +130,7 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) | |||
130 | } | 130 | } |
131 | 131 | ||
132 | smp_mb(); /* Force any pending update before accessing. */ | 132 | smp_mb(); /* Force any pending update before accessing. */ |
133 | adj = ACCESS_ONCE(pf->ptp_base_adj); | 133 | adj = READ_ONCE(pf->ptp_base_adj); |
134 | 134 | ||
135 | freq = adj; | 135 | freq = adj; |
136 | freq *= ppb; | 136 | freq *= ppb; |
@@ -499,7 +499,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) | |||
499 | wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32); | 499 | wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32); |
500 | 500 | ||
501 | /* Update the base adjustment value. */ | 501 | /* Update the base adjustment value. */ |
502 | ACCESS_ONCE(pf->ptp_base_adj) = incval; | 502 | WRITE_ONCE(pf->ptp_base_adj, incval); |
503 | smp_mb(); /* Force the above update. */ | 503 | smp_mb(); /* Force the above update. */ |
504 | } | 504 | } |
505 | 505 | ||
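The two i40e_ptp hunks are a matched pair: the writer publishes the base adjustment and then issues smp_mb(), while the reader issues smp_mb() before loading it, so a reader that has observed the triggering event also observes the new value. A userspace sketch of the pairing, with __sync_synchronize() standing in for the kernel's smp_mb():
----
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define smp_mb() __sync_synchronize()	/* stand-in for the kernel barrier */

static long long ptp_base_adj;	/* shared between both paths */

/* Writer: publish the new base, then fence so it is globally
 * visible before any subsequent signal to readers. */
static void set_increment(long long incval)
{
	WRITE_ONCE(ptp_base_adj, incval);
	smp_mb();
}

/* Reader: fence first, forcing any pending update to be observed,
 * then load the value exactly once. */
static long long adjust_freq(long long ppb)
{
	long long adj;

	smp_mb();
	adj = READ_ONCE(ptp_base_adj);
	return adj * ppb;	/* scaling simplified */
}
----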
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 58adbf234e07..31a3f09df9f7 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h | |||
@@ -375,7 +375,7 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg); | |||
375 | /* write operations, indexed using DWORDS */ | 375 | /* write operations, indexed using DWORDS */ |
376 | #define wr32(reg, val) \ | 376 | #define wr32(reg, val) \ |
377 | do { \ | 377 | do { \ |
378 | u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \ | 378 | u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ |
379 | if (!E1000_REMOVED(hw_addr)) \ | 379 | if (!E1000_REMOVED(hw_addr)) \ |
380 | writel((val), &hw_addr[(reg)]); \ | 380 | writel((val), &hw_addr[(reg)]); \ |
381 | } while (0) | 381 | } while (0) |
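The igb and ixgbe register accessors in this and the following hunks all follow one pattern: snapshot hw_addr once, so the removed-adapter check and the MMIO access use the same pointer even if another CPU clears it after a surprise hot-unplug. A sketch with invented types — note the real drivers flag removal with a sentinel comparison (E1000_REMOVED/ixgbe_removed), not the bare NULL test used here:
----
#include <stdint.h>

#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct hw {
	uint8_t *hw_addr;	/* cleared when the device vanishes */
};

/* Write a device register, tolerating concurrent removal: the
 * check and the store both use the one sampled pointer. */
static void wr32(struct hw *hw, uint32_t reg, uint32_t val)
{
	uint8_t *hw_addr = READ_ONCE(hw->hw_addr);

	if (hw_addr)	/* !E1000_REMOVED(hw_addr) in the driver */
		*(volatile uint32_t *)(hw_addr + reg) = val;	/* writel() */
}
----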
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index fd4a46b03cc8..6bccc2be2b91 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
@@ -750,7 +750,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) | |||
750 | u32 igb_rd32(struct e1000_hw *hw, u32 reg) | 750 | u32 igb_rd32(struct e1000_hw *hw, u32 reg) |
751 | { | 751 | { |
752 | struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw); | 752 | struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw); |
753 | u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr); | 753 | u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); |
754 | u32 value = 0; | 754 | u32 value = 0; |
755 | 755 | ||
756 | if (E1000_REMOVED(hw_addr)) | 756 | if (E1000_REMOVED(hw_addr)) |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index e083732adf64..a01409e2e06c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | |||
@@ -161,7 +161,7 @@ static inline bool ixgbe_removed(void __iomem *addr) | |||
161 | 161 | ||
162 | static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) | 162 | static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) |
163 | { | 163 | { |
164 | u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); | 164 | u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); |
165 | 165 | ||
166 | if (ixgbe_removed(reg_addr)) | 166 | if (ixgbe_removed(reg_addr)) |
167 | return; | 167 | return; |
@@ -180,7 +180,7 @@ static inline void writeq(u64 val, void __iomem *addr) | |||
180 | 180 | ||
181 | static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value) | 181 | static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value) |
182 | { | 182 | { |
183 | u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); | 183 | u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); |
184 | 184 | ||
185 | if (ixgbe_removed(reg_addr)) | 185 | if (ixgbe_removed(reg_addr)) |
186 | return; | 186 | return; |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 4d76afd13868..2224e691ee07 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -380,7 +380,7 @@ static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) | |||
380 | */ | 380 | */ |
381 | u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) | 381 | u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) |
382 | { | 382 | { |
383 | u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); | 383 | u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); |
384 | u32 value; | 384 | u32 value; |
385 | 385 | ||
386 | if (ixgbe_removed(reg_addr)) | 386 | if (ixgbe_removed(reg_addr)) |
@@ -8630,7 +8630,7 @@ static void ixgbe_get_stats64(struct net_device *netdev, | |||
8630 | 8630 | ||
8631 | rcu_read_lock(); | 8631 | rcu_read_lock(); |
8632 | for (i = 0; i < adapter->num_rx_queues; i++) { | 8632 | for (i = 0; i < adapter->num_rx_queues; i++) { |
8633 | struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); | 8633 | struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); |
8634 | u64 bytes, packets; | 8634 | u64 bytes, packets; |
8635 | unsigned int start; | 8635 | unsigned int start; |
8636 | 8636 | ||
@@ -8646,12 +8646,12 @@ static void ixgbe_get_stats64(struct net_device *netdev, | |||
8646 | } | 8646 | } |
8647 | 8647 | ||
8648 | for (i = 0; i < adapter->num_tx_queues; i++) { | 8648 | for (i = 0; i < adapter->num_tx_queues; i++) { |
8649 | struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); | 8649 | struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]); |
8650 | 8650 | ||
8651 | ixgbe_get_ring_stats64(stats, ring); | 8651 | ixgbe_get_ring_stats64(stats, ring); |
8652 | } | 8652 | } |
8653 | for (i = 0; i < adapter->num_xdp_queues; i++) { | 8653 | for (i = 0; i < adapter->num_xdp_queues; i++) { |
8654 | struct ixgbe_ring *ring = ACCESS_ONCE(adapter->xdp_ring[i]); | 8654 | struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]); |
8655 | 8655 | ||
8656 | ixgbe_get_ring_stats64(stats, ring); | 8656 | ixgbe_get_ring_stats64(stats, ring); |
8657 | } | 8657 | } |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 86d6924a2b71..ae312c45696a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | |||
@@ -378,7 +378,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb) | |||
378 | } | 378 | } |
379 | 379 | ||
380 | smp_mb(); | 380 | smp_mb(); |
381 | incval = ACCESS_ONCE(adapter->base_incval); | 381 | incval = READ_ONCE(adapter->base_incval); |
382 | 382 | ||
383 | freq = incval; | 383 | freq = incval; |
384 | freq *= ppb; | 384 | freq *= ppb; |
@@ -1159,7 +1159,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) | |||
1159 | } | 1159 | } |
1160 | 1160 | ||
1161 | /* update the base incval used to calculate frequency adjustment */ | 1161 | /* update the base incval used to calculate frequency adjustment */ |
1162 | ACCESS_ONCE(adapter->base_incval) = incval; | 1162 | WRITE_ONCE(adapter->base_incval, incval); |
1163 | smp_mb(); | 1163 | smp_mb(); |
1164 | 1164 | ||
1165 | /* need lock to prevent incorrect read while modifying cyclecounter */ | 1165 | /* need lock to prevent incorrect read while modifying cyclecounter */ |
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 032f8ac06357..cacb30682434 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |||
@@ -164,7 +164,7 @@ static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg) | |||
164 | 164 | ||
165 | u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) | 165 | u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) |
166 | { | 166 | { |
167 | u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); | 167 | u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); |
168 | u32 value; | 168 | u32 value; |
169 | 169 | ||
170 | if (IXGBE_REMOVED(reg_addr)) | 170 | if (IXGBE_REMOVED(reg_addr)) |
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 04d8d4ee4f04..c651fefcc3d2 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h | |||
@@ -182,7 +182,7 @@ struct ixgbevf_info { | |||
182 | 182 | ||
183 | static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) | 183 | static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) |
184 | { | 184 | { |
185 | u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); | 185 | u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); |
186 | 186 | ||
187 | if (IXGBE_REMOVED(reg_addr)) | 187 | if (IXGBE_REMOVED(reg_addr)) |
188 | return; | 188 | return; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 8a32a8f7f9c0..3541a7f9d12e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c | |||
@@ -414,8 +414,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev, | |||
414 | 414 | ||
415 | index = cons_index & size_mask; | 415 | index = cons_index & size_mask; |
416 | cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; | 416 | cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; |
417 | last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb); | 417 | last_nr_txbb = READ_ONCE(ring->last_nr_txbb); |
418 | ring_cons = ACCESS_ONCE(ring->cons); | 418 | ring_cons = READ_ONCE(ring->cons); |
419 | ring_index = ring_cons & size_mask; | 419 | ring_index = ring_cons & size_mask; |
420 | stamp_index = ring_index; | 420 | stamp_index = ring_index; |
421 | 421 | ||
@@ -479,8 +479,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev, | |||
479 | wmb(); | 479 | wmb(); |
480 | 480 | ||
481 | /* we want to dirty this cache line once */ | 481 | /* we want to dirty this cache line once */ |
482 | ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb; | 482 | WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb); |
483 | ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped; | 483 | WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped); |
484 | 484 | ||
485 | if (cq->type == TX_XDP) | 485 | if (cq->type == TX_XDP) |
486 | return done < budget; | 486 | return done < budget; |
@@ -858,7 +858,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
858 | goto tx_drop; | 858 | goto tx_drop; |
859 | 859 | ||
860 | /* fetch ring->cons far ahead before needing it to avoid stall */ | 860 | /* fetch ring->cons far ahead before needing it to avoid stall */ |
861 | ring_cons = ACCESS_ONCE(ring->cons); | 861 | ring_cons = READ_ONCE(ring->cons); |
862 | 862 | ||
863 | real_size = get_real_size(skb, shinfo, dev, &lso_header_size, | 863 | real_size = get_real_size(skb, shinfo, dev, &lso_header_size, |
864 | &inline_ok, &fragptr); | 864 | &inline_ok, &fragptr); |
@@ -1066,7 +1066,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1066 | */ | 1066 | */ |
1067 | smp_rmb(); | 1067 | smp_rmb(); |
1068 | 1068 | ||
1069 | ring_cons = ACCESS_ONCE(ring->cons); | 1069 | ring_cons = READ_ONCE(ring->cons); |
1070 | if (unlikely(!mlx4_en_is_tx_ring_full(ring))) { | 1070 | if (unlikely(!mlx4_en_is_tx_ring_full(ring))) { |
1071 | netif_tx_wake_queue(ring->tx_queue); | 1071 | netif_tx_wake_queue(ring->tx_queue); |
1072 | ring->wake_queue++; | 1072 | ring->wake_queue++; |
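The mlx4 hunks show both sides of one ring: the completion loop works on local copies of cons and last_nr_txbb and, per the driver's own comment, publishes them with exactly two stores so the shared cache line is dirtied once per batch rather than once per completion. A schematic sketch with the per-completion work stubbed out:
----
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

struct ring {
	unsigned int cons;		/* consumer index, read by xmit path */
	unsigned int last_nr_txbb;
};

/* Drain a batch of completions against local variables, then
 * publish the results once; the xmit path reads these fields, so
 * batching avoids ping-ponging the cache line between CPUs. */
static void process_tx_cq(struct ring *ring, unsigned int completed)
{
	unsigned int ring_cons = READ_ONCE(ring->cons);
	unsigned int last_nr = READ_ONCE(ring->last_nr_txbb);
	unsigned int skipped = 0;

	while (completed--) {
		last_nr = 1;	/* stand-in: txbbs consumed by this WQE */
		skipped += last_nr;
	}

	WRITE_ONCE(ring->last_nr_txbb, last_nr);
	WRITE_ONCE(ring->cons, ring_cons + skipped);
}
----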
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 50ea69d88480..5dd5f61e1114 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c | |||
@@ -2629,7 +2629,7 @@ static void vxge_poll_vp_lockup(unsigned long data) | |||
2629 | ring = &vdev->vpaths[i].ring; | 2629 | ring = &vdev->vpaths[i].ring; |
2630 | 2630 | ||
2631 | /* Frame count truncated to machine word size */ | 2631 | /* Frame count truncated to machine word size */ |
2632 | rx_frms = ACCESS_ONCE(ring->stats.rx_frms); | 2632 | rx_frms = READ_ONCE(ring->stats.rx_frms); |
2633 | 2633 | ||
2634 | /* Did this vpath receive any packets */ | 2634 | /* Did this vpath receive any packets */ |
2635 | if (ring->stats.prev_rx_frms == rx_frms) { | 2635 | if (ring->stats.prev_rx_frms == rx_frms) { |
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 13f72f5b18d2..a95a46bcd339 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c | |||
@@ -2073,7 +2073,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) | |||
2073 | netif_vdbg(efx, intr, efx->net_dev, | 2073 | netif_vdbg(efx, intr, efx->net_dev, |
2074 | "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); | 2074 | "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); |
2075 | 2075 | ||
2076 | if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) { | 2076 | if (likely(READ_ONCE(efx->irq_soft_enabled))) { |
2077 | /* Note test interrupts */ | 2077 | /* Note test interrupts */ |
2078 | if (context->index == efx->irq_level) | 2078 | if (context->index == efx->irq_level) |
2079 | efx->last_irq_cpu = raw_smp_processor_id(); | 2079 | efx->last_irq_cpu = raw_smp_processor_id(); |
@@ -2088,7 +2088,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) | |||
2088 | static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) | 2088 | static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) |
2089 | { | 2089 | { |
2090 | struct efx_nic *efx = dev_id; | 2090 | struct efx_nic *efx = dev_id; |
2091 | bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); | 2091 | bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); |
2092 | struct efx_channel *channel; | 2092 | struct efx_channel *channel; |
2093 | efx_dword_t reg; | 2093 | efx_dword_t reg; |
2094 | u32 queues; | 2094 | u32 queues; |
@@ -3291,7 +3291,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel, | |||
3291 | bool rx_cont; | 3291 | bool rx_cont; |
3292 | u16 flags = 0; | 3292 | u16 flags = 0; |
3293 | 3293 | ||
3294 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | 3294 | if (unlikely(READ_ONCE(efx->reset_pending))) |
3295 | return 0; | 3295 | return 0; |
3296 | 3296 | ||
3297 | /* Basic packet information */ | 3297 | /* Basic packet information */ |
@@ -3428,7 +3428,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) | |||
3428 | unsigned int tx_ev_q_label; | 3428 | unsigned int tx_ev_q_label; |
3429 | int tx_descs = 0; | 3429 | int tx_descs = 0; |
3430 | 3430 | ||
3431 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | 3431 | if (unlikely(READ_ONCE(efx->reset_pending))) |
3432 | return 0; | 3432 | return 0; |
3433 | 3433 | ||
3434 | if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) | 3434 | if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) |
@@ -5316,7 +5316,7 @@ static void efx_ef10_filter_remove_old(struct efx_nic *efx) | |||
5316 | int i; | 5316 | int i; |
5317 | 5317 | ||
5318 | for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { | 5318 | for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { |
5319 | if (ACCESS_ONCE(table->entry[i].spec) & | 5319 | if (READ_ONCE(table->entry[i].spec) & |
5320 | EFX_EF10_FILTER_FLAG_AUTO_OLD) { | 5320 | EFX_EF10_FILTER_FLAG_AUTO_OLD) { |
5321 | rc = efx_ef10_filter_remove_internal(efx, | 5321 | rc = efx_ef10_filter_remove_internal(efx, |
5322 | 1U << EFX_FILTER_PRI_AUTO, i, true); | 5322 | 1U << EFX_FILTER_PRI_AUTO, i, true); |
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index b9cb697b2818..016616a63880 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c | |||
@@ -2809,7 +2809,7 @@ static void efx_reset_work(struct work_struct *data) | |||
2809 | unsigned long pending; | 2809 | unsigned long pending; |
2810 | enum reset_type method; | 2810 | enum reset_type method; |
2811 | 2811 | ||
2812 | pending = ACCESS_ONCE(efx->reset_pending); | 2812 | pending = READ_ONCE(efx->reset_pending); |
2813 | method = fls(pending) - 1; | 2813 | method = fls(pending) - 1; |
2814 | 2814 | ||
2815 | if (method == RESET_TYPE_MC_BIST) | 2815 | if (method == RESET_TYPE_MC_BIST) |
@@ -2874,7 +2874,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) | |||
2874 | /* If we're not READY then just leave the flags set as the cue | 2874 | /* If we're not READY then just leave the flags set as the cue |
2875 | * to abort probing or reschedule the reset later. | 2875 | * to abort probing or reschedule the reset later. |
2876 | */ | 2876 | */ |
2877 | if (ACCESS_ONCE(efx->state) != STATE_READY) | 2877 | if (READ_ONCE(efx->state) != STATE_READY) |
2878 | return; | 2878 | return; |
2879 | 2879 | ||
2880 | /* efx_process_channel() will no longer read events once a | 2880 | /* efx_process_channel() will no longer read events once a |
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 29614da91cbf..7263275fde4a 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c | |||
@@ -2545,7 +2545,7 @@ static void ef4_reset_work(struct work_struct *data) | |||
2545 | unsigned long pending; | 2545 | unsigned long pending; |
2546 | enum reset_type method; | 2546 | enum reset_type method; |
2547 | 2547 | ||
2548 | pending = ACCESS_ONCE(efx->reset_pending); | 2548 | pending = READ_ONCE(efx->reset_pending); |
2549 | method = fls(pending) - 1; | 2549 | method = fls(pending) - 1; |
2550 | 2550 | ||
2551 | if ((method == RESET_TYPE_RECOVER_OR_DISABLE || | 2551 | if ((method == RESET_TYPE_RECOVER_OR_DISABLE || |
@@ -2605,7 +2605,7 @@ void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type) | |||
2605 | /* If we're not READY then just leave the flags set as the cue | 2605 | /* If we're not READY then just leave the flags set as the cue |
2606 | * to abort probing or reschedule the reset later. | 2606 | * to abort probing or reschedule the reset later. |
2607 | */ | 2607 | */ |
2608 | if (ACCESS_ONCE(efx->state) != STATE_READY) | 2608 | if (READ_ONCE(efx->state) != STATE_READY) |
2609 | return; | 2609 | return; |
2610 | 2610 | ||
2611 | queue_work(reset_workqueue, &efx->reset_work); | 2611 | queue_work(reset_workqueue, &efx->reset_work); |
diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c index 93c713c1f627..cd8bb472d758 100644 --- a/drivers/net/ethernet/sfc/falcon/falcon.c +++ b/drivers/net/ethernet/sfc/falcon/falcon.c | |||
@@ -452,7 +452,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) | |||
452 | "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n", | 452 | "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n", |
453 | irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker)); | 453 | irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker)); |
454 | 454 | ||
455 | if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) | 455 | if (!likely(READ_ONCE(efx->irq_soft_enabled))) |
456 | return IRQ_HANDLED; | 456 | return IRQ_HANDLED; |
457 | 457 | ||
458 | /* Check to see if we have a serious error condition */ | 458 | /* Check to see if we have a serious error condition */ |
@@ -1372,7 +1372,7 @@ static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx) | |||
1372 | ef4_oword_t reg; | 1372 | ef4_oword_t reg; |
1373 | int link_speed, isolate; | 1373 | int link_speed, isolate; |
1374 | 1374 | ||
1375 | isolate = !!ACCESS_ONCE(efx->reset_pending); | 1375 | isolate = !!READ_ONCE(efx->reset_pending); |
1376 | 1376 | ||
1377 | switch (link_state->speed) { | 1377 | switch (link_state->speed) { |
1378 | case 10000: link_speed = 3; break; | 1378 | case 10000: link_speed = 3; break; |
diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c index 05916c710d8c..494884f6af4a 100644 --- a/drivers/net/ethernet/sfc/falcon/farch.c +++ b/drivers/net/ethernet/sfc/falcon/farch.c | |||
@@ -834,7 +834,7 @@ ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event) | |||
834 | struct ef4_nic *efx = channel->efx; | 834 | struct ef4_nic *efx = channel->efx; |
835 | int tx_packets = 0; | 835 | int tx_packets = 0; |
836 | 836 | ||
837 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | 837 | if (unlikely(READ_ONCE(efx->reset_pending))) |
838 | return 0; | 838 | return 0; |
839 | 839 | ||
840 | if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { | 840 | if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { |
@@ -990,7 +990,7 @@ ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event) | |||
990 | struct ef4_rx_queue *rx_queue; | 990 | struct ef4_rx_queue *rx_queue; |
991 | struct ef4_nic *efx = channel->efx; | 991 | struct ef4_nic *efx = channel->efx; |
992 | 992 | ||
993 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | 993 | if (unlikely(READ_ONCE(efx->reset_pending))) |
994 | return; | 994 | return; |
995 | 995 | ||
996 | rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); | 996 | rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); |
@@ -1504,7 +1504,7 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx) | |||
1504 | irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id) | 1504 | irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id) |
1505 | { | 1505 | { |
1506 | struct ef4_nic *efx = dev_id; | 1506 | struct ef4_nic *efx = dev_id; |
1507 | bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); | 1507 | bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); |
1508 | ef4_oword_t *int_ker = efx->irq_status.addr; | 1508 | ef4_oword_t *int_ker = efx->irq_status.addr; |
1509 | irqreturn_t result = IRQ_NONE; | 1509 | irqreturn_t result = IRQ_NONE; |
1510 | struct ef4_channel *channel; | 1510 | struct ef4_channel *channel; |
@@ -1596,7 +1596,7 @@ irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id) | |||
1596 | "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n", | 1596 | "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n", |
1597 | irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker)); | 1597 | irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker)); |
1598 | 1598 | ||
1599 | if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) | 1599 | if (!likely(READ_ONCE(efx->irq_soft_enabled))) |
1600 | return IRQ_HANDLED; | 1600 | return IRQ_HANDLED; |
1601 | 1601 | ||
1602 | /* Handle non-event-queue sources */ | 1602 | /* Handle non-event-queue sources */ |
diff --git a/drivers/net/ethernet/sfc/falcon/nic.h b/drivers/net/ethernet/sfc/falcon/nic.h index a4c4592f6023..54ca457cdb15 100644 --- a/drivers/net/ethernet/sfc/falcon/nic.h +++ b/drivers/net/ethernet/sfc/falcon/nic.h | |||
@@ -83,7 +83,7 @@ static inline struct ef4_tx_queue *ef4_tx_queue_partner(struct ef4_tx_queue *tx_ | |||
83 | static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue, | 83 | static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue, |
84 | unsigned int write_count) | 84 | unsigned int write_count) |
85 | { | 85 | { |
86 | unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); | 86 | unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count); |
87 | 87 | ||
88 | if (empty_read_count == 0) | 88 | if (empty_read_count == 0) |
89 | return false; | 89 | return false; |
@@ -464,11 +464,11 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx); | |||
464 | 464 | ||
465 | static inline int ef4_nic_event_test_irq_cpu(struct ef4_channel *channel) | 465 | static inline int ef4_nic_event_test_irq_cpu(struct ef4_channel *channel) |
466 | { | 466 | { |
467 | return ACCESS_ONCE(channel->event_test_cpu); | 467 | return READ_ONCE(channel->event_test_cpu); |
468 | } | 468 | } |
469 | static inline int ef4_nic_irq_test_irq_cpu(struct ef4_nic *efx) | 469 | static inline int ef4_nic_irq_test_irq_cpu(struct ef4_nic *efx) |
470 | { | 470 | { |
471 | return ACCESS_ONCE(efx->last_irq_cpu); | 471 | return READ_ONCE(efx->last_irq_cpu); |
472 | } | 472 | } |
473 | 473 | ||
474 | /* Global Resources */ | 474 | /* Global Resources */ |
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c index 6a75f4140a4b..6486814e97dc 100644 --- a/drivers/net/ethernet/sfc/falcon/tx.c +++ b/drivers/net/ethernet/sfc/falcon/tx.c | |||
@@ -134,8 +134,8 @@ static void ef4_tx_maybe_stop_queue(struct ef4_tx_queue *txq1) | |||
134 | */ | 134 | */ |
135 | netif_tx_stop_queue(txq1->core_txq); | 135 | netif_tx_stop_queue(txq1->core_txq); |
136 | smp_mb(); | 136 | smp_mb(); |
137 | txq1->old_read_count = ACCESS_ONCE(txq1->read_count); | 137 | txq1->old_read_count = READ_ONCE(txq1->read_count); |
138 | txq2->old_read_count = ACCESS_ONCE(txq2->read_count); | 138 | txq2->old_read_count = READ_ONCE(txq2->read_count); |
139 | 139 | ||
140 | fill_level = max(txq1->insert_count - txq1->old_read_count, | 140 | fill_level = max(txq1->insert_count - txq1->old_read_count, |
141 | txq2->insert_count - txq2->old_read_count); | 141 | txq2->insert_count - txq2->old_read_count); |
@@ -524,7 +524,7 @@ void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index) | |||
524 | 524 | ||
525 | /* Check whether the hardware queue is now empty */ | 525 | /* Check whether the hardware queue is now empty */ |
526 | if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { | 526 | if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { |
527 | tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count); | 527 | tx_queue->old_write_count = READ_ONCE(tx_queue->write_count); |
528 | if (tx_queue->read_count == tx_queue->old_write_count) { | 528 | if (tx_queue->read_count == tx_queue->old_write_count) { |
529 | smp_mb(); | 529 | smp_mb(); |
530 | tx_queue->empty_read_count = | 530 | tx_queue->empty_read_count = |
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index ba45150f53c7..86454d25a405 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c | |||
@@ -827,7 +827,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) | |||
827 | struct efx_nic *efx = channel->efx; | 827 | struct efx_nic *efx = channel->efx; |
828 | int tx_packets = 0; | 828 | int tx_packets = 0; |
829 | 829 | ||
830 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | 830 | if (unlikely(READ_ONCE(efx->reset_pending))) |
831 | return 0; | 831 | return 0; |
832 | 832 | ||
833 | if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { | 833 | if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { |
@@ -979,7 +979,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) | |||
979 | struct efx_rx_queue *rx_queue; | 979 | struct efx_rx_queue *rx_queue; |
980 | struct efx_nic *efx = channel->efx; | 980 | struct efx_nic *efx = channel->efx; |
981 | 981 | ||
982 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | 982 | if (unlikely(READ_ONCE(efx->reset_pending))) |
983 | return; | 983 | return; |
984 | 984 | ||
985 | rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); | 985 | rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); |
@@ -1520,7 +1520,7 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx) | |||
1520 | irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) | 1520 | irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) |
1521 | { | 1521 | { |
1522 | struct efx_nic *efx = dev_id; | 1522 | struct efx_nic *efx = dev_id; |
1523 | bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); | 1523 | bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); |
1524 | efx_oword_t *int_ker = efx->irq_status.addr; | 1524 | efx_oword_t *int_ker = efx->irq_status.addr; |
1525 | irqreturn_t result = IRQ_NONE; | 1525 | irqreturn_t result = IRQ_NONE; |
1526 | struct efx_channel *channel; | 1526 | struct efx_channel *channel; |
@@ -1612,7 +1612,7 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id) | |||
1612 | "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", | 1612 | "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", |
1613 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); | 1613 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); |
1614 | 1614 | ||
1615 | if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) | 1615 | if (!likely(READ_ONCE(efx->irq_soft_enabled))) |
1616 | return IRQ_HANDLED; | 1616 | return IRQ_HANDLED; |
1617 | 1617 | ||
1618 | /* Handle non-event-queue sources */ | 1618 | /* Handle non-event-queue sources */ |
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 4d7fb8af880d..7b51b6371724 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h | |||
@@ -81,7 +81,7 @@ static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue) | |||
81 | static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, | 81 | static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, |
82 | unsigned int write_count) | 82 | unsigned int write_count) |
83 | { | 83 | { |
84 | unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); | 84 | unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count); |
85 | 85 | ||
86 | if (empty_read_count == 0) | 86 | if (empty_read_count == 0) |
87 | return false; | 87 | return false; |
@@ -617,11 +617,11 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx); | |||
617 | 617 | ||
618 | static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) | 618 | static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) |
619 | { | 619 | { |
620 | return ACCESS_ONCE(channel->event_test_cpu); | 620 | return READ_ONCE(channel->event_test_cpu); |
621 | } | 621 | } |
622 | static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx) | 622 | static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx) |
623 | { | 623 | { |
624 | return ACCESS_ONCE(efx->last_irq_cpu); | 624 | return READ_ONCE(efx->last_irq_cpu); |
625 | } | 625 | } |
626 | 626 | ||
627 | /* Global Resources */ | 627 | /* Global Resources */ |
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 60cdb97f58e2..56c2db398def 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c | |||
@@ -658,7 +658,7 @@ static void efx_ptp_send_times(struct efx_nic *efx, | |||
658 | 658 | ||
659 | /* Write host time for specified period or until MC is done */ | 659 | /* Write host time for specified period or until MC is done */ |
660 | while ((timespec64_compare(&now.ts_real, &limit) < 0) && | 660 | while ((timespec64_compare(&now.ts_real, &limit) < 0) && |
661 | ACCESS_ONCE(*mc_running)) { | 661 | READ_ONCE(*mc_running)) { |
662 | struct timespec64 update_time; | 662 | struct timespec64 update_time; |
663 | unsigned int host_time; | 663 | unsigned int host_time; |
664 | 664 | ||
@@ -668,7 +668,7 @@ static void efx_ptp_send_times(struct efx_nic *efx, | |||
668 | do { | 668 | do { |
669 | pps_get_ts(&now); | 669 | pps_get_ts(&now); |
670 | } while ((timespec64_compare(&now.ts_real, &update_time) < 0) && | 670 | } while ((timespec64_compare(&now.ts_real, &update_time) < 0) && |
671 | ACCESS_ONCE(*mc_running)); | 671 | READ_ONCE(*mc_running)); |
672 | 672 | ||
673 | /* Synchronise NIC with single word of time only */ | 673 | /* Synchronise NIC with single word of time only */ |
674 | host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS | | 674 | host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS | |
@@ -832,14 +832,14 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) | |||
832 | ptp->start.dma_addr); | 832 | ptp->start.dma_addr); |
833 | 833 | ||
834 | /* Clear flag that signals MC ready */ | 834 | /* Clear flag that signals MC ready */ |
835 | ACCESS_ONCE(*start) = 0; | 835 | WRITE_ONCE(*start, 0); |
836 | rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf, | 836 | rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf, |
837 | MC_CMD_PTP_IN_SYNCHRONIZE_LEN); | 837 | MC_CMD_PTP_IN_SYNCHRONIZE_LEN); |
838 | EFX_WARN_ON_ONCE_PARANOID(rc); | 838 | EFX_WARN_ON_ONCE_PARANOID(rc); |
839 | 839 | ||
840 | /* Wait for start from MCDI (or timeout) */ | 840 | /* Wait for start from MCDI (or timeout) */ |
841 | timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS); | 841 | timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS); |
842 | while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) { | 842 | while (!READ_ONCE(*start) && (time_before(jiffies, timeout))) { |
843 | udelay(20); /* MCDI usually starts execution quickly */ | 843 | udelay(20); /* MCDI usually starts execution quickly */ |
844 | loops++; | 844 | loops++; |
845 | } | 845 | } |
@@ -849,7 +849,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) | |||
849 | if (!time_before(jiffies, timeout)) | 849 | if (!time_before(jiffies, timeout)) |
850 | ++ptp->sync_timeouts; | 850 | ++ptp->sync_timeouts; |
851 | 851 | ||
852 | if (ACCESS_ONCE(*start)) | 852 | if (READ_ONCE(*start)) |
853 | efx_ptp_send_times(efx, &last_time); | 853 | efx_ptp_send_times(efx, &last_time); |
854 | 854 | ||
855 | /* Collect results */ | 855 | /* Collect results */ |
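The sfc PTP hunks poll a word that firmware raises via DMA: the flag is cleared with WRITE_ONCE(), the request is kicked off, and the wait loop re-loads the flag on every iteration until it sets or the budget expires. A self-contained sketch with the MCDI call and the delay stubbed out (names are illustrative):
----
#include <stdbool.h>

#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

static int budget = 100000;		/* stand-in for the jiffies timeout */
static bool timed_out(void) { return --budget <= 0; }
static void relax(void) { }		/* udelay(20) in the driver */

/* Clear the firmware-ready word, start the request, then spin on
 * fresh loads of the word until it sets or the budget runs out. */
static bool wait_for_mc_start(unsigned int *start)
{
	WRITE_ONCE(*start, 0);
	/* ... issue the MCDI synchronize request here ... */
	while (!READ_ONCE(*start) && !timed_out())
		relax();
	return READ_ONCE(*start) != 0;
}
----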
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 32bf1fecf864..efb66ea21f27 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c | |||
@@ -136,8 +136,8 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1) | |||
136 | */ | 136 | */ |
137 | netif_tx_stop_queue(txq1->core_txq); | 137 | netif_tx_stop_queue(txq1->core_txq); |
138 | smp_mb(); | 138 | smp_mb(); |
139 | txq1->old_read_count = ACCESS_ONCE(txq1->read_count); | 139 | txq1->old_read_count = READ_ONCE(txq1->read_count); |
140 | txq2->old_read_count = ACCESS_ONCE(txq2->read_count); | 140 | txq2->old_read_count = READ_ONCE(txq2->read_count); |
141 | 141 | ||
142 | fill_level = max(txq1->insert_count - txq1->old_read_count, | 142 | fill_level = max(txq1->insert_count - txq1->old_read_count, |
143 | txq2->insert_count - txq2->old_read_count); | 143 | txq2->insert_count - txq2->old_read_count); |
@@ -752,7 +752,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) | |||
752 | 752 | ||
753 | /* Check whether the hardware queue is now empty */ | 753 | /* Check whether the hardware queue is now empty */ |
754 | if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { | 754 | if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { |
755 | tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count); | 755 | tx_queue->old_write_count = READ_ONCE(tx_queue->write_count); |
756 | if (tx_queue->read_count == tx_queue->old_write_count) { | 756 | if (tx_queue->read_count == tx_queue->old_write_count) { |
757 | smp_mb(); | 757 | smp_mb(); |
758 | tx_queue->empty_read_count = | 758 | tx_queue->empty_read_count = |
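The sfc (and earlier falcon) tx hunks implement the standard lost-wakeup defence for stopping a queue: stop first, take a full barrier, then re-sample the consumer's read_count, so completions that raced with the stop are noticed and the queue restarts immediately. A single-queue sketch of the idea, again with __sync_synchronize() standing in for smp_mb():
----
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
#define smp_mb() __sync_synchronize()	/* stand-in for the kernel barrier */

struct txq {
	unsigned int insert_count;	/* producer-owned */
	unsigned int read_count;	/* advanced by the completion path */
	unsigned int old_read_count;	/* producer's cached copy */
	int stopped;
};

/* Stop, fence, then re-check the fill level with a fresh
 * read_count: if completions slipped in around the stop, restart
 * at once instead of waiting for a wakeup that will never come. */
static void maybe_stop_queue(struct txq *q, unsigned int max_fill)
{
	unsigned int fill_level;

	q->stopped = 1;			/* netif_tx_stop_queue() */
	smp_mb();
	q->old_read_count = READ_ONCE(q->read_count);
	fill_level = q->insert_count - q->old_read_count;
	if (fill_level < max_fill)
		q->stopped = 0;		/* netif_tx_start_queue() */
}
----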
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 6a4e8e1bbd90..8ab0fb6892d5 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c | |||
@@ -6245,7 +6245,7 @@ static void niu_get_rx_stats(struct niu *np, | |||
6245 | 6245 | ||
6246 | pkts = dropped = errors = bytes = 0; | 6246 | pkts = dropped = errors = bytes = 0; |
6247 | 6247 | ||
6248 | rx_rings = ACCESS_ONCE(np->rx_rings); | 6248 | rx_rings = READ_ONCE(np->rx_rings); |
6249 | if (!rx_rings) | 6249 | if (!rx_rings) |
6250 | goto no_rings; | 6250 | goto no_rings; |
6251 | 6251 | ||
@@ -6276,7 +6276,7 @@ static void niu_get_tx_stats(struct niu *np, | |||
6276 | 6276 | ||
6277 | pkts = errors = bytes = 0; | 6277 | pkts = errors = bytes = 0; |
6278 | 6278 | ||
6279 | tx_rings = ACCESS_ONCE(np->tx_rings); | 6279 | tx_rings = READ_ONCE(np->tx_rings); |
6280 | if (!tx_rings) | 6280 | if (!tx_rings) |
6281 | goto no_rings; | 6281 | goto no_rings; |
6282 | 6282 | ||
diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 21b71ae947fd..b55b29b90b88 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c | |||
@@ -257,7 +257,7 @@ static struct tap_queue *tap_get_queue(struct tap_dev *tap, | |||
257 | * and validate that the result isn't NULL - in case we are | 257 | * and validate that the result isn't NULL - in case we are |
258 | * racing against queue removal. | 258 | * racing against queue removal. |
259 | */ | 259 | */ |
260 | int numvtaps = ACCESS_ONCE(tap->numvtaps); | 260 | int numvtaps = READ_ONCE(tap->numvtaps); |
261 | __u32 rxq; | 261 | __u32 rxq; |
262 | 262 | ||
263 | if (!numvtaps) | 263 | if (!numvtaps) |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index e21bf90b819f..27cd50c5bc9e 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -469,7 +469,7 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, | |||
469 | u32 numqueues = 0; | 469 | u32 numqueues = 0; |
470 | 470 | ||
471 | rcu_read_lock(); | 471 | rcu_read_lock(); |
472 | numqueues = ACCESS_ONCE(tun->numqueues); | 472 | numqueues = READ_ONCE(tun->numqueues); |
473 | 473 | ||
474 | txq = __skb_get_hash_symmetric(skb); | 474 | txq = __skb_get_hash_symmetric(skb); |
475 | if (txq) { | 475 | if (txq) { |
@@ -864,7 +864,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) | |||
864 | 864 | ||
865 | rcu_read_lock(); | 865 | rcu_read_lock(); |
866 | tfile = rcu_dereference(tun->tfiles[txq]); | 866 | tfile = rcu_dereference(tun->tfiles[txq]); |
867 | numqueues = ACCESS_ONCE(tun->numqueues); | 867 | numqueues = READ_ONCE(tun->numqueues); |
868 | 868 | ||
869 | /* Drop packet if interface is not attached */ | 869 | /* Drop packet if interface is not attached */ |
870 | if (txq >= numqueues) | 870 | if (txq >= numqueues) |
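In the tun hunks the queue count is sampled once inside the RCU read section and that single value bounds every later use of txq; a second load could return a smaller count after a queue detach and invalidate the earlier bound check. A sketch with invented types:
----
#include <stddef.h>

#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct tun_dev {
	unsigned int numqueues;		/* shrinks on queue detach */
	void *tfiles[16];
};

/* One sample of numqueues covers both the emptiness test and the
 * bound check, so txq can never index past what we validated. */
static void *select_queue(struct tun_dev *tun, unsigned int txq)
{
	unsigned int numqueues = READ_ONCE(tun->numqueues);

	if (!numqueues || txq >= numqueues)
		return NULL;	/* detached, or queue went away */
	return tun->tfiles[txq];
}
----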
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c index bd8d4392d68b..80f75139495f 100644 --- a/drivers/net/wireless/ath/ath5k/desc.c +++ b/drivers/net/wireless/ath/ath5k/desc.c | |||
@@ -500,13 +500,13 @@ ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah, | |||
500 | 500 | ||
501 | tx_status = &desc->ud.ds_tx5212.tx_stat; | 501 | tx_status = &desc->ud.ds_tx5212.tx_stat; |
502 | 502 | ||
503 | txstat1 = ACCESS_ONCE(tx_status->tx_status_1); | 503 | txstat1 = READ_ONCE(tx_status->tx_status_1); |
504 | 504 | ||
505 | /* No frame has been sent or an error occurred */ | 505 | /* No frame has been sent or an error occurred */ |
506 | if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE))) | 506 | if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE))) |
507 | return -EINPROGRESS; | 507 | return -EINPROGRESS; |
508 | 508 | ||
509 | txstat0 = ACCESS_ONCE(tx_status->tx_status_0); | 509 | txstat0 = READ_ONCE(tx_status->tx_status_0); |
510 | 510 | ||
511 | /* | 511 | /* |
512 | * Get descriptor status | 512 | * Get descriptor status |
@@ -700,14 +700,14 @@ ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, | |||
700 | u32 rxstat0, rxstat1; | 700 | u32 rxstat0, rxstat1; |
701 | 701 | ||
702 | rx_status = &desc->ud.ds_rx.rx_stat; | 702 | rx_status = &desc->ud.ds_rx.rx_stat; |
703 | rxstat1 = ACCESS_ONCE(rx_status->rx_status_1); | 703 | rxstat1 = READ_ONCE(rx_status->rx_status_1); |
704 | 704 | ||
705 | /* No frame received / not ready */ | 705 | /* No frame received / not ready */ |
706 | if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE))) | 706 | if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE))) |
707 | return -EINPROGRESS; | 707 | return -EINPROGRESS; |
708 | 708 | ||
709 | memset(rs, 0, sizeof(struct ath5k_rx_status)); | 709 | memset(rs, 0, sizeof(struct ath5k_rx_status)); |
710 | rxstat0 = ACCESS_ONCE(rx_status->rx_status_0); | 710 | rxstat0 = READ_ONCE(rx_status->rx_status_0); |
711 | 711 | ||
712 | /* | 712 | /* |
713 | * Frame receive status | 713 | * Frame receive status |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 613caca7dc02..785a0f33b7e6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | |||
@@ -3628,7 +3628,7 @@ static void brcmf_sdio_dataworker(struct work_struct *work) | |||
3628 | 3628 | ||
3629 | bus->dpc_running = true; | 3629 | bus->dpc_running = true; |
3630 | wmb(); | 3630 | wmb(); |
3631 | while (ACCESS_ONCE(bus->dpc_triggered)) { | 3631 | while (READ_ONCE(bus->dpc_triggered)) { |
3632 | bus->dpc_triggered = false; | 3632 | bus->dpc_triggered = false; |
3633 | brcmf_sdio_dpc(bus); | 3633 | brcmf_sdio_dpc(bus); |
3634 | bus->idlecount = 0; | 3634 | bus->idlecount = 0; |
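The brcmfmac hunk is a clear-then-recheck worker loop: the trigger flag is cleared before servicing, and READ_ONCE() on the loop condition forces a fresh load each pass, so a trigger raised while the DPC runs simply re-arms the loop instead of being optimised into one stale read. A sketch with the DPC body stubbed:
----
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct bus {
	int dpc_triggered;	/* set from IRQ / other contexts */
	int dpc_running;
	int idlecount;
};

static void run_dpc(struct bus *bus) { (void)bus; /* real work elided */ }

/* Clear before servicing; a trigger that arrives during run_dpc()
 * survives the clear and is seen by the next fresh load. */
static void dataworker(struct bus *bus)
{
	bus->dpc_running = 1;
	/* the driver issues wmb() here so dpc_running is seen first */
	while (READ_ONCE(bus->dpc_triggered)) {
		bus->dpc_triggered = 0;
		run_dpc(bus);
		bus->idlecount = 0;
	}
	bus->dpc_running = 0;
}
----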
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 231878969332..0f45f34e39d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c | |||
@@ -1118,7 +1118,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) | |||
1118 | static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) | 1118 | static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) |
1119 | { | 1119 | { |
1120 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 1120 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
1121 | bool calibrating = ACCESS_ONCE(mvm->calibrating); | 1121 | bool calibrating = READ_ONCE(mvm->calibrating); |
1122 | 1122 | ||
1123 | if (state) | 1123 | if (state) |
1124 | set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); | 1124 | set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 6f2e2af23219..6e9d3289b9d0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c | |||
@@ -652,7 +652,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) | |||
652 | return -1; | 652 | return -1; |
653 | } else if (info.control.vif->type == NL80211_IFTYPE_STATION && | 653 | } else if (info.control.vif->type == NL80211_IFTYPE_STATION && |
654 | is_multicast_ether_addr(hdr->addr1)) { | 654 | is_multicast_ether_addr(hdr->addr1)) { |
655 | u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id); | 655 | u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id); |
656 | 656 | ||
657 | if (ap_sta_id != IWL_MVM_INVALID_STA) | 657 | if (ap_sta_id != IWL_MVM_INVALID_STA) |
658 | sta_id = ap_sta_id; | 658 | sta_id = ap_sta_id; |
@@ -700,7 +700,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, | |||
700 | snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + | 700 | snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + |
701 | tcp_hdrlen(skb); | 701 | tcp_hdrlen(skb); |
702 | 702 | ||
703 | dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len); | 703 | dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len); |
704 | 704 | ||
705 | if (!sta->max_amsdu_len || | 705 | if (!sta->max_amsdu_len || |
706 | !ieee80211_is_data_qos(hdr->frame_control) || | 706 | !ieee80211_is_data_qos(hdr->frame_control) || |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index a06b6612b658..f25ce3a1ea50 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c | |||
@@ -1247,7 +1247,7 @@ restart: | |||
1247 | spin_lock(&rxq->lock); | 1247 | spin_lock(&rxq->lock); |
1248 | /* uCode's read index (stored in shared DRAM) indicates the last Rx | 1248 | /* uCode's read index (stored in shared DRAM) indicates the last Rx |
1249 | * buffer that the driver may process (last buffer filled by ucode). */ | 1249 | * buffer that the driver may process (last buffer filled by ucode). */ |
1250 | r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; | 1250 | r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; |
1251 | i = rxq->read; | 1251 | i = rxq->read; |
1252 | 1252 | ||
1253 | /* W/A 9000 device step A0 wrap-around bug */ | 1253 | /* W/A 9000 device step A0 wrap-around bug */ |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 2e3e013ec95a..9ad3f4fe5894 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c | |||
@@ -2076,12 +2076,12 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) | |||
2076 | 2076 | ||
2077 | IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); | 2077 | IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); |
2078 | txq = trans_pcie->txq[txq_idx]; | 2078 | txq = trans_pcie->txq[txq_idx]; |
2079 | wr_ptr = ACCESS_ONCE(txq->write_ptr); | 2079 | wr_ptr = READ_ONCE(txq->write_ptr); |
2080 | 2080 | ||
2081 | while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) && | 2081 | while (txq->read_ptr != READ_ONCE(txq->write_ptr) && |
2082 | !time_after(jiffies, | 2082 | !time_after(jiffies, |
2083 | now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { | 2083 | now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { |
2084 | u8 write_ptr = ACCESS_ONCE(txq->write_ptr); | 2084 | u8 write_ptr = READ_ONCE(txq->write_ptr); |
2085 | 2085 | ||
2086 | if (WARN_ONCE(wr_ptr != write_ptr, | 2086 | if (WARN_ONCE(wr_ptr != write_ptr, |
2087 | "WR pointer moved while flushing %d -> %d\n", | 2087 | "WR pointer moved while flushing %d -> %d\n", |
@@ -2553,7 +2553,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, | |||
2553 | 2553 | ||
2554 | spin_lock(&rxq->lock); | 2554 | spin_lock(&rxq->lock); |
2555 | 2555 | ||
2556 | r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; | 2556 | r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; |
2557 | 2557 | ||
2558 | for (i = rxq->read, j = 0; | 2558 | for (i = rxq->read, j = 0; |
2559 | i != r && j < allocated_rb_nums; | 2559 | i != r && j < allocated_rb_nums; |
@@ -2814,7 +2814,7 @@ static struct iwl_trans_dump_data | |||
2814 | /* Dump RBs is supported only for pre-9000 devices (1 queue) */ | 2814 | /* Dump RBs is supported only for pre-9000 devices (1 queue) */ |
2815 | struct iwl_rxq *rxq = &trans_pcie->rxq[0]; | 2815 | struct iwl_rxq *rxq = &trans_pcie->rxq[0]; |
2816 | /* RBs */ | 2816 | /* RBs */ |
2817 | num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) | 2817 | num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) |
2818 | & 0x0FFF; | 2818 | & 0x0FFF; |
2819 | num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; | 2819 | num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; |
2820 | len += num_rbs * (sizeof(*data) + | 2820 | len += num_rbs * (sizeof(*data) + |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 6467ffac9811..d2b3d6177a55 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -1380,7 +1380,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, | |||
1380 | mac80211_hwsim_monitor_rx(hw, skb, channel); | 1380 | mac80211_hwsim_monitor_rx(hw, skb, channel); |
1381 | 1381 | ||
1382 | /* wmediumd mode check */ | 1382 | /* wmediumd mode check */ |
1383 | _portid = ACCESS_ONCE(data->wmediumd); | 1383 | _portid = READ_ONCE(data->wmediumd); |
1384 | 1384 | ||
1385 | if (_portid) | 1385 | if (_portid) |
1386 | return mac80211_hwsim_tx_frame_nl(hw, skb, _portid); | 1386 | return mac80211_hwsim_tx_frame_nl(hw, skb, _portid); |
@@ -1477,7 +1477,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, | |||
1477 | struct ieee80211_channel *chan) | 1477 | struct ieee80211_channel *chan) |
1478 | { | 1478 | { |
1479 | struct mac80211_hwsim_data *data = hw->priv; | 1479 | struct mac80211_hwsim_data *data = hw->priv; |
1480 | u32 _pid = ACCESS_ONCE(data->wmediumd); | 1480 | u32 _pid = READ_ONCE(data->wmediumd); |
1481 | 1481 | ||
1482 | if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) { | 1482 | if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) { |
1483 | struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb); | 1483 | struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb); |
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index f05cfc83c9c8..f946bf889015 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
@@ -996,7 +996,7 @@ static void qlt_free_session_done(struct work_struct *work) | |||
996 | if (logout_started) { | 996 | if (logout_started) { |
997 | bool traced = false; | 997 | bool traced = false; |
998 | 998 | ||
999 | while (!ACCESS_ONCE(sess->logout_completed)) { | 999 | while (!READ_ONCE(sess->logout_completed)) { |
1000 | if (!traced) { | 1000 | if (!traced) { |
1001 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086, | 1001 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086, |
1002 | "%s: waiting for sess %p logout\n", | 1002 | "%s: waiting for sess %p logout\n", |
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 942d094269fb..9469695f5871 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
@@ -985,7 +985,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) | |||
985 | mb = udev->mb_addr; | 985 | mb = udev->mb_addr; |
986 | tcmu_flush_dcache_range(mb, sizeof(*mb)); | 986 | tcmu_flush_dcache_range(mb, sizeof(*mb)); |
987 | 987 | ||
988 | while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) { | 988 | while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { |
989 | 989 | ||
990 | struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; | 990 | struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; |
991 | struct tcmu_cmd *cmd; | 991 | struct tcmu_cmd *cmd; |
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 3e865dbf878c..fbaa2a90d25d 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c | |||
@@ -483,7 +483,7 @@ static ssize_t wdm_read | |||
483 | if (rv < 0) | 483 | if (rv < 0) |
484 | return -ERESTARTSYS; | 484 | return -ERESTARTSYS; |
485 | 485 | ||
486 | cntr = ACCESS_ONCE(desc->length); | 486 | cntr = READ_ONCE(desc->length); |
487 | if (cntr == 0) { | 487 | if (cntr == 0) { |
488 | desc->read = 0; | 488 | desc->read = 0; |
489 | retry: | 489 | retry: |
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index e9326f31db8d..4ae667d8c238 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
@@ -150,7 +150,7 @@ static int usbfs_increase_memory_usage(u64 amount) | |||
150 | { | 150 | { |
151 | u64 lim; | 151 | u64 lim; |
152 | 152 | ||
153 | lim = ACCESS_ONCE(usbfs_memory_mb); | 153 | lim = READ_ONCE(usbfs_memory_mb); |
154 | lim <<= 20; | 154 | lim <<= 20; |
155 | 155 | ||
156 | atomic64_add(amount, &usbfs_memory_usage); | 156 | atomic64_add(amount, &usbfs_memory_usage); |
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index d930bfda4010..58d59c5f8592 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c | |||
@@ -973,7 +973,7 @@ static ssize_t interface_show(struct device *dev, struct device_attribute *attr, | |||
973 | char *string; | 973 | char *string; |
974 | 974 | ||
975 | intf = to_usb_interface(dev); | 975 | intf = to_usb_interface(dev); |
976 | string = ACCESS_ONCE(intf->cur_altsetting->string); | 976 | string = READ_ONCE(intf->cur_altsetting->string); |
977 | if (!string) | 977 | if (!string) |
978 | return 0; | 978 | return 0; |
979 | return sprintf(buf, "%s\n", string); | 979 | return sprintf(buf, "%s\n", string); |
@@ -989,7 +989,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, | |||
989 | 989 | ||
990 | intf = to_usb_interface(dev); | 990 | intf = to_usb_interface(dev); |
991 | udev = interface_to_usbdev(intf); | 991 | udev = interface_to_usbdev(intf); |
992 | alt = ACCESS_ONCE(intf->cur_altsetting); | 992 | alt = READ_ONCE(intf->cur_altsetting); |
993 | 993 | ||
994 | return sprintf(buf, "usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02X" | 994 | return sprintf(buf, "usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02X" |
995 | "ic%02Xisc%02Xip%02Xin%02X\n", | 995 | "ic%02Xisc%02Xip%02Xin%02X\n", |
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c index 1f9941145746..0b59fa50aa30 100644 --- a/drivers/usb/gadget/udc/gr_udc.c +++ b/drivers/usb/gadget/udc/gr_udc.c | |||
@@ -1261,7 +1261,7 @@ static int gr_handle_in_ep(struct gr_ep *ep) | |||
1261 | if (!req->last_desc) | 1261 | if (!req->last_desc) |
1262 | return 0; | 1262 | return 0; |
1263 | 1263 | ||
1264 | if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN) | 1264 | if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN) |
1265 | return 0; /* Not put in hardware buffers yet */ | 1265 | return 0; /* Not put in hardware buffers yet */ |
1266 | 1266 | ||
1267 | if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0)) | 1267 | if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0)) |
@@ -1290,7 +1290,7 @@ static int gr_handle_out_ep(struct gr_ep *ep) | |||
1290 | if (!req->curr_desc) | 1290 | if (!req->curr_desc) |
1291 | return 0; | 1291 | return 0; |
1292 | 1292 | ||
1293 | ctrl = ACCESS_ONCE(req->curr_desc->ctrl); | 1293 | ctrl = READ_ONCE(req->curr_desc->ctrl); |
1294 | if (ctrl & GR_DESC_OUT_CTRL_EN) | 1294 | if (ctrl & GR_DESC_OUT_CTRL_EN) |
1295 | return 0; /* Not received yet */ | 1295 | return 0; /* Not received yet */ |
1296 | 1296 | ||
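In both gr_udc hunks the descriptor control word lives in DMA memory that the controller writes back, so each poll must be a fresh load; READ_ONCE stops the compiler from caching the value across calls or loop iterations. A sketch of that property, with an illustrative layout and bit (not the real GR_DESC_* values):
----
#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))
#define CTRL_EN 0x1u    /* illustrative enable bit */

struct desc {
        unsigned int ctrl;      /* written back by the DMA engine */
};

static int desc_completed(const struct desc *d)
{
        /* Volatile access: every call performs a real load. */
        return !(READ_ONCE(d->ctrl) & CTRL_EN);
}
----
Without it, a caller polling desc_completed() in a loop could legally be compiled into an infinite loop on a single stale value.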
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 44924824fa41..c86f89babd57 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c | |||
@@ -785,7 +785,7 @@ static void io_watchdog_func(unsigned long _ohci) | |||
785 | } | 785 | } |
786 | 786 | ||
787 | /* find the last TD processed by the controller. */ | 787 | /* find the last TD processed by the controller. */ |
788 | head = hc32_to_cpu(ohci, ACCESS_ONCE(ed->hwHeadP)) & TD_MASK; | 788 | head = hc32_to_cpu(ohci, READ_ONCE(ed->hwHeadP)) & TD_MASK; |
789 | td_start = td; | 789 | td_start = td; |
790 | td_next = list_prepare_entry(td, &ed->td_list, td_list); | 790 | td_next = list_prepare_entry(td, &ed->td_list, td_list); |
791 | list_for_each_entry_continue(td_next, &ed->td_list, td_list) { | 791 | list_for_each_entry_continue(td_next, &ed->td_list, td_list) { |
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h index 91b22b2ea3aa..09a2a259941b 100644 --- a/drivers/usb/host/uhci-hcd.h +++ b/drivers/usb/host/uhci-hcd.h | |||
@@ -186,7 +186,7 @@ struct uhci_qh { | |||
186 | * We need a special accessor for the element pointer because it is | 186 | * We need a special accessor for the element pointer because it is |
187 | * subject to asynchronous updates by the controller. | 187 | * subject to asynchronous updates by the controller. |
188 | */ | 188 | */ |
189 | #define qh_element(qh) ACCESS_ONCE((qh)->element) | 189 | #define qh_element(qh) READ_ONCE((qh)->element) |
190 | 190 | ||
191 | #define LINK_TO_QH(uhci, qh) (UHCI_PTR_QH((uhci)) | \ | 191 | #define LINK_TO_QH(uhci, qh) (UHCI_PTR_QH((uhci)) | \ |
192 | cpu_to_hc32((uhci), (qh)->dma_handle)) | 192 | cpu_to_hc32((uhci), (qh)->dma_handle)) |
@@ -274,7 +274,7 @@ struct uhci_td { | |||
274 | * subject to asynchronous updates by the controller. | 274 | * subject to asynchronous updates by the controller. |
275 | */ | 275 | */ |
276 | #define td_status(uhci, td) hc32_to_cpu((uhci), \ | 276 | #define td_status(uhci, td) hc32_to_cpu((uhci), \ |
277 | ACCESS_ONCE((td)->status)) | 277 | READ_ONCE((td)->status)) |
278 | 278 | ||
279 | #define LINK_TO_TD(uhci, td) (cpu_to_hc32((uhci), (td)->dma_handle)) | 279 | #define LINK_TO_TD(uhci, td) (cpu_to_hc32((uhci), (td)->dma_handle)) |
280 | 280 | ||
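The uhci-hcd.h comments spell out why these fields get accessor macros: the controller updates them asynchronously, and wrapping READ_ONCE in qh_element() and td_status() keeps that annotation in one place instead of at every call site. For scalar types the conversion is mechanical, since READ_ONCE and WRITE_ONCE are roughly the volatile access ACCESS_ONCE used to be (the real macros in <linux/compiler.h> additionally cope with non-scalar types, which is part of why ACCESS_ONCE is being retired):
----
/* Rough scalar-only models; the kernel's definitions do more. */
#define READ_ONCE(x)     (*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))
----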
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index f5a86f651f38..2bc3705a99bd 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c | |||
@@ -665,7 +665,7 @@ static int vfio_dev_viable(struct device *dev, void *data) | |||
665 | { | 665 | { |
666 | struct vfio_group *group = data; | 666 | struct vfio_group *group = data; |
667 | struct vfio_device *device; | 667 | struct vfio_device *device; |
668 | struct device_driver *drv = ACCESS_ONCE(dev->driver); | 668 | struct device_driver *drv = READ_ONCE(dev->driver); |
669 | struct vfio_unbound_dev *unbound; | 669 | struct vfio_unbound_dev *unbound; |
670 | int ret = -EINVAL; | 670 | int ret = -EINVAL; |
671 | 671 | ||
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 046f6d280af5..35e929f132e8 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
@@ -929,7 +929,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) | |||
929 | continue; | 929 | continue; |
930 | } | 930 | } |
931 | 931 | ||
932 | tpg = ACCESS_ONCE(vs_tpg[*target]); | 932 | tpg = READ_ONCE(vs_tpg[*target]); |
933 | if (unlikely(!tpg)) { | 933 | if (unlikely(!tpg)) { |
934 | /* Target does not exist, fail the request */ | 934 | /* Target does not exist, fail the request */ |
935 | vhost_scsi_send_bad_target(vs, vq, head, out); | 935 | vhost_scsi_send_bad_target(vs, vq, head, out); |
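vs_tpg[] can have entries cleared while a request is in flight, so the slot indexed by the guest-supplied target is loaded exactly once; the unlikely(!tpg) test and every later use of tpg then refer to the same entry. A sketch of the bounded, single-load table lookup, with illustrative names and bound:
----
#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))
#define MAX_TARGETS 256 /* illustrative bound */

struct tpg;     /* opaque stand-in for the real target struct */

static struct tpg *table[MAX_TARGETS];  /* slots cleared on teardown */

static struct tpg *lookup(unsigned int target)
{
        if (target >= MAX_TARGETS)
                return NULL;
        return READ_ONCE(table[target]);        /* one load of the slot */
}
----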