author    Mark Rutland <mark.rutland@arm.com>  2017-10-23 17:07:29 -0400
committer Ingo Molnar <mingo@kernel.org>       2017-10-25 05:01:08 -0400
commit    6aa7de059173a986114ac43b8f50b297a86f09a8 (patch)
tree      77666afe795e022914ca26433d61686c694dc4fd /drivers/gpu
parent    b03a0fe0c5e4b46dcd400d27395b124499554a71 (diff)
locking/atomics: COCCINELLE/treewide: Convert trivial ACCESS_ONCE() patterns to READ_ONCE()/WRITE_ONCE()
Please do not apply this to mainline directly, instead please re-run the
coccinelle script shown below and apply its output.

For several reasons, it is desirable to use {READ,WRITE}_ONCE() in
preference to ACCESS_ONCE(), and new code is expected to use one of the
former. So far, there's been no reason to change most existing uses of
ACCESS_ONCE(), as these aren't harmful, and changing them results in
churn.

However, for some features, the read/write distinction is critical to
correct operation. To distinguish these cases, separate read/write
accessors must be used. This patch migrates (most) remaining
ACCESS_ONCE() instances to {READ,WRITE}_ONCE(), using the following
coccinelle script:

----
// Convert trivial ACCESS_ONCE() uses to equivalent READ_ONCE() and
// WRITE_ONCE()

// $ make coccicheck COCCI=/home/mark/once.cocci SPFLAGS="--include-headers" MODE=patch
virtual patch

@ depends on patch @
expression E1, E2;
@@

- ACCESS_ONCE(E1) = E2
+ WRITE_ONCE(E1, E2)

@ depends on patch @
expression E;
@@

- ACCESS_ONCE(E)
+ READ_ONCE(E)
----

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: davem@davemloft.net
Cc: linux-arch@vger.kernel.org
Cc: mpe@ellerman.id.au
Cc: shuah@kernel.org
Cc: snitzer@redhat.com
Cc: thor.thayer@linux.intel.com
Cc: tj@kernel.org
Cc: viro@zeniv.linux.org.uk
Cc: will.deacon@arm.com
Link: http://lkml.kernel.org/r/1508792849-3115-19-git-send-email-paulmck@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
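As a concrete illustration of the transformation the script performs, here is
a minimal, hand-written C sketch. The shared_flag variable and the
set_flag()/get_flag() helpers are hypothetical, invented for this example;
they do not appear anywhere in the patch:

----
#include <linux/compiler.h>	/* READ_ONCE() / WRITE_ONCE() */

/* Hypothetical example: a flag shared between a writer and a
 * lockless reader. */
static int shared_flag;

static void set_flag(int v)
{
	/* Was: ACCESS_ONCE(shared_flag) = v;
	 * The first coccinelle rule matches the store form and
	 * rewrites it to WRITE_ONCE(), making the write explicit. */
	WRITE_ONCE(shared_flag, v);
}

static int get_flag(void)
{
	/* Was: return ACCESS_ONCE(shared_flag);
	 * The second rule rewrites the remaining (read) uses to
	 * READ_ONCE(). */
	return READ_ONCE(shared_flag);
}
----

Like ACCESS_ONCE(), both accessors keep the compiler from tearing or fusing
the marked access; splitting them into separate read and write forms is what
lets the features that depend on the read/write distinction work correctly.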
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c      4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c        4
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c            4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c        2
5 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 333bad749067..303b5e099a98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
  */
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 {
-	uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
+	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
 	struct dma_fence *fence, **ptr;
 	int r;
 
@@ -300,7 +300,7 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
 	amdgpu_fence_process(ring);
 	emitted = 0x100000000ull;
 	emitted -= atomic_read(&ring->fence_drv.last_seq);
-	emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
+	emitted += READ_ONCE(ring->fence_drv.sync_seq);
 	return lower_32_bits(emitted);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7171968f261e..6149a47fe63d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -788,11 +788,11 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
 	seq_printf(m, "\t0x%08x: %12ld byte %s",
 		   id, amdgpu_bo_size(bo), placement);
 
-	offset = ACCESS_ONCE(bo->tbo.mem.start);
+	offset = READ_ONCE(bo->tbo.mem.start);
 	if (offset != AMDGPU_BO_INVALID_OFFSET)
 		seq_printf(m, " @ 0x%010Lx", offset);
 
-	pin_count = ACCESS_ONCE(bo->pin_count);
+	pin_count = READ_ONCE(bo->pin_count);
 	if (pin_count)
 		seq_printf(m, " pin count %d", pin_count);
 	seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 38cea6fb25a8..a25f6c72f219 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -187,7 +187,7 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
 	if (kfifo_is_empty(&entity->job_queue))
 		return false;
 
-	if (ACCESS_ONCE(entity->dependency))
+	if (READ_ONCE(entity->dependency))
 		return false;
 
 	return true;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 3386452bd2f0..cf3deb283da5 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -451,7 +451,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 	else
 		r = 0;
 
-	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
 	args->domain = radeon_mem_type_to_domain(cur_placement);
 	drm_gem_object_put_unlocked(gobj);
 	return r;
@@ -481,7 +481,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 		r = ret;
 
 	/* Flush HDP cache via MMIO if necessary */
-	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
 	if (rdev->asic->mmio_hdp_flush &&
 	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
 		robj->rdev->asic->mmio_hdp_flush(rdev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index a552e4ea5440..6ac094ee8983 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -904,7 +904,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
 	if (unlikely(drm_is_render_client(file_priv)))
 		require_exist = true;
 
-	if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
+	if (READ_ONCE(vmw_fpriv(file_priv)->locked_master)) {
 		DRM_ERROR("Locked master refused legacy "
 			  "surface reference.\n");
 		return -EACCES;