path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
author		Dave Airlie <airlied@redhat.com>	2016-10-27 21:33:52 -0400
committer	Dave Airlie <airlied@redhat.com>	2016-10-27 21:33:52 -0400
commit		220196b38483be6d84a295d318d48595f65da443 (patch)
tree		f91c2e6e64ef59afdc075d843d51f23369e9164a /drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
parent		a1873c62710b23e9afbd2faeed5f28649cbe4739 (diff)
parent		56df51d003203f1c3a8eab05605973515aa15feb (diff)
Merge tag 'topic/drm-misc-2016-10-27' of git://anongit.freedesktop.org/git/drm-intel into drm-next
Pull request already again to get the s/fence/dma_fence/ stuff in and allow everyone to resync. Otherwise really just misc stuff all over, and a new bridge driver.

* tag 'topic/drm-misc-2016-10-27' of git://anongit.freedesktop.org/git/drm-intel:
  drm/bridge: fix platform_no_drv_owner.cocci warnings
  drm/bridge: fix semicolon.cocci warnings
  drm: Print some debug/error info during DP dual mode detect
  drm: mark drm_of_component_match_add dummy inline
  drm/bridge: add Silicon Image SiI8620 driver
  dt-bindings: add Silicon Image SiI8620 bridge bindings
  video: add header file for Mobile High-Definition Link (MHL) interface
  drm: convert DT component matching to component_match_add_release()
  dma-buf: Rename struct fence to dma_fence
  dma-buf/fence: add an lockdep_assert_held()
  drm/dp: Factor out helper to distinguish between branch and sink devices
  drm/edid: Only print the bad edid when aborting
  drm/msm: add missing header dependencies
  drm/msm/adreno: move function declarations to header file
  drm/i2c/tda998x: mark symbol static where possible
  doc: add missing docbook parameter for fence-array
  drm: RIP mode_config->rotation_property
  drm/msm/mdp5: Advertize 180 degree rotation
  drm/msm/mdp5: Use per-plane rotation property
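For context on the s/fence/dma_fence/ change this pull brings in: the reference-counting and wait helpers keep their old signatures and only gain the dma_ prefix (fence_get/fence_put/fence_wait_timeout become dma_fence_get/dma_fence_put/dma_fence_wait_timeout, struct fence becomes struct dma_fence, and linux/fence.h becomes linux/dma-fence.h). Below is a minimal sketch of the renamed calls, mirroring the slot-replacement pattern in amdgpu_ctx_add_fence further down; the helper name example_replace_slot is made up for illustration and is not part of the commit.

/* Sketch only, not from the commit: shows the renamed dma_fence API. */
#include <linux/dma-fence.h>
#include <linux/printk.h>
#include <linux/sched.h>

static void example_replace_slot(struct dma_fence **slot,
				 struct dma_fence *fence)
{
	struct dma_fence *old = *slot;

	if (old) {
		/* Block until the fence previously stored here has signaled. */
		signed long r = dma_fence_wait_timeout(old, false,
						       MAX_SCHEDULE_TIMEOUT);
		if (r < 0)
			pr_err("Error (%ld) waiting for fence!\n", r);
	}

	*slot = dma_fence_get(fence);	/* take a reference on the new fence */
	dma_fence_put(old);		/* dma_fence_put(NULL) is a no-op */
}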
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c	22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 6d86eaef934c..400c66ba4c6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -35,7 +35,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
 	kref_init(&ctx->refcount);
 	spin_lock_init(&ctx->ring_lock);
 	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
-			      sizeof(struct fence*), GFP_KERNEL);
+			      sizeof(struct dma_fence*), GFP_KERNEL);
 	if (!ctx->fences)
 		return -ENOMEM;
 
@@ -79,7 +79,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		for (j = 0; j < amdgpu_sched_jobs; ++j)
-			fence_put(ctx->rings[i].fences[j]);
+			dma_fence_put(ctx->rings[i].fences[j]);
 	kfree(ctx->fences);
 	ctx->fences = NULL;
 
@@ -241,39 +241,39 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 }
 
 uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-			      struct fence *fence)
+			      struct dma_fence *fence)
 {
 	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
 	uint64_t seq = cring->sequence;
 	unsigned idx = 0;
-	struct fence *other = NULL;
+	struct dma_fence *other = NULL;
 
 	idx = seq & (amdgpu_sched_jobs - 1);
 	other = cring->fences[idx];
 	if (other) {
 		signed long r;
-		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+		r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
 		if (r < 0)
 			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
 	}
 
-	fence_get(fence);
+	dma_fence_get(fence);
 
 	spin_lock(&ctx->ring_lock);
 	cring->fences[idx] = fence;
 	cring->sequence++;
 	spin_unlock(&ctx->ring_lock);
 
-	fence_put(other);
+	dma_fence_put(other);
 
 	return seq;
 }
 
-struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
-				   struct amdgpu_ring *ring, uint64_t seq)
+struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+				       struct amdgpu_ring *ring, uint64_t seq)
 {
 	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
-	struct fence *fence;
+	struct dma_fence *fence;
 
 	spin_lock(&ctx->ring_lock);
 
@@ -288,7 +288,7 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 		return NULL;
 	}
 
-	fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
+	fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
 	spin_unlock(&ctx->ring_lock);
 
 	return fence;
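One detail worth noting in amdgpu_ctx_add_fence above: each context ring keeps a fixed-size fences array and picks a slot with seq & (amdgpu_sched_jobs - 1). This relies on amdgpu_sched_jobs being a power of two, so the mask is equivalent to seq % amdgpu_sched_jobs and the array behaves as a circular buffer keyed by sequence number, with the previous occupant waited on and released before the slot is reused. A standalone sketch of that equivalence follows; the SLOTS value of 32 is only an assumed example, not taken from this commit.

/* Sketch only: mask-based slot selection for a power-of-two ring size. */
#include <assert.h>
#include <stdint.h>

#define SLOTS 32	/* assumed power-of-two slot count, in the spirit of amdgpu_sched_jobs */

/* For a power-of-two SLOTS, masking is the same as taking seq modulo SLOTS. */
static unsigned int slot_for_seq(uint64_t seq)
{
	return (unsigned int)(seq & (SLOTS - 1));
}

int main(void)
{
	for (uint64_t seq = 0; seq < 1024; seq++)
		assert(slot_for_seq(seq) == seq % SLOTS);
	return 0;
}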