Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 87 +++++++-----------------------
 1 file changed, 18 insertions(+), 69 deletions(-)
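
The patch replaces the open-coded allocation and TTM reservation in the VCN decode message helpers with amdgpu_bo_create_reserved(), which hands back a buffer object that is already validated/pinned, reserved and CPU-mapped. As a minimal sketch of the pattern being collapsed, assuming the amdgpu helper signatures of this kernel generation (the error labels here are illustrative, not from the patch):

	/* Before: three separate steps, each needing its own unwind path. */
	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, 0, &bo);
	if (r)
		return r;
	r = amdgpu_bo_reserve(bo, false);	/* lock the BO's reservation object */
	if (r)
		goto err_unref;
	r = amdgpu_bo_kmap(bo, (void **)&msg);	/* CPU mapping for the message */
	if (r)
		goto err_unreserve;

	/* After: one call allocates, pins, reserves and maps the BO. */
	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);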
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 837962118dbc..58e495330b38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -270,34 +270,17 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 	return r;
 }
 
-static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
-				   bool direct, struct dma_fence **fence)
+static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
+				   struct amdgpu_bo *bo, bool direct,
+				   struct dma_fence **fence)
 {
-	struct ttm_operation_ctx ctx = { true, false };
-	struct ttm_validate_buffer tv;
-	struct ww_acquire_ctx ticket;
-	struct list_head head;
+	struct amdgpu_device *adev = ring->adev;
+	struct dma_fence *f = NULL;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
-	struct dma_fence *f = NULL;
-	struct amdgpu_device *adev = ring->adev;
 	uint64_t addr;
 	int i, r;
 
-	memset(&tv, 0, sizeof(tv));
-	tv.bo = &bo->tbo;
-
-	INIT_LIST_HEAD(&head);
-	list_add(&tv.head, &head);
-
-	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
-	if (r)
-		return r;
-
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-	if (r)
-		goto err;
-
 	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
 	if (r)
 		goto err;
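
With the buffer already reserved at creation time, the ww_acquire ticket, validation list and explicit ttm_bo_validate() become dead weight: that machinery exists to lock and place several buffers atomically, while amdgpu_vcn_dec_send_msg() only ever touches the single message BO. A sketch of the single-BO equivalent, assuming the amdgpu_bo_reserve()/amdgpu_bo_unreserve() inline helpers:

	r = amdgpu_bo_reserve(bo, false);	/* take the per-BO reservation lock */
	if (r)
		return r;
	/* ... use the buffer ... */
	amdgpu_bo_unreserve(bo);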
@@ -330,11 +313,12 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *b
 		goto err_free;
 	}
 
-	ttm_eu_fence_buffer_objects(&ticket, &head, f);
+	amdgpu_bo_fence(bo, f, false);
+	amdgpu_bo_unreserve(bo);
+	amdgpu_bo_unref(&bo);
 
 	if (fence)
 		*fence = dma_fence_get(f);
-	amdgpu_bo_unref(&bo);
 	dma_fence_put(f);
 
 	return 0;
@@ -343,7 +327,8 @@ err_free:
 	amdgpu_job_free(job);
 
 err:
-	ttm_eu_backoff_reservation(&ticket, &head);
+	amdgpu_bo_unreserve(bo);
+	amdgpu_bo_unref(&bo);
 	return r;
 }
 
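
The contract of amdgpu_vcn_dec_send_msg() changes accordingly: it now owns the reserved BO and releases it on every exit. On success the job's fence is attached before the reservation is dropped, so the memory cannot be evicted or freed under the still-running decode job; on error the err label performs the same unreserve/unref pair that ttm_eu_backoff_reservation() used to do for the whole list. A condensed sketch of the new contract (comments are mine):

	/* success: publish the fence, then release the lock and the reference */
	amdgpu_bo_fence(bo, f, false);	/* false: attach as exclusive fence */
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	...
err:
	/* failure: same release, just with no fence to publish */
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;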
@@ -351,31 +336,16 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 			      struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_bo *bo;
+	struct amdgpu_bo *bo = NULL;
 	uint32_t *msg;
 	int r, i;
 
-	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-			     NULL, NULL, 0, &bo);
+	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &bo, NULL, (void **)&msg);
 	if (r)
 		return r;
 
-	r = amdgpu_bo_reserve(bo, false);
-	if (r) {
-		amdgpu_bo_unref(&bo);
-		return r;
-	}
-
-	r = amdgpu_bo_kmap(bo, (void **)&msg);
-	if (r) {
-		amdgpu_bo_unreserve(bo);
-		amdgpu_bo_unref(&bo);
-		return r;
-	}
-
 	msg[0] = cpu_to_le32(0x00000028);
 	msg[1] = cpu_to_le32(0x00000038);
 	msg[2] = cpu_to_le32(0x00000001);
@@ -393,9 +363,6 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 	for (i = 14; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	amdgpu_bo_kunmap(bo);
-	amdgpu_bo_unreserve(bo);
-
 	return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
 }
 
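
The destroy-message path below receives the identical conversion; only the message payload differs. Since both helpers now share exactly the same allocation boilerplate, they could in principle be folded into one small helper. A hypothetical consolidation, not part of this patch (the function name is invented for illustration):

	static int amdgpu_vcn_dec_alloc_msg(struct amdgpu_device *adev,
					    struct amdgpu_bo **bo, uint32_t **msg)
	{
		/* 1 KiB message buffer in VRAM, returned reserved and mapped */
		return amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
						 AMDGPU_GEM_DOMAIN_VRAM,
						 bo, NULL, (void **)msg);
	}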
@@ -403,31 +370,16 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 			       bool direct, struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_bo *bo;
+	struct amdgpu_bo *bo = NULL;
 	uint32_t *msg;
 	int r, i;
 
-	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-			     NULL, NULL, 0, &bo);
+	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &bo, NULL, (void **)&msg);
 	if (r)
 		return r;
 
-	r = amdgpu_bo_reserve(bo, false);
-	if (r) {
-		amdgpu_bo_unref(&bo);
-		return r;
-	}
-
-	r = amdgpu_bo_kmap(bo, (void **)&msg);
-	if (r) {
-		amdgpu_bo_unreserve(bo);
-		amdgpu_bo_unref(&bo);
-		return r;
-	}
-
 	msg[0] = cpu_to_le32(0x00000028);
 	msg[1] = cpu_to_le32(0x00000018);
 	msg[2] = cpu_to_le32(0x00000000);
@@ -437,9 +389,6 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 	for (i = 6; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	amdgpu_bo_kunmap(bo);
-	amdgpu_bo_unreserve(bo);
-
 	return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
 }
 
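Net effect: the explicit kunmap/unreserve pair before submission is gone, and the reservation taken by amdgpu_bo_create_reserved() is handed over to amdgpu_vcn_dec_send_msg(), which releases it on all paths. From the caller's perspective the flow shrinks to allocate, fill, send; a sketch under the same assumptions as above:

	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	/* ... fill msg[0..1023] with the decode create/destroy message ... */

	/* send_msg consumes bo: it fences, unreserves and unrefs it */
	return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);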