Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  |  63
1 file changed, 24 insertions(+), 39 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 6096effd6a56..3afcf0237c25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -439,8 +439,18 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
 	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
 }
 
-static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
+/**
+ * cs_parser_fini() - clean parser states
+ * @parser: parser structure holding parsing context.
+ * @error: error number
+ *
+ * If error is set than unvalidate buffer, otherwise just free memory
+ * used by parsing context.
+ **/
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
 {
+	unsigned i;
+
 	if (!error) {
 		/* Sort the buffer list from the smallest to largest buffer,
 		 * which affects the order of buffers in the LRU list.
@@ -455,17 +465,13 @@ static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int err
 		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
 
 		ttm_eu_fence_buffer_objects(&parser->ticket,
 					    &parser->validated,
-					    &parser->ibs[parser->num_ibs-1].fence->base);
+					    parser->fence);
 	} else if (backoff) {
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
 	}
-}
-
-static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
-{
-	unsigned i;
+	fence_put(parser->fence);
 
 	if (parser->ctx)
 		amdgpu_ctx_put(parser->ctx);
@@ -484,20 +490,6 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
 		drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
 }
 
-/**
- * cs_parser_fini() - clean parser states
- * @parser: parser structure holding parsing context.
- * @error: error number
- *
- * If error is set than unvalidate buffer, otherwise just free memory
- * used by parsing context.
- **/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
-{
-	amdgpu_cs_parser_fini_early(parser, error, backoff);
-	amdgpu_cs_parser_fini_late(parser);
-}
-
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 				   struct amdgpu_vm *vm)
 {
@@ -582,15 +574,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 	}
 
 	r = amdgpu_bo_vm_update_pte(parser, vm);
-	if (r) {
-		goto out;
-	}
-	amdgpu_cs_sync_rings(parser);
-	if (!amdgpu_enable_scheduler)
-		r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
-				       parser->filp);
+	if (!r)
+		amdgpu_cs_sync_rings(parser);
 
-out:
 	return r;
 }
 
@@ -881,7 +867,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 			goto out;
 		}
 		job->base.s_fence = fence;
-		fence_get(&fence->base);
+		parser.fence = fence_get(&fence->base);
 
 		cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring,
 						      &fence->base);
@@ -890,17 +876,16 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		trace_amdgpu_cs_ioctl(job);
 		amd_sched_entity_push_job(&job->base);
 
-		list_sort(NULL, &parser.validated, cmp_size_smaller_first);
-		ttm_eu_fence_buffer_objects(&parser.ticket, &parser.validated,
-					    &fence->base);
-		fence_put(&fence->base);
+	} else {
+		struct amdgpu_fence *fence;
 
-		amdgpu_cs_parser_fini_late(&parser);
-		mutex_unlock(&vm->mutex);
-		return 0;
+		r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs,
+				       parser.filp);
+		fence = parser.ibs[parser.num_ibs - 1].fence;
+		parser.fence = fence_get(&fence->base);
+		cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence;
 	}
 
-	cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence;
 out:
 	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
 	mutex_unlock(&vm->mutex);
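
The net effect of the hunks above is that fence lifetime is owned by the parser: both the scheduler and the non-scheduler submission paths take a reference with fence_get() and store it in parser.fence, and amdgpu_cs_parser_fini() drops that reference with fence_put() on the success and error paths alike. The sketch below only illustrates that get/put contract with simplified stand-in types (toy_fence, toy_parser and the toy_* helpers are hypothetical, not the kernel structures or APIs used in the patch).

/*
 * Illustrative sketch, not part of the patch: one reference is taken when
 * the fence is stored in the parser, and the cleanup path drops it exactly
 * once regardless of whether submission succeeded.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_fence {
	int refcount;
};

static struct toy_fence *toy_fence_get(struct toy_fence *f)
{
	if (f)
		f->refcount++;
	return f;
}

static void toy_fence_put(struct toy_fence *f)
{
	if (f && --f->refcount == 0) {
		printf("fence released\n");
		free(f);
	}
}

struct toy_parser {
	struct toy_fence *fence;	/* reference owned by the parser */
};

/* Mirrors the cleanup shape: drop the parser's fence reference once. */
static void toy_parser_fini(struct toy_parser *p, int error)
{
	if (error)
		printf("error path: backing off reservations\n");
	toy_fence_put(p->fence);
	p->fence = NULL;
}

int main(void)
{
	struct toy_parser parser = { NULL };
	struct toy_fence *fence = calloc(1, sizeof(*fence));

	fence->refcount = 1;			/* creator's reference */
	parser.fence = toy_fence_get(fence);	/* parser's reference */

	toy_fence_put(fence);		/* creator is done with it */
	toy_parser_fini(&parser, 0);	/* parser drops its reference */
	return 0;
}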