path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
author	Paolo Bonzini <pbonzini@redhat.com>	2015-11-24 13:34:40 -0500
committer	Paolo Bonzini <pbonzini@redhat.com>	2015-11-24 13:34:40 -0500
commit	8bd142c01648cdb33e9bcafa0448ba2c20ed814c (patch)
tree	9197c60d3f9d4036f38f281a183e94750ceea1d7 /drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
parent	d792abacaf1a1a8dfea353fab699b97fa6251c2a (diff)
parent	fbb4574ce9a37e15a9872860bf202f2be5bdf6c4 (diff)
Merge tag 'kvm-arm-for-v4.4-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-master
KVM/ARM Fixes for v4.4-rc3. Includes some timer fixes, properly unmapping PTEs, an errata fix, and two tweaks to the EL2 panic code.
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c	| 177
1 file changed, 72 insertions(+), 105 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index dfc4d02c7a38..3afcf0237c25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -127,30 +127,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 	return 0;
 }
 
-struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
-						 struct drm_file *filp,
-						 struct amdgpu_ctx *ctx,
-						 struct amdgpu_ib *ibs,
-						 uint32_t num_ibs)
-{
-	struct amdgpu_cs_parser *parser;
-	int i;
-
-	parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
-	if (!parser)
-		return NULL;
-
-	parser->adev = adev;
-	parser->filp = filp;
-	parser->ctx = ctx;
-	parser->ibs = ibs;
-	parser->num_ibs = num_ibs;
-	for (i = 0; i < num_ibs; i++)
-		ibs[i].ctx = ctx;
-
-	return parser;
-}
-
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	union drm_amdgpu_cs *cs = data;
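The constructor removed here existed only to kzalloc() and seed a parser; the amdgpu_cs_ioctl hunk further down replaces it with a zero-initialized parser on the ioctl's own stack, eliminating an allocation-failure path and the matching kfree(). A minimal stand-alone C sketch of that on-stack pattern, with a hypothetical parser_model struct standing in for struct amdgpu_cs_parser:

	#include <stdio.h>

	struct parser_model {            /* hypothetical stand-in for amdgpu_cs_parser */
		void *adev;
		void *filp;
	};

	static int submit(void *adev, void *filp)
	{
		struct parser_model p = {};  /* zeroed on the stack, like "parser = {}" */

		p.adev = adev;               /* seeded field by field; no create helper */
		p.filp = filp;
		printf("parser ready: %p %p\n", p.adev, p.filp);
		return 0;                    /* nothing to free on any exit path */
	}

	int main(void)
	{
		int a, b;

		return submit(&a, &b);
	}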
@@ -463,8 +439,18 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
 	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
 }
 
-static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
+/**
+ * cs_parser_fini() - clean parser states
+ * @parser:	parser structure holding parsing context.
+ * @error:	error number
+ *
+ * If error is set than unvalidate buffer, otherwise just free memory
+ * used by parsing context.
+ **/
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
 {
+	unsigned i;
+
 	if (!error) {
 		/* Sort the buffer list from the smallest to largest buffer,
 		 * which affects the order of buffers in the LRU list.
@@ -479,17 +465,14 @@ static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int err
 		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
 
 		ttm_eu_fence_buffer_objects(&parser->ticket,
 					    &parser->validated,
-					    &parser->ibs[parser->num_ibs-1].fence->base);
+					    parser->fence);
 	} else if (backoff) {
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
 	}
-}
+	fence_put(parser->fence);
 
-static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
-{
-	unsigned i;
 	if (parser->ctx)
 		amdgpu_ctx_put(parser->ctx);
 	if (parser->bo_list)
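With the early/late split gone, the parser keeps one counted fence reference in parser->fence: both submission paths in the ioctl hunk below install it via fence_get(), and the unconditional fence_put() added here releases it. fence_put() tolerates a NULL fence, which is what makes the unconditional call safe on error paths where no fence was ever installed (the on-stack parser starts zeroed). A stand-alone C model of that get/put discipline, using plain C11 atomics and free() in place of the kernel's kref machinery:

	#include <stdatomic.h>
	#include <stdlib.h>

	struct fence_model {                    /* stand-in for the kernel's fence */
		atomic_int refcount;
	};

	static struct fence_model *fence_model_get(struct fence_model *f)
	{
		if (f)
			atomic_fetch_add(&f->refcount, 1);
		return f;
	}

	static void fence_model_put(struct fence_model *f)
	{
		if (f && atomic_fetch_sub(&f->refcount, 1) == 1)
			free(f);                /* last reference frees the object */
	}

	int main(void)
	{
		struct fence_model *f = malloc(sizeof(*f));

		atomic_init(&f->refcount, 1);   /* creation returns one reference    */
		fence_model_get(f);             /* parser.fence = fence_get(...)     */
		fence_model_put(f);             /* fence_put(parser->fence) in fini  */
		fence_model_put(f);             /* creator's put; count hits zero    */
		fence_model_put(NULL);          /* NULL-safe, as on early error exit */
		return 0;
	}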
@@ -499,31 +482,12 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
 	for (i = 0; i < parser->nchunks; i++)
 		drm_free_large(parser->chunks[i].kdata);
 	kfree(parser->chunks);
-	if (!amdgpu_enable_scheduler)
-	{
-		if (parser->ibs)
-			for (i = 0; i < parser->num_ibs; i++)
-				amdgpu_ib_free(parser->adev, &parser->ibs[i]);
-		kfree(parser->ibs);
-		if (parser->uf.bo)
-			drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
-	}
-
-	kfree(parser);
-}
-
-/**
- * cs_parser_fini() - clean parser states
- * @parser:	parser structure holding parsing context.
- * @error:	error number
- *
- * If error is set than unvalidate buffer, otherwise just free memory
- * used by parsing context.
- **/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
-{
-	amdgpu_cs_parser_fini_early(parser, error, backoff);
-	amdgpu_cs_parser_fini_late(parser);
+	if (parser->ibs)
+		for (i = 0; i < parser->num_ibs; i++)
+			amdgpu_ib_free(parser->adev, &parser->ibs[i]);
+	kfree(parser->ibs);
+	if (parser->uf.bo)
+		drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
 }
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
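Taken together, the three hunks above fold amdgpu_cs_parser_fini_early()/_late() into the single amdgpu_cs_parser_fini() described by the new kernel-doc: fence the validated buffers on success, back off the reservations on error, then drop the parser's references either way. A compact sketch of that one-entry-point teardown shape; the helper names are illustrative, not amdgpu symbols:

	#include <stdbool.h>

	struct parser_model;                            /* hypothetical parser type */

	void publish_fence(struct parser_model *p);     /* illustrative helpers,    */
	void undo_reservations(struct parser_model *p); /* not amdgpu functions     */
	void drop_references(struct parser_model *p);

	/* One teardown entry point: (error, backoff) pick the path, and the
	 * reference drops at the end run on every call. */
	void parser_fini_model(struct parser_model *p, int error, bool backoff)
	{
		if (!error)
			publish_fence(p);         /* success: fence validated BOs    */
		else if (backoff)
			undo_reservations(p);     /* reserved, then failed: back off */

		drop_references(p);               /* always: ctx, BO list, chunks, IBs */
	}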
@@ -610,15 +574,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 	}
 
 	r = amdgpu_bo_vm_update_pte(parser, vm);
-	if (r) {
-		goto out;
-	}
-	amdgpu_cs_sync_rings(parser);
-	if (!amdgpu_enable_scheduler)
-		r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
-				       parser->filp);
+	if (!r)
+		amdgpu_cs_sync_rings(parser);
 
-out:
 	return r;
 }
 
@@ -828,36 +786,36 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	union drm_amdgpu_cs *cs = data;
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
-	struct amdgpu_cs_parser *parser;
+	struct amdgpu_cs_parser parser = {};
 	bool reserved_buffers = false;
 	int i, r;
 
 	if (!adev->accel_working)
 		return -EBUSY;
 
-	parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
-	if (!parser)
-		return -ENOMEM;
-	r = amdgpu_cs_parser_init(parser, data);
+	parser.adev = adev;
+	parser.filp = filp;
+
+	r = amdgpu_cs_parser_init(&parser, data);
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
-		amdgpu_cs_parser_fini(parser, r, false);
+		amdgpu_cs_parser_fini(&parser, r, false);
 		r = amdgpu_cs_handle_lockup(adev, r);
 		return r;
 	}
 	mutex_lock(&vm->mutex);
-	r = amdgpu_cs_parser_relocs(parser);
+	r = amdgpu_cs_parser_relocs(&parser);
 	if (r == -ENOMEM)
 		DRM_ERROR("Not enough memory for command submission!\n");
 	else if (r && r != -ERESTARTSYS)
 		DRM_ERROR("Failed to process the buffer list %d!\n", r);
 	else if (!r) {
 		reserved_buffers = true;
-		r = amdgpu_cs_ib_fill(adev, parser);
+		r = amdgpu_cs_ib_fill(adev, &parser);
 	}
 
 	if (!r) {
-		r = amdgpu_cs_dependencies(adev, parser);
+		r = amdgpu_cs_dependencies(adev, &parser);
 		if (r)
 			DRM_ERROR("Failed in the dependencies handling %d!\n", r);
 	}
@@ -865,62 +823,71 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	if (r)
 		goto out;
 
-	for (i = 0; i < parser->num_ibs; i++)
-		trace_amdgpu_cs(parser, i);
+	for (i = 0; i < parser.num_ibs; i++)
+		trace_amdgpu_cs(&parser, i);
 
-	r = amdgpu_cs_ib_vm_chunk(adev, parser);
+	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
 	if (r)
 		goto out;
 
-	if (amdgpu_enable_scheduler && parser->num_ibs) {
+	if (amdgpu_enable_scheduler && parser.num_ibs) {
+		struct amdgpu_ring * ring = parser.ibs->ring;
+		struct amd_sched_fence *fence;
 		struct amdgpu_job *job;
-		struct amdgpu_ring * ring = parser->ibs->ring;
+
 		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
 		if (!job) {
 			r = -ENOMEM;
 			goto out;
 		}
+
 		job->base.sched = &ring->sched;
-		job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
-		job->adev = parser->adev;
-		job->ibs = parser->ibs;
-		job->num_ibs = parser->num_ibs;
-		job->base.owner = parser->filp;
-		mutex_init(&job->job_lock);
+		job->base.s_entity = &parser.ctx->rings[ring->idx].entity;
+		job->adev = parser.adev;
+		job->owner = parser.filp;
+		job->free_job = amdgpu_cs_free_job;
+
+		job->ibs = parser.ibs;
+		job->num_ibs = parser.num_ibs;
+		parser.ibs = NULL;
+		parser.num_ibs = 0;
+
 		if (job->ibs[job->num_ibs - 1].user) {
-			memcpy(&job->uf, &parser->uf,
-			       sizeof(struct amdgpu_user_fence));
+			job->uf = parser.uf;
 			job->ibs[job->num_ibs - 1].user = &job->uf;
+			parser.uf.bo = NULL;
 		}
 
-		job->free_job = amdgpu_cs_free_job;
-		mutex_lock(&job->job_lock);
-		r = amd_sched_entity_push_job(&job->base);
-		if (r) {
-			mutex_unlock(&job->job_lock);
+		fence = amd_sched_fence_create(job->base.s_entity,
+					       parser.filp);
+		if (!fence) {
+			r = -ENOMEM;
 			amdgpu_cs_free_job(job);
 			kfree(job);
 			goto out;
 		}
-		cs->out.handle =
-			amdgpu_ctx_add_fence(parser->ctx, ring,
-					     &job->base.s_fence->base);
-		parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle;
+		job->base.s_fence = fence;
+		parser.fence = fence_get(&fence->base);
 
-		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
-		ttm_eu_fence_buffer_objects(&parser->ticket,
-					    &parser->validated,
-					    &job->base.s_fence->base);
+		cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring,
+						      &fence->base);
+		job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
 
-		mutex_unlock(&job->job_lock);
-		amdgpu_cs_parser_fini_late(parser);
-		mutex_unlock(&vm->mutex);
-		return 0;
+		trace_amdgpu_cs_ioctl(job);
+		amd_sched_entity_push_job(&job->base);
+
+	} else {
+		struct amdgpu_fence *fence;
+
+		r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs,
+				       parser.filp);
+		fence = parser.ibs[parser.num_ibs - 1].fence;
+		parser.fence = fence_get(&fence->base);
+		cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence;
 	}
 
-	cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
 out:
-	amdgpu_cs_parser_fini(parser, r, reserved_buffers);
+	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
 	mutex_unlock(&vm->mutex);
 	r = amdgpu_cs_handle_lockup(adev, r);
 	return r;
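
Because amdgpu_cs_parser_fini() now runs at out: on every path, including scheduler submissions that previously returned early, the scheduler branch has to move the IB array out of the parser before the job takes it over: once parser.ibs is NULLed and parser.num_ibs zeroed (and parser.uf.bo cleared), the final fini cannot free what the job now owns. A stand-alone C model of that ownership hand-off:

	#include <stdlib.h>

	struct ib_owner {                   /* models parser/job IB ownership */
		int *ibs;
	};

	/* Transfer ownership: afterwards only dst may free the array. */
	static void move_ibs(struct ib_owner *dst, struct ib_owner *src)
	{
		dst->ibs = src->ibs;
		src->ibs = NULL;            /* mirrors "parser.ibs = NULL" above */
	}

	/* Teardown that is safe on either side after the move. */
	static void ib_owner_fini(struct ib_owner *o)
	{
		free(o->ibs);               /* free(NULL) is a no-op, like kfree() */
		o->ibs = NULL;
	}

	int main(void)
	{
		struct ib_owner parser = { malloc(4 * sizeof(int)) };
		struct ib_owner job = { NULL };

		move_ibs(&job, &parser);
		ib_owner_fini(&parser);     /* frees nothing: ownership moved   */
		ib_owner_fini(&job);        /* frees the array exactly once     */
		return 0;
	}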