author	Mark Brown <broonie@kernel.org>	2015-10-12 13:09:27 -0400
committer	Mark Brown <broonie@kernel.org>	2015-10-12 13:09:27 -0400
commit	79828b4fa835f73cdaf4bffa48696abdcbea9d02 (patch)
tree	5e0fa7156acb75ba603022bc807df8f2fedb97a8 /drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
parent	721b51fcf91898299d96f4b72cb9434cda29dce6 (diff)
parent	8c1a9d6323abf0fb1e5dad96cf3f1c783505ea5a (diff)
Merge remote-tracking branch 'asoc/fix/rt5645' into asoc-fix-rt5645
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c	282
1 file changed, 200 insertions, 82 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d63135bf29c0..3b355aeb62fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -126,6 +126,30 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 	return 0;
 }
 
+struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
+						 struct drm_file *filp,
+						 struct amdgpu_ctx *ctx,
+						 struct amdgpu_ib *ibs,
+						 uint32_t num_ibs)
+{
+	struct amdgpu_cs_parser *parser;
+	int i;
+
+	parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
+	if (!parser)
+		return NULL;
+
+	parser->adev = adev;
+	parser->filp = filp;
+	parser->ctx = ctx;
+	parser->ibs = ibs;
+	parser->num_ibs = num_ibs;
+	for (i = 0; i < num_ibs; i++)
+		ibs[i].ctx = ctx;
+
+	return parser;
+}
+
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	union drm_amdgpu_cs *cs = data;
@@ -147,13 +171,13 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 
 	/* get chunks */
 	INIT_LIST_HEAD(&p->validated);
-	chunk_array = kcalloc(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
 	if (chunk_array == NULL) {
 		r = -ENOMEM;
 		goto out;
 	}
 
-	chunk_array_user = (uint64_t *)(unsigned long)(cs->in.chunks);
+	chunk_array_user = (uint64_t __user *)(cs->in.chunks);
 	if (copy_from_user(chunk_array, chunk_array_user,
 			   sizeof(uint64_t)*cs->in.num_chunks)) {
 		r = -EFAULT;
@@ -161,7 +185,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 	}
 
 	p->nchunks = cs->in.num_chunks;
-	p->chunks = kcalloc(p->nchunks, sizeof(struct amdgpu_cs_chunk),
+	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
 			    GFP_KERNEL);
 	if (p->chunks == NULL) {
 		r = -ENOMEM;
@@ -173,7 +197,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		struct drm_amdgpu_cs_chunk user_chunk;
 		uint32_t __user *cdata;
 
-		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
+		chunk_ptr = (void __user *)chunk_array[i];
 		if (copy_from_user(&user_chunk, chunk_ptr,
 				       sizeof(struct drm_amdgpu_cs_chunk))) {
 			r = -EFAULT;
@@ -183,7 +207,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		p->chunks[i].length_dw = user_chunk.length_dw;
 
 		size = p->chunks[i].length_dw;
-		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
+		cdata = (void __user *)user_chunk.chunk_data;
 		p->chunks[i].user_ptr = cdata;
 
 		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
@@ -235,11 +259,10 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		}
 	}
 
+
 	p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
-	if (!p->ibs) {
+	if (!p->ibs)
 		r = -ENOMEM;
-		goto out;
-	}
 
 out:
 	kfree(chunk_array);
@@ -331,7 +354,7 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
 		 * into account. We don't want to disallow buffer moves
 		 * completely.
 		 */
-		if (current_domain != AMDGPU_GEM_DOMAIN_CPU &&
+		if ((lobj->allowed_domains & current_domain) != 0 &&
 		    (domain & current_domain) == 0 && /* will be moved */
 		    bytes_moved > bytes_moved_threshold) {
 			/* don't move it */
@@ -415,18 +438,8 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
 	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
 }
 
-/**
- * cs_parser_fini() - clean parser states
- * @parser: parser structure holding parsing context.
- * @error: error number
- *
- * If error is set than unvalidate buffer, otherwise just free memory
- * used by parsing context.
- **/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
+static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
 {
-	unsigned i;
-
 	if (!error) {
 		/* Sort the buffer list from the smallest to largest buffer,
 		 * which affects the order of buffers in the LRU list.
@@ -447,21 +460,45 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
 	}
+}
 
+static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
+{
+	unsigned i;
 	if (parser->ctx)
 		amdgpu_ctx_put(parser->ctx);
 	if (parser->bo_list)
 		amdgpu_bo_list_put(parser->bo_list);
+
 	drm_free_large(parser->vm_bos);
 	for (i = 0; i < parser->nchunks; i++)
 		drm_free_large(parser->chunks[i].kdata);
 	kfree(parser->chunks);
-	if (parser->ibs)
-		for (i = 0; i < parser->num_ibs; i++)
-			amdgpu_ib_free(parser->adev, &parser->ibs[i]);
-	kfree(parser->ibs);
-	if (parser->uf.bo)
-		drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+	if (!amdgpu_enable_scheduler)
+	{
+		if (parser->ibs)
+			for (i = 0; i < parser->num_ibs; i++)
+				amdgpu_ib_free(parser->adev, &parser->ibs[i]);
+		kfree(parser->ibs);
+		if (parser->uf.bo)
+			drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+	}
+
+	kfree(parser);
+}
+
+/**
+ * cs_parser_fini() - clean parser states
+ * @parser: parser structure holding parsing context.
+ * @error: error number
+ *
+ * If error is set than unvalidate buffer, otherwise just free memory
+ * used by parsing context.
+ **/
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
+{
+	amdgpu_cs_parser_fini_early(parser, error, backoff);
+	amdgpu_cs_parser_fini_late(parser);
 }
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@@ -476,12 +513,18 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 	if (r)
 		return r;
 
+	r = amdgpu_sync_fence(adev, &p->ibs[0].sync, vm->page_directory_fence);
+	if (r)
+		return r;
+
 	r = amdgpu_vm_clear_freed(adev, vm);
 	if (r)
 		return r;
 
 	if (p->bo_list) {
 		for (i = 0; i < p->bo_list->num_entries; i++) {
+			struct fence *f;
+
 			/* ignore duplicates */
 			bo = p->bo_list->array[i].robj;
 			if (!bo)
@@ -495,7 +538,10 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 			if (r)
 				return r;
 
-			amdgpu_sync_fence(&p->ibs[0].sync, bo_va->last_pt_update);
+			f = bo_va->last_pt_update;
+			r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
+			if (r)
+				return r;
 		}
 	}
 
@@ -529,9 +575,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 		goto out;
 	}
 	amdgpu_cs_sync_rings(parser);
-
-	r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
-			       parser->filp);
+	if (!amdgpu_enable_scheduler)
+		r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
+				       parser->filp);
 
 out:
 	mutex_unlock(&vm->mutex);
@@ -650,7 +696,6 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 			ib->oa_size = amdgpu_bo_size(oa);
 		}
 	}
-
 	/* wrap the last IB with user fence */
 	if (parser->uf.bo) {
 		struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1];
@@ -669,6 +714,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 				  struct amdgpu_cs_parser *p)
 {
+	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_ib *ib;
 	int i, j, r;
 
@@ -692,8 +738,9 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 				sizeof(struct drm_amdgpu_cs_chunk_dep);
 
 		for (j = 0; j < num_deps; ++j) {
-			struct amdgpu_fence *fence;
 			struct amdgpu_ring *ring;
+			struct amdgpu_ctx *ctx;
+			struct fence *fence;
 
 			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
 					       deps[j].ip_instance,
@@ -701,82 +748,141 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 			if (r)
 				return r;
 
-			r = amdgpu_fence_recreate(ring, p->filp,
-						  deps[j].handle,
-						  &fence);
-			if (r)
+			ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
+			if (ctx == NULL)
+				return -EINVAL;
+
+			fence = amdgpu_ctx_get_fence(ctx, ring,
+						     deps[j].handle);
+			if (IS_ERR(fence)) {
+				r = PTR_ERR(fence);
+				amdgpu_ctx_put(ctx);
 				return r;
 
-			amdgpu_sync_fence(&ib->sync, fence);
-			amdgpu_fence_unref(&fence);
+			} else if (fence) {
+				r = amdgpu_sync_fence(adev, &ib->sync, fence);
+				fence_put(fence);
+				amdgpu_ctx_put(ctx);
+				if (r)
+					return r;
+			}
 		}
 	}
 
 	return 0;
 }
 
+static int amdgpu_cs_free_job(struct amdgpu_job *sched_job)
+{
+	int i;
+	if (sched_job->ibs)
+		for (i = 0; i < sched_job->num_ibs; i++)
+			amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
+	kfree(sched_job->ibs);
+	if (sched_job->uf.bo)
+		drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base);
+	return 0;
+}
+
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct amdgpu_device *adev = dev->dev_private;
 	union drm_amdgpu_cs *cs = data;
-	struct amdgpu_cs_parser parser;
-	int r, i;
+	struct amdgpu_cs_parser *parser;
 	bool reserved_buffers = false;
+	int i, r;
 
 	down_read(&adev->exclusive_lock);
 	if (!adev->accel_working) {
 		up_read(&adev->exclusive_lock);
 		return -EBUSY;
 	}
-	/* initialize parser */
-	memset(&parser, 0, sizeof(struct amdgpu_cs_parser));
-	parser.filp = filp;
-	parser.adev = adev;
-	r = amdgpu_cs_parser_init(&parser, data);
+
+	parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
+	if (!parser)
+		return -ENOMEM;
+	r = amdgpu_cs_parser_init(parser, data);
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
-		amdgpu_cs_parser_fini(&parser, r, false);
+		amdgpu_cs_parser_fini(parser, r, false);
 		up_read(&adev->exclusive_lock);
 		r = amdgpu_cs_handle_lockup(adev, r);
 		return r;
 	}
 
-	r = amdgpu_cs_parser_relocs(&parser);
-	if (r) {
-		if (r != -ERESTARTSYS) {
-			if (r == -ENOMEM)
-				DRM_ERROR("Not enough memory for command submission!\n");
-			else
-				DRM_ERROR("Failed to process the buffer list %d!\n", r);
-		}
+	r = amdgpu_cs_parser_relocs(parser);
+	if (r == -ENOMEM)
+		DRM_ERROR("Not enough memory for command submission!\n");
+	else if (r && r != -ERESTARTSYS)
+		DRM_ERROR("Failed to process the buffer list %d!\n", r);
+	else if (!r) {
+		reserved_buffers = true;
+		r = amdgpu_cs_ib_fill(adev, parser);
 	}
 
 	if (!r) {
-		reserved_buffers = true;
-		r = amdgpu_cs_ib_fill(adev, &parser);
+		r = amdgpu_cs_dependencies(adev, parser);
+		if (r)
+			DRM_ERROR("Failed in the dependencies handling %d!\n", r);
 	}
 
-	if (!r)
-		r = amdgpu_cs_dependencies(adev, &parser);
-
-	if (r) {
-		amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
-		up_read(&adev->exclusive_lock);
-		r = amdgpu_cs_handle_lockup(adev, r);
-		return r;
-	}
+	if (r)
+		goto out;
 
-	for (i = 0; i < parser.num_ibs; i++)
-		trace_amdgpu_cs(&parser, i);
+	for (i = 0; i < parser->num_ibs; i++)
+		trace_amdgpu_cs(parser, i);
 
-	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
-	if (r) {
+	r = amdgpu_cs_ib_vm_chunk(adev, parser);
+	if (r)
 		goto out;
+
+	if (amdgpu_enable_scheduler && parser->num_ibs) {
+		struct amdgpu_job *job;
+		struct amdgpu_ring * ring = parser->ibs->ring;
+		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+		if (!job)
+			return -ENOMEM;
+		job->base.sched = ring->scheduler;
+		job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
+		job->adev = parser->adev;
+		job->ibs = parser->ibs;
+		job->num_ibs = parser->num_ibs;
+		job->base.owner = parser->filp;
+		mutex_init(&job->job_lock);
+		if (job->ibs[job->num_ibs - 1].user) {
+			memcpy(&job->uf, &parser->uf,
+			       sizeof(struct amdgpu_user_fence));
+			job->ibs[job->num_ibs - 1].user = &job->uf;
+		}
+
+		job->free_job = amdgpu_cs_free_job;
+		mutex_lock(&job->job_lock);
+		r = amd_sched_entity_push_job((struct amd_sched_job *)job);
+		if (r) {
+			mutex_unlock(&job->job_lock);
+			amdgpu_cs_free_job(job);
+			kfree(job);
+			goto out;
+		}
+		cs->out.handle =
+			amdgpu_ctx_add_fence(parser->ctx, ring,
+					     &job->base.s_fence->base);
+		parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle;
+
+		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
+		ttm_eu_fence_buffer_objects(&parser->ticket,
+				&parser->validated,
+				&job->base.s_fence->base);
+
+		mutex_unlock(&job->job_lock);
+		amdgpu_cs_parser_fini_late(parser);
+		up_read(&adev->exclusive_lock);
+		return 0;
 	}
 
-	cs->out.handle = parser.ibs[parser.num_ibs - 1].fence->seq;
+	cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
 out:
-	amdgpu_cs_parser_fini(&parser, r, true);
+	amdgpu_cs_parser_fini(parser, r, reserved_buffers);
 	up_read(&adev->exclusive_lock);
 	r = amdgpu_cs_handle_lockup(adev, r);
 	return r;
@@ -797,26 +903,29 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 	union drm_amdgpu_wait_cs *wait = data;
 	struct amdgpu_device *adev = dev->dev_private;
 	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
-	struct amdgpu_fence *fence = NULL;
 	struct amdgpu_ring *ring = NULL;
 	struct amdgpu_ctx *ctx;
+	struct fence *fence;
 	long r;
 
-	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
-	if (ctx == NULL)
-		return -EINVAL;
-
 	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
 			       wait->in.ring, &ring);
 	if (r)
 		return r;
 
-	r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
-	if (r)
-		return r;
+	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
+	if (ctx == NULL)
+		return -EINVAL;
+
+	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
+	if (IS_ERR(fence))
+		r = PTR_ERR(fence);
+	else if (fence) {
+		r = fence_wait_timeout(fence, true, timeout);
+		fence_put(fence);
+	} else
+		r = 1;
 
-	r = fence_wait_timeout(&fence->base, true, timeout);
-	amdgpu_fence_unref(&fence);
 	amdgpu_ctx_put(ctx);
 	if (r < 0)
 		return r;
@@ -851,7 +960,16 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 		if (!reloc->bo_va)
 			continue;
 
-		list_for_each_entry(mapping, &reloc->bo_va->mappings, list) {
+		list_for_each_entry(mapping, &reloc->bo_va->valids, list) {
+			if (mapping->it.start > addr ||
+			    addr > mapping->it.last)
+				continue;
+
+			*bo = reloc->bo_va->bo;
+			return mapping;
+		}
+
+		list_for_each_entry(mapping, &reloc->bo_va->invalids, list) {
 			if (mapping->it.start > addr ||
 			    addr > mapping->it.last)
 				continue;