diff options
author | Chunming Zhou <David1.Zhou@amd.com> | 2016-01-14 22:25:00 -0500 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2016-02-10 14:16:50 -0500 |
commit | cadf97b196a1e5b2db2606d53f77714e3e9cf4bb (patch) | |
tree | 1954976bc68547599f4ea9c29381a962c5c5d681 | |
parent | be86c606b50a53b60f3591ba94dd687524f2ee21 (diff) |
drm/amdgpu: clean up non-scheduler code path (v2)
Non-scheduler code is no longer supported.
v2: agd: rebased on upstream
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Ken Wang <Qingqing.Wang@amd.com>
Reviewed-by: Monk Liu <monk.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 11 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 48 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 39 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 4 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 44 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 4 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 6 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 9 |
11 files changed, 64 insertions, 110 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 8c55c1d78ffe..9d3dff244217 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -82,7 +82,6 @@ extern int amdgpu_vm_size; | |||
82 | extern int amdgpu_vm_block_size; | 82 | extern int amdgpu_vm_block_size; |
83 | extern int amdgpu_vm_fault_stop; | 83 | extern int amdgpu_vm_fault_stop; |
84 | extern int amdgpu_vm_debug; | 84 | extern int amdgpu_vm_debug; |
85 | extern int amdgpu_enable_scheduler; | ||
86 | extern int amdgpu_sched_jobs; | 85 | extern int amdgpu_sched_jobs; |
87 | extern int amdgpu_sched_hw_submission; | 86 | extern int amdgpu_sched_hw_submission; |
88 | extern int amdgpu_enable_semaphores; | 87 | extern int amdgpu_enable_semaphores; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 0479ad5a66ed..ddeba55c3b7d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
@@ -813,7 +813,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
813 | if (r) | 813 | if (r) |
814 | goto out; | 814 | goto out; |
815 | 815 | ||
816 | if (amdgpu_enable_scheduler && parser.num_ibs) { | 816 | if (parser.num_ibs) { |
817 | struct amdgpu_ring * ring = parser.ibs->ring; | 817 | struct amdgpu_ring * ring = parser.ibs->ring; |
818 | struct amd_sched_fence *fence; | 818 | struct amd_sched_fence *fence; |
819 | struct amdgpu_job *job; | 819 | struct amdgpu_job *job; |
@@ -858,15 +858,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
858 | 858 | ||
859 | trace_amdgpu_cs_ioctl(job); | 859 | trace_amdgpu_cs_ioctl(job); |
860 | amd_sched_entity_push_job(&job->base); | 860 | amd_sched_entity_push_job(&job->base); |
861 | |||
862 | } else { | ||
863 | struct amdgpu_fence *fence; | ||
864 | |||
865 | r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs, | ||
866 | parser.filp); | ||
867 | fence = parser.ibs[parser.num_ibs - 1].fence; | ||
868 | parser.fence = fence_get(&fence->base); | ||
869 | cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence; | ||
870 | } | 861 | } |
871 | 862 | ||
872 | out: | 863 | out: |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 17d1fb12128a..f1f4b453ece1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | |||
@@ -45,29 +45,27 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri, | |||
45 | ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) * | 45 | ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) * |
46 | amdgpu_sched_jobs * i; | 46 | amdgpu_sched_jobs * i; |
47 | } | 47 | } |
48 | if (amdgpu_enable_scheduler) { | 48 | /* create context entity for each ring */ |
49 | /* create context entity for each ring */ | 49 | for (i = 0; i < adev->num_rings; i++) { |
50 | for (i = 0; i < adev->num_rings; i++) { | 50 | struct amd_sched_rq *rq; |
51 | struct amd_sched_rq *rq; | 51 | if (pri >= AMD_SCHED_MAX_PRIORITY) { |
52 | if (pri >= AMD_SCHED_MAX_PRIORITY) { | ||
53 | kfree(ctx->fences); | ||
54 | return -EINVAL; | ||
55 | } | ||
56 | rq = &adev->rings[i]->sched.sched_rq[pri]; | ||
57 | r = amd_sched_entity_init(&adev->rings[i]->sched, | ||
58 | &ctx->rings[i].entity, | ||
59 | rq, amdgpu_sched_jobs); | ||
60 | if (r) | ||
61 | break; | ||
62 | } | ||
63 | |||
64 | if (i < adev->num_rings) { | ||
65 | for (j = 0; j < i; j++) | ||
66 | amd_sched_entity_fini(&adev->rings[j]->sched, | ||
67 | &ctx->rings[j].entity); | ||
68 | kfree(ctx->fences); | 52 | kfree(ctx->fences); |
69 | return r; | 53 | return -EINVAL; |
70 | } | 54 | } |
55 | rq = &adev->rings[i]->sched.sched_rq[pri]; | ||
56 | r = amd_sched_entity_init(&adev->rings[i]->sched, | ||
57 | &ctx->rings[i].entity, | ||
58 | rq, amdgpu_sched_jobs); | ||
59 | if (r) | ||
60 | break; | ||
61 | } | ||
62 | |||
63 | if (i < adev->num_rings) { | ||
64 | for (j = 0; j < i; j++) | ||
65 | amd_sched_entity_fini(&adev->rings[j]->sched, | ||
66 | &ctx->rings[j].entity); | ||
67 | kfree(ctx->fences); | ||
68 | return r; | ||
71 | } | 69 | } |
72 | return 0; | 70 | return 0; |
73 | } | 71 | } |
@@ -85,11 +83,9 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) | |||
85 | fence_put(ctx->rings[i].fences[j]); | 83 | fence_put(ctx->rings[i].fences[j]); |
86 | kfree(ctx->fences); | 84 | kfree(ctx->fences); |
87 | 85 | ||
88 | if (amdgpu_enable_scheduler) { | 86 | for (i = 0; i < adev->num_rings; i++) |
89 | for (i = 0; i < adev->num_rings; i++) | 87 | amd_sched_entity_fini(&adev->rings[i]->sched, |
90 | amd_sched_entity_fini(&adev->rings[i]->sched, | 88 | &ctx->rings[i].entity); |
91 | &ctx->rings[i].entity); | ||
92 | } | ||
93 | } | 89 | } |
94 | 90 | ||
95 | static int amdgpu_ctx_alloc(struct amdgpu_device *adev, | 91 | static int amdgpu_ctx_alloc(struct amdgpu_device *adev, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 11573fd1f053..8af888a2aa9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | |||
@@ -78,7 +78,6 @@ int amdgpu_vm_block_size = -1; | |||
78 | int amdgpu_vm_fault_stop = 0; | 78 | int amdgpu_vm_fault_stop = 0; |
79 | int amdgpu_vm_debug = 0; | 79 | int amdgpu_vm_debug = 0; |
80 | int amdgpu_exp_hw_support = 0; | 80 | int amdgpu_exp_hw_support = 0; |
81 | int amdgpu_enable_scheduler = 1; | ||
82 | int amdgpu_sched_jobs = 32; | 81 | int amdgpu_sched_jobs = 32; |
83 | int amdgpu_sched_hw_submission = 2; | 82 | int amdgpu_sched_hw_submission = 2; |
84 | int amdgpu_powerplay = -1; | 83 | int amdgpu_powerplay = -1; |
@@ -152,9 +151,6 @@ module_param_named(vm_debug, amdgpu_vm_debug, int, 0644); | |||
152 | MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))"); | 151 | MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))"); |
153 | module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444); | 152 | module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444); |
154 | 153 | ||
155 | MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable (default), 0 = disable)"); | ||
156 | module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444); | ||
157 | |||
158 | MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)"); | 154 | MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)"); |
159 | module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444); | 155 | module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444); |
160 | 156 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 3671f9f220bd..cac03e743b58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | |||
@@ -472,6 +472,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, | |||
472 | int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) | 472 | int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) |
473 | { | 473 | { |
474 | int i, r; | 474 | int i, r; |
475 | long timeout; | ||
475 | 476 | ||
476 | ring->fence_drv.cpu_addr = NULL; | 477 | ring->fence_drv.cpu_addr = NULL; |
477 | ring->fence_drv.gpu_addr = 0; | 478 | ring->fence_drv.gpu_addr = 0; |
@@ -486,26 +487,24 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) | |||
486 | 487 | ||
487 | init_waitqueue_head(&ring->fence_drv.fence_queue); | 488 | init_waitqueue_head(&ring->fence_drv.fence_queue); |
488 | 489 | ||
489 | if (amdgpu_enable_scheduler) { | 490 | timeout = msecs_to_jiffies(amdgpu_lockup_timeout); |
490 | long timeout = msecs_to_jiffies(amdgpu_lockup_timeout); | 491 | if (timeout == 0) { |
491 | if (timeout == 0) { | 492 | /* |
492 | /* | 493 | * FIXME: |
493 | * FIXME: | 494 | * Delayed workqueue cannot use it directly, |
494 | * Delayed workqueue cannot use it directly, | 495 | * so the scheduler will not use delayed workqueue if |
495 | * so the scheduler will not use delayed workqueue if | 496 | * MAX_SCHEDULE_TIMEOUT is set. |
496 | * MAX_SCHEDULE_TIMEOUT is set. | 497 | * Currently keep it simple and silly. |
497 | * Currently keep it simple and silly. | 498 | */ |
498 | */ | 499 | timeout = MAX_SCHEDULE_TIMEOUT; |
499 | timeout = MAX_SCHEDULE_TIMEOUT; | 500 | } |
500 | } | 501 | r = amd_sched_init(&ring->sched, &amdgpu_sched_ops, |
501 | r = amd_sched_init(&ring->sched, &amdgpu_sched_ops, | 502 | amdgpu_sched_hw_submission, |
502 | amdgpu_sched_hw_submission, | 503 | timeout, ring->name); |
503 | timeout, ring->name); | 504 | if (r) { |
504 | if (r) { | 505 | DRM_ERROR("Failed to create scheduler on ring %s.\n", |
505 | DRM_ERROR("Failed to create scheduler on ring %s.\n", | 506 | ring->name); |
506 | ring->name); | 507 | return r; |
507 | return r; | ||
508 | } | ||
509 | } | 508 | } |
510 | 509 | ||
511 | return 0; | 510 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 3b58d70b73cd..54cede30a69c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | |||
@@ -199,10 +199,6 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, | |||
199 | return r; | 199 | return r; |
200 | } | 200 | } |
201 | 201 | ||
202 | if (!amdgpu_enable_scheduler && ib->ctx) | ||
203 | ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring, | ||
204 | &ib->fence->base); | ||
205 | |||
206 | /* wrap the last IB with fence */ | 202 | /* wrap the last IB with fence */ |
207 | if (ib->user) { | 203 | if (ib->user) { |
208 | uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo); | 204 | uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index 438c05254695..dd9fac302e55 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | |||
@@ -76,33 +76,25 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, | |||
76 | void *owner, | 76 | void *owner, |
77 | struct fence **f) | 77 | struct fence **f) |
78 | { | 78 | { |
79 | int r = 0; | 79 | struct amdgpu_job *job = |
80 | if (amdgpu_enable_scheduler) { | 80 | kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); |
81 | struct amdgpu_job *job = | 81 | if (!job) |
82 | kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); | 82 | return -ENOMEM; |
83 | if (!job) | 83 | job->base.sched = &ring->sched; |
84 | return -ENOMEM; | 84 | job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; |
85 | job->base.sched = &ring->sched; | 85 | job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner); |
86 | job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; | 86 | if (!job->base.s_fence) { |
87 | job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner); | 87 | kfree(job); |
88 | if (!job->base.s_fence) { | 88 | return -ENOMEM; |
89 | kfree(job); | ||
90 | return -ENOMEM; | ||
91 | } | ||
92 | *f = fence_get(&job->base.s_fence->base); | ||
93 | |||
94 | job->adev = adev; | ||
95 | job->ibs = ibs; | ||
96 | job->num_ibs = num_ibs; | ||
97 | job->owner = owner; | ||
98 | job->free_job = free_job; | ||
99 | amd_sched_entity_push_job(&job->base); | ||
100 | } else { | ||
101 | r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner); | ||
102 | if (r) | ||
103 | return r; | ||
104 | *f = fence_get(&ibs[num_ibs - 1].fence->base); | ||
105 | } | 89 | } |
90 | *f = fence_get(&job->base.s_fence->base); | ||
91 | |||
92 | job->adev = adev; | ||
93 | job->ibs = ibs; | ||
94 | job->num_ibs = num_ibs; | ||
95 | job->owner = owner; | ||
96 | job->free_job = free_job; | ||
97 | amd_sched_entity_push_job(&job->base); | ||
106 | 98 | ||
107 | return 0; | 99 | return 0; |
108 | } | 100 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 6442a06d6fdc..100bfd4a0707 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
@@ -1070,10 +1070,6 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, | |||
1070 | if (r) | 1070 | if (r) |
1071 | goto error_free; | 1071 | goto error_free; |
1072 | 1072 | ||
1073 | if (!amdgpu_enable_scheduler) { | ||
1074 | amdgpu_ib_free(adev, ib); | ||
1075 | kfree(ib); | ||
1076 | } | ||
1077 | return 0; | 1073 | return 0; |
1078 | error_free: | 1074 | error_free: |
1079 | amdgpu_ib_free(adev, ib); | 1075 | amdgpu_ib_free(adev, ib); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 53f987aeeacf..72193f1c8e99 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | |||
@@ -895,11 +895,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, | |||
895 | *fence = fence_get(f); | 895 | *fence = fence_get(f); |
896 | amdgpu_bo_unref(&bo); | 896 | amdgpu_bo_unref(&bo); |
897 | fence_put(f); | 897 | fence_put(f); |
898 | if (amdgpu_enable_scheduler) | ||
899 | return 0; | ||
900 | 898 | ||
901 | amdgpu_ib_free(ring->adev, ib); | ||
902 | kfree(ib); | ||
903 | return 0; | 899 | return 0; |
904 | err2: | 900 | err2: |
905 | amdgpu_ib_free(ring->adev, ib); | 901 | amdgpu_ib_free(ring->adev, ib); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index e882fbfacb12..16fbde9c5f56 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | |||
@@ -432,8 +432,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
432 | if (fence) | 432 | if (fence) |
433 | *fence = fence_get(f); | 433 | *fence = fence_get(f); |
434 | fence_put(f); | 434 | fence_put(f); |
435 | if (amdgpu_enable_scheduler) | 435 | return 0; |
436 | return 0; | ||
437 | err: | 436 | err: |
438 | amdgpu_ib_free(adev, ib); | 437 | amdgpu_ib_free(adev, ib); |
439 | kfree(ib); | 438 | kfree(ib); |
@@ -499,8 +498,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
499 | if (fence) | 498 | if (fence) |
500 | *fence = fence_get(f); | 499 | *fence = fence_get(f); |
501 | fence_put(f); | 500 | fence_put(f); |
502 | if (amdgpu_enable_scheduler) | 501 | return 0; |
503 | return 0; | ||
504 | err: | 502 | err: |
505 | amdgpu_ib_free(adev, ib); | 503 | amdgpu_ib_free(adev, ib); |
506 | kfree(ib); | 504 | kfree(ib); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 7e6414cffbef..cc28bdc02078 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
@@ -401,8 +401,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |||
401 | if (!r) | 401 | if (!r) |
402 | amdgpu_bo_fence(bo, fence, true); | 402 | amdgpu_bo_fence(bo, fence, true); |
403 | fence_put(fence); | 403 | fence_put(fence); |
404 | if (amdgpu_enable_scheduler) | 404 | return 0; |
405 | return 0; | ||
406 | 405 | ||
407 | error_free: | 406 | error_free: |
408 | amdgpu_ib_free(adev, ib); | 407 | amdgpu_ib_free(adev, ib); |
@@ -536,7 +535,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | |||
536 | fence_put(fence); | 535 | fence_put(fence); |
537 | } | 536 | } |
538 | 537 | ||
539 | if (!amdgpu_enable_scheduler || ib->length_dw == 0) { | 538 | if (ib->length_dw == 0) { |
540 | amdgpu_ib_free(adev, ib); | 539 | amdgpu_ib_free(adev, ib); |
541 | kfree(ib); | 540 | kfree(ib); |
542 | } | 541 | } |
@@ -819,10 +818,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, | |||
819 | *fence = fence_get(f); | 818 | *fence = fence_get(f); |
820 | } | 819 | } |
821 | fence_put(f); | 820 | fence_put(f); |
822 | if (!amdgpu_enable_scheduler) { | ||
823 | amdgpu_ib_free(adev, ib); | ||
824 | kfree(ib); | ||
825 | } | ||
826 | return 0; | 821 | return 0; |
827 | 822 | ||
828 | error_free: | 823 | error_free: |