author    Nayan Deshmukh <nayan26deshmukh@gmail.com>    2018-07-13 05:51:14 -0400
committer Alex Deucher <alexander.deucher@amd.com>      2018-07-13 15:46:05 -0400
commit    aa16b6c6b4d979234f830a48add47d02c12bb569 (patch)
tree      64efc4a451b98facae33ff08c9a3202db94d4413
parent    8dc9fbbf274b7b2a647e06141aee70ffabf6dbc0 (diff)
drm/scheduler: modify args of drm_sched_entity_init
Replace the single run queue argument with a list of run queues, and drop
the sched argument since the scheduler is part of the run queue itself.

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Acked-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
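For reference, a minimal caller sketch of the new interface (a hypothetical helper, not part of this patch): the driver now passes an array of run-queue pointers plus its length, and the scheduler core derives the scheduler from rq_list[0]->sched.

    #include <drm/gpu_scheduler.h>

    /*
     * Hypothetical example (not from this patch): initialize an entity on a
     * single run queue with the new signature. The old (sched, rq) pair is
     * replaced by a pointer array plus a count; the core reads the scheduler
     * from rq_list[0]->sched.
     */
    static int example_entity_setup(struct drm_gpu_scheduler *sched,
                                    struct drm_sched_entity *entity)
    {
            struct drm_sched_rq *rq =
                    &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];

            /* &rq acts as a one-element run-queue list. */
            return drm_sched_entity_init(entity, &rq, 1, NULL);
    }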
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c    |  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    |  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c    |  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c    |  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c     |  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c      |  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c      |  4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c      |  8
-rw-r--r--  drivers/gpu/drm/scheduler/gpu_scheduler.c  | 20
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.c              |  7
-rw-r--r--  include/drm/gpu_scheduler.h                |  6
11 files changed, 33 insertions(+), 33 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 0120b24fae1b..83e3b320a793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -90,8 +90,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
                if (ring == &adev->gfx.kiq.ring)
                        continue;

-               r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
-                                         rq, &ctx->guilty);
+               r = drm_sched_entity_init(&ctx->rings[i].entity,
+                                         &rq, 1, &ctx->guilty);
                if (r)
                        goto failed;
        }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 6a3fead5c1f0..11a12483c995 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1918,8 +1918,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)

                ring = adev->mman.buffer_funcs_ring;
                rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-               r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
-                                         rq, NULL);
+               r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
                if (r) {
                        DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
                                  r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 3e70eb61a960..a6c2cace4b9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -266,8 +266,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)

                ring = &adev->uvd.inst[j].ring;
                rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-               r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
-                                         rq, NULL);
+               r = drm_sched_entity_init(&adev->uvd.inst[j].entity, &rq,
+                                         1, NULL);
                if (r != 0) {
                        DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
                        return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 6ae1ad7e83b3..ffb0fcc9707e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -190,8 +190,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)

        ring = &adev->vce.ring[0];
        rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-       r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
-                                 rq, NULL);
+       r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
        if (r != 0) {
                DRM_ERROR("Failed setting up VCE run queue.\n");
                return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0fd0a718763b..484e2c19c027 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2564,8 +2564,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        ring_instance %= adev->vm_manager.vm_pte_num_rings;
        ring = adev->vm_manager.vm_pte_rings[ring_instance];
        rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-       r = drm_sched_entity_init(&ring->sched, &vm->entity,
-                                 rq, NULL);
+       r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL);
        if (r)
                return r;

diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 2623f249cb7a..1c118c02e8cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -430,8 +430,8 @@ static int uvd_v6_0_sw_init(void *handle)
                struct drm_sched_rq *rq;
                ring = &adev->uvd.inst->ring_enc[0];
                rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-               r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
-                                         rq, NULL);
+               r = drm_sched_entity_init(&adev->uvd.inst->entity_enc,
+                                         &rq, 1, NULL);
                if (r) {
                        DRM_ERROR("Failed setting up UVD ENC run queue.\n");
                        return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index ce360ad16856..d48bc3393545 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -432,8 +432,8 @@ static int uvd_v7_0_sw_init(void *handle)
        for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
                ring = &adev->uvd.inst[j].ring_enc[0];
                rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-               r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc,
-                                         rq, NULL);
+               r = drm_sched_entity_init(&adev->uvd.inst[j].entity_enc,
+                                         &rq, 1, NULL);
                if (r) {
                        DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j);
                        return r;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 45bfdf4cc107..36414ba56b22 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -49,12 +49,12 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)

        for (i = 0; i < ETNA_MAX_PIPES; i++) {
                struct etnaviv_gpu *gpu = priv->gpu[i];
+               struct drm_sched_rq *rq;

                if (gpu) {
-                       drm_sched_entity_init(&gpu->sched,
-                               &ctx->sched_entity[i],
-                               &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
-                               NULL);
+                       rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+                       drm_sched_entity_init(&ctx->sched_entity[i],
+                                             &rq, 1, NULL);
                }
        }

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 429b1328653a..16bf446aa6b3 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -162,26 +162,30 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
  * drm_sched_entity_init - Init a context entity used by scheduler when
  * submit to HW ring.
  *
- * @sched: scheduler instance
  * @entity: scheduler entity to init
- * @rq: the run queue this entity belongs
+ * @rq_list: the list of run queue on which jobs from this
+ *           entity can be submitted
+ * @num_rq_list: number of run queue in rq_list
  * @guilty: atomic_t set to 1 when a job on this queue
  *          is found to be guilty causing a timeout
  *
+ * Note: the rq_list should have atleast one element to schedule
+ *       the entity
+ *
  * Returns 0 on success or a negative error code on failure.
  */
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
-                         struct drm_sched_entity *entity,
-                         struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+                         struct drm_sched_rq **rq_list,
+                         unsigned int num_rq_list,
                          atomic_t *guilty)
 {
-       if (!(sched && entity && rq))
+       if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
                return -EINVAL;

        memset(entity, 0, sizeof(struct drm_sched_entity));
        INIT_LIST_HEAD(&entity->list);
-       entity->rq = rq;
-       entity->sched = sched;
+       entity->rq = rq_list[0];
+       entity->sched = rq_list[0]->sched;
        entity->guilty = guilty;
        entity->last_scheduled = NULL;

diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 567f7d46d912..1dceba2b42fd 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -123,6 +123,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 {
        struct v3d_dev *v3d = to_v3d_dev(dev);
        struct v3d_file_priv *v3d_priv;
+       struct drm_sched_rq *rq;
        int i;

        v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
@@ -132,10 +133,8 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
        v3d_priv->v3d = v3d;

        for (i = 0; i < V3D_MAX_QUEUES; i++) {
-               drm_sched_entity_init(&v3d->queue[i].sched,
-                                     &v3d_priv->sched_entity[i],
-                                     &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
-                                     NULL);
+               rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+               drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
        }

        file->driver_priv = v3d_priv;
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 43e93d6077cf..2205e89722f6 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -282,9 +282,9 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
                           const char *name);
 void drm_sched_fini(struct drm_gpu_scheduler *sched);

-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
-                         struct drm_sched_entity *entity,
-                         struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+                         struct drm_sched_rq **rq_list,
+                         unsigned int num_rq_list,
                          atomic_t *guilty);
 long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
                             struct drm_sched_entity *entity, long timeout);
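All callers converted above pass one-element lists. A hypothetical future caller spreading an entity over several schedulers' run queues might look like the sketch below (the helper name, parameters, and fixed-size array are assumptions, not part of this patch); per the new entity->rq = rq_list[0] assignment, only the first run queue is consumed at init time, while num_rq_list conveys the list length.

    #include <drm/gpu_scheduler.h>

    /*
     * Hypothetical sketch (not from this patch): build a run-queue list
     * spanning several schedulers at the same priority and hand it to
     * drm_sched_entity_init(). The core currently stores rq_list[0] in
     * entity->rq and takes the scheduler from rq_list[0]->sched.
     */
    static int example_multi_rq_setup(struct drm_gpu_scheduler **scheds,
                                      unsigned int num_scheds,
                                      struct drm_sched_entity *entity)
    {
            struct drm_sched_rq *rq_list[4];
            unsigned int i;

            if (!num_scheds || num_scheds > ARRAY_SIZE(rq_list))
                    return -EINVAL;

            for (i = 0; i < num_scheds; i++)
                    rq_list[i] = &scheds[i]->sched_rq[DRM_SCHED_PRIORITY_NORMAL];

            return drm_sched_entity_init(entity, rq_list, num_scheds, NULL);
    }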