diff options
Diffstat (limited to 'drivers/gpu/drm/scheduler/gpu_scheduler.c')
| -rw-r--r-- | drivers/gpu/drm/scheduler/gpu_scheduler.c | 31 |
1 file changed, 21 insertions, 10 deletions
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index 7d2560699b84..dac71e3b4514 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c | |||
| @@ -69,11 +69,13 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb); | |||
| 69 | * | 69 | * |
| 70 | * Initializes a scheduler runqueue. | 70 | * Initializes a scheduler runqueue. |
| 71 | */ | 71 | */ |
| 72 | static void drm_sched_rq_init(struct drm_sched_rq *rq) | 72 | static void drm_sched_rq_init(struct drm_gpu_scheduler *sched, |
| 73 | struct drm_sched_rq *rq) | ||
| 73 | { | 74 | { |
| 74 | spin_lock_init(&rq->lock); | 75 | spin_lock_init(&rq->lock); |
| 75 | INIT_LIST_HEAD(&rq->entities); | 76 | INIT_LIST_HEAD(&rq->entities); |
| 76 | rq->current_entity = NULL; | 77 | rq->current_entity = NULL; |
| 78 | rq->sched = sched; | ||
| 77 | } | 79 | } |
| 78 | 80 | ||
| 79 | /** | 81 | /** |
| @@ -160,26 +162,30 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq) | |||
| 160 | * drm_sched_entity_init - Init a context entity used by scheduler when | 162 | * drm_sched_entity_init - Init a context entity used by scheduler when |
| 161 | * submit to HW ring. | 163 | * submit to HW ring. |
| 162 | * | 164 | * |
| 163 | * @sched: scheduler instance | ||
| 164 | * @entity: scheduler entity to init | 165 | * @entity: scheduler entity to init |
| 165 | * @rq: the run queue this entity belongs | 166 | * @rq_list: the list of run queues on which jobs from this |
| 167 | * entity can be submitted | ||
| 168 | * @num_rq_list: number of run queues in rq_list | ||
| 166 | * @guilty: atomic_t set to 1 when a job on this queue | 169 | * @guilty: atomic_t set to 1 when a job on this queue |
| 167 | * is found to be guilty causing a timeout | 170 | * is found to be guilty causing a timeout |
| 168 | * | 171 | * |
| 172 | * Note: the rq_list should have at least one element to schedule | ||
| 173 | * the entity | ||
| 174 | * | ||
| 169 | * Returns 0 on success or a negative error code on failure. | 175 | * Returns 0 on success or a negative error code on failure. |
| 170 | */ | 176 | */ |
| 171 | int drm_sched_entity_init(struct drm_gpu_scheduler *sched, | 177 | int drm_sched_entity_init(struct drm_sched_entity *entity, |
| 172 | struct drm_sched_entity *entity, | 178 | struct drm_sched_rq **rq_list, |
| 173 | struct drm_sched_rq *rq, | 179 | unsigned int num_rq_list, |
| 174 | atomic_t *guilty) | 180 | atomic_t *guilty) |
| 175 | { | 181 | { |
| 176 | if (!(sched && entity && rq)) | 182 | if (!(entity && rq_list && num_rq_list > 0 && rq_list[0])) |
| 177 | return -EINVAL; | 183 | return -EINVAL; |
| 178 | 184 | ||
| 179 | memset(entity, 0, sizeof(struct drm_sched_entity)); | 185 | memset(entity, 0, sizeof(struct drm_sched_entity)); |
| 180 | INIT_LIST_HEAD(&entity->list); | 186 | INIT_LIST_HEAD(&entity->list); |
| 181 | entity->rq = rq; | 187 | entity->rq = rq_list[0]; |
| 182 | entity->sched = sched; | 188 | entity->sched = rq_list[0]->sched; |
| 183 | entity->guilty = guilty; | 189 | entity->guilty = guilty; |
| 184 | entity->last_scheduled = NULL; | 190 | entity->last_scheduled = NULL; |
| 185 | 191 | ||
| @@ -541,6 +547,11 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job, | |||
| 541 | if (first) { | 547 | if (first) { |
| 542 | /* Add the entity to the run queue */ | 548 | /* Add the entity to the run queue */ |
| 543 | spin_lock(&entity->rq_lock); | 549 | spin_lock(&entity->rq_lock); |
| 550 | if (!entity->rq) { | ||
| 551 | DRM_ERROR("Trying to push to a killed entity\n"); | ||
| 552 | spin_unlock(&entity->rq_lock); | ||
| 553 | return; | ||
| 554 | } | ||
| 544 | drm_sched_rq_add_entity(entity->rq, entity); | 555 | drm_sched_rq_add_entity(entity->rq, entity); |
| 545 | spin_unlock(&entity->rq_lock); | 556 | spin_unlock(&entity->rq_lock); |
| 546 | drm_sched_wakeup(sched); | 557 | drm_sched_wakeup(sched); |
| @@ -926,7 +937,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, | |||
| 926 | sched->timeout = timeout; | 937 | sched->timeout = timeout; |
| 927 | sched->hang_limit = hang_limit; | 938 | sched->hang_limit = hang_limit; |
| 928 | for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++) | 939 | for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++) |
| 929 | drm_sched_rq_init(&sched->sched_rq[i]); | 940 | drm_sched_rq_init(sched, &sched->sched_rq[i]); |
| 930 | 941 | ||
| 931 | init_waitqueue_head(&sched->wake_up_worker); | 942 | init_waitqueue_head(&sched->wake_up_worker); |
| 932 | init_waitqueue_head(&sched->job_scheduled); | 943 | init_waitqueue_head(&sched->job_scheduled); |
