path: root/include/drm/gpu_scheduler.h
Diffstat (limited to 'include/drm/gpu_scheduler.h')
-rw-r--r--  include/drm/gpu_scheduler.h | 5 +++++
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index d87b268f1781..926379d53484 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -264,6 +264,7 @@ struct drm_sched_backend_ops {
  * @hang_limit: once the hangs by a job crosses this limit then it is marked
  * guilty and it will be considered for scheduling further.
  * @num_jobs: the number of jobs in queue in the scheduler
+ * @ready: marks if the underlying HW is ready to work
  *
  * One scheduler is implemented for each hardware ring.
  */
@@ -283,22 +284,26 @@ struct drm_gpu_scheduler {
 	spinlock_t			job_list_lock;
 	int				hang_limit;
 	atomic_t			num_jobs;
+	bool				ready;
 };
 
 int drm_sched_init(struct drm_gpu_scheduler *sched,
 		   const struct drm_sched_backend_ops *ops,
 		   uint32_t hw_submission, unsigned hang_limit, long timeout,
 		   const char *name);
+
 void drm_sched_fini(struct drm_gpu_scheduler *sched);
 int drm_sched_job_init(struct drm_sched_job *job,
 		       struct drm_sched_entity *entity,
 		       void *owner);
+void drm_sched_job_cleanup(struct drm_sched_job *job);
 void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
 void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
 			    struct drm_sched_job *job);
 void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
 bool drm_sched_dependency_optimized(struct dma_fence* fence,
 				    struct drm_sched_entity *entity);
+void drm_sched_fault(struct drm_gpu_scheduler *sched);
 void drm_sched_job_kickout(struct drm_sched_job *s_job);
 
 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
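
For context, a minimal driver-side sketch of how the new entry points could slot into the scheduler flow. Everything prefixed my_ (the job wrapper, the ops table, the fault IRQ handler, the ring-init helper) is hypothetical; only drm_sched_init(), drm_sched_job_cleanup(), drm_sched_fault() and the ready field come from this header, and how the core or driver toggles ready is not shown by this hunk.

#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <drm/gpu_scheduler.h>

/* Hypothetical driver job embedding the scheduler job. */
struct my_job {
	struct drm_sched_job base;
};

static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
{
	/* Submit to the hardware ring and return its fence (stubbed). */
	return NULL;
}

/* free_job is the natural place for the new drm_sched_job_cleanup(),
 * which releases the scheduler-side state of a job before the driver
 * frees its own wrapper. */
static void my_free_job(struct drm_sched_job *sched_job)
{
	struct my_job *job = container_of(sched_job, struct my_job, base);

	drm_sched_job_cleanup(sched_job);
	kfree(job);
}

static const struct drm_sched_backend_ops my_sched_ops = {
	.run_job  = my_run_job,
	.free_job = my_free_job,
};

/* A hardware-fault interrupt can use the new drm_sched_fault() to kick
 * the scheduler's timeout handling immediately rather than waiting for
 * the job timeout to expire. */
static irqreturn_t my_fault_irq(int irq, void *data)
{
	struct drm_gpu_scheduler *sched = data;

	drm_sched_fault(sched);
	return IRQ_HANDLED;
}

static int my_ring_init(struct drm_gpu_scheduler *sched)
{
	/* Arguments per the declaration above: ops, hw_submission
	 * (queue depth), hang_limit, timeout, name; the values here
	 * are placeholders. */
	return drm_sched_init(sched, &my_sched_ops, 64, 2,
			      msecs_to_jiffies(500), "my-ring");
}

A driver can additionally test sched->ready before queuing work, per the new kernel-doc line, to avoid submitting to hardware that is not initialized.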