Diffstat (limited to 'drivers/gpu/drm/i915/i915_scheduler.c')
 -rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c | 255
 1 file changed, 134 insertions(+), 121 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 39bc4f54e272..108f52e1bf35 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -35,109 +35,6 @@ static inline bool node_signaled(const struct i915_sched_node *node)
 	return i915_request_completed(node_to_request(node));
 }
 
-void i915_sched_node_init(struct i915_sched_node *node)
-{
-	INIT_LIST_HEAD(&node->signalers_list);
-	INIT_LIST_HEAD(&node->waiters_list);
-	INIT_LIST_HEAD(&node->link);
-	node->attr.priority = I915_PRIORITY_INVALID;
-	node->semaphores = 0;
-	node->flags = 0;
-}
-
-static struct i915_dependency *
-i915_dependency_alloc(void)
-{
-	return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
-}
-
-static void
-i915_dependency_free(struct i915_dependency *dep)
-{
-	kmem_cache_free(global.slab_dependencies, dep);
-}
-
-bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
-				      struct i915_sched_node *signal,
-				      struct i915_dependency *dep,
-				      unsigned long flags)
-{
-	bool ret = false;
-
-	spin_lock_irq(&schedule_lock);
-
-	if (!node_signaled(signal)) {
-		INIT_LIST_HEAD(&dep->dfs_link);
-		list_add(&dep->wait_link, &signal->waiters_list);
-		list_add(&dep->signal_link, &node->signalers_list);
-		dep->signaler = signal;
-		dep->flags = flags;
-
-		/* Keep track of whether anyone on this chain has a semaphore */
-		if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
-		    !node_started(signal))
-			node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
-
-		ret = true;
-	}
-
-	spin_unlock_irq(&schedule_lock);
-
-	return ret;
-}
-
-int i915_sched_node_add_dependency(struct i915_sched_node *node,
-				   struct i915_sched_node *signal)
-{
-	struct i915_dependency *dep;
-
-	dep = i915_dependency_alloc();
-	if (!dep)
-		return -ENOMEM;
-
-	if (!__i915_sched_node_add_dependency(node, signal, dep,
-					      I915_DEPENDENCY_ALLOC))
-		i915_dependency_free(dep);
-
-	return 0;
-}
-
-void i915_sched_node_fini(struct i915_sched_node *node)
-{
-	struct i915_dependency *dep, *tmp;
-
-	GEM_BUG_ON(!list_empty(&node->link));
-
-	spin_lock_irq(&schedule_lock);
-
-	/*
-	 * Everyone we depended upon (the fences we wait to be signaled)
-	 * should retire before us and remove themselves from our list.
-	 * However, retirement is run independently on each timeline and
-	 * so we may be called out-of-order.
-	 */
-	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
-		GEM_BUG_ON(!node_signaled(dep->signaler));
-		GEM_BUG_ON(!list_empty(&dep->dfs_link));
-
-		list_del(&dep->wait_link);
-		if (dep->flags & I915_DEPENDENCY_ALLOC)
-			i915_dependency_free(dep);
-	}
-
-	/* Remove ourselves from everyone who depends upon us */
-	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
-		GEM_BUG_ON(dep->signaler != node);
-		GEM_BUG_ON(!list_empty(&dep->dfs_link));
-
-		list_del(&dep->signal_link);
-		if (dep->flags & I915_DEPENDENCY_ALLOC)
-			i915_dependency_free(dep);
-	}
-
-	spin_unlock_irq(&schedule_lock);
-}
-
 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 {
 	return rb_entry(rb, struct i915_priolist, node);
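Note on the hunk above: these node and dependency helpers are removed here and re-added near the end of the file (final hunk), mostly unchanged apart from the new I915_DEPENDENCY_EXTERNAL handling, so that __i915_sched_node_add_dependency() sits after __i915_schedule() and can call the new __bump_priority() helper without a forward declaration. As a rough, self-contained sketch of the bookkeeping these helpers maintain — the structure and field names below are illustrative stand-ins, not the driver's i915_sched_node/i915_dependency — each dependency edge is linked into both the waiter's signalers list and the signaler's waiters list:

/*
 * Toy userspace model (assumptions only, not driver code) of the
 * double-linked dependency edge maintained by i915_sched_node_init(),
 * __i915_sched_node_add_dependency() and torn down by i915_sched_node_fini().
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_dep;

struct toy_node {
	struct toy_dep *signalers;	/* edges to nodes we wait on */
	struct toy_dep *waiters;	/* edges to nodes waiting on us */
};

struct toy_dep {
	struct toy_node *signaler;
	struct toy_node *waiter;
	struct toy_dep *next_signaler;	/* link in waiter->signalers */
	struct toy_dep *next_waiter;	/* link in signaler->waiters */
};

static void toy_node_init(struct toy_node *node)
{
	node->signalers = NULL;
	node->waiters = NULL;
}

static bool toy_add_dependency(struct toy_node *node, struct toy_node *signal)
{
	struct toy_dep *dep = malloc(sizeof(*dep));

	if (!dep)
		return false;

	/* The one edge is threaded onto both lists, as in the driver */
	dep->signaler = signal;
	dep->waiter = node;
	dep->next_signaler = node->signalers;
	node->signalers = dep;
	dep->next_waiter = signal->waiters;
	signal->waiters = dep;
	return true;
}

int main(void)
{
	struct toy_node a, b;

	toy_node_init(&a);
	toy_node_init(&b);
	toy_add_dependency(&b, &a);	/* b waits for a to signal */
	printf("b's first signaler is a: %d\n", b.signalers->signaler == &a);
	printf("a's first waiter is b: %d\n", a.waiters->waiter == &b);
	free(b.signalers);
	return 0;
}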
@@ -239,6 +136,11 @@ out:
 	return &p->requests[idx];
 }
 
+void __i915_priolist_free(struct i915_priolist *p)
+{
+	kmem_cache_free(global.slab_priorities, p);
+}
+
 struct sched_cache {
 	struct list_head *priolist;
 };
@@ -273,7 +175,7 @@ static bool inflight(const struct i915_request *rq,
 	return active->hw_context == rq->hw_context;
 }
 
-static void __i915_schedule(struct i915_request *rq,
+static void __i915_schedule(struct i915_sched_node *node,
 			    const struct i915_sched_attr *attr)
 {
 	struct intel_engine_cs *engine;
@@ -287,13 +189,13 @@ static void __i915_schedule(struct i915_request *rq,
 	lockdep_assert_held(&schedule_lock);
 	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
 
-	if (i915_request_completed(rq))
+	if (node_signaled(node))
 		return;
 
-	if (prio <= READ_ONCE(rq->sched.attr.priority))
+	if (prio <= READ_ONCE(node->attr.priority))
 		return;
 
-	stack.signaler = &rq->sched;
+	stack.signaler = node;
 	list_add(&stack.dfs_link, &dfs);
 
 	/*
@@ -344,9 +246,9 @@ static void __i915_schedule(struct i915_request *rq,
 	 * execlists_submit_request()), we can set our own priority and skip
 	 * acquiring the engine locks.
 	 */
-	if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
-		GEM_BUG_ON(!list_empty(&rq->sched.link));
-		rq->sched.attr = *attr;
+	if (node->attr.priority == I915_PRIORITY_INVALID) {
+		GEM_BUG_ON(!list_empty(&node->link));
+		node->attr = *attr;
 
 		if (stack.dfs_link.next == stack.dfs_link.prev)
 			return;
@@ -355,15 +257,14 @@ static void __i915_schedule(struct i915_request *rq,
 	}
 
 	memset(&cache, 0, sizeof(cache));
-	engine = rq->engine;
+	engine = node_to_request(node)->engine;
 	spin_lock(&engine->timeline.lock);
 
 	/* Fifo and depth-first replacement ensure our deps execute before us */
 	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
-		struct i915_sched_node *node = dep->signaler;
-
 		INIT_LIST_HEAD(&dep->dfs_link);
 
+		node = dep->signaler;
 		engine = sched_lock_engine(node, engine, &cache);
 		lockdep_assert_held(&engine->timeline.lock);
 
@@ -413,13 +314,20 @@ static void __i915_schedule(struct i915_request *rq,
 void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
 {
 	spin_lock_irq(&schedule_lock);
-	__i915_schedule(rq, attr);
+	__i915_schedule(&rq->sched, attr);
 	spin_unlock_irq(&schedule_lock);
 }
 
+static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
+{
+	struct i915_sched_attr attr = node->attr;
+
+	attr.priority |= bump;
+	__i915_schedule(node, &attr);
+}
+
 void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
 {
-	struct i915_sched_attr attr;
 	unsigned long flags;
 
 	GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
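The __bump_priority() helper added above factors out the sequence previously open-coded in i915_schedule_bump_priority() — copy the node's attributes, OR the bump bits into the priority, reschedule — so the same path can also be called from __i915_sched_node_add_dependency() later in this patch. A standalone sketch of that bump semantic, assuming a hypothetical split of the priority word into a user level plus low internal bump bits (the TOY_* values are stand-ins, not the driver's I915_PRIORITY_MASK or __NO_PREEMPTION):

/*
 * Illustrative sketch only: models how OR-ing a "bump" bit into a priority
 * value raises a request within its user-visible priority level.
 */
#include <assert.h>
#include <stdio.h>

#define TOY_PRIORITY_SHIFT	3		/* low bits reserved for bump flags */
#define TOY_PRIORITY_MASK	((1 << TOY_PRIORITY_SHIFT) - 1)
#define TOY_BUMP_NOPREEMPT	(1 << 0)	/* hypothetical bump flag */

struct toy_sched_attr {
	int priority;
};

static void toy_bump_priority(struct toy_sched_attr *attr, unsigned int bump)
{
	/* Mirrors __bump_priority(): same user level, higher internal rank */
	assert((bump & ~TOY_PRIORITY_MASK) == 0);
	attr->priority |= bump;
}

int main(void)
{
	struct toy_sched_attr attr = { .priority = 2 << TOY_PRIORITY_SHIFT };

	toy_bump_priority(&attr, TOY_BUMP_NOPREEMPT);
	printf("user level %d, internal bits %#x\n",
	       attr.priority >> TOY_PRIORITY_SHIFT,
	       attr.priority & TOY_PRIORITY_MASK);
	return 0;
}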
@@ -428,17 +336,122 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
 		return;
 
 	spin_lock_irqsave(&schedule_lock, flags);
+	__bump_priority(&rq->sched, bump);
+	spin_unlock_irqrestore(&schedule_lock, flags);
+}
 
-	attr = rq->sched.attr;
-	attr.priority |= bump;
-	__i915_schedule(rq, &attr);
+void i915_sched_node_init(struct i915_sched_node *node)
+{
+	INIT_LIST_HEAD(&node->signalers_list);
+	INIT_LIST_HEAD(&node->waiters_list);
+	INIT_LIST_HEAD(&node->link);
+	node->attr.priority = I915_PRIORITY_INVALID;
+	node->semaphores = 0;
+	node->flags = 0;
+}
 
-	spin_unlock_irqrestore(&schedule_lock, flags);
+static struct i915_dependency *
+i915_dependency_alloc(void)
+{
+	return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
 }
 
-void __i915_priolist_free(struct i915_priolist *p)
+static void
+i915_dependency_free(struct i915_dependency *dep)
 {
-	kmem_cache_free(global.slab_priorities, p);
+	kmem_cache_free(global.slab_dependencies, dep);
+}
+
+bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
+				      struct i915_sched_node *signal,
+				      struct i915_dependency *dep,
+				      unsigned long flags)
+{
+	bool ret = false;
+
+	spin_lock_irq(&schedule_lock);
+
+	if (!node_signaled(signal)) {
+		INIT_LIST_HEAD(&dep->dfs_link);
+		list_add(&dep->wait_link, &signal->waiters_list);
+		list_add(&dep->signal_link, &node->signalers_list);
+		dep->signaler = signal;
+		dep->flags = flags;
+
+		/* Keep track of whether anyone on this chain has a semaphore */
+		if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
+		    !node_started(signal))
+			node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
+
+		/*
+		 * As we do not allow WAIT to preempt inflight requests,
+		 * once we have executed a request, along with triggering
+		 * any execution callbacks, we must preserve its ordering
+		 * within the non-preemptible FIFO.
+		 */
+		BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK);
+		if (flags & I915_DEPENDENCY_EXTERNAL)
+			__bump_priority(signal, __NO_PREEMPTION);
+
+		ret = true;
+	}
+
+	spin_unlock_irq(&schedule_lock);
+
+	return ret;
+}
+
+int i915_sched_node_add_dependency(struct i915_sched_node *node,
+				   struct i915_sched_node *signal)
+{
+	struct i915_dependency *dep;
+
+	dep = i915_dependency_alloc();
+	if (!dep)
+		return -ENOMEM;
+
+	if (!__i915_sched_node_add_dependency(node, signal, dep,
+					      I915_DEPENDENCY_EXTERNAL |
+					      I915_DEPENDENCY_ALLOC))
+		i915_dependency_free(dep);
+
+	return 0;
+}
+
+void i915_sched_node_fini(struct i915_sched_node *node)
+{
+	struct i915_dependency *dep, *tmp;
+
+	GEM_BUG_ON(!list_empty(&node->link));
+
+	spin_lock_irq(&schedule_lock);
+
+	/*
+	 * Everyone we depended upon (the fences we wait to be signaled)
+	 * should retire before us and remove themselves from our list.
+	 * However, retirement is run independently on each timeline and
+	 * so we may be called out-of-order.
+	 */
+	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
+		GEM_BUG_ON(!node_signaled(dep->signaler));
+		GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
+		list_del(&dep->wait_link);
+		if (dep->flags & I915_DEPENDENCY_ALLOC)
+			i915_dependency_free(dep);
+	}
+
+	/* Remove ourselves from everyone who depends upon us */
+	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
+		GEM_BUG_ON(dep->signaler != node);
+		GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
+		list_del(&dep->signal_link);
+		if (dep->flags & I915_DEPENDENCY_ALLOC)
+			i915_dependency_free(dep);
+	}
+
+	spin_unlock_irq(&schedule_lock);
 }
 
 static void i915_global_scheduler_shrink(void)
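Taken together with the new comment about WAIT not preempting in-flight requests, the I915_DEPENDENCY_EXTERNAL handling in the hunk above means that only dependencies flagged I915_DEPENDENCY_EXTERNAL — which this patch makes i915_sched_node_add_dependency() pass alongside I915_DEPENDENCY_ALLOC — bump their signaler by __NO_PREEMPTION; edges added with other flag combinations leave the signaler's priority untouched. A minimal model of that branch, using made-up flag values rather than the driver's definitions:

/*
 * Sketch only: why the bump applies to "external" dependencies and not to
 * internally constructed edges. TOY_* values are illustrative assumptions.
 */
#include <stdio.h>

#define TOY_DEPENDENCY_ALLOC	(1u << 0)
#define TOY_DEPENDENCY_EXTERNAL	(1u << 1)
#define TOY_NO_PREEMPTION	(1u << 0)	/* stand-in for __NO_PREEMPTION */

struct toy_signal {
	unsigned int priority;
};

static void toy_add_dependency(struct toy_signal *signal, unsigned long flags)
{
	/*
	 * Mirrors the new branch in __i915_sched_node_add_dependency():
	 * only external edges raise the signaler's priority so its ordering
	 * within the non-preemptible FIFO is preserved.
	 */
	if (flags & TOY_DEPENDENCY_EXTERNAL)
		signal->priority |= TOY_NO_PREEMPTION;
}

int main(void)
{
	struct toy_signal external = { 0 }, internal = { 0 };

	toy_add_dependency(&external, TOY_DEPENDENCY_EXTERNAL | TOY_DEPENDENCY_ALLOC);
	toy_add_dependency(&internal, TOY_DEPENDENCY_ALLOC);
	printf("external bumped: %u, internal bumped: %u\n",
	       external.priority, internal.priority);
	return 0;
}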
