Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_context.c')
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_context.c | 218
1 file changed, 88 insertions(+), 130 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 6043062ffce7..d72db15afa02 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -96,9 +96,6 @@
 #define GEN6_CONTEXT_ALIGN (64<<10)
 #define GEN7_CONTEXT_ALIGN 4096
 
-static int do_switch(struct intel_ring_buffer *ring,
-		     struct i915_hw_context *to);
-
 static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_device *dev = ppgtt->base.dev;
@@ -185,13 +182,15 @@ void i915_gem_context_free(struct kref *ctx_ref)
 						   typeof(*ctx), ref);
 	struct i915_hw_ppgtt *ppgtt = NULL;
 
-	/* We refcount even the aliasing PPGTT to keep the code symmetric */
-	if (USES_PPGTT(ctx->obj->base.dev))
-		ppgtt = ctx_to_ppgtt(ctx);
+	if (ctx->obj) {
+		/* We refcount even the aliasing PPGTT to keep the code symmetric */
+		if (USES_PPGTT(ctx->obj->base.dev))
+			ppgtt = ctx_to_ppgtt(ctx);
 
-	/* XXX: Free up the object before tearing down the address space, in
-	 * case we're bound in the PPGTT */
-	drm_gem_object_unreference(&ctx->obj->base);
+		/* XXX: Free up the object before tearing down the address space, in
+		 * case we're bound in the PPGTT */
+		drm_gem_object_unreference(&ctx->obj->base);
+	}
 
 	if (ppgtt)
 		kref_put(&ppgtt->ref, ppgtt_release);
@@ -232,32 +231,32 @@ __create_hw_context(struct drm_device *dev,
 		return ERR_PTR(-ENOMEM);
 
 	kref_init(&ctx->ref);
-	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
-	INIT_LIST_HEAD(&ctx->link);
-	if (ctx->obj == NULL) {
-		kfree(ctx);
-		DRM_DEBUG_DRIVER("Context object allocated failed\n");
-		return ERR_PTR(-ENOMEM);
-	}
+	list_add_tail(&ctx->link, &dev_priv->context_list);
 
-	if (INTEL_INFO(dev)->gen >= 7) {
-		ret = i915_gem_object_set_cache_level(ctx->obj,
-						      I915_CACHE_L3_LLC);
-		/* Failure shouldn't ever happen this early */
-		if (WARN_ON(ret))
-			goto err_out;
-	}
+	if (dev_priv->hw_context_size) {
+		ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
+		if (ctx->obj == NULL) {
+			ret = -ENOMEM;
+			goto err_out;
+		}
 
-	list_add_tail(&ctx->link, &dev_priv->context_list);
+		if (INTEL_INFO(dev)->gen >= 7) {
+			ret = i915_gem_object_set_cache_level(ctx->obj,
+							      I915_CACHE_L3_LLC);
+			/* Failure shouldn't ever happen this early */
+			if (WARN_ON(ret))
+				goto err_out;
+		}
+	}
 
 	/* Default context will never have a file_priv */
-	if (file_priv == NULL)
-		return ctx;
-
-	ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID, 0,
-			GFP_KERNEL);
-	if (ret < 0)
-		goto err_out;
+	if (file_priv != NULL) {
+		ret = idr_alloc(&file_priv->context_idr, ctx,
+				DEFAULT_CONTEXT_ID, 0, GFP_KERNEL);
+		if (ret < 0)
+			goto err_out;
+	} else
+		ret = DEFAULT_CONTEXT_ID;
 
 	ctx->file_priv = file_priv;
 	ctx->id = ret;
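
The net effect of this hunk: every context, hardware-backed or "fake", is
kref-tracked and linked into dev_priv->context_list; only the backing object
(ctx->obj) and the file_priv idr entry are conditional. A minimal user-space
sketch of that invariant follows — toy types and names (toy_context,
toy_create), not driver code:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the driver types; illustrative only. */
struct toy_context {
	int ref;	/* models kref_init(&ctx->ref) */
	void *obj;	/* models ctx->obj; NULL marks a fake context */
	int id;
};

#define TOY_DEFAULT_CONTEXT_ID 0

/* Mirrors the new flow: the context always exists and is refcounted;
 * the HW state object is allocated only when hw_context_size != 0. */
static struct toy_context *toy_create(size_t hw_context_size, int has_file)
{
	struct toy_context *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		return NULL;
	ctx->ref = 1;
	if (hw_context_size) {
		ctx->obj = malloc(hw_context_size);
		if (!ctx->obj) {
			free(ctx);
			return NULL;
		}
	}
	/* The default context has no file and keeps the default id. */
	ctx->id = has_file ? 1 : TOY_DEFAULT_CONTEXT_ID;
	return ctx;
}

int main(void)
{
	struct toy_context *hw = toy_create(4096, 1);
	struct toy_context *fake = toy_create(0, 0);

	printf("hw ctx: obj=%p id=%d\n", hw->obj, hw->id);
	printf("fake ctx: obj=%p id=%d\n", fake->obj, fake->id);
	free(hw->obj);
	free(hw);
	free(fake);
	return 0;
}
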
@@ -294,7 +293,7 @@ i915_gem_create_context(struct drm_device *dev,
 	if (IS_ERR(ctx))
 		return ctx;
 
-	if (is_global_default_ctx) {
+	if (is_global_default_ctx && ctx->obj) {
 		/* We may need to do things with the shrinker which
 		 * require us to immediately switch back to the default
 		 * context. This can cause a problem as pinning the
@@ -342,7 +341,7 @@ i915_gem_create_context(struct drm_device *dev,
 	return ctx;
 
 err_unpin:
-	if (is_global_default_ctx)
+	if (is_global_default_ctx && ctx->obj)
 		i915_gem_object_ggtt_unpin(ctx->obj);
 err_destroy:
 	i915_gem_context_unreference(ctx);
@@ -352,32 +351,22 @@ err_destroy:
 void i915_gem_context_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
 	int i;
 
-	if (!HAS_HW_CONTEXTS(dev))
-		return;
-
 	/* Prevent the hardware from restoring the last context (which hung) on
 	 * the next switch */
 	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct i915_hw_context *dctx;
-		if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
-			continue;
+		struct intel_ring_buffer *ring = &dev_priv->ring[i];
+		struct i915_hw_context *dctx = ring->default_context;
 
 		/* Do a fake switch to the default context */
-		ring = &dev_priv->ring[i];
-		dctx = ring->default_context;
-		if (WARN_ON(!dctx))
+		if (ring->last_context == dctx)
 			continue;
 
 		if (!ring->last_context)
 			continue;
 
-		if (ring->last_context == dctx)
-			continue;
-
-		if (i == RCS) {
+		if (dctx->obj && i == RCS) {
 			WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
 						      get_context_alignment(dev), 0));
 			/* Fake a finish/inactive */
@@ -394,44 +383,35 @@ void i915_gem_context_reset(struct drm_device *dev)
 int i915_gem_context_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
+	struct i915_hw_context *ctx;
 	int i;
 
-	if (!HAS_HW_CONTEXTS(dev))
-		return 0;
-
 	/* Init should only be called once per module load. Eventually the
 	 * restriction on the context_disabled check can be loosened. */
 	if (WARN_ON(dev_priv->ring[RCS].default_context))
 		return 0;
 
-	dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
-
-	if (dev_priv->hw_context_size > (1<<20)) {
-		DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
-		return -E2BIG;
+	if (HAS_HW_CONTEXTS(dev)) {
+		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
+		if (dev_priv->hw_context_size > (1<<20)) {
+			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
+					 dev_priv->hw_context_size);
+			dev_priv->hw_context_size = 0;
+		}
 	}
 
-	dev_priv->ring[RCS].default_context =
-		i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
-
-	if (IS_ERR_OR_NULL(dev_priv->ring[RCS].default_context)) {
-		DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %ld\n",
-				 PTR_ERR(dev_priv->ring[RCS].default_context));
-		return PTR_ERR(dev_priv->ring[RCS].default_context);
+	ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
+	if (IS_ERR(ctx)) {
+		DRM_ERROR("Failed to create default global context (error %ld)\n",
+			  PTR_ERR(ctx));
+		return PTR_ERR(ctx);
 	}
 
-	for (i = RCS + 1; i < I915_NUM_RINGS; i++) {
-		if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
-			continue;
-
-		ring = &dev_priv->ring[i];
-
-		/* NB: RCS will hold a ref for all rings */
-		ring->default_context = dev_priv->ring[RCS].default_context;
-	}
+	/* NB: RCS will hold a ref for all rings */
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		dev_priv->ring[i].default_context = ctx;
 
-	DRM_DEBUG_DRIVER("HW context support initialized\n");
+	DRM_DEBUG_DRIVER("%s context support initialized\n", dev_priv->hw_context_size ? "HW" : "fake");
 	return 0;
 }
 
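
Note that an oversized context image is no longer fatal here: instead of
returning -E2BIG, init zeroes hw_context_size and falls back to fake
(software-only) contexts, while failure to create the default context is now
a hard error. A small sketch of the size gating, assuming the same 1 MiB
sanity cap and 4 KiB rounding as the hunk above (round_up_4k and
validate_context_size are hypothetical helpers, not kernel API):

#include <stdio.h>

static size_t round_up_4k(size_t n)
{
	return (n + 4095) & ~(size_t)4095;	/* models round_up(n, 4096) */
}

static size_t validate_context_size(size_t raw)
{
	size_t size = round_up_4k(raw);

	if (size > (1 << 20)) {
		fprintf(stderr, "Disabling HW Contexts; invalid size %zu\n", size);
		return 0;	/* 0 now means "fake contexts only", not an error */
	}
	return size;
}

int main(void)
{
	printf("%zu\n", validate_context_size(70000));		/* kept: 73728 */
	printf("%zu\n", validate_context_size(2u << 20));	/* rejected: 0 */
	return 0;
}
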
@@ -441,33 +421,30 @@ void i915_gem_context_fini(struct drm_device *dev)
 	struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
 	int i;
 
-	if (!HAS_HW_CONTEXTS(dev))
-		return;
-
-	/* The only known way to stop the gpu from accessing the hw context is
-	 * to reset it. Do this as the very last operation to avoid confusing
-	 * other code, leading to spurious errors. */
-	intel_gpu_reset(dev);
-
-	/* When default context is created and switched to, base object refcount
-	 * will be 2 (+1 from object creation and +1 from do_switch()).
-	 * i915_gem_context_fini() will be called after gpu_idle() has switched
-	 * to default context. So we need to unreference the base object once
-	 * to offset the do_switch part, so that i915_gem_context_unreference()
-	 * can then free the base object correctly. */
-	WARN_ON(!dev_priv->ring[RCS].last_context);
-	if (dev_priv->ring[RCS].last_context == dctx) {
-		/* Fake switch to NULL context */
-		WARN_ON(dctx->obj->active);
-		i915_gem_object_ggtt_unpin(dctx->obj);
-		i915_gem_context_unreference(dctx);
-		dev_priv->ring[RCS].last_context = NULL;
+	if (dctx->obj) {
+		/* The only known way to stop the gpu from accessing the hw context is
+		 * to reset it. Do this as the very last operation to avoid confusing
+		 * other code, leading to spurious errors. */
+		intel_gpu_reset(dev);
+
+		/* When default context is created and switched to, base object refcount
+		 * will be 2 (+1 from object creation and +1 from do_switch()).
+		 * i915_gem_context_fini() will be called after gpu_idle() has switched
+		 * to default context. So we need to unreference the base object once
+		 * to offset the do_switch part, so that i915_gem_context_unreference()
+		 * can then free the base object correctly. */
+		WARN_ON(!dev_priv->ring[RCS].last_context);
+		if (dev_priv->ring[RCS].last_context == dctx) {
+			/* Fake switch to NULL context */
+			WARN_ON(dctx->obj->active);
+			i915_gem_object_ggtt_unpin(dctx->obj);
+			i915_gem_context_unreference(dctx);
+			dev_priv->ring[RCS].last_context = NULL;
+		}
 	}
 
 	for (i = 0; i < I915_NUM_RINGS; i++) {
 		struct intel_ring_buffer *ring = &dev_priv->ring[i];
-		if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
-			continue;
 
 		if (ring->last_context)
 			i915_gem_context_unreference(ring->last_context);
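
The refcount arithmetic in the comment above is worth making concrete: the
default context holds one reference from creation and gains a second when
do_switch() makes it ring->last_context, so teardown must put exactly twice.
A toy model under those assumptions (toy_kref, toy_get, toy_put are
illustrative names, not the kernel's kref API):

#include <assert.h>
#include <stdio.h>

struct toy_kref { int count; };

static void toy_get(struct toy_kref *k)
{
	k->count++;
}

static void toy_put(struct toy_kref *k)
{
	if (--k->count == 0)
		printf("released\n");	/* kref_put() would call the release fn */
}

int main(void)
{
	struct toy_kref dctx = { .count = 1 };	/* +1 from creation */

	toy_get(&dctx);		/* +1 from do_switch(): now last_context */
	assert(dctx.count == 2);

	toy_put(&dctx);		/* fini(): offset the do_switch reference */
	toy_put(&dctx);		/* fini(): drop the creation reference */
	assert(dctx.count == 0);
	return 0;
}
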
@@ -478,7 +455,6 @@ void i915_gem_context_fini(struct drm_device *dev)
 
 	i915_gem_object_ggtt_unpin(dctx->obj);
 	i915_gem_context_unreference(dctx);
-	dev_priv->mm.aliasing_ppgtt = NULL;
 }
 
 int i915_gem_context_enable(struct drm_i915_private *dev_priv)
@@ -486,9 +462,6 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
 	struct intel_ring_buffer *ring;
 	int ret, i;
 
-	if (!HAS_HW_CONTEXTS(dev_priv->dev))
-		return 0;
-
 	/* This is the only place the aliasing PPGTT gets enabled, which means
 	 * it has to happen before we bail on reset */
 	if (dev_priv->mm.aliasing_ppgtt) {
@@ -503,7 +476,7 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
 	BUG_ON(!dev_priv->ring[RCS].default_context);
 
 	for_each_ring(ring, dev_priv, i) {
-		ret = do_switch(ring, ring->default_context);
+		ret = i915_switch_context(ring, ring->default_context);
 		if (ret)
 			return ret;
 	}
@@ -526,19 +499,6 @@ static int context_idr_cleanup(int id, void *p, void *data)
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!HAS_HW_CONTEXTS(dev)) {
-		/* Cheat for hang stats */
-		file_priv->private_default_ctx =
-			kzalloc(sizeof(struct i915_hw_context), GFP_KERNEL);
-
-		if (file_priv->private_default_ctx == NULL)
-			return -ENOMEM;
-
-		file_priv->private_default_ctx->vm = &dev_priv->gtt.base;
-		return 0;
-	}
 
 	idr_init(&file_priv->context_idr);
 
@@ -559,14 +519,10 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
-	if (!HAS_HW_CONTEXTS(dev)) {
-		kfree(file_priv->private_default_ctx);
-		return;
-	}
-
 	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
-	i915_gem_context_unreference(file_priv->private_default_ctx);
 	idr_destroy(&file_priv->context_idr);
+
+	i915_gem_context_unreference(file_priv->private_default_ctx);
 }
 
 struct i915_hw_context *
@@ -574,9 +530,6 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 {
 	struct i915_hw_context *ctx;
 
-	if (!HAS_HW_CONTEXTS(file_priv->dev_priv->dev))
-		return file_priv->private_default_ctx;
-
 	ctx = (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
 	if (!ctx)
 		return ERR_PTR(-ENOENT);
@@ -758,7 +711,6 @@ unpin_out:
 /**
  * i915_switch_context() - perform a GPU context switch.
  * @ring: ring for which we'll execute the context switch
- * @file_priv: file_priv associated with the context, may be NULL
  * @to: the context to switch to
  *
  * The context life cycle is simple. The context refcount is incremented and
@@ -767,24 +719,30 @@ unpin_out:
  * object while letting the normal object tracking destroy the backing BO.
  */
 int i915_switch_context(struct intel_ring_buffer *ring,
-			struct drm_file *file,
 			struct i915_hw_context *to)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-	BUG_ON(file && to == NULL);
-
-	/* We have the fake context */
-	if (!HAS_HW_CONTEXTS(ring->dev)) {
-		ring->last_context = to;
+	if (to->obj == NULL) { /* We have the fake context */
+		if (to != ring->last_context) {
+			i915_gem_context_reference(to);
+			if (ring->last_context)
+				i915_gem_context_unreference(ring->last_context);
+			ring->last_context = to;
+		}
 		return 0;
 	}
 
 	return do_switch(ring, to);
 }
 
+static bool hw_context_enabled(struct drm_device *dev)
+{
+	return to_i915(dev)->hw_context_size;
+}
+
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file)
 {
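
With the file argument gone, i915_switch_context() is the single entry point
for all rings, and a switch to a fake context is purely a reference swap on
ring->last_context; hw_context_enabled() then lets the ioctl below key off
the runtime hw_context_size instead of the compile-time platform check. A
toy model of the reference swap — user-space, illustrative names (toy_ctx,
toy_switch), assuming the take-new-ref-before-dropping-old ordering shown in
the hunk:

#include <stdio.h>
#include <stdlib.h>

struct toy_ctx { int ref; void *obj; };

static void toy_ref(struct toy_ctx *c)
{
	c->ref++;
}

static void toy_unref(struct toy_ctx *c)
{
	if (--c->ref == 0)
		free(c);
}

struct toy_ring { struct toy_ctx *last_context; };

/* Mirrors the fake-context branch: referencing the new context before
 * unreferencing the old one keeps a self-switch from freeing it. */
static int toy_switch(struct toy_ring *ring, struct toy_ctx *to)
{
	if (to->obj == NULL) {	/* fake context */
		if (to != ring->last_context) {
			toy_ref(to);
			if (ring->last_context)
				toy_unref(ring->last_context);
			ring->last_context = to;
		}
		return 0;
	}
	return -1;	/* a real context would take the HW do_switch() path */
}

int main(void)
{
	struct toy_ctx *fake = calloc(1, sizeof(*fake));
	struct toy_ring ring = { NULL };

	fake->ref = 1;
	toy_switch(&ring, fake);
	toy_switch(&ring, fake);	/* no-op: already current */
	printf("refs after two switches: %d\n", fake->ref);	/* prints 2 */
	return 0;
}
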
@@ -793,7 +751,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 	struct i915_hw_context *ctx;
 	int ret;
 
-	if (!HAS_HW_CONTEXTS(dev))
+	if (!hw_context_enabled(dev))
 		return -ENODEV;
 
 	ret = i915_mutex_lock_interruptible(dev);