author	Dave Airlie <airlied@redhat.com>	2014-04-15 20:35:01 -0400
committer	Dave Airlie <airlied@redhat.com>	2014-04-15 20:35:01 -0400
commit	5df5242d34c131d049359e01b9ce928659885c4b
tree	f4f9d669ef6561205d4459047dc26c4ddcd208d3
parent	55101e2d6ce1c780f6ee8fee5f37306971aac6cd
parent	691e6415c891b8b2b082a120b896b443531c4d45
Merge tag 'drm-intel-fixes-2014-04-11' of git://anongit.freedesktop.org/drm-intel into drm-next
Some fixes from Intel.

* tag 'drm-intel-fixes-2014-04-11' of git://anongit.freedesktop.org/drm-intel:
  drm/i915: Always use kref tracking for all contexts.
  drm/i915: do not setup backlight if not available according to VBT
  drm/i915: check VBT for supported backlight type
  drm/i915: Disable self-refresh for untiled fbs on i915gm
  drm/mm: Don't WARN if drm_mm_reserve_node
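The headline change in this pull is the context-lifetime fix: i915_gem_context_reference()/i915_gem_context_unreference() now always take and drop a kref instead of special-casing fake contexts. As a rough illustration only — a self-contained userspace sketch of the generic kref-style pattern, with made-up names rather than the i915 API — the idea looks like this:

	/* Minimal sketch of kref-style lifetime management: every user takes a
	 * reference, every release drops one, and the object is freed exactly
	 * once when the count reaches zero. Hypothetical names, not i915 code. */
	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct ctx {
		atomic_int ref;	/* stands in for struct kref */
		int id;
	};

	static struct ctx *ctx_create(int id)
	{
		struct ctx *c = malloc(sizeof(*c));
		if (!c)
			return NULL;
		atomic_init(&c->ref, 1);	/* creator holds the first reference */
		c->id = id;
		return c;
	}

	static void ctx_get(struct ctx *c)
	{
		atomic_fetch_add(&c->ref, 1);	/* analogous to kref_get() */
	}

	static void ctx_put(struct ctx *c)
	{
		/* analogous to kref_put(): free when the last reference drops */
		if (atomic_fetch_sub(&c->ref, 1) == 1) {
			printf("freeing ctx %d\n", c->id);
			free(c);
		}
	}

	int main(void)
	{
		struct ctx *c = ctx_create(42);
		ctx_get(c);	/* e.g. a ring caching its last-used context */
		ctx_put(c);	/* the ring lets go */
		ctx_put(c);	/* creator lets go -> freed here */
		return 0;
	}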
-rw-r--r--	drivers/gpu/drm/drm_mm.c	2
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	9
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	2
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_context.c	218
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_execbuffer.c	2
-rw-r--r--	drivers/gpu/drm/i915/intel_bios.c	10
-rw-r--r--	drivers/gpu/drm/i915/intel_bios.h	3
-rw-r--r--	drivers/gpu/drm/i915/intel_panel.c	5
-rw-r--r--	drivers/gpu/drm/i915/intel_pm.c	10
9 files changed, 122 insertions, 139 deletions
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 71e2d3fcd6ee..04a209e2b66d 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -207,8 +207,6 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 		return 0;
 	}
 
-	WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
-	     node->start, node->size);
 	return -ENOSPC;
 }
 EXPORT_SYMBOL(drm_mm_reserve_node);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0905cd915589..ec82f6bff122 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1308,6 +1308,7 @@ struct intel_vbt_data {
 
 	struct {
 		u16 pwm_freq_hz;
+		bool present;
 		bool active_low_pwm;
 	} backlight;
 
@@ -2431,20 +2432,18 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
 int i915_gem_context_enable(struct drm_i915_private *dev_priv);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
 int i915_switch_context(struct intel_ring_buffer *ring,
-			struct drm_file *file, struct i915_hw_context *to);
+			struct i915_hw_context *to);
 struct i915_hw_context *
 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
 void i915_gem_context_free(struct kref *ctx_ref);
 static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
 {
-	if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
-		kref_get(&ctx->ref);
+	kref_get(&ctx->ref);
 }
 
 static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
 {
-	if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
-		kref_put(&ctx->ref, i915_gem_context_free);
+	kref_put(&ctx->ref, i915_gem_context_free);
 }
 
 static inline bool i915_gem_context_is_default(const struct i915_hw_context *c)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6370a761d137..2871ce75f438 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2790,7 +2790,7 @@ int i915_gpu_idle(struct drm_device *dev)
 
 	/* Flush everything onto the inactive list. */
 	for_each_ring(ring, dev_priv, i) {
-		ret = i915_switch_context(ring, NULL, ring->default_context);
+		ret = i915_switch_context(ring, ring->default_context);
 		if (ret)
 			return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 6043062ffce7..d72db15afa02 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -96,9 +96,6 @@
 #define GEN6_CONTEXT_ALIGN (64<<10)
 #define GEN7_CONTEXT_ALIGN 4096
 
-static int do_switch(struct intel_ring_buffer *ring,
-		     struct i915_hw_context *to);
-
 static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_device *dev = ppgtt->base.dev;
@@ -185,13 +182,15 @@ void i915_gem_context_free(struct kref *ctx_ref)
 					   typeof(*ctx), ref);
 	struct i915_hw_ppgtt *ppgtt = NULL;
 
-	/* We refcount even the aliasing PPGTT to keep the code symmetric */
-	if (USES_PPGTT(ctx->obj->base.dev))
-		ppgtt = ctx_to_ppgtt(ctx);
+	if (ctx->obj) {
+		/* We refcount even the aliasing PPGTT to keep the code symmetric */
+		if (USES_PPGTT(ctx->obj->base.dev))
+			ppgtt = ctx_to_ppgtt(ctx);
 
-	/* XXX: Free up the object before tearing down the address space, in
-	 * case we're bound in the PPGTT */
-	drm_gem_object_unreference(&ctx->obj->base);
+		/* XXX: Free up the object before tearing down the address space, in
+		 * case we're bound in the PPGTT */
+		drm_gem_object_unreference(&ctx->obj->base);
+	}
 
 	if (ppgtt)
 		kref_put(&ppgtt->ref, ppgtt_release);
@@ -232,32 +231,32 @@ __create_hw_context(struct drm_device *dev,
 		return ERR_PTR(-ENOMEM);
 
 	kref_init(&ctx->ref);
-	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
-	INIT_LIST_HEAD(&ctx->link);
-	if (ctx->obj == NULL) {
-		kfree(ctx);
-		DRM_DEBUG_DRIVER("Context object allocated failed\n");
-		return ERR_PTR(-ENOMEM);
-	}
+	list_add_tail(&ctx->link, &dev_priv->context_list);
 
-	if (INTEL_INFO(dev)->gen >= 7) {
-		ret = i915_gem_object_set_cache_level(ctx->obj,
-						      I915_CACHE_L3_LLC);
-		/* Failure shouldn't ever happen this early */
-		if (WARN_ON(ret))
-			goto err_out;
-	}
+	if (dev_priv->hw_context_size) {
+		ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
+		if (ctx->obj == NULL) {
+			ret = -ENOMEM;
+			goto err_out;
+		}
 
-	list_add_tail(&ctx->link, &dev_priv->context_list);
+		if (INTEL_INFO(dev)->gen >= 7) {
+			ret = i915_gem_object_set_cache_level(ctx->obj,
+							      I915_CACHE_L3_LLC);
+			/* Failure shouldn't ever happen this early */
+			if (WARN_ON(ret))
+				goto err_out;
+		}
+	}
 
 	/* Default context will never have a file_priv */
-	if (file_priv == NULL)
-		return ctx;
-
-	ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID, 0,
-			GFP_KERNEL);
-	if (ret < 0)
-		goto err_out;
+	if (file_priv != NULL) {
+		ret = idr_alloc(&file_priv->context_idr, ctx,
+				DEFAULT_CONTEXT_ID, 0, GFP_KERNEL);
+		if (ret < 0)
+			goto err_out;
+	} else
+		ret = DEFAULT_CONTEXT_ID;
 
 	ctx->file_priv = file_priv;
 	ctx->id = ret;
@@ -294,7 +293,7 @@ i915_gem_create_context(struct drm_device *dev,
 	if (IS_ERR(ctx))
 		return ctx;
 
-	if (is_global_default_ctx) {
+	if (is_global_default_ctx && ctx->obj) {
 		/* We may need to do things with the shrinker which
 		 * require us to immediately switch back to the default
 		 * context. This can cause a problem as pinning the
@@ -342,7 +341,7 @@ i915_gem_create_context(struct drm_device *dev,
 	return ctx;
 
 err_unpin:
-	if (is_global_default_ctx)
+	if (is_global_default_ctx && ctx->obj)
 		i915_gem_object_ggtt_unpin(ctx->obj);
 err_destroy:
 	i915_gem_context_unreference(ctx);
@@ -352,32 +351,22 @@ err_destroy:
 void i915_gem_context_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
 	int i;
 
-	if (!HAS_HW_CONTEXTS(dev))
-		return;
-
 	/* Prevent the hardware from restoring the last context (which hung) on
 	 * the next switch */
 	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct i915_hw_context *dctx;
-		if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
-			continue;
+		struct intel_ring_buffer *ring = &dev_priv->ring[i];
+		struct i915_hw_context *dctx = ring->default_context;
 
 		/* Do a fake switch to the default context */
-		ring = &dev_priv->ring[i];
-		dctx = ring->default_context;
-		if (WARN_ON(!dctx))
+		if (ring->last_context == dctx)
 			continue;
 
 		if (!ring->last_context)
 			continue;
 
-		if (ring->last_context == dctx)
-			continue;
-
-		if (i == RCS) {
+		if (dctx->obj && i == RCS) {
 			WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
 						      get_context_alignment(dev), 0));
 			/* Fake a finish/inactive */
@@ -394,44 +383,35 @@ void i915_gem_context_reset(struct drm_device *dev)
 int i915_gem_context_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
+	struct i915_hw_context *ctx;
 	int i;
 
-	if (!HAS_HW_CONTEXTS(dev))
-		return 0;
-
 	/* Init should only be called once per module load. Eventually the
 	 * restriction on the context_disabled check can be loosened. */
 	if (WARN_ON(dev_priv->ring[RCS].default_context))
 		return 0;
 
-	dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
-
-	if (dev_priv->hw_context_size > (1<<20)) {
-		DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
-		return -E2BIG;
+	if (HAS_HW_CONTEXTS(dev)) {
+		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
+		if (dev_priv->hw_context_size > (1<<20)) {
+			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
+					 dev_priv->hw_context_size);
+			dev_priv->hw_context_size = 0;
+		}
 	}
 
-	dev_priv->ring[RCS].default_context =
-		i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
-
-	if (IS_ERR_OR_NULL(dev_priv->ring[RCS].default_context)) {
-		DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %ld\n",
-				 PTR_ERR(dev_priv->ring[RCS].default_context));
-		return PTR_ERR(dev_priv->ring[RCS].default_context);
+	ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
+	if (IS_ERR(ctx)) {
+		DRM_ERROR("Failed to create default global context (error %ld)\n",
+			  PTR_ERR(ctx));
+		return PTR_ERR(ctx);
 	}
 
-	for (i = RCS + 1; i < I915_NUM_RINGS; i++) {
-		if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
-			continue;
-
-		ring = &dev_priv->ring[i];
-
-		/* NB: RCS will hold a ref for all rings */
-		ring->default_context = dev_priv->ring[RCS].default_context;
-	}
+	/* NB: RCS will hold a ref for all rings */
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		dev_priv->ring[i].default_context = ctx;
 
-	DRM_DEBUG_DRIVER("HW context support initialized\n");
+	DRM_DEBUG_DRIVER("%s context support initialized\n", dev_priv->hw_context_size ? "HW" : "fake");
 	return 0;
 }
 
@@ -441,33 +421,30 @@ void i915_gem_context_fini(struct drm_device *dev)
 	struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
 	int i;
 
-	if (!HAS_HW_CONTEXTS(dev))
-		return;
-
-	/* The only known way to stop the gpu from accessing the hw context is
-	 * to reset it. Do this as the very last operation to avoid confusing
-	 * other code, leading to spurious errors. */
-	intel_gpu_reset(dev);
-
-	/* When default context is created and switched to, base object refcount
-	 * will be 2 (+1 from object creation and +1 from do_switch()).
-	 * i915_gem_context_fini() will be called after gpu_idle() has switched
-	 * to default context. So we need to unreference the base object once
-	 * to offset the do_switch part, so that i915_gem_context_unreference()
-	 * can then free the base object correctly. */
-	WARN_ON(!dev_priv->ring[RCS].last_context);
-	if (dev_priv->ring[RCS].last_context == dctx) {
-		/* Fake switch to NULL context */
-		WARN_ON(dctx->obj->active);
-		i915_gem_object_ggtt_unpin(dctx->obj);
-		i915_gem_context_unreference(dctx);
-		dev_priv->ring[RCS].last_context = NULL;
+	if (dctx->obj) {
+		/* The only known way to stop the gpu from accessing the hw context is
+		 * to reset it. Do this as the very last operation to avoid confusing
+		 * other code, leading to spurious errors. */
+		intel_gpu_reset(dev);
+
+		/* When default context is created and switched to, base object refcount
+		 * will be 2 (+1 from object creation and +1 from do_switch()).
+		 * i915_gem_context_fini() will be called after gpu_idle() has switched
+		 * to default context. So we need to unreference the base object once
+		 * to offset the do_switch part, so that i915_gem_context_unreference()
+		 * can then free the base object correctly. */
+		WARN_ON(!dev_priv->ring[RCS].last_context);
+		if (dev_priv->ring[RCS].last_context == dctx) {
+			/* Fake switch to NULL context */
+			WARN_ON(dctx->obj->active);
+			i915_gem_object_ggtt_unpin(dctx->obj);
+			i915_gem_context_unreference(dctx);
+			dev_priv->ring[RCS].last_context = NULL;
+		}
 	}
 
 	for (i = 0; i < I915_NUM_RINGS; i++) {
 		struct intel_ring_buffer *ring = &dev_priv->ring[i];
-		if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
-			continue;
 
 		if (ring->last_context)
 			i915_gem_context_unreference(ring->last_context);
@@ -478,7 +455,6 @@ void i915_gem_context_fini(struct drm_device *dev)
 
 	i915_gem_object_ggtt_unpin(dctx->obj);
 	i915_gem_context_unreference(dctx);
-	dev_priv->mm.aliasing_ppgtt = NULL;
 }
 
 int i915_gem_context_enable(struct drm_i915_private *dev_priv)
@@ -486,9 +462,6 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
 	struct intel_ring_buffer *ring;
 	int ret, i;
 
-	if (!HAS_HW_CONTEXTS(dev_priv->dev))
-		return 0;
-
 	/* This is the only place the aliasing PPGTT gets enabled, which means
 	 * it has to happen before we bail on reset */
 	if (dev_priv->mm.aliasing_ppgtt) {
@@ -503,7 +476,7 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
 	BUG_ON(!dev_priv->ring[RCS].default_context);
 
 	for_each_ring(ring, dev_priv, i) {
-		ret = do_switch(ring, ring->default_context);
+		ret = i915_switch_context(ring, ring->default_context);
 		if (ret)
 			return ret;
 	}
@@ -526,19 +499,6 @@ static int context_idr_cleanup(int id, void *p, void *data)
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!HAS_HW_CONTEXTS(dev)) {
-		/* Cheat for hang stats */
-		file_priv->private_default_ctx =
-			kzalloc(sizeof(struct i915_hw_context), GFP_KERNEL);
-
-		if (file_priv->private_default_ctx == NULL)
-			return -ENOMEM;
-
-		file_priv->private_default_ctx->vm = &dev_priv->gtt.base;
-		return 0;
-	}
 
 	idr_init(&file_priv->context_idr);
 
@@ -559,14 +519,10 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
-	if (!HAS_HW_CONTEXTS(dev)) {
-		kfree(file_priv->private_default_ctx);
-		return;
-	}
-
 	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
-	i915_gem_context_unreference(file_priv->private_default_ctx);
 	idr_destroy(&file_priv->context_idr);
+
+	i915_gem_context_unreference(file_priv->private_default_ctx);
 }
 
 struct i915_hw_context *
@@ -574,9 +530,6 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 {
 	struct i915_hw_context *ctx;
 
-	if (!HAS_HW_CONTEXTS(file_priv->dev_priv->dev))
-		return file_priv->private_default_ctx;
-
 	ctx = (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
 	if (!ctx)
 		return ERR_PTR(-ENOENT);
@@ -758,7 +711,6 @@ unpin_out:
 /**
  * i915_switch_context() - perform a GPU context switch.
  * @ring: ring for which we'll execute the context switch
- * @file_priv: file_priv associated with the context, may be NULL
  * @to: the context to switch to
  *
  * The context life cycle is simple. The context refcount is incremented and
@@ -767,24 +719,30 @@ unpin_out:
  * object while letting the normal object tracking destroy the backing BO.
  */
 int i915_switch_context(struct intel_ring_buffer *ring,
-			struct drm_file *file,
 			struct i915_hw_context *to)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-	BUG_ON(file && to == NULL);
-
-	/* We have the fake context */
-	if (!HAS_HW_CONTEXTS(ring->dev)) {
-		ring->last_context = to;
+	if (to->obj == NULL) { /* We have the fake context */
+		if (to != ring->last_context) {
+			i915_gem_context_reference(to);
+			if (ring->last_context)
+				i915_gem_context_unreference(ring->last_context);
+			ring->last_context = to;
+		}
 		return 0;
 	}
 
 	return do_switch(ring, to);
 }
 
+static bool hw_context_enabled(struct drm_device *dev)
+{
+	return to_i915(dev)->hw_context_size;
+}
+
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file)
 {
@@ -793,7 +751,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 	struct i915_hw_context *ctx;
 	int ret;
 
-	if (!HAS_HW_CONTEXTS(dev))
+	if (!hw_context_enabled(dev))
 		return -ENODEV;
 
 	ret = i915_mutex_lock_interruptible(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 7447160155a3..2c9d9cbaf653 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1221,7 +1221,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
 		goto err;
 
-	ret = i915_switch_context(ring, file, ctx);
+	ret = i915_switch_context(ring, ctx);
 	if (ret)
 		goto err;
 
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4867f4cc0938..fa486c5fbb02 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -287,6 +287,9 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 	const struct bdb_lfp_backlight_data *backlight_data;
 	const struct bdb_lfp_backlight_data_entry *entry;
 
+	/* Err to enabling backlight if no backlight block. */
+	dev_priv->vbt.backlight.present = true;
+
 	backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
 	if (!backlight_data)
 		return;
@@ -299,6 +302,13 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 
 	entry = &backlight_data->data[panel_type];
 
+	dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
+	if (!dev_priv->vbt.backlight.present) {
+		DRM_DEBUG_KMS("PWM backlight not present in VBT (type %u)\n",
+			      entry->type);
+		return;
+	}
+
 	dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
 	dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
 	DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 83b7629e4367..f27f7b282465 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -374,6 +374,9 @@ struct bdb_lvds_lfp_data {
 	struct bdb_lvds_lfp_data_entry data[16];
 } __packed;
 
+#define BDB_BACKLIGHT_TYPE_NONE	0
+#define BDB_BACKLIGHT_TYPE_PWM	2
+
 struct bdb_lfp_backlight_data_entry {
 	u8 type:2;
 	u8 active_low_pwm:1;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index cb058408c70e..0eead16aeda7 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -1065,6 +1065,11 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
 	unsigned long flags;
 	int ret;
 
+	if (!dev_priv->vbt.backlight.present) {
+		DRM_DEBUG_KMS("native backlight control not available per VBT\n");
+		return 0;
+	}
+
 	/* set level and max in panel struct */
 	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 	ret = dev_priv->display.setup_backlight(intel_connector);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 5874716774a7..19e94c3edc19 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1545,6 +1545,16 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 
 	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
 
+	if (IS_I915GM(dev) && enabled) {
+		struct intel_framebuffer *fb;
+
+		fb = to_intel_framebuffer(enabled->primary->fb);
+
+		/* self-refresh seems busted with untiled */
+		if (fb->obj->tiling_mode == I915_TILING_NONE)
+			enabled = NULL;
+	}
+
 	/*
 	 * Overlay gets an aggressive default since video jitter is bad.
 	 */