author    Dave Airlie <airlied@redhat.com>    2017-03-14 21:32:46 -0400
committer Dave Airlie <airlied@redhat.com>    2017-03-14 21:32:46 -0400
commit    4daad1b2b087eb43611653a7e1b0ecb01f4f7481
tree      73b4075c30c65635d748075d63071448eb178c4a
parent    490b89813c24f2362f46fea3bbc222a273a0a7a8
parent    6aef660370a9c246956ba6d01eebd8063c4214cb
Merge tag 'drm-intel-fixes-2017-03-14' of git://anongit.freedesktop.org/git/drm-intel into drm-fixes
drm/i915 fixes for v4.11-rc3

* tag 'drm-intel-fixes-2017-03-14' of git://anongit.freedesktop.org/git/drm-intel:
  drm/i915: Fix forcewake active domain tracking
  drm/i915: Nuke skl_update_plane debug message from the pipe update critical section
  drm/i915: use correct node for handling cache domain eviction
  drm/i915: Drain the freed state from the tail of the next commit
  drm/i915: Nuke debug messages from the pipe update critical section
  drm/i915: Use pagecache write to prepopulate shmemfs from pwrite-ioctl
  drm/i915: Store a permanent error in obj->mm.pages
  drm/i915: Move updating color management to before vblank evasion
  drm/i915/gen9: Increase PCODE request timeout to 50ms
  drm/i915: Avoid tweaking evaluation thresholds on Baytrail v3
  drm/i915: Remove the vma from the drm_mm if binding fails
  drm/i915/fbdev: Stop repeating tile configuration on stagnation
  drm/i915/glk: Fix watermark computations for third sprite plane
  drm/i915: Squelch any ktime/jiffie rounding errors for wait-ioctl
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h        |  1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c        | 97
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c  |  8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_object.h |  3
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c        | 57
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c   | 58
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c     | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c        | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c    |  3
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c    | 13
10 files changed, 191 insertions(+), 77 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0a4b42d31391..7febe6eecf72 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -293,6 +293,7 @@ enum plane_id {
     PLANE_PRIMARY,
     PLANE_SPRITE0,
     PLANE_SPRITE1,
+    PLANE_SPRITE2,
     PLANE_CURSOR,
     I915_MAX_PLANES,
 };
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6908123162d1..10777da73039 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1434,6 +1434,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 
     trace_i915_gem_object_pwrite(obj, args->offset, args->size);
 
+    ret = -ENODEV;
+    if (obj->ops->pwrite)
+        ret = obj->ops->pwrite(obj, args);
+    if (ret != -ENODEV)
+        goto err;
+
     ret = i915_gem_object_wait(obj,
                                I915_WAIT_INTERRUPTIBLE |
                                I915_WAIT_ALL,
@@ -2119,6 +2125,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
      */
     shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
     obj->mm.madv = __I915_MADV_PURGED;
+    obj->mm.pages = ERR_PTR(-EFAULT);
 }
 
 /* Try to discard unwanted pages */
@@ -2218,7 +2225,9 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 
     __i915_gem_object_reset_page_iter(obj);
 
-    obj->ops->put_pages(obj, pages);
+    if (!IS_ERR(pages))
+        obj->ops->put_pages(obj, pages);
+
 unlock:
     mutex_unlock(&obj->mm.lock);
 }
@@ -2437,7 +2446,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
     if (err)
         return err;
 
-    if (unlikely(!obj->mm.pages)) {
+    if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
         err = ____i915_gem_object_get_pages(obj);
         if (err)
             goto unlock;
@@ -2515,7 +2524,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 
     pinned = true;
     if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
-        if (unlikely(!obj->mm.pages)) {
+        if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
             ret = ____i915_gem_object_get_pages(obj);
             if (ret)
                 goto err_unlock;
@@ -2563,6 +2572,75 @@ err_unlock:
     goto out_unlock;
 }
 
+static int
+i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
+                           const struct drm_i915_gem_pwrite *arg)
+{
+    struct address_space *mapping = obj->base.filp->f_mapping;
+    char __user *user_data = u64_to_user_ptr(arg->data_ptr);
+    u64 remain, offset;
+    unsigned int pg;
+
+    /* Before we instantiate/pin the backing store for our use, we
+     * can prepopulate the shmemfs filp efficiently using a write into
+     * the pagecache. We avoid the penalty of instantiating all the
+     * pages, important if the user is just writing to a few and never
+     * uses the object on the GPU, and using a direct write into shmemfs
+     * allows it to avoid the cost of retrieving a page (either swapin
+     * or clearing-before-use) before it is overwritten.
+     */
+    if (READ_ONCE(obj->mm.pages))
+        return -ENODEV;
+
+    /* Before the pages are instantiated the object is treated as being
+     * in the CPU domain. The pages will be clflushed as required before
+     * use, and we can freely write into the pages directly. If userspace
+     * races pwrite with any other operation; corruption will ensue -
+     * that is userspace's prerogative!
+     */
+
+    remain = arg->size;
+    offset = arg->offset;
+    pg = offset_in_page(offset);
+
+    do {
+        unsigned int len, unwritten;
+        struct page *page;
+        void *data, *vaddr;
+        int err;
+
+        len = PAGE_SIZE - pg;
+        if (len > remain)
+            len = remain;
+
+        err = pagecache_write_begin(obj->base.filp, mapping,
+                                    offset, len, 0,
+                                    &page, &data);
+        if (err < 0)
+            return err;
+
+        vaddr = kmap(page);
+        unwritten = copy_from_user(vaddr + pg, user_data, len);
+        kunmap(page);
+
+        err = pagecache_write_end(obj->base.filp, mapping,
+                                  offset, len, len - unwritten,
+                                  page, data);
+        if (err < 0)
+            return err;
+
+        if (unwritten)
+            return -EFAULT;
+
+        remain -= len;
+        user_data += len;
+        offset += len;
+        pg = 0;
+    } while (remain);
+
+    return 0;
+}
+
 static bool ban_context(const struct i915_gem_context *ctx)
 {
     return (i915_gem_context_is_bannable(ctx) &&
@@ -3029,6 +3107,16 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
         args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
         if (args->timeout_ns < 0)
             args->timeout_ns = 0;
+
+        /*
+         * Apparently ktime isn't accurate enough and occasionally has a
+         * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+         * things up to make the test happy. We allow up to 1 jiffy.
+         *
+         * This is a regression from the timespec->ktime conversion.
+         */
+        if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
+            args->timeout_ns = 0;
     }
 
     i915_gem_object_put(obj);
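
The hunk above clamps any leftover wait budget smaller than one scheduler tick to zero, so the ioctl never reports -ETIME together with a non-zero remaining timeout. A minimal userspace sketch of the same rounding guard, with an assumed 10ms tick (purely illustrative, not the kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TICK_NS 10000000LL  /* assumed 10ms tick, stand-in for one jiffy */

/* If the wait timed out but less than one tick of budget remains,
 * report the remainder as zero so callers see a consistent result. */
static int64_t clamp_timeout(bool timed_out, int64_t timeout_ns)
{
    if (timeout_ns < 0)
        timeout_ns = 0;
    if (timed_out && timeout_ns / TICK_NS == 0)
        timeout_ns = 0;
    return timeout_ns;
}

int main(void)
{
    printf("%lld\n", (long long)clamp_timeout(true, 3000000));  /* -> 0 */
    printf("%lld\n", (long long)clamp_timeout(false, 3000000)); /* -> 3000000 */
    return 0;
}
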
@@ -3974,8 +4062,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
     .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
              I915_GEM_OBJECT_IS_SHRINKABLE,
+
     .get_pages = i915_gem_object_get_pages_gtt,
     .put_pages = i915_gem_object_put_pages_gtt,
+
+    .pwrite = i915_gem_object_pwrite_gtt,
 };
 
 struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index c181b1bb3d2c..3be2503aa042 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -293,12 +293,12 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
      * those as well to make room for our guard pages.
      */
     if (check_color) {
-        if (vma->node.start + vma->node.size == node->start) {
-            if (vma->node.color == node->color)
+        if (node->start + node->size == target->start) {
+            if (node->color == target->color)
                 continue;
         }
-        if (vma->node.start == node->start + node->size) {
-            if (vma->node.color == node->color)
+        if (node->start == target->start + target->size) {
+            if (node->color == target->color)
                 continue;
         }
     }
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index bf90b07163d1..76b80a0be797 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -54,6 +54,9 @@ struct drm_i915_gem_object_ops {
     struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
     void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
 
+    int (*pwrite)(struct drm_i915_gem_object *,
+                  const struct drm_i915_gem_pwrite *);
+
     int (*dmabuf_export)(struct drm_i915_gem_object *);
     void (*release)(struct drm_i915_gem_object *);
 };
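
The new ->pwrite hook is optional: the ioctl hunk in i915_gem.c probes it and falls back to the generic path whenever the hook is absent or declines with -ENODEV. A self-contained sketch of that optional-op dispatch pattern; all types and names below are invented for illustration:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct obj;

struct obj_ops {
    int (*pwrite)(struct obj *o, const void *args); /* may be NULL */
};

struct obj {
    const struct obj_ops *ops;
};

/* Slow but always-available path. */
static int generic_pwrite(struct obj *o, const void *args)
{
    (void)o; (void)args;
    return 0;
}

/* Try the object's fast path first; -ENODEV means "not handled, fall back". */
static int do_pwrite(struct obj *o, const void *args)
{
    int ret = -ENODEV;

    if (o->ops->pwrite)
        ret = o->ops->pwrite(o, args);
    if (ret != -ENODEV)
        return ret;        /* handled: success or a real error */

    return generic_pwrite(o, args);
}

int main(void)
{
    static const struct obj_ops no_fast_path = { .pwrite = NULL };
    struct obj o = { .ops = &no_fast_path };

    printf("ret = %d\n", do_pwrite(&o, NULL)); /* falls back, prints 0 */
    return 0;
}
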
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 155906e84812..df20e9bc1c0f 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -512,10 +512,36 @@ err_unpin:
     return ret;
 }
 
+static void
+i915_vma_remove(struct i915_vma *vma)
+{
+    struct drm_i915_gem_object *obj = vma->obj;
+
+    GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+    GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+
+    drm_mm_remove_node(&vma->node);
+    list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+
+    /* Since the unbound list is global, only move to that list if
+     * no more VMAs exist.
+     */
+    if (--obj->bind_count == 0)
+        list_move_tail(&obj->global_link,
+                       &to_i915(obj->base.dev)->mm.unbound_list);
+
+    /* And finally now the object is completely decoupled from this vma,
+     * we can drop its hold on the backing storage and allow it to be
+     * reaped by the shrinker.
+     */
+    i915_gem_object_unpin_pages(obj);
+    GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+}
+
 int __i915_vma_do_pin(struct i915_vma *vma,
                       u64 size, u64 alignment, u64 flags)
 {
-    unsigned int bound = vma->flags;
+    const unsigned int bound = vma->flags;
     int ret;
 
     lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
@@ -524,18 +550,18 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 
     if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
         ret = -EBUSY;
-        goto err;
+        goto err_unpin;
     }
 
     if ((bound & I915_VMA_BIND_MASK) == 0) {
         ret = i915_vma_insert(vma, size, alignment, flags);
         if (ret)
-            goto err;
+            goto err_unpin;
     }
 
     ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
     if (ret)
-        goto err;
+        goto err_remove;
 
     if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
         __i915_vma_set_map_and_fenceable(vma);
@@ -544,7 +570,12 @@ int __i915_vma_do_pin(struct i915_vma *vma,
     GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
     return 0;
 
-err:
+err_remove:
+    if ((bound & I915_VMA_BIND_MASK) == 0) {
+        GEM_BUG_ON(vma->pages);
+        i915_vma_remove(vma);
+    }
+err_unpin:
     __i915_vma_unpin(vma);
     return ret;
 }
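
The error path above is the familiar layered goto unwind: each setup step gets its own label, and a failure jumps to the label that undoes only what has already been done. A minimal sketch of the shape of that unwind, with invented stand-in resources (not the real vma code):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool reserve_node(void) { return true; }  /* i915_vma_insert() analogue */
static void release_node(void) { }               /* i915_vma_remove() analogue */
static bool bind_node(void)    { return false; } /* i915_vma_bind() analogue */
static void unpin(void)        { }

static int do_pin(bool already_bound)
{
    int ret = 0;

    if (!already_bound) {
        if (!reserve_node()) {
            ret = -ENOSPC;
            goto err_unpin;          /* nothing new to tear down yet */
        }
    }

    if (!bind_node()) {
        ret = -EINVAL;
        goto err_remove;             /* undo the reservation we just made */
    }

    return 0;

err_remove:
    if (!already_bound)              /* only undo what this call created */
        release_node();
err_unpin:
    unpin();
    return ret;
}

int main(void)
{
    printf("do_pin -> %d\n", do_pin(false)); /* -> -EINVAL via err_remove */
    return 0;
}
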
@@ -657,9 +688,6 @@ int i915_vma_unbind(struct i915_vma *vma)
     }
     vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
-    drm_mm_remove_node(&vma->node);
-    list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
-
     if (vma->pages != obj->mm.pages) {
         GEM_BUG_ON(!vma->pages);
         sg_free_table(vma->pages);
@@ -667,18 +695,7 @@ int i915_vma_unbind(struct i915_vma *vma)
     }
     vma->pages = NULL;
 
-    /* Since the unbound list is global, only move to that list if
-     * no more VMAs exist. */
-    if (--obj->bind_count == 0)
-        list_move_tail(&obj->global_link,
-                       &to_i915(obj->base.dev)->mm.unbound_list);
-
-    /* And finally now the object is completely decoupled from this vma,
-     * we can drop its hold on the backing storage and allow it to be
-     * reaped by the shrinker.
-     */
-    i915_gem_object_unpin_pages(obj);
-    GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+    i915_vma_remove(vma);
 
 destroy:
     if (unlikely(i915_vma_is_closed(vma)))
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 01341670738f..3282b0f4b134 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3669,10 +3669,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
     /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
     crtc->base.mode = crtc->base.state->mode;
 
-    DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
-                  old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
-                  pipe_config->pipe_src_w, pipe_config->pipe_src_h);
-
     /*
      * Update pipe size and adjust fitter if needed: the reason for this is
      * that in compute_mode_changes we check the native mode (not the pfit
@@ -4796,23 +4792,17 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
     struct intel_crtc_scaler_state *scaler_state =
         &crtc->config->scaler_state;
 
-    DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
-
     if (crtc->config->pch_pfit.enabled) {
         int id;
 
-        if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
-            DRM_ERROR("Requesting pfit without getting a scaler first\n");
+        if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
             return;
-        }
 
         id = scaler_state->scaler_id;
         I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
             PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
         I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
         I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
-
-        DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
     }
 }
4818 4808
@@ -14379,6 +14369,24 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
     } while (progress);
 }
 
+static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
+{
+    struct intel_atomic_state *state, *next;
+    struct llist_node *freed;
+
+    freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+    llist_for_each_entry_safe(state, next, freed, freed)
+        drm_atomic_state_put(&state->base);
+}
+
+static void intel_atomic_helper_free_state_worker(struct work_struct *work)
+{
+    struct drm_i915_private *dev_priv =
+        container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+
+    intel_atomic_helper_free_state(dev_priv);
+}
+
 static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 {
     struct drm_device *dev = state->dev;
@@ -14545,6 +14553,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
      * can happen also when the device is completely off.
      */
     intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
+    intel_atomic_helper_free_state(dev_priv);
 }
 
 static void intel_atomic_commit_work(struct work_struct *work)
@@ -14946,17 +14956,19 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
         to_intel_atomic_state(old_crtc_state->state);
     bool modeset = needs_modeset(crtc->state);
 
+    if (!modeset &&
+        (intel_cstate->base.color_mgmt_changed ||
+         intel_cstate->update_pipe)) {
+        intel_color_set_csc(crtc->state);
+        intel_color_load_luts(crtc->state);
+    }
+
     /* Perform vblank evasion around commit operation */
     intel_pipe_update_start(intel_crtc);
 
     if (modeset)
         goto out;
 
-    if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
-        intel_color_set_csc(crtc->state);
-        intel_color_load_luts(crtc->state);
-    }
-
     if (intel_cstate->update_pipe)
         intel_update_pipe_config(intel_crtc, old_intel_cstate);
     else if (INTEL_GEN(dev_priv) >= 9)
@@ -16599,18 +16611,6 @@ fail:
     drm_modeset_acquire_fini(&ctx);
 }
 
-static void intel_atomic_helper_free_state(struct work_struct *work)
-{
-    struct drm_i915_private *dev_priv =
-        container_of(work, typeof(*dev_priv), atomic_helper.free_work);
-    struct intel_atomic_state *state, *next;
-    struct llist_node *freed;
-
-    freed = llist_del_all(&dev_priv->atomic_helper.free_list);
-    llist_for_each_entry_safe(state, next, freed, freed)
-        drm_atomic_state_put(&state->base);
-}
-
 int intel_modeset_init(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16631,7 +16631,7 @@ int intel_modeset_init(struct drm_device *dev)
     dev->mode_config.funcs = &intel_mode_funcs;
 
     INIT_WORK(&dev_priv->atomic_helper.free_work,
-              intel_atomic_helper_free_state);
+              intel_atomic_helper_free_state_worker);
 
     intel_init_quirks(dev);
 
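
The intel_display.c change above factors the drain of the pending free list into a helper that is also called from the commit tail, so freed states are reaped on the next commit instead of waiting for the worker. A simplified single-threaded sketch of the drain helper, where a plain singly-linked list stands in for the kernel's llist and the state type is invented:

#include <stdlib.h>

struct state {
    struct state *next;
};

static struct state *free_list;

/* Detach the whole pending list at once, then release each entry
 * (the free() here stands in for drm_atomic_state_put()). */
static void helper_free_states(void)
{
    struct state *s = free_list;
    free_list = NULL;

    while (s) {
        struct state *next = s->next;
        free(s);
        s = next;
    }
}

int main(void)
{
    for (int i = 0; i < 2; i++) {
        struct state *s = malloc(sizeof(*s));
        if (!s)
            break;
        s->next = free_list;
        free_list = s;
    }
    helper_free_states();
    return 0;
}
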
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 1b8ba2e77539..2d449fb5d1d2 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -357,14 +357,13 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                                     bool *enabled, int width, int height)
 {
     struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
-    unsigned long conn_configured, mask;
+    unsigned long conn_configured, conn_seq, mask;
     unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
     int i, j;
     bool *save_enabled;
     bool fallback = true;
     int num_connectors_enabled = 0;
     int num_connectors_detected = 0;
-    int pass = 0;
 
     save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
     if (!save_enabled)
@@ -374,6 +373,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
     mask = BIT(count) - 1;
     conn_configured = 0;
 retry:
+    conn_seq = conn_configured;
     for (i = 0; i < count; i++) {
         struct drm_fb_helper_connector *fb_conn;
         struct drm_connector *connector;
@@ -387,7 +387,7 @@ retry:
         if (conn_configured & BIT(i))
             continue;
 
-        if (pass == 0 && !connector->has_tile)
+        if (conn_seq == 0 && !connector->has_tile)
             continue;
 
         if (connector->status == connector_status_connected)
@@ -498,10 +498,8 @@ retry:
         conn_configured |= BIT(i);
     }
 
-    if ((conn_configured & mask) != mask) {
-        pass++;
+    if ((conn_configured & mask) != mask && conn_configured != conn_seq)
         goto retry;
-    }
 
     /*
      * If the BIOS didn't enable everything it could, fall back to have the
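
The fbdev change above replaces a blind pass counter with a stagnation check: retry only while the last pass configured at least one new connector, and stop as soon as a full pass changes nothing. A self-contained sketch of that loop shape; the configuration step below is a made-up stand-in for the connector logic:

#include <stdio.h>

/* Pretend connector i can only be configured after connector i-1. */
static unsigned long try_configure(unsigned long configured, unsigned int count)
{
    for (unsigned int i = 0; i < count; i++)
        if (!(configured & (1UL << i)) &&
            (i == 0 || (configured & (1UL << (i - 1)))))
            return configured | (1UL << i);
    return configured;   /* no progress this pass */
}

int main(void)
{
    unsigned int count = 4;
    unsigned long mask = (1UL << count) - 1;
    unsigned long conn_configured = 0, conn_seq;

    do {
        conn_seq = conn_configured;                      /* snapshot before the pass */
        conn_configured = try_configure(conn_configured, count);
    } while ((conn_configured & mask) != mask &&         /* not done yet */
             conn_configured != conn_seq);               /* and still making progress */

    printf("configured mask: %#lx\n", conn_configured);  /* -> 0xf */
    return 0;
}
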
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 249623d45be0..940bab22d464 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4891,6 +4891,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
         break;
     }
 
+    /* When byt can survive without system hang with dynamic
+     * sw freq adjustments, this restriction can be lifted.
+     */
+    if (IS_VALLEYVIEW(dev_priv))
+        goto skip_hw_write;
+
     I915_WRITE(GEN6_RP_UP_EI,
                GT_INTERVAL_FROM_US(dev_priv, ei_up));
     I915_WRITE(GEN6_RP_UP_THRESHOLD,
@@ -4911,6 +4917,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
                GEN6_RP_UP_BUSY_AVG |
                GEN6_RP_DOWN_IDLE_AVG);
 
+skip_hw_write:
     dev_priv->rps.power = new_power;
     dev_priv->rps.up_threshold = threshold_up;
     dev_priv->rps.down_threshold = threshold_down;
@@ -7916,10 +7923,10 @@ static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
  * @timeout_base_ms: timeout for polling with preemption enabled
  *
  * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
- * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
+ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
  * The request is acknowledged once the PCODE reply dword equals @reply after
  * applying @reply_mask. Polling is first attempted with preemption enabled
- * for @timeout_base_ms and if this times out for another 10 ms with
+ * for @timeout_base_ms and if this times out for another 50 ms with
  * preemption disabled.
  *
  * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
@@ -7955,14 +7962,15 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
      * worst case) _and_ PCODE was busy for some reason even after a
      * (queued) request and @timeout_base_ms delay. As a workaround retry
      * the poll with preemption disabled to maximize the number of
-     * requests. Increase the timeout from @timeout_base_ms to 10ms to
+     * requests. Increase the timeout from @timeout_base_ms to 50ms to
      * account for interrupts that could reduce the number of these
-     * requests.
+     * requests, and for any quirks of the PCODE firmware that delays
+     * the request completion.
      */
     DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
     WARN_ON_ONCE(timeout_base_ms > 3);
     preempt_disable();
-    ret = wait_for_atomic(COND, 10);
+    ret = wait_for_atomic(COND, 50);
     preempt_enable();
 
 out:
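
The PCODE change above only bumps the second-stage timeout from 10ms to 50ms; the structure is a two-stage poll: a relaxed poll for the caller's budget, then a short tight retry before giving up. A userspace sketch of that shape under assumed timings; cond_ready() and the values are illustrative only:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static bool cond_ready(void)
{
    return false;                       /* stand-in for the PCODE reply check */
}

static long long now_ms(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

static int poll_request(int timeout_base_ms)
{
    long long deadline = now_ms() + timeout_base_ms;

    while (now_ms() < deadline) {       /* relaxed poll: may sleep */
        if (cond_ready())
            return 0;
        usleep(1000);
    }

    deadline = now_ms() + 50;           /* second chance: tight 50ms poll */
    while (now_ms() < deadline)
        if (cond_ready())
            return 0;

    return -ETIMEDOUT;
}

int main(void)
{
    printf("poll_request -> %d\n", poll_request(10)); /* -> -ETIMEDOUT here */
    return 0;
}
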
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 9ef54688872a..9481ca9a3ae7 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -254,9 +254,6 @@ skl_update_plane(struct drm_plane *drm_plane,
         int scaler_id = plane_state->scaler_id;
         const struct intel_scaler *scaler;
 
-        DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n",
-                      plane_id, PS_PLANE_SEL(plane_id));
-
         scaler = &crtc_state->scaler_state.scalers[scaler_id];
 
         I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index abe08885a5ba..b7ff592b14f5 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -119,6 +119,8 @@ fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
 
     for_each_fw_domain_masked(d, fw_domains, dev_priv)
         fw_domain_wait_ack(d);
+
+    dev_priv->uncore.fw_domains_active |= fw_domains;
 }
 
 static void
@@ -130,6 +132,8 @@ fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
         fw_domain_put(d);
         fw_domain_posting_read(d);
     }
+
+    dev_priv->uncore.fw_domains_active &= ~fw_domains;
 }
 
 static void
@@ -240,10 +244,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
     if (WARN_ON(domain->wake_count == 0))
         domain->wake_count++;
 
-    if (--domain->wake_count == 0) {
+    if (--domain->wake_count == 0)
         dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
-        dev_priv->uncore.fw_domains_active &= ~domain->mask;
-    }
 
     spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 
@@ -454,10 +456,8 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
         fw_domains &= ~domain->mask;
     }
 
-    if (fw_domains) {
+    if (fw_domains)
         dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
-        dev_priv->uncore.fw_domains_active |= fw_domains;
-    }
 }
 
 /**
@@ -968,7 +968,6 @@ static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
         fw_domain_arm_timer(domain);
 
     dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
-    dev_priv->uncore.fw_domains_active |= fw_domains;
 }
 
 static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
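
The intel_uncore.c hunks move the fw_domains_active bookkeeping out of every call site and into the low-level get/put helpers, so the mask can never drift from what was actually requested. A minimal sketch of that centralization; the domain bits and helpers below are invented for illustration:

#include <stdio.h>

enum { DOMAIN_RENDER = 1 << 0, DOMAIN_MEDIA = 1 << 1 };

static unsigned int fw_domains_active;

/* Tracking lives with the action: callers can no longer forget to
 * update the active mask after waking or releasing domains. */
static void fw_domains_get(unsigned int domains)
{
    /* ... wake the hardware domains ... */
    fw_domains_active |= domains;
}

static void fw_domains_put(unsigned int domains)
{
    /* ... release the hardware domains ... */
    fw_domains_active &= ~domains;
}

int main(void)
{
    fw_domains_get(DOMAIN_RENDER | DOMAIN_MEDIA);
    fw_domains_put(DOMAIN_MEDIA);
    printf("active: %#x\n", fw_domains_active); /* -> 0x1 */
    return 0;
}
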