author    Thomas Gleixner <tglx@linutronix.de>  2016-09-01 12:33:46 -0400
committer Thomas Gleixner <tglx@linutronix.de>  2016-09-01 12:33:46 -0400
commit    0cb7bf61b1e9f05027de58c80f9b46a714d24e35
tree      41fb55cf62d07b425122f9a8b96412c0d8eb99c5 /drivers/gpu/drm
parent    aa877175e7a9982233ed8f10cb4bfddd78d82741
parent    3eab887a55424fc2c27553b7bfe32330df83f7b8
Merge branch 'linus' into smp/hotplug
Apply upstream changes to avoid conflicts with pending patches.
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h              |   6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c     |  13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c |   9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c         |   4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c          |  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c          |   3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c           |   5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c           |   2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c        |   2
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c    |   2
-rw-r--r--  drivers/gpu/drm/drm_atomic.c                     |   6
-rw-r--r--  drivers/gpu/drm/drm_crtc.c                       |   3
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c                  |   2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c            |  10
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h                  |  14
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c                  |  10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c       |  13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c              |   1
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h                  |  14
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c               |   6
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c                 |   8
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c                 |  91
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c             | 181
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h                 |   3
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c                 |  20
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c                  | 276
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c          |   8
-rw-r--r--  drivers/gpu/drm/mediatek/Kconfig                 |   3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c                     |   4
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c           |   4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c     |   9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c              |   4
-rw-r--r--  drivers/gpu/drm/tegra/dsi.c                      |  43
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c                     |   4
34 files changed, 630 insertions(+), 165 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8ebc5f1eb4c0..700c56baf2de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -426,6 +426,8 @@ struct amdgpu_mman {
 
 	/* custom LRU management */
 	struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE];
+	/* guard for log2_size array, don't add anything in between */
+	struct amdgpu_mman_lru guard;
 };
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring,
@@ -646,9 +648,9 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
 void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
 int amdgpu_gart_init(struct amdgpu_device *adev);
 void amdgpu_gart_fini(struct amdgpu_device *adev);
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 			int pages);
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 		     int pages, struct page **pagelist,
 		     dma_addr_t *dma_addr, uint32_t flags);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 983175363b06..fe872b82e619 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -321,6 +321,19 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
 				(le16_to_cpu(path->usConnObjectId) &
 				OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
+			/* Skip TV/CV support */
+			if ((le16_to_cpu(path->usDeviceTag) ==
+			     ATOM_DEVICE_TV1_SUPPORT) ||
+			    (le16_to_cpu(path->usDeviceTag) ==
+			     ATOM_DEVICE_CV_SUPPORT))
+				continue;
+
+			if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
+				DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n",
+					  con_obj_id, le16_to_cpu(path->usDeviceTag));
+				continue;
+			}
+
 			connector_type =
 				object_connector_convert[con_obj_id];
 			connector_object_id = con_obj_id;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 49de92600074..10b5ddf2c588 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -200,16 +200,7 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
 	atpx->is_hybrid = false;
 	if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
 		printk("ATPX Hybrid Graphics\n");
-#if 1
-		/* This is a temporary hack until the D3 cold support
-		 * makes it upstream.  The ATPX power_control method seems
-		 * to still work on even if the system should be using
-		 * the new standardized hybrid D3 cold ACPI interface.
-		 */
-		atpx->functions.power_cntl = true;
-#else
 		atpx->functions.power_cntl = false;
-#endif
 		atpx->is_hybrid = true;
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 921bce2df0b0..0feea347f680 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -221,7 +221,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
  * Unbinds the requested pages from the gart page table and
  * replaces them with the dummy page (all asics).
  */
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 			int pages)
 {
 	unsigned t;
@@ -268,7 +268,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
  * (all asics).
  * Returns 0 for success, -EINVAL for failure.
  */
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
 		     uint32_t flags)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 9b61c8ba7aaf..716f2afeb6a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -251,8 +251,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
 	adev = amdgpu_get_adev(bo->bdev);
 	ring = adev->mman.buffer_funcs_ring;
-	old_start = old_mem->start << PAGE_SHIFT;
-	new_start = new_mem->start << PAGE_SHIFT;
+	old_start = (u64)old_mem->start << PAGE_SHIFT;
+	new_start = (u64)new_mem->start << PAGE_SHIFT;
 
 	switch (old_mem->mem_type) {
 	case TTM_PL_VRAM:
@@ -950,6 +950,8 @@ static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
 	struct list_head *res = lru->lru[tbo->mem.mem_type];
 
 	lru->lru[tbo->mem.mem_type] = &tbo->lru;
+	while ((++lru)->lru[tbo->mem.mem_type] == res)
+		lru->lru[tbo->mem.mem_type] = &tbo->lru;
 
 	return res;
 }
@@ -960,6 +962,8 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
 	struct list_head *res = lru->swap_lru;
 
 	lru->swap_lru = &tbo->swap;
+	while ((++lru)->swap_lru == res)
+		lru->swap_lru = &tbo->swap;
 
 	return res;
 }
@@ -1011,6 +1015,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 		lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
 	}
 
+	for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
+		adev->mman.guard.lru[j] = NULL;
+	adev->mman.guard.swap_lru = NULL;
+
 	adev->mman.initialized = true;
 	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
 			   adev->mc.real_vram_size >> PAGE_SHIFT);
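
The guard initialization above is what terminates the two while ((++lru)-> ...) walks added to amdgpu_ttm_lru_tail() and amdgpu_ttm_swap_lru_tail(): the forward scan updates every adjacent log2_size[] entry that still caches the old list tail, and since the guard's pointers are NULL they can never compare equal, so the scan always stops inside the structure. A minimal user-space sketch of the same guard-terminated scan (hypothetical types, not the TTM code):

    #include <assert.h>
    #include <stddef.h>

    /* Hypothetical illustration of the guard pattern: an array of cache
     * slots followed by one sentinel slot holding NULL.  NULL never equals
     * a valid tail pointer, so the forward walk stops at the sentinel
     * instead of overrunning the array. */
    struct lru_slot {
        void *tail;
    };

    static void update_shared_tails(struct lru_slot *slot, void *old_tail,
                                    void *new_tail)
    {
        slot->tail = new_tail;
        while ((++slot)->tail == old_tail)  /* sentinel ends the scan */
            slot->tail = new_tail;
    }

    int main(void)
    {
        int a, b;
        struct lru_slot slots[] = { { &a }, { &a }, { NULL } };

        update_shared_tails(&slots[0], &a, &b);
        assert(slots[1].tail == &b);  /* duplicate was updated, guard untouched */
        return 0;
    }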
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b11f4e8868d7..4aa993d19018 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1187,7 +1187,8 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		r = 0;
 	}
 
-error:
 	fence_put(fence);
+
+error:
 	return r;
 }
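
The fix above moves fence_put() ahead of the error: label, so the early goto error paths (taken before a fence exists) no longer drop a reference they never took; only the fall-through success path releases the fence. A hedged generic sketch of the same goto-cleanup ordering (not the UVD code):

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical illustration: release a resource only on the path that
     * acquired it, and let error paths jump past the release. */
    static int do_work(int fail_early)
    {
        char *buf;
        int r = 0;

        if (fail_early) {
            r = -1;
            goto error;  /* buf was never allocated: nothing to free */
        }

        buf = malloc(16);
        if (!buf) {
            r = -1;
            goto error;
        }

        /* ... use buf ... */
        free(buf);  /* only the success path releases it */

    error:
        return r;
    }

    int main(void)
    {
        printf("%d %d\n", do_work(0), do_work(1));
        return 0;
    }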
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8e642fc48df4..80120fa4092c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1535,7 +1535,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	r = amd_sched_entity_init(&ring->sched, &vm->entity,
 				  rq, amdgpu_sched_jobs);
 	if (r)
-		return r;
+		goto err;
 
 	vm->page_directory_fence = NULL;
 
@@ -1565,6 +1565,9 @@ error_free_page_directory:
 error_free_sched_entity:
 	amd_sched_entity_fini(&ring->sched, &vm->entity);
 
+err:
+	drm_free_large(vm->page_tables);
+
 	return r;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 1351c7e834a2..a64715d90503 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -714,7 +714,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
 		goto err1;
-	} else if (r) {
+	} else if (r < 0) {
 		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err1;
 	}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index e621eba63126..a7d3cb3fead0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -184,7 +184,7 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
 					      sizeof(u32)) + inx;
 
 	pr_debug("kfd: get kernel queue doorbell\n"
-		 " doorbell offset == 0x%08d\n"
+		 " doorbell offset == 0x%08X\n"
 		 " kernel address == 0x%08lX\n",
 		 *doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
 
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index ef312bb75fda..963a24d46a93 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -405,7 +405,7 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
 	spin_lock(&sched->job_list_lock);
 	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
 					 struct amd_sched_job, node);
-	if (s_job)
+	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
 		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
 
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index fa3930757972..2a3ded44cf2a 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -475,7 +475,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
 					val,
 					-1,
 					&replaced);
-		state->color_mgmt_changed = replaced;
+		state->color_mgmt_changed |= replaced;
 		return ret;
 	} else if (property == config->ctm_property) {
 		ret = drm_atomic_replace_property_blob_from_id(crtc,
@@ -483,7 +483,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
 					val,
 					sizeof(struct drm_color_ctm),
 					&replaced);
-		state->color_mgmt_changed = replaced;
+		state->color_mgmt_changed |= replaced;
 		return ret;
 	} else if (property == config->gamma_lut_property) {
 		ret = drm_atomic_replace_property_blob_from_id(crtc,
@@ -491,7 +491,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
 					val,
 					-1,
 					&replaced);
-		state->color_mgmt_changed = replaced;
+		state->color_mgmt_changed |= replaced;
 		return ret;
 	} else if (crtc->funcs->atomic_set_property)
 		return crtc->funcs->atomic_set_property(crtc, state, property, val);
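
The |= change matters because drm_atomic_crtc_set_property() can run several times on one state (degamma, ctm, gamma): a later replacement that changes nothing must not clear the flag recorded by an earlier one. A tiny illustration of the accumulation (generic C, not the DRM code):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical sequence of property updates on one state:
         * the first replaces a blob, the next two are no-ops. */
        bool replaced[] = { true, false, false };
        bool color_mgmt_changed = false;
        int i;

        for (i = 0; i < 3; i++) {
            /* '|=' preserves the earlier change; plain '=' would
             * leave the flag false after the final no-op. */
            color_mgmt_changed |= replaced[i];
        }

        printf("color_mgmt_changed = %d\n", color_mgmt_changed); /* 1 */
        return 0;
    }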
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index b1dbb60af99f..ddebe54cd5ca 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -5404,6 +5404,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 	struct drm_pending_vblank_event *e = NULL;
 	int ret = -EINVAL;
 
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
 	    page_flip->reserved != 0)
 		return -EINVAL;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index ce54e985d91b..0a06f9120b5a 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -464,7 +464,7 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
 
 	/* Sometimes user space wants everything disabled, so don't steal the
 	 * display if there's a master. */
-	if (lockless_dereference(dev->master))
+	if (READ_ONCE(dev->master))
 		return false;
 
 	drm_for_each_crtc(crtc, dev) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 87ef34150d46..b382cf505262 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1333,8 +1333,6 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	if (ret < 0)
 		return ret;
 
-	mutex_lock(&gpu->lock);
-
 	/*
 	 * TODO
 	 *
@@ -1348,16 +1346,18 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	if (unlikely(event == ~0U)) {
 		DRM_ERROR("no free event\n");
 		ret = -EBUSY;
-		goto out_unlock;
+		goto out_pm_put;
 	}
 
 	fence = etnaviv_gpu_fence_alloc(gpu);
 	if (!fence) {
 		event_free(gpu, event);
 		ret = -ENOMEM;
-		goto out_unlock;
+		goto out_pm_put;
 	}
 
+	mutex_lock(&gpu->lock);
+
 	gpu->event[event].fence = fence;
 	submit->fence = fence->seqno;
 	gpu->active_fence = submit->fence;
@@ -1395,9 +1395,9 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	hangcheck_timer_reset(gpu);
 	ret = 0;
 
-out_unlock:
 	mutex_unlock(&gpu->lock);
 
+out_pm_put:
 	etnaviv_gpu_pm_put(gpu);
 
 	return ret;
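
The etnaviv change narrows gpu->lock to the code that actually touches shared submission state, so the allocation-failure paths return without ever holding the mutex (and the old out_unlock label disappears). A small pthreads sketch of the same "acquire late, fail early" scoping (hypothetical, not the etnaviv code):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int shared_counter;

    /* Hypothetical illustration: take the lock only around the section
     * that mutates shared state, so early error returns never need a
     * matching unlock. */
    static int submit(int resource_ok)
    {
        if (!resource_ok)
            return -1;  /* fails before the lock is taken */

        pthread_mutex_lock(&lock);
        shared_counter++;  /* the only part needing the lock */
        pthread_mutex_unlock(&lock);

        return 0;
    }

    int main(void)
    {
        int r1 = submit(1);
        int r2 = submit(0);

        printf("%d %d count=%d\n", r1, r2, shared_counter);
        return 0;
    }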
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 21f939074abc..f68c78918d63 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -882,11 +882,12 @@ struct i915_gem_context {
 
 	struct i915_ctx_hang_stats hang_stats;
 
-	/* Unique identifier for this context, used by the hw for tracking */
 	unsigned long flags;
 #define CONTEXT_NO_ZEROMAP	BIT(0)
 #define CONTEXT_NO_ERROR_CAPTURE	BIT(1)
-	unsigned hw_id;
+
+	/* Unique identifier for this context, used by the hw for tracking */
+	unsigned int hw_id;
 	u32 user_handle;
 
 	u32 ggtt_alignment;
@@ -1854,6 +1855,7 @@ struct drm_i915_private {
 	enum modeset_restore modeset_restore;
 	struct mutex modeset_restore_lock;
 	struct drm_atomic_state *modeset_restore_state;
+	struct drm_modeset_acquire_ctx reset_ctx;
 
 	struct list_head vm_list; /* Global list of all address spaces */
 	struct i915_ggtt ggtt; /* VM representing the global address space */
@@ -1962,6 +1964,13 @@ struct drm_i915_private {
 	struct i915_suspend_saved_registers regfile;
 	struct vlv_s0ix_state vlv_s0ix_state;
 
+	enum {
+		I915_SKL_SAGV_UNKNOWN = 0,
+		I915_SKL_SAGV_DISABLED,
+		I915_SKL_SAGV_ENABLED,
+		I915_SKL_SAGV_NOT_CONTROLLED
+	} skl_sagv_status;
+
 	struct {
 		/*
 		 * Raw watermark latency values:
@@ -3590,6 +3599,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 /* belongs in i915_gem_gtt.h */
 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
 {
+	wmb();
 	if (INTEL_GEN(dev_priv) < 6)
 		intel_gtt_chipset_flush();
 }
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 11681501d7b1..a77ce9983f69 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -879,9 +879,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	ret = i915_gem_shmem_pread(dev, obj, args, file);
 
 	/* pread for non shmem backed objects */
-	if (ret == -EFAULT || ret == -ENODEV)
+	if (ret == -EFAULT || ret == -ENODEV) {
+		intel_runtime_pm_get(to_i915(dev));
 		ret = i915_gem_gtt_pread(dev, obj, args->size,
 					args->offset, args->data_ptr);
+		intel_runtime_pm_put(to_i915(dev));
+	}
 
 out:
 	drm_gem_object_unreference(&obj->base);
@@ -1306,7 +1309,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * textures). Fallback to the shmem path in that case. */
 	}
 
-	if (ret == -EFAULT) {
+	if (ret == -EFAULT || ret == -ENOSPC) {
 		if (obj->phys_handle)
 			ret = i915_gem_phys_pwrite(obj, args, file);
 		else if (i915_gem_object_has_struct_page(obj))
@@ -3169,6 +3172,8 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 	}
 
 	intel_ring_init_seqno(engine, engine->last_submitted_seqno);
+
+	engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
 }
 
 void i915_gem_reset(struct drm_device *dev)
@@ -3186,6 +3191,7 @@ void i915_gem_reset(struct drm_device *dev)
 
 	for_each_engine(engine, dev_priv)
 		i915_gem_reset_engine_cleanup(engine);
+	mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
 
 	i915_gem_context_reset(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1978633e7549..b35e5b6475b2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -943,8 +943,6 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 {
 	const unsigned other_rings = ~intel_engine_flag(req->engine);
 	struct i915_vma *vma;
-	uint32_t flush_domains = 0;
-	bool flush_chipset = false;
 	int ret;
 
 	list_for_each_entry(vma, vmas, exec_list) {
@@ -957,16 +955,11 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 		}
 
 		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
-			flush_chipset |= i915_gem_clflush_object(obj, false);
-
-		flush_domains |= obj->base.write_domain;
+			i915_gem_clflush_object(obj, false);
 	}
 
-	if (flush_chipset)
-		i915_gem_chipset_flush(req->engine->i915);
-
-	if (flush_domains & I915_GEM_DOMAIN_GTT)
-		wmb();
+	/* Unconditionally flush any chipset caches (for streaming writes). */
+	i915_gem_chipset_flush(req->engine->i915);
 
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 10f1e32767e6..7a30af79d799 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2873,6 +2873,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
 		ppgtt->base.cleanup(&ppgtt->base);
+		kfree(ppgtt);
 	}
 
 	i915_gem_cleanup_stolen(dev);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ce14fe09d962..bf2cad3f9e1f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1536,6 +1536,7 @@ enum skl_disp_power_wells {
 #define BALANCE_LEG_MASK(port)	(7<<(8+3*(port)))
 /* Balance leg disable bits */
 #define BALANCE_LEG_DISABLE_SHIFT	23
+#define BALANCE_LEG_DISABLE(port)	(1 << (23 + (port)))
 
 /*
  * Fence registers
@@ -7144,6 +7145,15 @@ enum {
 
 #define GEN6_PCODE_MAILBOX	_MMIO(0x138124)
 #define GEN6_PCODE_READY	(1<<31)
+#define GEN6_PCODE_ERROR_MASK	0xFF
+#define GEN6_PCODE_SUCCESS	0x0
+#define GEN6_PCODE_ILLEGAL_CMD	0x1
+#define GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE	0x2
+#define GEN6_PCODE_TIMEOUT	0x3
+#define GEN6_PCODE_UNIMPLEMENTED_CMD	0xFF
+#define GEN7_PCODE_TIMEOUT	0x2
+#define GEN7_PCODE_ILLEGAL_DATA	0x3
+#define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE	0x10
 #define GEN6_PCODE_WRITE_RC6VIDS	0x4
 #define GEN6_PCODE_READ_RC6VIDS	0x5
 #define GEN6_ENCODE_RC6_VID(mv)	(((mv) - 245) / 5)
@@ -7165,6 +7175,10 @@ enum {
 #define HSW_PCODE_DE_WRITE_FREQ_REQ	0x17
 #define DISPLAY_IPS_CONTROL	0x19
 #define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL	0x1A
+#define GEN9_PCODE_SAGV_CONTROL	0x21
+#define GEN9_SAGV_DISABLE	0x0
+#define GEN9_SAGV_IS_DISABLED	0x1
+#define GEN9_SAGV_ENABLE	0x3
 #define GEN6_PCODE_DATA	_MMIO(0x138128)
 #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT	8
 #define GEN6_PCODE_FREQ_RING_RATIO_SHIFT	16
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 6700a7be7f78..d32f586f9c05 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -600,6 +600,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
 	if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
 		return;
 
+	i915_audio_component_get_power(dev);
+
 	/*
 	 * Enable/disable generating the codec wake signal, overriding the
 	 * internal logic to generate the codec wake to controller.
@@ -615,6 +617,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
 		I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
 		usleep_range(1000, 1500);
 	}
+
+	i915_audio_component_put_power(dev);
 }
 
 /* Get CDCLK in kHz */
@@ -648,6 +652,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
 	    !IS_HASWELL(dev_priv))
 		return 0;
 
+	i915_audio_component_get_power(dev);
 	mutex_lock(&dev_priv->av_mutex);
 	/* 1. get the pipe */
 	intel_encoder = dev_priv->dig_port_map[port];
@@ -698,6 +703,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
 
  unlock:
 	mutex_unlock(&dev_priv->av_mutex);
+	i915_audio_component_put_power(dev);
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 3edb9580928e..c3b33a10c15c 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -41,15 +41,15 @@
  * be moved to FW_FAILED.
  */
 
-#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
+#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
 MODULE_FIRMWARE(I915_CSR_KBL);
 #define KBL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 1)
 
-#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+#define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin"
 MODULE_FIRMWARE(I915_CSR_SKL);
-#define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 23)
+#define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 26)
 
-#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
 MODULE_FIRMWARE(I915_CSR_BXT);
 #define BXT_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
 
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index dd1d6fe12297..1a7efac65fd5 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -145,7 +145,7 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
 static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
 	{ 0x0000201B, 0x000000A2, 0x0 },
 	{ 0x00005012, 0x00000088, 0x0 },
-	{ 0x80007011, 0x000000CD, 0x0 },
+	{ 0x80007011, 0x000000CD, 0x1 },
 	{ 0x80009010, 0x000000C0, 0x1 },
 	{ 0x0000201B, 0x0000009D, 0x0 },
 	{ 0x80005012, 0x000000C0, 0x1 },
@@ -158,7 +158,7 @@ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
 static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
 	{ 0x00000018, 0x000000A2, 0x0 },
 	{ 0x00005012, 0x00000088, 0x0 },
-	{ 0x80007011, 0x000000CD, 0x0 },
+	{ 0x80007011, 0x000000CD, 0x3 },
 	{ 0x80009010, 0x000000C0, 0x3 },
 	{ 0x00000018, 0x0000009D, 0x0 },
 	{ 0x80005012, 0x000000C0, 0x3 },
@@ -388,6 +388,40 @@ skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
 	}
 }
 
+static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
+{
+	int n_hdmi_entries;
+	int hdmi_level;
+	int hdmi_default_entry;
+
+	hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+
+	if (IS_BROXTON(dev_priv))
+		return hdmi_level;
+
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+		skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
+		hdmi_default_entry = 8;
+	} else if (IS_BROADWELL(dev_priv)) {
+		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+		hdmi_default_entry = 7;
+	} else if (IS_HASWELL(dev_priv)) {
+		n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
+		hdmi_default_entry = 6;
+	} else {
+		WARN(1, "ddi translation table missing\n");
+		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+		hdmi_default_entry = 7;
+	}
+
+	/* Choose a good default if VBT is badly populated */
+	if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
+	    hdmi_level >= n_hdmi_entries)
+		hdmi_level = hdmi_default_entry;
+
+	return hdmi_level;
+}
+
 /*
  * Starting with Haswell, DDI port buffers must be programmed with correct
  * values in advance. The buffer values are different for FDI and DP modes,
@@ -399,7 +433,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 iboost_bit = 0;
-	int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
+	int i, n_hdmi_entries, n_dp_entries, n_edp_entries,
 	    size;
 	int hdmi_level;
 	enum port port;
@@ -410,7 +444,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 	const struct ddi_buf_trans *ddi_translations;
 
 	port = intel_ddi_get_encoder_port(encoder);
-	hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+	hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
 
 	if (IS_BROXTON(dev_priv)) {
 		if (encoder->type != INTEL_OUTPUT_HDMI)
@@ -430,7 +464,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 			skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
 		ddi_translations_hdmi =
 				skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
-		hdmi_default_entry = 8;
 		/* If we're boosting the current, set bit 31 of trans1 */
 		if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
 		    dev_priv->vbt.ddi_port_info[port].dp_boost_level)
@@ -456,7 +489,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 
 		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
 		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
-		hdmi_default_entry = 7;
 	} else if (IS_HASWELL(dev_priv)) {
 		ddi_translations_fdi = hsw_ddi_translations_fdi;
 		ddi_translations_dp = hsw_ddi_translations_dp;
@@ -464,7 +496,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 		ddi_translations_hdmi = hsw_ddi_translations_hdmi;
 		n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
 		n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
-		hdmi_default_entry = 6;
 	} else {
 		WARN(1, "ddi translation table missing\n");
 		ddi_translations_edp = bdw_ddi_translations_dp;
@@ -474,7 +505,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 		n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
 		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
 		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
-		hdmi_default_entry = 7;
 	}
 
 	switch (encoder->type) {
@@ -505,11 +535,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 	if (encoder->type != INTEL_OUTPUT_HDMI)
 		return;
 
-	/* Choose a good default if VBT is badly populated */
-	if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
-	    hdmi_level >= n_hdmi_entries)
-		hdmi_level = hdmi_default_entry;
-
 	/* Entry 9 is for HDMI: */
 	I915_WRITE(DDI_BUF_TRANS_LO(port, i),
 		   ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
@@ -1379,14 +1404,30 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
 			   TRANS_CLK_SEL_DISABLED);
 }
 
-static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
-			       u32 level, enum port port, int type)
+static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
+				enum port port, uint8_t iboost)
 {
+	u32 tmp;
+
+	tmp = I915_READ(DISPIO_CR_TX_BMU_CR0);
+	tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port));
+	if (iboost)
+		tmp |= iboost << BALANCE_LEG_SHIFT(port);
+	else
+		tmp |= BALANCE_LEG_DISABLE(port);
+	I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp);
+}
+
+static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
+{
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+	enum port port = intel_dig_port->port;
+	int type = encoder->type;
 	const struct ddi_buf_trans *ddi_translations;
 	uint8_t iboost;
 	uint8_t dp_iboost, hdmi_iboost;
 	int n_entries;
-	u32 reg;
 
 	/* VBT may override standard boost values */
 	dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
@@ -1428,16 +1469,10 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
 		return;
 	}
 
-	reg = I915_READ(DISPIO_CR_TX_BMU_CR0);
-	reg &= ~BALANCE_LEG_MASK(port);
-	reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port));
-
-	if (iboost)
-		reg |= iboost << BALANCE_LEG_SHIFT(port);
-	else
-		reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port);
+	_skl_ddi_set_iboost(dev_priv, port, iboost);
 
-	I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
+	if (port == PORT_A && intel_dig_port->max_lanes == 4)
+		_skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
 }
 
 static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
@@ -1568,7 +1603,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
 	level = translate_signal_level(signal_levels);
 
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-		skl_ddi_set_iboost(dev_priv, level, port, encoder->type);
+		skl_ddi_set_iboost(encoder, level);
 	else if (IS_BROXTON(dev_priv))
 		bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
 
@@ -1637,6 +1672,10 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 		intel_dp_stop_link_train(intel_dp);
 	} else if (type == INTEL_OUTPUT_HDMI) {
 		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+		int level = intel_ddi_hdmi_level(dev_priv, port);
+
+		if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+			skl_ddi_set_iboost(intel_encoder, level);
 
 		intel_hdmi->set_infoframes(encoder,
 					   crtc->config->has_hdmi_sink,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dcf93b3d4fb6..175595fc3e45 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3093,40 +3093,110 @@ static void intel_update_primary_planes(struct drm_device *dev)
 
 	for_each_crtc(dev, crtc) {
 		struct intel_plane *plane = to_intel_plane(crtc->primary);
-		struct intel_plane_state *plane_state;
-
-		drm_modeset_lock_crtc(crtc, &plane->base);
-		plane_state = to_intel_plane_state(plane->base.state);
+		struct intel_plane_state *plane_state =
+			to_intel_plane_state(plane->base.state);
 
 		if (plane_state->visible)
 			plane->update_plane(&plane->base,
 					    to_intel_crtc_state(crtc->state),
 					    plane_state);
+	}
+}
+
+static int
+__intel_display_resume(struct drm_device *dev,
+		       struct drm_atomic_state *state)
+{
+	struct drm_crtc_state *crtc_state;
+	struct drm_crtc *crtc;
+	int i, ret;
+
+	intel_modeset_setup_hw_state(dev);
+	i915_redisable_vga(dev);
 
-		drm_modeset_unlock_crtc(crtc);
+	if (!state)
+		return 0;
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		/*
+		 * Force recalculation even if we restore
+		 * current state. With fast modeset this may not result
+		 * in a modeset when the state is compatible.
+		 */
+		crtc_state->mode_changed = true;
 	}
+
+	/* ignore any reset values/BIOS leftovers in the WM registers */
+	to_intel_atomic_state(state)->skip_intermediate_wm = true;
+
+	ret = drm_atomic_commit(state);
+
+	WARN_ON(ret == -EDEADLK);
+	return ret;
 }
 
 void intel_prepare_reset(struct drm_i915_private *dev_priv)
 {
+	struct drm_device *dev = &dev_priv->drm;
+	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
+	struct drm_atomic_state *state;
+	int ret;
+
 	/* no reset support for gen2 */
 	if (IS_GEN2(dev_priv))
 		return;
 
-	/* reset doesn't touch the display */
+	/*
+	 * Need mode_config.mutex so that we don't
+	 * trample ongoing ->detect() and whatnot.
+	 */
+	mutex_lock(&dev->mode_config.mutex);
+	drm_modeset_acquire_init(ctx, 0);
+	while (1) {
+		ret = drm_modeset_lock_all_ctx(dev, ctx);
+		if (ret != -EDEADLK)
+			break;
+
+		drm_modeset_backoff(ctx);
+	}
+
+	/* reset doesn't touch the display, but flips might get nuked anyway, */
 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
 		return;
 
-	drm_modeset_lock_all(&dev_priv->drm);
 	/*
 	 * Disabling the crtcs gracefully seems nicer. Also the
 	 * g33 docs say we should at least disable all the planes.
 	 */
-	intel_display_suspend(&dev_priv->drm);
+	state = drm_atomic_helper_duplicate_state(dev, ctx);
+	if (IS_ERR(state)) {
+		ret = PTR_ERR(state);
+		state = NULL;
+		DRM_ERROR("Duplicating state failed with %i\n", ret);
+		goto err;
+	}
+
+	ret = drm_atomic_helper_disable_all(dev, ctx);
+	if (ret) {
+		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
+		goto err;
+	}
+
+	dev_priv->modeset_restore_state = state;
+	state->acquire_ctx = ctx;
+	return;
+
+err:
+	drm_atomic_state_free(state);
 }
 
 void intel_finish_reset(struct drm_i915_private *dev_priv)
 {
+	struct drm_device *dev = &dev_priv->drm;
+	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
+	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
+	int ret;
+
 	/*
 	 * Flips in the rings will be nuked by the reset,
 	 * so complete all pending flips so that user space
@@ -3138,6 +3208,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
 	if (IS_GEN2(dev_priv))
 		return;
 
+	dev_priv->modeset_restore_state = NULL;
+
 	/* reset doesn't touch the display */
 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
 		/*
@@ -3149,29 +3221,32 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
 		 * FIXME: Atomic will make this obsolete since we won't schedule
 		 * CS-based flips (which might get lost in gpu resets) any more.
 		 */
-		intel_update_primary_planes(&dev_priv->drm);
-		return;
-	}
-
-	/*
-	 * The display has been reset as well,
-	 * so need a full re-initialization.
-	 */
-	intel_runtime_pm_disable_interrupts(dev_priv);
-	intel_runtime_pm_enable_interrupts(dev_priv);
+		intel_update_primary_planes(dev);
+	} else {
+		/*
+		 * The display has been reset as well,
+		 * so need a full re-initialization.
+		 */
+		intel_runtime_pm_disable_interrupts(dev_priv);
+		intel_runtime_pm_enable_interrupts(dev_priv);
 
-	intel_modeset_init_hw(&dev_priv->drm);
+		intel_modeset_init_hw(dev);
 
-	spin_lock_irq(&dev_priv->irq_lock);
-	if (dev_priv->display.hpd_irq_setup)
-		dev_priv->display.hpd_irq_setup(dev_priv);
-	spin_unlock_irq(&dev_priv->irq_lock);
+		spin_lock_irq(&dev_priv->irq_lock);
+		if (dev_priv->display.hpd_irq_setup)
+			dev_priv->display.hpd_irq_setup(dev_priv);
+		spin_unlock_irq(&dev_priv->irq_lock);
 
-	intel_display_resume(&dev_priv->drm);
+		ret = __intel_display_resume(dev, state);
+		if (ret)
+			DRM_ERROR("Restoring old state failed with %i\n", ret);
 
-	intel_hpd_init(dev_priv);
+		intel_hpd_init(dev_priv);
+	}
 
-	drm_modeset_unlock_all(&dev_priv->drm);
+	drm_modeset_drop_locks(ctx);
+	drm_modeset_acquire_fini(ctx);
+	mutex_unlock(&dev->mode_config.mutex);
 }
 
 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
@@ -13684,6 +13759,13 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 			      intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
 			dev_priv->display.modeset_commit_cdclk(state);
 
+		/*
+		 * SKL workaround: bspec recommends we disable the SAGV when we
+		 * have more then one pipe enabled
+		 */
+		if (IS_SKYLAKE(dev_priv) && !skl_can_enable_sagv(state))
+			skl_disable_sagv(dev_priv);
+
 		intel_modeset_verify_disabled(dev);
 	}
 
@@ -13757,6 +13839,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 		intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
 	}
 
+	if (IS_SKYLAKE(dev_priv) && intel_state->modeset &&
+	    skl_can_enable_sagv(state))
+		skl_enable_sagv(dev_priv);
+
 	drm_atomic_helper_commit_hw_done(state);
 
 	if (intel_state->modeset)
@@ -16156,9 +16242,10 @@ void intel_display_resume(struct drm_device *dev)
 	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
 	struct drm_modeset_acquire_ctx ctx;
 	int ret;
-	bool setup = false;
 
 	dev_priv->modeset_restore_state = NULL;
+	if (state)
+		state->acquire_ctx = &ctx;
 
 	/*
 	 * This is a cludge because with real atomic modeset mode_config.mutex
@@ -16169,43 +16256,17 @@ void intel_display_resume(struct drm_device *dev)
 	mutex_lock(&dev->mode_config.mutex);
 	drm_modeset_acquire_init(&ctx, 0);
 
-retry:
-	ret = drm_modeset_lock_all_ctx(dev, &ctx);
-
-	if (ret == 0 && !setup) {
-		setup = true;
-
-		intel_modeset_setup_hw_state(dev);
-		i915_redisable_vga(dev);
-	}
-
-	if (ret == 0 && state) {
-		struct drm_crtc_state *crtc_state;
-		struct drm_crtc *crtc;
-		int i;
-
-		state->acquire_ctx = &ctx;
-
-		/* ignore any reset values/BIOS leftovers in the WM registers */
-		to_intel_atomic_state(state)->skip_intermediate_wm = true;
-
-		for_each_crtc_in_state(state, crtc, crtc_state, i) {
-			/*
-			 * Force recalculation even if we restore
-			 * current state. With fast modeset this may not result
-			 * in a modeset when the state is compatible.
-			 */
-			crtc_state->mode_changed = true;
-		}
-
-		ret = drm_atomic_commit(state);
-	}
+	while (1) {
+		ret = drm_modeset_lock_all_ctx(dev, &ctx);
+		if (ret != -EDEADLK)
+			break;
 
-	if (ret == -EDEADLK) {
 		drm_modeset_backoff(&ctx);
-		goto retry;
 	}
 
+	if (!ret)
+		ret = __intel_display_resume(dev, state);
+
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
 	mutex_unlock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cc937a19b1ba..ff399b9a5c1f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1716,6 +1716,9 @@ void ilk_wm_get_hw_state(struct drm_device *dev);
 void skl_wm_get_hw_state(struct drm_device *dev);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 			  struct skl_ddb_allocation *ddb /* out */);
+bool skl_can_enable_sagv(struct drm_atomic_state *state);
+int skl_enable_sagv(struct drm_i915_private *dev_priv);
+int skl_disable_sagv(struct drm_i915_private *dev_priv);
 uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
 bool ilk_disable_lp_wm(struct drm_device *dev);
 int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 6a7ad3ed1463..3836a1c79714 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -1230,12 +1230,29 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
 	if (i915.enable_fbc >= 0)
 		return !!i915.enable_fbc;
 
+	if (!HAS_FBC(dev_priv))
+		return 0;
+
 	if (IS_BROADWELL(dev_priv))
 		return 1;
 
 	return 0;
 }
 
+static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
+{
+#ifdef CONFIG_INTEL_IOMMU
+	/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
+	if (intel_iommu_gfx_mapped &&
+	    (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
+		DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
+		return true;
+	}
+#endif
+
+	return false;
+}
+
 /**
  * intel_fbc_init - Initialize FBC
  * @dev_priv: the i915 device
@@ -1253,6 +1270,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
 	fbc->active = false;
 	fbc->work.scheduled = false;
 
+	if (need_fbc_vtd_wa(dev_priv))
+		mkwrite_device_info(dev_priv)->has_fbc = false;
+
 	i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);
 	DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc);
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 97ba6c8cf907..53e13c10e4ea 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2852,6 +2852,7 @@ bool ilk_disable_lp_wm(struct drm_device *dev)
 
 #define SKL_DDB_SIZE		896	/* in blocks */
 #define BXT_DDB_SIZE		512
+#define SKL_SAGV_BLOCK_TIME	30 /* µs */
 
 /*
  * Return the index of a plane in the SKL DDB and wm result arrays. Primary
@@ -2875,6 +2876,153 @@ skl_wm_plane_id(const struct intel_plane *plane)
 	}
 }
 
+/*
+ * SAGV dynamically adjusts the system agent voltage and clock frequencies
+ * depending on power and performance requirements. The display engine access
+ * to system memory is blocked during the adjustment time. Because of the
+ * blocking time, having this enabled can cause full system hangs and/or pipe
+ * underruns if we don't meet all of the following requirements:
+ *
+ * - <= 1 pipe enabled
+ * - All planes can enable watermarks for latencies >= SAGV engine block time
+ * - We're not using an interlaced display configuration
+ */
+int
+skl_enable_sagv(struct drm_i915_private *dev_priv)
+{
+	int ret;
+
+	if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
+	    dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED)
+		return 0;
+
+	DRM_DEBUG_KMS("Enabling the SAGV\n");
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+				      GEN9_SAGV_ENABLE);
+
+	/* We don't need to wait for the SAGV when enabling */
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	/*
+	 * Some skl systems, pre-release machines in particular,
+	 * don't actually have an SAGV.
+	 */
+	if (ret == -ENXIO) {
+		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+		dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+		return 0;
+	} else if (ret < 0) {
+		DRM_ERROR("Failed to enable the SAGV\n");
+		return ret;
+	}
+
+	dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED;
+	return 0;
+}
+
+static int
+skl_do_sagv_disable(struct drm_i915_private *dev_priv)
+{
+	int ret;
+	uint32_t temp = GEN9_SAGV_DISABLE;
+
+	ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+				     &temp);
+	if (ret)
+		return ret;
+	else
+		return temp & GEN9_SAGV_IS_DISABLED;
+}
+
+int
+skl_disable_sagv(struct drm_i915_private *dev_priv)
+{
+	int ret, result;
+
+	if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
+	    dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED)
+		return 0;
+
+	DRM_DEBUG_KMS("Disabling the SAGV\n");
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	/* bspec says to keep retrying for at least 1 ms */
+	ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	if (ret == -ETIMEDOUT) {
+		DRM_ERROR("Request to disable SAGV timed out\n");
+		return -ETIMEDOUT;
+	}
+
+	/*
+	 * Some skl systems, pre-release machines in particular,
+	 * don't actually have an SAGV.
+	 */
+	if (result == -ENXIO) {
+		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+		dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+		return 0;
+	} else if (result < 0) {
+		DRM_ERROR("Failed to disable the SAGV\n");
+		return result;
+	}
+
+	dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED;
+	return 0;
+}
+
+bool skl_can_enable_sagv(struct drm_atomic_state *state)
+{
+	struct drm_device *dev = state->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+	struct drm_crtc *crtc;
+	enum pipe pipe;
+	int level, plane;
+
+	/*
+	 * SKL workaround: bspec recommends we disable the SAGV when we have
+	 * more than one pipe enabled
+	 *
+	 * If there are no active CRTCs, no additional checks need be performed
+	 */
+	if (hweight32(intel_state->active_crtcs) == 0)
+		return true;
+	else if (hweight32(intel_state->active_crtcs) > 1)
+		return false;
+
+	/* Since we're now guaranteed to only have one active CRTC... */
+	pipe = ffs(intel_state->active_crtcs) - 1;
+	crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+	if (crtc->state->mode.flags & DRM_MODE_FLAG_INTERLACE)
+		return false;
+
+	for_each_plane(dev_priv, pipe, plane) {
+		/* Skip this plane if it's not enabled */
+		if (intel_state->wm_results.plane[pipe][plane][0] == 0)
+			continue;
+
+		/* Find the highest enabled wm level for this plane */
+		for (level = ilk_wm_max_level(dev);
+		     intel_state->wm_results.plane[pipe][plane][level] == 0; --level)
+		     { }
+
+		/*
+		 * If any of the planes on this pipe don't enable wm levels
+		 * that incur memory latencies higher than 30µs we can't enable
+		 * the SAGV
+		 */
+		if (dev_priv->wm.skl_latency[level] < SKL_SAGV_BLOCK_TIME)
+			return false;
+	}
+
+	return true;
+}
+
 static void
 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
 				   const struct intel_crtc_state *cstate,
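skl_can_enable_sagv() above scans each enabled plane for its highest enabled watermark level and rejects the SAGV if that level's latency falls under the 30 µs engine block time. The downward scan is easy to misread, so here is a small standalone illustration with made-up watermark and latency tables (none of these values come from hardware):

#include <stdbool.h>
#include <stdio.h>

#define SAGV_BLOCK_TIME_US 30	/* mirrors SKL_SAGV_BLOCK_TIME */
#define MAX_LEVEL 7

/* Hypothetical tables: wm[level] == 0 means the level is disabled. */
static const unsigned int wm[MAX_LEVEL + 1] = { 8, 8, 6, 4, 2, 0, 0, 0 };
static const unsigned int latency_us[MAX_LEVEL + 1] = { 2, 4, 8, 16, 34, 48, 64, 90 };

int main(void)
{
	int level;
	bool ok;

	/* Walk down from the top level to the highest one still enabled,
	 * the same per-plane scan skl_can_enable_sagv() performs. */
	for (level = MAX_LEVEL; wm[level] == 0; --level)
		;

	ok = latency_us[level] >= SAGV_BLOCK_TIME_US;
	printf("highest enabled level %d (latency %u us): SAGV %s\n",
	       level, latency_us[level], ok ? "allowed" : "blocked");
	return 0;
}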
@@ -3107,8 +3255,6 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
 		total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
 	}
 
-	WARN_ON(cstate->plane_mask && total_data_rate == 0);
-
 	return total_data_rate;
 }
 
@@ -3344,6 +3490,8 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
 		plane_bytes_per_line *= 4;
 		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 		plane_blocks_per_line /= 4;
+	} else if (tiling == DRM_FORMAT_MOD_NONE) {
+		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
 	} else {
 		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 	}
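The new DRM_FORMAT_MOD_NONE branch charges linear surfaces one extra block per line. As a worked example with a hypothetical 4096-pixel-wide plane at 4 bytes per pixel: 16384 bytes per line rounds up to 32 blocks of 512 bytes, and the linear case then counts 33. A one-line check of that arithmetic:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int bytes_per_line = 4096 * 4;	/* hypothetical linear plane */
	unsigned int blocks = DIV_ROUND_UP(bytes_per_line, 512) + 1;

	printf("%u blocks per line (32 tiled + 1 linear)\n", blocks);
	return 0;
}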
@@ -3910,9 +4058,24 @@ skl_compute_ddb(struct drm_atomic_state *state)
 	 * pretend that all pipes switched active status so that we'll
 	 * ensure a full DDB recompute.
 	 */
-	if (dev_priv->wm.distrust_bios_wm)
+	if (dev_priv->wm.distrust_bios_wm) {
+		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+				       state->acquire_ctx);
+		if (ret)
+			return ret;
+
 		intel_state->active_pipe_changes = ~0;
 
+		/*
+		 * We usually only initialize intel_state->active_crtcs if
+		 * we're doing a modeset; make sure this field is always
+		 * initialized during the sanitization process that happens
+		 * on the first commit too.
+		 */
+		if (!intel_state->modeset)
+			intel_state->active_crtcs = dev_priv->active_crtcs;
+	}
+
 	/*
 	 * If the modeset changes which CRTC's are active, we need to
 	 * recompute the DDB allocation for *all* active pipes, even
@@ -3941,11 +4104,33 @@ skl_compute_ddb(struct drm_atomic_state *state)
 		ret = skl_allocate_pipe_ddb(cstate, ddb);
 		if (ret)
 			return ret;
+
+		ret = drm_atomic_add_affected_planes(state, &intel_crtc->base);
+		if (ret)
+			return ret;
 	}
 
 	return 0;
 }
 
+static void
+skl_copy_wm_for_pipe(struct skl_wm_values *dst,
+		     struct skl_wm_values *src,
+		     enum pipe pipe)
+{
+	dst->wm_linetime[pipe] = src->wm_linetime[pipe];
+	memcpy(dst->plane[pipe], src->plane[pipe],
+	       sizeof(dst->plane[pipe]));
+	memcpy(dst->plane_trans[pipe], src->plane_trans[pipe],
+	       sizeof(dst->plane_trans[pipe]));
+
+	dst->ddb.pipe[pipe] = src->ddb.pipe[pipe];
+	memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
+	       sizeof(dst->ddb.y_plane[pipe]));
+	memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
+	       sizeof(dst->ddb.plane[pipe]));
+}
+
 static int
 skl_compute_wm(struct drm_atomic_state *state)
 {
@@ -4018,8 +4203,10 @@ static void skl_update_wm(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct skl_wm_values *results = &dev_priv->wm.skl_results;
+	struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
 	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
 	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
+	int pipe;
 
 	if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
 		return;
@@ -4031,8 +4218,12 @@ static void skl_update_wm(struct drm_crtc *crtc)
 	skl_write_wm_values(dev_priv, results);
 	skl_flush_wm_values(dev_priv, results);
 
-	/* store the new configuration */
-	dev_priv->wm.skl_hw = *results;
+	/*
+	 * Store the new configuration (but only for the pipes that have
+	 * changed; the other values weren't recomputed).
+	 */
+	for_each_pipe_masked(dev_priv, pipe, results->dirty_pipes)
+		skl_copy_wm_for_pipe(hw_vals, results, pipe);
 
 	mutex_unlock(&dev_priv->wm.wm_mutex);
 }
@@ -6574,9 +6765,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
 
 void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
 {
-	if (IS_CHERRYVIEW(dev_priv))
-		return;
-	else if (IS_VALLEYVIEW(dev_priv))
+	if (IS_VALLEYVIEW(dev_priv))
 		valleyview_cleanup_gt_powersave(dev_priv);
 
 	if (!i915.enable_rc6)
@@ -7658,8 +7847,53 @@ void intel_init_pm(struct drm_device *dev)
 	}
 }
 
+static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
+{
+	uint32_t flags =
+		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
+
+	switch (flags) {
+	case GEN6_PCODE_SUCCESS:
+		return 0;
+	case GEN6_PCODE_UNIMPLEMENTED_CMD:
+	case GEN6_PCODE_ILLEGAL_CMD:
+		return -ENXIO;
+	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+		return -EOVERFLOW;
+	case GEN6_PCODE_TIMEOUT:
+		return -ETIMEDOUT;
+	default:
+		MISSING_CASE(flags);
+		return 0;
+	}
+}
+
+static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
+{
+	uint32_t flags =
+		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
+
+	switch (flags) {
+	case GEN6_PCODE_SUCCESS:
+		return 0;
+	case GEN6_PCODE_ILLEGAL_CMD:
+		return -ENXIO;
+	case GEN7_PCODE_TIMEOUT:
+		return -ETIMEDOUT;
+	case GEN7_PCODE_ILLEGAL_DATA:
+		return -EINVAL;
+	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+		return -EOVERFLOW;
+	default:
+		MISSING_CASE(flags);
+		return 0;
+	}
+}
+
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
 {
+	int status;
+
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
 	/* GEN6_PCODE_* are outside of the forcewake domain, we can
@@ -7686,12 +7920,25 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
 	*val = I915_READ_FW(GEN6_PCODE_DATA);
 	I915_WRITE_FW(GEN6_PCODE_DATA, 0);
 
+	if (INTEL_GEN(dev_priv) > 6)
+		status = gen7_check_mailbox_status(dev_priv);
+	else
+		status = gen6_check_mailbox_status(dev_priv);
+
+	if (status) {
+		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
+				 status);
+		return status;
+	}
+
 	return 0;
 }
 
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
 			    u32 mbox, u32 val)
 {
+	int status;
+
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
 	/* GEN6_PCODE_* are outside of the forcewake domain, we can
@@ -7716,6 +7963,17 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
 
 	I915_WRITE_FW(GEN6_PCODE_DATA, 0);
 
+	if (INTEL_GEN(dev_priv) > 6)
+		status = gen7_check_mailbox_status(dev_priv);
+	else
+		status = gen6_check_mailbox_status(dev_priv);
+
+	if (status) {
+		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
+				 status);
+		return status;
+	}
+
 	return 0;
 }
 
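The two mailbox helpers above turn the status field read back from the pcode mailbox into a negative errno, so sandybridge_pcode_read()/write() can report firmware-side failures instead of returning success unconditionally. A compact standalone sketch of that decode step, with invented status values standing in for the GEN6_PCODE_* flags:

#include <errno.h>
#include <stdio.h>

/* Hypothetical status codes, not the real GEN6_PCODE_* values. */
enum pcode_status {
	PCODE_SUCCESS,
	PCODE_ILLEGAL_CMD,
	PCODE_TIMEOUT,
	PCODE_OVERFLOW,
};

/* Translate a mailbox status word into a negative errno, as the
 * gen6/gen7 helpers above do. */
static int check_mailbox_status(enum pcode_status flags)
{
	switch (flags) {
	case PCODE_SUCCESS:
		return 0;
	case PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case PCODE_TIMEOUT:
		return -ETIMEDOUT;
	case PCODE_OVERFLOW:
		return -EOVERFLOW;
	default:
		return 0;	/* unknown flag: warn and carry on */
	}
}

int main(void)
{
	printf("illegal cmd -> %d\n", check_mailbox_status(PCODE_ILLEGAL_CMD));
	printf("success     -> %d\n", check_mailbox_status(PCODE_SUCCESS));
	return 0;
}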
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cca7792f26d5..1d3161bbea24 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1178,8 +1178,8 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 	I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
 				   L3_HIGH_PRIO_CREDITS(2));
 
-	/* WaInsertDummyPushConstPs:bxt */
-	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+	/* WaToEnableHwFixForPushConstHWBug:bxt */
+	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
 		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
 				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 
@@ -1222,8 +1222,8 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
 	I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
 				   GEN8_LQSC_RO_PERF_DIS);
 
-	/* WaInsertDummyPushConstPs:kbl */
-	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+	/* WaToEnableHwFixForPushConstHWBug:kbl */
+	if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
 		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
 				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 
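Both workaround hunks flip the condition from "apply on steppings up to B0" to "apply from C0 onward", i.e. an inclusive revision-range test with an open upper bound. A sketch of such a range gate, using hypothetical numeric stepping values rather than the driver's revid tables:

#include <stdbool.h>
#include <stdio.h>

#define REVID_FOREVER 0xff	/* open-ended upper bound, as in i915 */

/* Inclusive range check, mirroring IS_BXT_REVID(dev, from, until). */
static bool revid_in_range(unsigned int revid, unsigned int from,
			   unsigned int until)
{
	return revid >= from && revid <= until;
}

int main(void)
{
	unsigned int c0 = 2;	/* hypothetical numeric value for C0 */

	printf("B0 gets WA: %d\n", revid_in_range(1, c0, REVID_FOREVER));
	printf("D0 gets WA: %d\n", revid_in_range(3, c0, REVID_FOREVER));
	return 0;
}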
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index 23ac8041c562..294de4549922 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -2,6 +2,9 @@ config DRM_MEDIATEK
 	tristate "DRM Support for Mediatek SoCs"
 	depends on DRM
 	depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST)
+	depends on COMMON_CLK
+	depends on HAVE_ARM_SMCCC
+	depends on OF
 	select DRM_GEM_CMA_HELPER
 	select DRM_KMS_HELPER
 	select DRM_MIPI_DSI
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index df2657051afd..28c1423049c5 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -73,10 +73,12 @@ static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
 	}
 }
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 static struct fb_deferred_io qxl_defio = {
 	.delay		= QXL_DIRTY_DELAY,
 	.deferred_io	= drm_fb_helper_deferred_io,
 };
+#endif
 
 static struct fb_ops qxlfb_ops = {
 	.owner = THIS_MODULE,
@@ -313,8 +315,10 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
 		goto out_destroy_fbi;
 	}
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	info->fbdefio = &qxl_defio;
 	fb_deferred_io_init(info);
+#endif
 
 	qdev->fbdev_info = info;
 	qdev->fbdev_qfb = &qfbdev->qfb;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a97abc8af657..1dcf39084555 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -627,7 +627,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 		if (radeon_crtc->ss.refdiv) {
 			radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
 			radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
-			if (rdev->family >= CHIP_RV770)
+			if (ASIC_IS_AVIVO(rdev) &&
+			    rdev->family != CHIP_RS780 &&
+			    rdev->family != CHIP_RS880)
 				radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 		}
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 6de342861202..ddef0d494084 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -198,16 +198,7 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
 	atpx->is_hybrid = false;
 	if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
 		printk("ATPX Hybrid Graphics\n");
-#if 1
-		/* This is a temporary hack until the D3 cold support
-		 * makes it upstream. The ATPX power_control method seems
-		 * to still work on even if the system should be using
-		 * the new standardized hybrid D3 cold ACPI interface.
-		 */
-		atpx->functions.power_cntl = true;
-#else
 		atpx->functions.power_cntl = false;
-#endif
 		atpx->is_hybrid = true;
 	}
 
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 0c00e192c845..c2e0a1ccdfbc 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -263,8 +263,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 
 	rdev = radeon_get_rdev(bo->bdev);
 	ridx = radeon_copy_ring_index(rdev);
-	old_start = old_mem->start << PAGE_SHIFT;
-	new_start = new_mem->start << PAGE_SHIFT;
+	old_start = (u64)old_mem->start << PAGE_SHIFT;
+	new_start = (u64)new_mem->start << PAGE_SHIFT;
 
 	switch (old_mem->mem_type) {
 	case TTM_PL_VRAM:
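The radeon_ttm.c fix widens the page index before shifting: with a 32-bit unsigned long, start << PAGE_SHIFT wraps once the byte offset reaches 4 GiB. A standalone demonstration of the difference, using unsigned int to force 32-bit arithmetic (the page index is invented for illustration):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* Page index 6 GiB into the aperture; the index fits in 32 bits,
	 * the byte offset does not. */
	unsigned int start = 0x00180000u;

	uint64_t wrong = start << PAGE_SHIFT;		/* shifted at 32 bits, wraps */
	uint64_t right = (uint64_t)start << PAGE_SHIFT;	/* widened before the shift */

	printf("wrong: 0x%" PRIx64 "\n", wrong);	/* 0x80000000 */
	printf("right: 0x%" PRIx64 "\n", right);	/* 0x180000000 */
	return 0;
}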
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 3d228ad90e0f..3dea1216bafd 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -840,6 +840,21 @@ static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = {
 	.destroy = tegra_output_encoder_destroy,
 };
 
+static void tegra_dsi_unprepare(struct tegra_dsi *dsi)
+{
+	int err;
+
+	if (dsi->slave)
+		tegra_dsi_unprepare(dsi->slave);
+
+	err = tegra_mipi_disable(dsi->mipi);
+	if (err < 0)
+		dev_err(dsi->dev, "failed to disable MIPI calibration: %d\n",
+			err);
+
+	pm_runtime_put(dsi->dev);
+}
+
 static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
 {
 	struct tegra_output *output = encoder_to_output(encoder);
@@ -876,7 +891,26 @@ static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
 
 	tegra_dsi_disable(dsi);
 
-	pm_runtime_put(dsi->dev);
+	tegra_dsi_unprepare(dsi);
+}
+
+static void tegra_dsi_prepare(struct tegra_dsi *dsi)
+{
+	int err;
+
+	pm_runtime_get_sync(dsi->dev);
+
+	err = tegra_mipi_enable(dsi->mipi);
+	if (err < 0)
+		dev_err(dsi->dev, "failed to enable MIPI calibration: %d\n",
+			err);
+
+	err = tegra_dsi_pad_calibrate(dsi);
+	if (err < 0)
+		dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
+
+	if (dsi->slave)
+		tegra_dsi_prepare(dsi->slave);
 }
 
 static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
@@ -887,13 +921,8 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
 	struct tegra_dsi *dsi = to_dsi(output);
 	struct tegra_dsi_state *state;
 	u32 value;
-	int err;
-
-	pm_runtime_get_sync(dsi->dev);
 
-	err = tegra_dsi_pad_calibrate(dsi);
-	if (err < 0)
-		dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
+	tegra_dsi_prepare(dsi);
 
 	state = tegra_dsi_get_state(dsi);
 
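Note the mirrored recursion in the two new helpers: tegra_dsi_prepare() powers the master before descending into its slave, while tegra_dsi_unprepare() tears the slave down first, so shutdown is the exact reverse of bring-up. A tiny standalone sketch of that ordering, with hypothetical names:

#include <stddef.h>
#include <stdio.h>

struct link {
	const char *name;
	struct link *slave;
};

static void prepare(struct link *l)
{
	printf("prepare %s\n", l->name);	/* self first... */
	if (l->slave)
		prepare(l->slave);		/* ...then the slave */
}

static void unprepare(struct link *l)
{
	if (l->slave)
		unprepare(l->slave);		/* slave first... */
	printf("unprepare %s\n", l->name);	/* ...then self */
}

int main(void)
{
	struct link slave = { "slave", NULL };
	struct link master = { "master", &slave };

	prepare(&master);
	unprepare(&master);
	return 0;
}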
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index d5df555aeba0..9688bfa92ccd 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -203,6 +203,7 @@ static int udl_fb_open(struct fb_info *info, int user)
 
 	ufbdev->fb_count++;
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	if (fb_defio && (info->fbdefio == NULL)) {
 		/* enable defio at last moment if not disabled by client */
 
@@ -218,6 +219,7 @@ static int udl_fb_open(struct fb_info *info, int user)
 		info->fbdefio = fbdefio;
 		fb_deferred_io_init(info);
 	}
+#endif
 
 	pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
 		  info->node, user, info, ufbdev->fb_count);
@@ -235,12 +237,14 @@ static int udl_fb_release(struct fb_info *info, int user)
 
 	ufbdev->fb_count--;
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	if ((ufbdev->fb_count == 0) && (info->fbdefio)) {
 		fb_deferred_io_cleanup(info);
 		kfree(info->fbdefio);
 		info->fbdefio = NULL;
 		info->fbops->fb_mmap = udl_fb_mmap;
 	}
+#endif
 
 	pr_warn("released /dev/fb%d user=%d count=%d\n",
 		info->node, user, ufbdev->fb_count);
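The udl changes keep the deferred-I/O setup behind both the compile-time option and the per-device open count: defio comes up on first open and is torn down when fb_count returns to zero. A minimal refcount sketch of that life cycle (names invented, not udl's API):

#include <stdbool.h>
#include <stdio.h>

static int fb_count;
static bool defio_active;

static void fb_open(void)
{
	fb_count++;
	if (!defio_active) {		/* enable at last moment */
		defio_active = true;
		printf("defio init (count=%d)\n", fb_count);
	}
}

static void fb_release(void)
{
	fb_count--;
	if (fb_count == 0 && defio_active) {
		defio_active = false;
		printf("defio cleanup (count=%d)\n", fb_count);
	}
}

int main(void)
{
	fb_open();
	fb_open();	/* second opener reuses the existing defio */
	fb_release();
	fb_release();	/* last release tears defio down */
	return 0;
}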