author		Ingo Molnar <mingo@kernel.org>	2016-09-15 02:24:53 -0400
committer	Ingo Molnar <mingo@kernel.org>	2016-09-15 02:24:53 -0400
commit		d4b80afbba49e968623330f1336da8c724da8aad (patch)
tree		a9478bd77d8b001a6a7119328d34e9666d7bfe93 /drivers/gpu/drm
parent		fcd709ef20a9d83bdb7524d27cd6719dac8690a0 (diff)
parent		4cea8776571b18db7485930cb422faa739580c8c (diff)
Merge branch 'linus' into x86/asm, to pick up recent fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers/gpu/drm')
33 files changed, 524 insertions, 70 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8c704c86597b..700c56baf2de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -426,6 +426,8 @@ struct amdgpu_mman {
 
 	/* custom LRU management */
 	struct amdgpu_mman_lru			log2_size[AMDGPU_TTM_LRU_SIZE];
+	/* guard for log2_size array, don't add anything in between */
+	struct amdgpu_mman_lru			guard;
 };
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring,
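The guard member added above only works because it sits immediately after log2_size[] in memory: the LRU walk in amdgpu_ttm.c further down deliberately increments past the end of the array and stops at an entry whose pointers no longer match. A minimal userspace sketch of that sentinel idea, with illustrative names rather than the driver's own:

#include <stdio.h>

struct entry { int head; };

int main(void)
{
	/* Four live entries followed by a guard whose value can never match;
	 * the two must stay adjacent, exactly like log2_size[] and guard. */
	struct entry table[5] = { {1}, {1}, {1}, {2}, {-1 /* guard */} };
	struct entry *e = &table[0];
	int old = e->head;

	e->head = 9;
	while ((++e)->head == old)	/* the guard terminates the walk */
		e->head = 9;

	for (int i = 0; i < 5; i++)
		printf("%d ", table[i].head);
	printf("\n");			/* prints: 9 9 9 2 -1 */
	return 0;
}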
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 983175363b06..fe872b82e619 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -321,6 +321,19 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
 				(le16_to_cpu(path->usConnObjectId) &
 				OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
+			/* Skip TV/CV support */
+			if ((le16_to_cpu(path->usDeviceTag) ==
+			     ATOM_DEVICE_TV1_SUPPORT) ||
+			    (le16_to_cpu(path->usDeviceTag) ==
+			     ATOM_DEVICE_CV_SUPPORT))
+				continue;
+
+			if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
+				DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n",
+					  con_obj_id, le16_to_cpu(path->usDeviceTag));
+				continue;
+			}
+
 			connector_type =
 				object_connector_convert[con_obj_id];
 			connector_object_id = con_obj_id;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index a31d7ef3032c..ec1282af2479 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -280,7 +280,7 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 {
 	unsigned i;
-	int r;
+	int r, ret = 0;
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
@@ -301,10 +301,11 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 			} else {
 				/* still not good, but we can live with it */
 				DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
+				ret = r;
 			}
 		}
 	}
-	return 0;
+	return ret;
 }
 
 /*
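This hunk changes amdgpu_ib_ring_tests() from always returning 0 to remembering a non-fatal per-ring failure while still testing the remaining rings. A small sketch of that keep-going-but-report pattern (test_one() is a stand-in, not a driver function):

#include <stdio.h>

static int test_one(int i)
{
	return (i == 2) ? -22 : 0;	/* pretend unit 2 fails with -EINVAL */
}

int main(void)
{
	int r, ret = 0;

	for (int i = 0; i < 4; i++) {
		r = test_one(i);
		if (r) {
			fprintf(stderr, "unit %d failed (%d)\n", i, r);
			ret = r;	/* remember the failure, keep testing */
		}
	}
	return ret;			/* 0 only if every unit passed */
}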
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 9b61c8ba7aaf..716f2afeb6a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -251,8 +251,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
 	adev = amdgpu_get_adev(bo->bdev);
 	ring = adev->mman.buffer_funcs_ring;
-	old_start = old_mem->start << PAGE_SHIFT;
-	new_start = new_mem->start << PAGE_SHIFT;
+	old_start = (u64)old_mem->start << PAGE_SHIFT;
+	new_start = (u64)new_mem->start << PAGE_SHIFT;
 
 	switch (old_mem->mem_type) {
 	case TTM_PL_VRAM:
@@ -950,6 +950,8 @@ static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
 	struct list_head *res = lru->lru[tbo->mem.mem_type];
 
 	lru->lru[tbo->mem.mem_type] = &tbo->lru;
+	while ((++lru)->lru[tbo->mem.mem_type] == res)
+		lru->lru[tbo->mem.mem_type] = &tbo->lru;
 
 	return res;
 }
@@ -960,6 +962,8 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
 	struct list_head *res = lru->swap_lru;
 
 	lru->swap_lru = &tbo->swap;
+	while ((++lru)->swap_lru == res)
+		lru->swap_lru = &tbo->swap;
 
 	return res;
 }
@@ -1011,6 +1015,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 		lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
 	}
 
+	for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
+		adev->mman.guard.lru[j] = NULL;
+	adev->mman.guard.swap_lru = NULL;
+
 	adev->mman.initialized = true;
 	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
 			   adev->mc.real_vram_size >> PAGE_SHIFT);
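The (u64) casts in amdgpu_move_blit() fix a truncation bug: mem->start is a page number, and shifting it by PAGE_SHIFT in the narrower type silently drops the high bits for buffers placed above 4 GiB. A self-contained demonstration, using a 32-bit type to make the wrap deterministic:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t start = 0x100000;	/* page number of an offset at 4 GiB */

	uint64_t wrong = start << PAGE_SHIFT;		/* shifts in 32 bits: wraps to 0 */
	uint64_t right = (uint64_t)start << PAGE_SHIFT;	/* widen first: 0x100000000 */

	printf("wrong=0x%llx right=0x%llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}

The same fix is applied to radeon_move_blit() in radeon_ttm.c further down.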
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index ee6466912497..77fdd9911c3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -52,6 +52,7 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
+static int cik_sdma_soft_reset(void *handle);
 
 MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
 MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
@@ -1037,6 +1038,8 @@ static int cik_sdma_resume(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	cik_sdma_soft_reset(handle);
+
 	return cik_sdma_hw_init(adev);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index d869d058ef24..425413fcaf02 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2755,8 +2755,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
 	u64 wb_gpu_addr;
 	u32 *buf;
 	struct bonaire_mqd *mqd;
-
-	gfx_v7_0_cp_compute_enable(adev, true);
+	struct amdgpu_ring *ring;
 
 	/* fix up chicken bits */
 	tmp = RREG32(mmCP_CPF_DEBUG);
@@ -2791,7 +2790,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
 
 	/* init the queues.  Just two for now. */
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+		ring = &adev->gfx.compute_ring[i];
 
 		if (ring->mqd_obj == NULL) {
 			r = amdgpu_bo_create(adev,
@@ -2970,6 +2969,13 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
 		amdgpu_bo_unreserve(ring->mqd_obj);
 
 		ring->ready = true;
+	}
+
+	gfx_v7_0_cp_compute_enable(adev, true);
+
+	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+		ring = &adev->gfx.compute_ring[i];
+
 		r = amdgpu_ring_test_ring(ring);
 		if (r)
 			ring->ready = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 1351c7e834a2..a64715d90503 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -714,7 +714,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
 		goto err1;
-	} else if (r) {
+	} else if (r < 0) {
 		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err1;
 	}
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index ef312bb75fda..963a24d46a93 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -405,7 +405,7 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
 	spin_lock(&sched->job_list_lock);
 	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
 					 struct amd_sched_job, node);
-	if (s_job)
+	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
 		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
 
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index fa3930757972..2a3ded44cf2a 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -475,7 +475,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
 					val,
 					-1,
 					&replaced);
-		state->color_mgmt_changed = replaced;
+		state->color_mgmt_changed |= replaced;
 		return ret;
 	} else if (property == config->ctm_property) {
 		ret = drm_atomic_replace_property_blob_from_id(crtc,
@@ -483,7 +483,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
 					val,
 					sizeof(struct drm_color_ctm),
 					&replaced);
-		state->color_mgmt_changed = replaced;
+		state->color_mgmt_changed |= replaced;
 		return ret;
 	} else if (property == config->gamma_lut_property) {
 		ret = drm_atomic_replace_property_blob_from_id(crtc,
@@ -491,7 +491,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
 					val,
 					-1,
 					&replaced);
-		state->color_mgmt_changed = replaced;
+		state->color_mgmt_changed |= replaced;
 		return ret;
 	} else if (crtc->funcs->atomic_set_property)
 		return crtc->funcs->atomic_set_property(crtc, state, property, val);
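drm_atomic_crtc_set_property() can replace several blobs (degamma LUT, CTM, gamma LUT) on one CRTC state, so color_mgmt_changed must accumulate across calls; with plain assignment, a later no-op update would clear a flag set by an earlier real change. A tiny illustration:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Three property updates on one state; only the second
	 * actually replaces a blob. */
	bool replaced[] = { false, true, false };
	bool assign = false, accumulate = false;

	for (int i = 0; i < 3; i++) {
		assign = replaced[i];		/* buggy: last update wins */
		accumulate |= replaced[i];	/* fixed: any change latches */
	}

	printf("assign=%d accumulate=%d\n", assign, accumulate);	/* 0 vs 1 */
	return 0;
}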
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index b1dbb60af99f..ddebe54cd5ca 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -5404,6 +5404,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 	struct drm_pending_vblank_event *e = NULL;
 	int ret = -EINVAL;
 
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
 	    page_flip->reserved != 0)
 		return -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 20fe9d52e256..f68c78918d63 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -882,11 +882,12 @@ struct i915_gem_context {
 
 	struct i915_ctx_hang_stats hang_stats;
 
-	/* Unique identifier for this context, used by the hw for tracking */
 	unsigned long flags;
 #define CONTEXT_NO_ZEROMAP		BIT(0)
 #define CONTEXT_NO_ERROR_CAPTURE	BIT(1)
-	unsigned hw_id;
+
+	/* Unique identifier for this context, used by the hw for tracking */
+	unsigned int hw_id;
 	u32 user_handle;
 
 	u32 ggtt_alignment;
@@ -1963,6 +1964,13 @@ struct drm_i915_private {
 	struct i915_suspend_saved_registers regfile;
 	struct vlv_s0ix_state vlv_s0ix_state;
 
+	enum {
+		I915_SKL_SAGV_UNKNOWN = 0,
+		I915_SKL_SAGV_DISABLED,
+		I915_SKL_SAGV_ENABLED,
+		I915_SKL_SAGV_NOT_CONTROLLED
+	} skl_sagv_status;
+
 	struct {
 		/*
 		 * Raw watermark latency values:
@@ -3591,6 +3599,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 /* belongs in i915_gem_gtt.h */
 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
 {
+	wmb();
 	if (INTEL_GEN(dev_priv) < 6)
 		intel_gtt_chipset_flush();
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1978633e7549..b35e5b6475b2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -943,8 +943,6 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 {
 	const unsigned other_rings = ~intel_engine_flag(req->engine);
 	struct i915_vma *vma;
-	uint32_t flush_domains = 0;
-	bool flush_chipset = false;
 	int ret;
 
 	list_for_each_entry(vma, vmas, exec_list) {
@@ -957,16 +955,11 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 		}
 
 		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
-			flush_chipset |= i915_gem_clflush_object(obj, false);
-
-		flush_domains |= obj->base.write_domain;
+			i915_gem_clflush_object(obj, false);
 	}
 
-	if (flush_chipset)
-		i915_gem_chipset_flush(req->engine->i915);
-
-	if (flush_domains & I915_GEM_DOMAIN_GTT)
-		wmb();
+	/* Unconditionally flush any chipset caches (for streaming writes). */
+	i915_gem_chipset_flush(req->engine->i915);
 
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5c06413ae0e6..bf2cad3f9e1f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7145,6 +7145,15 @@ enum {
 
 #define GEN6_PCODE_MAILBOX			_MMIO(0x138124)
 #define   GEN6_PCODE_READY			(1<<31)
+#define   GEN6_PCODE_ERROR_MASK			0xFF
+#define     GEN6_PCODE_SUCCESS			0x0
+#define     GEN6_PCODE_ILLEGAL_CMD		0x1
+#define     GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE	0x2
+#define     GEN6_PCODE_TIMEOUT			0x3
+#define     GEN6_PCODE_UNIMPLEMENTED_CMD	0xFF
+#define     GEN7_PCODE_TIMEOUT			0x2
+#define     GEN7_PCODE_ILLEGAL_DATA		0x3
+#define     GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE	0x10
 #define   GEN6_PCODE_WRITE_RC6VIDS		0x4
 #define   GEN6_PCODE_READ_RC6VIDS		0x5
 #define     GEN6_ENCODE_RC6_VID(mv)		(((mv) - 245) / 5)
@@ -7166,6 +7175,10 @@ enum {
 #define   HSW_PCODE_DE_WRITE_FREQ_REQ		0x17
 #define   DISPLAY_IPS_CONTROL			0x19
 #define   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL	0x1A
+#define   GEN9_PCODE_SAGV_CONTROL		0x21
+#define     GEN9_SAGV_DISABLE			0x0
+#define     GEN9_SAGV_IS_DISABLED		0x1
+#define     GEN9_SAGV_ENABLE			0x3
 #define GEN6_PCODE_DATA				_MMIO(0x138128)
 #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT	8
 #define   GEN6_PCODE_FREQ_RING_RATIO_SHIFT	16
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 3edb9580928e..c3b33a10c15c 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -41,15 +41,15 @@
  * be moved to FW_FAILED.
  */
 
-#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
+#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
 MODULE_FIRMWARE(I915_CSR_KBL);
 #define KBL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 1)
 
-#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+#define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin"
 MODULE_FIRMWARE(I915_CSR_SKL);
-#define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 23)
+#define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 26)
 
-#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
 MODULE_FIRMWARE(I915_CSR_BXT);
 #define BXT_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2a751b6e0253..175595fc3e45 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -13759,6 +13759,13 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 		    intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
 			dev_priv->display.modeset_commit_cdclk(state);
 
+		/*
+		 * SKL workaround: bspec recommends we disable the SAGV when we
+		 * have more than one pipe enabled
+		 */
+		if (IS_SKYLAKE(dev_priv) && !skl_can_enable_sagv(state))
+			skl_disable_sagv(dev_priv);
+
 		intel_modeset_verify_disabled(dev);
 	}
 
@@ -13832,6 +13839,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 		intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
 	}
 
+	if (IS_SKYLAKE(dev_priv) && intel_state->modeset &&
+	    skl_can_enable_sagv(state))
+		skl_enable_sagv(dev_priv);
+
 	drm_atomic_helper_commit_hw_done(state);
 
 	if (intel_state->modeset)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cc937a19b1ba..ff399b9a5c1f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1716,6 +1716,9 @@ void ilk_wm_get_hw_state(struct drm_device *dev);
 void skl_wm_get_hw_state(struct drm_device *dev);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 			  struct skl_ddb_allocation *ddb /* out */);
+bool skl_can_enable_sagv(struct drm_atomic_state *state);
+int skl_enable_sagv(struct drm_i915_private *dev_priv);
+int skl_disable_sagv(struct drm_i915_private *dev_priv);
 uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
 bool ilk_disable_lp_wm(struct drm_device *dev);
 int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d5deb58a2128..53e13c10e4ea 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2852,6 +2852,7 @@ bool ilk_disable_lp_wm(struct drm_device *dev)
 
 #define SKL_DDB_SIZE		896	/* in blocks */
 #define BXT_DDB_SIZE		512
+#define SKL_SAGV_BLOCK_TIME	30 /* µs */
 
 /*
  * Return the index of a plane in the SKL DDB and wm result arrays.  Primary
@@ -2875,6 +2876,153 @@ skl_wm_plane_id(const struct intel_plane *plane)
 	}
 }
 
+/*
+ * SAGV dynamically adjusts the system agent voltage and clock frequencies
+ * depending on power and performance requirements. The display engine access
+ * to system memory is blocked during the adjustment time. Because of the
+ * blocking time, having this enabled can cause full system hangs and/or pipe
+ * underruns if we don't meet all of the following requirements:
+ *
+ *  - <= 1 pipe enabled
+ *  - All planes can enable watermarks for latencies >= SAGV engine block time
+ *  - We're not using an interlaced display configuration
+ */
+int
+skl_enable_sagv(struct drm_i915_private *dev_priv)
+{
+	int ret;
+
+	if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
+	    dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED)
+		return 0;
+
+	DRM_DEBUG_KMS("Enabling the SAGV\n");
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+				      GEN9_SAGV_ENABLE);
+
+	/* We don't need to wait for the SAGV when enabling */
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	/*
+	 * Some skl systems, pre-release machines in particular,
+	 * don't actually have an SAGV.
+	 */
+	if (ret == -ENXIO) {
+		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+		dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+		return 0;
+	} else if (ret < 0) {
+		DRM_ERROR("Failed to enable the SAGV\n");
+		return ret;
+	}
+
+	dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED;
+	return 0;
+}
+
+static int
+skl_do_sagv_disable(struct drm_i915_private *dev_priv)
+{
+	int ret;
+	uint32_t temp = GEN9_SAGV_DISABLE;
+
+	ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+				     &temp);
+	if (ret)
+		return ret;
+	else
+		return temp & GEN9_SAGV_IS_DISABLED;
+}
+
+int
+skl_disable_sagv(struct drm_i915_private *dev_priv)
+{
+	int ret, result;
+
+	if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
+	    dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED)
+		return 0;
+
+	DRM_DEBUG_KMS("Disabling the SAGV\n");
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	/* bspec says to keep retrying for at least 1 ms */
+	ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	if (ret == -ETIMEDOUT) {
+		DRM_ERROR("Request to disable SAGV timed out\n");
+		return -ETIMEDOUT;
+	}
+
+	/*
+	 * Some skl systems, pre-release machines in particular,
+	 * don't actually have an SAGV.
+	 */
+	if (result == -ENXIO) {
+		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+		dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+		return 0;
+	} else if (result < 0) {
+		DRM_ERROR("Failed to disable the SAGV\n");
+		return result;
+	}
+
+	dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED;
+	return 0;
+}
+
+bool skl_can_enable_sagv(struct drm_atomic_state *state)
+{
+	struct drm_device *dev = state->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+	struct drm_crtc *crtc;
+	enum pipe pipe;
+	int level, plane;
+
+	/*
+	 * SKL workaround: bspec recommends we disable the SAGV when we have
+	 * more than one pipe enabled
+	 *
+	 * If there are no active CRTCs, no additional checks need be performed
+	 */
+	if (hweight32(intel_state->active_crtcs) == 0)
+		return true;
+	else if (hweight32(intel_state->active_crtcs) > 1)
+		return false;
+
+	/* Since we're now guaranteed to only have one active CRTC... */
+	pipe = ffs(intel_state->active_crtcs) - 1;
+	crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+	if (crtc->state->mode.flags & DRM_MODE_FLAG_INTERLACE)
+		return false;
+
+	for_each_plane(dev_priv, pipe, plane) {
+		/* Skip this plane if it's not enabled */
+		if (intel_state->wm_results.plane[pipe][plane][0] == 0)
+			continue;
+
+		/* Find the highest enabled wm level for this plane */
+		for (level = ilk_wm_max_level(dev);
+		     intel_state->wm_results.plane[pipe][plane][level] == 0; --level)
+		     { }
+
+		/*
+		 * If any of the planes on this pipe don't enable wm levels
+		 * that incur memory latencies higher than 30µs we can't enable
+		 * the SAGV
+		 */
+		if (dev_priv->wm.skl_latency[level] < SKL_SAGV_BLOCK_TIME)
+			return false;
+	}
+
+	return true;
+}
+
 static void
 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
 				   const struct intel_crtc_state *cstate,
@@ -3107,8 +3255,6 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
 		total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
 	}
 
-	WARN_ON(cstate->plane_mask && total_data_rate == 0);
-
 	return total_data_rate;
 }
 
@@ -3912,9 +4058,24 @@ skl_compute_ddb(struct drm_atomic_state *state)
 	 * pretend that all pipes switched active status so that we'll
 	 * ensure a full DDB recompute.
 	 */
-	if (dev_priv->wm.distrust_bios_wm)
+	if (dev_priv->wm.distrust_bios_wm) {
+		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+				       state->acquire_ctx);
+		if (ret)
+			return ret;
+
 		intel_state->active_pipe_changes = ~0;
 
+		/*
+		 * We usually only initialize intel_state->active_crtcs if
+		 * we're doing a modeset; make sure this field is always
+		 * initialized during the sanitization process that happens
+		 * on the first commit too.
+		 */
+		if (!intel_state->modeset)
+			intel_state->active_crtcs = dev_priv->active_crtcs;
+	}
+
 	/*
 	 * If the modeset changes which CRTC's are active, we need to
 	 * recompute the DDB allocation for *all* active pipes, even
@@ -3943,11 +4104,33 @@ skl_compute_ddb(struct drm_atomic_state *state)
 		ret = skl_allocate_pipe_ddb(cstate, ddb);
 		if (ret)
 			return ret;
+
+		ret = drm_atomic_add_affected_planes(state, &intel_crtc->base);
+		if (ret)
+			return ret;
 	}
 
 	return 0;
 }
 
+static void
+skl_copy_wm_for_pipe(struct skl_wm_values *dst,
+		     struct skl_wm_values *src,
+		     enum pipe pipe)
+{
+	dst->wm_linetime[pipe] = src->wm_linetime[pipe];
+	memcpy(dst->plane[pipe], src->plane[pipe],
+	       sizeof(dst->plane[pipe]));
+	memcpy(dst->plane_trans[pipe], src->plane_trans[pipe],
+	       sizeof(dst->plane_trans[pipe]));
+
+	dst->ddb.pipe[pipe] = src->ddb.pipe[pipe];
+	memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
+	       sizeof(dst->ddb.y_plane[pipe]));
+	memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
+	       sizeof(dst->ddb.plane[pipe]));
+}
+
 static int
 skl_compute_wm(struct drm_atomic_state *state)
 {
@@ -4020,8 +4203,10 @@ static void skl_update_wm(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct skl_wm_values *results = &dev_priv->wm.skl_results;
+	struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
 	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
 	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
+	int pipe;
 
 	if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
 		return;
@@ -4033,8 +4218,12 @@ static void skl_update_wm(struct drm_crtc *crtc)
 	skl_write_wm_values(dev_priv, results);
 	skl_flush_wm_values(dev_priv, results);
 
-	/* store the new configuration */
-	dev_priv->wm.skl_hw = *results;
+	/*
+	 * Store the new configuration (but only for the pipes that have
+	 * changed; the other values weren't recomputed).
+	 */
+	for_each_pipe_masked(dev_priv, pipe, results->dirty_pipes)
+		skl_copy_wm_for_pipe(hw_vals, results, pipe);
 
 	mutex_unlock(&dev_priv->wm.wm_mutex);
 }
@@ -7658,8 +7847,53 @@ void intel_init_pm(struct drm_device *dev)
 	}
 }
 
+static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
+{
+	uint32_t flags =
+		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
+
+	switch (flags) {
+	case GEN6_PCODE_SUCCESS:
+		return 0;
+	case GEN6_PCODE_UNIMPLEMENTED_CMD:
+	case GEN6_PCODE_ILLEGAL_CMD:
+		return -ENXIO;
+	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+		return -EOVERFLOW;
+	case GEN6_PCODE_TIMEOUT:
+		return -ETIMEDOUT;
+	default:
+		MISSING_CASE(flags);
+		return 0;
+	}
+}
+
+static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
+{
+	uint32_t flags =
+		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
+
+	switch (flags) {
+	case GEN6_PCODE_SUCCESS:
+		return 0;
+	case GEN6_PCODE_ILLEGAL_CMD:
+		return -ENXIO;
+	case GEN7_PCODE_TIMEOUT:
+		return -ETIMEDOUT;
+	case GEN7_PCODE_ILLEGAL_DATA:
+		return -EINVAL;
+	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+		return -EOVERFLOW;
+	default:
+		MISSING_CASE(flags);
+		return 0;
+	}
+}
+
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
 {
+	int status;
+
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
 	/* GEN6_PCODE_* are outside of the forcewake domain, we can
@@ -7686,12 +7920,25 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
 	*val = I915_READ_FW(GEN6_PCODE_DATA);
 	I915_WRITE_FW(GEN6_PCODE_DATA, 0);
 
+	if (INTEL_GEN(dev_priv) > 6)
+		status = gen7_check_mailbox_status(dev_priv);
+	else
+		status = gen6_check_mailbox_status(dev_priv);
+
+	if (status) {
+		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
+				 status);
+		return status;
+	}
+
 	return 0;
 }
 
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
 			    u32 mbox, u32 val)
 {
+	int status;
+
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
 	/* GEN6_PCODE_* are outside of the forcewake domain, we can
@@ -7716,6 +7963,17 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
 
 	I915_WRITE_FW(GEN6_PCODE_DATA, 0);
 
+	if (INTEL_GEN(dev_priv) > 6)
+		status = gen7_check_mailbox_status(dev_priv);
+	else
+		status = gen6_check_mailbox_status(dev_priv);
+
+	if (status) {
+		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
+				 status);
+		return status;
+	}
+
 	return 0;
 }
 
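skl_can_enable_sagv() above leans on two bit tricks: hweight32() counts the active pipes and ffs() recovers the index of the single remaining one. The equivalent with GCC builtins as userspace stand-ins for the kernel helpers:

#include <stdio.h>

int main(void)
{
	unsigned int active_crtcs = 1u << 2;		/* only pipe C active */

	if (__builtin_popcount(active_crtcs) > 1) {	/* hweight32() */
		printf("more than one pipe, SAGV must stay off\n");
		return 0;
	}

	int pipe = __builtin_ffs(active_crtcs) - 1;	/* ffs() is 1-based */
	printf("single active pipe index: %d\n", pipe);	/* prints 2 */
	return 0;
}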
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 9f7dafce3a4c..7bf90e9e6139 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -171,10 +171,34 @@ static void imx_drm_output_poll_changed(struct drm_device *drm)
 	drm_fbdev_cma_hotplug_event(imxdrm->fbhelper);
 }
 
+static int imx_drm_atomic_check(struct drm_device *dev,
+				struct drm_atomic_state *state)
+{
+	int ret;
+
+	ret = drm_atomic_helper_check_modeset(dev, state);
+	if (ret)
+		return ret;
+
+	ret = drm_atomic_helper_check_planes(dev, state);
+	if (ret)
+		return ret;
+
+	/*
+	 * Check modeset again in case crtc_state->mode_changed is
+	 * updated in plane's ->atomic_check callback.
+	 */
+	ret = drm_atomic_helper_check_modeset(dev, state);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
 static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
 	.fb_create = drm_fb_cma_create,
 	.output_poll_changed = imx_drm_output_poll_changed,
-	.atomic_check = drm_atomic_helper_check,
+	.atomic_check = imx_drm_atomic_check,
 	.atomic_commit = drm_atomic_helper_commit,
 };
 
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 08e188bc10fc..462056e4b9e4 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -76,6 +76,8 @@ static void ipu_crtc_disable(struct drm_crtc *crtc)
 		crtc->state->event = NULL;
 	}
 	spin_unlock_irq(&crtc->dev->event_lock);
+
+	drm_crtc_vblank_off(crtc);
 }
 
 static void imx_drm_crtc_reset(struct drm_crtc *crtc)
@@ -175,6 +177,8 @@ static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
 static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
 				  struct drm_crtc_state *old_crtc_state)
 {
+	drm_crtc_vblank_on(crtc);
+
 	spin_lock_irq(&crtc->dev->event_lock);
 	if (crtc->state->event) {
 		WARN_ON(drm_crtc_vblank_get(crtc));
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 4ad67d015ec7..29423e757d36 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -319,13 +319,14 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
 		return -EINVAL;
 
 	/*
-	 * since we cannot touch active IDMAC channels, we do not support
-	 * resizing the enabled plane or changing its format
+	 * We support resizing the active plane or changing its format by
+	 * forcing a CRTC mode change and disabling/enabling the plane in the
+	 * plane's ->atomic_update callback.
 	 */
 	if (old_fb && (state->src_w != old_state->src_w ||
 		       state->src_h != old_state->src_h ||
 		       fb->pixel_format != old_fb->pixel_format))
-		return -EINVAL;
+		crtc_state->mode_changed = true;
 
 	eba = drm_plane_state_to_eba(state);
 
@@ -336,7 +337,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
 		return -EINVAL;
 
 	if (old_fb && fb->pitches[0] != old_fb->pitches[0])
-		return -EINVAL;
+		crtc_state->mode_changed = true;
 
 	switch (fb->pixel_format) {
 	case DRM_FORMAT_YUV420:
@@ -372,7 +373,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
 			return -EINVAL;
 
 		if (old_fb && old_fb->pitches[1] != fb->pitches[1])
-			return -EINVAL;
+			crtc_state->mode_changed = true;
 	}
 
 	return 0;
@@ -392,8 +393,14 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
 	enum ipu_color_space ics;
 
 	if (old_state->fb) {
-		ipu_plane_atomic_set_base(ipu_plane, old_state);
-		return;
+		struct drm_crtc_state *crtc_state = state->crtc->state;
+
+		if (!crtc_state->mode_changed) {
+			ipu_plane_atomic_set_base(ipu_plane, old_state);
+			return;
+		}
+
+		ipu_disable_plane(plane);
 	}
 
 	switch (ipu_plane->dp_flow) {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index b4bc7f1ef717..d0da52f2a806 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -157,6 +157,12 @@ struct msm_drm_private {
 	struct shrinker shrinker;
 
 	struct msm_vblank_ctrl vblank_ctrl;
+
+	/* task holding struct_mutex.. currently only used in submit path
+	 * to detect and reject faults from copy_from_user() for submit
+	 * ioctl.
+	 */
+	struct task_struct *struct_mutex_task;
 };
 
 struct msm_format {
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 6cd4af443139..85f3047e05ae 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -196,11 +196,20 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_device *dev = obj->dev;
+	struct msm_drm_private *priv = dev->dev_private;
 	struct page **pages;
 	unsigned long pfn;
 	pgoff_t pgoff;
 	int ret;
 
+	/* This should only happen if userspace tries to pass a mmap'd
+	 * but unfaulted gem bo vaddr into submit ioctl, triggering
+	 * a page fault while struct_mutex is already held.  This is
+	 * not a valid use-case so just bail.
+	 */
+	if (priv->struct_mutex_task == current)
+		return VM_FAULT_SIGBUS;
+
 	/* Make sure we don't parallel update on a fault, nor move or remove
 	 * something from beneath our feet
 	 */
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 9766f9ae4b7d..880d6a9af7c8 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -64,6 +64,14 @@ void msm_gem_submit_free(struct msm_gem_submit *submit)
 	kfree(submit);
 }
 
+static inline unsigned long __must_check
+copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+{
+	if (access_ok(VERIFY_READ, from, n))
+		return __copy_from_user_inatomic(to, from, n);
+	return -EFAULT;
+}
+
 static int submit_lookup_objects(struct msm_gem_submit *submit,
 		struct drm_msm_gem_submit *args, struct drm_file *file)
 {
@@ -71,6 +79,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 	int ret = 0;
 
 	spin_lock(&file->table_lock);
+	pagefault_disable();
 
 	for (i = 0; i < args->nr_bos; i++) {
 		struct drm_msm_gem_submit_bo submit_bo;
@@ -84,10 +93,15 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 		 */
 		submit->bos[i].flags = 0;
 
-		ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
-		if (ret) {
-			ret = -EFAULT;
-			goto out_unlock;
+		ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
+		if (unlikely(ret)) {
+			pagefault_enable();
+			spin_unlock(&file->table_lock);
+			ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
+			if (ret)
+				goto out;
+			spin_lock(&file->table_lock);
+			pagefault_disable();
 		}
 
 		if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
@@ -127,9 +141,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 	}
 
 out_unlock:
-	submit->nr_bos = i;
+	pagefault_enable();
 	spin_unlock(&file->table_lock);
 
+out:
+	submit->nr_bos = i;
+
 	return ret;
 }
 
@@ -377,6 +394,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;
 
+	priv->struct_mutex_task = current;
+
 	submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
 	if (!submit) {
 		ret = -ENOMEM;
@@ -468,6 +487,7 @@ out:
 	if (ret)
 		msm_gem_submit_free(submit);
 out_unlock:
+	priv->struct_mutex_task = NULL;
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
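The msm submit path above copies BO descriptors while holding a spinlock, so it first attempts a copy that is forbidden from faulting; only on failure does it drop the lock, take the fault with a normal copy, and re-acquire. A simplified userspace analogue of that fast-path/slow-path shape (the simulated fault flag stands in for an unfaulted user page):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* Simulated no-fault copy: fails instead of faulting. */
static int copy_inatomic(void *dst, const void *src, size_t n, int would_fault)
{
	if (would_fault)
		return -14;	/* -EFAULT */
	memcpy(dst, src, n);
	return 0;
}

int main(void)
{
	pthread_spinlock_t table_lock;
	char src[] = "bo flags", dst[sizeof(src)] = "";
	int would_fault = 1;	/* first attempt hits an unfaulted page */

	pthread_spin_init(&table_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_lock(&table_lock);

	if (copy_inatomic(dst, src, sizeof(src), would_fault)) {
		/* Slow path: drop the lock so the fault may be serviced. */
		pthread_spin_unlock(&table_lock);
		memcpy(dst, src, sizeof(src));	/* may sleep/fault here */
		pthread_spin_lock(&table_lock);
	}

	pthread_spin_unlock(&table_lock);
	printf("%s\n", dst);
	return 0;
}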
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index f2ad17aa33f0..dc57b628e074 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -225,6 +225,17 @@ static bool nouveau_pr3_present(struct pci_dev *pdev)
 	if (!parent_pdev)
 		return false;
 
+	if (!parent_pdev->bridge_d3) {
+		/*
+		 * Parent PCI bridge is currently not power managed.
+		 * Since userspace can change this afterwards, to be on
+		 * the safe side we stick with _DSM and prevent usage of
+		 * _PR3 from the bridge.
+		 */
+		pci_d3cold_disable(pdev);
+		return false;
+	}
+
 	parent_adev = ACPI_COMPANION(&parent_pdev->dev);
 	if (!parent_adev)
 		return false;
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index df2657051afd..28c1423049c5 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -73,10 +73,12 @@ static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
 	}
 }
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 static struct fb_deferred_io qxl_defio = {
 	.delay		= QXL_DIRTY_DELAY,
 	.deferred_io	= drm_fb_helper_deferred_io,
 };
+#endif
 
 static struct fb_ops qxlfb_ops = {
 	.owner = THIS_MODULE,
@@ -313,8 +315,10 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
 		goto out_destroy_fbi;
 	}
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	info->fbdefio = &qxl_defio;
 	fb_deferred_io_init(info);
+#endif
 
 	qdev->fbdev_info = info;
 	qdev->fbdev_qfb = &qfbdev->qfb;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a97abc8af657..1dcf39084555 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -627,7 +627,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 		if (radeon_crtc->ss.refdiv) {
 			radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
 			radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
-			if (rdev->family >= CHIP_RV770)
+			if (ASIC_IS_AVIVO(rdev) &&
+			    rdev->family != CHIP_RS780 &&
+			    rdev->family != CHIP_RS880)
 				radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 		}
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 0c00e192c845..c2e0a1ccdfbc 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -263,8 +263,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 
 	rdev = radeon_get_rdev(bo->bdev);
 	ridx = radeon_copy_ring_index(rdev);
-	old_start = old_mem->start << PAGE_SHIFT;
-	new_start = new_mem->start << PAGE_SHIFT;
+	old_start = (u64)old_mem->start << PAGE_SHIFT;
+	new_start = (u64)new_mem->start << PAGE_SHIFT;
 
 	switch (old_mem->mem_type) {
 	case TTM_PL_VRAM:
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index 3d228ad90e0f..3dea1216bafd 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c | |||
@@ -840,6 +840,21 @@ static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = { | |||
840 | .destroy = tegra_output_encoder_destroy, | 840 | .destroy = tegra_output_encoder_destroy, |
841 | }; | 841 | }; |
842 | 842 | ||
843 | static void tegra_dsi_unprepare(struct tegra_dsi *dsi) | ||
844 | { | ||
845 | int err; | ||
846 | |||
847 | if (dsi->slave) | ||
848 | tegra_dsi_unprepare(dsi->slave); | ||
849 | |||
850 | err = tegra_mipi_disable(dsi->mipi); | ||
851 | if (err < 0) | ||
852 | dev_err(dsi->dev, "failed to disable MIPI calibration: %d\n", | ||
853 | err); | ||
854 | |||
855 | pm_runtime_put(dsi->dev); | ||
856 | } | ||
857 | |||
843 | static void tegra_dsi_encoder_disable(struct drm_encoder *encoder) | 858 | static void tegra_dsi_encoder_disable(struct drm_encoder *encoder) |
844 | { | 859 | { |
845 | struct tegra_output *output = encoder_to_output(encoder); | 860 | struct tegra_output *output = encoder_to_output(encoder); |
@@ -876,7 +891,26 @@ static void tegra_dsi_encoder_disable(struct drm_encoder *encoder) | |||
876 | 891 | ||
877 | tegra_dsi_disable(dsi); | 892 | tegra_dsi_disable(dsi); |
878 | 893 | ||
879 | pm_runtime_put(dsi->dev); | 894 | tegra_dsi_unprepare(dsi); |
895 | } | ||
896 | |||
897 | static void tegra_dsi_prepare(struct tegra_dsi *dsi) | ||
898 | { | ||
899 | int err; | ||
900 | |||
901 | pm_runtime_get_sync(dsi->dev); | ||
902 | |||
903 | err = tegra_mipi_enable(dsi->mipi); | ||
904 | if (err < 0) | ||
905 | dev_err(dsi->dev, "failed to enable MIPI calibration: %d\n", | ||
906 | err); | ||
907 | |||
908 | err = tegra_dsi_pad_calibrate(dsi); | ||
909 | if (err < 0) | ||
910 | dev_err(dsi->dev, "MIPI calibration failed: %d\n", err); | ||
911 | |||
912 | if (dsi->slave) | ||
913 | tegra_dsi_prepare(dsi->slave); | ||
880 | } | 914 | } |
881 | 915 | ||
882 | static void tegra_dsi_encoder_enable(struct drm_encoder *encoder) | 916 | static void tegra_dsi_encoder_enable(struct drm_encoder *encoder) |
@@ -887,13 +921,8 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder) | |||
887 | struct tegra_dsi *dsi = to_dsi(output); | 921 | struct tegra_dsi *dsi = to_dsi(output); |
888 | struct tegra_dsi_state *state; | 922 | struct tegra_dsi_state *state; |
889 | u32 value; | 923 | u32 value; |
890 | int err; | ||
891 | |||
892 | pm_runtime_get_sync(dsi->dev); | ||
893 | 924 | ||
894 | err = tegra_dsi_pad_calibrate(dsi); | 925 | tegra_dsi_prepare(dsi); |
895 | if (err < 0) | ||
896 | dev_err(dsi->dev, "MIPI calibration failed: %d\n", err); | ||
897 | 926 | ||
898 | state = tegra_dsi_get_state(dsi); | 927 | state = tegra_dsi_get_state(dsi); |
899 | 928 | ||
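Reviewer's note: the tegra refactoring above moves runtime PM and MIPI calibration into paired tegra_dsi_prepare()/tegra_dsi_unprepare() helpers that recurse into the slave link, so ganged DSI brings both halves up and tears them down together, and every get has a matching put. A generic sketch of the balanced pattern, with hypothetical foo_* names:

    struct foo_link {
            struct device *dev;
            struct foo_link *slave;  /* second half of a ganged link, or NULL */
    };

    static void foo_link_calibrate(struct foo_link *link);    /* hypothetical */
    static void foo_link_uncalibrate(struct foo_link *link);  /* hypothetical */

    static void foo_link_prepare(struct foo_link *link)
    {
            pm_runtime_get_sync(link->dev);  /* power up before touching hw */
            foo_link_calibrate(link);

            if (link->slave)
                    foo_link_prepare(link->slave);
    }

    static void foo_link_unprepare(struct foo_link *link)
    {
            if (link->slave)
                    foo_link_unprepare(link->slave);

            foo_link_uncalibrate(link);      /* undo in reverse order */
            pm_runtime_put(link->dev);       /* drop the reference last */
    }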
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index d5df555aeba0..9688bfa92ccd 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c | |||
@@ -203,6 +203,7 @@ static int udl_fb_open(struct fb_info *info, int user) | |||
203 | 203 | ||
204 | ufbdev->fb_count++; | 204 | ufbdev->fb_count++; |
205 | 205 | ||
206 | #ifdef CONFIG_DRM_FBDEV_EMULATION | ||
206 | if (fb_defio && (info->fbdefio == NULL)) { | 207 | if (fb_defio && (info->fbdefio == NULL)) { |
207 | /* enable defio at last moment if not disabled by client */ | 208 | /* enable defio at last moment if not disabled by client */ |
208 | 209 | ||
@@ -218,6 +219,7 @@ static int udl_fb_open(struct fb_info *info, int user) | |||
218 | info->fbdefio = fbdefio; | 219 | info->fbdefio = fbdefio; |
219 | fb_deferred_io_init(info); | 220 | fb_deferred_io_init(info); |
220 | } | 221 | } |
222 | #endif | ||
221 | 223 | ||
222 | pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n", | 224 | pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n", |
223 | info->node, user, info, ufbdev->fb_count); | 225 | info->node, user, info, ufbdev->fb_count); |
@@ -235,12 +237,14 @@ static int udl_fb_release(struct fb_info *info, int user) | |||
235 | 237 | ||
236 | ufbdev->fb_count--; | 238 | ufbdev->fb_count--; |
237 | 239 | ||
240 | #ifdef CONFIG_DRM_FBDEV_EMULATION | ||
238 | if ((ufbdev->fb_count == 0) && (info->fbdefio)) { | 241 | if ((ufbdev->fb_count == 0) && (info->fbdefio)) { |
239 | fb_deferred_io_cleanup(info); | 242 | fb_deferred_io_cleanup(info); |
240 | kfree(info->fbdefio); | 243 | kfree(info->fbdefio); |
241 | info->fbdefio = NULL; | 244 | info->fbdefio = NULL; |
242 | info->fbops->fb_mmap = udl_fb_mmap; | 245 | info->fbops->fb_mmap = udl_fb_mmap; |
243 | } | 246 | } |
247 | #endif | ||
244 | 248 | ||
245 | pr_warn("released /dev/fb%d user=%d count=%d\n", | 249 | pr_warn("released /dev/fb%d user=%d count=%d\n", |
246 | info->node, user, ufbdev->fb_count); | 250 | info->node, user, ufbdev->fb_count); |
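Reviewer's note: udl differs from qxl in that its fb_deferred_io is allocated lazily on first open and destroyed when the last opener goes away, with ufbdev->fb_count acting as the reference count; the new #ifdefs compile that whole lifecycle out when fbdev emulation is off. A condensed sketch of the lazy setup/teardown (hypothetical foo_* names, refcounting and error paths trimmed):

    #ifdef CONFIG_DRM_FBDEV_EMULATION
    static void foo_fb_arm_defio(struct fb_info *info)
    {
            struct fb_deferred_io *fbdefio;

            fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
            if (!fbdefio)
                    return;  /* defio is an optimization; writes still work */

            fbdefio->delay = HZ / 2;
            fbdefio->deferred_io = drm_fb_helper_deferred_io;
            info->fbdefio = fbdefio;
            fb_deferred_io_init(info);
    }

    static void foo_fb_disarm_defio(struct fb_info *info)
    {
            /* Stop the machinery before freeing its descriptor, then
             * clear the pointer so a later open can arm a fresh one. */
            fb_deferred_io_cleanup(info);
            kfree(info->fbdefio);
            info->fbdefio = NULL;
    }
    #endif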
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 8b42d31a7f0e..9ecef9385491 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c | |||
@@ -57,21 +57,21 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data, | |||
57 | switch (args->param) { | 57 | switch (args->param) { |
58 | case DRM_VC4_PARAM_V3D_IDENT0: | 58 | case DRM_VC4_PARAM_V3D_IDENT0: |
59 | ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); | 59 | ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); |
60 | if (ret) | 60 | if (ret < 0) |
61 | return ret; | 61 | return ret; |
62 | args->value = V3D_READ(V3D_IDENT0); | 62 | args->value = V3D_READ(V3D_IDENT0); |
63 | pm_runtime_put(&vc4->v3d->pdev->dev); | 63 | pm_runtime_put(&vc4->v3d->pdev->dev); |
64 | break; | 64 | break; |
65 | case DRM_VC4_PARAM_V3D_IDENT1: | 65 | case DRM_VC4_PARAM_V3D_IDENT1: |
66 | ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); | 66 | ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); |
67 | if (ret) | 67 | if (ret < 0) |
68 | return ret; | 68 | return ret; |
69 | args->value = V3D_READ(V3D_IDENT1); | 69 | args->value = V3D_READ(V3D_IDENT1); |
70 | pm_runtime_put(&vc4->v3d->pdev->dev); | 70 | pm_runtime_put(&vc4->v3d->pdev->dev); |
71 | break; | 71 | break; |
72 | case DRM_VC4_PARAM_V3D_IDENT2: | 72 | case DRM_VC4_PARAM_V3D_IDENT2: |
73 | ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); | 73 | ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); |
74 | if (ret) | 74 | if (ret < 0) |
75 | return ret; | 75 | return ret; |
76 | args->value = V3D_READ(V3D_IDENT2); | 76 | args->value = V3D_READ(V3D_IDENT2); |
77 | pm_runtime_put(&vc4->v3d->pdev->dev); | 77 | pm_runtime_put(&vc4->v3d->pdev->dev); |
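Reviewer's note: pm_runtime_get_sync() is not a 0-on-success call: it returns 1 when the device was already resumed, so the old `if (ret)` bailed out on a perfectly healthy path. Only negative values are errors. It also leaves the usage count raised even when resume fails, so a maximally careful caller drops that count before returning; a sketch of the defensive variant (the hunk above keeps the simpler form):

    static int foo_read_ident(struct device *dev, u32 *value)
    {
            int ret;

            ret = pm_runtime_get_sync(dev);
            if (ret < 0) {
                    /* The usage count was bumped even though resume
                     * failed; drop it before bailing out. */
                    pm_runtime_put_noidle(dev);
                    return ret;
            }

            *value = foo_hw_read(dev);  /* hypothetical register read */

            pm_runtime_put(dev);
            return 0;
    }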
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 489e3de0c050..428e24919ef1 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h | |||
@@ -321,6 +321,15 @@ vc4_first_render_job(struct vc4_dev *vc4) | |||
321 | struct vc4_exec_info, head); | 321 | struct vc4_exec_info, head); |
322 | } | 322 | } |
323 | 323 | ||
324 | static inline struct vc4_exec_info * | ||
325 | vc4_last_render_job(struct vc4_dev *vc4) | ||
326 | { | ||
327 | if (list_empty(&vc4->render_job_list)) | ||
328 | return NULL; | ||
329 | return list_last_entry(&vc4->render_job_list, | ||
330 | struct vc4_exec_info, head); | ||
331 | } | ||
332 | |||
324 | /** | 333 | /** |
325 | * struct vc4_texture_sample_info - saves the offsets into the UBO for texture | 334 | * struct vc4_texture_sample_info - saves the offsets into the UBO for texture |
326 | * setup parameters. | 335 | * setup parameters. |
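Reviewer's note: list_last_entry() is only valid on a non-empty list, hence the explicit empty check in the new helper. Like vc4_first_render_job(), it has to be called with job_lock held so the list cannot change underfoot; a hypothetical caller:

    static uint64_t foo_newest_render_seqno(struct vc4_dev *vc4)
    {
            struct vc4_exec_info *exec;
            unsigned long irqflags;
            uint64_t seqno = 0;

            spin_lock_irqsave(&vc4->job_lock, irqflags);
            exec = vc4_last_render_job(vc4);
            if (exec)
                    seqno = exec->seqno;  /* newest job still in flight */
            spin_unlock_irqrestore(&vc4->job_lock, irqflags);

            return seqno;
    }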
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 6155e8aca1c6..b262c5c26f10 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c | |||
@@ -534,8 +534,8 @@ vc4_cl_lookup_bos(struct drm_device *dev, | |||
534 | return -EINVAL; | 534 | return -EINVAL; |
535 | } | 535 | } |
536 | 536 | ||
537 | exec->bo = kcalloc(exec->bo_count, sizeof(struct drm_gem_cma_object *), | 537 | exec->bo = drm_calloc_large(exec->bo_count, |
538 | GFP_KERNEL); | 538 | sizeof(struct drm_gem_cma_object *)); |
539 | if (!exec->bo) { | 539 | if (!exec->bo) { |
540 | DRM_ERROR("Failed to allocate validated BO pointers\n"); | 540 | DRM_ERROR("Failed to allocate validated BO pointers\n"); |
541 | return -ENOMEM; | 541 | return -ENOMEM; |
@@ -572,8 +572,8 @@ vc4_cl_lookup_bos(struct drm_device *dev, | |||
572 | spin_unlock(&file_priv->table_lock); | 572 | spin_unlock(&file_priv->table_lock); |
573 | 573 | ||
574 | fail: | 574 | fail: |
575 | kfree(handles); | 575 | drm_free_large(handles); |
576 | return 0; | 576 | return ret; |
577 | } | 577 | } |
578 | 578 | ||
579 | static int | 579 | static int |
@@ -608,7 +608,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec) | |||
608 | * read the contents back for validation, and I think the | 608 | * read the contents back for validation, and I think the |
609 | * bo->vaddr is uncached access. | 609 | * bo->vaddr is uncached access. |
610 | */ | 610 | */ |
611 | temp = kmalloc(temp_size, GFP_KERNEL); | 611 | temp = drm_malloc_ab(temp_size, 1); |
612 | if (!temp) { | 612 | if (!temp) { |
613 | DRM_ERROR("Failed to allocate storage for copying " | 613 | DRM_ERROR("Failed to allocate storage for copying " |
614 | "in bin/render CLs.\n"); | 614 | "in bin/render CLs.\n"); |
@@ -675,7 +675,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec) | |||
675 | ret = vc4_validate_shader_recs(dev, exec); | 675 | ret = vc4_validate_shader_recs(dev, exec); |
676 | 676 | ||
677 | fail: | 677 | fail: |
678 | kfree(temp); | 678 | drm_free_large(temp); |
679 | return ret; | 679 | return ret; |
680 | } | 680 | } |
681 | 681 | ||
@@ -688,7 +688,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) | |||
688 | if (exec->bo) { | 688 | if (exec->bo) { |
689 | for (i = 0; i < exec->bo_count; i++) | 689 | for (i = 0; i < exec->bo_count; i++) |
690 | drm_gem_object_unreference_unlocked(&exec->bo[i]->base); | 690 | drm_gem_object_unreference_unlocked(&exec->bo[i]->base); |
691 | kfree(exec->bo); | 691 | drm_free_large(exec->bo); |
692 | } | 692 | } |
693 | 693 | ||
694 | while (!list_empty(&exec->unref_list)) { | 694 | while (!list_empty(&exec->unref_list)) { |
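Reviewer's note: two distinct fixes are folded into this file. First, bo_count comes straight from userspace, so a large submit could push kcalloc() past what the page allocator will hand out; drm_calloc_large()/drm_malloc_ab() fall back to vmalloc() for big requests and must be paired with drm_free_large(). Second, the `fail:` path used to `return 0;`, silently reporting success after a failed lookup. A sketch of the corrected shape (hypothetical names, details trimmed):

    static int foo_lookup_handles(uint32_t count, const void __user *src,
                                  uint32_t **out)
    {
            uint32_t *handles;
            int ret = 0;

            /* count is userspace-controlled; use the vmalloc fallback. */
            handles = drm_malloc_ab(count, sizeof(uint32_t));
            if (!handles)
                    return -ENOMEM;

            if (copy_from_user(handles, src, count * sizeof(uint32_t))) {
                    ret = -EFAULT;
                    goto fail;
            }

            *out = handles;
            return 0;

    fail:
            drm_free_large(handles);  /* never kfree() a possible vmalloc */
            return ret;               /* propagate the error, not 0 */
    }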
@@ -942,8 +942,8 @@ vc4_gem_destroy(struct drm_device *dev) | |||
942 | vc4->overflow_mem = NULL; | 942 | vc4->overflow_mem = NULL; |
943 | } | 943 | } |
944 | 944 | ||
945 | vc4_bo_cache_destroy(dev); | ||
946 | |||
947 | if (vc4->hang_state) | 945 | if (vc4->hang_state) |
948 | vc4_free_hang_state(dev, vc4->hang_state); | 946 | vc4_free_hang_state(dev, vc4->hang_state); |
947 | |||
948 | vc4_bo_cache_destroy(dev); | ||
949 | } | 949 | } |
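Reviewer's note: the reordering in vc4_gem_destroy() looks cosmetic but presumably is not: vc4_free_hang_state() drops the references on the BOs captured for debugging, and in vc4 a final unref hands a buffer back to the BO cache rather than freeing it outright. Destroying the cache before releasing the hang state would return buffers to a cache that no longer exists, so the cache teardown has to come last.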
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c index b0104a346a74..094bc6a475c1 100644 --- a/drivers/gpu/drm/vc4/vc4_irq.c +++ b/drivers/gpu/drm/vc4/vc4_irq.c | |||
@@ -83,8 +83,10 @@ vc4_overflow_mem_work(struct work_struct *work) | |||
83 | 83 | ||
84 | spin_lock_irqsave(&vc4->job_lock, irqflags); | 84 | spin_lock_irqsave(&vc4->job_lock, irqflags); |
85 | current_exec = vc4_first_bin_job(vc4); | 85 | current_exec = vc4_first_bin_job(vc4); |
86 | if (!current_exec) | ||
87 | current_exec = vc4_last_render_job(vc4); | ||
86 | if (current_exec) { | 88 | if (current_exec) { |
87 | vc4->overflow_mem->seqno = vc4->finished_seqno + 1; | 89 | vc4->overflow_mem->seqno = current_exec->seqno; |
88 | list_add_tail(&vc4->overflow_mem->unref_head, | 90 | list_add_tail(&vc4->overflow_mem->unref_head, |
89 | &current_exec->unref_list); | 91 | &current_exec->unref_list); |
90 | vc4->overflow_mem = NULL; | 92 | vc4->overflow_mem = NULL; |
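Reviewer's note: the binner-overflow BO must survive until the GPU has retired every job that might still reference it. Tagging it with finished_seqno + 1 tied its release to whatever job happened to finish next, and when no bin job was active the work item had nothing to park it on at all; the fix parks it on the newest job that could touch it. Restated as a sketch, with a hypothetical bo pointer standing in for vc4->overflow_mem:

    struct vc4_exec_info *exec;

    exec = vc4_first_bin_job(vc4);           /* a job still binning, if any */
    if (!exec)
            exec = vc4_last_render_job(vc4); /* else the newest render job */

    if (exec) {
            /* vc4_complete_exec() frees everything on unref_list once
             * the job with this seqno retires. */
            bo->seqno = exec->seqno;
            list_add_tail(&bo->unref_head, &exec->unref_list);
    }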