diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-05-27 17:08:56 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-05-27 17:08:56 -0400 |
commit | c61b49c79e1c1d4bc0c2fdc053ef56e65759b5fd (patch) | |
tree | 9a61cd3c629f2df7ce29590bfcef1e77c9953565 | |
parent | 1e8143db755f745a9842984de5e8b423f583aea2 (diff) | |
parent | 7fa1d27b638db86516d7b3d8dc1a3576c72ee423 (diff) |
Merge tag 'drm-fixes-v4.7-rc1' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
- one IMX built-in regression fix
- a set of amdgpu fixes, mostly powerplay and polaris GPU stuff
- a set of i915 fixes all over, many cc'ed to stable.
The i915 batch contains support for DP++ dongle detection, which is
used to fix some regressions in the HDMI color depth area.
* tag 'drm-fixes-v4.7-rc1' of git://people.freedesktop.org/~airlied/linux: (44 commits)
drm/amd: add Kconfig dependency for ACP on DRM_AMDGPU
drm/amdgpu: Fix hdmi deep color support.
drm/amdgpu: fix bug in fence driver fini
drm/i915: Stop automatically retiring requests after a GPU hang
drm/i915: Unify intel_ring_begin()
drm/i915: Ignore stale wm register values on resume on ilk-bdw (v2)
drm/i915/psr: Try to program link training times correctly
drm/imx: Match imx-ipuv3-crtc components using device node in platform data
drm/i915/bxt: Adjusting the error in horizontal timings retrieval
drm/i915: Don't leave old junk in ilk active watermarks on readout
drm/i915: s/DPPL/DPLL/ for SKL DPLLs
drm/i915: Fix gen8 semaphores id for legacy mode
drm/i915: Set crtc_state->lane_count for HDMI
drm/i915/BXT: Retrieving the horizontal timing for DSI
drm/i915: Protect gen7 irq_seqno_barrier with uncore lock
drm/i915: Re-enable GGTT earlier during resume on pre-gen6 platforms
drm/i915: Determine DP++ type 1 DVI adaptor presence based on VBT
drm/i915: Enable/disable TMDS output buffers in DP++ adaptor as needed
drm/i915: Respect DP++ adaptor TMDS clock limit
drm: Add helper for DP++ adaptors
...
51 files changed, 1144 insertions, 492 deletions
diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl index 4a0c599b6a6d..7586bf75f62e 100644 --- a/Documentation/DocBook/gpu.tmpl +++ b/Documentation/DocBook/gpu.tmpl | |||
@@ -1628,6 +1628,12 @@ void intel_crt_init(struct drm_device *dev) | |||
1628 | !Edrivers/gpu/drm/drm_dp_helper.c | 1628 | !Edrivers/gpu/drm/drm_dp_helper.c |
1629 | </sect2> | 1629 | </sect2> |
1630 | <sect2> | 1630 | <sect2> |
1631 | <title>Display Port Dual Mode Adaptor Helper Functions Reference</title> | ||
1632 | !Pdrivers/gpu/drm/drm_dp_dual_mode_helper.c dp dual mode helpers | ||
1633 | !Iinclude/drm/drm_dp_dual_mode_helper.h | ||
1634 | !Edrivers/gpu/drm/drm_dp_dual_mode_helper.c | ||
1635 | </sect2> | ||
1636 | <sect2> | ||
1631 | <title>Display Port MST Helper Functions Reference</title> | 1637 | <title>Display Port MST Helper Functions Reference</title> |
1632 | !Pdrivers/gpu/drm/drm_dp_mst_topology.c dp mst helper | 1638 | !Pdrivers/gpu/drm/drm_dp_mst_topology.c dp mst helper |
1633 | !Iinclude/drm/drm_dp_mst_helper.h | 1639 | !Iinclude/drm/drm_dp_mst_helper.h |
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 2bd3e5aa43c6..be43afb08c69 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile | |||
@@ -23,7 +23,7 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o | |||
23 | 23 | ||
24 | drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ | 24 | drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ |
25 | drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \ | 25 | drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \ |
26 | drm_kms_helper_common.o | 26 | drm_kms_helper_common.o drm_dp_dual_mode_helper.o |
27 | 27 | ||
28 | drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o | 28 | drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o |
29 | drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o | 29 | drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o |
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig index ca77ec10147c..e503e3d6d920 100644 --- a/drivers/gpu/drm/amd/acp/Kconfig +++ b/drivers/gpu/drm/amd/acp/Kconfig | |||
@@ -2,6 +2,7 @@ menu "ACP (Audio CoProcessor) Configuration" | |||
2 | 2 | ||
3 | config DRM_AMD_ACP | 3 | config DRM_AMD_ACP |
4 | bool "Enable AMD Audio CoProcessor IP support" | 4 | bool "Enable AMD Audio CoProcessor IP support" |
5 | depends on DRM_AMDGPU | ||
5 | select MFD_CORE | 6 | select MFD_CORE |
6 | select PM_GENERIC_DOMAINS if PM | 7 | select PM_GENERIC_DOMAINS if PM |
7 | help | 8 | help |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 2a009c398dcb..992f00b65be4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -602,6 +602,8 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync); | |||
602 | void amdgpu_sync_free(struct amdgpu_sync *sync); | 602 | void amdgpu_sync_free(struct amdgpu_sync *sync); |
603 | int amdgpu_sync_init(void); | 603 | int amdgpu_sync_init(void); |
604 | void amdgpu_sync_fini(void); | 604 | void amdgpu_sync_fini(void); |
605 | int amdgpu_fence_slab_init(void); | ||
606 | void amdgpu_fence_slab_fini(void); | ||
605 | 607 | ||
606 | /* | 608 | /* |
607 | * GART structures, functions & helpers | 609 | * GART structures, functions & helpers |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 60a0c9ac11b2..cb07da41152b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | |||
@@ -194,12 +194,12 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector) | |||
194 | bpc = 8; | 194 | bpc = 8; |
195 | DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n", | 195 | DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n", |
196 | connector->name, bpc); | 196 | connector->name, bpc); |
197 | } else if (bpc > 8) { | ||
198 | /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */ | ||
199 | DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n", | ||
200 | connector->name); | ||
201 | bpc = 8; | ||
202 | } | 197 | } |
198 | } else if (bpc > 8) { | ||
199 | /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */ | ||
200 | DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n", | ||
201 | connector->name); | ||
202 | bpc = 8; | ||
203 | } | 203 | } |
204 | } | 204 | } |
205 | 205 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 1dab5f2b725b..f888c015f76c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | |||
@@ -50,9 +50,11 @@ | |||
50 | * KMS wrapper. | 50 | * KMS wrapper. |
51 | * - 3.0.0 - initial driver | 51 | * - 3.0.0 - initial driver |
52 | * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP) | 52 | * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP) |
53 | * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same | ||
54 | * at the end of IBs. | ||
53 | */ | 55 | */ |
54 | #define KMS_DRIVER_MAJOR 3 | 56 | #define KMS_DRIVER_MAJOR 3 |
55 | #define KMS_DRIVER_MINOR 1 | 57 | #define KMS_DRIVER_MINOR 2 |
56 | #define KMS_DRIVER_PATCHLEVEL 0 | 58 | #define KMS_DRIVER_PATCHLEVEL 0 |
57 | 59 | ||
58 | int amdgpu_vram_limit = 0; | 60 | int amdgpu_vram_limit = 0; |
@@ -279,14 +281,26 @@ static const struct pci_device_id pciidlist[] = { | |||
279 | {0x1002, 0x98E4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_STONEY|AMD_IS_APU}, | 281 | {0x1002, 0x98E4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_STONEY|AMD_IS_APU}, |
280 | /* Polaris11 */ | 282 | /* Polaris11 */ |
281 | {0x1002, 0x67E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, | 283 | {0x1002, 0x67E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, |
282 | {0x1002, 0x67E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, | 284 | {0x1002, 0x67E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, |
283 | {0x1002, 0x67E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, | 285 | {0x1002, 0x67E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, |
284 | {0x1002, 0x67E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, | ||
285 | {0x1002, 0x67EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, | 286 | {0x1002, 0x67EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, |
287 | {0x1002, 0x67EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, | ||
286 | {0x1002, 0x67FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, | 288 | {0x1002, 0x67FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, |
289 | {0x1002, 0x67E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, | ||
290 | {0x1002, 0x67E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, | ||
291 | {0x1002, 0x67E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11}, | ||
287 | /* Polaris10 */ | 292 | /* Polaris10 */ |
288 | {0x1002, 0x67C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, | 293 | {0x1002, 0x67C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, |
294 | {0x1002, 0x67C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, | ||
295 | {0x1002, 0x67C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, | ||
296 | {0x1002, 0x67C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, | ||
297 | {0x1002, 0x67C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, | ||
289 | {0x1002, 0x67DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, | 298 | {0x1002, 0x67DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, |
299 | {0x1002, 0x67C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, | ||
300 | {0x1002, 0x67C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, | ||
301 | {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, | ||
302 | {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, | ||
303 | {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, | ||
290 | 304 | ||
291 | {0, 0, 0} | 305 | {0, 0, 0} |
292 | }; | 306 | }; |
@@ -563,9 +577,12 @@ static struct pci_driver amdgpu_kms_pci_driver = { | |||
563 | .driver.pm = &amdgpu_pm_ops, | 577 | .driver.pm = &amdgpu_pm_ops, |
564 | }; | 578 | }; |
565 | 579 | ||
580 | |||
581 | |||
566 | static int __init amdgpu_init(void) | 582 | static int __init amdgpu_init(void) |
567 | { | 583 | { |
568 | amdgpu_sync_init(); | 584 | amdgpu_sync_init(); |
585 | amdgpu_fence_slab_init(); | ||
569 | if (vgacon_text_force()) { | 586 | if (vgacon_text_force()) { |
570 | DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n"); | 587 | DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n"); |
571 | return -EINVAL; | 588 | return -EINVAL; |
@@ -576,7 +593,6 @@ static int __init amdgpu_init(void) | |||
576 | driver->driver_features |= DRIVER_MODESET; | 593 | driver->driver_features |= DRIVER_MODESET; |
577 | driver->num_ioctls = amdgpu_max_kms_ioctl; | 594 | driver->num_ioctls = amdgpu_max_kms_ioctl; |
578 | amdgpu_register_atpx_handler(); | 595 | amdgpu_register_atpx_handler(); |
579 | |||
580 | /* let modprobe override vga console setting */ | 596 | /* let modprobe override vga console setting */ |
581 | return drm_pci_init(driver, pdriver); | 597 | return drm_pci_init(driver, pdriver); |
582 | } | 598 | } |
@@ -587,6 +603,7 @@ static void __exit amdgpu_exit(void) | |||
587 | drm_pci_exit(driver, pdriver); | 603 | drm_pci_exit(driver, pdriver); |
588 | amdgpu_unregister_atpx_handler(); | 604 | amdgpu_unregister_atpx_handler(); |
589 | amdgpu_sync_fini(); | 605 | amdgpu_sync_fini(); |
606 | amdgpu_fence_slab_fini(); | ||
590 | } | 607 | } |
591 | 608 | ||
592 | module_init(amdgpu_init); | 609 | module_init(amdgpu_init); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index ba9c04283d01..d1558768cfb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | |||
@@ -55,8 +55,21 @@ struct amdgpu_fence { | |||
55 | }; | 55 | }; |
56 | 56 | ||
57 | static struct kmem_cache *amdgpu_fence_slab; | 57 | static struct kmem_cache *amdgpu_fence_slab; |
58 | static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0); | ||
59 | 58 | ||
59 | int amdgpu_fence_slab_init(void) | ||
60 | { | ||
61 | amdgpu_fence_slab = kmem_cache_create( | ||
62 | "amdgpu_fence", sizeof(struct amdgpu_fence), 0, | ||
63 | SLAB_HWCACHE_ALIGN, NULL); | ||
64 | if (!amdgpu_fence_slab) | ||
65 | return -ENOMEM; | ||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | void amdgpu_fence_slab_fini(void) | ||
70 | { | ||
71 | kmem_cache_destroy(amdgpu_fence_slab); | ||
72 | } | ||
60 | /* | 73 | /* |
61 | * Cast helper | 74 | * Cast helper |
62 | */ | 75 | */ |
@@ -396,13 +409,6 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, | |||
396 | */ | 409 | */ |
397 | int amdgpu_fence_driver_init(struct amdgpu_device *adev) | 410 | int amdgpu_fence_driver_init(struct amdgpu_device *adev) |
398 | { | 411 | { |
399 | if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) { | ||
400 | amdgpu_fence_slab = kmem_cache_create( | ||
401 | "amdgpu_fence", sizeof(struct amdgpu_fence), 0, | ||
402 | SLAB_HWCACHE_ALIGN, NULL); | ||
403 | if (!amdgpu_fence_slab) | ||
404 | return -ENOMEM; | ||
405 | } | ||
406 | if (amdgpu_debugfs_fence_init(adev)) | 412 | if (amdgpu_debugfs_fence_init(adev)) |
407 | dev_err(adev->dev, "fence debugfs file creation failed\n"); | 413 | dev_err(adev->dev, "fence debugfs file creation failed\n"); |
408 | 414 | ||
@@ -437,13 +443,10 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) | |||
437 | amd_sched_fini(&ring->sched); | 443 | amd_sched_fini(&ring->sched); |
438 | del_timer_sync(&ring->fence_drv.fallback_timer); | 444 | del_timer_sync(&ring->fence_drv.fallback_timer); |
439 | for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) | 445 | for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) |
440 | fence_put(ring->fence_drv.fences[i]); | 446 | fence_put(ring->fence_drv.fences[j]); |
441 | kfree(ring->fence_drv.fences); | 447 | kfree(ring->fence_drv.fences); |
442 | ring->fence_drv.initialized = false; | 448 | ring->fence_drv.initialized = false; |
443 | } | 449 | } |
444 | |||
445 | if (atomic_dec_and_test(&amdgpu_fence_slab_ref)) | ||
446 | kmem_cache_destroy(amdgpu_fence_slab); | ||
447 | } | 450 | } |
448 | 451 | ||
449 | /** | 452 | /** |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ea708cb94862..9f36ed30ba11 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
@@ -53,6 +53,18 @@ | |||
53 | /* Special value that no flush is necessary */ | 53 | /* Special value that no flush is necessary */ |
54 | #define AMDGPU_VM_NO_FLUSH (~0ll) | 54 | #define AMDGPU_VM_NO_FLUSH (~0ll) |
55 | 55 | ||
56 | /* Local structure. Encapsulate some VM table update parameters to reduce | ||
57 | * the number of function parameters | ||
58 | */ | ||
59 | struct amdgpu_vm_update_params { | ||
60 | /* address where to copy page table entries from */ | ||
61 | uint64_t src; | ||
62 | /* DMA addresses to use for mapping */ | ||
63 | dma_addr_t *pages_addr; | ||
64 | /* indirect buffer to fill with commands */ | ||
65 | struct amdgpu_ib *ib; | ||
66 | }; | ||
67 | |||
56 | /** | 68 | /** |
57 | * amdgpu_vm_num_pde - return the number of page directory entries | 69 | * amdgpu_vm_num_pde - return the number of page directory entries |
58 | * | 70 | * |
@@ -389,9 +401,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, | |||
389 | * amdgpu_vm_update_pages - helper to call the right asic function | 401 | * amdgpu_vm_update_pages - helper to call the right asic function |
390 | * | 402 | * |
391 | * @adev: amdgpu_device pointer | 403 | * @adev: amdgpu_device pointer |
392 | * @src: address where to copy page table entries from | 404 | * @vm_update_params: see amdgpu_vm_update_params definition |
393 | * @pages_addr: DMA addresses to use for mapping | ||
394 | * @ib: indirect buffer to fill with commands | ||
395 | * @pe: addr of the page entry | 405 | * @pe: addr of the page entry |
396 | * @addr: dst addr to write into pe | 406 | * @addr: dst addr to write into pe |
397 | * @count: number of page entries to update | 407 | * @count: number of page entries to update |
@@ -402,29 +412,29 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, | |||
402 | * to setup the page table using the DMA. | 412 | * to setup the page table using the DMA. |
403 | */ | 413 | */ |
404 | static void amdgpu_vm_update_pages(struct amdgpu_device *adev, | 414 | static void amdgpu_vm_update_pages(struct amdgpu_device *adev, |
405 | uint64_t src, | 415 | struct amdgpu_vm_update_params |
406 | dma_addr_t *pages_addr, | 416 | *vm_update_params, |
407 | struct amdgpu_ib *ib, | ||
408 | uint64_t pe, uint64_t addr, | 417 | uint64_t pe, uint64_t addr, |
409 | unsigned count, uint32_t incr, | 418 | unsigned count, uint32_t incr, |
410 | uint32_t flags) | 419 | uint32_t flags) |
411 | { | 420 | { |
412 | trace_amdgpu_vm_set_page(pe, addr, count, incr, flags); | 421 | trace_amdgpu_vm_set_page(pe, addr, count, incr, flags); |
413 | 422 | ||
414 | if (src) { | 423 | if (vm_update_params->src) { |
415 | src += (addr >> 12) * 8; | 424 | amdgpu_vm_copy_pte(adev, vm_update_params->ib, |
416 | amdgpu_vm_copy_pte(adev, ib, pe, src, count); | 425 | pe, (vm_update_params->src + (addr >> 12) * 8), count); |
417 | 426 | ||
418 | } else if (pages_addr) { | 427 | } else if (vm_update_params->pages_addr) { |
419 | amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr, | 428 | amdgpu_vm_write_pte(adev, vm_update_params->ib, |
420 | count, incr, flags); | 429 | vm_update_params->pages_addr, |
430 | pe, addr, count, incr, flags); | ||
421 | 431 | ||
422 | } else if (count < 3) { | 432 | } else if (count < 3) { |
423 | amdgpu_vm_write_pte(adev, ib, NULL, pe, addr, | 433 | amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr, |
424 | count, incr, flags); | 434 | count, incr, flags); |
425 | 435 | ||
426 | } else { | 436 | } else { |
427 | amdgpu_vm_set_pte_pde(adev, ib, pe, addr, | 437 | amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr, |
428 | count, incr, flags); | 438 | count, incr, flags); |
429 | } | 439 | } |
430 | } | 440 | } |
@@ -444,10 +454,12 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |||
444 | struct amdgpu_ring *ring; | 454 | struct amdgpu_ring *ring; |
445 | struct fence *fence = NULL; | 455 | struct fence *fence = NULL; |
446 | struct amdgpu_job *job; | 456 | struct amdgpu_job *job; |
457 | struct amdgpu_vm_update_params vm_update_params; | ||
447 | unsigned entries; | 458 | unsigned entries; |
448 | uint64_t addr; | 459 | uint64_t addr; |
449 | int r; | 460 | int r; |
450 | 461 | ||
462 | memset(&vm_update_params, 0, sizeof(vm_update_params)); | ||
451 | ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); | 463 | ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); |
452 | 464 | ||
453 | r = reservation_object_reserve_shared(bo->tbo.resv); | 465 | r = reservation_object_reserve_shared(bo->tbo.resv); |
@@ -465,7 +477,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |||
465 | if (r) | 477 | if (r) |
466 | goto error; | 478 | goto error; |
467 | 479 | ||
468 | amdgpu_vm_update_pages(adev, 0, NULL, &job->ibs[0], addr, 0, entries, | 480 | vm_update_params.ib = &job->ibs[0]; |
481 | amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries, | ||
469 | 0, 0); | 482 | 0, 0); |
470 | amdgpu_ring_pad_ib(ring, &job->ibs[0]); | 483 | amdgpu_ring_pad_ib(ring, &job->ibs[0]); |
471 | 484 | ||
@@ -538,11 +551,12 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | |||
538 | uint64_t last_pde = ~0, last_pt = ~0; | 551 | uint64_t last_pde = ~0, last_pt = ~0; |
539 | unsigned count = 0, pt_idx, ndw; | 552 | unsigned count = 0, pt_idx, ndw; |
540 | struct amdgpu_job *job; | 553 | struct amdgpu_job *job; |
541 | struct amdgpu_ib *ib; | 554 | struct amdgpu_vm_update_params vm_update_params; |
542 | struct fence *fence = NULL; | 555 | struct fence *fence = NULL; |
543 | 556 | ||
544 | int r; | 557 | int r; |
545 | 558 | ||
559 | memset(&vm_update_params, 0, sizeof(vm_update_params)); | ||
546 | ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); | 560 | ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); |
547 | 561 | ||
548 | /* padding, etc. */ | 562 | /* padding, etc. */ |
@@ -555,7 +569,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | |||
555 | if (r) | 569 | if (r) |
556 | return r; | 570 | return r; |
557 | 571 | ||
558 | ib = &job->ibs[0]; | 572 | vm_update_params.ib = &job->ibs[0]; |
559 | 573 | ||
560 | /* walk over the address space and update the page directory */ | 574 | /* walk over the address space and update the page directory */ |
561 | for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { | 575 | for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { |
@@ -575,7 +589,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | |||
575 | ((last_pt + incr * count) != pt)) { | 589 | ((last_pt + incr * count) != pt)) { |
576 | 590 | ||
577 | if (count) { | 591 | if (count) { |
578 | amdgpu_vm_update_pages(adev, 0, NULL, ib, | 592 | amdgpu_vm_update_pages(adev, &vm_update_params, |
579 | last_pde, last_pt, | 593 | last_pde, last_pt, |
580 | count, incr, | 594 | count, incr, |
581 | AMDGPU_PTE_VALID); | 595 | AMDGPU_PTE_VALID); |
@@ -590,14 +604,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | |||
590 | } | 604 | } |
591 | 605 | ||
592 | if (count) | 606 | if (count) |
593 | amdgpu_vm_update_pages(adev, 0, NULL, ib, last_pde, last_pt, | 607 | amdgpu_vm_update_pages(adev, &vm_update_params, |
594 | count, incr, AMDGPU_PTE_VALID); | 608 | last_pde, last_pt, |
609 | count, incr, AMDGPU_PTE_VALID); | ||
595 | 610 | ||
596 | if (ib->length_dw != 0) { | 611 | if (vm_update_params.ib->length_dw != 0) { |
597 | amdgpu_ring_pad_ib(ring, ib); | 612 | amdgpu_ring_pad_ib(ring, vm_update_params.ib); |
598 | amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv, | 613 | amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv, |
599 | AMDGPU_FENCE_OWNER_VM); | 614 | AMDGPU_FENCE_OWNER_VM); |
600 | WARN_ON(ib->length_dw > ndw); | 615 | WARN_ON(vm_update_params.ib->length_dw > ndw); |
601 | r = amdgpu_job_submit(job, ring, &vm->entity, | 616 | r = amdgpu_job_submit(job, ring, &vm->entity, |
602 | AMDGPU_FENCE_OWNER_VM, &fence); | 617 | AMDGPU_FENCE_OWNER_VM, &fence); |
603 | if (r) | 618 | if (r) |
@@ -623,18 +638,15 @@ error_free: | |||
623 | * amdgpu_vm_frag_ptes - add fragment information to PTEs | 638 | * amdgpu_vm_frag_ptes - add fragment information to PTEs |
624 | * | 639 | * |
625 | * @adev: amdgpu_device pointer | 640 | * @adev: amdgpu_device pointer |
626 | * @src: address where to copy page table entries from | 641 | * @vm_update_params: see amdgpu_vm_update_params definition |
627 | * @pages_addr: DMA addresses to use for mapping | ||
628 | * @ib: IB for the update | ||
629 | * @pe_start: first PTE to handle | 642 | * @pe_start: first PTE to handle |
630 | * @pe_end: last PTE to handle | 643 | * @pe_end: last PTE to handle |
631 | * @addr: addr those PTEs should point to | 644 | * @addr: addr those PTEs should point to |
632 | * @flags: hw mapping flags | 645 | * @flags: hw mapping flags |
633 | */ | 646 | */ |
634 | static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, | 647 | static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, |
635 | uint64_t src, | 648 | struct amdgpu_vm_update_params |
636 | dma_addr_t *pages_addr, | 649 | *vm_update_params, |
637 | struct amdgpu_ib *ib, | ||
638 | uint64_t pe_start, uint64_t pe_end, | 650 | uint64_t pe_start, uint64_t pe_end, |
639 | uint64_t addr, uint32_t flags) | 651 | uint64_t addr, uint32_t flags) |
640 | { | 652 | { |
@@ -671,11 +683,11 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, | |||
671 | return; | 683 | return; |
672 | 684 | ||
673 | /* system pages are non continuously */ | 685 | /* system pages are non continuously */ |
674 | if (src || pages_addr || !(flags & AMDGPU_PTE_VALID) || | 686 | if (vm_update_params->src || vm_update_params->pages_addr || |
675 | (frag_start >= frag_end)) { | 687 | !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) { |
676 | 688 | ||
677 | count = (pe_end - pe_start) / 8; | 689 | count = (pe_end - pe_start) / 8; |
678 | amdgpu_vm_update_pages(adev, src, pages_addr, ib, pe_start, | 690 | amdgpu_vm_update_pages(adev, vm_update_params, pe_start, |
679 | addr, count, AMDGPU_GPU_PAGE_SIZE, | 691 | addr, count, AMDGPU_GPU_PAGE_SIZE, |
680 | flags); | 692 | flags); |
681 | return; | 693 | return; |
@@ -684,21 +696,21 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, | |||
684 | /* handle the 4K area at the beginning */ | 696 | /* handle the 4K area at the beginning */ |
685 | if (pe_start != frag_start) { | 697 | if (pe_start != frag_start) { |
686 | count = (frag_start - pe_start) / 8; | 698 | count = (frag_start - pe_start) / 8; |
687 | amdgpu_vm_update_pages(adev, 0, NULL, ib, pe_start, addr, | 699 | amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr, |
688 | count, AMDGPU_GPU_PAGE_SIZE, flags); | 700 | count, AMDGPU_GPU_PAGE_SIZE, flags); |
689 | addr += AMDGPU_GPU_PAGE_SIZE * count; | 701 | addr += AMDGPU_GPU_PAGE_SIZE * count; |
690 | } | 702 | } |
691 | 703 | ||
692 | /* handle the area in the middle */ | 704 | /* handle the area in the middle */ |
693 | count = (frag_end - frag_start) / 8; | 705 | count = (frag_end - frag_start) / 8; |
694 | amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_start, addr, count, | 706 | amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count, |
695 | AMDGPU_GPU_PAGE_SIZE, flags | frag_flags); | 707 | AMDGPU_GPU_PAGE_SIZE, flags | frag_flags); |
696 | 708 | ||
697 | /* handle the 4K area at the end */ | 709 | /* handle the 4K area at the end */ |
698 | if (frag_end != pe_end) { | 710 | if (frag_end != pe_end) { |
699 | addr += AMDGPU_GPU_PAGE_SIZE * count; | 711 | addr += AMDGPU_GPU_PAGE_SIZE * count; |
700 | count = (pe_end - frag_end) / 8; | 712 | count = (pe_end - frag_end) / 8; |
701 | amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_end, addr, | 713 | amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr, |
702 | count, AMDGPU_GPU_PAGE_SIZE, flags); | 714 | count, AMDGPU_GPU_PAGE_SIZE, flags); |
703 | } | 715 | } |
704 | } | 716 | } |
@@ -707,8 +719,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, | |||
707 | * amdgpu_vm_update_ptes - make sure that page tables are valid | 719 | * amdgpu_vm_update_ptes - make sure that page tables are valid |
708 | * | 720 | * |
709 | * @adev: amdgpu_device pointer | 721 | * @adev: amdgpu_device pointer |
710 | * @src: address where to copy page table entries from | 722 | * @vm_update_params: see amdgpu_vm_update_params definition |
711 | * @pages_addr: DMA addresses to use for mapping | ||
712 | * @vm: requested vm | 723 | * @vm: requested vm |
713 | * @start: start of GPU address range | 724 | * @start: start of GPU address range |
714 | * @end: end of GPU address range | 725 | * @end: end of GPU address range |
@@ -718,10 +729,9 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, | |||
718 | * Update the page tables in the range @start - @end. | 729 | * Update the page tables in the range @start - @end. |
719 | */ | 730 | */ |
720 | static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, | 731 | static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, |
721 | uint64_t src, | 732 | struct amdgpu_vm_update_params |
722 | dma_addr_t *pages_addr, | 733 | *vm_update_params, |
723 | struct amdgpu_vm *vm, | 734 | struct amdgpu_vm *vm, |
724 | struct amdgpu_ib *ib, | ||
725 | uint64_t start, uint64_t end, | 735 | uint64_t start, uint64_t end, |
726 | uint64_t dst, uint32_t flags) | 736 | uint64_t dst, uint32_t flags) |
727 | { | 737 | { |
@@ -747,7 +757,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, | |||
747 | 757 | ||
748 | if (last_pe_end != pe_start) { | 758 | if (last_pe_end != pe_start) { |
749 | 759 | ||
750 | amdgpu_vm_frag_ptes(adev, src, pages_addr, ib, | 760 | amdgpu_vm_frag_ptes(adev, vm_update_params, |
751 | last_pe_start, last_pe_end, | 761 | last_pe_start, last_pe_end, |
752 | last_dst, flags); | 762 | last_dst, flags); |
753 | 763 | ||
@@ -762,7 +772,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, | |||
762 | dst += nptes * AMDGPU_GPU_PAGE_SIZE; | 772 | dst += nptes * AMDGPU_GPU_PAGE_SIZE; |
763 | } | 773 | } |
764 | 774 | ||
765 | amdgpu_vm_frag_ptes(adev, src, pages_addr, ib, last_pe_start, | 775 | amdgpu_vm_frag_ptes(adev, vm_update_params, last_pe_start, |
766 | last_pe_end, last_dst, flags); | 776 | last_pe_end, last_dst, flags); |
767 | } | 777 | } |
768 | 778 | ||
@@ -794,11 +804,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, | |||
794 | void *owner = AMDGPU_FENCE_OWNER_VM; | 804 | void *owner = AMDGPU_FENCE_OWNER_VM; |
795 | unsigned nptes, ncmds, ndw; | 805 | unsigned nptes, ncmds, ndw; |
796 | struct amdgpu_job *job; | 806 | struct amdgpu_job *job; |
797 | struct amdgpu_ib *ib; | 807 | struct amdgpu_vm_update_params vm_update_params; |
798 | struct fence *f = NULL; | 808 | struct fence *f = NULL; |
799 | int r; | 809 | int r; |
800 | 810 | ||
801 | ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); | 811 | ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); |
812 | memset(&vm_update_params, 0, sizeof(vm_update_params)); | ||
813 | vm_update_params.src = src; | ||
814 | vm_update_params.pages_addr = pages_addr; | ||
802 | 815 | ||
803 | /* sync to everything on unmapping */ | 816 | /* sync to everything on unmapping */ |
804 | if (!(flags & AMDGPU_PTE_VALID)) | 817 | if (!(flags & AMDGPU_PTE_VALID)) |
@@ -815,11 +828,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, | |||
815 | /* padding, etc. */ | 828 | /* padding, etc. */ |
816 | ndw = 64; | 829 | ndw = 64; |
817 | 830 | ||
818 | if (src) { | 831 | if (vm_update_params.src) { |
819 | /* only copy commands needed */ | 832 | /* only copy commands needed */ |
820 | ndw += ncmds * 7; | 833 | ndw += ncmds * 7; |
821 | 834 | ||
822 | } else if (pages_addr) { | 835 | } else if (vm_update_params.pages_addr) { |
823 | /* header for write data commands */ | 836 | /* header for write data commands */ |
824 | ndw += ncmds * 4; | 837 | ndw += ncmds * 4; |
825 | 838 | ||
@@ -838,7 +851,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, | |||
838 | if (r) | 851 | if (r) |
839 | return r; | 852 | return r; |
840 | 853 | ||
841 | ib = &job->ibs[0]; | 854 | vm_update_params.ib = &job->ibs[0]; |
842 | 855 | ||
843 | r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv, | 856 | r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv, |
844 | owner); | 857 | owner); |
@@ -849,11 +862,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, | |||
849 | if (r) | 862 | if (r) |
850 | goto error_free; | 863 | goto error_free; |
851 | 864 | ||
852 | amdgpu_vm_update_ptes(adev, src, pages_addr, vm, ib, start, | 865 | amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start, |
853 | last + 1, addr, flags); | 866 | last + 1, addr, flags); |
854 | 867 | ||
855 | amdgpu_ring_pad_ib(ring, ib); | 868 | amdgpu_ring_pad_ib(ring, vm_update_params.ib); |
856 | WARN_ON(ib->length_dw > ndw); | 869 | WARN_ON(vm_update_params.ib->length_dw > ndw); |
857 | r = amdgpu_job_submit(job, ring, &vm->entity, | 870 | r = amdgpu_job_submit(job, ring, &vm->entity, |
858 | AMDGPU_FENCE_OWNER_VM, &f); | 871 | AMDGPU_FENCE_OWNER_VM, &f); |
859 | if (r) | 872 | if (r) |
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index 845c21b1b2ee..be3d6f79a864 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c | |||
@@ -103,7 +103,6 @@ static void cik_ih_disable_interrupts(struct amdgpu_device *adev) | |||
103 | */ | 103 | */ |
104 | static int cik_ih_irq_init(struct amdgpu_device *adev) | 104 | static int cik_ih_irq_init(struct amdgpu_device *adev) |
105 | { | 105 | { |
106 | int ret = 0; | ||
107 | int rb_bufsz; | 106 | int rb_bufsz; |
108 | u32 interrupt_cntl, ih_cntl, ih_rb_cntl; | 107 | u32 interrupt_cntl, ih_cntl, ih_rb_cntl; |
109 | u64 wptr_off; | 108 | u64 wptr_off; |
@@ -156,7 +155,7 @@ static int cik_ih_irq_init(struct amdgpu_device *adev) | |||
156 | /* enable irqs */ | 155 | /* enable irqs */ |
157 | cik_ih_enable_interrupts(adev); | 156 | cik_ih_enable_interrupts(adev); |
158 | 157 | ||
159 | return ret; | 158 | return 0; |
160 | } | 159 | } |
161 | 160 | ||
162 | /** | 161 | /** |
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index fa4449e126e6..933e425a8154 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c | |||
@@ -1579,7 +1579,6 @@ static int cz_dpm_update_sclk_limit(struct amdgpu_device *adev) | |||
1579 | 1579 | ||
1580 | static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev) | 1580 | static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev) |
1581 | { | 1581 | { |
1582 | int ret = 0; | ||
1583 | struct cz_power_info *pi = cz_get_pi(adev); | 1582 | struct cz_power_info *pi = cz_get_pi(adev); |
1584 | 1583 | ||
1585 | if (pi->caps_sclk_ds) { | 1584 | if (pi->caps_sclk_ds) { |
@@ -1588,20 +1587,19 @@ static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev) | |||
1588 | CZ_MIN_DEEP_SLEEP_SCLK); | 1587 | CZ_MIN_DEEP_SLEEP_SCLK); |
1589 | } | 1588 | } |
1590 | 1589 | ||
1591 | return ret; | 1590 | return 0; |
1592 | } | 1591 | } |
1593 | 1592 | ||
1594 | /* ?? without dal support, is this still needed in setpowerstate list*/ | 1593 | /* ?? without dal support, is this still needed in setpowerstate list*/ |
1595 | static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev) | 1594 | static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev) |
1596 | { | 1595 | { |
1597 | int ret = 0; | ||
1598 | struct cz_power_info *pi = cz_get_pi(adev); | 1596 | struct cz_power_info *pi = cz_get_pi(adev); |
1599 | 1597 | ||
1600 | cz_send_msg_to_smc_with_parameter(adev, | 1598 | cz_send_msg_to_smc_with_parameter(adev, |
1601 | PPSMC_MSG_SetWatermarkFrequency, | 1599 | PPSMC_MSG_SetWatermarkFrequency, |
1602 | pi->sclk_dpm.soft_max_clk); | 1600 | pi->sclk_dpm.soft_max_clk); |
1603 | 1601 | ||
1604 | return ret; | 1602 | return 0; |
1605 | } | 1603 | } |
1606 | 1604 | ||
1607 | static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev) | 1605 | static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev) |
@@ -1636,7 +1634,6 @@ static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device *adev, | |||
1636 | 1634 | ||
1637 | static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev) | 1635 | static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev) |
1638 | { | 1636 | { |
1639 | int ret = 0; | ||
1640 | struct cz_power_info *pi = cz_get_pi(adev); | 1637 | struct cz_power_info *pi = cz_get_pi(adev); |
1641 | struct cz_ps *ps = &pi->requested_ps; | 1638 | struct cz_ps *ps = &pi->requested_ps; |
1642 | 1639 | ||
@@ -1647,21 +1644,19 @@ static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev) | |||
1647 | cz_dpm_nbdpm_lm_pstate_enable(adev, true); | 1644 | cz_dpm_nbdpm_lm_pstate_enable(adev, true); |
1648 | } | 1645 | } |
1649 | 1646 | ||
1650 | return ret; | 1647 | return 0; |
1651 | } | 1648 | } |
1652 | 1649 | ||
1653 | /* with dpm enabled */ | 1650 | /* with dpm enabled */ |
1654 | static int cz_dpm_set_power_state(struct amdgpu_device *adev) | 1651 | static int cz_dpm_set_power_state(struct amdgpu_device *adev) |
1655 | { | 1652 | { |
1656 | int ret = 0; | ||
1657 | |||
1658 | cz_dpm_update_sclk_limit(adev); | 1653 | cz_dpm_update_sclk_limit(adev); |
1659 | cz_dpm_set_deep_sleep_sclk_threshold(adev); | 1654 | cz_dpm_set_deep_sleep_sclk_threshold(adev); |
1660 | cz_dpm_set_watermark_threshold(adev); | 1655 | cz_dpm_set_watermark_threshold(adev); |
1661 | cz_dpm_enable_nbdpm(adev); | 1656 | cz_dpm_enable_nbdpm(adev); |
1662 | cz_dpm_update_low_memory_pstate(adev); | 1657 | cz_dpm_update_low_memory_pstate(adev); |
1663 | 1658 | ||
1664 | return ret; | 1659 | return 0; |
1665 | } | 1660 | } |
1666 | 1661 | ||
1667 | static void cz_dpm_post_set_power_state(struct amdgpu_device *adev) | 1662 | static void cz_dpm_post_set_power_state(struct amdgpu_device *adev) |
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index 863cb16f6126..3d23a70b6432 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c | |||
@@ -103,7 +103,6 @@ static void cz_ih_disable_interrupts(struct amdgpu_device *adev) | |||
103 | */ | 103 | */ |
104 | static int cz_ih_irq_init(struct amdgpu_device *adev) | 104 | static int cz_ih_irq_init(struct amdgpu_device *adev) |
105 | { | 105 | { |
106 | int ret = 0; | ||
107 | int rb_bufsz; | 106 | int rb_bufsz; |
108 | u32 interrupt_cntl, ih_cntl, ih_rb_cntl; | 107 | u32 interrupt_cntl, ih_cntl, ih_rb_cntl; |
109 | u64 wptr_off; | 108 | u64 wptr_off; |
@@ -157,7 +156,7 @@ static int cz_ih_irq_init(struct amdgpu_device *adev) | |||
157 | /* enable interrupts */ | 156 | /* enable interrupts */ |
158 | cz_ih_enable_interrupts(adev); | 157 | cz_ih_enable_interrupts(adev); |
159 | 158 | ||
160 | return ret; | 159 | return 0; |
161 | } | 160 | } |
162 | 161 | ||
163 | /** | 162 | /** |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index c11b6007af80..af26ec0bc59d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | |||
@@ -137,7 +137,7 @@ static const u32 polaris11_golden_settings_a11[] = | |||
137 | mmDCI_CLK_CNTL, 0x00000080, 0x00000000, | 137 | mmDCI_CLK_CNTL, 0x00000080, 0x00000000, |
138 | mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, | 138 | mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, |
139 | mmFBC_DEBUG1, 0xffffffff, 0x00000008, | 139 | mmFBC_DEBUG1, 0xffffffff, 0x00000008, |
140 | mmFBC_MISC, 0x9f313fff, 0x14300008, | 140 | mmFBC_MISC, 0x9f313fff, 0x14302008, |
141 | mmHDMI_CONTROL, 0x313f031f, 0x00000011, | 141 | mmHDMI_CONTROL, 0x313f031f, 0x00000011, |
142 | }; | 142 | }; |
143 | 143 | ||
@@ -145,7 +145,7 @@ static const u32 polaris10_golden_settings_a11[] = | |||
145 | { | 145 | { |
146 | mmDCI_CLK_CNTL, 0x00000080, 0x00000000, | 146 | mmDCI_CLK_CNTL, 0x00000080, 0x00000000, |
147 | mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, | 147 | mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, |
148 | mmFBC_MISC, 0x9f313fff, 0x14300008, | 148 | mmFBC_MISC, 0x9f313fff, 0x14302008, |
149 | mmHDMI_CONTROL, 0x313f031f, 0x00000011, | 149 | mmHDMI_CONTROL, 0x313f031f, 0x00000011, |
150 | }; | 150 | }; |
151 | 151 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 92647fbf5b8b..f19bab68fd83 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
@@ -267,10 +267,13 @@ static const u32 tonga_mgcg_cgcg_init[] = | |||
267 | 267 | ||
268 | static const u32 golden_settings_polaris11_a11[] = | 268 | static const u32 golden_settings_polaris11_a11[] = |
269 | { | 269 | { |
270 | mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208, | ||
270 | mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, | 271 | mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, |
271 | mmDB_DEBUG2, 0xf00fffff, 0x00000400, | 272 | mmDB_DEBUG2, 0xf00fffff, 0x00000400, |
272 | mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, | 273 | mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, |
273 | mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, | 274 | mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, |
275 | mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012, | ||
276 | mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000, | ||
274 | mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c, | 277 | mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c, |
275 | mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c, | 278 | mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c, |
276 | mmSQ_CONFIG, 0x07f80000, 0x07180000, | 279 | mmSQ_CONFIG, 0x07f80000, 0x07180000, |
@@ -284,8 +287,6 @@ static const u32 golden_settings_polaris11_a11[] = | |||
284 | static const u32 polaris11_golden_common_all[] = | 287 | static const u32 polaris11_golden_common_all[] = |
285 | { | 288 | { |
286 | mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, | 289 | mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, |
287 | mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012, | ||
288 | mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000, | ||
289 | mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002, | 290 | mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002, |
290 | mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, | 291 | mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, |
291 | mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, | 292 | mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, |
@@ -296,6 +297,7 @@ static const u32 polaris11_golden_common_all[] = | |||
296 | static const u32 golden_settings_polaris10_a11[] = | 297 | static const u32 golden_settings_polaris10_a11[] = |
297 | { | 298 | { |
298 | mmATC_MISC_CG, 0x000c0fc0, 0x000c0200, | 299 | mmATC_MISC_CG, 0x000c0fc0, 0x000c0200, |
300 | mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208, | ||
299 | mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, | 301 | mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, |
300 | mmDB_DEBUG2, 0xf00fffff, 0x00000400, | 302 | mmDB_DEBUG2, 0xf00fffff, 0x00000400, |
301 | mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, | 303 | mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, |
@@ -5725,6 +5727,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, | |||
5725 | amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); | 5727 | amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); |
5726 | amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | | 5728 | amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | |
5727 | EOP_TC_ACTION_EN | | 5729 | EOP_TC_ACTION_EN | |
5730 | EOP_TC_WB_ACTION_EN | | ||
5728 | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | | 5731 | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | |
5729 | EVENT_INDEX(5))); | 5732 | EVENT_INDEX(5))); |
5730 | amdgpu_ring_write(ring, addr & 0xfffffffc); | 5733 | amdgpu_ring_write(ring, addr & 0xfffffffc); |
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index 39bfc52d0b42..3b8906ce3511 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c | |||
@@ -103,7 +103,6 @@ static void iceland_ih_disable_interrupts(struct amdgpu_device *adev) | |||
103 | */ | 103 | */ |
104 | static int iceland_ih_irq_init(struct amdgpu_device *adev) | 104 | static int iceland_ih_irq_init(struct amdgpu_device *adev) |
105 | { | 105 | { |
106 | int ret = 0; | ||
107 | int rb_bufsz; | 106 | int rb_bufsz; |
108 | u32 interrupt_cntl, ih_cntl, ih_rb_cntl; | 107 | u32 interrupt_cntl, ih_cntl, ih_rb_cntl; |
109 | u64 wptr_off; | 108 | u64 wptr_off; |
@@ -157,7 +156,7 @@ static int iceland_ih_irq_init(struct amdgpu_device *adev) | |||
157 | /* enable interrupts */ | 156 | /* enable interrupts */ |
158 | iceland_ih_enable_interrupts(adev); | 157 | iceland_ih_enable_interrupts(adev); |
159 | 158 | ||
160 | return ret; | 159 | return 0; |
161 | } | 160 | } |
162 | 161 | ||
163 | /** | 162 | /** |
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index b45f54714574..a789a863d677 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c | |||
@@ -2252,7 +2252,7 @@ static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, | |||
2252 | if (pi->caps_stable_p_state) { | 2252 | if (pi->caps_stable_p_state) { |
2253 | stable_p_state_sclk = (max_limits->sclk * 75) / 100; | 2253 | stable_p_state_sclk = (max_limits->sclk * 75) / 100; |
2254 | 2254 | ||
2255 | for (i = table->count - 1; i >= 0; i++) { | 2255 | for (i = table->count - 1; i >= 0; i--) { |
2256 | if (stable_p_state_sclk >= table->entries[i].clk) { | 2256 | if (stable_p_state_sclk >= table->entries[i].clk) { |
2257 | stable_p_state_sclk = table->entries[i].clk; | 2257 | stable_p_state_sclk = table->entries[i].clk; |
2258 | break; | 2258 | break; |
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 063f08a9957a..31d99b0010f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | |||
@@ -109,10 +109,12 @@ static const u32 fiji_mgcg_cgcg_init[] = | |||
109 | static const u32 golden_settings_polaris11_a11[] = | 109 | static const u32 golden_settings_polaris11_a11[] = |
110 | { | 110 | { |
111 | mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, | 111 | mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, |
112 | mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000, | ||
112 | mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100, | 113 | mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100, |
113 | mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100, | 114 | mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100, |
114 | mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100, | 115 | mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100, |
115 | mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007, | 116 | mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007, |
117 | mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000, | ||
116 | mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100, | 118 | mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100, |
117 | mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100, | 119 | mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100, |
118 | mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100, | 120 | mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100, |
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index f036af937fbc..c92055805a45 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c | |||
@@ -99,7 +99,6 @@ static void tonga_ih_disable_interrupts(struct amdgpu_device *adev) | |||
99 | */ | 99 | */ |
100 | static int tonga_ih_irq_init(struct amdgpu_device *adev) | 100 | static int tonga_ih_irq_init(struct amdgpu_device *adev) |
101 | { | 101 | { |
102 | int ret = 0; | ||
103 | int rb_bufsz; | 102 | int rb_bufsz; |
104 | u32 interrupt_cntl, ih_rb_cntl, ih_doorbell_rtpr; | 103 | u32 interrupt_cntl, ih_rb_cntl, ih_doorbell_rtpr; |
105 | u64 wptr_off; | 104 | u64 wptr_off; |
@@ -165,7 +164,7 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev) | |||
165 | /* enable interrupts */ | 164 | /* enable interrupts */ |
166 | tonga_ih_enable_interrupts(adev); | 165 | tonga_ih_enable_interrupts(adev); |
167 | 166 | ||
168 | return ret; | 167 | return 0; |
169 | } | 168 | } |
170 | 169 | ||
171 | /** | 170 | /** |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c index c94f9faa220a..24a16e49b571 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c | |||
@@ -3573,46 +3573,11 @@ static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr) | |||
3573 | return 0; | 3573 | return 0; |
3574 | } | 3574 | } |
3575 | 3575 | ||
3576 | static void fiji_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr) | ||
3577 | { | ||
3578 | struct phm_ppt_v1_information *table_info = | ||
3579 | (struct phm_ppt_v1_information *)hwmgr->pptable; | ||
3580 | struct phm_clock_voltage_dependency_table *table = | ||
3581 | table_info->vddc_dep_on_dal_pwrl; | ||
3582 | struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table; | ||
3583 | enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level; | ||
3584 | uint32_t req_vddc = 0, req_volt, i; | ||
3585 | |||
3586 | if (!table && !(dal_power_level >= PP_DAL_POWERLEVEL_ULTRALOW && | ||
3587 | dal_power_level <= PP_DAL_POWERLEVEL_PERFORMANCE)) | ||
3588 | return; | ||
3589 | |||
3590 | for (i= 0; i < table->count; i++) { | ||
3591 | if (dal_power_level == table->entries[i].clk) { | ||
3592 | req_vddc = table->entries[i].v; | ||
3593 | break; | ||
3594 | } | ||
3595 | } | ||
3596 | |||
3597 | vddc_table = table_info->vdd_dep_on_sclk; | ||
3598 | for (i= 0; i < vddc_table->count; i++) { | ||
3599 | if (req_vddc <= vddc_table->entries[i].vddc) { | ||
3600 | req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE) | ||
3601 | << VDDC_SHIFT; | ||
3602 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | ||
3603 | PPSMC_MSG_VddC_Request, req_volt); | ||
3604 | return; | ||
3605 | } | ||
3606 | } | ||
3607 | printk(KERN_ERR "DAL requested level can not" | ||
3608 | " found a available voltage in VDDC DPM Table \n"); | ||
3609 | } | ||
3610 | |||
3611 | static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr) | 3576 | static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr) |
3612 | { | 3577 | { |
3613 | struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); | 3578 | struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); |
3614 | 3579 | ||
3615 | fiji_apply_dal_min_voltage_request(hwmgr); | 3580 | phm_apply_dal_min_voltage_request(hwmgr); |
3616 | 3581 | ||
3617 | if (!data->sclk_dpm_key_disabled) { | 3582 | if (!data->sclk_dpm_key_disabled) { |
3618 | if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) | 3583 | if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) |
@@ -4349,7 +4314,7 @@ static int fiji_populate_and_upload_sclk_mclk_dpm_levels( | |||
4349 | 4314 | ||
4350 | if (data->need_update_smu7_dpm_table & | 4315 | if (data->need_update_smu7_dpm_table & |
4351 | (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { | 4316 | (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { |
4352 | result = fiji_populate_all_memory_levels(hwmgr); | 4317 | result = fiji_populate_all_graphic_levels(hwmgr); |
4353 | PP_ASSERT_WITH_CODE((0 == result), | 4318 | PP_ASSERT_WITH_CODE((0 == result), |
4354 | "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", | 4319 | "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", |
4355 | return result); | 4320 | return result); |
@@ -5109,11 +5074,11 @@ static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table) | |||
5109 | struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); | 5074 | struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); |
5110 | 5075 | ||
5111 | if (!data->soft_pp_table) { | 5076 | if (!data->soft_pp_table) { |
5112 | data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); | 5077 | data->soft_pp_table = kmemdup(hwmgr->soft_pp_table, |
5078 | hwmgr->soft_pp_table_size, | ||
5079 | GFP_KERNEL); | ||
5113 | if (!data->soft_pp_table) | 5080 | if (!data->soft_pp_table) |
5114 | return -ENOMEM; | 5081 | return -ENOMEM; |
5115 | memcpy(data->soft_pp_table, hwmgr->soft_pp_table, | ||
5116 | hwmgr->soft_pp_table_size); | ||
5117 | } | 5082 | } |
5118 | 5083 | ||
5119 | *table = (char *)&data->soft_pp_table; | 5084 | *table = (char *)&data->soft_pp_table; |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 7d69ed635bc2..1c48917da3cf 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | |||
@@ -30,6 +30,9 @@ | |||
30 | #include "pppcielanes.h" | 30 | #include "pppcielanes.h" |
31 | #include "pp_debug.h" | 31 | #include "pp_debug.h" |
32 | #include "ppatomctrl.h" | 32 | #include "ppatomctrl.h" |
33 | #include "ppsmc.h" | ||
34 | |||
35 | #define VOLTAGE_SCALE 4 | ||
33 | 36 | ||
34 | extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr); | 37 | extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr); |
35 | extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr); | 38 | extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr); |
@@ -566,3 +569,38 @@ uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask) | |||
566 | 569 | ||
567 | return level; | 570 | return level; |
568 | } | 571 | } |
572 | |||
573 | void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr) | ||
574 | { | ||
575 | struct phm_ppt_v1_information *table_info = | ||
576 | (struct phm_ppt_v1_information *)hwmgr->pptable; | ||
577 | struct phm_clock_voltage_dependency_table *table = | ||
578 | table_info->vddc_dep_on_dal_pwrl; | ||
579 | struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table; | ||
580 | enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level; | ||
581 | uint32_t req_vddc = 0, req_volt, i; | ||
582 | |||
583 | if (!table || table->count <= 0 | ||
584 | || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW | ||
585 | || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE) | ||
586 | return; | ||
587 | |||
588 | for (i = 0; i < table->count; i++) { | ||
589 | if (dal_power_level == table->entries[i].clk) { | ||
590 | req_vddc = table->entries[i].v; | ||
591 | break; | ||
592 | } | ||
593 | } | ||
594 | |||
595 | vddc_table = table_info->vdd_dep_on_sclk; | ||
596 | for (i = 0; i < vddc_table->count; i++) { | ||
597 | if (req_vddc <= vddc_table->entries[i].vddc) { | ||
598 | req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE); | ||
599 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | ||
600 | PPSMC_MSG_VddC_Request, req_volt); | ||
601 | return; | ||
602 | } | ||
603 | } | ||
604 | printk(KERN_ERR "DAL requested level can not" | ||
605 | " found a available voltage in VDDC DPM Table \n"); | ||
606 | } | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c index 93768fa1dcdc..aa6be033f21b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c | |||
@@ -189,41 +189,6 @@ int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) | |||
189 | return decode_pcie_lane_width(link_width); | 189 | return decode_pcie_lane_width(link_width); |
190 | } | 190 | } |
191 | 191 | ||
192 | void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr) | ||
193 | { | ||
194 | struct phm_ppt_v1_information *table_info = | ||
195 | (struct phm_ppt_v1_information *)hwmgr->pptable; | ||
196 | struct phm_clock_voltage_dependency_table *table = | ||
197 | table_info->vddc_dep_on_dal_pwrl; | ||
198 | struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table; | ||
199 | enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level; | ||
200 | uint32_t req_vddc = 0, req_volt, i; | ||
201 | |||
202 | if (!table && !(dal_power_level >= PP_DAL_POWERLEVEL_ULTRALOW && | ||
203 | dal_power_level <= PP_DAL_POWERLEVEL_PERFORMANCE)) | ||
204 | return; | ||
205 | |||
206 | for (i = 0; i < table->count; i++) { | ||
207 | if (dal_power_level == table->entries[i].clk) { | ||
208 | req_vddc = table->entries[i].v; | ||
209 | break; | ||
210 | } | ||
211 | } | ||
212 | |||
213 | vddc_table = table_info->vdd_dep_on_sclk; | ||
214 | for (i = 0; i < vddc_table->count; i++) { | ||
215 | if (req_vddc <= vddc_table->entries[i].vddc) { | ||
216 | req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE) | ||
217 | << VDDC_SHIFT; | ||
218 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | ||
219 | PPSMC_MSG_VddC_Request, req_volt); | ||
220 | return; | ||
221 | } | ||
222 | } | ||
223 | printk(KERN_ERR "DAL requested level can not" | ||
224 | " found a available voltage in VDDC DPM Table \n"); | ||
225 | } | ||
226 | |||
227 | /** | 192 | /** |
228 | * Enable voltage control | 193 | * Enable voltage control |
229 | * | 194 | * |
@@ -2091,7 +2056,7 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) | |||
2091 | "Failed to populate Clock Stretcher Data Table!", | 2056 | "Failed to populate Clock Stretcher Data Table!", |
2092 | return result); | 2057 | return result); |
2093 | } | 2058 | } |
2094 | 2059 | table->CurrSclkPllRange = 0xff; | |
2095 | table->GraphicsVoltageChangeEnable = 1; | 2060 | table->GraphicsVoltageChangeEnable = 1; |
2096 | table->GraphicsThermThrottleEnable = 1; | 2061 | table->GraphicsThermThrottleEnable = 1; |
2097 | table->GraphicsInterval = 1; | 2062 | table->GraphicsInterval = 1; |
@@ -2184,6 +2149,7 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) | |||
2184 | CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); | 2149 | CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); |
2185 | CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); | 2150 | CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); |
2186 | CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); | 2151 | CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); |
2152 | CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange); | ||
2187 | CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); | 2153 | CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); |
2188 | CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); | 2154 | CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); |
2189 | CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); | 2155 | CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); |
@@ -4760,11 +4726,11 @@ static int polaris10_get_pp_table(struct pp_hwmgr *hwmgr, char **table) | |||
4760 | struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); | 4726 | struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); |
4761 | 4727 | ||
4762 | if (!data->soft_pp_table) { | 4728 | if (!data->soft_pp_table) { |
4763 | data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); | 4729 | data->soft_pp_table = kmemdup(hwmgr->soft_pp_table, |
4730 | hwmgr->soft_pp_table_size, | ||
4731 | GFP_KERNEL); | ||
4764 | if (!data->soft_pp_table) | 4732 | if (!data->soft_pp_table) |
4765 | return -ENOMEM; | 4733 | return -ENOMEM; |
4766 | memcpy(data->soft_pp_table, hwmgr->soft_pp_table, | ||
4767 | hwmgr->soft_pp_table_size); | ||
4768 | } | 4734 | } |
4769 | 4735 | ||
4770 | *table = (char *)&data->soft_pp_table; | 4736 | *table = (char *)&data->soft_pp_table; |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c index 1faad92b50d3..16fed487973b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c | |||
@@ -5331,7 +5331,7 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) | |||
5331 | (data->need_update_smu7_dpm_table & | 5331 | (data->need_update_smu7_dpm_table & |
5332 | (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { | 5332 | (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { |
5333 | PP_ASSERT_WITH_CODE( | 5333 | PP_ASSERT_WITH_CODE( |
5334 | true == tonga_is_dpm_running(hwmgr), | 5334 | 0 == tonga_is_dpm_running(hwmgr), |
5335 | "Trying to freeze SCLK DPM when DPM is disabled", | 5335 | "Trying to freeze SCLK DPM when DPM is disabled", |
5336 | ); | 5336 | ); |
5337 | PP_ASSERT_WITH_CODE( | 5337 | PP_ASSERT_WITH_CODE( |
@@ -5344,7 +5344,7 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) | |||
5344 | if ((0 == data->mclk_dpm_key_disabled) && | 5344 | if ((0 == data->mclk_dpm_key_disabled) && |
5345 | (data->need_update_smu7_dpm_table & | 5345 | (data->need_update_smu7_dpm_table & |
5346 | DPMTABLE_OD_UPDATE_MCLK)) { | 5346 | DPMTABLE_OD_UPDATE_MCLK)) { |
5347 | PP_ASSERT_WITH_CODE(true == tonga_is_dpm_running(hwmgr), | 5347 | PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), |
5348 | "Trying to freeze MCLK DPM when DPM is disabled", | 5348 | "Trying to freeze MCLK DPM when DPM is disabled", |
5349 | ); | 5349 | ); |
5350 | PP_ASSERT_WITH_CODE( | 5350 | PP_ASSERT_WITH_CODE( |
@@ -5445,7 +5445,7 @@ static int tonga_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr | |||
5445 | } | 5445 | } |
5446 | 5446 | ||
5447 | if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { | 5447 | if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { |
5448 | result = tonga_populate_all_memory_levels(hwmgr); | 5448 | result = tonga_populate_all_graphic_levels(hwmgr); |
5449 | PP_ASSERT_WITH_CODE((0 == result), | 5449 | PP_ASSERT_WITH_CODE((0 == result), |
5450 | "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", | 5450 | "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", |
5451 | return result); | 5451 | return result); |
@@ -5647,7 +5647,7 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) | |||
5647 | (data->need_update_smu7_dpm_table & | 5647 | (data->need_update_smu7_dpm_table & |
5648 | (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { | 5648 | (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { |
5649 | 5649 | ||
5650 | PP_ASSERT_WITH_CODE(true == tonga_is_dpm_running(hwmgr), | 5650 | PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), |
5651 | "Trying to Unfreeze SCLK DPM when DPM is disabled", | 5651 | "Trying to Unfreeze SCLK DPM when DPM is disabled", |
5652 | ); | 5652 | ); |
5653 | PP_ASSERT_WITH_CODE( | 5653 | PP_ASSERT_WITH_CODE( |
@@ -5661,7 +5661,7 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) | |||
5661 | (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { | 5661 | (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { |
5662 | 5662 | ||
5663 | PP_ASSERT_WITH_CODE( | 5663 | PP_ASSERT_WITH_CODE( |
5664 | true == tonga_is_dpm_running(hwmgr), | 5664 | 0 == tonga_is_dpm_running(hwmgr), |
5665 | "Trying to Unfreeze MCLK DPM when DPM is disabled", | 5665 | "Trying to Unfreeze MCLK DPM when DPM is disabled", |
5666 | ); | 5666 | ); |
5667 | PP_ASSERT_WITH_CODE( | 5667 | PP_ASSERT_WITH_CODE( |
@@ -6056,11 +6056,11 @@ static int tonga_get_pp_table(struct pp_hwmgr *hwmgr, char **table) | |||
6056 | struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); | 6056 | struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); |
6057 | 6057 | ||
6058 | if (!data->soft_pp_table) { | 6058 | if (!data->soft_pp_table) { |
6059 | data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); | 6059 | data->soft_pp_table = kmemdup(hwmgr->soft_pp_table, |
6060 | hwmgr->soft_pp_table_size, | ||
6061 | GFP_KERNEL); | ||
6060 | if (!data->soft_pp_table) | 6062 | if (!data->soft_pp_table) |
6061 | return -ENOMEM; | 6063 | return -ENOMEM; |
6062 | memcpy(data->soft_pp_table, hwmgr->soft_pp_table, | ||
6063 | hwmgr->soft_pp_table_size); | ||
6064 | } | 6064 | } |
6065 | 6065 | ||
6066 | *table = (char *)&data->soft_pp_table; | 6066 | *table = (char *)&data->soft_pp_table; |
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index fd4ce7aaeee9..28f571449495 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | |||
@@ -673,7 +673,7 @@ extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_volta | |||
673 | extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr); | 673 | extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr); |
674 | extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr); | 674 | extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr); |
675 | extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask); | 675 | extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask); |
676 | 676 | extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr); | |
677 | 677 | ||
678 | #define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU | 678 | #define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU |
679 | 679 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c index da18f44fd1c8..87c023e518ab 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c | |||
@@ -639,7 +639,7 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr) | |||
639 | 639 | ||
640 | cz_smu->driver_buffer_length = 0; | 640 | cz_smu->driver_buffer_length = 0; |
641 | 641 | ||
642 | for (i = 0; i < sizeof(firmware_list)/sizeof(*firmware_list); i++) { | 642 | for (i = 0; i < ARRAY_SIZE(firmware_list); i++) { |
643 | 643 | ||
644 | firmware_type = cz_translate_firmware_enum_to_arg(smumgr, | 644 | firmware_type = cz_translate_firmware_enum_to_arg(smumgr, |
645 | firmware_list[i]); | 645 | firmware_list[i]); |
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c new file mode 100644 index 000000000000..a7b2a751f6fe --- /dev/null +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c | |||
@@ -0,0 +1,366 @@ | |||
1 | /* | ||
2 | * Copyright © 2016 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | */ | ||
22 | |||
23 | #include <linux/errno.h> | ||
24 | #include <linux/export.h> | ||
25 | #include <linux/i2c.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/string.h> | ||
28 | #include <drm/drm_dp_dual_mode_helper.h> | ||
29 | #include <drm/drmP.h> | ||
30 | |||
31 | /** | ||
32 | * DOC: dp dual mode helpers | ||
33 | * | ||
34 | * Helper functions to deal with DP dual mode (aka. DP++) adaptors. | ||
35 | * | ||
36 | * Type 1: | ||
37 | * Adaptor registers (if any) and the sink DDC bus may be accessed via I2C. | ||
38 | * | ||
39 | * Type 2: | ||
40 | * Adaptor registers and sink DDC bus can be accessed either via I2C or | ||
41 | * I2C-over-AUX. Source devices may choose to implement either of these | ||
42 | * access methods. | ||
43 | */ | ||
44 | |||
45 | #define DP_DUAL_MODE_SLAVE_ADDRESS 0x40 | ||
46 | |||
47 | /** | ||
48 | * drm_dp_dual_mode_read - Read from the DP dual mode adaptor register(s) | ||
49 | * @adapter: I2C adapter for the DDC bus | ||
50 | * @offset: register offset | ||
51 | * @buffer: buffer for return data | ||
52 | * @size: sizo of the buffer | ||
53 | * | ||
54 | * Reads @size bytes from the DP dual mode adaptor registers | ||
55 | * starting at @offset. | ||
56 | * | ||
57 | * Returns: | ||
58 | * 0 on success, negative error code on failure | ||
59 | */ | ||
60 | ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter, | ||
61 | u8 offset, void *buffer, size_t size) | ||
62 | { | ||
63 | struct i2c_msg msgs[] = { | ||
64 | { | ||
65 | .addr = DP_DUAL_MODE_SLAVE_ADDRESS, | ||
66 | .flags = 0, | ||
67 | .len = 1, | ||
68 | .buf = &offset, | ||
69 | }, | ||
70 | { | ||
71 | .addr = DP_DUAL_MODE_SLAVE_ADDRESS, | ||
72 | .flags = I2C_M_RD, | ||
73 | .len = size, | ||
74 | .buf = buffer, | ||
75 | }, | ||
76 | }; | ||
77 | int ret; | ||
78 | |||
79 | ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs)); | ||
80 | if (ret < 0) | ||
81 | return ret; | ||
82 | if (ret != ARRAY_SIZE(msgs)) | ||
83 | return -EPROTO; | ||
84 | |||
85 | return 0; | ||
86 | } | ||
87 | EXPORT_SYMBOL(drm_dp_dual_mode_read); | ||
88 | |||
89 | /** | ||
90 | * drm_dp_dual_mode_write - Write to the DP dual mode adaptor register(s) | ||
91 | * @adapter: I2C adapter for the DDC bus | ||
92 | * @offset: register offset | ||
93 | * @buffer: buffer for write data | ||
94 | * @size: sizo of the buffer | ||
95 | * | ||
96 | * Writes @size bytes to the DP dual mode adaptor registers | ||
97 | * starting at @offset. | ||
98 | * | ||
99 | * Returns: | ||
100 | * 0 on success, negative error code on failure | ||
101 | */ | ||
102 | ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter, | ||
103 | u8 offset, const void *buffer, size_t size) | ||
104 | { | ||
105 | struct i2c_msg msg = { | ||
106 | .addr = DP_DUAL_MODE_SLAVE_ADDRESS, | ||
107 | .flags = 0, | ||
108 | .len = 1 + size, | ||
109 | .buf = NULL, | ||
110 | }; | ||
111 | void *data; | ||
112 | int ret; | ||
113 | |||
114 | data = kmalloc(msg.len, GFP_TEMPORARY); | ||
115 | if (!data) | ||
116 | return -ENOMEM; | ||
117 | |||
118 | msg.buf = data; | ||
119 | |||
120 | memcpy(data, &offset, 1); | ||
121 | memcpy(data + 1, buffer, size); | ||
122 | |||
123 | ret = i2c_transfer(adapter, &msg, 1); | ||
124 | |||
125 | kfree(data); | ||
126 | |||
127 | if (ret < 0) | ||
128 | return ret; | ||
129 | if (ret != 1) | ||
130 | return -EPROTO; | ||
131 | |||
132 | return 0; | ||
133 | } | ||
134 | EXPORT_SYMBOL(drm_dp_dual_mode_write); | ||
135 | |||
136 | static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN]) | ||
137 | { | ||
138 | static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = | ||
139 | "DP-HDMI ADAPTOR\x04"; | ||
140 | |||
141 | return memcmp(hdmi_id, dp_dual_mode_hdmi_id, | ||
142 | sizeof(dp_dual_mode_hdmi_id)) == 0; | ||
143 | } | ||
144 | |||
145 | static bool is_type2_adaptor(uint8_t adaptor_id) | ||
146 | { | ||
147 | return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 | | ||
148 | DP_DUAL_MODE_REV_TYPE2); | ||
149 | } | ||
150 | |||
151 | /** | ||
152 | * drm_dp_dual_mode_detect - Identify the DP dual mode adaptor | ||
153 | * @adapter: I2C adapter for the DDC bus | ||
154 | * | ||
155 | * Attempt to identify the type of the DP dual mode adaptor used. | ||
156 | * | ||
157 | * Note that when the answer is @DRM_DP_DUAL_MODE_UNKNOWN it's not | ||
158 | * certain whether we're dealing with a native HDMI port or | ||
159 | * a type 1 DVI dual mode adaptor. The driver will have to use | ||
160 | * some other hardware/driver specific mechanism to make that | ||
161 | * distinction. | ||
162 | * | ||
163 | * Returns: | ||
164 | * The type of the DP dual mode adaptor used | ||
165 | */ | ||
166 | enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter) | ||
167 | { | ||
168 | char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = {}; | ||
169 | uint8_t adaptor_id = 0x00; | ||
170 | ssize_t ret; | ||
171 | |||
172 | /* | ||
173 | * Let's see if the adaptor is there the by reading the | ||
174 | * HDMI ID registers. | ||
175 | * | ||
176 | * Note that type 1 DVI adaptors are not required to implemnt | ||
177 | * any registers, and that presents a problem for detection. | ||
178 | * If the i2c transfer is nacked, we may or may not be dealing | ||
179 | * with a type 1 DVI adaptor. Some other mechanism of detecting | ||
180 | * the presence of the adaptor is required. One way would be | ||
181 | * to check the state of the CONFIG1 pin, Another method would | ||
182 | * simply require the driver to know whether the port is a DP++ | ||
183 | * port or a native HDMI port. Both of these methods are entirely | ||
184 | * hardware/driver specific so we can't deal with them here. | ||
185 | */ | ||
186 | ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID, | ||
187 | hdmi_id, sizeof(hdmi_id)); | ||
188 | if (ret) | ||
189 | return DRM_DP_DUAL_MODE_UNKNOWN; | ||
190 | |||
191 | /* | ||
192 | * Sigh. Some (maybe all?) type 1 adaptors are broken and ack | ||
193 | * the offset but ignore it, and instead they just always return | ||
194 | * data from the start of the HDMI ID buffer. So for a broken | ||
195 | * type 1 HDMI adaptor a single byte read will always give us | ||
196 | * 0x44, and for a type 1 DVI adaptor it should give 0x00 | ||
197 | * (assuming it implements any registers). Fortunately neither | ||
198 | * of those values will match the type 2 signature of the | ||
199 | * DP_DUAL_MODE_ADAPTOR_ID register so we can proceed with | ||
200 | * the type 2 adaptor detection safely even in the presence | ||
201 | * of broken type 1 adaptors. | ||
202 | */ | ||
203 | ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID, | ||
204 | &adaptor_id, sizeof(adaptor_id)); | ||
205 | if (ret == 0) { | ||
206 | if (is_type2_adaptor(adaptor_id)) { | ||
207 | if (is_hdmi_adaptor(hdmi_id)) | ||
208 | return DRM_DP_DUAL_MODE_TYPE2_HDMI; | ||
209 | else | ||
210 | return DRM_DP_DUAL_MODE_TYPE2_DVI; | ||
211 | } | ||
212 | } | ||
213 | |||
214 | if (is_hdmi_adaptor(hdmi_id)) | ||
215 | return DRM_DP_DUAL_MODE_TYPE1_HDMI; | ||
216 | else | ||
217 | return DRM_DP_DUAL_MODE_TYPE1_DVI; | ||
218 | } | ||
219 | EXPORT_SYMBOL(drm_dp_dual_mode_detect); | ||
220 | |||
221 | /** | ||
222 | * drm_dp_dual_mode_max_tmds_clock - Max TMDS clock for DP dual mode adaptor | ||
223 | * @type: DP dual mode adaptor type | ||
224 | * @adapter: I2C adapter for the DDC bus | ||
225 | * | ||
226 | * Determine the max TMDS clock the adaptor supports based on the | ||
227 | * type of the dual mode adaptor and the DP_DUAL_MODE_MAX_TMDS_CLOCK | ||
228 | * register (on type2 adaptors). As some type 1 adaptors have | ||
229 | * problems with registers (see comments in drm_dp_dual_mode_detect()) | ||
230 | * we don't read the register on those, instead we simply assume | ||
231 | * a 165 MHz limit based on the specification. | ||
232 | * | ||
233 | * Returns: | ||
234 | * Maximum supported TMDS clock rate for the DP dual mode adaptor in kHz. | ||
235 | */ | ||
236 | int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type, | ||
237 | struct i2c_adapter *adapter) | ||
238 | { | ||
239 | uint8_t max_tmds_clock; | ||
240 | ssize_t ret; | ||
241 | |||
242 | /* native HDMI so no limit */ | ||
243 | if (type == DRM_DP_DUAL_MODE_NONE) | ||
244 | return 0; | ||
245 | |||
246 | /* | ||
247 | * Type 1 adaptors are limited to 165MHz | ||
248 | * Type 2 adaptors can tells us their limit | ||
249 | */ | ||
250 | if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) | ||
251 | return 165000; | ||
252 | |||
253 | ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_MAX_TMDS_CLOCK, | ||
254 | &max_tmds_clock, sizeof(max_tmds_clock)); | ||
255 | if (ret || max_tmds_clock == 0x00 || max_tmds_clock == 0xff) { | ||
256 | DRM_DEBUG_KMS("Failed to query max TMDS clock\n"); | ||
257 | return 165000; | ||
258 | } | ||
259 | |||
260 | return max_tmds_clock * 5000 / 2; | ||
261 | } | ||
262 | EXPORT_SYMBOL(drm_dp_dual_mode_max_tmds_clock); | ||
263 | |||
264 | /** | ||
265 | * drm_dp_dual_mode_get_tmds_output - Get the state of the TMDS output buffers in the DP dual mode adaptor | ||
266 | * @type: DP dual mode adaptor type | ||
267 | * @adapter: I2C adapter for the DDC bus | ||
268 | * @enabled: current state of the TMDS output buffers | ||
269 | * | ||
270 | * Get the state of the TMDS output buffers in the adaptor. For | ||
271 | * type2 adaptors this is queried from the DP_DUAL_MODE_TMDS_OEN | ||
272 | * register. As some type 1 adaptors have problems with registers | ||
273 | * (see comments in drm_dp_dual_mode_detect()) we don't read the | ||
274 | * register on those, instead we simply assume that the buffers | ||
275 | * are always enabled. | ||
276 | * | ||
277 | * Returns: | ||
278 | * 0 on success, negative error code on failure | ||
279 | */ | ||
280 | int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type, | ||
281 | struct i2c_adapter *adapter, | ||
282 | bool *enabled) | ||
283 | { | ||
284 | uint8_t tmds_oen; | ||
285 | ssize_t ret; | ||
286 | |||
287 | if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) { | ||
288 | *enabled = true; | ||
289 | return 0; | ||
290 | } | ||
291 | |||
292 | ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN, | ||
293 | &tmds_oen, sizeof(tmds_oen)); | ||
294 | if (ret) { | ||
295 | DRM_DEBUG_KMS("Failed to query state of TMDS output buffers\n"); | ||
296 | return ret; | ||
297 | } | ||
298 | |||
299 | *enabled = !(tmds_oen & DP_DUAL_MODE_TMDS_DISABLE); | ||
300 | |||
301 | return 0; | ||
302 | } | ||
303 | EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output); | ||
304 | |||
305 | /** | ||
306 | * drm_dp_dual_mode_set_tmds_output - Enable/disable TMDS output buffers in the DP dual mode adaptor | ||
307 | * @type: DP dual mode adaptor type | ||
308 | * @adapter: I2C adapter for the DDC bus | ||
309 | * @enable: enable (as opposed to disable) the TMDS output buffers | ||
310 | * | ||
311 | * Set the state of the TMDS output buffers in the adaptor. For | ||
312 | * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. As | ||
313 | * some type 1 adaptors have problems with registers (see comments | ||
314 | * in drm_dp_dual_mode_detect()) we avoid touching the register, | ||
315 | * making this function a no-op on type 1 adaptors. | ||
316 | * | ||
317 | * Returns: | ||
318 | * 0 on success, negative error code on failure | ||
319 | */ | ||
320 | int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type, | ||
321 | struct i2c_adapter *adapter, bool enable) | ||
322 | { | ||
323 | uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE; | ||
324 | ssize_t ret; | ||
325 | |||
326 | if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) | ||
327 | return 0; | ||
328 | |||
329 | ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, | ||
330 | &tmds_oen, sizeof(tmds_oen)); | ||
331 | if (ret) { | ||
332 | DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n", | ||
333 | enable ? "enable" : "disable"); | ||
334 | return ret; | ||
335 | } | ||
336 | |||
337 | return 0; | ||
338 | } | ||
339 | EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output); | ||
340 | |||
341 | /** | ||
342 | * drm_dp_get_dual_mode_type_name - Get the name of the DP dual mode adaptor type as a string | ||
343 | * @type: DP dual mode adaptor type | ||
344 | * | ||
345 | * Returns: | ||
346 | * String representation of the DP dual mode adaptor type | ||
347 | */ | ||
348 | const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type) | ||
349 | { | ||
350 | switch (type) { | ||
351 | case DRM_DP_DUAL_MODE_NONE: | ||
352 | return "none"; | ||
353 | case DRM_DP_DUAL_MODE_TYPE1_DVI: | ||
354 | return "type 1 DVI"; | ||
355 | case DRM_DP_DUAL_MODE_TYPE1_HDMI: | ||
356 | return "type 1 HDMI"; | ||
357 | case DRM_DP_DUAL_MODE_TYPE2_DVI: | ||
358 | return "type 2 DVI"; | ||
359 | case DRM_DP_DUAL_MODE_TYPE2_HDMI: | ||
360 | return "type 2 HDMI"; | ||
361 | default: | ||
362 | WARN_ON(type != DRM_DP_DUAL_MODE_UNKNOWN); | ||
363 | return "unknown"; | ||
364 | } | ||
365 | } | ||
366 | EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name); | ||
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 15615fb9bde6..b3198fcd0536 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -1183,6 +1183,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) | |||
1183 | if (ret) | 1183 | if (ret) |
1184 | return ret; | 1184 | return ret; |
1185 | 1185 | ||
1186 | ret = i915_ggtt_enable_hw(dev); | ||
1187 | if (ret) { | ||
1188 | DRM_ERROR("failed to enable GGTT\n"); | ||
1189 | goto out_ggtt; | ||
1190 | } | ||
1191 | |||
1186 | /* WARNING: Apparently we must kick fbdev drivers before vgacon, | 1192 | /* WARNING: Apparently we must kick fbdev drivers before vgacon, |
1187 | * otherwise the vga fbdev driver falls over. */ | 1193 | * otherwise the vga fbdev driver falls over. */ |
1188 | ret = i915_kick_out_firmware_fb(dev_priv); | 1194 | ret = i915_kick_out_firmware_fb(dev_priv); |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index d37c0a671eed..f313b4d8344f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -734,9 +734,14 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) | |||
734 | static int i915_drm_resume(struct drm_device *dev) | 734 | static int i915_drm_resume(struct drm_device *dev) |
735 | { | 735 | { |
736 | struct drm_i915_private *dev_priv = dev->dev_private; | 736 | struct drm_i915_private *dev_priv = dev->dev_private; |
737 | int ret; | ||
737 | 738 | ||
738 | disable_rpm_wakeref_asserts(dev_priv); | 739 | disable_rpm_wakeref_asserts(dev_priv); |
739 | 740 | ||
741 | ret = i915_ggtt_enable_hw(dev); | ||
742 | if (ret) | ||
743 | DRM_ERROR("failed to re-enable GGTT\n"); | ||
744 | |||
740 | intel_csr_ucode_resume(dev_priv); | 745 | intel_csr_ucode_resume(dev_priv); |
741 | 746 | ||
742 | mutex_lock(&dev->struct_mutex); | 747 | mutex_lock(&dev->struct_mutex); |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b87ca4fae20a..5faacc6e548d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -3482,6 +3482,7 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size); | |||
3482 | bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); | 3482 | bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); |
3483 | bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); | 3483 | bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); |
3484 | bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); | 3484 | bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); |
3485 | bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); | ||
3485 | bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); | 3486 | bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); |
3486 | bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, | 3487 | bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, |
3487 | enum port port); | 3488 | enum port port); |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9b99490e8367..aad26851cee3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1456,7 +1456,10 @@ i915_wait_request(struct drm_i915_gem_request *req) | |||
1456 | if (ret) | 1456 | if (ret) |
1457 | return ret; | 1457 | return ret; |
1458 | 1458 | ||
1459 | __i915_gem_request_retire__upto(req); | 1459 | /* If the GPU hung, we want to keep the requests to find the guilty. */ |
1460 | if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error)) | ||
1461 | __i915_gem_request_retire__upto(req); | ||
1462 | |||
1460 | return 0; | 1463 | return 0; |
1461 | } | 1464 | } |
1462 | 1465 | ||
@@ -1513,7 +1516,8 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj, | |||
1513 | else if (obj->last_write_req == req) | 1516 | else if (obj->last_write_req == req) |
1514 | i915_gem_object_retire__write(obj); | 1517 | i915_gem_object_retire__write(obj); |
1515 | 1518 | ||
1516 | __i915_gem_request_retire__upto(req); | 1519 | if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error)) |
1520 | __i915_gem_request_retire__upto(req); | ||
1517 | } | 1521 | } |
1518 | 1522 | ||
1519 | /* A nonblocking variant of the above wait. This is a highly dangerous routine | 1523 | /* A nonblocking variant of the above wait. This is a highly dangerous routine |
@@ -4860,9 +4864,6 @@ i915_gem_init_hw(struct drm_device *dev) | |||
4860 | struct intel_engine_cs *engine; | 4864 | struct intel_engine_cs *engine; |
4861 | int ret, j; | 4865 | int ret, j; |
4862 | 4866 | ||
4863 | if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) | ||
4864 | return -EIO; | ||
4865 | |||
4866 | /* Double layer security blanket, see i915_gem_init() */ | 4867 | /* Double layer security blanket, see i915_gem_init() */ |
4867 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | 4868 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); |
4868 | 4869 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 0d666b3f7e9b..92acdff9dad3 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -3236,6 +3236,14 @@ out_gtt_cleanup: | |||
3236 | return ret; | 3236 | return ret; |
3237 | } | 3237 | } |
3238 | 3238 | ||
3239 | int i915_ggtt_enable_hw(struct drm_device *dev) | ||
3240 | { | ||
3241 | if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) | ||
3242 | return -EIO; | ||
3243 | |||
3244 | return 0; | ||
3245 | } | ||
3246 | |||
3239 | void i915_gem_restore_gtt_mappings(struct drm_device *dev) | 3247 | void i915_gem_restore_gtt_mappings(struct drm_device *dev) |
3240 | { | 3248 | { |
3241 | struct drm_i915_private *dev_priv = to_i915(dev); | 3249 | struct drm_i915_private *dev_priv = to_i915(dev); |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index d7dd3d8a8758..0008543d55f6 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h | |||
@@ -514,6 +514,7 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n) | |||
514 | } | 514 | } |
515 | 515 | ||
516 | int i915_ggtt_init_hw(struct drm_device *dev); | 516 | int i915_ggtt_init_hw(struct drm_device *dev); |
517 | int i915_ggtt_enable_hw(struct drm_device *dev); | ||
517 | void i915_gem_init_ggtt(struct drm_device *dev); | 518 | void i915_gem_init_ggtt(struct drm_device *dev); |
518 | void i915_ggtt_cleanup_hw(struct drm_device *dev); | 519 | void i915_ggtt_cleanup_hw(struct drm_device *dev); |
519 | 520 | ||
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index e72dd9a8d6bf..b235b6e88ead 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -1578,6 +1578,42 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port) | |||
1578 | return false; | 1578 | return false; |
1579 | } | 1579 | } |
1580 | 1580 | ||
1581 | bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port) | ||
1582 | { | ||
1583 | static const struct { | ||
1584 | u16 dp, hdmi; | ||
1585 | } port_mapping[] = { | ||
1586 | /* | ||
1587 | * Buggy VBTs may declare DP ports as having | ||
1588 | * HDMI type dvo_port :( So let's check both. | ||
1589 | */ | ||
1590 | [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, }, | ||
1591 | [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, }, | ||
1592 | [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, | ||
1593 | [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, | ||
1594 | }; | ||
1595 | int i; | ||
1596 | |||
1597 | if (port == PORT_A || port >= ARRAY_SIZE(port_mapping)) | ||
1598 | return false; | ||
1599 | |||
1600 | if (!dev_priv->vbt.child_dev_num) | ||
1601 | return false; | ||
1602 | |||
1603 | for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { | ||
1604 | const union child_device_config *p_child = | ||
1605 | &dev_priv->vbt.child_dev[i]; | ||
1606 | |||
1607 | if ((p_child->common.dvo_port == port_mapping[port].dp || | ||
1608 | p_child->common.dvo_port == port_mapping[port].hdmi) && | ||
1609 | (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) == | ||
1610 | (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS)) | ||
1611 | return true; | ||
1612 | } | ||
1613 | |||
1614 | return false; | ||
1615 | } | ||
1616 | |||
1581 | /** | 1617 | /** |
1582 | * intel_bios_is_dsi_present - is DSI present in VBT | 1618 | * intel_bios_is_dsi_present - is DSI present in VBT |
1583 | * @dev_priv: i915 device instance | 1619 | * @dev_priv: i915 device instance |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 3fac04602a25..01e523df363b 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -1601,6 +1601,12 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) | |||
1601 | enum port port = intel_ddi_get_encoder_port(intel_encoder); | 1601 | enum port port = intel_ddi_get_encoder_port(intel_encoder); |
1602 | int type = intel_encoder->type; | 1602 | int type = intel_encoder->type; |
1603 | 1603 | ||
1604 | if (type == INTEL_OUTPUT_HDMI) { | ||
1605 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | ||
1606 | |||
1607 | intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); | ||
1608 | } | ||
1609 | |||
1604 | intel_prepare_ddi_buffer(intel_encoder); | 1610 | intel_prepare_ddi_buffer(intel_encoder); |
1605 | 1611 | ||
1606 | if (type == INTEL_OUTPUT_EDP) { | 1612 | if (type == INTEL_OUTPUT_EDP) { |
@@ -1667,6 +1673,12 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) | |||
1667 | DPLL_CTRL2_DDI_CLK_OFF(port))); | 1673 | DPLL_CTRL2_DDI_CLK_OFF(port))); |
1668 | else if (INTEL_INFO(dev)->gen < 9) | 1674 | else if (INTEL_INFO(dev)->gen < 9) |
1669 | I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); | 1675 | I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); |
1676 | |||
1677 | if (type == INTEL_OUTPUT_HDMI) { | ||
1678 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | ||
1679 | |||
1680 | intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); | ||
1681 | } | ||
1670 | } | 1682 | } |
1671 | 1683 | ||
1672 | static void intel_enable_ddi(struct intel_encoder *intel_encoder) | 1684 | static void intel_enable_ddi(struct intel_encoder *intel_encoder) |
@@ -2180,8 +2192,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder, | |||
2180 | 2192 | ||
2181 | if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config)) | 2193 | if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config)) |
2182 | pipe_config->has_infoframe = true; | 2194 | pipe_config->has_infoframe = true; |
2183 | break; | 2195 | /* fall through */ |
2184 | case TRANS_DDI_MODE_SELECT_DVI: | 2196 | case TRANS_DDI_MODE_SELECT_DVI: |
2197 | pipe_config->lane_count = 4; | ||
2198 | break; | ||
2185 | case TRANS_DDI_MODE_SELECT_FDI: | 2199 | case TRANS_DDI_MODE_SELECT_FDI: |
2186 | break; | 2200 | break; |
2187 | case TRANS_DDI_MODE_SELECT_DP_SST: | 2201 | case TRANS_DDI_MODE_SELECT_DP_SST: |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 46f9be3ad5a2..2113f401f0ba 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -12005,6 +12005,9 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, | |||
12005 | DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); | 12005 | DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); |
12006 | return ret; | 12006 | return ret; |
12007 | } | 12007 | } |
12008 | } else if (dev_priv->display.compute_intermediate_wm) { | ||
12009 | if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9) | ||
12010 | pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk; | ||
12008 | } | 12011 | } |
12009 | 12012 | ||
12010 | if (INTEL_INFO(dev)->gen >= 9) { | 12013 | if (INTEL_INFO(dev)->gen >= 9) { |
@@ -15990,6 +15993,9 @@ retry: | |||
15990 | 15993 | ||
15991 | state->acquire_ctx = &ctx; | 15994 | state->acquire_ctx = &ctx; |
15992 | 15995 | ||
15996 | /* ignore any reset values/BIOS leftovers in the WM registers */ | ||
15997 | to_intel_atomic_state(state)->skip_intermediate_wm = true; | ||
15998 | |||
15993 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | 15999 | for_each_crtc_in_state(state, crtc, crtc_state, i) { |
15994 | /* | 16000 | /* |
15995 | * Force recalculation even if we restore | 16001 | * Force recalculation even if we restore |
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 639bf0209c15..3ac705936b04 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c | |||
@@ -1702,9 +1702,9 @@ static const struct intel_dpll_mgr hsw_pll_mgr = { | |||
1702 | 1702 | ||
1703 | static const struct dpll_info skl_plls[] = { | 1703 | static const struct dpll_info skl_plls[] = { |
1704 | { "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON }, | 1704 | { "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON }, |
1705 | { "DPPL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 }, | 1705 | { "DPLL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 }, |
1706 | { "DPPL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 }, | 1706 | { "DPLL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 }, |
1707 | { "DPPL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 }, | 1707 | { "DPLL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 }, |
1708 | { NULL, -1, NULL, }, | 1708 | { NULL, -1, NULL, }, |
1709 | }; | 1709 | }; |
1710 | 1710 | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 5da29a02b9e3..a28b4aac1e02 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <drm/drm_crtc.h> | 33 | #include <drm/drm_crtc.h> |
34 | #include <drm/drm_crtc_helper.h> | 34 | #include <drm/drm_crtc_helper.h> |
35 | #include <drm/drm_fb_helper.h> | 35 | #include <drm/drm_fb_helper.h> |
36 | #include <drm/drm_dp_dual_mode_helper.h> | ||
36 | #include <drm/drm_dp_mst_helper.h> | 37 | #include <drm/drm_dp_mst_helper.h> |
37 | #include <drm/drm_rect.h> | 38 | #include <drm/drm_rect.h> |
38 | #include <drm/drm_atomic.h> | 39 | #include <drm/drm_atomic.h> |
@@ -753,6 +754,10 @@ struct cxsr_latency { | |||
753 | struct intel_hdmi { | 754 | struct intel_hdmi { |
754 | i915_reg_t hdmi_reg; | 755 | i915_reg_t hdmi_reg; |
755 | int ddc_bus; | 756 | int ddc_bus; |
757 | struct { | ||
758 | enum drm_dp_dual_mode_type type; | ||
759 | int max_tmds_clock; | ||
760 | } dp_dual_mode; | ||
756 | bool limited_color_range; | 761 | bool limited_color_range; |
757 | bool color_range_auto; | 762 | bool color_range_auto; |
758 | bool has_hdmi_sink; | 763 | bool has_hdmi_sink; |
@@ -1401,6 +1406,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, | |||
1401 | struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); | 1406 | struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); |
1402 | bool intel_hdmi_compute_config(struct intel_encoder *encoder, | 1407 | bool intel_hdmi_compute_config(struct intel_encoder *encoder, |
1403 | struct intel_crtc_state *pipe_config); | 1408 | struct intel_crtc_state *pipe_config); |
1409 | void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable); | ||
1404 | 1410 | ||
1405 | 1411 | ||
1406 | /* intel_lvds.c */ | 1412 | /* intel_lvds.c */ |
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 2b22bb9bb86f..366ad6c67ce4 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c | |||
@@ -46,6 +46,22 @@ static const struct { | |||
46 | }, | 46 | }, |
47 | }; | 47 | }; |
48 | 48 | ||
49 | /* return pixels in terms of txbyteclkhs */ | ||
50 | static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count, | ||
51 | u16 burst_mode_ratio) | ||
52 | { | ||
53 | return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio, | ||
54 | 8 * 100), lane_count); | ||
55 | } | ||
56 | |||
57 | /* return pixels equvalent to txbyteclkhs */ | ||
58 | static u16 pixels_from_txbyteclkhs(u16 clk_hs, int bpp, int lane_count, | ||
59 | u16 burst_mode_ratio) | ||
60 | { | ||
61 | return DIV_ROUND_UP((clk_hs * lane_count * 8 * 100), | ||
62 | (bpp * burst_mode_ratio)); | ||
63 | } | ||
64 | |||
49 | enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt) | 65 | enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt) |
50 | { | 66 | { |
51 | /* It just so happens the VBT matches register contents. */ | 67 | /* It just so happens the VBT matches register contents. */ |
@@ -780,10 +796,19 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, | |||
780 | struct drm_i915_private *dev_priv = dev->dev_private; | 796 | struct drm_i915_private *dev_priv = dev->dev_private; |
781 | struct drm_display_mode *adjusted_mode = | 797 | struct drm_display_mode *adjusted_mode = |
782 | &pipe_config->base.adjusted_mode; | 798 | &pipe_config->base.adjusted_mode; |
799 | struct drm_display_mode *adjusted_mode_sw; | ||
800 | struct intel_crtc *intel_crtc; | ||
783 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | 801 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); |
802 | unsigned int lane_count = intel_dsi->lane_count; | ||
784 | unsigned int bpp, fmt; | 803 | unsigned int bpp, fmt; |
785 | enum port port; | 804 | enum port port; |
786 | u16 vfp, vsync, vbp; | 805 | u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp; |
806 | u16 hfp_sw, hsync_sw, hbp_sw; | ||
807 | u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw, | ||
808 | crtc_hblank_start_sw, crtc_hblank_end_sw; | ||
809 | |||
810 | intel_crtc = to_intel_crtc(encoder->base.crtc); | ||
811 | adjusted_mode_sw = &intel_crtc->config->base.adjusted_mode; | ||
787 | 812 | ||
788 | /* | 813 | /* |
789 | * At least one port is active as encoder->get_config called only if | 814 | * At least one port is active as encoder->get_config called only if |
@@ -808,26 +833,118 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, | |||
808 | adjusted_mode->crtc_vtotal = | 833 | adjusted_mode->crtc_vtotal = |
809 | I915_READ(BXT_MIPI_TRANS_VTOTAL(port)); | 834 | I915_READ(BXT_MIPI_TRANS_VTOTAL(port)); |
810 | 835 | ||
836 | hactive = adjusted_mode->crtc_hdisplay; | ||
837 | hfp = I915_READ(MIPI_HFP_COUNT(port)); | ||
838 | |||
811 | /* | 839 | /* |
812 | * TODO: Retrieve hfp, hsync and hbp. Adjust them for dual link and | 840 | * Meaningful for video mode non-burst sync pulse mode only, |
813 | * calculate hsync_start, hsync_end, htotal and hblank_end | 841 | * can be zero for non-burst sync events and burst modes |
814 | */ | 842 | */ |
843 | hsync = I915_READ(MIPI_HSYNC_PADDING_COUNT(port)); | ||
844 | hbp = I915_READ(MIPI_HBP_COUNT(port)); | ||
845 | |||
846 | /* horizontal values are in terms of high speed byte clock */ | ||
847 | hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count, | ||
848 | intel_dsi->burst_mode_ratio); | ||
849 | hsync = pixels_from_txbyteclkhs(hsync, bpp, lane_count, | ||
850 | intel_dsi->burst_mode_ratio); | ||
851 | hbp = pixels_from_txbyteclkhs(hbp, bpp, lane_count, | ||
852 | intel_dsi->burst_mode_ratio); | ||
853 | |||
854 | if (intel_dsi->dual_link) { | ||
855 | hfp *= 2; | ||
856 | hsync *= 2; | ||
857 | hbp *= 2; | ||
858 | } | ||
815 | 859 | ||
816 | /* vertical values are in terms of lines */ | 860 | /* vertical values are in terms of lines */ |
817 | vfp = I915_READ(MIPI_VFP_COUNT(port)); | 861 | vfp = I915_READ(MIPI_VFP_COUNT(port)); |
818 | vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port)); | 862 | vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port)); |
819 | vbp = I915_READ(MIPI_VBP_COUNT(port)); | 863 | vbp = I915_READ(MIPI_VBP_COUNT(port)); |
820 | 864 | ||
865 | adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp; | ||
866 | adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay; | ||
867 | adjusted_mode->crtc_hsync_end = hsync + adjusted_mode->crtc_hsync_start; | ||
821 | adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay; | 868 | adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay; |
869 | adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_htotal; | ||
822 | 870 | ||
823 | adjusted_mode->crtc_vsync_start = | 871 | adjusted_mode->crtc_vsync_start = vfp + adjusted_mode->crtc_vdisplay; |
824 | vfp + adjusted_mode->crtc_vdisplay; | 872 | adjusted_mode->crtc_vsync_end = vsync + adjusted_mode->crtc_vsync_start; |
825 | adjusted_mode->crtc_vsync_end = | ||
826 | vsync + adjusted_mode->crtc_vsync_start; | ||
827 | adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay; | 873 | adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay; |
828 | adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal; | 874 | adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal; |
829 | } | ||
830 | 875 | ||
876 | /* | ||
877 | * In BXT DSI there is no regs programmed with few horizontal timings | ||
878 | * in Pixels but txbyteclkhs.. So retrieval process adds some | ||
879 | * ROUND_UP ERRORS in the process of PIXELS<==>txbyteclkhs. | ||
880 | * Actually here for the given adjusted_mode, we are calculating the | ||
881 | * value programmed to the port and then back to the horizontal timing | ||
882 | * param in pixels. This is the expected value, including roundup errors | ||
883 | * And if that is same as retrieved value from port, then | ||
884 | * (HW state) adjusted_mode's horizontal timings are corrected to | ||
885 | * match with SW state to nullify the errors. | ||
886 | */ | ||
887 | /* Calculating the value programmed to the Port register */ | ||
888 | hfp_sw = adjusted_mode_sw->crtc_hsync_start - | ||
889 | adjusted_mode_sw->crtc_hdisplay; | ||
890 | hsync_sw = adjusted_mode_sw->crtc_hsync_end - | ||
891 | adjusted_mode_sw->crtc_hsync_start; | ||
892 | hbp_sw = adjusted_mode_sw->crtc_htotal - | ||
893 | adjusted_mode_sw->crtc_hsync_end; | ||
894 | |||
895 | if (intel_dsi->dual_link) { | ||
896 | hfp_sw /= 2; | ||
897 | hsync_sw /= 2; | ||
898 | hbp_sw /= 2; | ||
899 | } | ||
900 | |||
901 | hfp_sw = txbyteclkhs(hfp_sw, bpp, lane_count, | ||
902 | intel_dsi->burst_mode_ratio); | ||
903 | hsync_sw = txbyteclkhs(hsync_sw, bpp, lane_count, | ||
904 | intel_dsi->burst_mode_ratio); | ||
905 | hbp_sw = txbyteclkhs(hbp_sw, bpp, lane_count, | ||
906 | intel_dsi->burst_mode_ratio); | ||
907 | |||
908 | /* Reverse calculating the adjusted mode parameters from port reg vals*/ | ||
909 | hfp_sw = pixels_from_txbyteclkhs(hfp_sw, bpp, lane_count, | ||
910 | intel_dsi->burst_mode_ratio); | ||
911 | hsync_sw = pixels_from_txbyteclkhs(hsync_sw, bpp, lane_count, | ||
912 | intel_dsi->burst_mode_ratio); | ||
913 | hbp_sw = pixels_from_txbyteclkhs(hbp_sw, bpp, lane_count, | ||
914 | intel_dsi->burst_mode_ratio); | ||
915 | |||
916 | if (intel_dsi->dual_link) { | ||
917 | hfp_sw *= 2; | ||
918 | hsync_sw *= 2; | ||
919 | hbp_sw *= 2; | ||
920 | } | ||
921 | |||
922 | crtc_htotal_sw = adjusted_mode_sw->crtc_hdisplay + hfp_sw + | ||
923 | hsync_sw + hbp_sw; | ||
924 | crtc_hsync_start_sw = hfp_sw + adjusted_mode_sw->crtc_hdisplay; | ||
925 | crtc_hsync_end_sw = hsync_sw + crtc_hsync_start_sw; | ||
926 | crtc_hblank_start_sw = adjusted_mode_sw->crtc_hdisplay; | ||
927 | crtc_hblank_end_sw = crtc_htotal_sw; | ||
928 | |||
929 | if (adjusted_mode->crtc_htotal == crtc_htotal_sw) | ||
930 | adjusted_mode->crtc_htotal = adjusted_mode_sw->crtc_htotal; | ||
931 | |||
932 | if (adjusted_mode->crtc_hsync_start == crtc_hsync_start_sw) | ||
933 | adjusted_mode->crtc_hsync_start = | ||
934 | adjusted_mode_sw->crtc_hsync_start; | ||
935 | |||
936 | if (adjusted_mode->crtc_hsync_end == crtc_hsync_end_sw) | ||
937 | adjusted_mode->crtc_hsync_end = | ||
938 | adjusted_mode_sw->crtc_hsync_end; | ||
939 | |||
940 | if (adjusted_mode->crtc_hblank_start == crtc_hblank_start_sw) | ||
941 | adjusted_mode->crtc_hblank_start = | ||
942 | adjusted_mode_sw->crtc_hblank_start; | ||
943 | |||
944 | if (adjusted_mode->crtc_hblank_end == crtc_hblank_end_sw) | ||
945 | adjusted_mode->crtc_hblank_end = | ||
946 | adjusted_mode_sw->crtc_hblank_end; | ||
947 | } | ||
831 | 948 | ||
832 | static void intel_dsi_get_config(struct intel_encoder *encoder, | 949 | static void intel_dsi_get_config(struct intel_encoder *encoder, |
833 | struct intel_crtc_state *pipe_config) | 950 | struct intel_crtc_state *pipe_config) |
@@ -891,14 +1008,6 @@ static u16 txclkesc(u32 divider, unsigned int us) | |||
891 | } | 1008 | } |
892 | } | 1009 | } |
893 | 1010 | ||
894 | /* return pixels in terms of txbyteclkhs */ | ||
895 | static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count, | ||
896 | u16 burst_mode_ratio) | ||
897 | { | ||
898 | return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio, | ||
899 | 8 * 100), lane_count); | ||
900 | } | ||
901 | |||
902 | static void set_dsi_timings(struct drm_encoder *encoder, | 1011 | static void set_dsi_timings(struct drm_encoder *encoder, |
903 | const struct drm_display_mode *adjusted_mode) | 1012 | const struct drm_display_mode *adjusted_mode) |
904 | { | 1013 | { |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 2cdab73046f8..2c3bd9c2573e 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -836,6 +836,22 @@ static void hsw_set_infoframes(struct drm_encoder *encoder, | |||
836 | intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); | 836 | intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); |
837 | } | 837 | } |
838 | 838 | ||
839 | void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) | ||
840 | { | ||
841 | struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi)); | ||
842 | struct i2c_adapter *adapter = | ||
843 | intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); | ||
844 | |||
845 | if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI) | ||
846 | return; | ||
847 | |||
848 | DRM_DEBUG_KMS("%s DP dual mode adaptor TMDS output\n", | ||
849 | enable ? "Enabling" : "Disabling"); | ||
850 | |||
851 | drm_dp_dual_mode_set_tmds_output(hdmi->dp_dual_mode.type, | ||
852 | adapter, enable); | ||
853 | } | ||
854 | |||
839 | static void intel_hdmi_prepare(struct intel_encoder *encoder) | 855 | static void intel_hdmi_prepare(struct intel_encoder *encoder) |
840 | { | 856 | { |
841 | struct drm_device *dev = encoder->base.dev; | 857 | struct drm_device *dev = encoder->base.dev; |
@@ -845,6 +861,8 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder) | |||
845 | const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; | 861 | const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; |
846 | u32 hdmi_val; | 862 | u32 hdmi_val; |
847 | 863 | ||
864 | intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); | ||
865 | |||
848 | hdmi_val = SDVO_ENCODING_HDMI; | 866 | hdmi_val = SDVO_ENCODING_HDMI; |
849 | if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range) | 867 | if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range) |
850 | hdmi_val |= HDMI_COLOR_RANGE_16_235; | 868 | hdmi_val |= HDMI_COLOR_RANGE_16_235; |
@@ -953,6 +971,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, | |||
953 | dotclock /= pipe_config->pixel_multiplier; | 971 | dotclock /= pipe_config->pixel_multiplier; |
954 | 972 | ||
955 | pipe_config->base.adjusted_mode.crtc_clock = dotclock; | 973 | pipe_config->base.adjusted_mode.crtc_clock = dotclock; |
974 | |||
975 | pipe_config->lane_count = 4; | ||
956 | } | 976 | } |
957 | 977 | ||
958 | static void intel_enable_hdmi_audio(struct intel_encoder *encoder) | 978 | static void intel_enable_hdmi_audio(struct intel_encoder *encoder) |
@@ -1140,6 +1160,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder) | |||
1140 | } | 1160 | } |
1141 | 1161 | ||
1142 | intel_hdmi->set_infoframes(&encoder->base, false, NULL); | 1162 | intel_hdmi->set_infoframes(&encoder->base, false, NULL); |
1163 | |||
1164 | intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); | ||
1143 | } | 1165 | } |
1144 | 1166 | ||
1145 | static void g4x_disable_hdmi(struct intel_encoder *encoder) | 1167 | static void g4x_disable_hdmi(struct intel_encoder *encoder) |
@@ -1165,27 +1187,42 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder) | |||
1165 | intel_disable_hdmi(encoder); | 1187 | intel_disable_hdmi(encoder); |
1166 | } | 1188 | } |
1167 | 1189 | ||
1168 | static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit) | 1190 | static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv) |
1169 | { | 1191 | { |
1170 | struct drm_device *dev = intel_hdmi_to_dev(hdmi); | 1192 | if (IS_G4X(dev_priv)) |
1171 | |||
1172 | if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev)) | ||
1173 | return 165000; | 1193 | return 165000; |
1174 | else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) | 1194 | else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) |
1175 | return 300000; | 1195 | return 300000; |
1176 | else | 1196 | else |
1177 | return 225000; | 1197 | return 225000; |
1178 | } | 1198 | } |
1179 | 1199 | ||
1200 | static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, | ||
1201 | bool respect_downstream_limits) | ||
1202 | { | ||
1203 | struct drm_device *dev = intel_hdmi_to_dev(hdmi); | ||
1204 | int max_tmds_clock = intel_hdmi_source_max_tmds_clock(to_i915(dev)); | ||
1205 | |||
1206 | if (respect_downstream_limits) { | ||
1207 | if (hdmi->dp_dual_mode.max_tmds_clock) | ||
1208 | max_tmds_clock = min(max_tmds_clock, | ||
1209 | hdmi->dp_dual_mode.max_tmds_clock); | ||
1210 | if (!hdmi->has_hdmi_sink) | ||
1211 | max_tmds_clock = min(max_tmds_clock, 165000); | ||
1212 | } | ||
1213 | |||
1214 | return max_tmds_clock; | ||
1215 | } | ||
1216 | |||
1180 | static enum drm_mode_status | 1217 | static enum drm_mode_status |
1181 | hdmi_port_clock_valid(struct intel_hdmi *hdmi, | 1218 | hdmi_port_clock_valid(struct intel_hdmi *hdmi, |
1182 | int clock, bool respect_dvi_limit) | 1219 | int clock, bool respect_downstream_limits) |
1183 | { | 1220 | { |
1184 | struct drm_device *dev = intel_hdmi_to_dev(hdmi); | 1221 | struct drm_device *dev = intel_hdmi_to_dev(hdmi); |
1185 | 1222 | ||
1186 | if (clock < 25000) | 1223 | if (clock < 25000) |
1187 | return MODE_CLOCK_LOW; | 1224 | return MODE_CLOCK_LOW; |
1188 | if (clock > hdmi_port_clock_limit(hdmi, respect_dvi_limit)) | 1225 | if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits)) |
1189 | return MODE_CLOCK_HIGH; | 1226 | return MODE_CLOCK_HIGH; |
1190 | 1227 | ||
1191 | /* BXT DPLL can't generate 223-240 MHz */ | 1228 | /* BXT DPLL can't generate 223-240 MHz */ |
@@ -1309,7 +1346,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, | |||
1309 | * within limits. | 1346 | * within limits. |
1310 | */ | 1347 | */ |
1311 | if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink && | 1348 | if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink && |
1312 | hdmi_port_clock_valid(intel_hdmi, clock_12bpc, false) == MODE_OK && | 1349 | hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true) == MODE_OK && |
1313 | hdmi_12bpc_possible(pipe_config)) { | 1350 | hdmi_12bpc_possible(pipe_config)) { |
1314 | DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); | 1351 | DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); |
1315 | desired_bpp = 12*3; | 1352 | desired_bpp = 12*3; |
@@ -1337,6 +1374,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, | |||
1337 | /* Set user selected PAR to incoming mode's member */ | 1374 | /* Set user selected PAR to incoming mode's member */ |
1338 | adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio; | 1375 | adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio; |
1339 | 1376 | ||
1377 | pipe_config->lane_count = 4; | ||
1378 | |||
1340 | return true; | 1379 | return true; |
1341 | } | 1380 | } |
1342 | 1381 | ||
@@ -1349,10 +1388,57 @@ intel_hdmi_unset_edid(struct drm_connector *connector) | |||
1349 | intel_hdmi->has_audio = false; | 1388 | intel_hdmi->has_audio = false; |
1350 | intel_hdmi->rgb_quant_range_selectable = false; | 1389 | intel_hdmi->rgb_quant_range_selectable = false; |
1351 | 1390 | ||
1391 | intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE; | ||
1392 | intel_hdmi->dp_dual_mode.max_tmds_clock = 0; | ||
1393 | |||
1352 | kfree(to_intel_connector(connector)->detect_edid); | 1394 | kfree(to_intel_connector(connector)->detect_edid); |
1353 | to_intel_connector(connector)->detect_edid = NULL; | 1395 | to_intel_connector(connector)->detect_edid = NULL; |
1354 | } | 1396 | } |
1355 | 1397 | ||
1398 | static void | ||
1399 | intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid) | ||
1400 | { | ||
1401 | struct drm_i915_private *dev_priv = to_i915(connector->dev); | ||
1402 | struct intel_hdmi *hdmi = intel_attached_hdmi(connector); | ||
1403 | enum port port = hdmi_to_dig_port(hdmi)->port; | ||
1404 | struct i2c_adapter *adapter = | ||
1405 | intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); | ||
1406 | enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(adapter); | ||
1407 | |||
1408 | /* | ||
1409 | * Type 1 DVI adaptors are not required to implement any | ||
1410 | * registers, so we can't always detect their presence. | ||
1411 | * Ideally we should be able to check the state of the | ||
1412 | * CONFIG1 pin, but no such luck on our hardware. | ||
1413 | * | ||
1414 | * The only method left to us is to check the VBT to see | ||
1415 | * if the port is a dual mode capable DP port. But let's | ||
1416 | * only do that when we successfully read the EDID, to avoid | ||
1417 | * confusing log messages about DP dual mode adaptors when | ||
1418 | * there's nothing connected to the port. | ||
1419 | */ | ||
1420 | if (type == DRM_DP_DUAL_MODE_UNKNOWN) { | ||
1421 | if (has_edid && | ||
1422 | intel_bios_is_port_dp_dual_mode(dev_priv, port)) { | ||
1423 | DRM_DEBUG_KMS("Assuming DP dual mode adaptor presence based on VBT\n"); | ||
1424 | type = DRM_DP_DUAL_MODE_TYPE1_DVI; | ||
1425 | } else { | ||
1426 | type = DRM_DP_DUAL_MODE_NONE; | ||
1427 | } | ||
1428 | } | ||
1429 | |||
1430 | if (type == DRM_DP_DUAL_MODE_NONE) | ||
1431 | return; | ||
1432 | |||
1433 | hdmi->dp_dual_mode.type = type; | ||
1434 | hdmi->dp_dual_mode.max_tmds_clock = | ||
1435 | drm_dp_dual_mode_max_tmds_clock(type, adapter); | ||
1436 | |||
1437 | DRM_DEBUG_KMS("DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n", | ||
1438 | drm_dp_get_dual_mode_type_name(type), | ||
1439 | hdmi->dp_dual_mode.max_tmds_clock); | ||
1440 | } | ||
1441 | |||
1356 | static bool | 1442 | static bool |
1357 | intel_hdmi_set_edid(struct drm_connector *connector, bool force) | 1443 | intel_hdmi_set_edid(struct drm_connector *connector, bool force) |
1358 | { | 1444 | { |
@@ -1368,6 +1454,8 @@ intel_hdmi_set_edid(struct drm_connector *connector, bool force) | |||
1368 | intel_gmbus_get_adapter(dev_priv, | 1454 | intel_gmbus_get_adapter(dev_priv, |
1369 | intel_hdmi->ddc_bus)); | 1455 | intel_hdmi->ddc_bus)); |
1370 | 1456 | ||
1457 | intel_hdmi_dp_dual_mode_detect(connector, edid != NULL); | ||
1458 | |||
1371 | intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); | 1459 | intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); |
1372 | } | 1460 | } |
1373 | 1461 | ||
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 6179b591ee84..42eac37de047 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
@@ -721,48 +721,6 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request | |||
721 | return ret; | 721 | return ret; |
722 | } | 722 | } |
723 | 723 | ||
724 | static int logical_ring_wait_for_space(struct drm_i915_gem_request *req, | ||
725 | int bytes) | ||
726 | { | ||
727 | struct intel_ringbuffer *ringbuf = req->ringbuf; | ||
728 | struct intel_engine_cs *engine = req->engine; | ||
729 | struct drm_i915_gem_request *target; | ||
730 | unsigned space; | ||
731 | int ret; | ||
732 | |||
733 | if (intel_ring_space(ringbuf) >= bytes) | ||
734 | return 0; | ||
735 | |||
736 | /* The whole point of reserving space is to not wait! */ | ||
737 | WARN_ON(ringbuf->reserved_in_use); | ||
738 | |||
739 | list_for_each_entry(target, &engine->request_list, list) { | ||
740 | /* | ||
741 | * The request queue is per-engine, so can contain requests | ||
742 | * from multiple ringbuffers. Here, we must ignore any that | ||
743 | * aren't from the ringbuffer we're considering. | ||
744 | */ | ||
745 | if (target->ringbuf != ringbuf) | ||
746 | continue; | ||
747 | |||
748 | /* Would completion of this request free enough space? */ | ||
749 | space = __intel_ring_space(target->postfix, ringbuf->tail, | ||
750 | ringbuf->size); | ||
751 | if (space >= bytes) | ||
752 | break; | ||
753 | } | ||
754 | |||
755 | if (WARN_ON(&target->list == &engine->request_list)) | ||
756 | return -ENOSPC; | ||
757 | |||
758 | ret = i915_wait_request(target); | ||
759 | if (ret) | ||
760 | return ret; | ||
761 | |||
762 | ringbuf->space = space; | ||
763 | return 0; | ||
764 | } | ||
765 | |||
766 | /* | 724 | /* |
767 | * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload | 725 | * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload |
768 | * @request: Request to advance the logical ringbuffer of. | 726 | * @request: Request to advance the logical ringbuffer of. |
@@ -814,92 +772,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) | |||
814 | return 0; | 772 | return 0; |
815 | } | 773 | } |
816 | 774 | ||
817 | static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf) | ||
818 | { | ||
819 | uint32_t __iomem *virt; | ||
820 | int rem = ringbuf->size - ringbuf->tail; | ||
821 | |||
822 | virt = ringbuf->virtual_start + ringbuf->tail; | ||
823 | rem /= 4; | ||
824 | while (rem--) | ||
825 | iowrite32(MI_NOOP, virt++); | ||
826 | |||
827 | ringbuf->tail = 0; | ||
828 | intel_ring_update_space(ringbuf); | ||
829 | } | ||
830 | |||
831 | static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes) | ||
832 | { | ||
833 | struct intel_ringbuffer *ringbuf = req->ringbuf; | ||
834 | int remain_usable = ringbuf->effective_size - ringbuf->tail; | ||
835 | int remain_actual = ringbuf->size - ringbuf->tail; | ||
836 | int ret, total_bytes, wait_bytes = 0; | ||
837 | bool need_wrap = false; | ||
838 | |||
839 | if (ringbuf->reserved_in_use) | ||
840 | total_bytes = bytes; | ||
841 | else | ||
842 | total_bytes = bytes + ringbuf->reserved_size; | ||
843 | |||
844 | if (unlikely(bytes > remain_usable)) { | ||
845 | /* | ||
846 | * Not enough space for the basic request. So need to flush | ||
847 | * out the remainder and then wait for base + reserved. | ||
848 | */ | ||
849 | wait_bytes = remain_actual + total_bytes; | ||
850 | need_wrap = true; | ||
851 | } else { | ||
852 | if (unlikely(total_bytes > remain_usable)) { | ||
853 | /* | ||
854 | * The base request will fit but the reserved space | ||
855 | * falls off the end. So don't need an immediate wrap | ||
856 | * and only need to effectively wait for the reserved | ||
857 | * size space from the start of ringbuffer. | ||
858 | */ | ||
859 | wait_bytes = remain_actual + ringbuf->reserved_size; | ||
860 | } else if (total_bytes > ringbuf->space) { | ||
861 | /* No wrapping required, just waiting. */ | ||
862 | wait_bytes = total_bytes; | ||
863 | } | ||
864 | } | ||
865 | |||
866 | if (wait_bytes) { | ||
867 | ret = logical_ring_wait_for_space(req, wait_bytes); | ||
868 | if (unlikely(ret)) | ||
869 | return ret; | ||
870 | |||
871 | if (need_wrap) | ||
872 | __wrap_ring_buffer(ringbuf); | ||
873 | } | ||
874 | |||
875 | return 0; | ||
876 | } | ||
877 | |||
878 | /** | ||
879 | * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands | ||
880 | * | ||
881 | * @req: The request to start some new work for | ||
882 | * @num_dwords: number of DWORDs that we plan to write to the ringbuffer. | ||
883 | * | ||
884 | * The ringbuffer might not be ready to accept the commands right away (maybe it needs to | ||
885 | * be wrapped, or wait a bit for the tail to be updated). This function takes care of that | ||
886 | * and also preallocates a request (every workload submission is still mediated through | ||
887 | * requests, same as it did with legacy ringbuffer submission). | ||
888 | * | ||
889 | * Return: non-zero if the ringbuffer is not ready to be written to. | ||
890 | */ | ||
891 | int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords) | ||
892 | { | ||
893 | int ret; | ||
894 | |||
895 | ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t)); | ||
896 | if (ret) | ||
897 | return ret; | ||
898 | |||
899 | req->ringbuf->space -= num_dwords * sizeof(uint32_t); | ||
900 | return 0; | ||
901 | } | ||
902 | |||
903 | int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request) | 775 | int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request) |
904 | { | 776 | { |
905 | /* | 777 | /* |
@@ -912,7 +784,7 @@ int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request) | |||
912 | */ | 784 | */ |
913 | intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST); | 785 | intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST); |
914 | 786 | ||
915 | return intel_logical_ring_begin(request, 0); | 787 | return intel_ring_begin(request, 0); |
916 | } | 788 | } |
917 | 789 | ||
918 | /** | 790 | /** |
@@ -982,7 +854,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, | |||
982 | 854 | ||
983 | if (engine == &dev_priv->engine[RCS] && | 855 | if (engine == &dev_priv->engine[RCS] && |
984 | instp_mode != dev_priv->relative_constants_mode) { | 856 | instp_mode != dev_priv->relative_constants_mode) { |
985 | ret = intel_logical_ring_begin(params->request, 4); | 857 | ret = intel_ring_begin(params->request, 4); |
986 | if (ret) | 858 | if (ret) |
987 | return ret; | 859 | return ret; |
988 | 860 | ||
@@ -1178,7 +1050,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) | |||
1178 | if (ret) | 1050 | if (ret) |
1179 | return ret; | 1051 | return ret; |
1180 | 1052 | ||
1181 | ret = intel_logical_ring_begin(req, w->count * 2 + 2); | 1053 | ret = intel_ring_begin(req, w->count * 2 + 2); |
1182 | if (ret) | 1054 | if (ret) |
1183 | return ret; | 1055 | return ret; |
1184 | 1056 | ||
@@ -1669,7 +1541,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) | |||
1669 | const int num_lri_cmds = GEN8_LEGACY_PDPES * 2; | 1541 | const int num_lri_cmds = GEN8_LEGACY_PDPES * 2; |
1670 | int i, ret; | 1542 | int i, ret; |
1671 | 1543 | ||
1672 | ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2); | 1544 | ret = intel_ring_begin(req, num_lri_cmds * 2 + 2); |
1673 | if (ret) | 1545 | if (ret) |
1674 | return ret; | 1546 | return ret; |
1675 | 1547 | ||
@@ -1716,7 +1588,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, | |||
1716 | req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine); | 1588 | req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine); |
1717 | } | 1589 | } |
1718 | 1590 | ||
1719 | ret = intel_logical_ring_begin(req, 4); | 1591 | ret = intel_ring_begin(req, 4); |
1720 | if (ret) | 1592 | if (ret) |
1721 | return ret; | 1593 | return ret; |
1722 | 1594 | ||
@@ -1778,7 +1650,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, | |||
1778 | uint32_t cmd; | 1650 | uint32_t cmd; |
1779 | int ret; | 1651 | int ret; |
1780 | 1652 | ||
1781 | ret = intel_logical_ring_begin(request, 4); | 1653 | ret = intel_ring_begin(request, 4); |
1782 | if (ret) | 1654 | if (ret) |
1783 | return ret; | 1655 | return ret; |
1784 | 1656 | ||
@@ -1846,7 +1718,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, | |||
1846 | vf_flush_wa = true; | 1718 | vf_flush_wa = true; |
1847 | } | 1719 | } |
1848 | 1720 | ||
1849 | ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6); | 1721 | ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6); |
1850 | if (ret) | 1722 | if (ret) |
1851 | return ret; | 1723 | return ret; |
1852 | 1724 | ||
@@ -1920,7 +1792,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request) | |||
1920 | struct intel_ringbuffer *ringbuf = request->ringbuf; | 1792 | struct intel_ringbuffer *ringbuf = request->ringbuf; |
1921 | int ret; | 1793 | int ret; |
1922 | 1794 | ||
1923 | ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS); | 1795 | ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS); |
1924 | if (ret) | 1796 | if (ret) |
1925 | return ret; | 1797 | return ret; |
1926 | 1798 | ||
@@ -1944,7 +1816,7 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request) | |||
1944 | struct intel_ringbuffer *ringbuf = request->ringbuf; | 1816 | struct intel_ringbuffer *ringbuf = request->ringbuf; |
1945 | int ret; | 1817 | int ret; |
1946 | 1818 | ||
1947 | ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS); | 1819 | ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS); |
1948 | if (ret) | 1820 | if (ret) |
1949 | return ret; | 1821 | return ret; |
1950 | 1822 | ||
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 461f1ef9b5c1..60a7385bc531 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h | |||
@@ -63,7 +63,6 @@ int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request); | |||
63 | void intel_logical_ring_stop(struct intel_engine_cs *engine); | 63 | void intel_logical_ring_stop(struct intel_engine_cs *engine); |
64 | void intel_logical_ring_cleanup(struct intel_engine_cs *engine); | 64 | void intel_logical_ring_cleanup(struct intel_engine_cs *engine); |
65 | int intel_logical_rings_init(struct drm_device *dev); | 65 | int intel_logical_rings_init(struct drm_device *dev); |
66 | int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords); | ||
67 | 66 | ||
68 | int logical_ring_flush_all_caches(struct drm_i915_gem_request *req); | 67 | int logical_ring_flush_all_caches(struct drm_i915_gem_request *req); |
69 | /** | 68 | /** |
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index 23b8545ad6b0..6ba4bf7f2a89 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c | |||
@@ -239,11 +239,9 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req, | |||
239 | if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) | 239 | if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) |
240 | return -ENODEV; | 240 | return -ENODEV; |
241 | 241 | ||
242 | ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES); | 242 | ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES); |
243 | if (ret) { | 243 | if (ret) |
244 | DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret); | ||
245 | return ret; | 244 | return ret; |
246 | } | ||
247 | 245 | ||
248 | intel_logical_ring_emit(ringbuf, | 246 | intel_logical_ring_emit(ringbuf, |
249 | MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES)); | 247 | MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES)); |
@@ -305,11 +303,9 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, | |||
305 | if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) | 303 | if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) |
306 | return -ENODEV; | 304 | return -ENODEV; |
307 | 305 | ||
308 | ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES); | 306 | ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES); |
309 | if (ret) { | 307 | if (ret) |
310 | DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret); | ||
311 | return ret; | 308 | return ret; |
312 | } | ||
313 | 309 | ||
314 | intel_logical_ring_emit(ringbuf, | 310 | intel_logical_ring_emit(ringbuf, |
315 | MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2)); | 311 | MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2)); |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 4b60005cda37..a7ef45da0a9e 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -3904,6 +3904,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) | |||
3904 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | 3904 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
3905 | hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); | 3905 | hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); |
3906 | 3906 | ||
3907 | memset(active, 0, sizeof(*active)); | ||
3908 | |||
3907 | active->pipe_enabled = intel_crtc->active; | 3909 | active->pipe_enabled = intel_crtc->active; |
3908 | 3910 | ||
3909 | if (active->pipe_enabled) { | 3911 | if (active->pipe_enabled) { |
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index c3abae4bc596..a788d1e9589b 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c | |||
@@ -280,7 +280,10 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp) | |||
280 | * with the 5 or 6 idle patterns. | 280 | * with the 5 or 6 idle patterns. |
281 | */ | 281 | */ |
282 | uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames); | 282 | uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames); |
283 | uint32_t val = 0x0; | 283 | uint32_t val = EDP_PSR_ENABLE; |
284 | |||
285 | val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; | ||
286 | val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT; | ||
284 | 287 | ||
285 | if (IS_HASWELL(dev)) | 288 | if (IS_HASWELL(dev)) |
286 | val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; | 289 | val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; |
@@ -288,14 +291,50 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp) | |||
288 | if (dev_priv->psr.link_standby) | 291 | if (dev_priv->psr.link_standby) |
289 | val |= EDP_PSR_LINK_STANDBY; | 292 | val |= EDP_PSR_LINK_STANDBY; |
290 | 293 | ||
291 | I915_WRITE(EDP_PSR_CTL, val | | 294 | if (dev_priv->vbt.psr.tp1_wakeup_time > 5) |
292 | max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | | 295 | val |= EDP_PSR_TP1_TIME_2500us; |
293 | idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | | 296 | else if (dev_priv->vbt.psr.tp1_wakeup_time > 1) |
294 | EDP_PSR_ENABLE); | 297 | val |= EDP_PSR_TP1_TIME_500us; |
298 | else if (dev_priv->vbt.psr.tp1_wakeup_time > 0) | ||
299 | val |= EDP_PSR_TP1_TIME_100us; | ||
300 | else | ||
301 | val |= EDP_PSR_TP1_TIME_0us; | ||
302 | |||
303 | if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5) | ||
304 | val |= EDP_PSR_TP2_TP3_TIME_2500us; | ||
305 | else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1) | ||
306 | val |= EDP_PSR_TP2_TP3_TIME_500us; | ||
307 | else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0) | ||
308 | val |= EDP_PSR_TP2_TP3_TIME_100us; | ||
309 | else | ||
310 | val |= EDP_PSR_TP2_TP3_TIME_0us; | ||
311 | |||
312 | if (intel_dp_source_supports_hbr2(intel_dp) && | ||
313 | drm_dp_tps3_supported(intel_dp->dpcd)) | ||
314 | val |= EDP_PSR_TP1_TP3_SEL; | ||
315 | else | ||
316 | val |= EDP_PSR_TP1_TP2_SEL; | ||
317 | |||
318 | I915_WRITE(EDP_PSR_CTL, val); | ||
319 | |||
320 | if (!dev_priv->psr.psr2_support) | ||
321 | return; | ||
322 | |||
323 | /* FIXME: selective update is probably totally broken because it doesn't | ||
324 | * mesh at all with our frontbuffer tracking. And the hw alone isn't | ||
325 | * good enough. */ | ||
326 | val = EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE; | ||
327 | |||
328 | if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5) | ||
329 | val |= EDP_PSR2_TP2_TIME_2500; | ||
330 | else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1) | ||
331 | val |= EDP_PSR2_TP2_TIME_500; | ||
332 | else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0) | ||
333 | val |= EDP_PSR2_TP2_TIME_100; | ||
334 | else | ||
335 | val |= EDP_PSR2_TP2_TIME_50; | ||
295 | 336 | ||
296 | if (dev_priv->psr.psr2_support) | 337 | I915_WRITE(EDP_PSR2_CTL, val); |
297 | I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE | | ||
298 | EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100); | ||
299 | } | 338 | } |
300 | 339 | ||
301 | static bool intel_psr_match_conditions(struct intel_dp *intel_dp) | 340 | static bool intel_psr_match_conditions(struct intel_dp *intel_dp) |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 245386e20c52..04402bb9d26b 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -53,12 +53,6 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf) | |||
53 | ringbuf->tail, ringbuf->size); | 53 | ringbuf->tail, ringbuf->size); |
54 | } | 54 | } |
55 | 55 | ||
56 | int intel_ring_space(struct intel_ringbuffer *ringbuf) | ||
57 | { | ||
58 | intel_ring_update_space(ringbuf); | ||
59 | return ringbuf->space; | ||
60 | } | ||
61 | |||
62 | bool intel_engine_stopped(struct intel_engine_cs *engine) | 56 | bool intel_engine_stopped(struct intel_engine_cs *engine) |
63 | { | 57 | { |
64 | struct drm_i915_private *dev_priv = engine->dev->dev_private; | 58 | struct drm_i915_private *dev_priv = engine->dev->dev_private; |
@@ -1309,7 +1303,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, | |||
1309 | intel_ring_emit(signaller, seqno); | 1303 | intel_ring_emit(signaller, seqno); |
1310 | intel_ring_emit(signaller, 0); | 1304 | intel_ring_emit(signaller, 0); |
1311 | intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | | 1305 | intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | |
1312 | MI_SEMAPHORE_TARGET(waiter->id)); | 1306 | MI_SEMAPHORE_TARGET(waiter->hw_id)); |
1313 | intel_ring_emit(signaller, 0); | 1307 | intel_ring_emit(signaller, 0); |
1314 | } | 1308 | } |
1315 | 1309 | ||
@@ -1349,7 +1343,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, | |||
1349 | intel_ring_emit(signaller, upper_32_bits(gtt_offset)); | 1343 | intel_ring_emit(signaller, upper_32_bits(gtt_offset)); |
1350 | intel_ring_emit(signaller, seqno); | 1344 | intel_ring_emit(signaller, seqno); |
1351 | intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | | 1345 | intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | |
1352 | MI_SEMAPHORE_TARGET(waiter->id)); | 1346 | MI_SEMAPHORE_TARGET(waiter->hw_id)); |
1353 | intel_ring_emit(signaller, 0); | 1347 | intel_ring_emit(signaller, 0); |
1354 | } | 1348 | } |
1355 | 1349 | ||
@@ -1573,6 +1567,8 @@ pc_render_add_request(struct drm_i915_gem_request *req) | |||
1573 | static void | 1567 | static void |
1574 | gen6_seqno_barrier(struct intel_engine_cs *engine) | 1568 | gen6_seqno_barrier(struct intel_engine_cs *engine) |
1575 | { | 1569 | { |
1570 | struct drm_i915_private *dev_priv = engine->dev->dev_private; | ||
1571 | |||
1576 | /* Workaround to force correct ordering between irq and seqno writes on | 1572 | /* Workaround to force correct ordering between irq and seqno writes on |
1577 | * ivb (and maybe also on snb) by reading from a CS register (like | 1573 | * ivb (and maybe also on snb) by reading from a CS register (like |
1578 | * ACTHD) before reading the status page. | 1574 | * ACTHD) before reading the status page. |
@@ -1584,9 +1580,13 @@ gen6_seqno_barrier(struct intel_engine_cs *engine) | |||
1584 | * the write time to land, but that would incur a delay after every | 1580 | * the write time to land, but that would incur a delay after every |
1585 | * batch i.e. much more frequent than a delay when waiting for the | 1581 | * batch i.e. much more frequent than a delay when waiting for the |
1586 | * interrupt (with the same net latency). | 1582 | * interrupt (with the same net latency). |
1583 | * | ||
1584 | * Also note that to prevent whole machine hangs on gen7, we have to | ||
1585 | * take the spinlock to guard against concurrent cacheline access. | ||
1587 | */ | 1586 | */ |
1588 | struct drm_i915_private *dev_priv = engine->dev->dev_private; | 1587 | spin_lock_irq(&dev_priv->uncore.lock); |
1589 | POSTING_READ_FW(RING_ACTHD(engine->mmio_base)); | 1588 | POSTING_READ_FW(RING_ACTHD(engine->mmio_base)); |
1589 | spin_unlock_irq(&dev_priv->uncore.lock); | ||
1590 | } | 1590 | } |
1591 | 1591 | ||
1592 | static u32 | 1592 | static u32 |
@@ -2312,51 +2312,6 @@ void intel_cleanup_engine(struct intel_engine_cs *engine) | |||
2312 | engine->dev = NULL; | 2312 | engine->dev = NULL; |
2313 | } | 2313 | } |
2314 | 2314 | ||
2315 | static int ring_wait_for_space(struct intel_engine_cs *engine, int n) | ||
2316 | { | ||
2317 | struct intel_ringbuffer *ringbuf = engine->buffer; | ||
2318 | struct drm_i915_gem_request *request; | ||
2319 | unsigned space; | ||
2320 | int ret; | ||
2321 | |||
2322 | if (intel_ring_space(ringbuf) >= n) | ||
2323 | return 0; | ||
2324 | |||
2325 | /* The whole point of reserving space is to not wait! */ | ||
2326 | WARN_ON(ringbuf->reserved_in_use); | ||
2327 | |||
2328 | list_for_each_entry(request, &engine->request_list, list) { | ||
2329 | space = __intel_ring_space(request->postfix, ringbuf->tail, | ||
2330 | ringbuf->size); | ||
2331 | if (space >= n) | ||
2332 | break; | ||
2333 | } | ||
2334 | |||
2335 | if (WARN_ON(&request->list == &engine->request_list)) | ||
2336 | return -ENOSPC; | ||
2337 | |||
2338 | ret = i915_wait_request(request); | ||
2339 | if (ret) | ||
2340 | return ret; | ||
2341 | |||
2342 | ringbuf->space = space; | ||
2343 | return 0; | ||
2344 | } | ||
2345 | |||
2346 | static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf) | ||
2347 | { | ||
2348 | uint32_t __iomem *virt; | ||
2349 | int rem = ringbuf->size - ringbuf->tail; | ||
2350 | |||
2351 | virt = ringbuf->virtual_start + ringbuf->tail; | ||
2352 | rem /= 4; | ||
2353 | while (rem--) | ||
2354 | iowrite32(MI_NOOP, virt++); | ||
2355 | |||
2356 | ringbuf->tail = 0; | ||
2357 | intel_ring_update_space(ringbuf); | ||
2358 | } | ||
2359 | |||
2360 | int intel_engine_idle(struct intel_engine_cs *engine) | 2315 | int intel_engine_idle(struct intel_engine_cs *engine) |
2361 | { | 2316 | { |
2362 | struct drm_i915_gem_request *req; | 2317 | struct drm_i915_gem_request *req; |
@@ -2398,63 +2353,82 @@ int intel_ring_reserve_space(struct drm_i915_gem_request *request) | |||
2398 | 2353 | ||
2399 | void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size) | 2354 | void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size) |
2400 | { | 2355 | { |
2401 | WARN_ON(ringbuf->reserved_size); | 2356 | GEM_BUG_ON(ringbuf->reserved_size); |
2402 | WARN_ON(ringbuf->reserved_in_use); | ||
2403 | |||
2404 | ringbuf->reserved_size = size; | 2357 | ringbuf->reserved_size = size; |
2405 | } | 2358 | } |
2406 | 2359 | ||
2407 | void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf) | 2360 | void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf) |
2408 | { | 2361 | { |
2409 | WARN_ON(ringbuf->reserved_in_use); | 2362 | GEM_BUG_ON(!ringbuf->reserved_size); |
2410 | |||
2411 | ringbuf->reserved_size = 0; | 2363 | ringbuf->reserved_size = 0; |
2412 | ringbuf->reserved_in_use = false; | ||
2413 | } | 2364 | } |
2414 | 2365 | ||
2415 | void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf) | 2366 | void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf) |
2416 | { | 2367 | { |
2417 | WARN_ON(ringbuf->reserved_in_use); | 2368 | GEM_BUG_ON(!ringbuf->reserved_size); |
2418 | 2369 | ringbuf->reserved_size = 0; | |
2419 | ringbuf->reserved_in_use = true; | ||
2420 | ringbuf->reserved_tail = ringbuf->tail; | ||
2421 | } | 2370 | } |
2422 | 2371 | ||
2423 | void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf) | 2372 | void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf) |
2424 | { | 2373 | { |
2425 | WARN_ON(!ringbuf->reserved_in_use); | 2374 | GEM_BUG_ON(ringbuf->reserved_size); |
2426 | if (ringbuf->tail > ringbuf->reserved_tail) { | 2375 | } |
2427 | WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size, | 2376 | |
2428 | "request reserved size too small: %d vs %d!\n", | 2377 | static int wait_for_space(struct drm_i915_gem_request *req, int bytes) |
2429 | ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size); | 2378 | { |
2430 | } else { | 2379 | struct intel_ringbuffer *ringbuf = req->ringbuf; |
2380 | struct intel_engine_cs *engine = req->engine; | ||
2381 | struct drm_i915_gem_request *target; | ||
2382 | |||
2383 | intel_ring_update_space(ringbuf); | ||
2384 | if (ringbuf->space >= bytes) | ||
2385 | return 0; | ||
2386 | |||
2387 | /* | ||
2388 | * Space is reserved in the ringbuffer for finalising the request, | ||
2389 | * as that cannot be allowed to fail. During request finalisation, | ||
2390 | * reserved_space is set to 0 to stop the overallocation and the | ||
2391 | * assumption is that then we never need to wait (which has the | ||
2392 | * risk of failing with EINTR). | ||
2393 | * | ||
2394 | * See also i915_gem_request_alloc() and i915_add_request(). | ||
2395 | */ | ||
2396 | GEM_BUG_ON(!ringbuf->reserved_size); | ||
2397 | |||
2398 | list_for_each_entry(target, &engine->request_list, list) { | ||
2399 | unsigned space; | ||
2400 | |||
2431 | /* | 2401 | /* |
2432 | * The ring was wrapped while the reserved space was in use. | 2402 | * The request queue is per-engine, so can contain requests |
2433 | * That means that some unknown amount of the ring tail was | 2403 | * from multiple ringbuffers. Here, we must ignore any that |
2434 | * no-op filled and skipped. Thus simply adding the ring size | 2404 | * aren't from the ringbuffer we're considering. |
2435 | * to the tail and doing the above space check will not work. | ||
2436 | * Rather than attempt to track how much tail was skipped, | ||
2437 | * it is much simpler to say that also skipping the sanity | ||
2438 | * check every once in a while is not a big issue. | ||
2439 | */ | 2405 | */ |
2406 | if (target->ringbuf != ringbuf) | ||
2407 | continue; | ||
2408 | |||
2409 | /* Would completion of this request free enough space? */ | ||
2410 | space = __intel_ring_space(target->postfix, ringbuf->tail, | ||
2411 | ringbuf->size); | ||
2412 | if (space >= bytes) | ||
2413 | break; | ||
2440 | } | 2414 | } |
2441 | 2415 | ||
2442 | ringbuf->reserved_size = 0; | 2416 | if (WARN_ON(&target->list == &engine->request_list)) |
2443 | ringbuf->reserved_in_use = false; | 2417 | return -ENOSPC; |
2418 | |||
2419 | return i915_wait_request(target); | ||
2444 | } | 2420 | } |
2445 | 2421 | ||
2446 | static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes) | 2422 | int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) |
2447 | { | 2423 | { |
2448 | struct intel_ringbuffer *ringbuf = engine->buffer; | 2424 | struct intel_ringbuffer *ringbuf = req->ringbuf; |
2449 | int remain_usable = ringbuf->effective_size - ringbuf->tail; | ||
2450 | int remain_actual = ringbuf->size - ringbuf->tail; | 2425 | int remain_actual = ringbuf->size - ringbuf->tail; |
2451 | int ret, total_bytes, wait_bytes = 0; | 2426 | int remain_usable = ringbuf->effective_size - ringbuf->tail; |
2427 | int bytes = num_dwords * sizeof(u32); | ||
2428 | int total_bytes, wait_bytes; | ||
2452 | bool need_wrap = false; | 2429 | bool need_wrap = false; |
2453 | 2430 | ||
2454 | if (ringbuf->reserved_in_use) | 2431 | total_bytes = bytes + ringbuf->reserved_size; |
2455 | total_bytes = bytes; | ||
2456 | else | ||
2457 | total_bytes = bytes + ringbuf->reserved_size; | ||
2458 | 2432 | ||
2459 | if (unlikely(bytes > remain_usable)) { | 2433 | if (unlikely(bytes > remain_usable)) { |
2460 | /* | 2434 | /* |
@@ -2463,44 +2437,42 @@ static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes) | |||
2463 | */ | 2437 | */ |
2464 | wait_bytes = remain_actual + total_bytes; | 2438 | wait_bytes = remain_actual + total_bytes; |
2465 | need_wrap = true; | 2439 | need_wrap = true; |
2440 | } else if (unlikely(total_bytes > remain_usable)) { | ||
2441 | /* | ||
2442 | * The base request will fit but the reserved space | ||
2443 | * falls off the end. So we don't need an immediate wrap | ||
2444 | * and only need to effectively wait for the reserved | ||
2445 | * size space from the start of ringbuffer. | ||
2446 | */ | ||
2447 | wait_bytes = remain_actual + ringbuf->reserved_size; | ||
2466 | } else { | 2448 | } else { |
2467 | if (unlikely(total_bytes > remain_usable)) { | 2449 | /* No wrapping required, just waiting. */ |
2468 | /* | 2450 | wait_bytes = total_bytes; |
2469 | * The base request will fit but the reserved space | ||
2470 | * falls off the end. So don't need an immediate wrap | ||
2471 | * and only need to effectively wait for the reserved | ||
2472 | * size space from the start of ringbuffer. | ||
2473 | */ | ||
2474 | wait_bytes = remain_actual + ringbuf->reserved_size; | ||
2475 | } else if (total_bytes > ringbuf->space) { | ||
2476 | /* No wrapping required, just waiting. */ | ||
2477 | wait_bytes = total_bytes; | ||
2478 | } | ||
2479 | } | 2451 | } |
2480 | 2452 | ||
2481 | if (wait_bytes) { | 2453 | if (wait_bytes > ringbuf->space) { |
2482 | ret = ring_wait_for_space(engine, wait_bytes); | 2454 | int ret = wait_for_space(req, wait_bytes); |
2483 | if (unlikely(ret)) | 2455 | if (unlikely(ret)) |
2484 | return ret; | 2456 | return ret; |
2485 | 2457 | ||
2486 | if (need_wrap) | 2458 | intel_ring_update_space(ringbuf); |
2487 | __wrap_ring_buffer(ringbuf); | 2459 | if (unlikely(ringbuf->space < wait_bytes)) |
2460 | return -EAGAIN; | ||
2488 | } | 2461 | } |
2489 | 2462 | ||
2490 | return 0; | 2463 | if (unlikely(need_wrap)) { |
2491 | } | 2464 | GEM_BUG_ON(remain_actual > ringbuf->space); |
2465 | GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size); | ||
2492 | 2466 | ||
2493 | int intel_ring_begin(struct drm_i915_gem_request *req, | 2467 | /* Fill the tail with MI_NOOP */ |
2494 | int num_dwords) | 2468 | memset(ringbuf->virtual_start + ringbuf->tail, |
2495 | { | 2469 | 0, remain_actual); |
2496 | struct intel_engine_cs *engine = req->engine; | 2470 | ringbuf->tail = 0; |
2497 | int ret; | 2471 | ringbuf->space -= remain_actual; |
2498 | 2472 | } | |
2499 | ret = __intel_ring_prepare(engine, num_dwords * sizeof(uint32_t)); | ||
2500 | if (ret) | ||
2501 | return ret; | ||
2502 | 2473 | ||
2503 | engine->buffer->space -= num_dwords * sizeof(uint32_t); | 2474 | ringbuf->space -= bytes; |
2475 | GEM_BUG_ON(ringbuf->space < 0); | ||
2504 | return 0; | 2476 | return 0; |
2505 | } | 2477 | } |
2506 | 2478 | ||
@@ -2772,6 +2744,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) | |||
2772 | engine->name = "render ring"; | 2744 | engine->name = "render ring"; |
2773 | engine->id = RCS; | 2745 | engine->id = RCS; |
2774 | engine->exec_id = I915_EXEC_RENDER; | 2746 | engine->exec_id = I915_EXEC_RENDER; |
2747 | engine->hw_id = 0; | ||
2775 | engine->mmio_base = RENDER_RING_BASE; | 2748 | engine->mmio_base = RENDER_RING_BASE; |
2776 | 2749 | ||
2777 | if (INTEL_INFO(dev)->gen >= 8) { | 2750 | if (INTEL_INFO(dev)->gen >= 8) { |
@@ -2923,6 +2896,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) | |||
2923 | engine->name = "bsd ring"; | 2896 | engine->name = "bsd ring"; |
2924 | engine->id = VCS; | 2897 | engine->id = VCS; |
2925 | engine->exec_id = I915_EXEC_BSD; | 2898 | engine->exec_id = I915_EXEC_BSD; |
2899 | engine->hw_id = 1; | ||
2926 | 2900 | ||
2927 | engine->write_tail = ring_write_tail; | 2901 | engine->write_tail = ring_write_tail; |
2928 | if (INTEL_INFO(dev)->gen >= 6) { | 2902 | if (INTEL_INFO(dev)->gen >= 6) { |
@@ -3001,6 +2975,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev) | |||
3001 | engine->name = "bsd2 ring"; | 2975 | engine->name = "bsd2 ring"; |
3002 | engine->id = VCS2; | 2976 | engine->id = VCS2; |
3003 | engine->exec_id = I915_EXEC_BSD; | 2977 | engine->exec_id = I915_EXEC_BSD; |
2978 | engine->hw_id = 4; | ||
3004 | 2979 | ||
3005 | engine->write_tail = ring_write_tail; | 2980 | engine->write_tail = ring_write_tail; |
3006 | engine->mmio_base = GEN8_BSD2_RING_BASE; | 2981 | engine->mmio_base = GEN8_BSD2_RING_BASE; |
@@ -3033,6 +3008,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) | |||
3033 | engine->name = "blitter ring"; | 3008 | engine->name = "blitter ring"; |
3034 | engine->id = BCS; | 3009 | engine->id = BCS; |
3035 | engine->exec_id = I915_EXEC_BLT; | 3010 | engine->exec_id = I915_EXEC_BLT; |
3011 | engine->hw_id = 2; | ||
3036 | 3012 | ||
3037 | engine->mmio_base = BLT_RING_BASE; | 3013 | engine->mmio_base = BLT_RING_BASE; |
3038 | engine->write_tail = ring_write_tail; | 3014 | engine->write_tail = ring_write_tail; |
@@ -3092,6 +3068,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev) | |||
3092 | engine->name = "video enhancement ring"; | 3068 | engine->name = "video enhancement ring"; |
3093 | engine->id = VECS; | 3069 | engine->id = VECS; |
3094 | engine->exec_id = I915_EXEC_VEBOX; | 3070 | engine->exec_id = I915_EXEC_VEBOX; |
3071 | engine->hw_id = 3; | ||
3095 | 3072 | ||
3096 | engine->mmio_base = VEBOX_RING_BASE; | 3073 | engine->mmio_base = VEBOX_RING_BASE; |
3097 | engine->write_tail = ring_write_tail; | 3074 | engine->write_tail = ring_write_tail; |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 2ade194bbea9..ff126485d398 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -108,8 +108,6 @@ struct intel_ringbuffer { | |||
108 | int size; | 108 | int size; |
109 | int effective_size; | 109 | int effective_size; |
110 | int reserved_size; | 110 | int reserved_size; |
111 | int reserved_tail; | ||
112 | bool reserved_in_use; | ||
113 | 111 | ||
114 | /** We track the position of the requests in the ring buffer, and | 112 | /** We track the position of the requests in the ring buffer, and |
115 | * when each is retired we increment last_retired_head as the GPU | 113 | * when each is retired we increment last_retired_head as the GPU |
@@ -156,7 +154,8 @@ struct intel_engine_cs { | |||
156 | #define I915_NUM_ENGINES 5 | 154 | #define I915_NUM_ENGINES 5 |
157 | #define _VCS(n) (VCS + (n)) | 155 | #define _VCS(n) (VCS + (n)) |
158 | unsigned int exec_id; | 156 | unsigned int exec_id; |
159 | unsigned int guc_id; | 157 | unsigned int hw_id; |
158 | unsigned int guc_id; /* XXX same as hw_id? */ | ||
160 | u32 mmio_base; | 159 | u32 mmio_base; |
161 | struct drm_device *dev; | 160 | struct drm_device *dev; |
162 | struct intel_ringbuffer *buffer; | 161 | struct intel_ringbuffer *buffer; |
@@ -459,7 +458,6 @@ static inline void intel_ring_advance(struct intel_engine_cs *engine) | |||
459 | } | 458 | } |
460 | int __intel_ring_space(int head, int tail, int size); | 459 | int __intel_ring_space(int head, int tail, int size); |
461 | void intel_ring_update_space(struct intel_ringbuffer *ringbuf); | 460 | void intel_ring_update_space(struct intel_ringbuffer *ringbuf); |
462 | int intel_ring_space(struct intel_ringbuffer *ringbuf); | ||
463 | bool intel_engine_stopped(struct intel_engine_cs *engine); | 461 | bool intel_engine_stopped(struct intel_engine_cs *engine); |
464 | 462 | ||
465 | int __must_check intel_engine_idle(struct intel_engine_cs *engine); | 463 | int __must_check intel_engine_idle(struct intel_engine_cs *engine); |
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index 9ff1e960d617..c15051de8023 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h | |||
@@ -740,6 +740,7 @@ struct bdb_psr { | |||
740 | #define DEVICE_TYPE_INT_TV 0x1009 | 740 | #define DEVICE_TYPE_INT_TV 0x1009 |
741 | #define DEVICE_TYPE_HDMI 0x60D2 | 741 | #define DEVICE_TYPE_HDMI 0x60D2 |
742 | #define DEVICE_TYPE_DP 0x68C6 | 742 | #define DEVICE_TYPE_DP 0x68C6 |
743 | #define DEVICE_TYPE_DP_DUAL_MODE 0x60D6 | ||
743 | #define DEVICE_TYPE_eDP 0x78C6 | 744 | #define DEVICE_TYPE_eDP 0x78C6 |
744 | 745 | ||
745 | #define DEVICE_TYPE_CLASS_EXTENSION (1 << 15) | 746 | #define DEVICE_TYPE_CLASS_EXTENSION (1 << 15) |
@@ -774,6 +775,17 @@ struct bdb_psr { | |||
774 | DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ | 775 | DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ |
775 | DEVICE_TYPE_ANALOG_OUTPUT) | 776 | DEVICE_TYPE_ANALOG_OUTPUT) |
776 | 777 | ||
778 | #define DEVICE_TYPE_DP_DUAL_MODE_BITS \ | ||
779 | (DEVICE_TYPE_INTERNAL_CONNECTOR | \ | ||
780 | DEVICE_TYPE_MIPI_OUTPUT | \ | ||
781 | DEVICE_TYPE_COMPOSITE_OUTPUT | \ | ||
782 | DEVICE_TYPE_LVDS_SINGALING | \ | ||
783 | DEVICE_TYPE_TMDS_DVI_SIGNALING | \ | ||
784 | DEVICE_TYPE_VIDEO_SIGNALING | \ | ||
785 | DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ | ||
786 | DEVICE_TYPE_DIGITAL_OUTPUT | \ | ||
787 | DEVICE_TYPE_ANALOG_OUTPUT) | ||
788 | |||
777 | /* define the DVO port for HDMI output type */ | 789 | /* define the DVO port for HDMI output type */ |
778 | #define DVO_B 1 | 790 | #define DVO_B 1 |
779 | #define DVO_C 2 | 791 | #define DVO_C 2 |
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 1080019e7b17..1f14b602882b 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <drm/drm_fb_cma_helper.h> | 25 | #include <drm/drm_fb_cma_helper.h> |
26 | #include <drm/drm_plane_helper.h> | 26 | #include <drm/drm_plane_helper.h> |
27 | #include <drm/drm_of.h> | 27 | #include <drm/drm_of.h> |
28 | #include <video/imx-ipu-v3.h> | ||
28 | 29 | ||
29 | #include "imx-drm.h" | 30 | #include "imx-drm.h" |
30 | 31 | ||
@@ -437,6 +438,13 @@ static int compare_of(struct device *dev, void *data) | |||
437 | { | 438 | { |
438 | struct device_node *np = data; | 439 | struct device_node *np = data; |
439 | 440 | ||
441 | /* Special case for DI, dev->of_node may not be set yet */ | ||
442 | if (strcmp(dev->driver->name, "imx-ipuv3-crtc") == 0) { | ||
443 | struct ipu_client_platformdata *pdata = dev->platform_data; | ||
444 | |||
445 | return pdata->of_node == np; | ||
446 | } | ||
447 | |||
440 | /* Special case for LDB, one device for two channels */ | 448 | /* Special case for LDB, one device for two channels */ |
441 | if (of_node_cmp(np->name, "lvds-channel") == 0) { | 449 | if (of_node_cmp(np->name, "lvds-channel") == 0) { |
442 | np = of_get_parent(np); | 450 | np = of_get_parent(np); |
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index dee8e8b3523b..b2c30b8d9816 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c | |||
@@ -473,7 +473,7 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, | |||
473 | 473 | ||
474 | ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc, | 474 | ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc, |
475 | &ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs, | 475 | &ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs, |
476 | ipu_crtc->dev->of_node); | 476 | pdata->of_node); |
477 | if (ret) { | 477 | if (ret) { |
478 | dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret); | 478 | dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret); |
479 | goto err_put_resources; | 479 | goto err_put_resources; |
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index d0240743a17c..a7e978677937 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c | |||
@@ -2164,7 +2164,7 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev, | |||
2164 | if (pi->caps_stable_p_state) { | 2164 | if (pi->caps_stable_p_state) { |
2165 | stable_p_state_sclk = (max_limits->sclk * 75) / 100; | 2165 | stable_p_state_sclk = (max_limits->sclk * 75) / 100; |
2166 | 2166 | ||
2167 | for (i = table->count - 1; i >= 0; i++) { | 2167 | for (i = table->count - 1; i >= 0; i--) { |
2168 | if (stable_p_state_sclk >= table->entries[i].clk) { | 2168 | if (stable_p_state_sclk >= table->entries[i].clk) { |
2169 | stable_p_state_sclk = table->entries[i].clk; | 2169 | stable_p_state_sclk = table->entries[i].clk; |
2170 | break; | 2170 | break; |
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index abb98c77bad2..99dcacf05b99 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c | |||
@@ -997,7 +997,7 @@ struct ipu_platform_reg { | |||
997 | }; | 997 | }; |
998 | 998 | ||
999 | /* These must be in the order of the corresponding device tree port nodes */ | 999 | /* These must be in the order of the corresponding device tree port nodes */ |
1000 | static const struct ipu_platform_reg client_reg[] = { | 1000 | static struct ipu_platform_reg client_reg[] = { |
1001 | { | 1001 | { |
1002 | .pdata = { | 1002 | .pdata = { |
1003 | .csi = 0, | 1003 | .csi = 0, |
@@ -1048,7 +1048,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base) | |||
1048 | mutex_unlock(&ipu_client_id_mutex); | 1048 | mutex_unlock(&ipu_client_id_mutex); |
1049 | 1049 | ||
1050 | for (i = 0; i < ARRAY_SIZE(client_reg); i++) { | 1050 | for (i = 0; i < ARRAY_SIZE(client_reg); i++) { |
1051 | const struct ipu_platform_reg *reg = &client_reg[i]; | 1051 | struct ipu_platform_reg *reg = &client_reg[i]; |
1052 | struct platform_device *pdev; | 1052 | struct platform_device *pdev; |
1053 | struct device_node *of_node; | 1053 | struct device_node *of_node; |
1054 | 1054 | ||
@@ -1070,6 +1070,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base) | |||
1070 | 1070 | ||
1071 | pdev->dev.parent = dev; | 1071 | pdev->dev.parent = dev; |
1072 | 1072 | ||
1073 | reg->pdata.of_node = of_node; | ||
1073 | ret = platform_device_add_data(pdev, ®->pdata, | 1074 | ret = platform_device_add_data(pdev, ®->pdata, |
1074 | sizeof(reg->pdata)); | 1075 | sizeof(reg->pdata)); |
1075 | if (!ret) | 1076 | if (!ret) |
diff --git a/include/drm/drm_dp_dual_mode_helper.h b/include/drm/drm_dp_dual_mode_helper.h new file mode 100644 index 000000000000..e8a9dfd0e055 --- /dev/null +++ b/include/drm/drm_dp_dual_mode_helper.h | |||
@@ -0,0 +1,92 @@ | |||
1 | /* | ||
2 | * Copyright © 2016 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | */ | ||
22 | |||
23 | #ifndef DRM_DP_DUAL_MODE_HELPER_H | ||
24 | #define DRM_DP_DUAL_MODE_HELPER_H | ||
25 | |||
26 | #include <linux/types.h> | ||
27 | |||
28 | /* | ||
29 | * Optional for type 1 DVI adaptors | ||
30 | * Mandatory for type 1 HDMI and type 2 adaptors | ||
31 | */ | ||
32 | #define DP_DUAL_MODE_HDMI_ID 0x00 /* 00-0f */ | ||
33 | #define DP_DUAL_MODE_HDMI_ID_LEN 16 | ||
34 | /* | ||
35 | * Optional for type 1 adaptors | ||
36 | * Mandatory for type 2 adaptors | ||
37 | */ | ||
38 | #define DP_DUAL_MODE_ADAPTOR_ID 0x10 | ||
39 | #define DP_DUAL_MODE_REV_MASK 0x07 | ||
40 | #define DP_DUAL_MODE_REV_TYPE2 0x00 | ||
41 | #define DP_DUAL_MODE_TYPE_MASK 0xf0 | ||
42 | #define DP_DUAL_MODE_TYPE_TYPE2 0xa0 | ||
43 | #define DP_DUAL_MODE_IEEE_OUI 0x11 /* 11-13*/ | ||
44 | #define DP_DUAL_IEEE_OUI_LEN 3 | ||
45 | #define DP_DUAL_DEVICE_ID 0x14 /* 14-19 */ | ||
46 | #define DP_DUAL_DEVICE_ID_LEN 6 | ||
47 | #define DP_DUAL_MODE_HARDWARE_REV 0x1a | ||
48 | #define DP_DUAL_MODE_FIRMWARE_MAJOR_REV 0x1b | ||
49 | #define DP_DUAL_MODE_FIRMWARE_MINOR_REV 0x1c | ||
50 | #define DP_DUAL_MODE_MAX_TMDS_CLOCK 0x1d | ||
51 | #define DP_DUAL_MODE_I2C_SPEED_CAP 0x1e | ||
52 | #define DP_DUAL_MODE_TMDS_OEN 0x20 | ||
53 | #define DP_DUAL_MODE_TMDS_DISABLE 0x01 | ||
54 | #define DP_DUAL_MODE_HDMI_PIN_CTRL 0x21 | ||
55 | #define DP_DUAL_MODE_CEC_ENABLE 0x01 | ||
56 | #define DP_DUAL_MODE_I2C_SPEED_CTRL 0x22 | ||
57 | |||
58 | struct i2c_adapter; | ||
59 | |||
60 | ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter, | ||
61 | u8 offset, void *buffer, size_t size); | ||
62 | ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter, | ||
63 | u8 offset, const void *buffer, size_t size); | ||
64 | |||
65 | /** | ||
66 | * enum drm_dp_dual_mode_type - Type of the DP dual mode adaptor | ||
67 | * @DRM_DP_DUAL_MODE_NONE: No DP dual mode adaptor | ||
68 | * @DRM_DP_DUAL_MODE_UNKNOWN: Could be either none or type 1 DVI adaptor | ||
69 | * @DRM_DP_DUAL_MODE_TYPE1_DVI: Type 1 DVI adaptor | ||
70 | * @DRM_DP_DUAL_MODE_TYPE1_HDMI: Type 1 HDMI adaptor | ||
71 | * @DRM_DP_DUAL_MODE_TYPE2_DVI: Type 2 DVI adaptor | ||
72 | * @DRM_DP_DUAL_MODE_TYPE2_HDMI: Type 2 HDMI adaptor | ||
73 | */ | ||
74 | enum drm_dp_dual_mode_type { | ||
75 | DRM_DP_DUAL_MODE_NONE, | ||
76 | DRM_DP_DUAL_MODE_UNKNOWN, | ||
77 | DRM_DP_DUAL_MODE_TYPE1_DVI, | ||
78 | DRM_DP_DUAL_MODE_TYPE1_HDMI, | ||
79 | DRM_DP_DUAL_MODE_TYPE2_DVI, | ||
80 | DRM_DP_DUAL_MODE_TYPE2_HDMI, | ||
81 | }; | ||
82 | |||
83 | enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter); | ||
84 | int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type, | ||
85 | struct i2c_adapter *adapter); | ||
86 | int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type, | ||
87 | struct i2c_adapter *adapter, bool *enabled); | ||
88 | int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type, | ||
89 | struct i2c_adapter *adapter, bool enable); | ||
90 | const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type); | ||
91 | |||
92 | #endif | ||
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index ad66589f2ae6..3a2a79401789 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/videodev2.h> | 16 | #include <linux/videodev2.h> |
17 | #include <linux/bitmap.h> | 17 | #include <linux/bitmap.h> |
18 | #include <linux/fb.h> | 18 | #include <linux/fb.h> |
19 | #include <linux/of.h> | ||
19 | #include <media/v4l2-mediabus.h> | 20 | #include <media/v4l2-mediabus.h> |
20 | #include <video/videomode.h> | 21 | #include <video/videomode.h> |
21 | 22 | ||
@@ -345,6 +346,7 @@ struct ipu_client_platformdata { | |||
345 | int dc; | 346 | int dc; |
346 | int dp; | 347 | int dp; |
347 | int dma[2]; | 348 | int dma[2]; |
349 | struct device_node *of_node; | ||
348 | }; | 350 | }; |
349 | 351 | ||
350 | #endif /* __DRM_IPU_H__ */ | 352 | #endif /* __DRM_IPU_H__ */ |