author    Linus Torvalds <torvalds@linux-foundation.org>  2017-12-01 08:10:09 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-12-01 08:10:09 -0500
commit    5dc9cbc4f10d7bc5aaa17ec0accf4c6e24d9ecd6 (patch)
tree      e7ade7368ce4bf78f33a8b1974411cc808f2b72a
parent    75f64f68afa165ebe139cca2adb4df0a229a06de (diff)
parent    503505bfea19b7d69e2572297e6defa0f9c2404e (diff)
Merge tag 'drm-fixes-for-v4.15-rc2' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes and cleanups from Dave Airlie:
"The main thing are a bunch of fixes for the new amd display code, a
bunch of smatch fixes.
core:
- Atomic helper regression fix.
- Deferred fbdev fallout regression fix.
amdgpu:
- New display code (dc) dpms, suspend/resume and smatch fixes, along
with some others
- Some regression fixes for amdkfd/radeon.
- Fix a ttm regression for swiotlb disabled
bridge:
- A bunch of fixes for the tc358767 bridge
mali-dp + hdlcd:
- some fixes and internal API catchups.
imx-drm:
- regression fix in atomic code.
omapdrm:
- platform detection regression fixes"
* tag 'drm-fixes-for-v4.15-rc2' of git://people.freedesktop.org/~airlied/linux: (76 commits)
drm/imx: always call wait_for_flip_done in commit_tail
omapdrm: hdmi4_cec: signedness bug in hdmi4_cec_init()
drm: omapdrm: Fix DPI on platforms using the DSI VDDS
omapdrm: hdmi4: Correct the SoC revision matching
drm/omap: displays: panel-dpi: add backlight dependency
drm/omap: Fix error handling path in 'omap_dmm_probe()'
drm/i915: Disable THP until we have a GPU read BW W/A
drm/bridge: tc358767: fix 1-lane behavior
drm/bridge: tc358767: fix AUXDATAn registers access
drm/bridge: tc358767: fix timing calculations
drm/bridge: tc358767: fix DP0_MISC register set
drm/bridge: tc358767: filter out too high modes
drm/bridge: tc358767: do no fail on hi-res displays
drm/bridge: Fix lvds-encoder since the panel_bridge rework.
drm/bridge: synopsys/dw-hdmi: Enable cec clock
drm/bridge: adv7511/33: Fix adv7511_cec_init() failure handling
drm/radeon: remove init of CIK VMIDs 8-16 for amdkfd
drm/ttm: fix populate_and_map() functions once more
drm/fb_helper: Disable all crtc's when initial setup fails.
drm/atomic: make drm_atomic_helper_wait_for_vblanks more agressive
...
67 files changed, 814 insertions, 483 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5afaf6016b4a..0b14b5373783 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -717,7 +717,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
             struct amdgpu_queue_mgr *mgr);
 int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
            struct amdgpu_queue_mgr *mgr,
-           int hw_ip, int instance, int ring,
+           u32 hw_ip, u32 instance, u32 ring,
            struct amdgpu_ring **out_ring);
 
 /*
@@ -1572,18 +1572,14 @@ struct amdgpu_device {
     /* sdma */
     struct amdgpu_sdma sdma;
 
-    union {
-        struct {
-            /* uvd */
-            struct amdgpu_uvd uvd;
+    /* uvd */
+    struct amdgpu_uvd uvd;
 
-            /* vce */
-            struct amdgpu_vce vce;
-        };
+    /* vce */
+    struct amdgpu_vce vce;
 
-        /* vcn */
-        struct amdgpu_vcn vcn;
-    };
+    /* vcn */
+    struct amdgpu_vcn vcn;
 
     /* firmwares */
     struct amdgpu_firmware firmware;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 47d1c132ac40..1e3e9be7d77e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -379,29 +379,50 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
 {
     struct amdgpu_device *adev = get_amdgpu_device(kgd);
     struct cik_sdma_rlc_registers *m;
+    unsigned long end_jiffies;
     uint32_t sdma_base_addr;
+    uint32_t data;
 
     m = get_sdma_mqd(mqd);
     sdma_base_addr = get_sdma_base_addr(m);
 
-    WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
-            m->sdma_rlc_virtual_addr);
+    WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+        m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
 
-    WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
-            m->sdma_rlc_rb_base);
+    end_jiffies = msecs_to_jiffies(2000) + jiffies;
+    while (true) {
+        data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+        if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+            break;
+        if (time_after(jiffies, end_jiffies))
+            return -ETIME;
+        usleep_range(500, 1000);
+    }
+    if (m->sdma_engine_id) {
+        data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
+        data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
+                RESUME_CTX, 0);
+        WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
+    } else {
+        data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
+        data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
+                RESUME_CTX, 0);
+        WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
+    }
 
+    WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
+            m->sdma_rlc_doorbell);
+    WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+    WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+    WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+            m->sdma_rlc_virtual_addr);
+    WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
     WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
             m->sdma_rlc_rb_base_hi);
-
     WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
             m->sdma_rlc_rb_rptr_addr_lo);
-
     WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
             m->sdma_rlc_rb_rptr_addr_hi);
-
-    WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
-            m->sdma_rlc_doorbell);
-
     WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
             m->sdma_rlc_rb_cntl);
 
@@ -574,9 +595,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
     }
 
     WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
-    WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
-    WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
-    WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+    WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+        RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+        SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
 
     return 0;
 }
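
The kgd_hqd_sdma_load() rewrite above stops the ring (RB_ENABLE cleared), then polls SDMA0_RLC0_CONTEXT_STATUS until the engine reports idle, giving up after a 2 second deadline instead of waiting forever. A minimal, self-contained sketch of that bounded-poll idiom follows; poll_engine_idle(), its register argument and the mask are placeholders for illustration, not amdgpu/kfd API.

#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Poll a memory-mapped status register until idle_mask is set,
 * sleeping between reads, and fail with -ETIME after timeout_ms. */
static int poll_engine_idle(void __iomem *status_reg, u32 idle_mask,
                            unsigned int timeout_ms)
{
        unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

        while (true) {
                if (readl(status_reg) & idle_mask)
                        return 0;               /* engine is idle */
                if (time_after(jiffies, end_jiffies))
                        return -ETIME;          /* bounded wait */
                usleep_range(500, 1000);        /* sleep, don't busy-wait */
        }
}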
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index a57cec737c18..57abf7abd7a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -409,6 +409,10 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
         if (candidate->robj == validated)
             break;
 
+        /* We can't move pinned BOs here */
+        if (bo->pin_count)
+            continue;
+
         other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 
         /* Check if this BO is in one of the domains we need space for */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 2c574374d9b6..3573ecdb06ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1837,9 +1837,6 @@ static int amdgpu_fini(struct amdgpu_device *adev)
         adev->ip_blocks[i].status.hw = false;
     }
 
-    if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
-        amdgpu_ucode_fini_bo(adev);
-
     for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
         if (!adev->ip_blocks[i].status.sw)
             continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index ec96bb1f9eaf..c2f414ffb2cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -536,7 +536,7 @@ static const struct pci_device_id pciidlist[] = {
     {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
     {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
     /* Raven */
-    {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
+    {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
 
     {0, 0, 0}
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 033fba2def6f..5f5aa5fddc16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -164,6 +164,9 @@ static int amdgpu_pp_hw_fini(void *handle)
         ret = adev->powerplay.ip_funcs->hw_fini(
                     adev->powerplay.pp_handle);
 
+    if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+        amdgpu_ucode_fini_bo(adev);
+
     return ret;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 7714f4a6c8b0..447d446b5015 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -442,6 +442,8 @@ static int psp_hw_fini(void *handle)
     if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
         return 0;
 
+    amdgpu_ucode_fini_bo(adev);
+
     psp_ring_destroy(psp, PSP_RING_TYPE__KM);
 
     amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index 190e28cb827e..93d86619e802 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -63,7 +63,7 @@ static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper,
 
 static int amdgpu_identity_map(struct amdgpu_device *adev,
                    struct amdgpu_queue_mapper *mapper,
-                   int ring,
+                   u32 ring,
                    struct amdgpu_ring **out_ring)
 {
     switch (mapper->hw_ip) {
@@ -121,7 +121,7 @@ static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)
 
 static int amdgpu_lru_map(struct amdgpu_device *adev,
               struct amdgpu_queue_mapper *mapper,
-              int user_ring, bool lru_pipe_order,
+              u32 user_ring, bool lru_pipe_order,
               struct amdgpu_ring **out_ring)
 {
     int r, i, j;
@@ -208,7 +208,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
  */
 int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
              struct amdgpu_queue_mgr *mgr,
-             int hw_ip, int instance, int ring,
+             u32 hw_ip, u32 instance, u32 ring,
              struct amdgpu_ring **out_ring)
 {
     int r, ip_num_rings;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 793b1470284d..a296f7bbe57c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1023,22 +1023,101 @@ static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] =
     {mmPA_SC_RASTER_CONFIG_1, true},
 };
 
-static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
-                      u32 se_num, u32 sh_num,
-                      u32 reg_offset)
+
+static uint32_t cik_get_register_value(struct amdgpu_device *adev,
+                       bool indexed, u32 se_num,
+                       u32 sh_num, u32 reg_offset)
 {
-    uint32_t val;
+    if (indexed) {
+        uint32_t val;
+        unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+        unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+        switch (reg_offset) {
+        case mmCC_RB_BACKEND_DISABLE:
+            return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+        case mmGC_USER_RB_BACKEND_DISABLE:
+            return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+        case mmPA_SC_RASTER_CONFIG:
+            return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+        case mmPA_SC_RASTER_CONFIG_1:
+            return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
+        }
 
     mutex_lock(&adev->grbm_idx_mutex);
     if (se_num != 0xffffffff || sh_num != 0xffffffff)
         amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
 
     val = RREG32(reg_offset);
 
     if (se_num != 0xffffffff || sh_num != 0xffffffff)
         amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
     mutex_unlock(&adev->grbm_idx_mutex);
     return val;
+    } else {
+        unsigned idx;
+
+        switch (reg_offset) {
+        case mmGB_ADDR_CONFIG:
+            return adev->gfx.config.gb_addr_config;
+        case mmMC_ARB_RAMCFG:
+            return adev->gfx.config.mc_arb_ramcfg;
+        case mmGB_TILE_MODE0:
+        case mmGB_TILE_MODE1:
+        case mmGB_TILE_MODE2:
+        case mmGB_TILE_MODE3:
+        case mmGB_TILE_MODE4:
+        case mmGB_TILE_MODE5:
+        case mmGB_TILE_MODE6:
+        case mmGB_TILE_MODE7:
+        case mmGB_TILE_MODE8:
+        case mmGB_TILE_MODE9:
+        case mmGB_TILE_MODE10:
+        case mmGB_TILE_MODE11:
+        case mmGB_TILE_MODE12:
+        case mmGB_TILE_MODE13:
+        case mmGB_TILE_MODE14:
+        case mmGB_TILE_MODE15:
+        case mmGB_TILE_MODE16:
+        case mmGB_TILE_MODE17:
+        case mmGB_TILE_MODE18:
+        case mmGB_TILE_MODE19:
+        case mmGB_TILE_MODE20:
+        case mmGB_TILE_MODE21:
+        case mmGB_TILE_MODE22:
+        case mmGB_TILE_MODE23:
+        case mmGB_TILE_MODE24:
+        case mmGB_TILE_MODE25:
+        case mmGB_TILE_MODE26:
+        case mmGB_TILE_MODE27:
+        case mmGB_TILE_MODE28:
+        case mmGB_TILE_MODE29:
+        case mmGB_TILE_MODE30:
+        case mmGB_TILE_MODE31:
+            idx = (reg_offset - mmGB_TILE_MODE0);
+            return adev->gfx.config.tile_mode_array[idx];
+        case mmGB_MACROTILE_MODE0:
+        case mmGB_MACROTILE_MODE1:
+        case mmGB_MACROTILE_MODE2:
+        case mmGB_MACROTILE_MODE3:
+        case mmGB_MACROTILE_MODE4:
+        case mmGB_MACROTILE_MODE5:
+        case mmGB_MACROTILE_MODE6:
+        case mmGB_MACROTILE_MODE7:
+        case mmGB_MACROTILE_MODE8:
+        case mmGB_MACROTILE_MODE9:
+        case mmGB_MACROTILE_MODE10:
+        case mmGB_MACROTILE_MODE11:
+        case mmGB_MACROTILE_MODE12:
+        case mmGB_MACROTILE_MODE13:
+        case mmGB_MACROTILE_MODE14:
+        case mmGB_MACROTILE_MODE15:
+            idx = (reg_offset - mmGB_MACROTILE_MODE0);
+            return adev->gfx.config.macrotile_mode_array[idx];
+        default:
+            return RREG32(reg_offset);
+        }
+    }
 }
 
 static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
@@ -1048,13 +1127,13 @@ static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
 
     *value = 0;
     for (i = 0; i < ARRAY_SIZE(cik_allowed_read_registers); i++) {
+        bool indexed = cik_allowed_read_registers[i].grbm_indexed;
+
         if (reg_offset != cik_allowed_read_registers[i].reg_offset)
             continue;
 
-        *value = cik_allowed_read_registers[i].grbm_indexed ?
-            cik_read_indexed_register(adev, se_num,
-                          sh_num, reg_offset) :
-            RREG32(reg_offset);
+        *value = cik_get_register_value(adev, indexed, se_num, sh_num,
+                        reg_offset);
         return 0;
     }
     return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 5c8a7a48a4ad..419ba0ce7ee5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1819,6 +1819,22 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
                            adev->gfx.config.backend_enable_mask,
                            num_rb_pipes);
     }
+
+    /* cache the values for userspace */
+    for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+        for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+            gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+            adev->gfx.config.rb_config[i][j].rb_backend_disable =
+                RREG32(mmCC_RB_BACKEND_DISABLE);
+            adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
+                RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+            adev->gfx.config.rb_config[i][j].raster_config =
+                RREG32(mmPA_SC_RASTER_CONFIG);
+            adev->gfx.config.rb_config[i][j].raster_config_1 =
+                RREG32(mmPA_SC_RASTER_CONFIG_1);
+        }
+    }
+    gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
     mutex_unlock(&adev->grbm_idx_mutex);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 1eb4d79d6e30..0450ac5ba6b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1175,7 +1175,7 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
 
 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-    adev->uvd.irq.num_types = adev->vcn.num_enc_rings + 1;
+    adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
     adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
 }
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 6c5a9cab55de..f744caeaee04 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -24,6 +24,7 @@
 #include <linux/sched.h>
 #include <linux/moduleparam.h>
 #include <linux/device.h>
+#include <linux/printk.h>
 #include "kfd_priv.h"
 
 #define KFD_DRIVER_AUTHOR "AMD Inc. and others"
@@ -132,7 +133,7 @@ static void __exit kfd_module_exit(void)
     kfd_process_destroy_wq();
     kfd_topology_shutdown();
     kfd_chardev_exit();
-    dev_info(kfd_device, "Removed module\n");
+    pr_info("amdkfd: Removed module\n");
 }
 
 module_init(kfd_module_init);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 4859d263fa2a..4728fad3fd74 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -202,8 +202,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
     struct cik_sdma_rlc_registers *m;
 
     m = get_sdma_mqd(mqd);
-    m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
-            SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+    m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
+            << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
             q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
             1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
             6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
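
The kfd_mqd_manager_cik.c hunk above fixes an off-by-one in the ring-buffer size field: RB_SIZE takes the log2 of the queue size in dwords, and for a power of two ffs(x) returns log2(x) + 1, so the old code programmed a ring twice the allocated size. A tiny standalone illustration of the arithmetic (ordinary userspace C, not driver code; the log2 reading of RB_SIZE is inferred from the fix itself):

#include <stdio.h>
#include <strings.h>            /* ffs() */

int main(void)
{
        unsigned int queue_size = 4096;                          /* bytes */
        unsigned int dwords = queue_size / sizeof(unsigned int); /* 1024 */

        /* old field value: ffs(1024)     = 11 -> 2^11 = 2048 dwords (too big) */
        /* new field value: ffs(1024) - 1 = 10 -> 2^10 = 1024 dwords (correct) */
        printf("ffs(%u) = %d, ffs(%u) - 1 = %d\n",
               dwords, ffs(dwords), dwords, ffs(dwords) - 1);
        return 0;
}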
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 2bec902fc939..a3f1e62c60ba 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -191,6 +191,24 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 
     switch (type) {
     case KFD_QUEUE_TYPE_SDMA:
+        if (dev->dqm->queue_count >=
+            CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
+            pr_err("Over-subscription is not allowed for SDMA.\n");
+            retval = -EPERM;
+            goto err_create_queue;
+        }
+
+        retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
+        if (retval != 0)
+            goto err_create_queue;
+        pqn->q = q;
+        pqn->kq = NULL;
+        retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
+                        &q->properties.vmid);
+        pr_debug("DQM returned %d for create_queue\n", retval);
+        print_queue(q);
+        break;
+
     case KFD_QUEUE_TYPE_COMPUTE:
         /* check if there is over subscription */
         if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 889ed24084e8..f71fe6d2ddda 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -520,7 +520,8 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
 
     list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
         aconnector = to_amdgpu_dm_connector(connector);
-        if (aconnector->dc_link->type == dc_connection_mst_branch) {
+        if (aconnector->dc_link->type == dc_connection_mst_branch &&
+            aconnector->mst_mgr.aux) {
             DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
                     aconnector, aconnector->base.base.id);
 
@@ -677,6 +678,10 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
 
         mutex_lock(&aconnector->hpd_lock);
         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+
+        if (aconnector->fake_enable && aconnector->dc_link->local_sink)
+            aconnector->fake_enable = false;
+
         aconnector->dc_sink = NULL;
         amdgpu_dm_update_connector_after_detect(aconnector);
         mutex_unlock(&aconnector->hpd_lock);
@@ -711,7 +716,6 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
 
     ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
 
-    drm_atomic_state_put(adev->dm.cached_state);
     adev->dm.cached_state = NULL;
 
     amdgpu_dm_irq_resume_late(adev);
@@ -2704,7 +2708,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
         .link = aconnector->dc_link,
         .sink_signal = SIGNAL_TYPE_VIRTUAL
     };
-    struct edid *edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+    struct edid *edid;
 
     if (!aconnector->base.edid_blob_ptr ||
         !aconnector->base.edid_blob_ptr->data) {
@@ -2716,6 +2720,8 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
         return;
     }
 
+    edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+
     aconnector->edid = edid;
 
     aconnector->dc_em_sink = dc_link_add_remote_sink(
@@ -4193,13 +4199,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
             update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
                     dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
 
+            if (!dm_new_crtc_state->stream)
+                continue;
+
             status = dc_stream_get_status(dm_new_crtc_state->stream);
             WARN_ON(!status);
             WARN_ON(!status->plane_count);
 
-            if (!dm_new_crtc_state->stream)
-                continue;
-
             /*TODO How it works with MPO ?*/
             if (!dc_commit_planes_to_stream(
                     dm->dc,
@@ -4253,7 +4259,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
     drm_atomic_helper_commit_hw_done(state);
 
     if (wait_for_vblank)
-        drm_atomic_helper_wait_for_vblanks(dev, state);
+        drm_atomic_helper_wait_for_flip_done(dev, state);
 
     drm_atomic_helper_cleanup_planes(dev, state);
 }
@@ -4332,9 +4338,11 @@ void dm_restore_drm_connector_state(struct drm_device *dev,
         return;
 
     disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
-    acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+    if (!disconnected_acrtc)
+        return;
 
-    if (!disconnected_acrtc || !acrtc_state->stream)
+    acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+    if (!acrtc_state->stream)
         return;
 
     /*
@@ -4455,7 +4463,7 @@ static int dm_update_crtcs_state(struct dc *dc,
             }
         }
 
-        if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+        if (enable && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
                 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
 
             new_crtc_state->mode_changed = false;
@@ -4709,7 +4717,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
         }
     } else {
         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-            if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+            if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
+                    !new_crtc_state->color_mgmt_changed)
                 continue;
 
             if (!new_crtc_state->enable)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
index 785b943b60ed..6e43168fbdd6 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
@@ -75,6 +75,9 @@ void dc_conn_log(struct dc_context *ctx,
         if (signal == signal_type_info_tbl[i].type)
             break;
 
+    if (i == NUM_ELEMENTS(signal_type_info_tbl))
+        goto fail;
+
     dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
             signal_type_info_tbl[i].name,
             link->link_index);
@@ -96,6 +99,8 @@ void dc_conn_log(struct dc_context *ctx,
 
     dm_logger_append(&entry, "^\n");
     dm_helpers_dc_conn_log(ctx, &entry, event);
+
+fail:
     dm_logger_close(&entry);
 
     va_end(args);
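
The log_helpers.c change above handles the case where the signal type is not found in signal_type_info_tbl: when the search loop runs off the end, i equals the table length and indexing signal_type_info_tbl[i] would read out of bounds, so the code now jumps to a fail label that still closes the log entry. A generic sketch of that "check the loop index before using it" pattern follows; the table, the NUM_ELEMENTS stand-in and lookup_name() are hypothetical, not DC code.

#include <stddef.h>

struct sig_entry {
        int type;
        const char *name;
};

static const struct sig_entry tbl[] = { { 1, "dp" }, { 2, "hdmi" } };

#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))

const char *lookup_name(int type)
{
        size_t i;

        for (i = 0; i < NUM_ELEMENTS(tbl); i++)
                if (tbl[i].type == type)
                        break;

        if (i == NUM_ELEMENTS(tbl))     /* loop fell off the end: no match */
                return NULL;            /* never index tbl[i] here */

        return tbl[i].name;
}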
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index aaaebd06d7ee..86e6438c5cf3 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -249,7 +249,7 @@ static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
     struct graphics_object_id *dest_object_id)
 {
     uint32_t number;
-    uint16_t *id;
+    uint16_t *id = NULL;
     ATOM_OBJECT *object;
     struct bios_parser *bp = BP_FROM_DCB(dcb);
 
@@ -260,7 +260,7 @@ static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
 
     number = get_dest_obj_list(bp, object, &id);
 
-    if (number <= index)
+    if (number <= index || !id)
         return BP_RESULT_BADINPUT;
 
     *dest_object_id = object_id_from_bios_object_id(id[index]);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index fe63f5894d43..7240db2e6f09 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -121,6 +121,10 @@ static bool create_links(
             goto failed_alloc;
         }
 
+        link->link_index = dc->link_count;
+        dc->links[dc->link_count] = link;
+        dc->link_count++;
+
         link->ctx = dc->ctx;
         link->dc = dc;
         link->connector_signal = SIGNAL_TYPE_VIRTUAL;
@@ -129,6 +133,13 @@ static bool create_links(
         link->link_id.enum_id = ENUM_ID_1;
         link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
 
+        if (!link->link_enc) {
+            BREAK_TO_DEBUGGER();
+            goto failed_alloc;
+        }
+
+        link->link_status.dpcd_caps = &link->dpcd_caps;
+
         enc_init.ctx = dc->ctx;
         enc_init.channel = CHANNEL_ID_UNKNOWN;
         enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
@@ -138,10 +149,6 @@ static bool create_links(
         enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
         enc_init.encoder.enum_id = ENUM_ID_1;
         virtual_link_encoder_construct(link->link_enc, &enc_init);
-
-        link->link_index = dc->link_count;
-        dc->links[dc->link_count] = link;
-        dc->link_count++;
     }
 
     return true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 0602610489d7..e27ed4a45265 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -480,22 +480,6 @@ static void detect_dp(
         sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
         detect_dp_sink_caps(link);
 
-        /* DP active dongles */
-        if (is_dp_active_dongle(link)) {
-            link->type = dc_connection_active_dongle;
-            if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
-                /*
-                 * active dongle unplug processing for short irq
-                 */
-                link_disconnect_sink(link);
-                return;
-            }
-
-            if (link->dpcd_caps.dongle_type !=
-                DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
-                *converter_disable_audio = true;
-            }
-        }
         if (is_mst_supported(link)) {
             sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
             link->type = dc_connection_mst_branch;
@@ -535,6 +519,22 @@ static void detect_dp(
                 sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
             }
         }
+
+        if (link->type != dc_connection_mst_branch &&
+            is_dp_active_dongle(link)) {
+            /* DP active dongles */
+            link->type = dc_connection_active_dongle;
+            if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
+                /*
+                 * active dongle unplug processing for short irq
+                 */
+                link_disconnect_sink(link);
+                return;
+            }
+
+            if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+                *converter_disable_audio = true;
+        }
     } else {
         /* DP passive dongles */
         sink_caps->signal = dp_passive_dongle_detection(link->ddc,
@@ -1801,12 +1801,75 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
         link->link_enc->funcs->disable_output(link->link_enc, signal, link);
 }
 
+bool dp_active_dongle_validate_timing(
+        const struct dc_crtc_timing *timing,
+        const struct dc_dongle_caps *dongle_caps)
+{
+    unsigned int required_pix_clk = timing->pix_clk_khz;
+
+    if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
+        dongle_caps->extendedCapValid == false)
+        return true;
+
+    /* Check Pixel Encoding */
+    switch (timing->pixel_encoding) {
+    case PIXEL_ENCODING_RGB:
+    case PIXEL_ENCODING_YCBCR444:
+        break;
+    case PIXEL_ENCODING_YCBCR422:
+        if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through)
+            return false;
+        break;
+    case PIXEL_ENCODING_YCBCR420:
+        if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through)
+            return false;
+        break;
+    default:
+        /* Invalid Pixel Encoding*/
+        return false;
+    }
+
+
+    /* Check Color Depth and Pixel Clock */
+    if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+        required_pix_clk /= 2;
+
+    switch (timing->display_color_depth) {
+    case COLOR_DEPTH_666:
+    case COLOR_DEPTH_888:
+        /*888 and 666 should always be supported*/
+        break;
+    case COLOR_DEPTH_101010:
+        if (dongle_caps->dp_hdmi_max_bpc < 10)
+            return false;
+        required_pix_clk = required_pix_clk * 10 / 8;
+        break;
+    case COLOR_DEPTH_121212:
+        if (dongle_caps->dp_hdmi_max_bpc < 12)
+            return false;
+        required_pix_clk = required_pix_clk * 12 / 8;
+        break;
+
+    case COLOR_DEPTH_141414:
+    case COLOR_DEPTH_161616:
+    default:
+        /* These color depths are currently not supported */
+        return false;
+    }
+
+    if (required_pix_clk > dongle_caps->dp_hdmi_max_pixel_clk)
+        return false;
+
+    return true;
+}
+
 enum dc_status dc_link_validate_mode_timing(
         const struct dc_stream_state *stream,
         struct dc_link *link,
         const struct dc_crtc_timing *timing)
 {
     uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk;
+    struct dc_dongle_caps *dongle_caps = &link->link_status.dpcd_caps->dongle_caps;
 
     /* A hack to avoid failing any modes for EDID override feature on
      * topology change such as lower quality cable for DP or different dongle
@@ -1814,8 +1877,13 @@ enum dc_status dc_link_validate_mode_timing(
     if (link->remote_sinks[0])
         return DC_OK;
 
+    /* Passive Dongle */
     if (0 != max_pix_clk && timing->pix_clk_khz > max_pix_clk)
-        return DC_EXCEED_DONGLE_MAX_CLK;
+        return DC_EXCEED_DONGLE_CAP;
+
+    /* Active Dongle*/
+    if (!dp_active_dongle_validate_timing(timing, dongle_caps))
+        return DC_EXCEED_DONGLE_CAP;
 
     switch (stream->signal) {
     case SIGNAL_TYPE_EDP:
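
dp_active_dongle_validate_timing(), added above, scales the required pixel clock by the deep-colour overhead (a 10 or 12 bit-per-component stream carries 10/8 or 12/8 times the data of the 8-bit baseline), halves it first for 4:2:0, and rejects the mode if the result exceeds the dongle's advertised dp_hdmi_max_pixel_clk. A small worked example of that arithmetic (standalone C; the 600000 kHz cap is a made-up value, not a real dongle capability):

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
        unsigned int pix_clk_khz = 594000;      /* 4K@60 CTA timing */
        unsigned int dongle_max_khz = 600000;   /* hypothetical dongle cap */

        unsigned int required = pix_clk_khz * 12 / 8;   /* 12 bpc -> 891000 kHz */
        bool ok = required <= dongle_max_khz;

        printf("required %u kHz vs cap %u kHz: %s\n",
               required, dongle_max_khz, ok ? "accept" : "reject");
        return 0;
}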
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index ced42484dcfc..e6bf05d76a94 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1512,7 +1512,7 @@ static bool hpd_rx_irq_check_link_loss_status(
     struct dc_link *link,
     union hpd_irq_data *hpd_irq_dpcd_data)
 {
-    uint8_t irq_reg_rx_power_state;
+    uint8_t irq_reg_rx_power_state = 0;
     enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
     union lane_status lane_status;
     uint32_t lane;
@@ -1524,60 +1524,55 @@ static bool hpd_rx_irq_check_link_loss_status(
 
     if (link->cur_link_settings.lane_count == 0)
         return return_code;
-    /*1. Check that we can handle interrupt: Not in FS DOS,
-     * Not in "Display Timeout" state, Link is trained.
-     */
 
-    dpcd_result = core_link_read_dpcd(link,
-        DP_SET_POWER,
-        &irq_reg_rx_power_state,
-        sizeof(irq_reg_rx_power_state));
+    /*1. Check that Link Status changed, before re-training.*/
 
-    if (dpcd_result != DC_OK) {
-        irq_reg_rx_power_state = DP_SET_POWER_D0;
-        dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
-            "%s: DPCD read failed to obtain power state.\n",
-            __func__);
+    /*parse lane status*/
+    for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
+        /* check status of lanes 0,1
+         * changed DpcdAddress_Lane01Status (0x202)
+         */
+        lane_status.raw = get_nibble_at_index(
+                &hpd_irq_dpcd_data->bytes.lane01_status.raw,
+                lane);
+
+        if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+            !lane_status.bits.CR_DONE_0 ||
+            !lane_status.bits.SYMBOL_LOCKED_0) {
+            /* if one of the channel equalization, clock
+             * recovery or symbol lock is dropped
+             * consider it as (link has been
+             * dropped) dp sink status has changed
+             */
+            sink_status_changed = true;
+            break;
+        }
     }
 
-    if (irq_reg_rx_power_state == DP_SET_POWER_D0) {
-
-        /*2. Check that Link Status changed, before re-training.*/
-
-        /*parse lane status*/
-        for (lane = 0;
-            lane < link->cur_link_settings.lane_count;
-            lane++) {
+    /* Check interlane align.*/
+    if (sink_status_changed ||
+        !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
 
-            /* check status of lanes 0,1
-             * changed DpcdAddress_Lane01Status (0x202)*/
-            lane_status.raw = get_nibble_at_index(
-                &hpd_irq_dpcd_data->bytes.lane01_status.raw,
-                lane);
-
-            if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
-                !lane_status.bits.CR_DONE_0 ||
-                !lane_status.bits.SYMBOL_LOCKED_0) {
-                /* if one of the channel equalization, clock
-                 * recovery or symbol lock is dropped
-                 * consider it as (link has been
-                 * dropped) dp sink status has changed*/
-                sink_status_changed = true;
-                break;
-            }
+        dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+            "%s: Link Status changed.\n", __func__);
 
-        }
+        return_code = true;
 
-        /* Check interlane align.*/
-        if (sink_status_changed ||
-            !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.
-            INTERLANE_ALIGN_DONE) {
+        /*2. Check that we can handle interrupt: Not in FS DOS,
+         * Not in "Display Timeout" state, Link is trained.
+         */
+        dpcd_result = core_link_read_dpcd(link,
+            DP_SET_POWER,
+            &irq_reg_rx_power_state,
+            sizeof(irq_reg_rx_power_state));
 
+        if (dpcd_result != DC_OK) {
             dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
-                "%s: Link Status changed.\n",
+                "%s: DPCD read failed to obtain power state.\n",
                 __func__);
-
-            return_code = true;
+        } else {
+            if (irq_reg_rx_power_state != DP_SET_POWER_D0)
+                return_code = false;
         }
     }
 
@@ -2062,6 +2057,24 @@ bool is_dp_active_dongle(const struct dc_link *link)
         (dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER);
 }
 
+static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
+{
+    switch (bpc) {
+    case DOWN_STREAM_MAX_8BPC:
+        return 8;
+    case DOWN_STREAM_MAX_10BPC:
+        return 10;
+    case DOWN_STREAM_MAX_12BPC:
+        return 12;
+    case DOWN_STREAM_MAX_16BPC:
+        return 16;
+    default:
+        break;
+    }
+
+    return -1;
+}
+
 static void get_active_converter_info(
     uint8_t data, struct dc_link *link)
 {
@@ -2131,7 +2144,8 @@ static void get_active_converter_info(
                 hdmi_caps.bits.YCrCr420_CONVERSION;
 
             link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc =
-                hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT;
+                translate_dpcd_max_bpc(
+                    hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
 
             link->dpcd_caps.dongle_caps.extendedCapValid = true;
         }
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index d1cdf9f8853d..b7422d3b71ef 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c | |||
@@ -516,13 +516,11 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) | |||
516 | right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split; | 516 | right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split; |
517 | 517 | ||
518 | if (right_view) { | 518 | if (right_view) { |
519 | data->viewport.width /= 2; | 519 | data->viewport.x += data->viewport.width / 2; |
520 | data->viewport_c.width /= 2; | 520 | data->viewport_c.x += data->viewport_c.width / 2; |
521 | data->viewport.x += data->viewport.width; | ||
522 | data->viewport_c.x += data->viewport_c.width; | ||
523 | /* Ceil offset pipe */ | 521 | /* Ceil offset pipe */ |
524 | data->viewport.width += data->viewport.width % 2; | 522 | data->viewport.width = (data->viewport.width + 1) / 2; |
525 | data->viewport_c.width += data->viewport_c.width % 2; | 523 | data->viewport_c.width = (data->viewport_c.width + 1) / 2; |
526 | } else { | 524 | } else { |
527 | data->viewport.width /= 2; | 525 | data->viewport.width /= 2; |
528 | data->viewport_c.width /= 2; | 526 | data->viewport_c.width /= 2; |
@@ -580,14 +578,12 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip | |||
580 | if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state == | 578 | if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state == |
581 | pipe_ctx->plane_state) { | 579 | pipe_ctx->plane_state) { |
582 | if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) { | 580 | if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) { |
583 | pipe_ctx->plane_res.scl_data.recout.height /= 2; | 581 | pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height / 2; |
584 | pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height; | ||
585 | /* Floor primary pipe, ceil 2ndary pipe */ | 582 | /* Floor primary pipe, ceil 2ndary pipe */ |
586 | pipe_ctx->plane_res.scl_data.recout.height += pipe_ctx->plane_res.scl_data.recout.height % 2; | 583 | pipe_ctx->plane_res.scl_data.recout.height = (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2; |
587 | } else { | 584 | } else { |
588 | pipe_ctx->plane_res.scl_data.recout.width /= 2; | 585 | pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width / 2; |
589 | pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width; | 586 | pipe_ctx->plane_res.scl_data.recout.width = (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2; |
590 | pipe_ctx->plane_res.scl_data.recout.width += pipe_ctx->plane_res.scl_data.recout.width % 2; | ||
591 | } | 587 | } |
592 | } else if (pipe_ctx->bottom_pipe && | 588 | } else if (pipe_ctx->bottom_pipe && |
593 | pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state) { | 589 | pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state) { |
@@ -856,6 +852,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) | |||
856 | pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right; | 852 | pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right; |
857 | pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; | 853 | pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; |
858 | 854 | ||
855 | |||
859 | /* Taps calculations */ | 856 | /* Taps calculations */ |
860 | if (pipe_ctx->plane_res.xfm != NULL) | 857 | if (pipe_ctx->plane_res.xfm != NULL) |
861 | res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps( | 858 | res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps( |
@@ -864,16 +861,21 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) | |||
864 | if (pipe_ctx->plane_res.dpp != NULL) | 861 | if (pipe_ctx->plane_res.dpp != NULL) |
865 | res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps( | 862 | res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps( |
866 | pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); | 863 | pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); |
867 | |||
868 | if (!res) { | 864 | if (!res) { |
869 | /* Try 24 bpp linebuffer */ | 865 | /* Try 24 bpp linebuffer */ |
870 | pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_24BPP; | 866 | pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_24BPP; |
871 | 867 | ||
872 | res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps( | 868 | if (pipe_ctx->plane_res.xfm != NULL) |
873 | pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); | 869 | res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps( |
870 | pipe_ctx->plane_res.xfm, | ||
871 | &pipe_ctx->plane_res.scl_data, | ||
872 | &plane_state->scaling_quality); | ||
874 | 873 | ||
875 | res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps( | 874 | if (pipe_ctx->plane_res.dpp != NULL) |
876 | pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); | 875 | res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps( |
876 | pipe_ctx->plane_res.dpp, | ||
877 | &pipe_ctx->plane_res.scl_data, | ||
878 | &plane_state->scaling_quality); | ||
877 | } | 879 | } |
878 | 880 | ||
879 | if (res) | 881 | if (res) |
@@ -991,8 +993,10 @@ static struct pipe_ctx *acquire_free_pipe_for_stream( | |||
991 | 993 | ||
992 | head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream); | 994 | head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream); |
993 | 995 | ||
994 | if (!head_pipe) | 996 | if (!head_pipe) { |
995 | ASSERT(0); | 997 | ASSERT(0); |
998 | return NULL; | ||
999 | } | ||
996 | 1000 | ||
997 | if (!head_pipe->plane_state) | 1001 | if (!head_pipe->plane_state) |
998 | return head_pipe; | 1002 | return head_pipe; |
@@ -1447,11 +1451,16 @@ static struct stream_encoder *find_first_free_match_stream_enc_for_link( | |||
1447 | 1451 | ||
1448 | static struct audio *find_first_free_audio( | 1452 | static struct audio *find_first_free_audio( |
1449 | struct resource_context *res_ctx, | 1453 | struct resource_context *res_ctx, |
1450 | const struct resource_pool *pool) | 1454 | const struct resource_pool *pool, |
1455 | enum engine_id id) | ||
1451 | { | 1456 | { |
1452 | int i; | 1457 | int i; |
1453 | for (i = 0; i < pool->audio_count; i++) { | 1458 | for (i = 0; i < pool->audio_count; i++) { |
1454 | if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) { | 1459 | if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) { |
1460 | /*we have enough audio endpoint, find the matching inst*/ | ||
1461 | if (id != i) | ||
1462 | continue; | ||
1463 | |||
1455 | return pool->audios[i]; | 1464 | return pool->audios[i]; |
1456 | } | 1465 | } |
1457 | } | 1466 | } |
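
find_first_free_audio() now takes the stream encoder's engine id and only returns the free audio endpoint whose instance matches it, rather than the first free one. A rough stand-alone illustration of that selection loop, with invented array sizes and field names standing in for the real resource_context:

#include <stdbool.h>
#include <stddef.h>

#define MAX_PIPES 6   /* invented size, just for the sketch */

struct audio {
    int inst;
};

struct resource_context {
    bool is_audio_acquired[MAX_PIPES];
    bool is_stream_enc_acquired[MAX_PIPES];
};

struct resource_pool {
    int audio_count;
    struct audio *audios[MAX_PIPES];
};

static struct audio *find_matching_audio(struct resource_context *res_ctx,
                                         const struct resource_pool *pool,
                                         int enc_id)
{
    int i;

    for (i = 0; i < pool->audio_count; i++) {
        /* skip endpoints that are taken or whose encoder slot is idle */
        if (res_ctx->is_audio_acquired[i] || !res_ctx->is_stream_enc_acquired[i])
            continue;
        /* enough free endpoints exist, so insist on the matching instance */
        if (i != enc_id)
            continue;
        return pool->audios[i];
    }

    return NULL;
}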
@@ -1700,7 +1709,7 @@ enum dc_status resource_map_pool_resources( | |||
1700 | dc_is_audio_capable_signal(pipe_ctx->stream->signal) && | 1709 | dc_is_audio_capable_signal(pipe_ctx->stream->signal) && |
1701 | stream->audio_info.mode_count) { | 1710 | stream->audio_info.mode_count) { |
1702 | pipe_ctx->stream_res.audio = find_first_free_audio( | 1711 | pipe_ctx->stream_res.audio = find_first_free_audio( |
1703 | &context->res_ctx, pool); | 1712 | &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id); |
1704 | 1713 | ||
1705 | /* | 1714 | /* |
1706 | * Audio assigned in order first come first get. | 1715 | * Audio assigned in order first come first get. |
@@ -1765,13 +1774,16 @@ enum dc_status dc_validate_global_state( | |||
1765 | enum dc_status result = DC_ERROR_UNEXPECTED; | 1774 | enum dc_status result = DC_ERROR_UNEXPECTED; |
1766 | int i, j; | 1775 | int i, j; |
1767 | 1776 | ||
1777 | if (!new_ctx) | ||
1778 | return DC_ERROR_UNEXPECTED; | ||
1779 | |||
1768 | if (dc->res_pool->funcs->validate_global) { | 1780 | if (dc->res_pool->funcs->validate_global) { |
1769 | result = dc->res_pool->funcs->validate_global(dc, new_ctx); | 1781 | result = dc->res_pool->funcs->validate_global(dc, new_ctx); |
1770 | if (result != DC_OK) | 1782 | if (result != DC_OK) |
1771 | return result; | 1783 | return result; |
1772 | } | 1784 | } |
1773 | 1785 | ||
1774 | for (i = 0; new_ctx && i < new_ctx->stream_count; i++) { | 1786 | for (i = 0; i < new_ctx->stream_count; i++) { |
1775 | struct dc_stream_state *stream = new_ctx->streams[i]; | 1787 | struct dc_stream_state *stream = new_ctx->streams[i]; |
1776 | 1788 | ||
1777 | for (j = 0; j < dc->res_pool->pipe_count; j++) { | 1789 | for (j = 0; j < dc->res_pool->pipe_count; j++) { |
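
dc_validate_global_state() now rejects a NULL context up front instead of hiding the check in the loop condition ("new_ctx &&"). The same shape, sketched with a placeholder context type:

#include <stddef.h>

enum status { STATUS_OK, STATUS_ERROR_UNEXPECTED };

struct validate_context {
    int stream_count;
};

static enum status validate_global(const struct validate_context *new_ctx)
{
    int i;

    /* fail fast on a NULL context rather than guarding the loop condition */
    if (new_ctx == NULL)
        return STATUS_ERROR_UNEXPECTED;

    for (i = 0; i < new_ctx->stream_count; i++) {
        /* per-stream checks would run here */
    }

    return STATUS_OK;
}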
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index b00a6040a697..e230cc44a0a7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c | |||
@@ -263,7 +263,6 @@ bool dc_stream_set_cursor_position( | |||
263 | struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp; | 263 | struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp; |
264 | struct mem_input *mi = pipe_ctx->plane_res.mi; | 264 | struct mem_input *mi = pipe_ctx->plane_res.mi; |
265 | struct hubp *hubp = pipe_ctx->plane_res.hubp; | 265 | struct hubp *hubp = pipe_ctx->plane_res.hubp; |
266 | struct transform *xfm = pipe_ctx->plane_res.xfm; | ||
267 | struct dpp *dpp = pipe_ctx->plane_res.dpp; | 266 | struct dpp *dpp = pipe_ctx->plane_res.dpp; |
268 | struct dc_cursor_position pos_cpy = *position; | 267 | struct dc_cursor_position pos_cpy = *position; |
269 | struct dc_cursor_mi_param param = { | 268 | struct dc_cursor_mi_param param = { |
@@ -294,11 +293,11 @@ bool dc_stream_set_cursor_position( | |||
294 | if (mi != NULL && mi->funcs->set_cursor_position != NULL) | 293 | if (mi != NULL && mi->funcs->set_cursor_position != NULL) |
295 | mi->funcs->set_cursor_position(mi, &pos_cpy, &param); | 294 | mi->funcs->set_cursor_position(mi, &pos_cpy, &param); |
296 | 295 | ||
297 | if (hubp != NULL && hubp->funcs->set_cursor_position != NULL) | 296 | if (!hubp) |
298 | hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param); | 297 | continue; |
299 | 298 | ||
300 | if (xfm != NULL && xfm->funcs->set_cursor_position != NULL) | 299 | if (hubp->funcs->set_cursor_position != NULL) |
301 | xfm->funcs->set_cursor_position(xfm, &pos_cpy, &param, hubp->curs_attr.width); | 300 | hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param); |
302 | 301 | ||
303 | if (dpp != NULL && dpp->funcs->set_cursor_position != NULL) | 302 | if (dpp != NULL && dpp->funcs->set_cursor_position != NULL) |
304 | dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width); | 303 | dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width); |
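
The cursor path drops the transform (xfm) hook entirely and, because the later dpp call reads hubp->curs_attr.width, skips the whole pipe when hubp is missing instead of checking each call site separately. A condensed sketch of that loop body, with simplified stand-in types:

#include <stddef.h>

struct dc_cursor_position {
    int x, y;
};

struct hubp {
    struct { unsigned int width; } curs_attr;
    void (*set_cursor_position)(struct hubp *hubp,
                                const struct dc_cursor_position *pos);
};

struct dpp {
    void (*set_cursor_position)(struct dpp *dpp,
                                const struct dc_cursor_position *pos,
                                unsigned int cursor_width);
};

struct pipe {
    struct hubp *hubp;
    struct dpp *dpp;
};

static void set_cursor_on_pipes(struct pipe *pipes, int count,
                                const struct dc_cursor_position *pos)
{
    int i;

    for (i = 0; i < count; i++) {
        struct hubp *hubp = pipes[i].hubp;
        struct dpp *dpp = pipes[i].dpp;

        /* every call below depends on hubp (curs_attr.width), so skip the pipe */
        if (hubp == NULL)
            continue;

        if (hubp->set_cursor_position != NULL)
            hubp->set_cursor_position(hubp, pos);

        if (dpp != NULL && dpp->set_cursor_position != NULL)
            dpp->set_cursor_position(dpp, pos, hubp->curs_attr.width);
    }
}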
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c index 81c40f8864db..0df9ecb2710c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c | |||
@@ -352,11 +352,11 @@ void dce_aud_az_enable(struct audio *audio) | |||
352 | uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL); | 352 | uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL); |
353 | 353 | ||
354 | set_reg_field_value(value, 1, | 354 | set_reg_field_value(value, 1, |
355 | AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, | 355 | AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, |
356 | CLOCK_GATING_DISABLE); | 356 | CLOCK_GATING_DISABLE); |
357 | set_reg_field_value(value, 1, | 357 | set_reg_field_value(value, 1, |
358 | AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, | 358 | AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, |
359 | AUDIO_ENABLED); | 359 | AUDIO_ENABLED); |
360 | 360 | ||
361 | AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value); | 361 | AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value); |
362 | value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL); | 362 | value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL); |
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 4fd49a16c3b6..e42b6eb1c1f0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | |||
@@ -87,6 +87,9 @@ static void dce110_update_generic_info_packet( | |||
87 | */ | 87 | */ |
88 | uint32_t max_retries = 50; | 88 | uint32_t max_retries = 50; |
89 | 89 | ||
90 | /* we need to turn on the clock before programming the AFMT block */ | ||
91 | REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1); | ||
92 | |||
90 | if (REG(AFMT_VBI_PACKET_CONTROL1)) { | 93 | if (REG(AFMT_VBI_PACKET_CONTROL1)) { |
91 | if (packet_index >= 8) | 94 | if (packet_index >= 8) |
92 | ASSERT(0); | 95 | ASSERT(0); |
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 1229a3315018..07ff8d2faf3f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | |||
@@ -991,6 +991,16 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option) | |||
991 | struct dc_link *link = stream->sink->link; | 991 | struct dc_link *link = stream->sink->link; |
992 | struct dc *dc = pipe_ctx->stream->ctx->dc; | 992 | struct dc *dc = pipe_ctx->stream->ctx->dc; |
993 | 993 | ||
994 | if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) | ||
995 | pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets( | ||
996 | pipe_ctx->stream_res.stream_enc); | ||
997 | |||
998 | if (dc_is_dp_signal(pipe_ctx->stream->signal)) | ||
999 | pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets( | ||
1000 | pipe_ctx->stream_res.stream_enc); | ||
1001 | |||
1002 | pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( | ||
1003 | pipe_ctx->stream_res.stream_enc, true); | ||
994 | if (pipe_ctx->stream_res.audio) { | 1004 | if (pipe_ctx->stream_res.audio) { |
995 | pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); | 1005 | pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); |
996 | 1006 | ||
@@ -1015,18 +1025,6 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option) | |||
1015 | */ | 1025 | */ |
1016 | } | 1026 | } |
1017 | 1027 | ||
1018 | if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) | ||
1019 | pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets( | ||
1020 | pipe_ctx->stream_res.stream_enc); | ||
1021 | |||
1022 | if (dc_is_dp_signal(pipe_ctx->stream->signal)) | ||
1023 | pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets( | ||
1024 | pipe_ctx->stream_res.stream_enc); | ||
1025 | |||
1026 | pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( | ||
1027 | pipe_ctx->stream_res.stream_enc, true); | ||
1028 | |||
1029 | |||
1030 | /* blank at encoder level */ | 1028 | /* blank at encoder level */ |
1031 | if (dc_is_dp_signal(pipe_ctx->stream->signal)) { | 1029 | if (dc_is_dp_signal(pipe_ctx->stream->signal)) { |
1032 | if (pipe_ctx->stream->sink->link->connector_signal == SIGNAL_TYPE_EDP) | 1030 | if (pipe_ctx->stream->sink->link->connector_signal == SIGNAL_TYPE_EDP) |
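
Taken together, the two hunks above move the HDMI/DP info-packet stop and the encoder-level audio mute ahead of the audio endpoint disable, so the stream is silenced before the Azalia endpoint is torn down. A compressed sketch of the new ordering, with placeholder callback structs standing in for the real stream_res types:

#include <stddef.h>

struct stream_encoder {
    void (*stop_hdmi_info_packets)(struct stream_encoder *enc);
    void (*stop_dp_info_packets)(struct stream_encoder *enc);
    void (*audio_mute_control)(struct stream_encoder *enc, int mute);
};

struct audio_endpoint {
    void (*az_disable)(struct audio_endpoint *audio);
};

static void disable_stream_order(struct stream_encoder *enc,
                                 struct audio_endpoint *audio,
                                 int is_hdmi, int is_dp)
{
    /* silence the stream first: stop packets and mute at the encoder... */
    if (is_hdmi)
        enc->stop_hdmi_info_packets(enc);
    if (is_dp)
        enc->stop_dp_info_packets(enc);
    enc->audio_mute_control(enc, 1);

    /* ...and only then tear down the audio endpoint itself */
    if (audio != NULL)
        audio->az_disable(audio);
}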
@@ -1774,6 +1772,10 @@ static enum dc_status validate_fbc(struct dc *dc, | |||
1774 | if (pipe_ctx->stream->sink->link->psr_enabled) | 1772 | if (pipe_ctx->stream->sink->link->psr_enabled) |
1775 | return DC_ERROR_UNEXPECTED; | 1773 | return DC_ERROR_UNEXPECTED; |
1776 | 1774 | ||
1775 | /* Nothing to compress */ | ||
1776 | if (!pipe_ctx->plane_state) | ||
1777 | return DC_ERROR_UNEXPECTED; | ||
1778 | |||
1777 | /* Only for non-linear tiling */ | 1779 | /* Only for non-linear tiling */ |
1778 | if (pipe_ctx->plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL) | 1780 | if (pipe_ctx->plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL) |
1779 | return DC_ERROR_UNEXPECTED; | 1781 | return DC_ERROR_UNEXPECTED; |
@@ -1868,8 +1870,10 @@ static void dce110_reset_hw_ctx_wrap( | |||
1868 | pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) { | 1870 | pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) { |
1869 | struct clock_source *old_clk = pipe_ctx_old->clock_source; | 1871 | struct clock_source *old_clk = pipe_ctx_old->clock_source; |
1870 | 1872 | ||
1871 | /* disable already, no need to disable again */ | 1873 | /* Disable if the new stream is null. Otherwise, if the stream is |
1872 | if (pipe_ctx->stream && !pipe_ctx->stream->dpms_off) | 1874 | * already disabled, there is no need to disable it again. |
1875 | */ | ||
1876 | if (!pipe_ctx->stream || !pipe_ctx->stream->dpms_off) | ||
1873 | core_link_disable_stream(pipe_ctx_old, FREE_ACQUIRED_RESOURCE); | 1877 | core_link_disable_stream(pipe_ctx_old, FREE_ACQUIRED_RESOURCE); |
1874 | 1878 | ||
1875 | pipe_ctx_old->stream_res.tg->funcs->set_blank(pipe_ctx_old->stream_res.tg, true); | 1879 | pipe_ctx_old->stream_res.tg->funcs->set_blank(pipe_ctx_old->stream_res.tg, true); |
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index db96d2b47ff1..61adb8174ce0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | |||
@@ -1037,11 +1037,13 @@ static bool underlay_create(struct dc_context *ctx, struct resource_pool *pool) | |||
1037 | struct dce110_opp *dce110_oppv = kzalloc(sizeof(*dce110_oppv), | 1037 | struct dce110_opp *dce110_oppv = kzalloc(sizeof(*dce110_oppv), |
1038 | GFP_KERNEL); | 1038 | GFP_KERNEL); |
1039 | 1039 | ||
1040 | if ((dce110_tgv == NULL) || | 1040 | if (!dce110_tgv || !dce110_xfmv || !dce110_miv || !dce110_oppv) { |
1041 | (dce110_xfmv == NULL) || | 1041 | kfree(dce110_tgv); |
1042 | (dce110_miv == NULL) || | 1042 | kfree(dce110_xfmv); |
1043 | (dce110_oppv == NULL)) | 1043 | kfree(dce110_miv); |
1044 | return false; | 1044 | kfree(dce110_oppv); |
1045 | return false; | ||
1046 | } | ||
1045 | 1047 | ||
1046 | dce110_opp_v_construct(dce110_oppv, ctx); | 1048 | dce110_opp_v_construct(dce110_oppv, ctx); |
1047 | 1049 | ||
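
underlay_create() previously leaked whichever of the four allocations had succeeded when any one of them failed; the new error path frees them all, relying on kfree() accepting NULL. The same pattern in plain C with calloc/free, which share that NULL-tolerant behaviour:

#include <stdbool.h>
#include <stdlib.h>

struct tg  { int dummy; };
struct xfm { int dummy; };
struct mi  { int dummy; };
struct opp { int dummy; };

static bool underlay_create_sketch(void)
{
    struct tg  *tg  = calloc(1, sizeof(*tg));
    struct xfm *xfm = calloc(1, sizeof(*xfm));
    struct mi  *mi  = calloc(1, sizeof(*mi));
    struct opp *opp = calloc(1, sizeof(*opp));

    if (!tg || !xfm || !mi || !opp) {
        /* free() (like kfree()) ignores NULL, so no need to test each one */
        free(tg);
        free(xfm);
        free(mi);
        free(opp);
        return false;
    }

    /* ... construct and register the objects here ... */
    return true;
}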
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c index 67ac737eaa7e..4befce6cd87a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c | |||
@@ -1112,10 +1112,7 @@ bool dce110_timing_generator_validate_timing( | |||
1112 | enum signal_type signal) | 1112 | enum signal_type signal) |
1113 | { | 1113 | { |
1114 | uint32_t h_blank; | 1114 | uint32_t h_blank; |
1115 | uint32_t h_back_porch; | 1115 | uint32_t h_back_porch, hsync_offset, h_sync_start; |
1116 | uint32_t hsync_offset = timing->h_border_right + | ||
1117 | timing->h_front_porch; | ||
1118 | uint32_t h_sync_start = timing->h_addressable + hsync_offset; | ||
1119 | 1116 | ||
1120 | struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); | 1117 | struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); |
1121 | 1118 | ||
@@ -1124,6 +1121,9 @@ bool dce110_timing_generator_validate_timing( | |||
1124 | if (!timing) | 1121 | if (!timing) |
1125 | return false; | 1122 | return false; |
1126 | 1123 | ||
1124 | hsync_offset = timing->h_border_right + timing->h_front_porch; | ||
1125 | h_sync_start = timing->h_addressable + hsync_offset; | ||
1126 | |||
1127 | /* Currently we don't support 3D, so block all 3D timings */ | 1127 | /* Currently we don't support 3D, so block all 3D timings */ |
1128 | if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) | 1128 | if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) |
1129 | return false; | 1129 | return false; |
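
The timing validation above moves the hsync_offset/h_sync_start arithmetic below the NULL check on timing, since the old initialisers dereferenced timing before it was validated. A minimal reproduction of the fixed ordering, with an invented constraint and a stand-in timing struct:

#include <stdbool.h>
#include <stdint.h>

struct crtc_timing {
    uint32_t h_border_right;
    uint32_t h_front_porch;
    uint32_t h_addressable;
    uint32_t h_total;
};

static bool validate_timing(const struct crtc_timing *timing)
{
    uint32_t hsync_offset, h_sync_start;

    if (!timing)
        return false;

    /* derive these only after the NULL check; the old initialisers ran first */
    hsync_offset = timing->h_border_right + timing->h_front_porch;
    h_sync_start = timing->h_addressable + hsync_offset;

    /* placeholder constraint standing in for the real hardware limits */
    return h_sync_start < timing->h_total;
}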
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 4c4bd72d4e40..9fc8f827f2a1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | |||
@@ -912,11 +912,13 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer( | |||
912 | struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream); | 912 | struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream); |
913 | struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool); | 913 | struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool); |
914 | 914 | ||
915 | if (!head_pipe) | 915 | if (!head_pipe) { |
916 | ASSERT(0); | 916 | ASSERT(0); |
917 | return NULL; | ||
918 | } | ||
917 | 919 | ||
918 | if (!idle_pipe) | 920 | if (!idle_pipe) |
919 | return false; | 921 | return NULL; |
920 | 922 | ||
921 | idle_pipe->stream = head_pipe->stream; | 923 | idle_pipe->stream = head_pipe->stream; |
922 | idle_pipe->stream_res.tg = head_pipe->stream_res.tg; | 924 | idle_pipe->stream_res.tg = head_pipe->stream_res.tg; |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c index c7333cdf1802..fced178c8c79 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c | |||
@@ -496,9 +496,6 @@ static bool tgn10_validate_timing( | |||
496 | timing->timing_3d_format != TIMING_3D_FORMAT_INBAND_FA) | 496 | timing->timing_3d_format != TIMING_3D_FORMAT_INBAND_FA) |
497 | return false; | 497 | return false; |
498 | 498 | ||
499 | if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE && | ||
500 | tg->ctx->dc->debug.disable_stereo_support) | ||
501 | return false; | ||
502 | /* Temporarily blocking interlacing mode until it's supported */ | 499 | /* Temporarily blocking interlacing mode until it's supported */ |
503 | if (timing->flags.INTERLACE == 1) | 500 | if (timing->flags.INTERLACE == 1) |
504 | return false; | 501 | return false; |
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h index 01df85641684..94fc31080fda 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h | |||
@@ -38,7 +38,7 @@ enum dc_status { | |||
38 | DC_FAIL_DETACH_SURFACES = 8, | 38 | DC_FAIL_DETACH_SURFACES = 8, |
39 | DC_FAIL_SURFACE_VALIDATE = 9, | 39 | DC_FAIL_SURFACE_VALIDATE = 9, |
40 | DC_NO_DP_LINK_BANDWIDTH = 10, | 40 | DC_NO_DP_LINK_BANDWIDTH = 10, |
41 | DC_EXCEED_DONGLE_MAX_CLK = 11, | 41 | DC_EXCEED_DONGLE_CAP = 11, |
42 | DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED = 12, | 42 | DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED = 12, |
43 | DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */ | 43 | DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */ |
44 | DC_FAIL_SCALING = 14, | 44 | DC_FAIL_SCALING = 14, |
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h index 7c08bc62c1f5..ea88997e1bbd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h | |||
@@ -259,13 +259,6 @@ struct transform_funcs { | |||
259 | struct transform *xfm_base, | 259 | struct transform *xfm_base, |
260 | const struct dc_cursor_attributes *attr); | 260 | const struct dc_cursor_attributes *attr); |
261 | 261 | ||
262 | void (*set_cursor_position)( | ||
263 | struct transform *xfm_base, | ||
264 | const struct dc_cursor_position *pos, | ||
265 | const struct dc_cursor_mi_param *param, | ||
266 | uint32_t width | ||
267 | ); | ||
268 | |||
269 | }; | 262 | }; |
270 | 263 | ||
271 | const uint16_t *get_filter_2tap_16p(void); | 264 | const uint16_t *get_filter_2tap_16p(void); |
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index 72b22b805412..5a5427bbd70e 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c | |||
@@ -317,9 +317,8 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm) | |||
317 | formats, ARRAY_SIZE(formats), | 317 | formats, ARRAY_SIZE(formats), |
318 | NULL, | 318 | NULL, |
319 | DRM_PLANE_TYPE_PRIMARY, NULL); | 319 | DRM_PLANE_TYPE_PRIMARY, NULL); |
320 | if (ret) { | 320 | if (ret) |
321 | return ERR_PTR(ret); | 321 | return ERR_PTR(ret); |
322 | } | ||
323 | 322 | ||
324 | drm_plane_helper_add(plane, &hdlcd_plane_helper_funcs); | 323 | drm_plane_helper_add(plane, &hdlcd_plane_helper_funcs); |
325 | hdlcd->plane = plane; | 324 | hdlcd->plane = plane; |
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index 764d0c83710c..0afb53b1f4e9 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/spinlock.h> | 13 | #include <linux/spinlock.h> |
14 | #include <linux/clk.h> | 14 | #include <linux/clk.h> |
15 | #include <linux/component.h> | 15 | #include <linux/component.h> |
16 | #include <linux/console.h> | ||
16 | #include <linux/list.h> | 17 | #include <linux/list.h> |
17 | #include <linux/of_graph.h> | 18 | #include <linux/of_graph.h> |
18 | #include <linux/of_reserved_mem.h> | 19 | #include <linux/of_reserved_mem.h> |
@@ -354,7 +355,7 @@ err_unload: | |||
354 | err_free: | 355 | err_free: |
355 | drm_mode_config_cleanup(drm); | 356 | drm_mode_config_cleanup(drm); |
356 | dev_set_drvdata(dev, NULL); | 357 | dev_set_drvdata(dev, NULL); |
357 | drm_dev_unref(drm); | 358 | drm_dev_put(drm); |
358 | 359 | ||
359 | return ret; | 360 | return ret; |
360 | } | 361 | } |
@@ -379,7 +380,7 @@ static void hdlcd_drm_unbind(struct device *dev) | |||
379 | pm_runtime_disable(drm->dev); | 380 | pm_runtime_disable(drm->dev); |
380 | of_reserved_mem_device_release(drm->dev); | 381 | of_reserved_mem_device_release(drm->dev); |
381 | drm_mode_config_cleanup(drm); | 382 | drm_mode_config_cleanup(drm); |
382 | drm_dev_unref(drm); | 383 | drm_dev_put(drm); |
383 | drm->dev_private = NULL; | 384 | drm->dev_private = NULL; |
384 | dev_set_drvdata(dev, NULL); | 385 | dev_set_drvdata(dev, NULL); |
385 | } | 386 | } |
@@ -432,9 +433,11 @@ static int __maybe_unused hdlcd_pm_suspend(struct device *dev) | |||
432 | return 0; | 433 | return 0; |
433 | 434 | ||
434 | drm_kms_helper_poll_disable(drm); | 435 | drm_kms_helper_poll_disable(drm); |
436 | drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 1); | ||
435 | 437 | ||
436 | hdlcd->state = drm_atomic_helper_suspend(drm); | 438 | hdlcd->state = drm_atomic_helper_suspend(drm); |
437 | if (IS_ERR(hdlcd->state)) { | 439 | if (IS_ERR(hdlcd->state)) { |
440 | drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0); | ||
438 | drm_kms_helper_poll_enable(drm); | 441 | drm_kms_helper_poll_enable(drm); |
439 | return PTR_ERR(hdlcd->state); | 442 | return PTR_ERR(hdlcd->state); |
440 | } | 443 | } |
@@ -451,8 +454,8 @@ static int __maybe_unused hdlcd_pm_resume(struct device *dev) | |||
451 | return 0; | 454 | return 0; |
452 | 455 | ||
453 | drm_atomic_helper_resume(drm, hdlcd->state); | 456 | drm_atomic_helper_resume(drm, hdlcd->state); |
457 | drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0); | ||
454 | drm_kms_helper_poll_enable(drm); | 458 | drm_kms_helper_poll_enable(drm); |
455 | pm_runtime_set_active(dev); | ||
456 | 459 | ||
457 | return 0; | 460 | return 0; |
458 | } | 461 | } |
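
The hdlcd suspend path now also parks the CMA fbdev emulation before saving the atomic state and restores it on resume or on failure, and it unwinds in reverse order when the suspend fails. A toy sketch of that pairing; the helpers below are printf stand-ins for the DRM calls named in the hunk, not the real API:

#include <stdio.h>

/* stand-ins for the DRM helpers named in the hunk, not the real API */
static void fbdev_set_suspend(int suspended) { printf("fbdev suspended=%d\n", suspended); }
static void poll_set_enabled(int enabled)    { printf("poll enabled=%d\n", enabled); }
static int  save_atomic_state(void)          { return 0; /* 0 means the state was saved */ }

static int pm_suspend_sketch(void)
{
    poll_set_enabled(0);
    fbdev_set_suspend(1);           /* park fbdev emulation before saving state */

    if (save_atomic_state() != 0) {
        /* unwind in reverse order if saving the atomic state failed */
        fbdev_set_suspend(0);
        poll_set_enabled(1);
        return -1;
    }

    return 0;
}

static int pm_resume_sketch(void)
{
    /* restore in the opposite order: state first, then fbdev, then polling */
    fbdev_set_suspend(0);
    poll_set_enabled(1);
    return 0;
}

int main(void)
{
    if (pm_suspend_sketch() == 0)
        pm_resume_sketch();
    return 0;
}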
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c index 3615d18a7ddf..904fff80917b 100644 --- a/drivers/gpu/drm/arm/malidp_crtc.c +++ b/drivers/gpu/drm/arm/malidp_crtc.c | |||
@@ -65,8 +65,8 @@ static void malidp_crtc_atomic_enable(struct drm_crtc *crtc, | |||
65 | /* We rely on firmware to set mclk to a sensible level. */ | 65 | /* We rely on firmware to set mclk to a sensible level. */ |
66 | clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000); | 66 | clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000); |
67 | 67 | ||
68 | hwdev->modeset(hwdev, &vm); | 68 | hwdev->hw->modeset(hwdev, &vm); |
69 | hwdev->leave_config_mode(hwdev); | 69 | hwdev->hw->leave_config_mode(hwdev); |
70 | drm_crtc_vblank_on(crtc); | 70 | drm_crtc_vblank_on(crtc); |
71 | } | 71 | } |
72 | 72 | ||
@@ -77,8 +77,12 @@ static void malidp_crtc_atomic_disable(struct drm_crtc *crtc, | |||
77 | struct malidp_hw_device *hwdev = malidp->dev; | 77 | struct malidp_hw_device *hwdev = malidp->dev; |
78 | int err; | 78 | int err; |
79 | 79 | ||
80 | /* always disable planes on the CRTC that is being turned off */ | ||
81 | drm_atomic_helper_disable_planes_on_crtc(old_state, false); | ||
82 | |||
80 | drm_crtc_vblank_off(crtc); | 83 | drm_crtc_vblank_off(crtc); |
81 | hwdev->enter_config_mode(hwdev); | 84 | hwdev->hw->enter_config_mode(hwdev); |
85 | |||
82 | clk_disable_unprepare(hwdev->pxlclk); | 86 | clk_disable_unprepare(hwdev->pxlclk); |
83 | 87 | ||
84 | err = pm_runtime_put(crtc->dev->dev); | 88 | err = pm_runtime_put(crtc->dev->dev); |
@@ -319,7 +323,7 @@ static int malidp_crtc_atomic_check_scaling(struct drm_crtc *crtc, | |||
319 | 323 | ||
320 | mclk_calc: | 324 | mclk_calc: |
321 | drm_display_mode_to_videomode(&state->adjusted_mode, &vm); | 325 | drm_display_mode_to_videomode(&state->adjusted_mode, &vm); |
322 | ret = hwdev->se_calc_mclk(hwdev, s, &vm); | 326 | ret = hwdev->hw->se_calc_mclk(hwdev, s, &vm); |
323 | if (ret < 0) | 327 | if (ret < 0) |
324 | return -EINVAL; | 328 | return -EINVAL; |
325 | return 0; | 329 | return 0; |
@@ -475,7 +479,7 @@ static int malidp_crtc_enable_vblank(struct drm_crtc *crtc) | |||
475 | struct malidp_hw_device *hwdev = malidp->dev; | 479 | struct malidp_hw_device *hwdev = malidp->dev; |
476 | 480 | ||
477 | malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK, | 481 | malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK, |
478 | hwdev->map.de_irq_map.vsync_irq); | 482 | hwdev->hw->map.de_irq_map.vsync_irq); |
479 | return 0; | 483 | return 0; |
480 | } | 484 | } |
481 | 485 | ||
@@ -485,7 +489,7 @@ static void malidp_crtc_disable_vblank(struct drm_crtc *crtc) | |||
485 | struct malidp_hw_device *hwdev = malidp->dev; | 489 | struct malidp_hw_device *hwdev = malidp->dev; |
486 | 490 | ||
487 | malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, | 491 | malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, |
488 | hwdev->map.de_irq_map.vsync_irq); | 492 | hwdev->hw->map.de_irq_map.vsync_irq); |
489 | } | 493 | } |
490 | 494 | ||
491 | static const struct drm_crtc_funcs malidp_crtc_funcs = { | 495 | static const struct drm_crtc_funcs malidp_crtc_funcs = { |
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index b8944666a18f..91f2b0191368 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c | |||
@@ -47,10 +47,10 @@ static void malidp_write_gamma_table(struct malidp_hw_device *hwdev, | |||
47 | * directly. | 47 | * directly. |
48 | */ | 48 | */ |
49 | malidp_hw_write(hwdev, gamma_write_mask, | 49 | malidp_hw_write(hwdev, gamma_write_mask, |
50 | hwdev->map.coeffs_base + MALIDP_COEF_TABLE_ADDR); | 50 | hwdev->hw->map.coeffs_base + MALIDP_COEF_TABLE_ADDR); |
51 | for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i) | 51 | for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i) |
52 | malidp_hw_write(hwdev, data[i], | 52 | malidp_hw_write(hwdev, data[i], |
53 | hwdev->map.coeffs_base + | 53 | hwdev->hw->map.coeffs_base + |
54 | MALIDP_COEF_TABLE_DATA); | 54 | MALIDP_COEF_TABLE_DATA); |
55 | } | 55 | } |
56 | 56 | ||
@@ -103,7 +103,7 @@ void malidp_atomic_commit_update_coloradj(struct drm_crtc *crtc, | |||
103 | for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; ++i) | 103 | for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; ++i) |
104 | malidp_hw_write(hwdev, | 104 | malidp_hw_write(hwdev, |
105 | mc->coloradj_coeffs[i], | 105 | mc->coloradj_coeffs[i], |
106 | hwdev->map.coeffs_base + | 106 | hwdev->hw->map.coeffs_base + |
107 | MALIDP_COLOR_ADJ_COEF + 4 * i); | 107 | MALIDP_COLOR_ADJ_COEF + 4 * i); |
108 | 108 | ||
109 | malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_CADJ, | 109 | malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_CADJ, |
@@ -120,8 +120,8 @@ static void malidp_atomic_commit_se_config(struct drm_crtc *crtc, | |||
120 | struct malidp_hw_device *hwdev = malidp->dev; | 120 | struct malidp_hw_device *hwdev = malidp->dev; |
121 | struct malidp_se_config *s = &cs->scaler_config; | 121 | struct malidp_se_config *s = &cs->scaler_config; |
122 | struct malidp_se_config *old_s = &old_cs->scaler_config; | 122 | struct malidp_se_config *old_s = &old_cs->scaler_config; |
123 | u32 se_control = hwdev->map.se_base + | 123 | u32 se_control = hwdev->hw->map.se_base + |
124 | ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ? | 124 | ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ? |
125 | 0x10 : 0xC); | 125 | 0x10 : 0xC); |
126 | u32 layer_control = se_control + MALIDP_SE_LAYER_CONTROL; | 126 | u32 layer_control = se_control + MALIDP_SE_LAYER_CONTROL; |
127 | u32 scr = se_control + MALIDP_SE_SCALING_CONTROL; | 127 | u32 scr = se_control + MALIDP_SE_SCALING_CONTROL; |
@@ -135,7 +135,7 @@ static void malidp_atomic_commit_se_config(struct drm_crtc *crtc, | |||
135 | return; | 135 | return; |
136 | } | 136 | } |
137 | 137 | ||
138 | hwdev->se_set_scaling_coeffs(hwdev, s, old_s); | 138 | hwdev->hw->se_set_scaling_coeffs(hwdev, s, old_s); |
139 | val = malidp_hw_read(hwdev, se_control); | 139 | val = malidp_hw_read(hwdev, se_control); |
140 | val |= MALIDP_SE_SCALING_EN | MALIDP_SE_ALPHA_EN; | 140 | val |= MALIDP_SE_SCALING_EN | MALIDP_SE_ALPHA_EN; |
141 | 141 | ||
@@ -170,9 +170,9 @@ static int malidp_set_and_wait_config_valid(struct drm_device *drm) | |||
170 | int ret; | 170 | int ret; |
171 | 171 | ||
172 | atomic_set(&malidp->config_valid, 0); | 172 | atomic_set(&malidp->config_valid, 0); |
173 | hwdev->set_config_valid(hwdev); | 173 | hwdev->hw->set_config_valid(hwdev); |
174 | /* don't wait for config_valid flag if we are in config mode */ | 174 | /* don't wait for config_valid flag if we are in config mode */ |
175 | if (hwdev->in_config_mode(hwdev)) | 175 | if (hwdev->hw->in_config_mode(hwdev)) |
176 | return 0; | 176 | return 0; |
177 | 177 | ||
178 | ret = wait_event_interruptible_timeout(malidp->wq, | 178 | ret = wait_event_interruptible_timeout(malidp->wq, |
@@ -455,7 +455,7 @@ static int malidp_runtime_pm_suspend(struct device *dev) | |||
455 | struct malidp_hw_device *hwdev = malidp->dev; | 455 | struct malidp_hw_device *hwdev = malidp->dev; |
456 | 456 | ||
457 | /* we can only suspend if the hardware is in config mode */ | 457 | /* we can only suspend if the hardware is in config mode */ |
458 | WARN_ON(!hwdev->in_config_mode(hwdev)); | 458 | WARN_ON(!hwdev->hw->in_config_mode(hwdev)); |
459 | 459 | ||
460 | hwdev->pm_suspended = true; | 460 | hwdev->pm_suspended = true; |
461 | clk_disable_unprepare(hwdev->mclk); | 461 | clk_disable_unprepare(hwdev->mclk); |
@@ -500,11 +500,7 @@ static int malidp_bind(struct device *dev) | |||
500 | if (!hwdev) | 500 | if (!hwdev) |
501 | return -ENOMEM; | 501 | return -ENOMEM; |
502 | 502 | ||
503 | /* | 503 | hwdev->hw = (struct malidp_hw *)of_device_get_match_data(dev); |
504 | * copy the associated data from malidp_drm_of_match to avoid | ||
505 | * having to keep a reference to the OF node after binding | ||
506 | */ | ||
507 | memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev)); | ||
508 | malidp->dev = hwdev; | 504 | malidp->dev = hwdev; |
509 | 505 | ||
510 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 506 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
@@ -568,13 +564,13 @@ static int malidp_bind(struct device *dev) | |||
568 | goto query_hw_fail; | 564 | goto query_hw_fail; |
569 | } | 565 | } |
570 | 566 | ||
571 | ret = hwdev->query_hw(hwdev); | 567 | ret = hwdev->hw->query_hw(hwdev); |
572 | if (ret) { | 568 | if (ret) { |
573 | DRM_ERROR("Invalid HW configuration\n"); | 569 | DRM_ERROR("Invalid HW configuration\n"); |
574 | goto query_hw_fail; | 570 | goto query_hw_fail; |
575 | } | 571 | } |
576 | 572 | ||
577 | version = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_DE_CORE_ID); | 573 | version = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_DE_CORE_ID); |
578 | DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16, | 574 | DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16, |
579 | (version >> 12) & 0xf, (version >> 8) & 0xf); | 575 | (version >> 12) & 0xf, (version >> 8) & 0xf); |
580 | 576 | ||
@@ -589,7 +585,7 @@ static int malidp_bind(struct device *dev) | |||
589 | 585 | ||
590 | for (i = 0; i < MAX_OUTPUT_CHANNELS; i++) | 586 | for (i = 0; i < MAX_OUTPUT_CHANNELS; i++) |
591 | out_depth = (out_depth << 8) | (output_width[i] & 0xf); | 587 | out_depth = (out_depth << 8) | (output_width[i] & 0xf); |
592 | malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base); | 588 | malidp_hw_write(hwdev, out_depth, hwdev->hw->map.out_depth_base); |
593 | 589 | ||
594 | atomic_set(&malidp->config_valid, 0); | 590 | atomic_set(&malidp->config_valid, 0); |
595 | init_waitqueue_head(&malidp->wq); | 591 | init_waitqueue_head(&malidp->wq); |
@@ -671,7 +667,7 @@ query_hw_fail: | |||
671 | malidp_runtime_pm_suspend(dev); | 667 | malidp_runtime_pm_suspend(dev); |
672 | drm->dev_private = NULL; | 668 | drm->dev_private = NULL; |
673 | dev_set_drvdata(dev, NULL); | 669 | dev_set_drvdata(dev, NULL); |
674 | drm_dev_unref(drm); | 670 | drm_dev_put(drm); |
675 | alloc_fail: | 671 | alloc_fail: |
676 | of_reserved_mem_device_release(dev); | 672 | of_reserved_mem_device_release(dev); |
677 | 673 | ||
@@ -704,7 +700,7 @@ static void malidp_unbind(struct device *dev) | |||
704 | malidp_runtime_pm_suspend(dev); | 700 | malidp_runtime_pm_suspend(dev); |
705 | drm->dev_private = NULL; | 701 | drm->dev_private = NULL; |
706 | dev_set_drvdata(dev, NULL); | 702 | dev_set_drvdata(dev, NULL); |
707 | drm_dev_unref(drm); | 703 | drm_dev_put(drm); |
708 | of_reserved_mem_device_release(dev); | 704 | of_reserved_mem_device_release(dev); |
709 | } | 705 | } |
710 | 706 | ||
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c index 17bca99e8ac8..2bfb542135ac 100644 --- a/drivers/gpu/drm/arm/malidp_hw.c +++ b/drivers/gpu/drm/arm/malidp_hw.c | |||
@@ -183,7 +183,7 @@ static void malidp500_enter_config_mode(struct malidp_hw_device *hwdev) | |||
183 | 183 | ||
184 | malidp_hw_setbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL); | 184 | malidp_hw_setbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL); |
185 | while (count) { | 185 | while (count) { |
186 | status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); | 186 | status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); |
187 | if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ) | 187 | if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ) |
188 | break; | 188 | break; |
189 | /* | 189 | /* |
@@ -203,7 +203,7 @@ static void malidp500_leave_config_mode(struct malidp_hw_device *hwdev) | |||
203 | malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID); | 203 | malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID); |
204 | malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL); | 204 | malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL); |
205 | while (count) { | 205 | while (count) { |
206 | status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); | 206 | status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); |
207 | if ((status & MALIDP500_DC_CONFIG_REQ) == 0) | 207 | if ((status & MALIDP500_DC_CONFIG_REQ) == 0) |
208 | break; | 208 | break; |
209 | usleep_range(100, 1000); | 209 | usleep_range(100, 1000); |
@@ -216,7 +216,7 @@ static bool malidp500_in_config_mode(struct malidp_hw_device *hwdev) | |||
216 | { | 216 | { |
217 | u32 status; | 217 | u32 status; |
218 | 218 | ||
219 | status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); | 219 | status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); |
220 | if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ) | 220 | if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ) |
221 | return true; | 221 | return true; |
222 | 222 | ||
@@ -407,7 +407,7 @@ static void malidp550_enter_config_mode(struct malidp_hw_device *hwdev) | |||
407 | 407 | ||
408 | malidp_hw_setbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL); | 408 | malidp_hw_setbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL); |
409 | while (count) { | 409 | while (count) { |
410 | status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); | 410 | status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); |
411 | if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ) | 411 | if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ) |
412 | break; | 412 | break; |
413 | /* | 413 | /* |
@@ -427,7 +427,7 @@ static void malidp550_leave_config_mode(struct malidp_hw_device *hwdev) | |||
427 | malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID); | 427 | malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID); |
428 | malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL); | 428 | malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL); |
429 | while (count) { | 429 | while (count) { |
430 | status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); | 430 | status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); |
431 | if ((status & MALIDP550_DC_CONFIG_REQ) == 0) | 431 | if ((status & MALIDP550_DC_CONFIG_REQ) == 0) |
432 | break; | 432 | break; |
433 | usleep_range(100, 1000); | 433 | usleep_range(100, 1000); |
@@ -440,7 +440,7 @@ static bool malidp550_in_config_mode(struct malidp_hw_device *hwdev) | |||
440 | { | 440 | { |
441 | u32 status; | 441 | u32 status; |
442 | 442 | ||
443 | status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); | 443 | status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); |
444 | if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ) | 444 | if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ) |
445 | return true; | 445 | return true; |
446 | 446 | ||
@@ -616,7 +616,7 @@ static int malidp650_query_hw(struct malidp_hw_device *hwdev) | |||
616 | return 0; | 616 | return 0; |
617 | } | 617 | } |
618 | 618 | ||
619 | const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = { | 619 | const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = { |
620 | [MALIDP_500] = { | 620 | [MALIDP_500] = { |
621 | .map = { | 621 | .map = { |
622 | .coeffs_base = MALIDP500_COEFFS_BASE, | 622 | .coeffs_base = MALIDP500_COEFFS_BASE, |
@@ -751,7 +751,7 @@ static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 ir | |||
751 | { | 751 | { |
752 | u32 base = malidp_get_block_base(hwdev, block); | 752 | u32 base = malidp_get_block_base(hwdev, block); |
753 | 753 | ||
754 | if (hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) | 754 | if (hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) |
755 | malidp_hw_write(hwdev, irq, base + MALIDP_REG_CLEARIRQ); | 755 | malidp_hw_write(hwdev, irq, base + MALIDP_REG_CLEARIRQ); |
756 | else | 756 | else |
757 | malidp_hw_write(hwdev, irq, base + MALIDP_REG_STATUS); | 757 | malidp_hw_write(hwdev, irq, base + MALIDP_REG_STATUS); |
@@ -762,12 +762,14 @@ static irqreturn_t malidp_de_irq(int irq, void *arg) | |||
762 | struct drm_device *drm = arg; | 762 | struct drm_device *drm = arg; |
763 | struct malidp_drm *malidp = drm->dev_private; | 763 | struct malidp_drm *malidp = drm->dev_private; |
764 | struct malidp_hw_device *hwdev; | 764 | struct malidp_hw_device *hwdev; |
765 | struct malidp_hw *hw; | ||
765 | const struct malidp_irq_map *de; | 766 | const struct malidp_irq_map *de; |
766 | u32 status, mask, dc_status; | 767 | u32 status, mask, dc_status; |
767 | irqreturn_t ret = IRQ_NONE; | 768 | irqreturn_t ret = IRQ_NONE; |
768 | 769 | ||
769 | hwdev = malidp->dev; | 770 | hwdev = malidp->dev; |
770 | de = &hwdev->map.de_irq_map; | 771 | hw = hwdev->hw; |
772 | de = &hw->map.de_irq_map; | ||
771 | 773 | ||
772 | /* | 774 | /* |
773 | * if we are suspended it is likely that we were invoked because | 775 | * if we are suspended it is likely that we were invoked because |
@@ -778,8 +780,8 @@ static irqreturn_t malidp_de_irq(int irq, void *arg) | |||
778 | return IRQ_NONE; | 780 | return IRQ_NONE; |
779 | 781 | ||
780 | /* first handle the config valid IRQ */ | 782 | /* first handle the config valid IRQ */ |
781 | dc_status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); | 783 | dc_status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS); |
782 | if (dc_status & hwdev->map.dc_irq_map.vsync_irq) { | 784 | if (dc_status & hw->map.dc_irq_map.vsync_irq) { |
783 | /* we have a page flip event */ | 785 | /* we have a page flip event */ |
784 | atomic_set(&malidp->config_valid, 1); | 786 | atomic_set(&malidp->config_valid, 1); |
785 | malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, dc_status); | 787 | malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, dc_status); |
@@ -832,11 +834,11 @@ int malidp_de_irq_init(struct drm_device *drm, int irq) | |||
832 | 834 | ||
833 | /* first enable the DC block IRQs */ | 835 | /* first enable the DC block IRQs */ |
834 | malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK, | 836 | malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK, |
835 | hwdev->map.dc_irq_map.irq_mask); | 837 | hwdev->hw->map.dc_irq_map.irq_mask); |
836 | 838 | ||
837 | /* now enable the DE block IRQs */ | 839 | /* now enable the DE block IRQs */ |
838 | malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK, | 840 | malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK, |
839 | hwdev->map.de_irq_map.irq_mask); | 841 | hwdev->hw->map.de_irq_map.irq_mask); |
840 | 842 | ||
841 | return 0; | 843 | return 0; |
842 | } | 844 | } |
@@ -847,9 +849,9 @@ void malidp_de_irq_fini(struct drm_device *drm) | |||
847 | struct malidp_hw_device *hwdev = malidp->dev; | 849 | struct malidp_hw_device *hwdev = malidp->dev; |
848 | 850 | ||
849 | malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, | 851 | malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, |
850 | hwdev->map.de_irq_map.irq_mask); | 852 | hwdev->hw->map.de_irq_map.irq_mask); |
851 | malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK, | 853 | malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK, |
852 | hwdev->map.dc_irq_map.irq_mask); | 854 | hwdev->hw->map.dc_irq_map.irq_mask); |
853 | } | 855 | } |
854 | 856 | ||
855 | static irqreturn_t malidp_se_irq(int irq, void *arg) | 857 | static irqreturn_t malidp_se_irq(int irq, void *arg) |
@@ -857,6 +859,8 @@ static irqreturn_t malidp_se_irq(int irq, void *arg) | |||
857 | struct drm_device *drm = arg; | 859 | struct drm_device *drm = arg; |
858 | struct malidp_drm *malidp = drm->dev_private; | 860 | struct malidp_drm *malidp = drm->dev_private; |
859 | struct malidp_hw_device *hwdev = malidp->dev; | 861 | struct malidp_hw_device *hwdev = malidp->dev; |
862 | struct malidp_hw *hw = hwdev->hw; | ||
863 | const struct malidp_irq_map *se = &hw->map.se_irq_map; | ||
860 | u32 status, mask; | 864 | u32 status, mask; |
861 | 865 | ||
862 | /* | 866 | /* |
@@ -867,12 +871,12 @@ static irqreturn_t malidp_se_irq(int irq, void *arg) | |||
867 | if (hwdev->pm_suspended) | 871 | if (hwdev->pm_suspended) |
868 | return IRQ_NONE; | 872 | return IRQ_NONE; |
869 | 873 | ||
870 | status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS); | 874 | status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS); |
871 | if (!(status & hwdev->map.se_irq_map.irq_mask)) | 875 | if (!(status & se->irq_mask)) |
872 | return IRQ_NONE; | 876 | return IRQ_NONE; |
873 | 877 | ||
874 | mask = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_MASKIRQ); | 878 | mask = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_MASKIRQ); |
875 | status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS); | 879 | status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS); |
876 | status &= mask; | 880 | status &= mask; |
877 | /* ToDo: status decoding and firing up of VSYNC and page flip events */ | 881 | /* ToDo: status decoding and firing up of VSYNC and page flip events */ |
878 | 882 | ||
@@ -905,7 +909,7 @@ int malidp_se_irq_init(struct drm_device *drm, int irq) | |||
905 | } | 909 | } |
906 | 910 | ||
907 | malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK, | 911 | malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK, |
908 | hwdev->map.se_irq_map.irq_mask); | 912 | hwdev->hw->map.se_irq_map.irq_mask); |
909 | 913 | ||
910 | return 0; | 914 | return 0; |
911 | } | 915 | } |
@@ -916,5 +920,5 @@ void malidp_se_irq_fini(struct drm_device *drm) | |||
916 | struct malidp_hw_device *hwdev = malidp->dev; | 920 | struct malidp_hw_device *hwdev = malidp->dev; |
917 | 921 | ||
918 | malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK, | 922 | malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK, |
919 | hwdev->map.se_irq_map.irq_mask); | 923 | hwdev->hw->map.se_irq_map.irq_mask); |
920 | } | 924 | } |
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h index 849ad9a30c3a..b0690ebb3565 100644 --- a/drivers/gpu/drm/arm/malidp_hw.h +++ b/drivers/gpu/drm/arm/malidp_hw.h | |||
@@ -120,18 +120,14 @@ struct malidp_hw_regmap { | |||
120 | /* Unlike DP550/650, DP500 has 3 stride registers in its video layer. */ | 120 | /* Unlike DP550/650, DP500 has 3 stride registers in its video layer. */ |
121 | #define MALIDP_DEVICE_LV_HAS_3_STRIDES BIT(0) | 121 | #define MALIDP_DEVICE_LV_HAS_3_STRIDES BIT(0) |
122 | 122 | ||
123 | struct malidp_hw_device { | 123 | struct malidp_hw_device; |
124 | const struct malidp_hw_regmap map; | ||
125 | void __iomem *regs; | ||
126 | 124 | ||
127 | /* APB clock */ | 125 | /* |
128 | struct clk *pclk; | 126 | * Static structure containing hardware specific data and pointers to |
129 | /* AXI clock */ | 127 | * functions that behave differently between various versions of the IP. |
130 | struct clk *aclk; | 128 | */ |
131 | /* main clock for display core */ | 129 | struct malidp_hw { |
132 | struct clk *mclk; | 130 | const struct malidp_hw_regmap map; |
133 | /* pixel clock for display core */ | ||
134 | struct clk *pxlclk; | ||
135 | 131 | ||
136 | /* | 132 | /* |
137 | * Validate the driver instance against the hardware bits | 133 | * Validate the driver instance against the hardware bits |
@@ -182,15 +178,6 @@ struct malidp_hw_device { | |||
182 | struct videomode *vm); | 178 | struct videomode *vm); |
183 | 179 | ||
184 | u8 features; | 180 | u8 features; |
185 | |||
186 | u8 min_line_size; | ||
187 | u16 max_line_size; | ||
188 | |||
189 | /* track the device PM state */ | ||
190 | bool pm_suspended; | ||
191 | |||
192 | /* size of memory used for rotating layers, up to two banks available */ | ||
193 | u32 rotation_memory[2]; | ||
194 | }; | 181 | }; |
195 | 182 | ||
196 | /* Supported variants of the hardware */ | 183 | /* Supported variants of the hardware */ |
@@ -202,7 +189,33 @@ enum { | |||
202 | MALIDP_MAX_DEVICES | 189 | MALIDP_MAX_DEVICES |
203 | }; | 190 | }; |
204 | 191 | ||
205 | extern const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES]; | 192 | extern const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES]; |
193 | |||
194 | /* | ||
195 | * Structure used by the driver during runtime operation. | ||
196 | */ | ||
197 | struct malidp_hw_device { | ||
198 | struct malidp_hw *hw; | ||
199 | void __iomem *regs; | ||
200 | |||
201 | /* APB clock */ | ||
202 | struct clk *pclk; | ||
203 | /* AXI clock */ | ||
204 | struct clk *aclk; | ||
205 | /* main clock for display core */ | ||
206 | struct clk *mclk; | ||
207 | /* pixel clock for display core */ | ||
208 | struct clk *pxlclk; | ||
209 | |||
210 | u8 min_line_size; | ||
211 | u16 max_line_size; | ||
212 | |||
213 | /* track the device PM state */ | ||
214 | bool pm_suspended; | ||
215 | |||
216 | /* size of memory used for rotating layers, up to two banks available */ | ||
217 | u32 rotation_memory[2]; | ||
218 | }; | ||
206 | 219 | ||
207 | static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg) | 220 | static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg) |
208 | { | 221 | { |
@@ -240,9 +253,9 @@ static inline u32 malidp_get_block_base(struct malidp_hw_device *hwdev, | |||
240 | { | 253 | { |
241 | switch (block) { | 254 | switch (block) { |
242 | case MALIDP_SE_BLOCK: | 255 | case MALIDP_SE_BLOCK: |
243 | return hwdev->map.se_base; | 256 | return hwdev->hw->map.se_base; |
244 | case MALIDP_DC_BLOCK: | 257 | case MALIDP_DC_BLOCK: |
245 | return hwdev->map.dc_base; | 258 | return hwdev->hw->map.dc_base; |
246 | } | 259 | } |
247 | 260 | ||
248 | return 0; | 261 | return 0; |
@@ -275,7 +288,7 @@ u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map, | |||
275 | static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev, | 288 | static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev, |
276 | unsigned int pitch) | 289 | unsigned int pitch) |
277 | { | 290 | { |
278 | return !(pitch & (hwdev->map.bus_align_bytes - 1)); | 291 | return !(pitch & (hwdev->hw->map.bus_align_bytes - 1)); |
279 | } | 292 | } |
280 | 293 | ||
281 | /* U16.16 */ | 294 | /* U16.16 */ |
@@ -308,8 +321,8 @@ static inline void malidp_se_set_enh_coeffs(struct malidp_hw_device *hwdev) | |||
308 | }; | 321 | }; |
309 | u32 val = MALIDP_SE_SET_ENH_LIMIT_LOW(MALIDP_SE_ENH_LOW_LEVEL) | | 322 | u32 val = MALIDP_SE_SET_ENH_LIMIT_LOW(MALIDP_SE_ENH_LOW_LEVEL) | |
310 | MALIDP_SE_SET_ENH_LIMIT_HIGH(MALIDP_SE_ENH_HIGH_LEVEL); | 323 | MALIDP_SE_SET_ENH_LIMIT_HIGH(MALIDP_SE_ENH_HIGH_LEVEL); |
311 | u32 image_enh = hwdev->map.se_base + | 324 | u32 image_enh = hwdev->hw->map.se_base + |
312 | ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ? | 325 | ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ? |
313 | 0x10 : 0xC) + MALIDP_SE_IMAGE_ENH; | 326 | 0x10 : 0xC) + MALIDP_SE_IMAGE_ENH; |
314 | u32 enh_coeffs = image_enh + MALIDP_SE_ENH_COEFF0; | 327 | u32 enh_coeffs = image_enh + MALIDP_SE_ENH_COEFF0; |
315 | int i; | 328 | int i; |
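
The mali-dp rework splits what used to be a single malidp_hw_device into a const, per-IP struct malidp_hw (register map plus function pointers, selected via of_device_get_match_data()) and a runtime struct malidp_hw_device that owns clocks and PM state and points at the static descriptor; every hwdev->map / hwdev->foo() access therefore becomes hwdev->hw->map / hwdev->hw->foo(). A toy version of that static-descriptor/runtime-instance split, with made-up fields:

#include <stdbool.h>
#include <stdio.h>

struct hw_device;

/* per-IP, immutable description: register layout plus behaviour hooks */
struct hw_descriptor {
    unsigned int dc_base;
    bool (*in_config_mode)(struct hw_device *hwdev);
};

/* per-probe, mutable state: owns clocks/PM flags and points at one descriptor */
struct hw_device {
    const struct hw_descriptor *hw;
    bool pm_suspended;
};

static bool dp500_in_config_mode(struct hw_device *hwdev)
{
    /* the real hook would read hwdev->hw->map.dc_base + a status register */
    return !hwdev->pm_suspended;
}

static const struct hw_descriptor dp500 = {
    .dc_base = 0x0000,
    .in_config_mode = dp500_in_config_mode,
};

int main(void)
{
    struct hw_device dev = { .hw = &dp500, .pm_suspended = false };

    /* mirrors the hwdev->hw->in_config_mode(hwdev) indirection in the patch */
    printf("in config mode: %d\n", dev.hw->in_config_mode(&dev));
    return 0;
}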
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index 94e7e3fa3408..e7419797bbd1 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c | |||
@@ -57,7 +57,7 @@ static void malidp_de_plane_destroy(struct drm_plane *plane) | |||
57 | struct malidp_plane *mp = to_malidp_plane(plane); | 57 | struct malidp_plane *mp = to_malidp_plane(plane); |
58 | 58 | ||
59 | if (mp->base.fb) | 59 | if (mp->base.fb) |
60 | drm_framebuffer_unreference(mp->base.fb); | 60 | drm_framebuffer_put(mp->base.fb); |
61 | 61 | ||
62 | drm_plane_helper_disable(plane); | 62 | drm_plane_helper_disable(plane); |
63 | drm_plane_cleanup(plane); | 63 | drm_plane_cleanup(plane); |
@@ -185,8 +185,9 @@ static int malidp_de_plane_check(struct drm_plane *plane, | |||
185 | 185 | ||
186 | fb = state->fb; | 186 | fb = state->fb; |
187 | 187 | ||
188 | ms->format = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id, | 188 | ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map, |
189 | fb->format->format); | 189 | mp->layer->id, |
190 | fb->format->format); | ||
190 | if (ms->format == MALIDP_INVALID_FORMAT_ID) | 191 | if (ms->format == MALIDP_INVALID_FORMAT_ID) |
191 | return -EINVAL; | 192 | return -EINVAL; |
192 | 193 | ||
@@ -211,7 +212,7 @@ static int malidp_de_plane_check(struct drm_plane *plane, | |||
211 | * third plane stride register. | 212 | * third plane stride register. |
212 | */ | 213 | */ |
213 | if (ms->n_planes == 3 && | 214 | if (ms->n_planes == 3 && |
214 | !(mp->hwdev->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) && | 215 | !(mp->hwdev->hw->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) && |
215 | (state->fb->pitches[1] != state->fb->pitches[2])) | 216 | (state->fb->pitches[1] != state->fb->pitches[2])) |
216 | return -EINVAL; | 217 | return -EINVAL; |
217 | 218 | ||
@@ -229,9 +230,9 @@ static int malidp_de_plane_check(struct drm_plane *plane, | |||
229 | if (state->rotation & MALIDP_ROTATED_MASK) { | 230 | if (state->rotation & MALIDP_ROTATED_MASK) { |
230 | int val; | 231 | int val; |
231 | 232 | ||
232 | val = mp->hwdev->rotmem_required(mp->hwdev, state->crtc_h, | 233 | val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_h, |
233 | state->crtc_w, | 234 | state->crtc_w, |
234 | fb->format->format); | 235 | fb->format->format); |
235 | if (val < 0) | 236 | if (val < 0) |
236 | return val; | 237 | return val; |
237 | 238 | ||
@@ -251,7 +252,7 @@ static void malidp_de_set_plane_pitches(struct malidp_plane *mp, | |||
251 | return; | 252 | return; |
252 | 253 | ||
253 | if (num_planes == 3) | 254 | if (num_planes == 3) |
254 | num_strides = (mp->hwdev->features & | 255 | num_strides = (mp->hwdev->hw->features & |
255 | MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 3 : 2; | 256 | MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 3 : 2; |
256 | 257 | ||
257 | for (i = 0; i < num_strides; ++i) | 258 | for (i = 0; i < num_strides; ++i) |
@@ -264,13 +265,11 @@ static void malidp_de_plane_update(struct drm_plane *plane, | |||
264 | struct drm_plane_state *old_state) | 265 | struct drm_plane_state *old_state) |
265 | { | 266 | { |
266 | struct malidp_plane *mp; | 267 | struct malidp_plane *mp; |
267 | const struct malidp_hw_regmap *map; | ||
268 | struct malidp_plane_state *ms = to_malidp_plane_state(plane->state); | 268 | struct malidp_plane_state *ms = to_malidp_plane_state(plane->state); |
269 | u32 src_w, src_h, dest_w, dest_h, val; | 269 | u32 src_w, src_h, dest_w, dest_h, val; |
270 | int i; | 270 | int i; |
271 | 271 | ||
272 | mp = to_malidp_plane(plane); | 272 | mp = to_malidp_plane(plane); |
273 | map = &mp->hwdev->map; | ||
274 | 273 | ||
275 | /* convert src values from Q16 fixed point to integer */ | 274 | /* convert src values from Q16 fixed point to integer */ |
276 | src_w = plane->state->src_w >> 16; | 275 | src_w = plane->state->src_w >> 16; |
@@ -363,7 +362,7 @@ static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = { | |||
363 | int malidp_de_planes_init(struct drm_device *drm) | 362 | int malidp_de_planes_init(struct drm_device *drm) |
364 | { | 363 | { |
365 | struct malidp_drm *malidp = drm->dev_private; | 364 | struct malidp_drm *malidp = drm->dev_private; |
366 | const struct malidp_hw_regmap *map = &malidp->dev->map; | 365 | const struct malidp_hw_regmap *map = &malidp->dev->hw->map; |
367 | struct malidp_plane *plane = NULL; | 366 | struct malidp_plane *plane = NULL; |
368 | enum drm_plane_type plane_type; | 367 | enum drm_plane_type plane_type; |
369 | unsigned long crtcs = 1 << drm->mode_config.num_crtc; | 368 | unsigned long crtcs = 1 << drm->mode_config.num_crtc; |
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h index b4efcbabf7f7..d034b2cb5eee 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511.h +++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h | |||
@@ -372,9 +372,18 @@ struct adv7511 { | |||
372 | }; | 372 | }; |
373 | 373 | ||
374 | #ifdef CONFIG_DRM_I2C_ADV7511_CEC | 374 | #ifdef CONFIG_DRM_I2C_ADV7511_CEC |
375 | int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511, | 375 | int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511); |
376 | unsigned int offset); | ||
377 | void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1); | 376 | void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1); |
377 | #else | ||
378 | static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511) | ||
379 | { | ||
380 | unsigned int offset = adv7511->type == ADV7533 ? | ||
381 | ADV7533_REG_CEC_OFFSET : 0; | ||
382 | |||
383 | regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, | ||
384 | ADV7511_CEC_CTRL_POWER_DOWN); | ||
385 | return 0; | ||
386 | } | ||
378 | #endif | 387 | #endif |
379 | 388 | ||
380 | #ifdef CONFIG_DRM_I2C_ADV7533 | 389 | #ifdef CONFIG_DRM_I2C_ADV7533 |
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c index b33d730e4d73..a20a45c0b353 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c | |||
@@ -300,18 +300,21 @@ static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511) | |||
300 | return 0; | 300 | return 0; |
301 | } | 301 | } |
302 | 302 | ||
303 | int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511, | 303 | int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511) |
304 | unsigned int offset) | ||
305 | { | 304 | { |
305 | unsigned int offset = adv7511->type == ADV7533 ? | ||
306 | ADV7533_REG_CEC_OFFSET : 0; | ||
306 | int ret = adv7511_cec_parse_dt(dev, adv7511); | 307 | int ret = adv7511_cec_parse_dt(dev, adv7511); |
307 | 308 | ||
308 | if (ret) | 309 | if (ret) |
309 | return ret; | 310 | goto err_cec_parse_dt; |
310 | 311 | ||
311 | adv7511->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops, | 312 | adv7511->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops, |
312 | adv7511, dev_name(dev), CEC_CAP_DEFAULTS, ADV7511_MAX_ADDRS); | 313 | adv7511, dev_name(dev), CEC_CAP_DEFAULTS, ADV7511_MAX_ADDRS); |
313 | if (IS_ERR(adv7511->cec_adap)) | 314 | if (IS_ERR(adv7511->cec_adap)) { |
314 | return PTR_ERR(adv7511->cec_adap); | 315 | ret = PTR_ERR(adv7511->cec_adap); |
316 | goto err_cec_alloc; | ||
317 | } | ||
315 | 318 | ||
316 | regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 0); | 319 | regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 0); |
317 | /* cec soft reset */ | 320 | /* cec soft reset */ |
@@ -329,9 +332,18 @@ int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511, | |||
329 | ((adv7511->cec_clk_freq / 750000) - 1) << 2); | 332 | ((adv7511->cec_clk_freq / 750000) - 1) << 2); |
330 | 333 | ||
331 | ret = cec_register_adapter(adv7511->cec_adap, dev); | 334 | ret = cec_register_adapter(adv7511->cec_adap, dev); |
332 | if (ret) { | 335 | if (ret) |
333 | cec_delete_adapter(adv7511->cec_adap); | 336 | goto err_cec_register; |
334 | adv7511->cec_adap = NULL; | 337 | return 0; |
335 | } | 338 | |
336 | return ret; | 339 | err_cec_register: |
340 | cec_delete_adapter(adv7511->cec_adap); | ||
341 | adv7511->cec_adap = NULL; | ||
342 | err_cec_alloc: | ||
343 | dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n", | ||
344 | ret); | ||
345 | err_cec_parse_dt: | ||
346 | regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, | ||
347 | ADV7511_CEC_CTRL_POWER_DOWN); | ||
348 | return ret == -EPROBE_DEFER ? ret : 0; | ||
337 | } | 349 | } |
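
The reworked adv7511_cec_init() unwinds through labelled error paths, always powers the CEC block down on failure, and returns 0 for anything except -EPROBE_DEFER, so a broken CEC configuration no longer fails the whole bridge probe. A compact stand-alone sketch of that goto-unwind-and-degrade pattern; the error constant and helpers are simplified stand-ins:

#include <stdio.h>

#define EPROBE_DEFER 517    /* stand-in for the kernel's probe-deferral code */

static int parse_dt(void)      { return 0; }   /* pretend DT parsing succeeds */
static int alloc_adapter(void) { return -1; }  /* pretend allocation fails */
static void power_down_cec(void) { printf("CEC powered down\n"); }

static int cec_init_sketch(void)
{
    int ret;

    ret = parse_dt();
    if (ret)
        goto err_parse_dt;

    ret = alloc_adapter();
    if (ret)
        goto err_alloc;

    return 0;

err_alloc:
    printf("Initializing CEC failed (%d), disabling CEC\n", ret);
err_parse_dt:
    power_down_cec();
    /* only probe deferral is propagated; other failures just disable CEC */
    return ret == -EPROBE_DEFER ? ret : 0;
}

int main(void)
{
    return cec_init_sketch();
}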
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 0e14f1572d05..efa29db5fc2b 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | |||
@@ -1084,7 +1084,6 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) | |||
1084 | struct device *dev = &i2c->dev; | 1084 | struct device *dev = &i2c->dev; |
1085 | unsigned int main_i2c_addr = i2c->addr << 1; | 1085 | unsigned int main_i2c_addr = i2c->addr << 1; |
1086 | unsigned int edid_i2c_addr = main_i2c_addr + 4; | 1086 | unsigned int edid_i2c_addr = main_i2c_addr + 4; |
1087 | unsigned int offset; | ||
1088 | unsigned int val; | 1087 | unsigned int val; |
1089 | int ret; | 1088 | int ret; |
1090 | 1089 | ||
@@ -1192,24 +1191,16 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) | |||
1192 | if (adv7511->type == ADV7511) | 1191 | if (adv7511->type == ADV7511) |
1193 | adv7511_set_link_config(adv7511, &link_config); | 1192 | adv7511_set_link_config(adv7511, &link_config); |
1194 | 1193 | ||
1194 | ret = adv7511_cec_init(dev, adv7511); | ||
1195 | if (ret) | ||
1196 | goto err_unregister_cec; | ||
1197 | |||
1195 | adv7511->bridge.funcs = &adv7511_bridge_funcs; | 1198 | adv7511->bridge.funcs = &adv7511_bridge_funcs; |
1196 | adv7511->bridge.of_node = dev->of_node; | 1199 | adv7511->bridge.of_node = dev->of_node; |
1197 | 1200 | ||
1198 | drm_bridge_add(&adv7511->bridge); | 1201 | drm_bridge_add(&adv7511->bridge); |
1199 | 1202 | ||
1200 | adv7511_audio_init(dev, adv7511); | 1203 | adv7511_audio_init(dev, adv7511); |
1201 | |||
1202 | offset = adv7511->type == ADV7533 ? ADV7533_REG_CEC_OFFSET : 0; | ||
1203 | |||
1204 | #ifdef CONFIG_DRM_I2C_ADV7511_CEC | ||
1205 | ret = adv7511_cec_init(dev, adv7511, offset); | ||
1206 | if (ret) | ||
1207 | goto err_unregister_cec; | ||
1208 | #else | ||
1209 | regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, | ||
1210 | ADV7511_CEC_CTRL_POWER_DOWN); | ||
1211 | #endif | ||
1212 | |||
1213 | return 0; | 1204 | return 0; |
1214 | 1205 | ||
1215 | err_unregister_cec: | 1206 | err_unregister_cec: |
diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c index 0903ba574f61..75b0d3f6e4de 100644 --- a/drivers/gpu/drm/bridge/lvds-encoder.c +++ b/drivers/gpu/drm/bridge/lvds-encoder.c | |||
@@ -13,13 +13,37 @@ | |||
13 | 13 | ||
14 | #include <linux/of_graph.h> | 14 | #include <linux/of_graph.h> |
15 | 15 | ||
16 | struct lvds_encoder { | ||
17 | struct drm_bridge bridge; | ||
18 | struct drm_bridge *panel_bridge; | ||
19 | }; | ||
20 | |||
21 | static int lvds_encoder_attach(struct drm_bridge *bridge) | ||
22 | { | ||
23 | struct lvds_encoder *lvds_encoder = container_of(bridge, | ||
24 | struct lvds_encoder, | ||
25 | bridge); | ||
26 | |||
27 | return drm_bridge_attach(bridge->encoder, lvds_encoder->panel_bridge, | ||
28 | bridge); | ||
29 | } | ||
30 | |||
31 | static struct drm_bridge_funcs funcs = { | ||
32 | .attach = lvds_encoder_attach, | ||
33 | }; | ||
34 | |||
16 | static int lvds_encoder_probe(struct platform_device *pdev) | 35 | static int lvds_encoder_probe(struct platform_device *pdev) |
17 | { | 36 | { |
18 | struct device_node *port; | 37 | struct device_node *port; |
19 | struct device_node *endpoint; | 38 | struct device_node *endpoint; |
20 | struct device_node *panel_node; | 39 | struct device_node *panel_node; |
21 | struct drm_panel *panel; | 40 | struct drm_panel *panel; |
22 | struct drm_bridge *bridge; | 41 | struct lvds_encoder *lvds_encoder; |
42 | |||
43 | lvds_encoder = devm_kzalloc(&pdev->dev, sizeof(*lvds_encoder), | ||
44 | GFP_KERNEL); | ||
45 | if (!lvds_encoder) | ||
46 | return -ENOMEM; | ||
23 | 47 | ||
24 | /* Locate the panel DT node. */ | 48 | /* Locate the panel DT node. */ |
25 | port = of_graph_get_port_by_id(pdev->dev.of_node, 1); | 49 | port = of_graph_get_port_by_id(pdev->dev.of_node, 1); |
@@ -49,20 +73,30 @@ static int lvds_encoder_probe(struct platform_device *pdev) | |||
49 | return -EPROBE_DEFER; | 73 | return -EPROBE_DEFER; |
50 | } | 74 | } |
51 | 75 | ||
52 | bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS); | 76 | lvds_encoder->panel_bridge = |
53 | if (IS_ERR(bridge)) | 77 | devm_drm_panel_bridge_add(&pdev->dev, |
54 | return PTR_ERR(bridge); | 78 | panel, DRM_MODE_CONNECTOR_LVDS); |
79 | if (IS_ERR(lvds_encoder->panel_bridge)) | ||
80 | return PTR_ERR(lvds_encoder->panel_bridge); | ||
81 | |||
82 | /* The panel_bridge bridge is attached to the panel's of_node, | ||
83 | * but we need a bridge attached to our of_node for our user | ||
84 | * to look up. | ||
85 | */ | ||
86 | lvds_encoder->bridge.of_node = pdev->dev.of_node; | ||
87 | lvds_encoder->bridge.funcs = &funcs; | ||
88 | drm_bridge_add(&lvds_encoder->bridge); | ||
55 | 89 | ||
56 | platform_set_drvdata(pdev, bridge); | 90 | platform_set_drvdata(pdev, lvds_encoder); |
57 | 91 | ||
58 | return 0; | 92 | return 0; |
59 | } | 93 | } |
60 | 94 | ||
61 | static int lvds_encoder_remove(struct platform_device *pdev) | 95 | static int lvds_encoder_remove(struct platform_device *pdev) |
62 | { | 96 | { |
63 | struct drm_bridge *bridge = platform_get_drvdata(pdev); | 97 | struct lvds_encoder *lvds_encoder = platform_get_drvdata(pdev); |
64 | 98 | ||
65 | drm_bridge_remove(bridge); | 99 | drm_bridge_remove(&lvds_encoder->bridge); |
66 | 100 | ||
67 | return 0; | 101 | return 0; |
68 | } | 102 | } |
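
The lvds-encoder fix above wraps the panel bridge in a driver-private structure so the driver can register its own drm_bridge on its own of_node and forward attach() to the panel bridge it looked up. The key idiom is recovering the wrapper from the embedded member with container_of(); the standalone sketch below shows only that idiom, with simplified stand-in types rather than the real DRM structures.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for struct drm_bridge and the driver-private wrapper. */
struct bridge {
	const char *name;
};

struct lvds_encoder {
	struct bridge bridge;        /* embedded, registered with the core */
	struct bridge *panel_bridge; /* the bridge attach() is forwarded to */
};

/* Recover the containing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void attach(struct bridge *b)
{
	struct lvds_encoder *enc = container_of(b, struct lvds_encoder, bridge);

	printf("attach: forwarding to panel bridge \"%s\"\n",
	       enc->panel_bridge->name);
}

int main(void)
{
	struct bridge panel = { .name = "panel" };
	struct lvds_encoder enc = {
		.bridge = { .name = "lvds-encoder" },
		.panel_bridge = &panel,
	};

	/* The core only ever hands back a pointer to the embedded member. */
	attach(&enc.bridge);
	return 0;
}
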
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index bf14214fa464..b72259bf6e2f 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | |||
@@ -138,6 +138,7 @@ struct dw_hdmi { | |||
138 | struct device *dev; | 138 | struct device *dev; |
139 | struct clk *isfr_clk; | 139 | struct clk *isfr_clk; |
140 | struct clk *iahb_clk; | 140 | struct clk *iahb_clk; |
141 | struct clk *cec_clk; | ||
141 | struct dw_hdmi_i2c *i2c; | 142 | struct dw_hdmi_i2c *i2c; |
142 | 143 | ||
143 | struct hdmi_data_info hdmi_data; | 144 | struct hdmi_data_info hdmi_data; |
@@ -2382,6 +2383,26 @@ __dw_hdmi_probe(struct platform_device *pdev, | |||
2382 | goto err_isfr; | 2383 | goto err_isfr; |
2383 | } | 2384 | } |
2384 | 2385 | ||
2386 | hdmi->cec_clk = devm_clk_get(hdmi->dev, "cec"); | ||
2387 | if (PTR_ERR(hdmi->cec_clk) == -ENOENT) { | ||
2388 | hdmi->cec_clk = NULL; | ||
2389 | } else if (IS_ERR(hdmi->cec_clk)) { | ||
2390 | ret = PTR_ERR(hdmi->cec_clk); | ||
2391 | if (ret != -EPROBE_DEFER) | ||
2392 | dev_err(hdmi->dev, "Cannot get HDMI cec clock: %d\n", | ||
2393 | ret); | ||
2394 | |||
2395 | hdmi->cec_clk = NULL; | ||
2396 | goto err_iahb; | ||
2397 | } else { | ||
2398 | ret = clk_prepare_enable(hdmi->cec_clk); | ||
2399 | if (ret) { | ||
2400 | dev_err(hdmi->dev, "Cannot enable HDMI cec clock: %d\n", | ||
2401 | ret); | ||
2402 | goto err_iahb; | ||
2403 | } | ||
2404 | } | ||
2405 | |||
2385 | /* Product and revision IDs */ | 2406 | /* Product and revision IDs */ |
2386 | hdmi->version = (hdmi_readb(hdmi, HDMI_DESIGN_ID) << 8) | 2407 | hdmi->version = (hdmi_readb(hdmi, HDMI_DESIGN_ID) << 8) |
2387 | | (hdmi_readb(hdmi, HDMI_REVISION_ID) << 0); | 2408 | | (hdmi_readb(hdmi, HDMI_REVISION_ID) << 0); |
@@ -2518,6 +2539,8 @@ err_iahb: | |||
2518 | cec_notifier_put(hdmi->cec_notifier); | 2539 | cec_notifier_put(hdmi->cec_notifier); |
2519 | 2540 | ||
2520 | clk_disable_unprepare(hdmi->iahb_clk); | 2541 | clk_disable_unprepare(hdmi->iahb_clk); |
2542 | if (hdmi->cec_clk) | ||
2543 | clk_disable_unprepare(hdmi->cec_clk); | ||
2521 | err_isfr: | 2544 | err_isfr: |
2522 | clk_disable_unprepare(hdmi->isfr_clk); | 2545 | clk_disable_unprepare(hdmi->isfr_clk); |
2523 | err_res: | 2546 | err_res: |
@@ -2541,6 +2564,8 @@ static void __dw_hdmi_remove(struct dw_hdmi *hdmi) | |||
2541 | 2564 | ||
2542 | clk_disable_unprepare(hdmi->iahb_clk); | 2565 | clk_disable_unprepare(hdmi->iahb_clk); |
2543 | clk_disable_unprepare(hdmi->isfr_clk); | 2566 | clk_disable_unprepare(hdmi->isfr_clk); |
2567 | if (hdmi->cec_clk) | ||
2568 | clk_disable_unprepare(hdmi->cec_clk); | ||
2544 | 2569 | ||
2545 | if (hdmi->i2c) | 2570 | if (hdmi->i2c) |
2546 | i2c_del_adapter(&hdmi->i2c->adap); | 2571 | i2c_del_adapter(&hdmi->i2c->adap); |
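
The dw-hdmi hunk above treats the "cec" clock as optional: -ENOENT means the clock simply is not described and the driver carries on without it, -EPROBE_DEFER is propagated quietly, and any other error is logged and aborts the probe. Below is a small sketch of that three-way decision using a hypothetical get_clock() helper returning 0 or a negative errno; it is not the clk API itself, and EPROBE_DEFER is again a locally defined stand-in.

#include <errno.h>
#include <stdio.h>

#define EPROBE_DEFER 517 /* kernel-private errno; defined here only for the sketch */

/* Hypothetical clock lookup: returns 0 on success or a negative errno. */
static int get_clock(const char *name, int *out_handle)
{
	(void)name;
	*out_handle = 0;
	return -ENOENT; /* pretend the DT simply does not list a "cec" clock */
}

static int probe_sketch(void)
{
	int cec_clk = -1;
	int ret = get_clock("cec", &cec_clk);

	if (ret == -ENOENT) {
		cec_clk = -1;                    /* optional clock is absent: fine */
		printf("no cec clock, continuing without it\n");
	} else if (ret == -EPROBE_DEFER) {
		return ret;                      /* retry the probe later, no log */
	} else if (ret < 0) {
		fprintf(stderr, "cannot get cec clock: %d\n", ret);
		return ret;                      /* real error: fail the probe */
	} else {
		printf("cec clock %d acquired and enabled\n", cec_clk);
	}
	return 0;
}

int main(void)
{
	printf("probe_sketch() returned %d\n", probe_sketch());
	return 0;
}
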
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 8571cfd877c5..8636e7eeb731 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c | |||
@@ -97,7 +97,7 @@ | |||
97 | #define DP0_ACTIVEVAL 0x0650 | 97 | #define DP0_ACTIVEVAL 0x0650 |
98 | #define DP0_SYNCVAL 0x0654 | 98 | #define DP0_SYNCVAL 0x0654 |
99 | #define DP0_MISC 0x0658 | 99 | #define DP0_MISC 0x0658 |
100 | #define TU_SIZE_RECOMMENDED (0x3f << 16) /* LSCLK cycles per TU */ | 100 | #define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */ |
101 | #define BPC_6 (0 << 5) | 101 | #define BPC_6 (0 << 5) |
102 | #define BPC_8 (1 << 5) | 102 | #define BPC_8 (1 << 5) |
103 | 103 | ||
@@ -318,7 +318,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux, | |||
318 | tmp = (tmp << 8) | buf[i]; | 318 | tmp = (tmp << 8) | buf[i]; |
319 | i++; | 319 | i++; |
320 | if (((i % 4) == 0) || (i == size)) { | 320 | if (((i % 4) == 0) || (i == size)) { |
321 | tc_write(DP0_AUXWDATA(i >> 2), tmp); | 321 | tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp); |
322 | tmp = 0; | 322 | tmp = 0; |
323 | } | 323 | } |
324 | } | 324 | } |
@@ -603,8 +603,15 @@ static int tc_get_display_props(struct tc_data *tc) | |||
603 | ret = drm_dp_link_probe(&tc->aux, &tc->link.base); | 603 | ret = drm_dp_link_probe(&tc->aux, &tc->link.base); |
604 | if (ret < 0) | 604 | if (ret < 0) |
605 | goto err_dpcd_read; | 605 | goto err_dpcd_read; |
606 | if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000)) | 606 | if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) { |
607 | goto err_dpcd_inval; | 607 | dev_dbg(tc->dev, "Falling to 2.7 Gbps rate\n"); |
608 | tc->link.base.rate = 270000; | ||
609 | } | ||
610 | |||
611 | if (tc->link.base.num_lanes > 2) { | ||
612 | dev_dbg(tc->dev, "Falling to 2 lanes\n"); | ||
613 | tc->link.base.num_lanes = 2; | ||
614 | } | ||
608 | 615 | ||
609 | ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp); | 616 | ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp); |
610 | if (ret < 0) | 617 | if (ret < 0) |
@@ -637,9 +644,6 @@ static int tc_get_display_props(struct tc_data *tc) | |||
637 | err_dpcd_read: | 644 | err_dpcd_read: |
638 | dev_err(tc->dev, "failed to read DPCD: %d\n", ret); | 645 | dev_err(tc->dev, "failed to read DPCD: %d\n", ret); |
639 | return ret; | 646 | return ret; |
640 | err_dpcd_inval: | ||
641 | dev_err(tc->dev, "invalid DPCD\n"); | ||
642 | return -EINVAL; | ||
643 | } | 647 | } |
644 | 648 | ||
645 | static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) | 649 | static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) |
@@ -655,6 +659,14 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) | |||
655 | int lower_margin = mode->vsync_start - mode->vdisplay; | 659 | int lower_margin = mode->vsync_start - mode->vdisplay; |
656 | int vsync_len = mode->vsync_end - mode->vsync_start; | 660 | int vsync_len = mode->vsync_end - mode->vsync_start; |
657 | 661 | ||
662 | /* | ||
663 | * Recommended maximum number of symbols transferred in a transfer unit: | ||
664 | * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size, | ||
665 | * (output active video bandwidth in bytes)) | ||
666 | * Must be less than tu_size. | ||
667 | */ | ||
668 | max_tu_symbol = TU_SIZE_RECOMMENDED - 1; | ||
669 | |||
658 | dev_dbg(tc->dev, "set mode %dx%d\n", | 670 | dev_dbg(tc->dev, "set mode %dx%d\n", |
659 | mode->hdisplay, mode->vdisplay); | 671 | mode->hdisplay, mode->vdisplay); |
660 | dev_dbg(tc->dev, "H margin %d,%d sync %d\n", | 672 | dev_dbg(tc->dev, "H margin %d,%d sync %d\n", |
@@ -664,13 +676,18 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) | |||
664 | dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal); | 676 | dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal); |
665 | 677 | ||
666 | 678 | ||
667 | /* LCD Ctl Frame Size */ | 679 | /* |
668 | tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ | | 680 | * LCD Ctl Frame Size |
681 | * datasheet is not clear of vsdelay in case of DPI | ||
682 | * assume we do not need any delay when DPI is a source of | ||
683 | * sync signals | ||
684 | */ | ||
685 | tc_write(VPCTRL0, (0 << 20) /* VSDELAY */ | | ||
669 | OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED); | 686 | OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED); |
670 | tc_write(HTIM01, (left_margin << 16) | /* H back porch */ | 687 | tc_write(HTIM01, (ALIGN(left_margin, 2) << 16) | /* H back porch */ |
671 | (hsync_len << 0)); /* Hsync */ | 688 | (ALIGN(hsync_len, 2) << 0)); /* Hsync */ |
672 | tc_write(HTIM02, (right_margin << 16) | /* H front porch */ | 689 | tc_write(HTIM02, (ALIGN(right_margin, 2) << 16) | /* H front porch */ |
673 | (mode->hdisplay << 0)); /* width */ | 690 | (ALIGN(mode->hdisplay, 2) << 0)); /* width */ |
674 | tc_write(VTIM01, (upper_margin << 16) | /* V back porch */ | 691 | tc_write(VTIM01, (upper_margin << 16) | /* V back porch */ |
675 | (vsync_len << 0)); /* Vsync */ | 692 | (vsync_len << 0)); /* Vsync */ |
676 | tc_write(VTIM02, (lower_margin << 16) | /* V front porch */ | 693 | tc_write(VTIM02, (lower_margin << 16) | /* V front porch */ |
@@ -689,7 +706,7 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) | |||
689 | /* DP Main Stream Attributes */ | 706 | /* DP Main Stream Attributes */ |
690 | vid_sync_dly = hsync_len + left_margin + mode->hdisplay; | 707 | vid_sync_dly = hsync_len + left_margin + mode->hdisplay; |
691 | tc_write(DP0_VIDSYNCDELAY, | 708 | tc_write(DP0_VIDSYNCDELAY, |
692 | (0x003e << 16) | /* thresh_dly */ | 709 | (max_tu_symbol << 16) | /* thresh_dly */ |
693 | (vid_sync_dly << 0)); | 710 | (vid_sync_dly << 0)); |
694 | 711 | ||
695 | tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal)); | 712 | tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal)); |
@@ -705,14 +722,8 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) | |||
705 | tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW | | 722 | tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW | |
706 | DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888); | 723 | DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888); |
707 | 724 | ||
708 | /* | 725 | tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) | |
709 | * Recommended maximum number of symbols transferred in a transfer unit: | 726 | BPC_8); |
710 | * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size, | ||
711 | * (output active video bandwidth in bytes)) | ||
712 | * Must be less than tu_size. | ||
713 | */ | ||
714 | max_tu_symbol = TU_SIZE_RECOMMENDED - 1; | ||
715 | tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8); | ||
716 | 727 | ||
717 | return 0; | 728 | return 0; |
718 | err: | 729 | err: |
@@ -808,8 +819,6 @@ static int tc_main_link_setup(struct tc_data *tc) | |||
808 | unsigned int rate; | 819 | unsigned int rate; |
809 | u32 dp_phy_ctrl; | 820 | u32 dp_phy_ctrl; |
810 | int timeout; | 821 | int timeout; |
811 | bool aligned; | ||
812 | bool ready; | ||
813 | u32 value; | 822 | u32 value; |
814 | int ret; | 823 | int ret; |
815 | u8 tmp[8]; | 824 | u8 tmp[8]; |
@@ -954,16 +963,15 @@ static int tc_main_link_setup(struct tc_data *tc) | |||
954 | ret = drm_dp_dpcd_read_link_status(aux, tmp + 2); | 963 | ret = drm_dp_dpcd_read_link_status(aux, tmp + 2); |
955 | if (ret < 0) | 964 | if (ret < 0) |
956 | goto err_dpcd_read; | 965 | goto err_dpcd_read; |
957 | ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */ | 966 | } while ((--timeout) && |
958 | DP_CHANNEL_EQ_BITS)); /* Lane0 */ | 967 | !(drm_dp_channel_eq_ok(tmp + 2, tc->link.base.num_lanes))); |
959 | aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE; | ||
960 | } while ((--timeout) && !(ready && aligned)); | ||
961 | 968 | ||
962 | if (timeout == 0) { | 969 | if (timeout == 0) { |
963 | /* Read DPCD 0x200-0x201 */ | 970 | /* Read DPCD 0x200-0x201 */ |
964 | ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2); | 971 | ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2); |
965 | if (ret < 0) | 972 | if (ret < 0) |
966 | goto err_dpcd_read; | 973 | goto err_dpcd_read; |
974 | dev_err(dev, "channel(s) EQ not ok\n"); | ||
967 | dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]); | 975 | dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]); |
968 | dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n", | 976 | dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n", |
969 | tmp[1]); | 977 | tmp[1]); |
@@ -974,10 +982,6 @@ static int tc_main_link_setup(struct tc_data *tc) | |||
974 | dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n", | 982 | dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n", |
975 | tmp[6]); | 983 | tmp[6]); |
976 | 984 | ||
977 | if (!ready) | ||
978 | dev_err(dev, "Lane0/1 not ready\n"); | ||
979 | if (!aligned) | ||
980 | dev_err(dev, "Lane0/1 not aligned\n"); | ||
981 | return -EAGAIN; | 985 | return -EAGAIN; |
982 | } | 986 | } |
983 | 987 | ||
@@ -1099,7 +1103,10 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge, | |||
1099 | static int tc_connector_mode_valid(struct drm_connector *connector, | 1103 | static int tc_connector_mode_valid(struct drm_connector *connector, |
1100 | struct drm_display_mode *mode) | 1104 | struct drm_display_mode *mode) |
1101 | { | 1105 | { |
1102 | /* Accept any mode */ | 1106 | /* DPI interface clock limitation: upto 154 MHz */ |
1107 | if (mode->clock > 154000) | ||
1108 | return MODE_CLOCK_HIGH; | ||
1109 | |||
1103 | return MODE_OK; | 1110 | return MODE_OK; |
1104 | } | 1111 | } |
1105 | 1112 | ||
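
Among the tc358767 fixes, the AUXDATA one replaces DP0_AUXWDATA(i >> 2) with DP0_AUXWDATA((i - 1) >> 2). Because i has already been incremented past the byte just packed, the old formula selected register index 1 the moment a full 4-byte word was flushed, so register 0 was never written. The arithmetic can be checked in isolation with the same loop shape as tc_aux_transfer():

#include <stdio.h>

int main(void)
{
	const int size = 6; /* example AUX write of 6 payload bytes */

	/* i counts bytes already packed into the 32-bit staging word; it is
	 * incremented before the flush check, exactly as in the driver loop. */
	for (int i = 1; i <= size; i++) {
		if ((i % 4) == 0 || i == size) {
			printf("flush after byte %d: old register index %d, fixed index %d\n",
			       i, i >> 2, (i - 1) >> 2);
		}
	}
	return 0;
}
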
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 71d712f1b56a..b16f1d69a0bb 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c | |||
@@ -1225,7 +1225,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, | |||
1225 | return; | 1225 | return; |
1226 | 1226 | ||
1227 | for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) { | 1227 | for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) { |
1228 | if (!new_crtc_state->active || !new_crtc_state->planes_changed) | 1228 | if (!new_crtc_state->active) |
1229 | continue; | 1229 | continue; |
1230 | 1230 | ||
1231 | ret = drm_crtc_vblank_get(crtc); | 1231 | ret = drm_crtc_vblank_get(crtc); |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 07374008f146..e56166334455 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -1809,6 +1809,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, | |||
1809 | 1809 | ||
1810 | if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { | 1810 | if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { |
1811 | DRM_INFO("Cannot find any crtc or sizes\n"); | 1811 | DRM_INFO("Cannot find any crtc or sizes\n"); |
1812 | |||
1813 | /* First time: disable all crtc's.. */ | ||
1814 | if (!fb_helper->deferred_setup && !READ_ONCE(fb_helper->dev->master)) | ||
1815 | restore_fbdev_mode(fb_helper); | ||
1812 | return -EAGAIN; | 1816 | return -EAGAIN; |
1813 | } | 1817 | } |
1814 | 1818 | ||
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 3c318439a659..355120865efd 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c | |||
@@ -282,6 +282,7 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num) | |||
282 | static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, | 282 | static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, |
283 | int type, unsigned int resolution) | 283 | int type, unsigned int resolution) |
284 | { | 284 | { |
285 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | ||
285 | struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); | 286 | struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); |
286 | 287 | ||
287 | if (WARN_ON(resolution >= GVT_EDID_NUM)) | 288 | if (WARN_ON(resolution >= GVT_EDID_NUM)) |
@@ -307,6 +308,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, | |||
307 | port->type = type; | 308 | port->type = type; |
308 | 309 | ||
309 | emulate_monitor_status_change(vgpu); | 310 | emulate_monitor_status_change(vgpu); |
311 | vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; | ||
310 | return 0; | 312 | return 0; |
311 | } | 313 | } |
312 | 314 | ||
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 4427be18e4a9..940cdaaa3f24 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c | |||
@@ -496,6 +496,12 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload) | |||
496 | goto err_unpin_mm; | 496 | goto err_unpin_mm; |
497 | } | 497 | } |
498 | 498 | ||
499 | ret = intel_gvt_generate_request(workload); | ||
500 | if (ret) { | ||
501 | gvt_vgpu_err("fail to generate request\n"); | ||
502 | goto err_unpin_mm; | ||
503 | } | ||
504 | |||
499 | ret = prepare_shadow_batch_buffer(workload); | 505 | ret = prepare_shadow_batch_buffer(workload); |
500 | if (ret) { | 506 | if (ret) { |
501 | gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n"); | 507 | gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n"); |
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 2801d70579d8..8e331142badb 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c | |||
@@ -311,9 +311,9 @@ static inline int gtt_set_entry64(void *pt, | |||
311 | 311 | ||
312 | #define GTT_HAW 46 | 312 | #define GTT_HAW 46 |
313 | 313 | ||
314 | #define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30) | 314 | #define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30) |
315 | #define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21) | 315 | #define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21) |
316 | #define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12) | 316 | #define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12) |
317 | 317 | ||
318 | static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e) | 318 | static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e) |
319 | { | 319 | { |
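
The gvt/gtt.c hunk drops a stray "+ 1" from the address-mask macros: with a 46-bit hardware address width, the 1 GB-page mask should span bits 30..45, i.e. 16 address bits, not 17. A quick standalone check of the two formulas (unsigned long long literals keep the shifts well defined on any host):

#include <stdio.h>

#define GTT_HAW 46

/* Count set bits so the width of each mask is obvious. */
static int popcount64(unsigned long long v)
{
	int n = 0;

	for (; v; v &= v - 1)
		n++;
	return n;
}

int main(void)
{
	unsigned long long old_1g = ((1ULL << (GTT_HAW - 30 + 1)) - 1) << 30;
	unsigned long long new_1g = ((1ULL << (GTT_HAW - 30)) - 1) << 30;

	printf("old ADDR_1G_MASK: %#llx (%d address bits)\n", old_1g, popcount64(old_1g));
	printf("new ADDR_1G_MASK: %#llx (%d address bits)\n", new_1g, popcount64(new_1g));
	return 0;
}
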
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index a5bed2e71b92..44cd5ff5e97d 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c | |||
@@ -1381,40 +1381,6 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu, | |||
1381 | return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes); | 1381 | return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes); |
1382 | } | 1382 | } |
1383 | 1383 | ||
1384 | static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset, | ||
1385 | void *p_data, unsigned int bytes) | ||
1386 | { | ||
1387 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | ||
1388 | u32 v = *(u32 *)p_data; | ||
1389 | |||
1390 | if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)) | ||
1391 | return intel_vgpu_default_mmio_write(vgpu, | ||
1392 | offset, p_data, bytes); | ||
1393 | |||
1394 | switch (offset) { | ||
1395 | case 0x4ddc: | ||
1396 | /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */ | ||
1397 | vgpu_vreg(vgpu, offset) = v & ~(1 << 31); | ||
1398 | break; | ||
1399 | case 0x42080: | ||
1400 | /* bypass WaCompressedResourceDisplayNewHashMode */ | ||
1401 | vgpu_vreg(vgpu, offset) = v & ~(1 << 15); | ||
1402 | break; | ||
1403 | case 0xe194: | ||
1404 | /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */ | ||
1405 | vgpu_vreg(vgpu, offset) = v & ~(1 << 8); | ||
1406 | break; | ||
1407 | case 0x7014: | ||
1408 | /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */ | ||
1409 | vgpu_vreg(vgpu, offset) = v & ~(1 << 13); | ||
1410 | break; | ||
1411 | default: | ||
1412 | return -EINVAL; | ||
1413 | } | ||
1414 | |||
1415 | return 0; | ||
1416 | } | ||
1417 | |||
1418 | static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset, | 1384 | static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset, |
1419 | void *p_data, unsigned int bytes) | 1385 | void *p_data, unsigned int bytes) |
1420 | { | 1386 | { |
@@ -1671,8 +1637,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) | |||
1671 | MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL); | 1637 | MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1672 | MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, | 1638 | MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, |
1673 | NULL, NULL); | 1639 | NULL, NULL); |
1674 | MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, | 1640 | MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, |
1675 | skl_misc_ctl_write); | 1641 | NULL, NULL); |
1676 | MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL); | 1642 | MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1677 | MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL); | 1643 | MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1678 | MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL); | 1644 | MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL); |
@@ -2564,8 +2530,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) | |||
2564 | MMIO_D(0x6e570, D_BDW_PLUS); | 2530 | MMIO_D(0x6e570, D_BDW_PLUS); |
2565 | MMIO_D(0x65f10, D_BDW_PLUS); | 2531 | MMIO_D(0x65f10, D_BDW_PLUS); |
2566 | 2532 | ||
2567 | MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, | 2533 | MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
2568 | skl_misc_ctl_write); | ||
2569 | MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 2534 | MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
2570 | MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 2535 | MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
2571 | MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 2536 | MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
@@ -2615,8 +2580,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) | |||
2615 | MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); | 2580 | MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); |
2616 | MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); | 2581 | MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); |
2617 | MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); | 2582 | MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); |
2618 | MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write); | 2583 | MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, NULL); |
2619 | MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write); | 2584 | MMIO_DH(0x42080, D_SKL_PLUS, NULL, NULL); |
2620 | MMIO_D(0x45504, D_SKL_PLUS); | 2585 | MMIO_D(0x45504, D_SKL_PLUS); |
2621 | MMIO_D(0x45520, D_SKL_PLUS); | 2586 | MMIO_D(0x45520, D_SKL_PLUS); |
2622 | MMIO_D(0x46000, D_SKL_PLUS); | 2587 | MMIO_D(0x46000, D_SKL_PLUS); |
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index f6ded475bb2c..3ac1dc97a7a0 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c | |||
@@ -140,9 +140,10 @@ static int shadow_context_status_change(struct notifier_block *nb, | |||
140 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; | 140 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; |
141 | enum intel_engine_id ring_id = req->engine->id; | 141 | enum intel_engine_id ring_id = req->engine->id; |
142 | struct intel_vgpu_workload *workload; | 142 | struct intel_vgpu_workload *workload; |
143 | unsigned long flags; | ||
143 | 144 | ||
144 | if (!is_gvt_request(req)) { | 145 | if (!is_gvt_request(req)) { |
145 | spin_lock_bh(&scheduler->mmio_context_lock); | 146 | spin_lock_irqsave(&scheduler->mmio_context_lock, flags); |
146 | if (action == INTEL_CONTEXT_SCHEDULE_IN && | 147 | if (action == INTEL_CONTEXT_SCHEDULE_IN && |
147 | scheduler->engine_owner[ring_id]) { | 148 | scheduler->engine_owner[ring_id]) { |
148 | /* Switch ring from vGPU to host. */ | 149 | /* Switch ring from vGPU to host. */ |
@@ -150,7 +151,7 @@ static int shadow_context_status_change(struct notifier_block *nb, | |||
150 | NULL, ring_id); | 151 | NULL, ring_id); |
151 | scheduler->engine_owner[ring_id] = NULL; | 152 | scheduler->engine_owner[ring_id] = NULL; |
152 | } | 153 | } |
153 | spin_unlock_bh(&scheduler->mmio_context_lock); | 154 | spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags); |
154 | 155 | ||
155 | return NOTIFY_OK; | 156 | return NOTIFY_OK; |
156 | } | 157 | } |
@@ -161,7 +162,7 @@ static int shadow_context_status_change(struct notifier_block *nb, | |||
161 | 162 | ||
162 | switch (action) { | 163 | switch (action) { |
163 | case INTEL_CONTEXT_SCHEDULE_IN: | 164 | case INTEL_CONTEXT_SCHEDULE_IN: |
164 | spin_lock_bh(&scheduler->mmio_context_lock); | 165 | spin_lock_irqsave(&scheduler->mmio_context_lock, flags); |
165 | if (workload->vgpu != scheduler->engine_owner[ring_id]) { | 166 | if (workload->vgpu != scheduler->engine_owner[ring_id]) { |
166 | /* Switch ring from host to vGPU or vGPU to vGPU. */ | 167 | /* Switch ring from host to vGPU or vGPU to vGPU. */ |
167 | intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], | 168 | intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], |
@@ -170,7 +171,7 @@ static int shadow_context_status_change(struct notifier_block *nb, | |||
170 | } else | 171 | } else |
171 | gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n", | 172 | gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n", |
172 | ring_id, workload->vgpu->id); | 173 | ring_id, workload->vgpu->id); |
173 | spin_unlock_bh(&scheduler->mmio_context_lock); | 174 | spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags); |
174 | atomic_set(&workload->shadow_ctx_active, 1); | 175 | atomic_set(&workload->shadow_ctx_active, 1); |
175 | break; | 176 | break; |
176 | case INTEL_CONTEXT_SCHEDULE_OUT: | 177 | case INTEL_CONTEXT_SCHEDULE_OUT: |
@@ -253,7 +254,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) | |||
253 | struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; | 254 | struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; |
254 | struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; | 255 | struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; |
255 | struct intel_engine_cs *engine = dev_priv->engine[ring_id]; | 256 | struct intel_engine_cs *engine = dev_priv->engine[ring_id]; |
256 | struct drm_i915_gem_request *rq; | ||
257 | struct intel_vgpu *vgpu = workload->vgpu; | 257 | struct intel_vgpu *vgpu = workload->vgpu; |
258 | struct intel_ring *ring; | 258 | struct intel_ring *ring; |
259 | int ret; | 259 | int ret; |
@@ -299,6 +299,26 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) | |||
299 | ret = populate_shadow_context(workload); | 299 | ret = populate_shadow_context(workload); |
300 | if (ret) | 300 | if (ret) |
301 | goto err_unpin; | 301 | goto err_unpin; |
302 | workload->shadowed = true; | ||
303 | return 0; | ||
304 | |||
305 | err_unpin: | ||
306 | engine->context_unpin(engine, shadow_ctx); | ||
307 | err_shadow: | ||
308 | release_shadow_wa_ctx(&workload->wa_ctx); | ||
309 | err_scan: | ||
310 | return ret; | ||
311 | } | ||
312 | |||
313 | int intel_gvt_generate_request(struct intel_vgpu_workload *workload) | ||
314 | { | ||
315 | int ring_id = workload->ring_id; | ||
316 | struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; | ||
317 | struct intel_engine_cs *engine = dev_priv->engine[ring_id]; | ||
318 | struct drm_i915_gem_request *rq; | ||
319 | struct intel_vgpu *vgpu = workload->vgpu; | ||
320 | struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx; | ||
321 | int ret; | ||
302 | 322 | ||
303 | rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx); | 323 | rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx); |
304 | if (IS_ERR(rq)) { | 324 | if (IS_ERR(rq)) { |
@@ -313,14 +333,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) | |||
313 | ret = copy_workload_to_ring_buffer(workload); | 333 | ret = copy_workload_to_ring_buffer(workload); |
314 | if (ret) | 334 | if (ret) |
315 | goto err_unpin; | 335 | goto err_unpin; |
316 | workload->shadowed = true; | ||
317 | return 0; | 336 | return 0; |
318 | 337 | ||
319 | err_unpin: | 338 | err_unpin: |
320 | engine->context_unpin(engine, shadow_ctx); | 339 | engine->context_unpin(engine, shadow_ctx); |
321 | err_shadow: | ||
322 | release_shadow_wa_ctx(&workload->wa_ctx); | 340 | release_shadow_wa_ctx(&workload->wa_ctx); |
323 | err_scan: | ||
324 | return ret; | 341 | return ret; |
325 | } | 342 | } |
326 | 343 | ||
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 2d694f6c0907..b9f872204d7e 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h | |||
@@ -142,4 +142,7 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu); | |||
142 | void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu); | 142 | void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu); |
143 | 143 | ||
144 | void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx); | 144 | void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx); |
145 | |||
146 | int intel_gvt_generate_request(struct intel_vgpu_workload *workload); | ||
147 | |||
145 | #endif | 148 | #endif |
diff --git a/drivers/gpu/drm/i915/i915_gemfs.c b/drivers/gpu/drm/i915/i915_gemfs.c index e2993857df37..888b7d3f04c3 100644 --- a/drivers/gpu/drm/i915/i915_gemfs.c +++ b/drivers/gpu/drm/i915/i915_gemfs.c | |||
@@ -52,7 +52,8 @@ int i915_gemfs_init(struct drm_i915_private *i915) | |||
52 | 52 | ||
53 | if (has_transparent_hugepage()) { | 53 | if (has_transparent_hugepage()) { |
54 | struct super_block *sb = gemfs->mnt_sb; | 54 | struct super_block *sb = gemfs->mnt_sb; |
55 | char options[] = "huge=within_size"; | 55 | /* FIXME: Disabled until we get W/A for read BW issue. */ |
56 | char options[] = "huge=never"; | ||
56 | int flags = 0; | 57 | int flags = 0; |
57 | int err; | 58 | int err; |
58 | 59 | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 7bc60c848940..6c7f8bca574e 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -1736,7 +1736,7 @@ extern struct drm_display_mode *intel_find_panel_downclock( | |||
1736 | int intel_backlight_device_register(struct intel_connector *connector); | 1736 | int intel_backlight_device_register(struct intel_connector *connector); |
1737 | void intel_backlight_device_unregister(struct intel_connector *connector); | 1737 | void intel_backlight_device_unregister(struct intel_connector *connector); |
1738 | #else /* CONFIG_BACKLIGHT_CLASS_DEVICE */ | 1738 | #else /* CONFIG_BACKLIGHT_CLASS_DEVICE */ |
1739 | static int intel_backlight_device_register(struct intel_connector *connector) | 1739 | static inline int intel_backlight_device_register(struct intel_connector *connector) |
1740 | { | 1740 | { |
1741 | return 0; | 1741 | return 0; |
1742 | } | 1742 | } |
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index b8af35187d22..ea96682568e8 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
@@ -697,10 +697,8 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie) | |||
697 | 697 | ||
698 | /* Due to peculiar init order wrt to hpd handling this is separate. */ | 698 | /* Due to peculiar init order wrt to hpd handling this is separate. */ |
699 | if (drm_fb_helper_initial_config(&ifbdev->helper, | 699 | if (drm_fb_helper_initial_config(&ifbdev->helper, |
700 | ifbdev->preferred_bpp)) { | 700 | ifbdev->preferred_bpp)) |
701 | intel_fbdev_unregister(to_i915(ifbdev->helper.dev)); | 701 | intel_fbdev_unregister(to_i915(ifbdev->helper.dev)); |
702 | intel_fbdev_fini(to_i915(ifbdev->helper.dev)); | ||
703 | } | ||
704 | } | 702 | } |
705 | 703 | ||
706 | void intel_fbdev_initial_config_async(struct drm_device *dev) | 704 | void intel_fbdev_initial_config_async(struct drm_device *dev) |
@@ -800,7 +798,11 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev) | |||
800 | { | 798 | { |
801 | struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; | 799 | struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; |
802 | 800 | ||
803 | if (ifbdev) | 801 | if (!ifbdev) |
802 | return; | ||
803 | |||
804 | intel_fbdev_sync(ifbdev); | ||
805 | if (ifbdev->vma) | ||
804 | drm_fb_helper_hotplug_event(&ifbdev->helper); | 806 | drm_fb_helper_hotplug_event(&ifbdev->helper); |
805 | } | 807 | } |
806 | 808 | ||
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index eb5827110d8f..49fdf09f9919 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -438,7 +438,9 @@ static bool | |||
438 | gmbus_is_index_read(struct i2c_msg *msgs, int i, int num) | 438 | gmbus_is_index_read(struct i2c_msg *msgs, int i, int num) |
439 | { | 439 | { |
440 | return (i + 1 < num && | 440 | return (i + 1 < num && |
441 | !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 && | 441 | msgs[i].addr == msgs[i + 1].addr && |
442 | !(msgs[i].flags & I2C_M_RD) && | ||
443 | (msgs[i].len == 1 || msgs[i].len == 2) && | ||
442 | (msgs[i + 1].flags & I2C_M_RD)); | 444 | (msgs[i + 1].flags & I2C_M_RD)); |
443 | } | 445 | } |
444 | 446 | ||
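
The gmbus_is_index_read() change tightens the predicate: the index write and the following read must now address the same device, and the index write must be exactly one or two bytes, so a zero-length write no longer qualifies. A standalone sketch over a simplified message structure (the real struct i2c_msg and I2C_M_RD live in the kernel headers):

#include <stdbool.h>
#include <stdio.h>

#define MSG_RD 0x01 /* stand-in for I2C_M_RD */

struct msg {
	unsigned short addr;
	unsigned short flags;
	unsigned short len;
};

/* True when msgs[i] is a 1- or 2-byte write immediately followed by a read
 * of the same device, i.e. the pattern a GMBUS indexed-read cycle covers. */
static bool is_index_read(const struct msg *msgs, int i, int num)
{
	return i + 1 < num &&
	       msgs[i].addr == msgs[i + 1].addr &&
	       !(msgs[i].flags & MSG_RD) &&
	       (msgs[i].len == 1 || msgs[i].len == 2) &&
	       (msgs[i + 1].flags & MSG_RD);
}

int main(void)
{
	struct msg ok[]         = { { 0x50, 0, 1 }, { 0x50, MSG_RD, 8 } };
	struct msg wrong_addr[] = { { 0x50, 0, 1 }, { 0x51, MSG_RD, 8 } };
	struct msg zero_len[]   = { { 0x50, 0, 0 }, { 0x50, MSG_RD, 8 } };

	printf("same addr, 1-byte index: %d\n", is_index_read(ok, 0, 2));
	printf("different addresses:     %d\n", is_index_read(wrong_addr, 0, 2));
	printf("zero-length index write: %d\n", is_index_read(zero_len, 0, 2));
	return 0;
}
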
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 93c7e3f9b4a8..17d2f3a1c562 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c | |||
@@ -133,9 +133,16 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state) | |||
133 | plane_disabling = true; | 133 | plane_disabling = true; |
134 | } | 134 | } |
135 | 135 | ||
136 | if (plane_disabling) { | 136 | /* |
137 | drm_atomic_helper_wait_for_vblanks(dev, state); | 137 | * The flip done wait is only strictly required by imx-drm if a deferred |
138 | * plane disable is in-flight. As the core requires blocking commits | ||
139 | * to wait for the flip it is done here unconditionally. This keeps the | ||
140 | * workitem around a bit longer than required for the majority of | ||
141 | * non-blocking commits, but we accept that for the sake of simplicity. | ||
142 | */ | ||
143 | drm_atomic_helper_wait_for_flip_done(dev, state); | ||
138 | 144 | ||
145 | if (plane_disabling) { | ||
139 | for_each_old_plane_in_state(state, plane, old_plane_state, i) | 146 | for_each_old_plane_in_state(state, plane, old_plane_state, i) |
140 | ipu_plane_disable_deferred(plane); | 147 | ipu_plane_disable_deferred(plane); |
141 | 148 | ||
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig index c226da145fb3..a349cb61961e 100644 --- a/drivers/gpu/drm/omapdrm/displays/Kconfig +++ b/drivers/gpu/drm/omapdrm/displays/Kconfig | |||
@@ -35,6 +35,7 @@ config DRM_OMAP_CONNECTOR_ANALOG_TV | |||
35 | 35 | ||
36 | config DRM_OMAP_PANEL_DPI | 36 | config DRM_OMAP_PANEL_DPI |
37 | tristate "Generic DPI panel" | 37 | tristate "Generic DPI panel" |
38 | depends on BACKLIGHT_CLASS_DEVICE | ||
38 | help | 39 | help |
39 | Driver for generic DPI panels. | 40 | Driver for generic DPI panels. |
40 | 41 | ||
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index daf286fc8a40..ca1e3b489540 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c | |||
@@ -566,8 +566,8 @@ static int dpi_verify_pll(struct dss_pll *pll) | |||
566 | } | 566 | } |
567 | 567 | ||
568 | static const struct soc_device_attribute dpi_soc_devices[] = { | 568 | static const struct soc_device_attribute dpi_soc_devices[] = { |
569 | { .family = "OMAP3[456]*" }, | 569 | { .machine = "OMAP3[456]*" }, |
570 | { .family = "[AD]M37*" }, | 570 | { .machine = "[AD]M37*" }, |
571 | { /* sentinel */ } | 571 | { /* sentinel */ } |
572 | }; | 572 | }; |
573 | 573 | ||
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c index d86873f2abe6..e626eddf24d5 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c | |||
@@ -352,7 +352,7 @@ int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core, | |||
352 | { | 352 | { |
353 | const u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS | | 353 | const u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS | |
354 | CEC_CAP_PASSTHROUGH | CEC_CAP_RC; | 354 | CEC_CAP_PASSTHROUGH | CEC_CAP_RC; |
355 | unsigned int ret; | 355 | int ret; |
356 | 356 | ||
357 | core->adap = cec_allocate_adapter(&hdmi_cec_adap_ops, core, | 357 | core->adap = cec_allocate_adapter(&hdmi_cec_adap_ops, core, |
358 | "omap4", caps, CEC_MAX_LOG_ADDRS); | 358 | "omap4", caps, CEC_MAX_LOG_ADDRS); |
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c index 62e451162d96..b06f9956e733 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c | |||
@@ -886,25 +886,36 @@ struct hdmi4_features { | |||
886 | bool audio_use_mclk; | 886 | bool audio_use_mclk; |
887 | }; | 887 | }; |
888 | 888 | ||
889 | static const struct hdmi4_features hdmi4_es1_features = { | 889 | static const struct hdmi4_features hdmi4430_es1_features = { |
890 | .cts_swmode = false, | 890 | .cts_swmode = false, |
891 | .audio_use_mclk = false, | 891 | .audio_use_mclk = false, |
892 | }; | 892 | }; |
893 | 893 | ||
894 | static const struct hdmi4_features hdmi4_es2_features = { | 894 | static const struct hdmi4_features hdmi4430_es2_features = { |
895 | .cts_swmode = true, | 895 | .cts_swmode = true, |
896 | .audio_use_mclk = false, | 896 | .audio_use_mclk = false, |
897 | }; | 897 | }; |
898 | 898 | ||
899 | static const struct hdmi4_features hdmi4_es3_features = { | 899 | static const struct hdmi4_features hdmi4_features = { |
900 | .cts_swmode = true, | 900 | .cts_swmode = true, |
901 | .audio_use_mclk = true, | 901 | .audio_use_mclk = true, |
902 | }; | 902 | }; |
903 | 903 | ||
904 | static const struct soc_device_attribute hdmi4_soc_devices[] = { | 904 | static const struct soc_device_attribute hdmi4_soc_devices[] = { |
905 | { .family = "OMAP4", .revision = "ES1.?", .data = &hdmi4_es1_features }, | 905 | { |
906 | { .family = "OMAP4", .revision = "ES2.?", .data = &hdmi4_es2_features }, | 906 | .machine = "OMAP4430", |
907 | { .family = "OMAP4", .data = &hdmi4_es3_features }, | 907 | .revision = "ES1.?", |
908 | .data = &hdmi4430_es1_features, | ||
909 | }, | ||
910 | { | ||
911 | .machine = "OMAP4430", | ||
912 | .revision = "ES2.?", | ||
913 | .data = &hdmi4430_es2_features, | ||
914 | }, | ||
915 | { | ||
916 | .family = "OMAP4", | ||
917 | .data = &hdmi4_features, | ||
918 | }, | ||
908 | { /* sentinel */ } | 919 | { /* sentinel */ } |
909 | }; | 920 | }; |
910 | 921 | ||
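
The hdmi4 SoC-matching fix keys the ES1/ES2 quirk entries on the OMAP4430 machine name rather than the whole OMAP4 family, keeping a generic OMAP4 entry as the fallback. The lookup style is a NULL-terminated table scanned in order, first match wins, with glob patterns per field. The sketch below imitates that with POSIX fnmatch() standing in for the kernel's matcher and a deliberately simplified table; it is not the soc_device_match() implementation.

#include <fnmatch.h>
#include <stdio.h>

struct soc_attr {
	const char *machine;  /* glob pattern; NULL acts as a wildcard */
	const char *revision; /* glob pattern; NULL acts as a wildcard */
	const char *data;     /* feature set selected by this entry */
};

/* Order matters: specific entries first, catch-all next, sentinel last. */
static const struct soc_attr table[] = {
	{ "OMAP4430", "ES1.?", "4430 ES1 features" },
	{ "OMAP4430", "ES2.?", "4430 ES2 features" },
	{ "OMAP4*",   NULL,    "generic OMAP4 features" },
	{ /* sentinel */ }
};

static const char *soc_match(const char *machine, const char *revision)
{
	for (const struct soc_attr *e = table; e->machine || e->revision || e->data; e++) {
		if (e->machine && fnmatch(e->machine, machine, 0) != 0)
			continue;
		if (e->revision && fnmatch(e->revision, revision, 0) != 0)
			continue;
		return e->data;
	}
	return NULL;
}

int main(void)
{
	printf("OMAP4430 ES2.1 -> %s\n", soc_match("OMAP4430", "ES2.1"));
	printf("OMAP4460 ES1.0 -> %s\n", soc_match("OMAP4460", "ES1.0"));
	return 0;
}
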
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index 1dd3dafc59af..c60a85e82c6d 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | |||
@@ -638,7 +638,8 @@ static int omap_dmm_probe(struct platform_device *dev) | |||
638 | match = of_match_node(dmm_of_match, dev->dev.of_node); | 638 | match = of_match_node(dmm_of_match, dev->dev.of_node); |
639 | if (!match) { | 639 | if (!match) { |
640 | dev_err(&dev->dev, "failed to find matching device node\n"); | 640 | dev_err(&dev->dev, "failed to find matching device node\n"); |
641 | return -ENODEV; | 641 | ret = -ENODEV; |
642 | goto fail; | ||
642 | } | 643 | } |
643 | 644 | ||
644 | omap_dmm->plat_data = match->data; | 645 | omap_dmm->plat_data = match->data; |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 898f9a078830..a6511918f632 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
@@ -5451,28 +5451,6 @@ void cik_pcie_gart_tlb_flush(struct radeon_device *rdev) | |||
5451 | WREG32(VM_INVALIDATE_REQUEST, 0x1); | 5451 | WREG32(VM_INVALIDATE_REQUEST, 0x1); |
5452 | } | 5452 | } |
5453 | 5453 | ||
5454 | static void cik_pcie_init_compute_vmid(struct radeon_device *rdev) | ||
5455 | { | ||
5456 | int i; | ||
5457 | uint32_t sh_mem_bases, sh_mem_config; | ||
5458 | |||
5459 | sh_mem_bases = 0x6000 | 0x6000 << 16; | ||
5460 | sh_mem_config = ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED); | ||
5461 | sh_mem_config |= DEFAULT_MTYPE(MTYPE_NONCACHED); | ||
5462 | |||
5463 | mutex_lock(&rdev->srbm_mutex); | ||
5464 | for (i = 8; i < 16; i++) { | ||
5465 | cik_srbm_select(rdev, 0, 0, 0, i); | ||
5466 | /* CP and shaders */ | ||
5467 | WREG32(SH_MEM_CONFIG, sh_mem_config); | ||
5468 | WREG32(SH_MEM_APE1_BASE, 1); | ||
5469 | WREG32(SH_MEM_APE1_LIMIT, 0); | ||
5470 | WREG32(SH_MEM_BASES, sh_mem_bases); | ||
5471 | } | ||
5472 | cik_srbm_select(rdev, 0, 0, 0, 0); | ||
5473 | mutex_unlock(&rdev->srbm_mutex); | ||
5474 | } | ||
5475 | |||
5476 | /** | 5454 | /** |
5477 | * cik_pcie_gart_enable - gart enable | 5455 | * cik_pcie_gart_enable - gart enable |
5478 | * | 5456 | * |
@@ -5586,8 +5564,6 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) | |||
5586 | cik_srbm_select(rdev, 0, 0, 0, 0); | 5564 | cik_srbm_select(rdev, 0, 0, 0, 0); |
5587 | mutex_unlock(&rdev->srbm_mutex); | 5565 | mutex_unlock(&rdev->srbm_mutex); |
5588 | 5566 | ||
5589 | cik_pcie_init_compute_vmid(rdev); | ||
5590 | |||
5591 | cik_pcie_gart_tlb_flush(rdev); | 5567 | cik_pcie_gart_tlb_flush(rdev); |
5592 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", | 5568 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", |
5593 | (unsigned)(rdev->mc.gtt_size >> 20), | 5569 | (unsigned)(rdev->mc.gtt_size >> 20), |
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c index b15755b6129c..b1fe0639227e 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c | |||
@@ -1285,8 +1285,6 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master, | |||
1285 | goto err_pllref; | 1285 | goto err_pllref; |
1286 | } | 1286 | } |
1287 | 1287 | ||
1288 | pm_runtime_enable(dev); | ||
1289 | |||
1290 | dsi->dsi_host.ops = &dw_mipi_dsi_host_ops; | 1288 | dsi->dsi_host.ops = &dw_mipi_dsi_host_ops; |
1291 | dsi->dsi_host.dev = dev; | 1289 | dsi->dsi_host.dev = dev; |
1292 | ret = mipi_dsi_host_register(&dsi->dsi_host); | 1290 | ret = mipi_dsi_host_register(&dsi->dsi_host); |
@@ -1301,6 +1299,7 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master, | |||
1301 | } | 1299 | } |
1302 | 1300 | ||
1303 | dev_set_drvdata(dev, dsi); | 1301 | dev_set_drvdata(dev, dsi); |
1302 | pm_runtime_enable(dev); | ||
1304 | return 0; | 1303 | return 0; |
1305 | 1304 | ||
1306 | err_mipi_dsi_host: | 1305 | err_mipi_dsi_host: |
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index b0551aa677b8..8d7172e8381d 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c | |||
@@ -1062,7 +1062,6 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm) | |||
1062 | } | 1062 | } |
1063 | EXPORT_SYMBOL(ttm_pool_unpopulate); | 1063 | EXPORT_SYMBOL(ttm_pool_unpopulate); |
1064 | 1064 | ||
1065 | #if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU) | ||
1066 | int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt) | 1065 | int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt) |
1067 | { | 1066 | { |
1068 | unsigned i, j; | 1067 | unsigned i, j; |
@@ -1133,7 +1132,6 @@ void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt) | |||
1133 | ttm_pool_unpopulate(&tt->ttm); | 1132 | ttm_pool_unpopulate(&tt->ttm); |
1134 | } | 1133 | } |
1135 | EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages); | 1134 | EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages); |
1136 | #endif | ||
1137 | 1135 | ||
1138 | int ttm_page_alloc_debugfs(struct seq_file *m, void *data) | 1136 | int ttm_page_alloc_debugfs(struct seq_file *m, void *data) |
1139 | { | 1137 | { |
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h index 38a2b4770c35..593811362a91 100644 --- a/include/drm/ttm/ttm_page_alloc.h +++ b/include/drm/ttm/ttm_page_alloc.h | |||
@@ -59,11 +59,20 @@ int ttm_pool_populate(struct ttm_tt *ttm); | |||
59 | void ttm_pool_unpopulate(struct ttm_tt *ttm); | 59 | void ttm_pool_unpopulate(struct ttm_tt *ttm); |
60 | 60 | ||
61 | /** | 61 | /** |
62 | * Populates and DMA maps pages to fullfil a ttm_dma_populate() request | ||
63 | */ | ||
64 | int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt); | ||
65 | |||
66 | /** | ||
67 | * Unpopulates and DMA unmaps pages as part of a | ||
68 | * ttm_dma_unpopulate() request */ | ||
69 | void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt); | ||
70 | |||
71 | /** | ||
62 | * Output the state of pools to debugfs file | 72 | * Output the state of pools to debugfs file |
63 | */ | 73 | */ |
64 | int ttm_page_alloc_debugfs(struct seq_file *m, void *data); | 74 | int ttm_page_alloc_debugfs(struct seq_file *m, void *data); |
65 | 75 | ||
66 | |||
67 | #if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU) | 76 | #if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU) |
68 | /** | 77 | /** |
69 | * Initialize pool allocator. | 78 | * Initialize pool allocator. |
@@ -83,17 +92,6 @@ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data); | |||
83 | int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev); | 92 | int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev); |
84 | void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev); | 93 | void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev); |
85 | 94 | ||
86 | |||
87 | /** | ||
88 | * Populates and DMA maps pages to fullfil a ttm_dma_populate() request | ||
89 | */ | ||
90 | int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt); | ||
91 | |||
92 | /** | ||
93 | * Unpopulates and DMA unmaps pages as part of a | ||
94 | * ttm_dma_unpopulate() request */ | ||
95 | void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt); | ||
96 | |||
97 | #else | 95 | #else |
98 | static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, | 96 | static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, |
99 | unsigned max_pages) | 97 | unsigned max_pages) |
@@ -116,16 +114,6 @@ static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, | |||
116 | struct device *dev) | 114 | struct device *dev) |
117 | { | 115 | { |
118 | } | 116 | } |
119 | |||
120 | static inline int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt) | ||
121 | { | ||
122 | return -ENOMEM; | ||
123 | } | ||
124 | |||
125 | static inline void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt) | ||
126 | { | ||
127 | } | ||
128 | |||
129 | #endif | 117 | #endif |
130 | 118 | ||
131 | #endif | 119 | #endif |
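
The ttm_page_alloc changes stop building ttm_populate_and_map_pages() and ttm_unmap_and_unpopulate_pages() conditionally: their declarations move out of the CONFIG_SWIOTLB/CONFIG_INTEL_IOMMU block, so callers always link against the real helpers instead of inline stubs that returned -ENOMEM. A toy single-file illustration of the header pattern, with invented names:

#include <stdio.h>

/*
 * Toy version of the header pattern changed above. Before the fix, the real
 * populate/unpopulate helpers were declared only when the DMA-pool option was
 * set and everyone else got inline -ENOMEM stubs; after the fix the
 * declarations sit outside the #if and only option-specific extras remain
 * conditional.
 */
#define HAVE_DMA_POOL 0 /* flip to 1 to mimic CONFIG_SWIOTLB/CONFIG_INTEL_IOMMU */

/* Unconditional declaration, as in the fixed ttm_page_alloc.h. */
int populate_and_map(void);

#if HAVE_DMA_POOL
/* Only the DMA-pool-specific entry points stay behind the option. */
int dma_pool_init(void);
#endif

int populate_and_map(void)
{
	return 0; /* the real implementation is now always built and linked */
}

int main(void)
{
	printf("populate_and_map() = %d\n", populate_and_map());
	return 0;
}
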
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 731d0df722e3..6e80501368ae 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h | |||
@@ -233,29 +233,29 @@ struct kfd_ioctl_wait_events_args { | |||
233 | }; | 233 | }; |
234 | 234 | ||
235 | struct kfd_ioctl_set_scratch_backing_va_args { | 235 | struct kfd_ioctl_set_scratch_backing_va_args { |
236 | uint64_t va_addr; /* to KFD */ | 236 | __u64 va_addr; /* to KFD */ |
237 | uint32_t gpu_id; /* to KFD */ | 237 | __u32 gpu_id; /* to KFD */ |
238 | uint32_t pad; | 238 | __u32 pad; |
239 | }; | 239 | }; |
240 | 240 | ||
241 | struct kfd_ioctl_get_tile_config_args { | 241 | struct kfd_ioctl_get_tile_config_args { |
242 | /* to KFD: pointer to tile array */ | 242 | /* to KFD: pointer to tile array */ |
243 | uint64_t tile_config_ptr; | 243 | __u64 tile_config_ptr; |
244 | /* to KFD: pointer to macro tile array */ | 244 | /* to KFD: pointer to macro tile array */ |
245 | uint64_t macro_tile_config_ptr; | 245 | __u64 macro_tile_config_ptr; |
246 | /* to KFD: array size allocated by user mode | 246 | /* to KFD: array size allocated by user mode |
247 | * from KFD: array size filled by kernel | 247 | * from KFD: array size filled by kernel |
248 | */ | 248 | */ |
249 | uint32_t num_tile_configs; | 249 | __u32 num_tile_configs; |
250 | /* to KFD: array size allocated by user mode | 250 | /* to KFD: array size allocated by user mode |
251 | * from KFD: array size filled by kernel | 251 | * from KFD: array size filled by kernel |
252 | */ | 252 | */ |
253 | uint32_t num_macro_tile_configs; | 253 | __u32 num_macro_tile_configs; |
254 | 254 | ||
255 | uint32_t gpu_id; /* to KFD */ | 255 | __u32 gpu_id; /* to KFD */ |
256 | uint32_t gb_addr_config; /* from KFD */ | 256 | __u32 gb_addr_config; /* from KFD */ |
257 | uint32_t num_banks; /* from KFD */ | 257 | __u32 num_banks; /* from KFD */ |
258 | uint32_t num_ranks; /* from KFD */ | 258 | __u32 num_ranks; /* from KFD */ |
259 | /* struct size can be extended later if needed | 259 | /* struct size can be extended later if needed |
260 | * without breaking ABI compatibility | 260 | * without breaking ABI compatibility |
261 | */ | 261 | */ |
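
The kfd_ioctl.h hunk swaps uint64_t/uint32_t for the kernel's __u64/__u32 aliases, since a uapi header cannot assume userspace has already pulled in <stdint.h>. The underlying point, that every ioctl argument field has an explicit width so the layout is identical on both sides of the ABI, can be sketched with the C99 equivalents and a compile-time size check; the struct below only mirrors the fields visible in the hunk.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the shape of a to/from-kernel ioctl argument block: every field
 * has an explicit width and 64-bit members come first, so no padding is
 * inserted by the compiler. */
struct tile_config_args {
	uint64_t tile_config_ptr;       /* to kernel: user pointer passed as u64 */
	uint64_t macro_tile_config_ptr; /* to kernel */
	uint32_t num_tile_configs;      /* in: capacity, out: entries filled */
	uint32_t num_macro_tile_configs;
	uint32_t gpu_id;                /* to kernel */
	uint32_t gb_addr_config;        /* from kernel */
	uint32_t num_banks;             /* from kernel */
	uint32_t num_ranks;             /* from kernel */
};

/* 2 * 8 + 6 * 4 = 40 bytes, with no compiler-inserted padding. */
_Static_assert(sizeof(struct tile_config_args) == 40,
	       "ioctl argument layout must not change");

int main(void)
{
	printf("sizeof(struct tile_config_args) = %zu\n",
	       sizeof(struct tile_config_args));
	return 0;
}
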