Diffstat (limited to 'drivers/gpu')
119 files changed, 1147 insertions, 740 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index a028661d9e20..92b11de19581 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -576,6 +576,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
 	{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
+	{ 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0, 0, 0, 0, 0 },
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8a078f4ae73d..7ff3a28fc903 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1701,8 +1701,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 	amdgpu_xgmi_add_device(adev);
 	amdgpu_amdkfd_device_init(adev);
 
-	if (amdgpu_sriov_vf(adev))
+	if (amdgpu_sriov_vf(adev)) {
+		amdgpu_virt_init_data_exchange(adev);
 		amdgpu_virt_release_full_gpu(adev, true);
+	}
 
 	return 0;
 }
@@ -2632,9 +2634,6 @@ fence_driver_init:
 		goto failed;
 	}
 
-	if (amdgpu_sriov_vf(adev))
-		amdgpu_virt_init_data_exchange(adev);
-
 	amdgpu_fbdev_init(adev);
 
 	r = amdgpu_pm_sysfs_init(adev);
@@ -2798,7 +2797,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 		struct drm_framebuffer *fb = crtc->primary->fb;
 		struct amdgpu_bo *robj;
 
-		if (amdgpu_crtc->cursor_bo) {
+		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
 			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
 			r = amdgpu_bo_reserve(aobj, true);
 			if (r == 0) {
@@ -2906,7 +2905,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 
-		if (amdgpu_crtc->cursor_bo) {
+		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
 			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
 			r = amdgpu_bo_reserve(aobj, true);
 			if (r == 0) {
@@ -3226,6 +3225,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 	r = amdgpu_ib_ring_tests(adev);
 
 error:
+	amdgpu_virt_init_data_exchange(adev);
 	amdgpu_virt_release_full_gpu(adev, true);
 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
 		atomic_inc(&adev->vram_lost_counter);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 15ce7e681d67..b083b219b1a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -188,10 +188,12 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto cleanup;
 	}
 
-	r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
-	if (unlikely(r != 0)) {
-		DRM_ERROR("failed to pin new abo buffer before flip\n");
-		goto unreserve;
+	if (!adev->enable_virtual_display) {
+		r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
+		if (unlikely(r != 0)) {
+			DRM_ERROR("failed to pin new abo buffer before flip\n");
+			goto unreserve;
+		}
 	}
 
 	r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
@@ -211,7 +213,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
 	amdgpu_bo_unreserve(new_abo);
 
-	work->base = amdgpu_bo_gpu_offset(new_abo);
+	if (!adev->enable_virtual_display)
+		work->base = amdgpu_bo_gpu_offset(new_abo);
 	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
 		amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
 
@@ -242,9 +245,10 @@ pflip_cleanup:
 	goto cleanup;
 }
 unpin:
-	if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
-		DRM_ERROR("failed to unpin new abo in error path\n");
-	}
+	if (!adev->enable_virtual_display)
+		if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
+			DRM_ERROR("failed to unpin new abo in error path\n");
+
 unreserve:
 	amdgpu_bo_unreserve(new_abo);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index bc62bf41b7e9..5dc349173e4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -212,6 +212,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	}
 
 	if (amdgpu_device_is_px(dev)) {
+		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
 		pm_runtime_use_autosuspend(dev->dev);
 		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
 		pm_runtime_set_active(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 1f61ed95727c..0ed41a9d2d77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1686,7 +1686,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 		effective_mode &= ~S_IWUSR;
 
 	if ((adev->flags & AMD_IS_APU) &&
-	    (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
+	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
+	     attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
 	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
 	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
 		return 0;
@@ -2008,6 +2009,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
 
 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 {
+	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 	int ret;
 
 	if (adev->pm.sysfs_initialized)
@@ -2091,12 +2093,14 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 				"pp_power_profile_mode\n");
 		return ret;
 	}
-	ret = device_create_file(adev->dev,
-			&dev_attr_pp_od_clk_voltage);
-	if (ret) {
-		DRM_ERROR("failed to create device file "
-				"pp_od_clk_voltage\n");
-		return ret;
+	if (hwmgr->od_enabled) {
+		ret = device_create_file(adev->dev,
+				&dev_attr_pp_od_clk_voltage);
+		if (ret) {
+			DRM_ERROR("failed to create device file "
+					"pp_od_clk_voltage\n");
+			return ret;
+		}
 	}
 	ret = device_create_file(adev->dev,
 			&dev_attr_gpu_busy_percent);
@@ -2118,6 +2122,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 
 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
 {
+	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
 	if (adev->pm.dpm_enabled == 0)
 		return;
 
@@ -2138,8 +2144,9 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
 	device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
 	device_remove_file(adev->dev,
 			&dev_attr_pp_power_profile_mode);
-	device_remove_file(adev->dev,
-			&dev_attr_pp_od_clk_voltage);
+	if (hwmgr->od_enabled)
+		device_remove_file(adev->dev,
+				&dev_attr_pp_od_clk_voltage);
 	device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 71913a18d142..a38e0fb4a6fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -38,6 +38,7 @@
 #include "amdgpu_gem.h"
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
+#include <linux/dma-fence-array.h>
 
 /**
  * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
@@ -187,6 +188,48 @@ error:
 	return ERR_PTR(ret);
 }
 
+static int
+__reservation_object_make_exclusive(struct reservation_object *obj)
+{
+	struct dma_fence **fences;
+	unsigned int count;
+	int r;
+
+	if (!reservation_object_get_list(obj)) /* no shared fences to convert */
+		return 0;
+
+	r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
+	if (r)
+		return r;
+
+	if (count == 0) {
+		/* Now that was unexpected. */
+	} else if (count == 1) {
+		reservation_object_add_excl_fence(obj, fences[0]);
+		dma_fence_put(fences[0]);
+		kfree(fences);
+	} else {
+		struct dma_fence_array *array;
+
+		array = dma_fence_array_create(count, fences,
+					       dma_fence_context_alloc(1), 0,
+					       false);
+		if (!array)
+			goto err_fences_put;
+
+		reservation_object_add_excl_fence(obj, &array->base);
+		dma_fence_put(&array->base);
+	}
+
+	return 0;
+
+err_fences_put:
+	while (count--)
+		dma_fence_put(fences[count]);
+	kfree(fences);
+	return -ENOMEM;
+}
+
 /**
  * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
  * @dma_buf: Shared DMA buffer
@@ -218,16 +261,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
 
 	if (attach->dev->driver != adev->dev->driver) {
 		/*
-		 * Wait for all shared fences to complete before we switch to future
-		 * use of exclusive fence on this prime shared bo.
+		 * We only create shared fences for internal use, but importers
+		 * of the dmabuf rely on exclusive fences for implicitly
+		 * tracking write hazards. As any of the current fences may
+		 * correspond to a write, we need to convert all existing
+		 * fences on the reservation object into a single exclusive
+		 * fence.
 		 */
-		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
-							true, false,
-							MAX_SCHEDULE_TIMEOUT);
-		if (unlikely(r < 0)) {
-			DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
+		r = __reservation_object_make_exclusive(bo->tbo.resv);
+		if (r)
 			goto error_unreserve;
-		}
 	}
 
 	/* pin buffer into GTT */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 8fab0d637ee5..3a9b48b227ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -90,8 +90,10 @@ static int psp_sw_fini(void *handle)
 	adev->psp.sos_fw = NULL;
 	release_firmware(adev->psp.asd_fw);
 	adev->psp.asd_fw = NULL;
-	release_firmware(adev->psp.ta_fw);
-	adev->psp.ta_fw = NULL;
+	if (adev->psp.ta_fw) {
+		release_firmware(adev->psp.ta_fw);
+		adev->psp.ta_fw = NULL;
+	}
 	return 0;
 }
 
@@ -435,6 +437,9 @@ static int psp_xgmi_initialize(struct psp_context *psp)
 	struct ta_xgmi_shared_memory *xgmi_cmd;
 	int ret;
 
+	if (!psp->adev->psp.ta_fw)
+		return -ENOENT;
+
 	if (!psp->xgmi_context.initialized) {
 		ret = psp_xgmi_init_shared_buf(psp);
 		if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index e73d152659a2..698bcb8ce61d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -638,12 +638,14 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 	struct ttm_bo_global *glob = adev->mman.bdev.glob;
 	struct amdgpu_vm_bo_base *bo_base;
 
+#if 0
 	if (vm->bulk_moveable) {
 		spin_lock(&glob->lru_lock);
 		ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
 		spin_unlock(&glob->lru_lock);
 		return;
 	}
+#endif
 
 	memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
 
@@ -847,9 +849,6 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	bp->size = amdgpu_vm_bo_size(adev, level);
 	bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
 	bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
-	if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 &&
-	    adev->flags & AMD_IS_APU)
-		bp->domain |= AMDGPU_GEM_DOMAIN_GTT;
 	bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
 	bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
 		AMDGPU_GEM_CREATE_CPU_GTT_USWC;
@@ -3366,14 +3365,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
 			 struct amdgpu_task_info *task_info)
 {
 	struct amdgpu_vm *vm;
+	unsigned long flags;
 
-	spin_lock(&adev->vm_manager.pasid_lock);
+	spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
 
 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
 	if (vm)
 		*task_info = vm->task_info;
 
-	spin_unlock(&adev->vm_manager.pasid_lock);
+	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index fdace004544d..e4cc1d48eaab 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -167,19 +167,6 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 
 	dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-	if (crtc->primary->fb) {
-		int r;
-		struct amdgpu_bo *abo;
-
-		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
-		r = amdgpu_bo_reserve(abo, true);
-		if (unlikely(r))
-			DRM_ERROR("failed to reserve abo before unpin\n");
-		else {
-			amdgpu_bo_unpin(abo);
-			amdgpu_bo_unreserve(abo);
-		}
-	}
 
 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
 	amdgpu_crtc->encoder = NULL;
@@ -692,7 +679,9 @@ static int dce_virtual_pageflip(struct amdgpu_device *adev,
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
 	drm_crtc_vblank_put(&amdgpu_crtc->base);
-	schedule_work(&works->unpin_work);
+	amdgpu_bo_unref(&works->old_abo);
+	kfree(works->shared);
+	kfree(works);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 381f593b0cda..57cb3a51bda7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4233,7 +4233,6 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
 	u32 tmp;
 	u32 rb_bufsz;
 	u64 rb_addr, rptr_addr, wptr_gpu_addr;
-	int r;
 
 	/* Set the write pointer delay */
 	WREG32(mmCP_RB_WPTR_DELAY, 0);
@@ -4278,9 +4277,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
 	amdgpu_ring_clear_ring(ring);
 	gfx_v8_0_cp_gfx_start(adev);
 	ring->sched.ready = true;
-	r = amdgpu_ring_test_helper(ring);
 
-	return r;
+	return 0;
 }
 
 static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
@@ -4369,10 +4367,9 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
 		amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
 	}
 
-	r = amdgpu_ring_test_helper(kiq_ring);
-	if (r)
-		DRM_ERROR("KCQ enable failed\n");
-	return r;
+	amdgpu_ring_commit(kiq_ring);
+
+	return 0;
 }
 
 static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
@@ -4709,16 +4706,32 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
 	if (r)
 		goto done;
 
-	/* Test KCQs - reversing the order of rings seems to fix ring test failure
-	 * after GPU reset
-	 */
-	for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) {
+done:
+	return r;
+}
+
+static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
+{
+	int r, i;
+	struct amdgpu_ring *ring;
+
+	/* collect all the ring_tests here, gfx, kiq, compute */
+	ring = &adev->gfx.gfx_ring[0];
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
+		return r;
+
+	ring = &adev->gfx.kiq.ring;
+	r = amdgpu_ring_test_helper(ring);
+	if (r)
+		return r;
+
+	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		ring = &adev->gfx.compute_ring[i];
-		r = amdgpu_ring_test_helper(ring);
+		amdgpu_ring_test_helper(ring);
 	}
 
-done:
-	return r;
+	return 0;
 }
 
 static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
@@ -4739,6 +4752,11 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
 	r = gfx_v8_0_kcq_resume(adev);
 	if (r)
 		return r;
+
+	r = gfx_v8_0_cp_test_all_rings(adev);
+	if (r)
+		return r;
+
 	gfx_v8_0_enable_gui_idle_interrupt(adev, true);
 
 	return 0;
@@ -5086,6 +5104,8 @@ static int gfx_v8_0_post_soft_reset(void *handle)
 	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
 		gfx_v8_0_cp_gfx_resume(adev);
 
+	gfx_v8_0_cp_test_all_rings(adev);
+
 	adev->gfx.rlc.funcs->start(adev);
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 7556716038d3..fbca0494f871 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -113,7 +113,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
 };
 
 static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
@@ -135,10 +138,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
 };
 
 static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
@@ -3587,6 +3587,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
 {
 	uint32_t data, def;
 
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
+
 	/* It is disabled by HW by default */
 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
 		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
@@ -3651,6 +3653,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
 			WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
 		}
 	}
+
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 
 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 8cbb4655896a..b11a1c17a7f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -174,7 +174,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
 		return r;
 	}
 	/* Retrieve checksum from mailbox2 */
-	if (req == IDH_REQ_GPU_INIT_ACCESS) {
+	if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
 		adev->virt.fw_reserve.checksum_key =
 			RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
 					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 4cd31a276dcd..186db182f924 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -93,7 +93,20 @@ static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
 static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
 							bool enable)
 {
+	u32 tmp = 0;
 
+	if (enable) {
+		tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
+		      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
+		      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);
+
+		WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
+			     lower_32_bits(adev->doorbell.base));
+		WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
+			     upper_32_bits(adev->doorbell.base));
+	}
+
+	WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
 }
 
 static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 0c6e7f9b143f..189fcb004579 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -152,18 +152,22 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
 
 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
 	err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
-	if (err)
-		goto out2;
-
-	err = amdgpu_ucode_validate(adev->psp.ta_fw);
-	if (err)
-		goto out2;
-
-	ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
-	adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
-	adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
-	adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
-		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+	if (err) {
+		release_firmware(adev->psp.ta_fw);
+		adev->psp.ta_fw = NULL;
+		dev_info(adev->dev,
+			 "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
+	} else {
+		err = amdgpu_ucode_validate(adev->psp.ta_fw);
+		if (err)
+			goto out2;
+
+		ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
+		adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
+		adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
+		adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
+			le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+	}
 
 	return 0;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index fd0bfe140ee0..aa2f71cc1eba 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -78,7 +78,6 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
-	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
@@ -96,6 +95,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
 static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
 };
@@ -103,6 +103,7 @@ static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
 static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
 };
@@ -127,7 +128,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] = {
 
 static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
 {
-	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
@@ -157,7 +158,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
 };
 
 static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
-	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8849b74078d6..9b639974c70c 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -729,11 +729,13 @@ static int soc15_common_early_init(void *handle)
 	case CHIP_RAVEN:
 		adev->asic_funcs = &soc15_asic_funcs;
 		if (adev->rev_id >= 0x8)
-			adev->external_rev_id = adev->rev_id + 0x81;
+			adev->external_rev_id = adev->rev_id + 0x79;
 		else if (adev->pdev->device == 0x15d8)
 			adev->external_rev_id = adev->rev_id + 0x41;
+		else if (adev->rev_id == 1)
+			adev->external_rev_id = adev->rev_id + 0x20;
 		else
-			adev->external_rev_id = 0x1;
+			adev->external_rev_id = adev->rev_id + 0x01;
 
 		if (adev->rev_id >= 0x8) {
 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index fbf0ee5201c3..c3613604a4f8 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -4,8 +4,8 @@
 
 config HSA_AMD
 	bool "HSA kernel driver for AMD GPU devices"
-	depends on DRM_AMDGPU && X86_64
-	imply AMD_IOMMU_V2
+	depends on DRM_AMDGPU && (X86_64 || ARM64)
+	imply AMD_IOMMU_V2 if X86_64
 	select MMU_NOTIFIER
 	help
 	  Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index b7bc7d7d048f..2e7c44955f43 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -863,6 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
 	return 0;
 }
 
+#ifdef CONFIG_X86_64
 static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
 				uint32_t *num_entries,
 				struct crat_subtype_iolink *sub_type_hdr)
@@ -905,6 +906,7 @@ static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
 
 	return 0;
 }
+#endif
 
 /* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
  *
@@ -920,7 +922,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
 	struct crat_subtype_generic *sub_type_hdr;
 	int avail_size = *size;
 	int numa_node_id;
+#ifdef CONFIG_X86_64
 	uint32_t entries = 0;
+#endif
 	int ret = 0;
 
 	if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU)
@@ -982,6 +986,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
 				sub_type_hdr->length);
 
 		/* Fill in Subtype: IO Link */
+#ifdef CONFIG_X86_64
 		ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
 				&entries,
 				(struct crat_subtype_iolink *)sub_type_hdr);
@@ -992,6 +997,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
 
 		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
 				sub_type_hdr->length * entries);
+#else
+		pr_info("IO link not available for non x86 platforms\n");
+#endif
 
 		crat_table->num_domains++;
 	}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 5f5b2acedbac..09da91644f9f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1093,8 +1093,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
  *		the GPU device is not already present in the topology device
  *		list then return NULL. This means a new topology device has to
  *		be created for this GPU.
- * TODO: Rather than assiging @gpu to first topology device withtout
- *	gpu attached, it will better to have more stringent check.
  */
 static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
 {
@@ -1102,12 +1100,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
 	struct kfd_topology_device *out_dev = NULL;
 
 	down_write(&topology_lock);
-	list_for_each_entry(dev, &topology_device_list, list)
+	list_for_each_entry(dev, &topology_device_list, list) {
+		/* Discrete GPUs need their own topology device list
+		 * entries. Don't assign them to CPU/APU nodes.
+		 */
+		if (!gpu->device_info->needs_iommu_device &&
+		    dev->node_props.cpu_cores_count)
+			continue;
+
 		if (!dev->gpu && (dev->node_props.simd_count > 0)) {
 			dev->gpu = gpu;
 			out_dev = dev;
 			break;
 		}
+	}
 	up_write(&topology_lock);
 	return out_dev;
 }
@@ -1392,7 +1398,6 @@ int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev)
 
 static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
 {
-	const struct cpuinfo_x86 *cpuinfo;
 	int first_cpu_of_numa_node;
 
 	if (!cpumask || cpumask == cpu_none_mask)
@@ -1400,9 +1405,11 @@ static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
 	first_cpu_of_numa_node = cpumask_first(cpumask);
 	if (first_cpu_of_numa_node >= nr_cpu_ids)
 		return -1;
-	cpuinfo = &cpu_data(first_cpu_of_numa_node);
-
-	return cpuinfo->apicid;
+#ifdef CONFIG_X86_64
+	return cpu_data(first_cpu_of_numa_node).apicid;
+#else
+	return first_cpu_of_numa_node;
+#endif
 }
 
 /* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a9a28dbc3e24..5296b8f3e0ab 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -699,22 +699,36 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
 {
 	struct amdgpu_dm_connector *aconnector;
 	struct drm_connector *connector;
+	struct drm_dp_mst_topology_mgr *mgr;
+	int ret;
+	bool need_hotplug = false;
 
 	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		aconnector = to_amdgpu_dm_connector(connector);
-		if (aconnector->dc_link->type == dc_connection_mst_branch &&
-		    !aconnector->mst_port) {
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    head) {
+		aconnector = to_amdgpu_dm_connector(connector);
+		if (aconnector->dc_link->type != dc_connection_mst_branch ||
+		    aconnector->mst_port)
+			continue;
+
+		mgr = &aconnector->mst_mgr;
 
-			if (suspend)
-				drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
-			else
-				drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
-		}
+		if (suspend) {
+			drm_dp_mst_topology_mgr_suspend(mgr);
+		} else {
+			ret = drm_dp_mst_topology_mgr_resume(mgr);
+			if (ret < 0) {
+				drm_dp_mst_topology_mgr_set_mst(mgr, false);
+				need_hotplug = true;
+			}
+		}
 	}
 
 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+	if (need_hotplug)
+		drm_kms_helper_hotplug_event(dev);
 }
 
 /**
@@ -772,12 +786,13 @@ static int dm_suspend(void *handle)
 	struct amdgpu_display_manager *dm = &adev->dm;
 	int ret = 0;
 
+	WARN_ON(adev->dm.cached_state);
+	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
+
 	s3_handle_mst(adev->ddev, true);
 
 	amdgpu_dm_irq_suspend(adev);
 
-	WARN_ON(adev->dm.cached_state);
-	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
 
 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
 
@@ -898,7 +913,6 @@ static int dm_resume(void *handle)
 	struct drm_plane_state *new_plane_state;
 	struct dm_plane_state *dm_new_plane_state;
 	enum dc_connection_type new_connection_type = dc_connection_none;
-	int ret;
 	int i;
 
 	/* power on hardware */
@@ -971,13 +985,13 @@ static int dm_resume(void *handle)
 		}
 	}
 
-	ret = drm_atomic_helper_resume(ddev, dm->cached_state);
+	drm_atomic_helper_resume(ddev, dm->cached_state);
 
 	dm->cached_state = NULL;
 
 	amdgpu_dm_irq_resume_late(adev);
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -1759,7 +1773,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
 		      + caps.min_input_signal * 0x101;
 
 	if (dc_link_set_backlight_level(dm->backlight_link,
-			brightness, 0, 0))
+			brightness, 0))
 		return 0;
 	else
 		return 1;
@@ -4069,7 +4083,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 	}
 
 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
-	    connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+	    connector_type == DRM_MODE_CONNECTOR_eDP) {
 		drm_connector_attach_vrr_capable_property(
 			&aconnector->base);
 	}
@@ -5920,7 +5935,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
 		    !new_crtc_state->color_mgmt_changed &&
-		    !new_crtc_state->vrr_enabled)
+		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
 			continue;
 
 		if (!new_crtc_state->enable)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 9a7ac58eb18e..ddd75a4d8ba5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -671,6 +671,25 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
 	return bytes_from_user;
 }
 
+/*
+ * Returns the min and max vrr vfreq through the connector's debugfs file.
+ * Example usage: cat /sys/kernel/debug/dri/0/DP-1/vrr_range
+ */
+static int vrr_range_show(struct seq_file *m, void *data)
+{
+	struct drm_connector *connector = m->private;
+	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+	if (connector->status != connector_status_connected)
+		return -ENODEV;
+
+	seq_printf(m, "Min: %u\n", (unsigned int)aconnector->min_vfreq);
+	seq_printf(m, "Max: %u\n", (unsigned int)aconnector->max_vfreq);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(vrr_range);
+
 static const struct file_operations dp_link_settings_debugfs_fops = {
 	.owner = THIS_MODULE,
 	.read = dp_link_settings_read,
@@ -697,7 +716,8 @@ static const struct {
 } dp_debugfs_entries[] = {
 	{"link_settings", &dp_link_settings_debugfs_fops},
 	{"phy_settings", &dp_phy_settings_debugfs_fop},
-	{"test_pattern", &dp_phy_test_pattern_fops}
+	{"test_pattern", &dp_phy_test_pattern_fops},
+	{"vrr_range", &vrr_range_fops}
 };
 
 int connector_debugfs_init(struct amdgpu_dm_connector *connector)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 52deacf39841..b0265dbebd4c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2190,8 +2190,7 @@ int dc_link_get_backlight_level(const struct dc_link *link)
 
 bool dc_link_set_backlight_level(const struct dc_link *link,
 		uint32_t backlight_pwm_u16_16,
-		uint32_t frame_ramp,
-		const struct dc_stream_state *stream)
+		uint32_t frame_ramp)
 {
 	struct dc *core_dc = link->ctx->dc;
 	struct abm *abm = core_dc->res_pool->abm;
@@ -2206,10 +2205,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
 		(abm->funcs->set_backlight_level_pwm == NULL))
 		return false;
 
-	if (stream)
-		((struct dc_stream_state *)stream)->bl_pwm_level =
-				backlight_pwm_u16_16;
-
 	use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
 
 	DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
@@ -2637,11 +2632,6 @@ void core_link_enable_stream(
 
 	if (dc_is_dp_signal(pipe_ctx->stream->signal))
 		enable_stream_features(pipe_ctx);
-
-	dc_link_set_backlight_level(pipe_ctx->stream->sink->link,
-			pipe_ctx->stream->bl_pwm_level,
-			0,
-			pipe_ctx->stream);
 	}
 
 }
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 29f19d57ff7a..b2243e0dad1f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h | |||
| @@ -146,8 +146,7 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_ | |||
| 146 | */ | 146 | */ |
| 147 | bool dc_link_set_backlight_level(const struct dc_link *dc_link, | 147 | bool dc_link_set_backlight_level(const struct dc_link *dc_link, |
| 148 | uint32_t backlight_pwm_u16_16, | 148 | uint32_t backlight_pwm_u16_16, |
| 149 | uint32_t frame_ramp, | 149 | uint32_t frame_ramp); |
| 150 | const struct dc_stream_state *stream); | ||
| 151 | 150 | ||
| 152 | int dc_link_get_backlight_level(const struct dc_link *dc_link); | 151 | int dc_link_get_backlight_level(const struct dc_link *dc_link); |
| 153 | 152 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index be34d638e15d..d70c9e1cda3d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h | |||
| @@ -91,7 +91,6 @@ struct dc_stream_state { | |||
| 91 | 91 | ||
| 92 | /* DMCU info */ | 92 | /* DMCU info */ |
| 93 | unsigned int abm_level; | 93 | unsigned int abm_level; |
| 94 | unsigned int bl_pwm_level; | ||
| 95 | 94 | ||
| 96 | /* from core_stream struct */ | 95 | /* from core_stream struct */ |
| 97 | struct dc_context *ctx; | 96 | struct dc_context *ctx; |
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c index afd287f08bc9..7a72ee46f14b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c | |||
| @@ -591,7 +591,15 @@ static void dce11_pplib_apply_display_requirements( | |||
| 591 | dc, | 591 | dc, |
| 592 | context->bw.dce.sclk_khz); | 592 | context->bw.dce.sclk_khz); |
| 593 | 593 | ||
| 594 | pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz; | 594 | /* |
| 595 | * As workaround for >4x4K lightup set dcfclock to min_engine_clock value. | ||
| 596 | * This is not required for fewer than 5 displays, | ||
| 597 | * thus don't request dcfclk in dc to avoid impact | ||
| 598 | * on power saving. | ||
| 599 | * | ||
| 600 | */ | ||
| 601 | pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4)? | ||
| 602 | pp_display_cfg->min_engine_clock_khz : 0; | ||
| 595 | 603 | ||
| 596 | pp_display_cfg->min_engine_clock_deep_sleep_khz | 604 | pp_display_cfg->min_engine_clock_deep_sleep_khz |
| 597 | = context->bw.dce.sclk_deep_sleep_khz; | 605 | = context->bw.dce.sclk_deep_sleep_khz; |
| @@ -654,6 +662,11 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr, | |||
| 654 | { | 662 | { |
| 655 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); | 663 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); |
| 656 | struct dm_pp_power_level_change_request level_change_req; | 664 | struct dm_pp_power_level_change_request level_change_req; |
| 665 | int patched_disp_clk = context->bw.dce.dispclk_khz; | ||
| 666 | |||
| 667 | /*TODO: W/A for dal3 linux, investigate why this works */ | ||
| 668 | if (!clk_mgr_dce->dfs_bypass_active) | ||
| 669 | patched_disp_clk = patched_disp_clk * 115 / 100; | ||
| 657 | 670 | ||
| 658 | level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); | 671 | level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); |
| 659 | /* get max clock state from PPLIB */ | 672 | /* get max clock state from PPLIB */ |
| @@ -663,9 +676,9 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr, | |||
| 663 | clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; | 676 | clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; |
| 664 | } | 677 | } |
| 665 | 678 | ||
| 666 | if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { | 679 | if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) { |
| 667 | context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz); | 680 | context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk); |
| 668 | clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; | 681 | clk_mgr->clks.dispclk_khz = patched_disp_clk; |
| 669 | } | 682 | } |
| 670 | dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); | 683 | dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); |
| 671 | } | 684 | } |
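Two clock decisions change above: dcfclk is only requested once more than four streams are active, and dispclk gets a 15% pad whenever DFS bypass is inactive. A standalone sketch of both helpers, with illustrative numbers only:

#include <stdbool.h>
#include <stdio.h>

static int min_dcfclock_khz(int stream_count, int min_engine_clock_khz)
{
	/* Only the >4-display light-up case needs the dcfclk request. */
	return (stream_count > 4) ? min_engine_clock_khz : 0;
}

static int patched_dispclk_khz(int dispclk_khz, bool dfs_bypass_active)
{
	/* 15% padding applied when DFS bypass is not active. */
	return dfs_bypass_active ? dispclk_khz : dispclk_khz * 115 / 100;
}

int main(void)
{
	printf("dcfclk for 2 streams: %d kHz\n", min_dcfclock_khz(2, 300000));
	printf("dcfclk for 5 streams: %d kHz\n", min_dcfclock_khz(5, 300000));
	printf("padded dispclk: %d kHz\n", patched_dispclk_khz(400000, false));
	return 0;
}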
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h index acd418515346..a6b80fdaa666 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h | |||
| @@ -37,6 +37,10 @@ void dce100_prepare_bandwidth( | |||
| 37 | struct dc *dc, | 37 | struct dc *dc, |
| 38 | struct dc_state *context); | 38 | struct dc_state *context); |
| 39 | 39 | ||
| 40 | void dce100_optimize_bandwidth( | ||
| 41 | struct dc *dc, | ||
| 42 | struct dc_state *context); | ||
| 43 | |||
| 40 | bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id, | 44 | bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id, |
| 41 | struct dc_bios *dcb, | 45 | struct dc_bios *dcb, |
| 42 | enum pipe_gating_control power_gating); | 46 | enum pipe_gating_control power_gating); |
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 4bf24758217f..8f09b8625c5d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | |||
| @@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) | |||
| 1000 | 1000 | ||
| 1001 | pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio); | 1001 | pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio); |
| 1002 | 1002 | ||
| 1003 | if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) | 1003 | if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) |
| 1004 | /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/ | 1004 | /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/ |
| 1005 | pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); | 1005 | pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); |
| 1006 | /* un-mute audio */ | 1006 | /* un-mute audio */ |
| @@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) | |||
| 1017 | pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( | 1017 | pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( |
| 1018 | pipe_ctx->stream_res.stream_enc, true); | 1018 | pipe_ctx->stream_res.stream_enc, true); |
| 1019 | if (pipe_ctx->stream_res.audio) { | 1019 | if (pipe_ctx->stream_res.audio) { |
| 1020 | struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; | ||
| 1021 | |||
| 1020 | if (option != KEEP_ACQUIRED_RESOURCE || | 1022 | if (option != KEEP_ACQUIRED_RESOURCE || |
| 1021 | !dc->debug.az_endpoint_mute_only) { | 1023 | !dc->debug.az_endpoint_mute_only) { |
| 1022 | /*only disable az_endpoint if power down or free*/ | 1024 | /*only disable az_endpoint if power down or free*/ |
| @@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) | |||
| 1036 | update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false); | 1038 | update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false); |
| 1037 | pipe_ctx->stream_res.audio = NULL; | 1039 | pipe_ctx->stream_res.audio = NULL; |
| 1038 | } | 1040 | } |
| 1041 | if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) | ||
| 1042 | /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/ | ||
| 1043 | pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); | ||
| 1039 | 1044 | ||
| 1040 | /* TODO: notify audio driver for if audio modes list changed | 1045 | /* TODO: notify audio driver for if audio modes list changed |
| 1041 | * add audio mode list change flag */ | 1046 | * add audio mode list change flag */ |
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c index a60a90e68d91..c4543178ba20 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c | |||
| @@ -77,6 +77,6 @@ void dce80_hw_sequencer_construct(struct dc *dc) | |||
| 77 | dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; | 77 | dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; |
| 78 | dc->hwss.pipe_control_lock = dce_pipe_control_lock; | 78 | dc->hwss.pipe_control_lock = dce_pipe_control_lock; |
| 79 | dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; | 79 | dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; |
| 80 | dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth; | 80 | dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth; |
| 81 | } | 81 | } |
| 82 | 82 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index cdd1d6b7b9f2..4e9ea50141bd 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | |||
| @@ -790,9 +790,22 @@ bool dce80_validate_bandwidth( | |||
| 790 | struct dc *dc, | 790 | struct dc *dc, |
| 791 | struct dc_state *context) | 791 | struct dc_state *context) |
| 792 | { | 792 | { |
| 793 | /* TODO implement when needed but for now hardcode max value*/ | 793 | int i; |
| 794 | context->bw.dce.dispclk_khz = 681000; | 794 | bool at_least_one_pipe = false; |
| 795 | context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; | 795 | |
| 796 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | ||
| 797 | if (context->res_ctx.pipe_ctx[i].stream) | ||
| 798 | at_least_one_pipe = true; | ||
| 799 | } | ||
| 800 | |||
| 801 | if (at_least_one_pipe) { | ||
| 802 | /* TODO implement when needed but for now hardcode max value*/ | ||
| 803 | context->bw.dce.dispclk_khz = 681000; | ||
| 804 | context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; | ||
| 805 | } else { | ||
| 806 | context->bw.dce.dispclk_khz = 0; | ||
| 807 | context->bw.dce.yclk_khz = 0; | ||
| 808 | } | ||
| 796 | 809 | ||
| 797 | return true; | 810 | return true; |
| 798 | } | 811 | } |
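With no stream on any pipe, the validated clocks can now drop to zero instead of staying at the hardcoded maximum. A user-space sketch of that decision; the memory-type multiplier value is an assumption for illustration, not the driver's define:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MEMORY_TYPE_MULTIPLIER 4 /* assumed value, for the sketch only */

struct pipe_slot { const void *stream; };

static void validate_bandwidth_like(const struct pipe_slot *pipes, size_t count,
				    int *dispclk_khz, int *yclk_khz)
{
	bool at_least_one_pipe = false;
	size_t i;

	for (i = 0; i < count; i++)
		if (pipes[i].stream)
			at_least_one_pipe = true;

	if (at_least_one_pipe) {
		*dispclk_khz = 681000;                    /* hardcoded maximum */
		*yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
	} else {
		*dispclk_khz = 0;                         /* nothing to drive */
		*yclk_khz = 0;
	}
}

int main(void)
{
	struct pipe_slot pipes[3] = { { NULL }, { NULL }, { NULL } };
	int dispclk, yclk;

	validate_bandwidth_like(pipes, 3, &dispclk, &yclk);
	printf("idle: dispclk=%d yclk=%d\n", dispclk, yclk);
	return 0;
}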
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index dcb3c5530236..cd1ebe57ed59 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | |||
| @@ -463,7 +463,7 @@ void dpp1_set_cursor_position( | |||
| 463 | if (src_y_offset >= (int)param->viewport.height) | 463 | if (src_y_offset >= (int)param->viewport.height) |
| 464 | cur_en = 0; /* not visible beyond bottom edge*/ | 464 | cur_en = 0; /* not visible beyond bottom edge*/ |
| 465 | 465 | ||
| 466 | if (src_y_offset < 0) | 466 | if (src_y_offset + (int)height <= 0) |
| 467 | cur_en = 0; /* not visible beyond top edge*/ | 467 | cur_en = 0; /* not visible beyond top edge*/ |
| 468 | 468 | ||
| 469 | REG_UPDATE(CURSOR0_CONTROL, | 469 | REG_UPDATE(CURSOR0_CONTROL, |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 345af015d061..d1acd7165bc8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | |||
| @@ -1140,7 +1140,7 @@ void hubp1_cursor_set_position( | |||
| 1140 | if (src_y_offset >= (int)param->viewport.height) | 1140 | if (src_y_offset >= (int)param->viewport.height) |
| 1141 | cur_en = 0; /* not visible beyond bottom edge*/ | 1141 | cur_en = 0; /* not visible beyond bottom edge*/ |
| 1142 | 1142 | ||
| 1143 | if (src_y_offset < 0) //+ (int)hubp->curs_attr.height | 1143 | if (src_y_offset + (int)hubp->curs_attr.height <= 0) |
| 1144 | cur_en = 0; /* not visible beyond top edge*/ | 1144 | cur_en = 0; /* not visible beyond top edge*/ |
| 1145 | 1145 | ||
| 1146 | if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) | 1146 | if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) |
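Both cursor hunks replace the old src_y_offset < 0 test, which hid the cursor as soon as its origin moved above the viewport, with one that only hides it once the entire cursor height is above the top edge. A tiny sketch of the new predicate:

#include <stdbool.h>
#include <stdio.h>

/* Cursor stays enabled while any of its rows is still inside the viewport. */
static bool cursor_visible_at_top(int src_y_offset, int height)
{
	return src_y_offset + height > 0;
}

int main(void)
{
	/* 64-line cursor with its origin 10 lines above the viewport:
	 * still partially visible, so it must stay enabled. */
	printf("partially visible: %d\n", cursor_visible_at_top(-10, 64));
	/* Fully above the viewport: hidden. */
	printf("fully hidden: %d\n", cursor_visible_at_top(-64, 64));
	return 0;
}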
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 91e015e14355..41883c981789 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | |||
| @@ -2355,29 +2355,22 @@ static void dcn10_apply_ctx_for_surface( | |||
| 2355 | top_pipe_to_program->plane_state->update_flags.bits.full_update) | 2355 | top_pipe_to_program->plane_state->update_flags.bits.full_update) |
| 2356 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | 2356 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
| 2357 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; | 2357 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; |
| 2358 | 2358 | tg = pipe_ctx->stream_res.tg; | |
| 2359 | /* Skip inactive pipes and ones already updated */ | 2359 | /* Skip inactive pipes and ones already updated */ |
| 2360 | if (!pipe_ctx->stream || pipe_ctx->stream == stream | 2360 | if (!pipe_ctx->stream || pipe_ctx->stream == stream |
| 2361 | || !pipe_ctx->plane_state) | 2361 | || !pipe_ctx->plane_state |
| 2362 | || !tg->funcs->is_tg_enabled(tg)) | ||
| 2362 | continue; | 2363 | continue; |
| 2363 | 2364 | ||
| 2364 | pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); | 2365 | tg->funcs->lock(tg); |
| 2365 | 2366 | ||
| 2366 | pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( | 2367 | pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( |
| 2367 | pipe_ctx->plane_res.hubp, | 2368 | pipe_ctx->plane_res.hubp, |
| 2368 | &pipe_ctx->dlg_regs, | 2369 | &pipe_ctx->dlg_regs, |
| 2369 | &pipe_ctx->ttu_regs); | 2370 | &pipe_ctx->ttu_regs); |
| 2370 | } | ||
| 2371 | |||
| 2372 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | ||
| 2373 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; | ||
| 2374 | 2371 | ||
| 2375 | if (!pipe_ctx->stream || pipe_ctx->stream == stream | 2372 | tg->funcs->unlock(tg); |
| 2376 | || !pipe_ctx->plane_state) | 2373 | } |
| 2377 | continue; | ||
| 2378 | |||
| 2379 | dcn10_pipe_control_lock(dc, pipe_ctx, false); | ||
| 2380 | } | ||
| 2381 | 2374 | ||
| 2382 | if (num_planes == 0) | 2375 | if (num_planes == 0) |
| 2383 | false_optc_underflow_wa(dc, stream, tg); | 2376 | false_optc_underflow_wa(dc, stream, tg); |
| @@ -2665,8 +2658,8 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) | |||
| 2665 | .mirror = pipe_ctx->plane_state->horizontal_mirror | 2658 | .mirror = pipe_ctx->plane_state->horizontal_mirror |
| 2666 | }; | 2659 | }; |
| 2667 | 2660 | ||
| 2668 | pos_cpy.x -= pipe_ctx->plane_state->dst_rect.x; | 2661 | pos_cpy.x_hotspot += pipe_ctx->plane_state->dst_rect.x; |
| 2669 | pos_cpy.y -= pipe_ctx->plane_state->dst_rect.y; | 2662 | pos_cpy.y_hotspot += pipe_ctx->plane_state->dst_rect.y; |
| 2670 | 2663 | ||
| 2671 | if (pipe_ctx->plane_state->address.type | 2664 | if (pipe_ctx->plane_state->address.type |
| 2672 | == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) | 2665 | == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) |
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 00f63b7dd32f..c11a443dcbc8 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c | |||
| @@ -57,6 +57,7 @@ static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_le | |||
| 57 | #define NUM_POWER_FN_SEGS 8 | 57 | #define NUM_POWER_FN_SEGS 8 |
| 58 | #define NUM_BL_CURVE_SEGS 16 | 58 | #define NUM_BL_CURVE_SEGS 16 |
| 59 | 59 | ||
| 60 | #pragma pack(push, 1) | ||
| 60 | /* NOTE: iRAM is 256B in size */ | 61 | /* NOTE: iRAM is 256B in size */ |
| 61 | struct iram_table_v_2 { | 62 | struct iram_table_v_2 { |
| 62 | /* flags */ | 63 | /* flags */ |
| @@ -100,6 +101,7 @@ struct iram_table_v_2 { | |||
| 100 | uint8_t dummy8; /* 0xfe */ | 101 | uint8_t dummy8; /* 0xfe */ |
| 101 | uint8_t dummy9; /* 0xff */ | 102 | uint8_t dummy9; /* 0xff */ |
| 102 | }; | 103 | }; |
| 104 | #pragma pack(pop) | ||
| 103 | 105 | ||
| 104 | static uint16_t backlight_8_to_16(unsigned int backlight_8bit) | 106 | static uint16_t backlight_8_to_16(unsigned int backlight_8bit) |
| 105 | { | 107 | { |
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 1479ea1dc3e7..789c4f288485 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h | |||
| @@ -127,12 +127,13 @@ enum amd_pp_task { | |||
| 127 | }; | 127 | }; |
| 128 | 128 | ||
| 129 | enum PP_SMC_POWER_PROFILE { | 129 | enum PP_SMC_POWER_PROFILE { |
| 130 | PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x0, | 130 | PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT = 0x0, |
| 131 | PP_SMC_POWER_PROFILE_POWERSAVING = 0x1, | 131 | PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x1, |
| 132 | PP_SMC_POWER_PROFILE_VIDEO = 0x2, | 132 | PP_SMC_POWER_PROFILE_POWERSAVING = 0x2, |
| 133 | PP_SMC_POWER_PROFILE_VR = 0x3, | 133 | PP_SMC_POWER_PROFILE_VIDEO = 0x3, |
| 134 | PP_SMC_POWER_PROFILE_COMPUTE = 0x4, | 134 | PP_SMC_POWER_PROFILE_VR = 0x4, |
| 135 | PP_SMC_POWER_PROFILE_CUSTOM = 0x5, | 135 | PP_SMC_POWER_PROFILE_COMPUTE = 0x5, |
| 136 | PP_SMC_POWER_PROFILE_CUSTOM = 0x6, | ||
| 136 | }; | 137 | }; |
| 137 | 138 | ||
| 138 | enum { | 139 | enum { |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 0173d0480024..310b102a9292 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | |||
| @@ -64,17 +64,19 @@ static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr); | |||
| 64 | 64 | ||
| 65 | static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr) | 65 | static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr) |
| 66 | { | 66 | { |
| 67 | hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 2; | 67 | hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; |
| 68 | hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 0; | 68 | hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; |
| 69 | hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 1; | 69 | hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; |
| 70 | hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 3; | 70 | hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3; |
| 71 | hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 4; | 71 | hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; |
| 72 | 72 | hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; | |
| 73 | hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_POWERSAVING; | 73 | |
| 74 | hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_VIDEO; | 74 | hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; |
| 75 | hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; | 75 | hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; |
| 76 | hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VR; | 76 | hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; |
| 77 | hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE; | 77 | hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO; |
| 78 | hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_VR; | ||
| 79 | hwmgr->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE; | ||
| 78 | } | 80 | } |
| 79 | 81 | ||
| 80 | int hwmgr_early_init(struct pp_hwmgr *hwmgr) | 82 | int hwmgr_early_init(struct pp_hwmgr *hwmgr) |
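With BOOTUP_DEFAULT inserted at value 0, every later profile shifts up by one and the priority and setting tables each gain a slot. A sketch of the renumbered enum and of how a single-bit workload mask is derived from the priority table; the names mirror the kernel's, but the program is illustrative only:

#include <stdio.h>

/* Mirrors the renumbered PP_SMC_POWER_PROFILE_* values. */
enum power_profile {
	PROFILE_BOOTUP_DEFAULT = 0x0,
	PROFILE_FULLSCREEN3D   = 0x1,
	PROFILE_POWERSAVING    = 0x2,
	PROFILE_VIDEO          = 0x3,
	PROFILE_VR             = 0x4,
	PROFILE_COMPUTE        = 0x5,
	PROFILE_CUSTOM         = 0x6,
};

int main(void)
{
	/* Priority table as initialized for the non-custom profiles. */
	int priority[PROFILE_CUSTOM] = { 0, 1, 2, 3, 4, 5 };

	/* The backend now starts in BOOTUP_DEFAULT; its workload mask is a
	 * single bit selected by that profile's priority. */
	unsigned int workload_mask = 1u << priority[PROFILE_BOOTUP_DEFAULT];

	printf("workload_mask = 0x%x\n", workload_mask);
	return 0;
}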
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index f95c5f50eb0f..5273de3c5b98 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | |||
| @@ -1033,6 +1033,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, | |||
| 1033 | break; | 1033 | break; |
| 1034 | case amd_pp_dpp_clock: | 1034 | case amd_pp_dpp_clock: |
| 1035 | pclk_vol_table = pinfo->vdd_dep_on_dppclk; | 1035 | pclk_vol_table = pinfo->vdd_dep_on_dppclk; |
| 1036 | break; | ||
| 1036 | default: | 1037 | default: |
| 1037 | return -EINVAL; | 1038 | return -EINVAL; |
| 1038 | } | 1039 | } |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index d91390459326..c8f5c00dd1e7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | |||
| @@ -77,8 +77,9 @@ | |||
| 77 | #define PCIE_BUS_CLK 10000 | 77 | #define PCIE_BUS_CLK 10000 |
| 78 | #define TCLK (PCIE_BUS_CLK / 10) | 78 | #define TCLK (PCIE_BUS_CLK / 10) |
| 79 | 79 | ||
| 80 | static const struct profile_mode_setting smu7_profiling[6] = | 80 | static const struct profile_mode_setting smu7_profiling[7] = |
| 81 | {{1, 0, 100, 30, 1, 0, 100, 10}, | 81 | {{0, 0, 0, 0, 0, 0, 0, 0}, |
| 82 | {1, 0, 100, 30, 1, 0, 100, 10}, | ||
| 82 | {1, 10, 0, 30, 0, 0, 0, 0}, | 83 | {1, 10, 0, 30, 0, 0, 0, 0}, |
| 83 | {0, 0, 0, 0, 1, 10, 16, 31}, | 84 | {0, 0, 0, 0, 1, 10, 16, 31}, |
| 84 | {1, 0, 11, 50, 1, 0, 100, 10}, | 85 | {1, 0, 11, 50, 1, 0, 100, 10}, |
| @@ -4889,7 +4890,8 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) | |||
| 4889 | uint32_t i, size = 0; | 4890 | uint32_t i, size = 0; |
| 4890 | uint32_t len; | 4891 | uint32_t len; |
| 4891 | 4892 | ||
| 4892 | static const char *profile_name[6] = {"3D_FULL_SCREEN", | 4893 | static const char *profile_name[7] = {"BOOTUP_DEFAULT", |
| 4894 | "3D_FULL_SCREEN", | ||
| 4893 | "POWER_SAVING", | 4895 | "POWER_SAVING", |
| 4894 | "VIDEO", | 4896 | "VIDEO", |
| 4895 | "VR", | 4897 | "VR", |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 79c86247d0ac..91e3bbe6d61d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | |||
| @@ -804,9 +804,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) | |||
| 804 | 804 | ||
| 805 | hwmgr->backend = data; | 805 | hwmgr->backend = data; |
| 806 | 806 | ||
| 807 | hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO]; | 807 | hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; |
| 808 | hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; | 808 | hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; |
| 809 | hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; | 809 | hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; |
| 810 | 810 | ||
| 811 | vega10_set_default_registry_data(hwmgr); | 811 | vega10_set_default_registry_data(hwmgr); |
| 812 | data->disable_dpm_mask = 0xff; | 812 | data->disable_dpm_mask = 0xff; |
| @@ -4668,13 +4668,15 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) | |||
| 4668 | { | 4668 | { |
| 4669 | struct vega10_hwmgr *data = hwmgr->backend; | 4669 | struct vega10_hwmgr *data = hwmgr->backend; |
| 4670 | uint32_t i, size = 0; | 4670 | uint32_t i, size = 0; |
| 4671 | static const uint8_t profile_mode_setting[5][4] = {{70, 60, 1, 3,}, | 4671 | static const uint8_t profile_mode_setting[6][4] = {{70, 60, 0, 0,}, |
| 4672 | {70, 60, 1, 3,}, | ||
| 4672 | {90, 60, 0, 0,}, | 4673 | {90, 60, 0, 0,}, |
| 4673 | {70, 60, 0, 0,}, | 4674 | {70, 60, 0, 0,}, |
| 4674 | {70, 90, 0, 0,}, | 4675 | {70, 90, 0, 0,}, |
| 4675 | {30, 60, 0, 6,}, | 4676 | {30, 60, 0, 6,}, |
| 4676 | }; | 4677 | }; |
| 4677 | static const char *profile_name[6] = {"3D_FULL_SCREEN", | 4678 | static const char *profile_name[7] = {"BOOTUP_DEFAULT", |
| 4679 | "3D_FULL_SCREEN", | ||
| 4678 | "POWER_SAVING", | 4680 | "POWER_SAVING", |
| 4679 | "VIDEO", | 4681 | "VIDEO", |
| 4680 | "VR", | 4682 | "VR", |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c index b8747a5c9204..99d596dc0e89 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include "vega10_pptable.h" | 32 | #include "vega10_pptable.h" |
| 33 | 33 | ||
| 34 | #define NUM_DSPCLK_LEVELS 8 | 34 | #define NUM_DSPCLK_LEVELS 8 |
| 35 | #define VEGA10_ENGINECLOCK_HARDMAX 198000 | ||
| 35 | 36 | ||
| 36 | static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable, | 37 | static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable, |
| 37 | enum phm_platform_caps cap) | 38 | enum phm_platform_caps cap) |
| @@ -258,7 +259,26 @@ static int init_over_drive_limits( | |||
| 258 | struct pp_hwmgr *hwmgr, | 259 | struct pp_hwmgr *hwmgr, |
| 259 | const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) | 260 | const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) |
| 260 | { | 261 | { |
| 261 | hwmgr->platform_descriptor.overdriveLimit.engineClock = | 262 | const ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table = |
| 263 | (const ATOM_Vega10_GFXCLK_Dependency_Table *) | ||
| 264 | (((unsigned long) powerplay_table) + | ||
| 265 | le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset)); | ||
| 266 | bool is_acg_enabled = false; | ||
| 267 | ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_v2; | ||
| 268 | |||
| 269 | if (gfxclk_dep_table->ucRevId == 1) { | ||
| 270 | patom_record_v2 = | ||
| 271 | (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries; | ||
| 272 | is_acg_enabled = | ||
| 273 | (bool)patom_record_v2[gfxclk_dep_table->ucNumEntries-1].ucACGEnable; | ||
| 274 | } | ||
| 275 | |||
| 276 | if (powerplay_table->ulMaxODEngineClock > VEGA10_ENGINECLOCK_HARDMAX && | ||
| 277 | !is_acg_enabled) | ||
| 278 | hwmgr->platform_descriptor.overdriveLimit.engineClock = | ||
| 279 | VEGA10_ENGINECLOCK_HARDMAX; | ||
| 280 | else | ||
| 281 | hwmgr->platform_descriptor.overdriveLimit.engineClock = | ||
| 262 | le32_to_cpu(powerplay_table->ulMaxODEngineClock); | 282 | le32_to_cpu(powerplay_table->ulMaxODEngineClock); |
| 263 | hwmgr->platform_descriptor.overdriveLimit.memoryClock = | 283 | hwmgr->platform_descriptor.overdriveLimit.memoryClock = |
| 264 | le32_to_cpu(powerplay_table->ulMaxODMemoryClock); | 284 | le32_to_cpu(powerplay_table->ulMaxODMemoryClock); |
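The overdrive engine-clock limit is now clamped to the hard maximum unless the last gfxclk dependency record reports ACG enabled. A standalone sketch of that clamp; units follow whatever the power-play table field uses, and the numbers are illustrative only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ENGINECLOCK_HARDMAX 198000 /* same figure as the define above */

static uint32_t overdrive_engine_limit(uint32_t table_max, bool acg_enabled)
{
	/* Without ACG, never advertise more than the hard maximum. */
	if (table_max > ENGINECLOCK_HARDMAX && !acg_enabled)
		return ENGINECLOCK_HARDMAX;
	return table_max;
}

int main(void)
{
	printf("no ACG:   %u\n", overdrive_engine_limit(200000, false));
	printf("with ACG: %u\n", overdrive_engine_limit(200000, true));
	return 0;
}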
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 54364444ecd1..0c8212902275 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | |||
| @@ -753,6 +753,22 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr) | |||
| 753 | return 0; | 753 | return 0; |
| 754 | } | 754 | } |
| 755 | 755 | ||
| 756 | static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr) | ||
| 757 | { | ||
| 758 | uint32_t result; | ||
| 759 | |||
| 760 | PP_ASSERT_WITH_CODE( | ||
| 761 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0, | ||
| 762 | "[Run_ACG_BTC] Attempt to run ACG BTC failed!", | ||
| 763 | return -EINVAL); | ||
| 764 | |||
| 765 | result = smum_get_argument(hwmgr); | ||
| 766 | PP_ASSERT_WITH_CODE(result == 1, | ||
| 767 | "Failed to run ACG BTC!", return -EINVAL); | ||
| 768 | |||
| 769 | return 0; | ||
| 770 | } | ||
| 771 | |||
| 756 | static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) | 772 | static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) |
| 757 | { | 773 | { |
| 758 | struct vega12_hwmgr *data = | 774 | struct vega12_hwmgr *data = |
| @@ -931,6 +947,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr) | |||
| 931 | "Failed to initialize SMC table!", | 947 | "Failed to initialize SMC table!", |
| 932 | result = tmp_result); | 948 | result = tmp_result); |
| 933 | 949 | ||
| 950 | tmp_result = vega12_run_acg_btc(hwmgr); | ||
| 951 | PP_ASSERT_WITH_CODE(!tmp_result, | ||
| 952 | "Failed to run ACG BTC!", | ||
| 953 | result = tmp_result); | ||
| 954 | |||
| 934 | result = vega12_enable_all_smu_features(hwmgr); | 955 | result = vega12_enable_all_smu_features(hwmgr); |
| 935 | PP_ASSERT_WITH_CODE(!result, | 956 | PP_ASSERT_WITH_CODE(!result, |
| 936 | "Failed to enable all smu features!", | 957 | "Failed to enable all smu features!", |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 26154f9b2178..82935a3bd950 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | |||
| @@ -390,9 +390,9 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr) | |||
| 390 | 390 | ||
| 391 | hwmgr->backend = data; | 391 | hwmgr->backend = data; |
| 392 | 392 | ||
| 393 | hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO]; | 393 | hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; |
| 394 | hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; | 394 | hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; |
| 395 | hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; | 395 | hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; |
| 396 | 396 | ||
| 397 | vega20_set_default_registry_data(hwmgr); | 397 | vega20_set_default_registry_data(hwmgr); |
| 398 | 398 | ||
| @@ -980,6 +980,9 @@ static int vega20_od8_set_feature_capabilities( | |||
| 980 | pp_table->FanZeroRpmEnable) | 980 | pp_table->FanZeroRpmEnable) |
| 981 | od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL; | 981 | od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL; |
| 982 | 982 | ||
| 983 | if (!od_settings->overdrive8_capabilities) | ||
| 984 | hwmgr->od_enabled = false; | ||
| 985 | |||
| 983 | return 0; | 986 | return 0; |
| 984 | } | 987 | } |
| 985 | 988 | ||
| @@ -1689,13 +1692,6 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_ | |||
| 1689 | (PPCLK_UCLK << 16) | (min_freq & 0xffff))), | 1692 | (PPCLK_UCLK << 16) | (min_freq & 0xffff))), |
| 1690 | "Failed to set soft min memclk !", | 1693 | "Failed to set soft min memclk !", |
| 1691 | return ret); | 1694 | return ret); |
| 1692 | |||
| 1693 | min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level; | ||
| 1694 | PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( | ||
| 1695 | hwmgr, PPSMC_MSG_SetHardMinByFreq, | ||
| 1696 | (PPCLK_UCLK << 16) | (min_freq & 0xffff))), | ||
| 1697 | "Failed to set hard min memclk !", | ||
| 1698 | return ret); | ||
| 1699 | } | 1695 | } |
| 1700 | 1696 | ||
| 1701 | if (data->smu_features[GNLD_DPM_UVD].enabled && | 1697 | if (data->smu_features[GNLD_DPM_UVD].enabled && |
| @@ -2248,6 +2244,13 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr, | |||
| 2248 | soft_min_level = mask ? (ffs(mask) - 1) : 0; | 2244 | soft_min_level = mask ? (ffs(mask) - 1) : 0; |
| 2249 | soft_max_level = mask ? (fls(mask) - 1) : 0; | 2245 | soft_max_level = mask ? (fls(mask) - 1) : 0; |
| 2250 | 2246 | ||
| 2247 | if (soft_max_level >= data->dpm_table.gfx_table.count) { | ||
| 2248 | pr_err("Clock level specified %d is over max allowed %d\n", | ||
| 2249 | soft_max_level, | ||
| 2250 | data->dpm_table.gfx_table.count - 1); | ||
| 2251 | return -EINVAL; | ||
| 2252 | } | ||
| 2253 | |||
| 2251 | data->dpm_table.gfx_table.dpm_state.soft_min_level = | 2254 | data->dpm_table.gfx_table.dpm_state.soft_min_level = |
| 2252 | data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; | 2255 | data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; |
| 2253 | data->dpm_table.gfx_table.dpm_state.soft_max_level = | 2256 | data->dpm_table.gfx_table.dpm_state.soft_max_level = |
| @@ -2268,6 +2271,13 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr, | |||
| 2268 | soft_min_level = mask ? (ffs(mask) - 1) : 0; | 2271 | soft_min_level = mask ? (ffs(mask) - 1) : 0; |
| 2269 | soft_max_level = mask ? (fls(mask) - 1) : 0; | 2272 | soft_max_level = mask ? (fls(mask) - 1) : 0; |
| 2270 | 2273 | ||
| 2274 | if (soft_max_level >= data->dpm_table.mem_table.count) { | ||
| 2275 | pr_err("Clock level specified %d is over max allowed %d\n", | ||
| 2276 | soft_max_level, | ||
| 2277 | data->dpm_table.mem_table.count - 1); | ||
| 2278 | return -EINVAL; | ||
| 2279 | } | ||
| 2280 | |||
| 2271 | data->dpm_table.mem_table.dpm_state.soft_min_level = | 2281 | data->dpm_table.mem_table.dpm_state.soft_min_level = |
| 2272 | data->dpm_table.mem_table.dpm_levels[soft_min_level].value; | 2282 | data->dpm_table.mem_table.dpm_levels[soft_min_level].value; |
| 2273 | data->dpm_table.mem_table.dpm_state.soft_max_level = | 2283 | data->dpm_table.mem_table.dpm_state.soft_max_level = |
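The added checks reject a forced clock mask whose highest set bit indexes past the DPM table. A user-space sketch of the mask-to-level translation and the bounds check; GCC-style builtins stand in for the kernel's ffs()/fls():

#include <stdio.h>

static unsigned int lowest_level(unsigned int mask)
{
	return mask ? (unsigned int)__builtin_ctz(mask) : 0;
}

static unsigned int highest_level(unsigned int mask)
{
	return mask ? 31u - (unsigned int)__builtin_clz(mask) : 0;
}

/* Translate a user-supplied level mask into soft min/max indices and
 * reject an index beyond the DPM table, as the added checks do. */
static int mask_to_levels(unsigned int mask, unsigned int table_count,
			  unsigned int *soft_min, unsigned int *soft_max)
{
	*soft_min = lowest_level(mask);
	*soft_max = highest_level(mask);

	if (*soft_max >= table_count) {
		fprintf(stderr, "Clock level specified %u is over max allowed %u\n",
			*soft_max, table_count - 1);
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned int lo, hi;

	if (mask_to_levels(0x6, 8, &lo, &hi) == 0)   /* levels 1..2: accepted */
		printf("min=%u max=%u\n", lo, hi);
	if (mask_to_levels(0x106, 8, &lo, &hi) == 0) /* level 8: rejected */
		printf("min=%u max=%u\n", lo, hi);
	return 0;
}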
| @@ -3261,6 +3271,9 @@ static int conv_power_profile_to_pplib_workload(int power_profile) | |||
| 3261 | int pplib_workload = 0; | 3271 | int pplib_workload = 0; |
| 3262 | 3272 | ||
| 3263 | switch (power_profile) { | 3273 | switch (power_profile) { |
| 3274 | case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT: | ||
| 3275 | pplib_workload = WORKLOAD_DEFAULT_BIT; | ||
| 3276 | break; | ||
| 3264 | case PP_SMC_POWER_PROFILE_FULLSCREEN3D: | 3277 | case PP_SMC_POWER_PROFILE_FULLSCREEN3D: |
| 3265 | pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT; | 3278 | pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT; |
| 3266 | break; | 3279 | break; |
| @@ -3290,6 +3303,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) | |||
| 3290 | uint32_t i, size = 0; | 3303 | uint32_t i, size = 0; |
| 3291 | uint16_t workload_type = 0; | 3304 | uint16_t workload_type = 0; |
| 3292 | static const char *profile_name[] = { | 3305 | static const char *profile_name[] = { |
| 3306 | "BOOTUP_DEFAULT", | ||
| 3293 | "3D_FULL_SCREEN", | 3307 | "3D_FULL_SCREEN", |
| 3294 | "POWER_SAVING", | 3308 | "POWER_SAVING", |
| 3295 | "VIDEO", | 3309 | "VIDEO", |
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 0d298a0409f5..8cb831b6a016 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | |||
| @@ -705,7 +705,7 @@ enum PP_TABLE_VERSION { | |||
| 705 | /** | 705 | /** |
| 706 | * The main hardware manager structure. | 706 | * The main hardware manager structure. |
| 707 | */ | 707 | */ |
| 708 | #define Workload_Policy_Max 5 | 708 | #define Workload_Policy_Max 6 |
| 709 | 709 | ||
| 710 | struct pp_hwmgr { | 710 | struct pp_hwmgr { |
| 711 | void *adev; | 711 | void *adev; |
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 8e28e738cb52..e6403b9549f1 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c | |||
| @@ -98,6 +98,8 @@ | |||
| 98 | #define DP0_STARTVAL 0x064c | 98 | #define DP0_STARTVAL 0x064c |
| 99 | #define DP0_ACTIVEVAL 0x0650 | 99 | #define DP0_ACTIVEVAL 0x0650 |
| 100 | #define DP0_SYNCVAL 0x0654 | 100 | #define DP0_SYNCVAL 0x0654 |
| 101 | #define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15) | ||
| 102 | #define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31) | ||
| 101 | #define DP0_MISC 0x0658 | 103 | #define DP0_MISC 0x0658 |
| 102 | #define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */ | 104 | #define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */ |
| 103 | #define BPC_6 (0 << 5) | 105 | #define BPC_6 (0 << 5) |
| @@ -142,6 +144,8 @@ | |||
| 142 | #define DP0_LTLOOPCTRL 0x06d8 | 144 | #define DP0_LTLOOPCTRL 0x06d8 |
| 143 | #define DP0_SNKLTCTRL 0x06e4 | 145 | #define DP0_SNKLTCTRL 0x06e4 |
| 144 | 146 | ||
| 147 | #define DP1_SRCCTRL 0x07a0 | ||
| 148 | |||
| 145 | /* PHY */ | 149 | /* PHY */ |
| 146 | #define DP_PHY_CTRL 0x0800 | 150 | #define DP_PHY_CTRL 0x0800 |
| 147 | #define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */ | 151 | #define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */ |
| @@ -150,6 +154,7 @@ | |||
| 150 | #define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */ | 154 | #define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */ |
| 151 | #define PHY_RDY BIT(16) /* PHY Main Channels Ready */ | 155 | #define PHY_RDY BIT(16) /* PHY Main Channels Ready */ |
| 152 | #define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */ | 156 | #define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */ |
| 157 | #define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */ | ||
| 153 | #define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */ | 158 | #define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */ |
| 154 | #define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */ | 159 | #define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */ |
| 155 | 160 | ||
| @@ -540,6 +545,7 @@ static int tc_aux_link_setup(struct tc_data *tc) | |||
| 540 | unsigned long rate; | 545 | unsigned long rate; |
| 541 | u32 value; | 546 | u32 value; |
| 542 | int ret; | 547 | int ret; |
| 548 | u32 dp_phy_ctrl; | ||
| 543 | 549 | ||
| 544 | rate = clk_get_rate(tc->refclk); | 550 | rate = clk_get_rate(tc->refclk); |
| 545 | switch (rate) { | 551 | switch (rate) { |
| @@ -564,7 +570,10 @@ static int tc_aux_link_setup(struct tc_data *tc) | |||
| 564 | value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2; | 570 | value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2; |
| 565 | tc_write(SYS_PLLPARAM, value); | 571 | tc_write(SYS_PLLPARAM, value); |
| 566 | 572 | ||
| 567 | tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN); | 573 | dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN; |
| 574 | if (tc->link.base.num_lanes == 2) | ||
| 575 | dp_phy_ctrl |= PHY_2LANE; | ||
| 576 | tc_write(DP_PHY_CTRL, dp_phy_ctrl); | ||
| 568 | 577 | ||
| 569 | /* | 578 | /* |
| 570 | * Initially PLLs are in bypass. Force PLL parameter update, | 579 | * Initially PLLs are in bypass. Force PLL parameter update, |
| @@ -719,7 +728,9 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) | |||
| 719 | 728 | ||
| 720 | tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay)); | 729 | tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay)); |
| 721 | 730 | ||
| 722 | tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0)); | 731 | tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) | |
| 732 | ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) | | ||
| 733 | ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0)); | ||
| 723 | 734 | ||
| 724 | tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW | | 735 | tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW | |
| 725 | DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888); | 736 | DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888); |
| @@ -829,12 +840,11 @@ static int tc_main_link_setup(struct tc_data *tc) | |||
| 829 | if (!tc->mode) | 840 | if (!tc->mode) |
| 830 | return -EINVAL; | 841 | return -EINVAL; |
| 831 | 842 | ||
| 832 | /* from excel file - DP0_SrcCtrl */ | 843 | tc_write(DP0_SRCCTRL, tc_srcctrl(tc)); |
| 833 | tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B | | 844 | /* SSCG and BW27 on DP1 must be set to the same as on DP0 */ |
| 834 | DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 | | 845 | tc_write(DP1_SRCCTRL, |
| 835 | DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT); | 846 | (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) | |
| 836 | /* from excel file - DP1_SrcCtrl */ | 847 | ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0)); |
| 837 | tc_write(0x07a0, 0x00003083); | ||
| 838 | 848 | ||
| 839 | rate = clk_get_rate(tc->refclk); | 849 | rate = clk_get_rate(tc->refclk); |
| 840 | switch (rate) { | 850 | switch (rate) { |
| @@ -855,8 +865,11 @@ static int tc_main_link_setup(struct tc_data *tc) | |||
| 855 | } | 865 | } |
| 856 | value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2; | 866 | value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2; |
| 857 | tc_write(SYS_PLLPARAM, value); | 867 | tc_write(SYS_PLLPARAM, value); |
| 868 | |||
| 858 | /* Setup Main Link */ | 869 | /* Setup Main Link */ |
| 859 | dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN; | 870 | dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN; |
| 871 | if (tc->link.base.num_lanes == 2) | ||
| 872 | dp_phy_ctrl |= PHY_2LANE; | ||
| 860 | tc_write(DP_PHY_CTRL, dp_phy_ctrl); | 873 | tc_write(DP_PHY_CTRL, dp_phy_ctrl); |
| 861 | msleep(100); | 874 | msleep(100); |
| 862 | 875 | ||
| @@ -1105,10 +1118,20 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge, | |||
| 1105 | static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector, | 1118 | static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector, |
| 1106 | struct drm_display_mode *mode) | 1119 | struct drm_display_mode *mode) |
| 1107 | { | 1120 | { |
| 1121 | struct tc_data *tc = connector_to_tc(connector); | ||
| 1122 | u32 req, avail; | ||
| 1123 | u32 bits_per_pixel = 24; | ||
| 1124 | |||
| 1108 | /* DPI interface clock limitation: up to 154 MHz */ | 1125 | /* DPI interface clock limitation: up to 154 MHz */ |
| 1109 | if (mode->clock > 154000) | 1126 | if (mode->clock > 154000) |
| 1110 | return MODE_CLOCK_HIGH; | 1127 | return MODE_CLOCK_HIGH; |
| 1111 | 1128 | ||
| 1129 | req = mode->clock * bits_per_pixel / 8; | ||
| 1130 | avail = tc->link.base.num_lanes * tc->link.base.rate; | ||
| 1131 | |||
| 1132 | if (req > avail) | ||
| 1133 | return MODE_BAD; | ||
| 1134 | |||
| 1112 | return MODE_OK; | 1135 | return MODE_OK; |
| 1113 | } | 1136 | } |
| 1114 | 1137 | ||
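The new mode_valid check rejects modes whose pixel bandwidth exceeds what the negotiated link provides. A standalone sketch of the same arithmetic; units follow the driver's convention (pixel clock in kHz, per-lane link rate as reported by the DP helpers), and the sample figures are illustrative:

#include <stdbool.h>
#include <stdio.h>

/* Required vs. available link bandwidth, as in tc_connector_mode_valid(). */
static bool mode_fits_link(unsigned int pixel_clock_khz,
			   unsigned int bits_per_pixel,
			   unsigned int num_lanes,
			   unsigned int link_rate)
{
	unsigned int req = pixel_clock_khz * bits_per_pixel / 8;
	unsigned int avail = num_lanes * link_rate;

	return req <= avail;
}

int main(void)
{
	/* 1920x1080@60 (~148.5 MHz pixel clock) over 1 lane of RBR: too much. */
	printf("1 lane RBR: %d\n", mode_fits_link(148500, 24, 1, 162000));
	/* Same mode over 2 lanes of HBR: fits. */
	printf("2 lane HBR: %d\n", mode_fits_link(148500, 24, 2, 270000));
	return 0;
}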
| @@ -1186,7 +1209,8 @@ static int tc_bridge_attach(struct drm_bridge *bridge) | |||
| 1186 | /* Create eDP connector */ | 1209 | /* Create eDP connector */ |
| 1187 | drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs); | 1210 | drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs); |
| 1188 | ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs, | 1211 | ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs, |
| 1189 | DRM_MODE_CONNECTOR_eDP); | 1212 | tc->panel ? DRM_MODE_CONNECTOR_eDP : |
| 1213 | DRM_MODE_CONNECTOR_DisplayPort); | ||
| 1190 | if (ret) | 1214 | if (ret) |
| 1191 | return ret; | 1215 | return ret; |
| 1192 | 1216 | ||
| @@ -1195,6 +1219,10 @@ static int tc_bridge_attach(struct drm_bridge *bridge) | |||
| 1195 | 1219 | ||
| 1196 | drm_display_info_set_bus_formats(&tc->connector.display_info, | 1220 | drm_display_info_set_bus_formats(&tc->connector.display_info, |
| 1197 | &bus_format, 1); | 1221 | &bus_format, 1); |
| 1222 | tc->connector.display_info.bus_flags = | ||
| 1223 | DRM_BUS_FLAG_DE_HIGH | | ||
| 1224 | DRM_BUS_FLAG_PIXDATA_NEGEDGE | | ||
| 1225 | DRM_BUS_FLAG_SYNC_NEGEDGE; | ||
| 1198 | drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder); | 1226 | drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder); |
| 1199 | 1227 | ||
| 1200 | return 0; | 1228 | return 0; |
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index c40889888a16..9a1f41adfc67 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c | |||
| @@ -1296,12 +1296,11 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, | |||
| 1296 | (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) | 1296 | (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) |
| 1297 | return -EINVAL; | 1297 | return -EINVAL; |
| 1298 | 1298 | ||
| 1299 | drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); | ||
| 1300 | |||
| 1301 | state = drm_atomic_state_alloc(dev); | 1299 | state = drm_atomic_state_alloc(dev); |
| 1302 | if (!state) | 1300 | if (!state) |
| 1303 | return -ENOMEM; | 1301 | return -ENOMEM; |
| 1304 | 1302 | ||
| 1303 | drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); | ||
| 1305 | state->acquire_ctx = &ctx; | 1304 | state->acquire_ctx = &ctx; |
| 1306 | state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); | 1305 | state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); |
| 1307 | 1306 | ||
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 2d6c491a0542..516e82d0ed50 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c | |||
| @@ -1273,6 +1273,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = { | |||
| 1273 | { OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) }, | 1273 | { OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) }, |
| 1274 | /* LG LP140WF6-SPM1 eDP panel */ | 1274 | /* LG LP140WF6-SPM1 eDP panel */ |
| 1275 | { OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) }, | 1275 | { OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) }, |
| 1276 | /* Apple panels need some additional handling to support PSR */ | ||
| 1277 | { OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) } | ||
| 1276 | }; | 1278 | }; |
| 1277 | 1279 | ||
| 1278 | #undef OUI | 1280 | #undef OUI |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index d3af098b0922..d73703a695e8 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
| @@ -1621,6 +1621,64 @@ static bool drm_fb_pixel_format_equal(const struct fb_var_screeninfo *var_1, | |||
| 1621 | var_1->transp.msb_right == var_2->transp.msb_right; | 1621 | var_1->transp.msb_right == var_2->transp.msb_right; |
| 1622 | } | 1622 | } |
| 1623 | 1623 | ||
| 1624 | static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var, | ||
| 1625 | u8 depth) | ||
| 1626 | { | ||
| 1627 | switch (depth) { | ||
| 1628 | case 8: | ||
| 1629 | var->red.offset = 0; | ||
| 1630 | var->green.offset = 0; | ||
| 1631 | var->blue.offset = 0; | ||
| 1632 | var->red.length = 8; /* 8bit DAC */ | ||
| 1633 | var->green.length = 8; | ||
| 1634 | var->blue.length = 8; | ||
| 1635 | var->transp.offset = 0; | ||
| 1636 | var->transp.length = 0; | ||
| 1637 | break; | ||
| 1638 | case 15: | ||
| 1639 | var->red.offset = 10; | ||
| 1640 | var->green.offset = 5; | ||
| 1641 | var->blue.offset = 0; | ||
| 1642 | var->red.length = 5; | ||
| 1643 | var->green.length = 5; | ||
| 1644 | var->blue.length = 5; | ||
| 1645 | var->transp.offset = 15; | ||
| 1646 | var->transp.length = 1; | ||
| 1647 | break; | ||
| 1648 | case 16: | ||
| 1649 | var->red.offset = 11; | ||
| 1650 | var->green.offset = 5; | ||
| 1651 | var->blue.offset = 0; | ||
| 1652 | var->red.length = 5; | ||
| 1653 | var->green.length = 6; | ||
| 1654 | var->blue.length = 5; | ||
| 1655 | var->transp.offset = 0; | ||
| 1656 | break; | ||
| 1657 | case 24: | ||
| 1658 | var->red.offset = 16; | ||
| 1659 | var->green.offset = 8; | ||
| 1660 | var->blue.offset = 0; | ||
| 1661 | var->red.length = 8; | ||
| 1662 | var->green.length = 8; | ||
| 1663 | var->blue.length = 8; | ||
| 1664 | var->transp.offset = 0; | ||
| 1665 | var->transp.length = 0; | ||
| 1666 | break; | ||
| 1667 | case 32: | ||
| 1668 | var->red.offset = 16; | ||
| 1669 | var->green.offset = 8; | ||
| 1670 | var->blue.offset = 0; | ||
| 1671 | var->red.length = 8; | ||
| 1672 | var->green.length = 8; | ||
| 1673 | var->blue.length = 8; | ||
| 1674 | var->transp.offset = 24; | ||
| 1675 | var->transp.length = 8; | ||
| 1676 | break; | ||
| 1677 | default: | ||
| 1678 | break; | ||
| 1679 | } | ||
| 1680 | } | ||
| 1681 | |||
| 1624 | /** | 1682 | /** |
| 1625 | * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var | 1683 | * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var |
| 1626 | * @var: screeninfo to check | 1684 | * @var: screeninfo to check |
| @@ -1632,9 +1690,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, | |||
| 1632 | struct drm_fb_helper *fb_helper = info->par; | 1690 | struct drm_fb_helper *fb_helper = info->par; |
| 1633 | struct drm_framebuffer *fb = fb_helper->fb; | 1691 | struct drm_framebuffer *fb = fb_helper->fb; |
| 1634 | 1692 | ||
| 1635 | if (var->pixclock != 0 || in_dbg_master()) | 1693 | if (in_dbg_master()) |
| 1636 | return -EINVAL; | 1694 | return -EINVAL; |
| 1637 | 1695 | ||
| 1696 | if (var->pixclock != 0) { | ||
| 1697 | DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n"); | ||
| 1698 | var->pixclock = 0; | ||
| 1699 | } | ||
| 1700 | |||
| 1638 | if ((drm_format_info_block_width(fb->format, 0) > 1) || | 1701 | if ((drm_format_info_block_width(fb->format, 0) > 1) || |
| 1639 | (drm_format_info_block_height(fb->format, 0) > 1)) | 1702 | (drm_format_info_block_height(fb->format, 0) > 1)) |
| 1640 | return -EINVAL; | 1703 | return -EINVAL; |
| @@ -1655,6 +1718,20 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, | |||
| 1655 | } | 1718 | } |
| 1656 | 1719 | ||
| 1657 | /* | 1720 | /* |
| 1721 | * Workaround for SDL 1.2, which is known to be setting all pixel format | ||
| 1722 | * fields values to zero in some cases. We treat this situation as a | ||
| 1723 | * kind of "use some reasonable autodetected values". | ||
| 1724 | */ | ||
| 1725 | if (!var->red.offset && !var->green.offset && | ||
| 1726 | !var->blue.offset && !var->transp.offset && | ||
| 1727 | !var->red.length && !var->green.length && | ||
| 1728 | !var->blue.length && !var->transp.length && | ||
| 1729 | !var->red.msb_right && !var->green.msb_right && | ||
| 1730 | !var->blue.msb_right && !var->transp.msb_right) { | ||
| 1731 | drm_fb_helper_fill_pixel_fmt(var, fb->format->depth); | ||
| 1732 | } | ||
| 1733 | |||
| 1734 | /* | ||
| 1658 | * drm fbdev emulation doesn't support changing the pixel format at all, | 1735 | * drm fbdev emulation doesn't support changing the pixel format at all, |
| 1659 | * so reject all pixel format changing requests. | 1736 | * so reject all pixel format changing requests. |
| 1660 | */ | 1737 | */ |
| @@ -1967,59 +2044,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe | |||
| 1967 | info->var.yoffset = 0; | 2044 | info->var.yoffset = 0; |
| 1968 | info->var.activate = FB_ACTIVATE_NOW; | 2045 | info->var.activate = FB_ACTIVATE_NOW; |
| 1969 | 2046 | ||
| 1970 | switch (fb->format->depth) { | 2047 | drm_fb_helper_fill_pixel_fmt(&info->var, fb->format->depth); |
| 1971 | case 8: | ||
| 1972 | info->var.red.offset = 0; | ||
| 1973 | info->var.green.offset = 0; | ||
| 1974 | info->var.blue.offset = 0; | ||
| 1975 | info->var.red.length = 8; /* 8bit DAC */ | ||
| 1976 | info->var.green.length = 8; | ||
| 1977 | info->var.blue.length = 8; | ||
| 1978 | info->var.transp.offset = 0; | ||
| 1979 | info->var.transp.length = 0; | ||
| 1980 | break; | ||
| 1981 | case 15: | ||
| 1982 | info->var.red.offset = 10; | ||
| 1983 | info->var.green.offset = 5; | ||
| 1984 | info->var.blue.offset = 0; | ||
| 1985 | info->var.red.length = 5; | ||
| 1986 | info->var.green.length = 5; | ||
| 1987 | info->var.blue.length = 5; | ||
| 1988 | info->var.transp.offset = 15; | ||
| 1989 | info->var.transp.length = 1; | ||
| 1990 | break; | ||
| 1991 | case 16: | ||
| 1992 | info->var.red.offset = 11; | ||
| 1993 | info->var.green.offset = 5; | ||
| 1994 | info->var.blue.offset = 0; | ||
| 1995 | info->var.red.length = 5; | ||
| 1996 | info->var.green.length = 6; | ||
| 1997 | info->var.blue.length = 5; | ||
| 1998 | info->var.transp.offset = 0; | ||
| 1999 | break; | ||
| 2000 | case 24: | ||
| 2001 | info->var.red.offset = 16; | ||
| 2002 | info->var.green.offset = 8; | ||
| 2003 | info->var.blue.offset = 0; | ||
| 2004 | info->var.red.length = 8; | ||
| 2005 | info->var.green.length = 8; | ||
| 2006 | info->var.blue.length = 8; | ||
| 2007 | info->var.transp.offset = 0; | ||
| 2008 | info->var.transp.length = 0; | ||
| 2009 | break; | ||
| 2010 | case 32: | ||
| 2011 | info->var.red.offset = 16; | ||
| 2012 | info->var.green.offset = 8; | ||
| 2013 | info->var.blue.offset = 0; | ||
| 2014 | info->var.red.length = 8; | ||
| 2015 | info->var.green.length = 8; | ||
| 2016 | info->var.blue.length = 8; | ||
| 2017 | info->var.transp.offset = 24; | ||
| 2018 | info->var.transp.length = 8; | ||
| 2019 | break; | ||
| 2020 | default: | ||
| 2021 | break; | ||
| 2022 | } | ||
| 2023 | 2048 | ||
| 2024 | info->var.xres = fb_width; | 2049 | info->var.xres = fb_width; |
| 2025 | info->var.yres = fb_height; | 2050 | info->var.yres = fb_height; |
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c index 99cba8ea5d82..5df1256618cc 100644 --- a/drivers/gpu/drm/drm_lease.c +++ b/drivers/gpu/drm/drm_lease.c | |||
| @@ -528,7 +528,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, | |||
| 528 | 528 | ||
| 529 | object_count = cl->object_count; | 529 | object_count = cl->object_count; |
| 530 | 530 | ||
| 531 | object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), object_count * sizeof(__u32)); | 531 | object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), |
| 532 | array_size(object_count, sizeof(__u32))); | ||
| 532 | if (IS_ERR(object_ids)) | 533 | if (IS_ERR(object_ids)) |
| 533 | return PTR_ERR(object_ids); | 534 | return PTR_ERR(object_ids); |
| 534 | 535 | ||
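Switching to array_size() makes the user-controlled multiplication saturate instead of wrapping, so memdup_user() fails cleanly rather than allocating an undersized buffer. A user-space sketch of the idea, using __builtin_mul_overflow as a stand-in for the kernel helper:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's array_size(): saturate to SIZE_MAX on overflow
 * so the subsequent allocation fails instead of being too small. */
static size_t array_size_safe(size_t n, size_t elem)
{
	size_t bytes;

	if (__builtin_mul_overflow(n, elem, &bytes))
		return SIZE_MAX;
	return bytes;
}

int main(void)
{
	size_t object_count = SIZE_MAX / 2 + 2;

	/* The naive product wraps around to a tiny value... */
	printf("naive:     %zu\n", object_count * sizeof(uint32_t));
	/* ...while the saturating helper pins it at SIZE_MAX. */
	printf("saturated: %zu\n", array_size_safe(object_count, sizeof(uint32_t)));
	return 0;
}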
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c index cd9bc0ce9be0..004191d01772 100644 --- a/drivers/gpu/drm/drm_mode_object.c +++ b/drivers/gpu/drm/drm_mode_object.c | |||
| @@ -459,11 +459,11 @@ static int set_property_atomic(struct drm_mode_object *obj, | |||
| 459 | struct drm_modeset_acquire_ctx ctx; | 459 | struct drm_modeset_acquire_ctx ctx; |
| 460 | int ret; | 460 | int ret; |
| 461 | 461 | ||
| 462 | drm_modeset_acquire_init(&ctx, 0); | ||
| 463 | |||
| 464 | state = drm_atomic_state_alloc(dev); | 462 | state = drm_atomic_state_alloc(dev); |
| 465 | if (!state) | 463 | if (!state) |
| 466 | return -ENOMEM; | 464 | return -ENOMEM; |
| 465 | |||
| 466 | drm_modeset_acquire_init(&ctx, 0); | ||
| 467 | state->acquire_ctx = &ctx; | 467 | state->acquire_ctx = &ctx; |
| 468 | retry: | 468 | retry: |
| 469 | if (prop == state->dev->mode_config.dpms_property) { | 469 | if (prop == state->dev->mode_config.dpms_property) { |
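This hunk and the drm_mode_atomic_ioctl one above apply the same fix: the acquire context is only initialized after the allocation that can fail, so the early -ENOMEM return no longer leaves an initialized context behind. A sketch of the ordering with stand-in init/fini helpers, not the DRM API:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx { int initialized; };
struct state { int dummy; };

static void ctx_init(struct ctx *c) { c->initialized = 1; }
static void ctx_fini(struct ctx *c) { c->initialized = 0; }

static int do_request(void)
{
	struct ctx ctx;
	struct state *state = malloc(sizeof(*state));

	/* Allocation failure returns before anything needs unwinding. */
	if (!state)
		return -ENOMEM;

	ctx_init(&ctx); /* initialized only once the allocation succeeded */
	/* ... use state with ctx ... */
	ctx_fini(&ctx);
	free(state);
	return 0;
}

int main(void)
{
	printf("ret=%d\n", do_request());
	return 0;
}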
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 24a750436559..f91e02c87fd8 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c | |||
| @@ -758,7 +758,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode) | |||
| 758 | if (mode->hsync) | 758 | if (mode->hsync) |
| 759 | return mode->hsync; | 759 | return mode->hsync; |
| 760 | 760 | ||
| 761 | if (mode->htotal < 0) | 761 | if (mode->htotal <= 0) |
| 762 | return 0; | 762 | return 0; |
| 763 | 763 | ||
| 764 | calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */ | 764 | calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */ |
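The drm_modes fix above matters because htotal is used as a divisor on the very next line: a mode with htotal == 0 passed the old "< 0" check and caused a divide-by-zero. A hedged sketch of the guarded computation (simplified, without the rounding the real helper performs):

    #include <drm/drm_modes.h>

    /* Horizontal sync rate in Hz, derived from the pixel clock. */
    static int mode_hsync_hz(const struct drm_display_mode *mode)
    {
            if (mode->htotal <= 0)  /* zero or negative: refuse to divide */
                    return 0;

            return (mode->clock * 1000) / mode->htotal; /* clock is in kHz */
    }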
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index a9d9df6c85ad..693748ad8b88 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c | |||
| @@ -61,8 +61,9 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali | |||
| 61 | return NULL; | 61 | return NULL; |
| 62 | 62 | ||
| 63 | dmah->size = size; | 63 | dmah->size = size; |
| 64 | dmah->vaddr = dma_zalloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, | 64 | dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, |
| 65 | GFP_KERNEL | __GFP_COMP); | 65 | &dmah->busaddr, |
| 66 | GFP_KERNEL | __GFP_COMP); | ||
| 66 | 67 | ||
| 67 | if (dmah->vaddr == NULL) { | 68 | if (dmah->vaddr == NULL) { |
| 68 | kfree(dmah); | 69 | kfree(dmah); |
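The drm_pci hunk is part of the tree-wide removal of dma_zalloc_coherent(): on recent kernels dma_alloc_coherent() already returns zeroed memory, so the zeroing wrapper adds nothing. A minimal sketch of the equivalent call (illustrative helper name, not from the patch):

    #include <linux/dma-mapping.h>

    /* Coherent DMA buffer; the returned memory is already zeroed. */
    static void *alloc_coherent_buf(struct device *dev, size_t size,
                                    dma_addr_t *busaddr)
    {
            return dma_alloc_coherent(dev, size, busaddr,
                                      GFP_KERNEL | __GFP_COMP);
    }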
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index b5475c91e2ef..e9f343b124b0 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c | |||
| @@ -2799,6 +2799,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) | |||
| 2799 | MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2799 | MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
| 2800 | MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2800 | MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
| 2801 | MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2801 | MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
| 2802 | MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
| 2802 | return 0; | 2803 | return 0; |
| 2803 | } | 2804 | } |
| 2804 | 2805 | ||
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 5af11cf1b482..e1675a00df12 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h | |||
| @@ -41,7 +41,7 @@ struct intel_gvt_mpt { | |||
| 41 | int (*host_init)(struct device *dev, void *gvt, const void *ops); | 41 | int (*host_init)(struct device *dev, void *gvt, const void *ops); |
| 42 | void (*host_exit)(struct device *dev, void *gvt); | 42 | void (*host_exit)(struct device *dev, void *gvt); |
| 43 | int (*attach_vgpu)(void *vgpu, unsigned long *handle); | 43 | int (*attach_vgpu)(void *vgpu, unsigned long *handle); |
| 44 | void (*detach_vgpu)(unsigned long handle); | 44 | void (*detach_vgpu)(void *vgpu); |
| 45 | int (*inject_msi)(unsigned long handle, u32 addr, u16 data); | 45 | int (*inject_msi)(unsigned long handle, u32 addr, u16 data); |
| 46 | unsigned long (*from_virt_to_mfn)(void *p); | 46 | unsigned long (*from_virt_to_mfn)(void *p); |
| 47 | int (*enable_page_track)(unsigned long handle, u64 gfn); | 47 | int (*enable_page_track)(unsigned long handle, u64 gfn); |
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index c1072143da1d..dd3dfd00f4e6 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c | |||
| @@ -996,7 +996,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) | |||
| 996 | { | 996 | { |
| 997 | unsigned int index; | 997 | unsigned int index; |
| 998 | u64 virtaddr; | 998 | u64 virtaddr; |
| 999 | unsigned long req_size, pgoff = 0; | 999 | unsigned long req_size, pgoff, req_start; |
| 1000 | pgprot_t pg_prot; | 1000 | pgprot_t pg_prot; |
| 1001 | struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); | 1001 | struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); |
| 1002 | 1002 | ||
| @@ -1014,7 +1014,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) | |||
| 1014 | pg_prot = vma->vm_page_prot; | 1014 | pg_prot = vma->vm_page_prot; |
| 1015 | virtaddr = vma->vm_start; | 1015 | virtaddr = vma->vm_start; |
| 1016 | req_size = vma->vm_end - vma->vm_start; | 1016 | req_size = vma->vm_end - vma->vm_start; |
| 1017 | pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT; | 1017 | pgoff = vma->vm_pgoff & |
| 1018 | ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); | ||
| 1019 | req_start = pgoff << PAGE_SHIFT; | ||
| 1020 | |||
| 1021 | if (!intel_vgpu_in_aperture(vgpu, req_start)) | ||
| 1022 | return -EINVAL; | ||
| 1023 | if (req_start + req_size > | ||
| 1024 | vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu)) | ||
| 1025 | return -EINVAL; | ||
| 1026 | |||
| 1027 | pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff; | ||
| 1018 | 1028 | ||
| 1019 | return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot); | 1029 | return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot); |
| 1020 | } | 1030 | } |
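For readers of the kvmgt mmap change: vfio-pci encodes the region index in the upper bits of the file offset (VFIO_PCI_OFFSET_SHIFT), so masking vm_pgoff yields the page offset within the BAR. The new code validates that offset against the vGPU's slice of the aperture before calling remap_pfn_range(), instead of unconditionally mapping from the aperture base. Restated as a standalone helper for readability (the wrapper is illustrative; the checks mirror the hunk above):

    /* Reject mmap requests that fall outside this vGPU's aperture window. */
    static int intel_vgpu_validate_mmap(struct intel_vgpu *vgpu,
                                        struct vm_area_struct *vma)
    {
            unsigned long pgoff, req_start, req_size;

            /* low bits of vm_pgoff = page offset inside the BAR/region */
            pgoff = vma->vm_pgoff &
                    ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
            req_start = pgoff << PAGE_SHIFT;
            req_size = vma->vm_end - vma->vm_start;

            if (!intel_vgpu_in_aperture(vgpu, req_start))
                    return -EINVAL;
            if (req_start + req_size >
                vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
                    return -EINVAL;

            return 0;
    }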
| @@ -1662,9 +1672,21 @@ static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle) | |||
| 1662 | return 0; | 1672 | return 0; |
| 1663 | } | 1673 | } |
| 1664 | 1674 | ||
| 1665 | static void kvmgt_detach_vgpu(unsigned long handle) | 1675 | static void kvmgt_detach_vgpu(void *p_vgpu) |
| 1666 | { | 1676 | { |
| 1667 | /* nothing to do here */ | 1677 | int i; |
| 1678 | struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; | ||
| 1679 | |||
| 1680 | if (!vgpu->vdev.region) | ||
| 1681 | return; | ||
| 1682 | |||
| 1683 | for (i = 0; i < vgpu->vdev.num_regions; i++) | ||
| 1684 | if (vgpu->vdev.region[i].ops->release) | ||
| 1685 | vgpu->vdev.region[i].ops->release(vgpu, | ||
| 1686 | &vgpu->vdev.region[i]); | ||
| 1687 | vgpu->vdev.num_regions = 0; | ||
| 1688 | kfree(vgpu->vdev.region); | ||
| 1689 | vgpu->vdev.region = NULL; | ||
| 1668 | } | 1690 | } |
| 1669 | 1691 | ||
| 1670 | static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) | 1692 | static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) |
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 67f19992b226..3ed34123d8d1 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h | |||
| @@ -101,7 +101,7 @@ static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu) | |||
| 101 | if (!intel_gvt_host.mpt->detach_vgpu) | 101 | if (!intel_gvt_host.mpt->detach_vgpu) |
| 102 | return; | 102 | return; |
| 103 | 103 | ||
| 104 | intel_gvt_host.mpt->detach_vgpu(vgpu->handle); | 104 | intel_gvt_host.mpt->detach_vgpu(vgpu); |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | #define MSI_CAP_CONTROL(offset) (offset + 2) | 107 | #define MSI_CAP_CONTROL(offset) (offset + 2) |
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 1ad8c5e1455d..55bb7885e228 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c | |||
| @@ -332,6 +332,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) | |||
| 332 | 332 | ||
| 333 | i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); | 333 | i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); |
| 334 | i915_gem_object_put(wa_ctx->indirect_ctx.obj); | 334 | i915_gem_object_put(wa_ctx->indirect_ctx.obj); |
| 335 | |||
| 336 | wa_ctx->indirect_ctx.obj = NULL; | ||
| 337 | wa_ctx->indirect_ctx.shadow_va = NULL; | ||
| 335 | } | 338 | } |
| 336 | 339 | ||
| 337 | static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, | 340 | static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, |
| @@ -356,6 +359,33 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, | |||
| 356 | return 0; | 359 | return 0; |
| 357 | } | 360 | } |
| 358 | 361 | ||
| 362 | static int | ||
| 363 | intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload) | ||
| 364 | { | ||
| 365 | struct intel_vgpu *vgpu = workload->vgpu; | ||
| 366 | struct intel_vgpu_submission *s = &vgpu->submission; | ||
| 367 | struct i915_gem_context *shadow_ctx = s->shadow_ctx; | ||
| 368 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | ||
| 369 | struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id]; | ||
| 370 | struct i915_request *rq; | ||
| 371 | int ret = 0; | ||
| 372 | |||
| 373 | lockdep_assert_held(&dev_priv->drm.struct_mutex); | ||
| 374 | |||
| 375 | if (workload->req) | ||
| 376 | goto out; | ||
| 377 | |||
| 378 | rq = i915_request_alloc(engine, shadow_ctx); | ||
| 379 | if (IS_ERR(rq)) { | ||
| 380 | gvt_vgpu_err("fail to allocate gem request\n"); | ||
| 381 | ret = PTR_ERR(rq); | ||
| 382 | goto out; | ||
| 383 | } | ||
| 384 | workload->req = i915_request_get(rq); | ||
| 385 | out: | ||
| 386 | return ret; | ||
| 387 | } | ||
| 388 | |||
| 359 | /** | 389 | /** |
| 360 | * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and | 390 | * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and |
| 361 | * shadow it as well, include ringbuffer,wa_ctx and ctx. | 391 | * shadow it as well, include ringbuffer,wa_ctx and ctx. |
| @@ -372,12 +402,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) | |||
| 372 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | 402 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; |
| 373 | struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id]; | 403 | struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id]; |
| 374 | struct intel_context *ce; | 404 | struct intel_context *ce; |
| 375 | struct i915_request *rq; | ||
| 376 | int ret; | 405 | int ret; |
| 377 | 406 | ||
| 378 | lockdep_assert_held(&dev_priv->drm.struct_mutex); | 407 | lockdep_assert_held(&dev_priv->drm.struct_mutex); |
| 379 | 408 | ||
| 380 | if (workload->req) | 409 | if (workload->shadow) |
| 381 | return 0; | 410 | return 0; |
| 382 | 411 | ||
| 383 | ret = set_context_ppgtt_from_shadow(workload, shadow_ctx); | 412 | ret = set_context_ppgtt_from_shadow(workload, shadow_ctx); |
| @@ -417,22 +446,8 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) | |||
| 417 | goto err_shadow; | 446 | goto err_shadow; |
| 418 | } | 447 | } |
| 419 | 448 | ||
| 420 | rq = i915_request_alloc(engine, shadow_ctx); | 449 | workload->shadow = true; |
| 421 | if (IS_ERR(rq)) { | ||
| 422 | gvt_vgpu_err("fail to allocate gem request\n"); | ||
| 423 | ret = PTR_ERR(rq); | ||
| 424 | goto err_shadow; | ||
| 425 | } | ||
| 426 | workload->req = i915_request_get(rq); | ||
| 427 | |||
| 428 | ret = populate_shadow_context(workload); | ||
| 429 | if (ret) | ||
| 430 | goto err_req; | ||
| 431 | |||
| 432 | return 0; | 450 | return 0; |
| 433 | err_req: | ||
| 434 | rq = fetch_and_zero(&workload->req); | ||
| 435 | i915_request_put(rq); | ||
| 436 | err_shadow: | 451 | err_shadow: |
| 437 | release_shadow_wa_ctx(&workload->wa_ctx); | 452 | release_shadow_wa_ctx(&workload->wa_ctx); |
| 438 | err_unpin: | 453 | err_unpin: |
| @@ -671,23 +686,31 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) | |||
| 671 | mutex_lock(&vgpu->vgpu_lock); | 686 | mutex_lock(&vgpu->vgpu_lock); |
| 672 | mutex_lock(&dev_priv->drm.struct_mutex); | 687 | mutex_lock(&dev_priv->drm.struct_mutex); |
| 673 | 688 | ||
| 689 | ret = intel_gvt_workload_req_alloc(workload); | ||
| 690 | if (ret) | ||
| 691 | goto err_req; | ||
| 692 | |||
| 674 | ret = intel_gvt_scan_and_shadow_workload(workload); | 693 | ret = intel_gvt_scan_and_shadow_workload(workload); |
| 675 | if (ret) | 694 | if (ret) |
| 676 | goto out; | 695 | goto out; |
| 677 | 696 | ||
| 678 | ret = prepare_workload(workload); | 697 | ret = populate_shadow_context(workload); |
| 698 | if (ret) { | ||
| 699 | release_shadow_wa_ctx(&workload->wa_ctx); | ||
| 700 | goto out; | ||
| 701 | } | ||
| 679 | 702 | ||
| 703 | ret = prepare_workload(workload); | ||
| 680 | out: | 704 | out: |
| 681 | if (ret) | ||
| 682 | workload->status = ret; | ||
| 683 | |||
| 684 | if (!IS_ERR_OR_NULL(workload->req)) { | 705 | if (!IS_ERR_OR_NULL(workload->req)) { |
| 685 | gvt_dbg_sched("ring id %d submit workload to i915 %p\n", | 706 | gvt_dbg_sched("ring id %d submit workload to i915 %p\n", |
| 686 | ring_id, workload->req); | 707 | ring_id, workload->req); |
| 687 | i915_request_add(workload->req); | 708 | i915_request_add(workload->req); |
| 688 | workload->dispatched = true; | 709 | workload->dispatched = true; |
| 689 | } | 710 | } |
| 690 | 711 | err_req: | |
| 712 | if (ret) | ||
| 713 | workload->status = ret; | ||
| 691 | mutex_unlock(&dev_priv->drm.struct_mutex); | 714 | mutex_unlock(&dev_priv->drm.struct_mutex); |
| 692 | mutex_unlock(&vgpu->vgpu_lock); | 715 | mutex_unlock(&vgpu->vgpu_lock); |
| 693 | return ret; | 716 | return ret; |
| @@ -891,11 +914,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) | |||
| 891 | 914 | ||
| 892 | list_del_init(&workload->list); | 915 | list_del_init(&workload->list); |
| 893 | 916 | ||
| 894 | if (!workload->status) { | ||
| 895 | release_shadow_batch_buffer(workload); | ||
| 896 | release_shadow_wa_ctx(&workload->wa_ctx); | ||
| 897 | } | ||
| 898 | |||
| 899 | if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { | 917 | if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { |
| 900 | /* if workload->status is not successful means HW GPU | 918 | /* if workload->status is not successful means HW GPU |
| 901 | * has occurred GPU hang or something wrong with i915/GVT, | 919 | * has occurred GPU hang or something wrong with i915/GVT, |
| @@ -1263,6 +1281,9 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload) | |||
| 1263 | { | 1281 | { |
| 1264 | struct intel_vgpu_submission *s = &workload->vgpu->submission; | 1282 | struct intel_vgpu_submission *s = &workload->vgpu->submission; |
| 1265 | 1283 | ||
| 1284 | release_shadow_batch_buffer(workload); | ||
| 1285 | release_shadow_wa_ctx(&workload->wa_ctx); | ||
| 1286 | |||
| 1266 | if (workload->shadow_mm) | 1287 | if (workload->shadow_mm) |
| 1267 | intel_vgpu_mm_put(workload->shadow_mm); | 1288 | intel_vgpu_mm_put(workload->shadow_mm); |
| 1268 | 1289 | ||
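Taken together, the scheduler.c changes split request allocation out of the scan/shadow step: shadowing now happens once per workload (tracked by the new workload->shadow flag) and survives until intel_vgpu_destroy_workload(), while an i915 request is allocated on every dispatch. The resulting dispatch order, condensed from the hunks above (error handling elided):

    static int dispatch_order_sketch(struct intel_vgpu_workload *workload)
    {
            int ret;

            /* 1. allocate the i915 request for this run */
            ret = intel_gvt_workload_req_alloc(workload);
            if (ret)
                    return ret;

            /* 2. scan and shadow the guest submission; no-op if already done */
            ret = intel_gvt_scan_and_shadow_workload(workload);
            if (ret)
                    return ret;

            /* 3. refresh the shadow context for this dispatch */
            ret = populate_shadow_context(workload);
            if (ret) {
                    release_shadow_wa_ctx(&workload->wa_ctx);
                    return ret;
            }

            /* 4. pin buffers and hand the request to i915 */
            return prepare_workload(workload);
    }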
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index ca5529d0e48e..2065cba59aab 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h | |||
| @@ -83,6 +83,7 @@ struct intel_vgpu_workload { | |||
| 83 | struct i915_request *req; | 83 | struct i915_request *req; |
| 84 | /* if this workload has been dispatched to i915? */ | 84 | /* if this workload has been dispatched to i915? */ |
| 85 | bool dispatched; | 85 | bool dispatched; |
| 86 | bool shadow; /* if workload has done shadow of guest request */ | ||
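(The shadow flag above is what lets intel_gvt_scan_and_shadow_workload() become idempotent, as shown in the dispatch sketch following the scheduler.c hunks.)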
| 86 | int status; | 87 | int status; |
| 87 | 88 | ||
| 88 | struct intel_vgpu_mm *shadow_mm; | 89 | struct intel_vgpu_mm *shadow_mm; |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 38dcee1ca062..40a61ef9aac1 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
| @@ -984,8 +984,8 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file) | |||
| 984 | intel_runtime_pm_get(i915); | 984 | intel_runtime_pm_get(i915); |
| 985 | gpu = i915_capture_gpu_state(i915); | 985 | gpu = i915_capture_gpu_state(i915); |
| 986 | intel_runtime_pm_put(i915); | 986 | intel_runtime_pm_put(i915); |
| 987 | if (!gpu) | 987 | if (IS_ERR(gpu)) |
| 988 | return -ENOMEM; | 988 | return PTR_ERR(gpu); |
| 989 | 989 | ||
| 990 | file->private_data = gpu; | 990 | file->private_data = gpu; |
| 991 | return 0; | 991 | return 0; |
| @@ -1018,7 +1018,13 @@ i915_error_state_write(struct file *filp, | |||
| 1018 | 1018 | ||
| 1019 | static int i915_error_state_open(struct inode *inode, struct file *file) | 1019 | static int i915_error_state_open(struct inode *inode, struct file *file) |
| 1020 | { | 1020 | { |
| 1021 | file->private_data = i915_first_error_state(inode->i_private); | 1021 | struct i915_gpu_state *error; |
| 1022 | |||
| 1023 | error = i915_first_error_state(inode->i_private); | ||
| 1024 | if (IS_ERR(error)) | ||
| 1025 | return PTR_ERR(error); | ||
| 1026 | |||
| 1027 | file->private_data = error; | ||
| 1022 | return 0; | 1028 | return 0; |
| 1023 | } | 1029 | } |
| 1024 | 1030 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 216f52b744a6..c882ea94172c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -1824,6 +1824,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |||
| 1824 | return 0; | 1824 | return 0; |
| 1825 | } | 1825 | } |
| 1826 | 1826 | ||
| 1827 | static inline bool | ||
| 1828 | __vma_matches(struct vm_area_struct *vma, struct file *filp, | ||
| 1829 | unsigned long addr, unsigned long size) | ||
| 1830 | { | ||
| 1831 | if (vma->vm_file != filp) | ||
| 1832 | return false; | ||
| 1833 | |||
| 1834 | return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size; | ||
| 1835 | } | ||
| 1836 | |||
| 1827 | /** | 1837 | /** |
| 1828 | * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address | 1838 | * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address |
| 1829 | * it is mapped to. | 1839 | * it is mapped to. |
| @@ -1882,7 +1892,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
| 1882 | return -EINTR; | 1892 | return -EINTR; |
| 1883 | } | 1893 | } |
| 1884 | vma = find_vma(mm, addr); | 1894 | vma = find_vma(mm, addr); |
| 1885 | if (vma) | 1895 | if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) |
| 1886 | vma->vm_page_prot = | 1896 | vma->vm_page_prot = |
| 1887 | pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); | 1897 | pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); |
| 1888 | else | 1898 | else |
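The __vma_matches() check closes a race in the gem mmap ioctl: between vm_mmap() returning and the driver re-taking mmap_sem to adjust the page protection, another thread can munmap() the range and map something unrelated at the same address. Only if the VMA found at addr is still backed by the object's file, at the expected start and size, is it safe to flip it to write-combining. A hedged sketch of that final step (wrapper name illustrative):

    /* Apply WC page protection only if the VMA at addr is still ours. */
    static void set_wc_if_ours(struct mm_struct *mm, struct file *filp,
                               unsigned long addr, unsigned long size)
    {
            struct vm_area_struct *vma = find_vma(mm, addr);

            if (vma && __vma_matches(vma, filp, addr, size))
                    vma->vm_page_prot =
                            pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
    }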
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index add1fe7aeb93..bd17dd1f5da5 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
| @@ -2075,6 +2075,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size) | |||
| 2075 | int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) | 2075 | int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) |
| 2076 | { | 2076 | { |
| 2077 | struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); | 2077 | struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); |
| 2078 | int err; | ||
| 2078 | 2079 | ||
| 2079 | /* | 2080 | /* |
| 2080 | * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt | 2081 | * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt |
| @@ -2090,9 +2091,17 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) | |||
| 2090 | * allocator works in address space sizes, so it's multiplied by page | 2091 | * allocator works in address space sizes, so it's multiplied by page |
| 2091 | * size. We allocate at the top of the GTT to avoid fragmentation. | 2092 | * size. We allocate at the top of the GTT to avoid fragmentation. |
| 2092 | */ | 2093 | */ |
| 2093 | return i915_vma_pin(ppgtt->vma, | 2094 | err = i915_vma_pin(ppgtt->vma, |
| 2094 | 0, GEN6_PD_ALIGN, | 2095 | 0, GEN6_PD_ALIGN, |
| 2095 | PIN_GLOBAL | PIN_HIGH); | 2096 | PIN_GLOBAL | PIN_HIGH); |
| 2097 | if (err) | ||
| 2098 | goto unpin; | ||
| 2099 | |||
| 2100 | return 0; | ||
| 2101 | |||
| 2102 | unpin: | ||
| 2103 | ppgtt->pin_count = 0; | ||
| 2104 | return err; | ||
| 2096 | } | 2105 | } |
| 2097 | 2106 | ||
| 2098 | void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base) | 2107 | void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base) |
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 07465123c166..3f9ce403c755 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c | |||
| @@ -1907,9 +1907,16 @@ i915_capture_gpu_state(struct drm_i915_private *i915) | |||
| 1907 | { | 1907 | { |
| 1908 | struct i915_gpu_state *error; | 1908 | struct i915_gpu_state *error; |
| 1909 | 1909 | ||
| 1910 | /* Check if GPU capture has been disabled */ | ||
| 1911 | error = READ_ONCE(i915->gpu_error.first_error); | ||
| 1912 | if (IS_ERR(error)) | ||
| 1913 | return error; | ||
| 1914 | |||
| 1910 | error = kzalloc(sizeof(*error), GFP_ATOMIC); | 1915 | error = kzalloc(sizeof(*error), GFP_ATOMIC); |
| 1911 | if (!error) | 1916 | if (!error) { |
| 1912 | return NULL; | 1917 | i915_disable_error_state(i915, -ENOMEM); |
| 1918 | return ERR_PTR(-ENOMEM); | ||
| 1919 | } | ||
| 1913 | 1920 | ||
| 1914 | kref_init(&error->ref); | 1921 | kref_init(&error->ref); |
| 1915 | error->i915 = i915; | 1922 | error->i915 = i915; |
| @@ -1945,11 +1952,8 @@ void i915_capture_error_state(struct drm_i915_private *i915, | |||
| 1945 | return; | 1952 | return; |
| 1946 | 1953 | ||
| 1947 | error = i915_capture_gpu_state(i915); | 1954 | error = i915_capture_gpu_state(i915); |
| 1948 | if (!error) { | 1955 | if (IS_ERR(error)) |
| 1949 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); | ||
| 1950 | i915_disable_error_state(i915, -ENOMEM); | ||
| 1951 | return; | 1956 | return; |
| 1952 | } | ||
| 1953 | 1957 | ||
| 1954 | i915_error_capture_msg(i915, error, engine_mask, error_msg); | 1958 | i915_error_capture_msg(i915, error, engine_mask, error_msg); |
| 1955 | DRM_INFO("%s\n", error->error_msg); | 1959 | DRM_INFO("%s\n", error->error_msg); |
| @@ -1987,7 +1991,7 @@ i915_first_error_state(struct drm_i915_private *i915) | |||
| 1987 | 1991 | ||
| 1988 | spin_lock_irq(&i915->gpu_error.lock); | 1992 | spin_lock_irq(&i915->gpu_error.lock); |
| 1989 | error = i915->gpu_error.first_error; | 1993 | error = i915->gpu_error.first_error; |
| 1990 | if (error) | 1994 | if (!IS_ERR_OR_NULL(error)) |
| 1991 | i915_gpu_state_get(error); | 1995 | i915_gpu_state_get(error); |
| 1992 | spin_unlock_irq(&i915->gpu_error.lock); | 1996 | spin_unlock_irq(&i915->gpu_error.lock); |
| 1993 | 1997 | ||
| @@ -2000,10 +2004,11 @@ void i915_reset_error_state(struct drm_i915_private *i915) | |||
| 2000 | 2004 | ||
| 2001 | spin_lock_irq(&i915->gpu_error.lock); | 2005 | spin_lock_irq(&i915->gpu_error.lock); |
| 2002 | error = i915->gpu_error.first_error; | 2006 | error = i915->gpu_error.first_error; |
| 2003 | i915->gpu_error.first_error = NULL; | 2007 | if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */ |
| 2008 | i915->gpu_error.first_error = NULL; | ||
| 2004 | spin_unlock_irq(&i915->gpu_error.lock); | 2009 | spin_unlock_irq(&i915->gpu_error.lock); |
| 2005 | 2010 | ||
| 2006 | if (!IS_ERR(error)) | 2011 | if (!IS_ERR_OR_NULL(error)) |
| 2007 | i915_gpu_state_put(error); | 2012 | i915_gpu_state_put(error); |
| 2008 | } | 2013 | } |
| 2009 | 2014 | ||
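After the i915_gpu_error.c changes, gpu_error.first_error is a tri-state sentinel rather than a plain pointer: NULL means nothing has been captured, a valid pointer is a captured i915_gpu_state, and an ERR_PTR() (e.g. -ENODEV after the user disables capture, or -ENOMEM after an allocation failure) means capture is permanently off. Readers such as the debugfs and sysfs paths now have to distinguish all three; a sketch of the read side (helper name illustrative):

    static int error_state_status(struct drm_i915_private *i915)
    {
            struct i915_gpu_state *error;

            error = READ_ONCE(i915->gpu_error.first_error);
            if (IS_ERR(error))      /* capture disabled; propagate the reason */
                    return PTR_ERR(error);
            if (!error)             /* nothing captured yet */
                    return 0;
            return 1;               /* a real capture is available */
    }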
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index d6c8f8fdfda5..017fc602a10e 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c | |||
| @@ -594,7 +594,8 @@ static void i915_pmu_enable(struct perf_event *event) | |||
| 594 | * Update the bitmask of enabled events and increment | 594 | * Update the bitmask of enabled events and increment |
| 595 | * the event reference counter. | 595 | * the event reference counter. |
| 596 | */ | 596 | */ |
| 597 | GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); | 597 | BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS); |
| 598 | GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count)); | ||
| 598 | GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0); | 599 | GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0); |
| 599 | i915->pmu.enable |= BIT_ULL(bit); | 600 | i915->pmu.enable |= BIT_ULL(bit); |
| 600 | i915->pmu.enable_count[bit]++; | 601 | i915->pmu.enable_count[bit]++; |
| @@ -615,11 +616,16 @@ static void i915_pmu_enable(struct perf_event *event) | |||
| 615 | engine = intel_engine_lookup_user(i915, | 616 | engine = intel_engine_lookup_user(i915, |
| 616 | engine_event_class(event), | 617 | engine_event_class(event), |
| 617 | engine_event_instance(event)); | 618 | engine_event_instance(event)); |
| 618 | GEM_BUG_ON(!engine); | ||
| 619 | engine->pmu.enable |= BIT(sample); | ||
| 620 | 619 | ||
| 621 | GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); | 620 | BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) != |
| 621 | I915_ENGINE_SAMPLE_COUNT); | ||
| 622 | BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) != | ||
| 623 | I915_ENGINE_SAMPLE_COUNT); | ||
| 624 | GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); | ||
| 625 | GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); | ||
| 622 | GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); | 626 | GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); |
| 627 | |||
| 628 | engine->pmu.enable |= BIT(sample); | ||
| 623 | engine->pmu.enable_count[sample]++; | 629 | engine->pmu.enable_count[sample]++; |
| 624 | } | 630 | } |
| 625 | 631 | ||
| @@ -649,9 +655,11 @@ static void i915_pmu_disable(struct perf_event *event) | |||
| 649 | engine = intel_engine_lookup_user(i915, | 655 | engine = intel_engine_lookup_user(i915, |
| 650 | engine_event_class(event), | 656 | engine_event_class(event), |
| 651 | engine_event_instance(event)); | 657 | engine_event_instance(event)); |
| 652 | GEM_BUG_ON(!engine); | 658 | |
| 653 | GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); | 659 | GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); |
| 660 | GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); | ||
| 654 | GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); | 661 | GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); |
| 662 | |||
| 655 | /* | 663 | /* |
| 656 | * Decrement the reference count and clear the enabled | 664 | * Decrement the reference count and clear the enabled |
| 657 | * bitmask when the last listener on an event goes away. | 665 | * bitmask when the last listener on an event goes away. |
| @@ -660,7 +668,7 @@ static void i915_pmu_disable(struct perf_event *event) | |||
| 660 | engine->pmu.enable &= ~BIT(sample); | 668 | engine->pmu.enable &= ~BIT(sample); |
| 661 | } | 669 | } |
| 662 | 670 | ||
| 663 | GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); | 671 | GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count)); |
| 664 | GEM_BUG_ON(i915->pmu.enable_count[bit] == 0); | 672 | GEM_BUG_ON(i915->pmu.enable_count[bit] == 0); |
| 665 | /* | 673 | /* |
| 666 | * Decrement the reference count and clear the enabled | 674 | * Decrement the reference count and clear the enabled |
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index 7f164ca3db12..b3728c5f13e7 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h | |||
| @@ -31,6 +31,8 @@ enum { | |||
| 31 | ((1 << I915_PMU_SAMPLE_BITS) + \ | 31 | ((1 << I915_PMU_SAMPLE_BITS) + \ |
| 32 | (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0))) | 32 | (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0))) |
| 33 | 33 | ||
| 34 | #define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1) | ||
| 35 | |||
| 34 | struct i915_pmu_sample { | 36 | struct i915_pmu_sample { |
| 35 | u64 cur; | 37 | u64 cur; |
| 36 | }; | 38 | }; |
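The pmu changes replace magic-number bound checks (I915_PMU_MASK_BITS, I915_PMU_SAMPLE_BITS) with asserts tied to the arrays themselves: BUILD_BUG_ON() keeps the new I915_ENGINE_SAMPLE_COUNT constant and the array sizes from drifting apart at compile time, and the runtime asserts bound the index against ARRAY_SIZE(). The same pattern in isolation (names below are illustrative, not from i915):

    #include <linux/bug.h>
    #include <linux/kernel.h>
    #include <linux/types.h>

    #define SAMPLE_COUNT 3

    static u64 sample_buf[SAMPLE_COUNT];

    static void record_sample(unsigned int sample, u64 val)
    {
            /* fails to compile if the array and the constant disagree */
            BUILD_BUG_ON(ARRAY_SIZE(sample_buf) != SAMPLE_COUNT);

            /* bound the index against the array, not a magic number */
            if (WARN_ON(sample >= ARRAY_SIZE(sample_buf)))
                    return;

            sample_buf[sample] = val;
    }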
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0a7d60509ca7..067054cf4a86 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -1790,7 +1790,7 @@ enum i915_power_well_id { | |||
| 1790 | #define _CNL_PORT_TX_C_LN0_OFFSET 0x162C40 | 1790 | #define _CNL_PORT_TX_C_LN0_OFFSET 0x162C40 |
| 1791 | #define _CNL_PORT_TX_D_LN0_OFFSET 0x162E40 | 1791 | #define _CNL_PORT_TX_D_LN0_OFFSET 0x162E40 |
| 1792 | #define _CNL_PORT_TX_F_LN0_OFFSET 0x162840 | 1792 | #define _CNL_PORT_TX_F_LN0_OFFSET 0x162840 |
| 1793 | #define _CNL_PORT_TX_DW_GRP(port, dw) (_PICK((port), \ | 1793 | #define _CNL_PORT_TX_DW_GRP(dw, port) (_PICK((port), \ |
| 1794 | _CNL_PORT_TX_AE_GRP_OFFSET, \ | 1794 | _CNL_PORT_TX_AE_GRP_OFFSET, \ |
| 1795 | _CNL_PORT_TX_B_GRP_OFFSET, \ | 1795 | _CNL_PORT_TX_B_GRP_OFFSET, \ |
| 1796 | _CNL_PORT_TX_B_GRP_OFFSET, \ | 1796 | _CNL_PORT_TX_B_GRP_OFFSET, \ |
| @@ -1798,7 +1798,7 @@ enum i915_power_well_id { | |||
| 1798 | _CNL_PORT_TX_AE_GRP_OFFSET, \ | 1798 | _CNL_PORT_TX_AE_GRP_OFFSET, \ |
| 1799 | _CNL_PORT_TX_F_GRP_OFFSET) + \ | 1799 | _CNL_PORT_TX_F_GRP_OFFSET) + \ |
| 1800 | 4 * (dw)) | 1800 | 4 * (dw)) |
| 1801 | #define _CNL_PORT_TX_DW_LN0(port, dw) (_PICK((port), \ | 1801 | #define _CNL_PORT_TX_DW_LN0(dw, port) (_PICK((port), \ |
| 1802 | _CNL_PORT_TX_AE_LN0_OFFSET, \ | 1802 | _CNL_PORT_TX_AE_LN0_OFFSET, \ |
| 1803 | _CNL_PORT_TX_B_LN0_OFFSET, \ | 1803 | _CNL_PORT_TX_B_LN0_OFFSET, \ |
| 1804 | _CNL_PORT_TX_B_LN0_OFFSET, \ | 1804 | _CNL_PORT_TX_B_LN0_OFFSET, \ |
| @@ -1834,9 +1834,9 @@ enum i915_power_well_id { | |||
| 1834 | 1834 | ||
| 1835 | #define _CNL_PORT_TX_DW4_LN0_AE 0x162450 | 1835 | #define _CNL_PORT_TX_DW4_LN0_AE 0x162450 |
| 1836 | #define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0 | 1836 | #define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0 |
| 1837 | #define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 4)) | 1837 | #define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(4, (port))) |
| 1838 | #define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4)) | 1838 | #define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port))) |
| 1839 | #define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \ | 1839 | #define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \ |
| 1840 | ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \ | 1840 | ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \ |
| 1841 | _CNL_PORT_TX_DW4_LN0_AE))) | 1841 | _CNL_PORT_TX_DW4_LN0_AE))) |
| 1842 | #define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port)) | 1842 | #define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port)) |
| @@ -1864,8 +1864,12 @@ enum i915_power_well_id { | |||
| 1864 | #define RTERM_SELECT(x) ((x) << 3) | 1864 | #define RTERM_SELECT(x) ((x) << 3) |
| 1865 | #define RTERM_SELECT_MASK (0x7 << 3) | 1865 | #define RTERM_SELECT_MASK (0x7 << 3) |
| 1866 | 1866 | ||
| 1867 | #define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 7)) | 1867 | #define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(7, (port))) |
| 1868 | #define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 7)) | 1868 | #define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(7, (port))) |
| 1869 | #define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port)) | ||
| 1870 | #define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port)) | ||
| 1871 | #define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port)) | ||
| 1872 | #define ICL_PORT_TX_DW7_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port)) | ||
| 1869 | #define N_SCALAR(x) ((x) << 24) | 1873 | #define N_SCALAR(x) ((x) << 24) |
| 1870 | #define N_SCALAR_MASK (0x7F << 24) | 1874 | #define N_SCALAR_MASK (0x7F << 24) |
| 1871 | 1875 | ||
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 535caebd9813..c0cfe7ae2ba5 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c | |||
| @@ -521,7 +521,9 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj, | |||
| 521 | ssize_t ret; | 521 | ssize_t ret; |
| 522 | 522 | ||
| 523 | gpu = i915_first_error_state(i915); | 523 | gpu = i915_first_error_state(i915); |
| 524 | if (gpu) { | 524 | if (IS_ERR(gpu)) { |
| 525 | ret = PTR_ERR(gpu); | ||
| 526 | } else if (gpu) { | ||
| 525 | ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count); | 527 | ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count); |
| 526 | i915_gpu_state_put(gpu); | 528 | i915_gpu_state_put(gpu); |
| 527 | } else { | 529 | } else { |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index f3e1d6a0b7dd..7edce1b7b348 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
| @@ -494,103 +494,58 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = { | |||
| 494 | { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ | 494 | { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ |
| 495 | }; | 495 | }; |
| 496 | 496 | ||
| 497 | struct icl_combo_phy_ddi_buf_trans { | 497 | /* icl_combo_phy_ddi_translations */ |
| 498 | u32 dw2_swing_select; | 498 | static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = { |
| 499 | u32 dw2_swing_scalar; | 499 | /* NT mV Trans mV db */ |
| 500 | u32 dw4_scaling; | 500 | { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ |
| 501 | }; | 501 | { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ |
| 502 | 502 | { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ | |
| 503 | /* Voltage Swing Programming for VccIO 0.85V for DP */ | 503 | { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ |
| 504 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_85V[] = { | 504 | { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ |
| 505 | /* Voltage mV db */ | 505 | { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ |
| 506 | { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ | 506 | { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ |
| 507 | { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ | 507 | { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ |
| 508 | { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ | 508 | { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ |
| 509 | { 0x2, 0x98, 0x900F }, /* 400 9.5 */ | 509 | { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ |
| 510 | { 0xB, 0x70, 0x0018 }, /* 600 0.0 */ | ||
| 511 | { 0xB, 0x70, 0x3015 }, /* 600 3.5 */ | ||
| 512 | { 0xB, 0x70, 0x6012 }, /* 600 6.0 */ | ||
| 513 | { 0x5, 0x00, 0x0018 }, /* 800 0.0 */ | ||
| 514 | { 0x5, 0x00, 0x3015 }, /* 800 3.5 */ | ||
| 515 | { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ | ||
| 516 | }; | ||
| 517 | |||
| 518 | /* FIXME - After table is updated in Bspec */ | ||
| 519 | /* Voltage Swing Programming for VccIO 0.85V for eDP */ | ||
| 520 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_85V[] = { | ||
| 521 | /* Voltage mV db */ | ||
| 522 | { 0x0, 0x00, 0x00 }, /* 200 0.0 */ | ||
| 523 | { 0x0, 0x00, 0x00 }, /* 200 1.5 */ | ||
| 524 | { 0x0, 0x00, 0x00 }, /* 200 4.0 */ | ||
| 525 | { 0x0, 0x00, 0x00 }, /* 200 6.0 */ | ||
| 526 | { 0x0, 0x00, 0x00 }, /* 250 0.0 */ | ||
| 527 | { 0x0, 0x00, 0x00 }, /* 250 1.5 */ | ||
| 528 | { 0x0, 0x00, 0x00 }, /* 250 4.0 */ | ||
| 529 | { 0x0, 0x00, 0x00 }, /* 300 0.0 */ | ||
| 530 | { 0x0, 0x00, 0x00 }, /* 300 1.5 */ | ||
| 531 | { 0x0, 0x00, 0x00 }, /* 350 0.0 */ | ||
| 532 | }; | ||
| 533 | |||
| 534 | /* Voltage Swing Programming for VccIO 0.95V for DP */ | ||
| 535 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_95V[] = { | ||
| 536 | /* Voltage mV db */ | ||
| 537 | { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ | ||
| 538 | { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ | ||
| 539 | { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ | ||
| 540 | { 0x2, 0x98, 0x900F }, /* 400 9.5 */ | ||
| 541 | { 0x4, 0x98, 0x0018 }, /* 600 0.0 */ | ||
| 542 | { 0x4, 0x98, 0x3015 }, /* 600 3.5 */ | ||
| 543 | { 0x4, 0x98, 0x6012 }, /* 600 6.0 */ | ||
| 544 | { 0x5, 0x76, 0x0018 }, /* 800 0.0 */ | ||
| 545 | { 0x5, 0x76, 0x3015 }, /* 800 3.5 */ | ||
| 546 | { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ | ||
| 547 | }; | 510 | }; |
| 548 | 511 | ||
| 549 | /* FIXME - After table is updated in Bspec */ | 512 | static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = { |
| 550 | /* Voltage Swing Programming for VccIO 0.95V for eDP */ | 513 | /* NT mV Trans mV db */ |
| 551 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_95V[] = { | 514 | { 0x0, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */ |
| 552 | /* Voltage mV db */ | 515 | { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */ |
| 553 | { 0x0, 0x00, 0x00 }, /* 200 0.0 */ | 516 | { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */ |
| 554 | { 0x0, 0x00, 0x00 }, /* 200 1.5 */ | 517 | { 0x9, 0x7F, 0x31, 0x00, 0x0E }, /* 200 350 4.9 */ |
| 555 | { 0x0, 0x00, 0x00 }, /* 200 4.0 */ | 518 | { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */ |
| 556 | { 0x0, 0x00, 0x00 }, /* 200 6.0 */ | 519 | { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */ |
| 557 | { 0x0, 0x00, 0x00 }, /* 250 0.0 */ | 520 | { 0x9, 0x7F, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */ |
| 558 | { 0x0, 0x00, 0x00 }, /* 250 1.5 */ | 521 | { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */ |
| 559 | { 0x0, 0x00, 0x00 }, /* 250 4.0 */ | 522 | { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */ |
| 560 | { 0x0, 0x00, 0x00 }, /* 300 0.0 */ | 523 | { 0x9, 0x7F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ |
| 561 | { 0x0, 0x00, 0x00 }, /* 300 1.5 */ | ||
| 562 | { 0x0, 0x00, 0x00 }, /* 350 0.0 */ | ||
| 563 | }; | 524 | }; |
| 564 | 525 | ||
| 565 | /* Voltage Swing Programming for VccIO 1.05V for DP */ | 526 | static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = { |
| 566 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_1_05V[] = { | 527 | /* NT mV Trans mV db */ |
| 567 | /* Voltage mV db */ | 528 | { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ |
| 568 | { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ | 529 | { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ |
| 569 | { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ | 530 | { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ |
| 570 | { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ | 531 | { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ |
| 571 | { 0x2, 0x98, 0x900F }, /* 400 9.5 */ | 532 | { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ |
| 572 | { 0x4, 0x98, 0x0018 }, /* 600 0.0 */ | 533 | { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ |
| 573 | { 0x4, 0x98, 0x3015 }, /* 600 3.5 */ | 534 | { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ |
| 574 | { 0x4, 0x98, 0x6012 }, /* 600 6.0 */ | 535 | { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ |
| 575 | { 0x5, 0x71, 0x0018 }, /* 800 0.0 */ | 536 | { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ |
| 576 | { 0x5, 0x71, 0x3015 }, /* 800 3.5 */ | 537 | { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ |
| 577 | { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ | ||
| 578 | }; | 538 | }; |
| 579 | 539 | ||
| 580 | /* FIXME - After table is updated in Bspec */ | 540 | static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = { |
| 581 | /* Voltage Swing Programming for VccIO 1.05V for eDP */ | 541 | /* NT mV Trans mV db */ |
| 582 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_1_05V[] = { | 542 | { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ |
| 583 | /* Voltage mV db */ | 543 | { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */ |
| 584 | { 0x0, 0x00, 0x00 }, /* 200 0.0 */ | 544 | { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */ |
| 585 | { 0x0, 0x00, 0x00 }, /* 200 1.5 */ | 545 | { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 ALS */ |
| 586 | { 0x0, 0x00, 0x00 }, /* 200 4.0 */ | 546 | { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */ |
| 587 | { 0x0, 0x00, 0x00 }, /* 200 6.0 */ | 547 | { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */ |
| 588 | { 0x0, 0x00, 0x00 }, /* 250 0.0 */ | 548 | { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ |
| 589 | { 0x0, 0x00, 0x00 }, /* 250 1.5 */ | ||
| 590 | { 0x0, 0x00, 0x00 }, /* 250 4.0 */ | ||
| 591 | { 0x0, 0x00, 0x00 }, /* 300 0.0 */ | ||
| 592 | { 0x0, 0x00, 0x00 }, /* 300 1.5 */ | ||
| 593 | { 0x0, 0x00, 0x00 }, /* 350 0.0 */ | ||
| 594 | }; | 549 | }; |
| 595 | 550 | ||
| 596 | struct icl_mg_phy_ddi_buf_trans { | 551 | struct icl_mg_phy_ddi_buf_trans { |
| @@ -871,43 +826,23 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) | |||
| 871 | } | 826 | } |
| 872 | } | 827 | } |
| 873 | 828 | ||
| 874 | static const struct icl_combo_phy_ddi_buf_trans * | 829 | static const struct cnl_ddi_buf_trans * |
| 875 | icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port, | 830 | icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port, |
| 876 | int type, int *n_entries) | 831 | int type, int rate, int *n_entries) |
| 877 | { | 832 | { |
| 878 | u32 voltage = I915_READ(ICL_PORT_COMP_DW3(port)) & VOLTAGE_INFO_MASK; | 833 | if (type == INTEL_OUTPUT_HDMI) { |
| 879 | 834 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); | |
| 880 | if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { | 835 | return icl_combo_phy_ddi_translations_hdmi; |
| 881 | switch (voltage) { | 836 | } else if (rate > 540000 && type == INTEL_OUTPUT_EDP) { |
| 882 | case VOLTAGE_INFO_0_85V: | 837 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3); |
| 883 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_85V); | 838 | return icl_combo_phy_ddi_translations_edp_hbr3; |
| 884 | return icl_combo_phy_ddi_translations_edp_0_85V; | 839 | } else if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { |
| 885 | case VOLTAGE_INFO_0_95V: | 840 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); |
| 886 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_95V); | 841 | return icl_combo_phy_ddi_translations_edp_hbr2; |
| 887 | return icl_combo_phy_ddi_translations_edp_0_95V; | ||
| 888 | case VOLTAGE_INFO_1_05V: | ||
| 889 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_1_05V); | ||
| 890 | return icl_combo_phy_ddi_translations_edp_1_05V; | ||
| 891 | default: | ||
| 892 | MISSING_CASE(voltage); | ||
| 893 | return NULL; | ||
| 894 | } | ||
| 895 | } else { | ||
| 896 | switch (voltage) { | ||
| 897 | case VOLTAGE_INFO_0_85V: | ||
| 898 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_85V); | ||
| 899 | return icl_combo_phy_ddi_translations_dp_hdmi_0_85V; | ||
| 900 | case VOLTAGE_INFO_0_95V: | ||
| 901 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_95V); | ||
| 902 | return icl_combo_phy_ddi_translations_dp_hdmi_0_95V; | ||
| 903 | case VOLTAGE_INFO_1_05V: | ||
| 904 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_1_05V); | ||
| 905 | return icl_combo_phy_ddi_translations_dp_hdmi_1_05V; | ||
| 906 | default: | ||
| 907 | MISSING_CASE(voltage); | ||
| 908 | return NULL; | ||
| 909 | } | ||
| 910 | } | 842 | } |
| 843 | |||
| 844 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2); | ||
| 845 | return icl_combo_phy_ddi_translations_dp_hbr2; | ||
| 911 | } | 846 | } |
| 912 | 847 | ||
| 913 | static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) | 848 | static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) |
| @@ -918,8 +853,8 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por | |||
| 918 | 853 | ||
| 919 | if (IS_ICELAKE(dev_priv)) { | 854 | if (IS_ICELAKE(dev_priv)) { |
| 920 | if (intel_port_is_combophy(dev_priv, port)) | 855 | if (intel_port_is_combophy(dev_priv, port)) |
| 921 | icl_get_combo_buf_trans(dev_priv, port, | 856 | icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI, |
| 922 | INTEL_OUTPUT_HDMI, &n_entries); | 857 | 0, &n_entries); |
| 923 | else | 858 | else |
| 924 | n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); | 859 | n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); |
| 925 | default_entry = n_entries - 1; | 860 | default_entry = n_entries - 1; |
| @@ -1086,7 +1021,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder, | |||
| 1086 | return DDI_CLK_SEL_TBT_810; | 1021 | return DDI_CLK_SEL_TBT_810; |
| 1087 | default: | 1022 | default: |
| 1088 | MISSING_CASE(clock); | 1023 | MISSING_CASE(clock); |
| 1089 | break; | 1024 | return DDI_CLK_SEL_NONE; |
| 1090 | } | 1025 | } |
| 1091 | case DPLL_ID_ICL_MGPLL1: | 1026 | case DPLL_ID_ICL_MGPLL1: |
| 1092 | case DPLL_ID_ICL_MGPLL2: | 1027 | case DPLL_ID_ICL_MGPLL2: |
| @@ -2275,13 +2210,14 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder, | |||
| 2275 | u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) | 2210 | u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) |
| 2276 | { | 2211 | { |
| 2277 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 2212 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
| 2213 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | ||
| 2278 | enum port port = encoder->port; | 2214 | enum port port = encoder->port; |
| 2279 | int n_entries; | 2215 | int n_entries; |
| 2280 | 2216 | ||
| 2281 | if (IS_ICELAKE(dev_priv)) { | 2217 | if (IS_ICELAKE(dev_priv)) { |
| 2282 | if (intel_port_is_combophy(dev_priv, port)) | 2218 | if (intel_port_is_combophy(dev_priv, port)) |
| 2283 | icl_get_combo_buf_trans(dev_priv, port, encoder->type, | 2219 | icl_get_combo_buf_trans(dev_priv, port, encoder->type, |
| 2284 | &n_entries); | 2220 | intel_dp->link_rate, &n_entries); |
| 2285 | else | 2221 | else |
| 2286 | n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); | 2222 | n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); |
| 2287 | } else if (IS_CANNONLAKE(dev_priv)) { | 2223 | } else if (IS_CANNONLAKE(dev_priv)) { |
| @@ -2462,14 +2398,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, | |||
| 2462 | } | 2398 | } |
| 2463 | 2399 | ||
| 2464 | static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, | 2400 | static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, |
| 2465 | u32 level, enum port port, int type) | 2401 | u32 level, enum port port, int type, |
| 2402 | int rate) | ||
| 2466 | { | 2403 | { |
| 2467 | const struct icl_combo_phy_ddi_buf_trans *ddi_translations = NULL; | 2404 | const struct cnl_ddi_buf_trans *ddi_translations = NULL; |
| 2468 | u32 n_entries, val; | 2405 | u32 n_entries, val; |
| 2469 | int ln; | 2406 | int ln; |
| 2470 | 2407 | ||
| 2471 | ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type, | 2408 | ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type, |
| 2472 | &n_entries); | 2409 | rate, &n_entries); |
| 2473 | if (!ddi_translations) | 2410 | if (!ddi_translations) |
| 2474 | return; | 2411 | return; |
| 2475 | 2412 | ||
| @@ -2478,34 +2415,23 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, | |||
| 2478 | level = n_entries - 1; | 2415 | level = n_entries - 1; |
| 2479 | } | 2416 | } |
| 2480 | 2417 | ||
| 2481 | /* Set PORT_TX_DW5 Rterm Sel to 110b. */ | 2418 | /* Set PORT_TX_DW5 */ |
| 2482 | val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); | 2419 | val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); |
| 2483 | val &= ~RTERM_SELECT_MASK; | 2420 | val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK | |
| 2421 | TAP2_DISABLE | TAP3_DISABLE); | ||
| 2422 | val |= SCALING_MODE_SEL(0x2); | ||
| 2484 | val |= RTERM_SELECT(0x6); | 2423 | val |= RTERM_SELECT(0x6); |
| 2485 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); | 2424 | val |= TAP3_DISABLE; |
| 2486 | |||
| 2487 | /* Program PORT_TX_DW5 */ | ||
| 2488 | val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); | ||
| 2489 | /* Set DisableTap2 and DisableTap3 if MIPI DSI | ||
| 2490 | * Clear DisableTap2 and DisableTap3 for all other Ports | ||
| 2491 | */ | ||
| 2492 | if (type == INTEL_OUTPUT_DSI) { | ||
| 2493 | val |= TAP2_DISABLE; | ||
| 2494 | val |= TAP3_DISABLE; | ||
| 2495 | } else { | ||
| 2496 | val &= ~TAP2_DISABLE; | ||
| 2497 | val &= ~TAP3_DISABLE; | ||
| 2498 | } | ||
| 2499 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); | 2425 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); |
| 2500 | 2426 | ||
| 2501 | /* Program PORT_TX_DW2 */ | 2427 | /* Program PORT_TX_DW2 */ |
| 2502 | val = I915_READ(ICL_PORT_TX_DW2_LN0(port)); | 2428 | val = I915_READ(ICL_PORT_TX_DW2_LN0(port)); |
| 2503 | val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | | 2429 | val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | |
| 2504 | RCOMP_SCALAR_MASK); | 2430 | RCOMP_SCALAR_MASK); |
| 2505 | val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_select); | 2431 | val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel); |
| 2506 | val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_select); | 2432 | val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel); |
| 2507 | /* Program Rcomp scalar for every table entry */ | 2433 | /* Program Rcomp scalar for every table entry */ |
| 2508 | val |= RCOMP_SCALAR(ddi_translations[level].dw2_swing_scalar); | 2434 | val |= RCOMP_SCALAR(0x98); |
| 2509 | I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val); | 2435 | I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val); |
| 2510 | 2436 | ||
| 2511 | /* Program PORT_TX_DW4 */ | 2437 | /* Program PORT_TX_DW4 */ |
| @@ -2514,9 +2440,17 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, | |||
| 2514 | val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln)); | 2440 | val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln)); |
| 2515 | val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | | 2441 | val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | |
| 2516 | CURSOR_COEFF_MASK); | 2442 | CURSOR_COEFF_MASK); |
| 2517 | val |= ddi_translations[level].dw4_scaling; | 2443 | val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1); |
| 2444 | val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2); | ||
| 2445 | val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff); | ||
| 2518 | I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val); | 2446 | I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val); |
| 2519 | } | 2447 | } |
| 2448 | |||
| 2449 | /* Program PORT_TX_DW7 */ | ||
| 2450 | val = I915_READ(ICL_PORT_TX_DW7_LN0(port)); | ||
| 2451 | val &= ~N_SCALAR_MASK; | ||
| 2452 | val |= N_SCALAR(ddi_translations[level].dw7_n_scalar); | ||
| 2453 | I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val); | ||
| 2520 | } | 2454 | } |
| 2521 | 2455 | ||
| 2522 | static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, | 2456 | static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, |
| @@ -2581,7 +2515,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, | |||
| 2581 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); | 2515 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); |
| 2582 | 2516 | ||
| 2583 | /* 5. Program swing and de-emphasis */ | 2517 | /* 5. Program swing and de-emphasis */ |
| 2584 | icl_ddi_combo_vswing_program(dev_priv, level, port, type); | 2518 | icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate); |
| 2585 | 2519 | ||
| 2586 | /* 6. Set training enable to trigger update */ | 2520 | /* 6. Set training enable to trigger update */ |
| 2587 | val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); | 2521 | val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3da9c0f9e948..248128126422 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -15415,16 +15415,45 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, | |||
| 15415 | } | 15415 | } |
| 15416 | } | 15416 | } |
| 15417 | 15417 | ||
| 15418 | static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state) | ||
| 15419 | { | ||
| 15420 | struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); | ||
| 15421 | |||
| 15422 | /* | ||
| 15423 | * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram | ||
| 15424 | * the hardware when a high res displays plugged in. DPLL P | ||
| 15425 | * divider is zero, and the pipe timings are bonkers. We'll | ||
| 15426 | * try to disable everything in that case. | ||
| 15427 | * | ||
| 15428 | * FIXME would be nice to be able to sanitize this state | ||
| 15429 | * without several WARNs, but for now let's take the easy | ||
| 15430 | * road. | ||
| 15431 | */ | ||
| 15432 | return IS_GEN6(dev_priv) && | ||
| 15433 | crtc_state->base.active && | ||
| 15434 | crtc_state->shared_dpll && | ||
| 15435 | crtc_state->port_clock == 0; | ||
| 15436 | } | ||
| 15437 | |||
| 15418 | static void intel_sanitize_encoder(struct intel_encoder *encoder) | 15438 | static void intel_sanitize_encoder(struct intel_encoder *encoder) |
| 15419 | { | 15439 | { |
| 15420 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 15440 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
| 15421 | struct intel_connector *connector; | 15441 | struct intel_connector *connector; |
| 15442 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); | ||
| 15443 | struct intel_crtc_state *crtc_state = crtc ? | ||
| 15444 | to_intel_crtc_state(crtc->base.state) : NULL; | ||
| 15422 | 15445 | ||
| 15423 | /* We need to check both for a crtc link (meaning that the | 15446 | /* We need to check both for a crtc link (meaning that the |
| 15424 | * encoder is active and trying to read from a pipe) and the | 15447 | * encoder is active and trying to read from a pipe) and the |
| 15425 | * pipe itself being active. */ | 15448 | * pipe itself being active. */ |
| 15426 | bool has_active_crtc = encoder->base.crtc && | 15449 | bool has_active_crtc = crtc_state && |
| 15427 | to_intel_crtc(encoder->base.crtc)->active; | 15450 | crtc_state->base.active; |
| 15451 | |||
| 15452 | if (crtc_state && has_bogus_dpll_config(crtc_state)) { | ||
| 15453 | DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n", | ||
| 15454 | pipe_name(crtc->pipe)); | ||
| 15455 | has_active_crtc = false; | ||
| 15456 | } | ||
| 15428 | 15457 | ||
| 15429 | connector = intel_encoder_find_connector(encoder); | 15458 | connector = intel_encoder_find_connector(encoder); |
| 15430 | if (connector && !has_active_crtc) { | 15459 | if (connector && !has_active_crtc) { |
| @@ -15435,16 +15464,25 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) | |||
| 15435 | /* Connector is active, but has no active pipe. This is | 15464 | /* Connector is active, but has no active pipe. This is |
| 15436 | * fallout from our resume register restoring. Disable | 15465 | * fallout from our resume register restoring. Disable |
| 15437 | * the encoder manually again. */ | 15466 | * the encoder manually again. */ |
| 15438 | if (encoder->base.crtc) { | 15467 | if (crtc_state) { |
| 15439 | struct drm_crtc_state *crtc_state = encoder->base.crtc->state; | 15468 | struct drm_encoder *best_encoder; |
| 15440 | 15469 | ||
| 15441 | DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", | 15470 | DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", |
| 15442 | encoder->base.base.id, | 15471 | encoder->base.base.id, |
| 15443 | encoder->base.name); | 15472 | encoder->base.name); |
| 15473 | |||
| 15474 | /* avoid oopsing in case the hooks consult best_encoder */ | ||
| 15475 | best_encoder = connector->base.state->best_encoder; | ||
| 15476 | connector->base.state->best_encoder = &encoder->base; | ||
| 15477 | |||
| 15444 | if (encoder->disable) | 15478 | if (encoder->disable) |
| 15445 | encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); | 15479 | encoder->disable(encoder, crtc_state, |
| 15480 | connector->base.state); | ||
| 15446 | if (encoder->post_disable) | 15481 | if (encoder->post_disable) |
| 15447 | encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); | 15482 | encoder->post_disable(encoder, crtc_state, |
| 15483 | connector->base.state); | ||
| 15484 | |||
| 15485 | connector->base.state->best_encoder = best_encoder; | ||
| 15448 | } | 15486 | } |
| 15449 | encoder->base.crtc = NULL; | 15487 | encoder->base.crtc = NULL; |
| 15450 | 15488 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index fdd2cbc56fa3..22a74608c6e4 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -304,9 +304,11 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp) | |||
| 304 | static int icl_max_source_rate(struct intel_dp *intel_dp) | 304 | static int icl_max_source_rate(struct intel_dp *intel_dp) |
| 305 | { | 305 | { |
| 306 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | 306 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
| 307 | struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); | ||
| 307 | enum port port = dig_port->base.port; | 308 | enum port port = dig_port->base.port; |
| 308 | 309 | ||
| 309 | if (port == PORT_B) | 310 | if (intel_port_is_combophy(dev_priv, port) && |
| 311 | !intel_dp_is_edp(intel_dp)) | ||
| 310 | return 540000; | 312 | return 540000; |
| 311 | 313 | ||
| 312 | return 810000; | 314 | return 810000; |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index f94a04b4ad87..e9ddeaf05a14 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -209,6 +209,16 @@ struct intel_fbdev { | |||
| 209 | unsigned long vma_flags; | 209 | unsigned long vma_flags; |
| 210 | async_cookie_t cookie; | 210 | async_cookie_t cookie; |
| 211 | int preferred_bpp; | 211 | int preferred_bpp; |
| 212 | |||
| 213 | /* Whether or not fbdev hpd processing is temporarily suspended */ | ||
| 214 | bool hpd_suspended : 1; | ||
| 215 | /* Set when a hotplug was received while HPD processing was | ||
| 216 | * suspended | ||
| 217 | */ | ||
| 218 | bool hpd_waiting : 1; | ||
| 219 | |||
| 220 | /* Protects hpd_suspended */ | ||
| 221 | struct mutex hpd_lock; | ||
| 212 | }; | 222 | }; |
| 213 | 223 | ||
| 214 | struct intel_encoder { | 224 | struct intel_encoder { |
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index fb5bb5b32a60..4ee16b264dbe 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
| @@ -336,8 +336,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, | |||
| 336 | bool *enabled, int width, int height) | 336 | bool *enabled, int width, int height) |
| 337 | { | 337 | { |
| 338 | struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); | 338 | struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); |
| 339 | unsigned long conn_configured, conn_seq, mask; | ||
| 340 | unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); | 339 | unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); |
| 340 | unsigned long conn_configured, conn_seq; | ||
| 341 | int i, j; | 341 | int i, j; |
| 342 | bool *save_enabled; | 342 | bool *save_enabled; |
| 343 | bool fallback = true, ret = true; | 343 | bool fallback = true, ret = true; |
| @@ -355,10 +355,9 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, | |||
| 355 | drm_modeset_backoff(&ctx); | 355 | drm_modeset_backoff(&ctx); |
| 356 | 356 | ||
| 357 | memcpy(save_enabled, enabled, count); | 357 | memcpy(save_enabled, enabled, count); |
| 358 | mask = GENMASK(count - 1, 0); | 358 | conn_seq = GENMASK(count - 1, 0); |
| 359 | conn_configured = 0; | 359 | conn_configured = 0; |
| 360 | retry: | 360 | retry: |
| 361 | conn_seq = conn_configured; | ||
| 362 | for (i = 0; i < count; i++) { | 361 | for (i = 0; i < count; i++) { |
| 363 | struct drm_fb_helper_connector *fb_conn; | 362 | struct drm_fb_helper_connector *fb_conn; |
| 364 | struct drm_connector *connector; | 363 | struct drm_connector *connector; |
| @@ -371,7 +370,8 @@ retry: | |||
| 371 | if (conn_configured & BIT(i)) | 370 | if (conn_configured & BIT(i)) |
| 372 | continue; | 371 | continue; |
| 373 | 372 | ||
| 374 | if (conn_seq == 0 && !connector->has_tile) | 373 | /* First pass, only consider tiled connectors */ |
| 374 | if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile) | ||
| 375 | continue; | 375 | continue; |
| 376 | 376 | ||
| 377 | if (connector->status == connector_status_connected) | 377 | if (connector->status == connector_status_connected) |
| @@ -475,8 +475,10 @@ retry: | |||
| 475 | conn_configured |= BIT(i); | 475 | conn_configured |= BIT(i); |
| 476 | } | 476 | } |
| 477 | 477 | ||
| 478 | if ((conn_configured & mask) != mask && conn_configured != conn_seq) | 478 | if (conn_configured != conn_seq) { /* repeat until no more are found */ |
| 479 | conn_seq = conn_configured; | ||
| 479 | goto retry; | 480 | goto retry; |
| 481 | } | ||
| 480 | 482 | ||
| 481 | /* | 483 | /* |
| 482 | * If the BIOS didn't enable everything it could, fall back to have the | 484 | * If the BIOS didn't enable everything it could, fall back to have the |
| @@ -679,6 +681,7 @@ int intel_fbdev_init(struct drm_device *dev) | |||
| 679 | if (ifbdev == NULL) | 681 | if (ifbdev == NULL) |
| 680 | return -ENOMEM; | 682 | return -ENOMEM; |
| 681 | 683 | ||
| 684 | mutex_init(&ifbdev->hpd_lock); | ||
| 682 | drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs); | 685 | drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs); |
| 683 | 686 | ||
| 684 | if (!intel_fbdev_init_bios(dev, ifbdev)) | 687 | if (!intel_fbdev_init_bios(dev, ifbdev)) |
| @@ -752,6 +755,26 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv) | |||
| 752 | intel_fbdev_destroy(ifbdev); | 755 | intel_fbdev_destroy(ifbdev); |
| 753 | } | 756 | } |
| 754 | 757 | ||
| 758 | /* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD | ||
| 759 | * processing, fbdev will perform a full connector reprobe if a hotplug event | ||
| 760 | * was received while HPD was suspended. | ||
| 761 | */ | ||
| 762 | static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state) | ||
| 763 | { | ||
| 764 | bool send_hpd = false; | ||
| 765 | |||
| 766 | mutex_lock(&ifbdev->hpd_lock); | ||
| 767 | ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED; | ||
| 768 | send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting; | ||
| 769 | ifbdev->hpd_waiting = false; | ||
| 770 | mutex_unlock(&ifbdev->hpd_lock); | ||
| 771 | |||
| 772 | if (send_hpd) { | ||
| 773 | DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n"); | ||
| 774 | drm_fb_helper_hotplug_event(&ifbdev->helper); | ||
| 775 | } | ||
| 776 | } | ||
| 777 | |||
| 755 | void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) | 778 | void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) |
| 756 | { | 779 | { |
| 757 | struct drm_i915_private *dev_priv = to_i915(dev); | 780 | struct drm_i915_private *dev_priv = to_i915(dev); |
| @@ -773,6 +796,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous | |||
| 773 | */ | 796 | */ |
| 774 | if (state != FBINFO_STATE_RUNNING) | 797 | if (state != FBINFO_STATE_RUNNING) |
| 775 | flush_work(&dev_priv->fbdev_suspend_work); | 798 | flush_work(&dev_priv->fbdev_suspend_work); |
| 799 | |||
| 776 | console_lock(); | 800 | console_lock(); |
| 777 | } else { | 801 | } else { |
| 778 | /* | 802 | /* |
| @@ -800,17 +824,26 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous | |||
| 800 | 824 | ||
| 801 | drm_fb_helper_set_suspend(&ifbdev->helper, state); | 825 | drm_fb_helper_set_suspend(&ifbdev->helper, state); |
| 802 | console_unlock(); | 826 | console_unlock(); |
| 827 | |||
| 828 | intel_fbdev_hpd_set_suspend(ifbdev, state); | ||
| 803 | } | 829 | } |
| 804 | 830 | ||
| 805 | void intel_fbdev_output_poll_changed(struct drm_device *dev) | 831 | void intel_fbdev_output_poll_changed(struct drm_device *dev) |
| 806 | { | 832 | { |
| 807 | struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; | 833 | struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; |
| 834 | bool send_hpd; | ||
| 808 | 835 | ||
| 809 | if (!ifbdev) | 836 | if (!ifbdev) |
| 810 | return; | 837 | return; |
| 811 | 838 | ||
| 812 | intel_fbdev_sync(ifbdev); | 839 | intel_fbdev_sync(ifbdev); |
| 813 | if (ifbdev->vma || ifbdev->helper.deferred_setup) | 840 | |
| 841 | mutex_lock(&ifbdev->hpd_lock); | ||
| 842 | send_hpd = !ifbdev->hpd_suspended; | ||
| 843 | ifbdev->hpd_waiting = true; | ||
| 844 | mutex_unlock(&ifbdev->hpd_lock); | ||
| 845 | |||
| 846 | if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup)) | ||
| 814 | drm_fb_helper_hotplug_event(&ifbdev->helper); | 847 | drm_fb_helper_hotplug_event(&ifbdev->helper); |
| 815 | } | 848 | } |
| 816 | 849 | ||
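The hpd_suspended/hpd_waiting bits added above implement a latch-and-replay scheme: a hotplug that arrives while fbdev is suspended is only recorded under hpd_lock, and a single reprobe is issued when HPD processing resumes. A minimal sketch of how the two paths cooperate (field names and drm_fb_helper_hotplug_event() as in the hunks above; the surrounding flow is simplified, not the actual driver code):

    /* Simplified sketch of the latch-and-replay pattern introduced above. */
    static void example_hotplug(struct intel_fbdev *ifbdev)
    {
            bool send_hpd;

            mutex_lock(&ifbdev->hpd_lock);
            send_hpd = !ifbdev->hpd_suspended;      /* deliver immediately only if running */
            ifbdev->hpd_waiting = true;             /* remember that one arrived */
            mutex_unlock(&ifbdev->hpd_lock);

            if (send_hpd)
                    drm_fb_helper_hotplug_event(&ifbdev->helper);
    }

    static void example_resume(struct intel_fbdev *ifbdev)
    {
            bool replay;

            mutex_lock(&ifbdev->hpd_lock);
            ifbdev->hpd_suspended = false;
            replay = ifbdev->hpd_waiting;           /* anything missed while suspended? */
            ifbdev->hpd_waiting = false;
            mutex_unlock(&ifbdev->hpd_lock);

            if (replay)                             /* one reprobe covers all missed events */
                    drm_fb_helper_hotplug_event(&ifbdev->helper);
    }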
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 4be167dcd209..eab9341a5152 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
| @@ -303,6 +303,7 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine) | |||
| 303 | */ | 303 | */ |
| 304 | if (!(prio & I915_PRIORITY_NEWCLIENT)) { | 304 | if (!(prio & I915_PRIORITY_NEWCLIENT)) { |
| 305 | prio |= I915_PRIORITY_NEWCLIENT; | 305 | prio |= I915_PRIORITY_NEWCLIENT; |
| 306 | active->sched.attr.priority = prio; | ||
| 306 | list_move_tail(&active->sched.link, | 307 | list_move_tail(&active->sched.link, |
| 307 | i915_sched_lookup_priolist(engine, prio)); | 308 | i915_sched_lookup_priolist(engine, prio)); |
| 308 | } | 309 | } |
| @@ -645,6 +646,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine) | |||
| 645 | int i; | 646 | int i; |
| 646 | 647 | ||
| 647 | priolist_for_each_request_consume(rq, rn, p, i) { | 648 | priolist_for_each_request_consume(rq, rn, p, i) { |
| 649 | GEM_BUG_ON(last && | ||
| 650 | need_preempt(engine, last, rq_prio(rq))); | ||
| 651 | |||
| 648 | /* | 652 | /* |
| 649 | * Can we combine this request with the current port? | 653 | * Can we combine this request with the current port? |
| 650 | * It has to be the same context/ringbuffer and not | 654 | * It has to be the same context/ringbuffer and not |
| @@ -2244,6 +2248,8 @@ static int logical_ring_init(struct intel_engine_cs *engine) | |||
| 2244 | if (ret) | 2248 | if (ret) |
| 2245 | return ret; | 2249 | return ret; |
| 2246 | 2250 | ||
| 2251 | intel_engine_init_workarounds(engine); | ||
| 2252 | |||
| 2247 | if (HAS_LOGICAL_RING_ELSQ(i915)) { | 2253 | if (HAS_LOGICAL_RING_ELSQ(i915)) { |
| 2248 | execlists->submit_reg = i915->regs + | 2254 | execlists->submit_reg = i915->regs + |
| 2249 | i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine)); | 2255 | i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine)); |
| @@ -2310,7 +2316,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine) | |||
| 2310 | } | 2316 | } |
| 2311 | 2317 | ||
| 2312 | intel_engine_init_whitelist(engine); | 2318 | intel_engine_init_whitelist(engine); |
| 2313 | intel_engine_init_workarounds(engine); | ||
| 2314 | 2319 | ||
| 2315 | return 0; | 2320 | return 0; |
| 2316 | } | 2321 | } |
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index b8f106d9ecf8..3ac20153705a 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
| @@ -55,7 +55,12 @@ | |||
| 55 | struct opregion_header { | 55 | struct opregion_header { |
| 56 | u8 signature[16]; | 56 | u8 signature[16]; |
| 57 | u32 size; | 57 | u32 size; |
| 58 | u32 opregion_ver; | 58 | struct { |
| 59 | u8 rsvd; | ||
| 60 | u8 revision; | ||
| 61 | u8 minor; | ||
| 62 | u8 major; | ||
| 63 | } __packed over; | ||
| 59 | u8 bios_ver[32]; | 64 | u8 bios_ver[32]; |
| 60 | u8 vbios_ver[16]; | 65 | u8 vbios_ver[16]; |
| 61 | u8 driver_ver[16]; | 66 | u8 driver_ver[16]; |
| @@ -119,7 +124,8 @@ struct opregion_asle { | |||
| 119 | u64 fdss; | 124 | u64 fdss; |
| 120 | u32 fdsp; | 125 | u32 fdsp; |
| 121 | u32 stat; | 126 | u32 stat; |
| 122 | u64 rvda; /* Physical address of raw vbt data */ | 127 | u64 rvda; /* Physical (2.0) or relative from opregion (2.1+) |
| 128 | * address of raw VBT data. */ | ||
| 123 | u32 rvds; /* Size of raw vbt data */ | 129 | u32 rvds; /* Size of raw vbt data */ |
| 124 | u8 rsvd[58]; | 130 | u8 rsvd[58]; |
| 125 | } __packed; | 131 | } __packed; |
| @@ -925,6 +931,11 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) | |||
| 925 | opregion->header = base; | 931 | opregion->header = base; |
| 926 | opregion->lid_state = base + ACPI_CLID; | 932 | opregion->lid_state = base + ACPI_CLID; |
| 927 | 933 | ||
| 934 | DRM_DEBUG_DRIVER("ACPI OpRegion version %u.%u.%u\n", | ||
| 935 | opregion->header->over.major, | ||
| 936 | opregion->header->over.minor, | ||
| 937 | opregion->header->over.revision); | ||
| 938 | |||
| 928 | mboxes = opregion->header->mboxes; | 939 | mboxes = opregion->header->mboxes; |
| 929 | if (mboxes & MBOX_ACPI) { | 940 | if (mboxes & MBOX_ACPI) { |
| 930 | DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); | 941 | DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); |
| @@ -953,11 +964,26 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) | |||
| 953 | if (dmi_check_system(intel_no_opregion_vbt)) | 964 | if (dmi_check_system(intel_no_opregion_vbt)) |
| 954 | goto out; | 965 | goto out; |
| 955 | 966 | ||
| 956 | if (opregion->header->opregion_ver >= 2 && opregion->asle && | 967 | if (opregion->header->over.major >= 2 && opregion->asle && |
| 957 | opregion->asle->rvda && opregion->asle->rvds) { | 968 | opregion->asle->rvda && opregion->asle->rvds) { |
| 958 | opregion->rvda = memremap(opregion->asle->rvda, | 969 | resource_size_t rvda = opregion->asle->rvda; |
| 959 | opregion->asle->rvds, | 970 | |
| 971 | /* | ||
| 972 | * opregion 2.0: rvda is the physical VBT address. | ||
| 973 | * | ||
| 974 | * opregion 2.1+: rvda is unsigned, relative offset from | ||
| 975 | * opregion base, and should never point within opregion. | ||
| 976 | */ | ||
| 977 | if (opregion->header->over.major > 2 || | ||
| 978 | opregion->header->over.minor >= 1) { | ||
| 979 | WARN_ON(rvda < OPREGION_SIZE); | ||
| 980 | |||
| 981 | rvda += asls; | ||
| 982 | } | ||
| 983 | |||
| 984 | opregion->rvda = memremap(rvda, opregion->asle->rvds, | ||
| 960 | MEMREMAP_WB); | 985 | MEMREMAP_WB); |
| 986 | |||
| 961 | vbt = opregion->rvda; | 987 | vbt = opregion->rvda; |
| 962 | vbt_size = opregion->asle->rvds; | 988 | vbt_size = opregion->asle->rvds; |
| 963 | if (intel_bios_is_valid_vbt(vbt, vbt_size)) { | 989 | if (intel_bios_is_valid_vbt(vbt, vbt_size)) { |
| @@ -967,6 +993,8 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) | |||
| 967 | goto out; | 993 | goto out; |
| 968 | } else { | 994 | } else { |
| 969 | DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n"); | 995 | DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n"); |
| 996 | memunmap(opregion->rvda); | ||
| 997 | opregion->rvda = NULL; | ||
| 970 | } | 998 | } |
| 971 | } | 999 | } |
| 972 | 1000 | ||
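The RVDA handling above hinges on the OpRegion version now exposed through the over.{major,minor,revision} fields: in OpRegion 2.0 the ASLE rvda field is a physical address, while from 2.1 onward it is an unsigned offset relative to the OpRegion base and must be rebased before mapping. A condensed sketch of that decision (asls is the physical OpRegion base used elsewhere in this file; the version test is written out explicitly here for clarity):

    /* Sketch: resolve the raw-VBT address for OpRegion 2.0 vs 2.1+. */
    resource_size_t rvda = opregion->asle->rvda;

    if (opregion->header->over.major > 2 ||
        (opregion->header->over.major == 2 && opregion->header->over.minor >= 1))
            rvda += asls;           /* 2.1+: offset from the OpRegion base */
                                    /* 2.0:  already a physical address    */

    opregion->rvda = memremap(rvda, opregion->asle->rvds, MEMREMAP_WB);
    if (!intel_bios_is_valid_vbt(opregion->rvda, opregion->asle->rvds)) {
            memunmap(opregion->rvda);       /* don't keep a mapping of garbage */
            opregion->rvda = NULL;
    }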
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 419e56342523..f71970df9936 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c | |||
| @@ -274,10 +274,16 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) | |||
| 274 | DRM_DEBUG_KMS("eDP panel supports PSR version %x\n", | 274 | DRM_DEBUG_KMS("eDP panel supports PSR version %x\n", |
| 275 | intel_dp->psr_dpcd[0]); | 275 | intel_dp->psr_dpcd[0]); |
| 276 | 276 | ||
| 277 | if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) { | ||
| 278 | DRM_DEBUG_KMS("PSR support not currently available for this panel\n"); | ||
| 279 | return; | ||
| 280 | } | ||
| 281 | |||
| 277 | if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { | 282 | if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { |
| 278 | DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n"); | 283 | DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n"); |
| 279 | return; | 284 | return; |
| 280 | } | 285 | } |
| 286 | |||
| 281 | dev_priv->psr.sink_support = true; | 287 | dev_priv->psr.sink_support = true; |
| 282 | dev_priv->psr.sink_sync_latency = | 288 | dev_priv->psr.sink_sync_latency = |
| 283 | intel_dp_get_sink_sync_latency(intel_dp); | 289 | intel_dp_get_sink_sync_latency(intel_dp); |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 72edaa7ff411..a1a7cc29fdd1 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
| @@ -415,16 +415,17 @@ struct intel_engine_cs { | |||
| 415 | /** | 415 | /** |
| 416 | * @enable_count: Reference count for the enabled samplers. | 416 | * @enable_count: Reference count for the enabled samplers. |
| 417 | * | 417 | * |
| 418 | * Index number corresponds to the bit number from @enable. | 418 | * Index number corresponds to @enum drm_i915_pmu_engine_sample. |
| 419 | */ | 419 | */ |
| 420 | unsigned int enable_count[I915_PMU_SAMPLE_BITS]; | 420 | unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT]; |
| 421 | /** | 421 | /** |
| 422 | * @sample: Counter values for sampling events. | 422 | * @sample: Counter values for sampling events. |
| 423 | * | 423 | * |
| 424 | * Our internal timer stores the current counters in this field. | 424 | * Our internal timer stores the current counters in this field. |
| 425 | * | ||
| 426 | * Index number corresponds to @enum drm_i915_pmu_engine_sample. | ||
| 425 | */ | 427 | */ |
| 426 | #define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1) | 428 | struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT]; |
| 427 | struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX]; | ||
| 428 | } pmu; | 429 | } pmu; |
| 429 | 430 | ||
| 430 | /* | 431 | /* |
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index d2e003d8f3db..5170a0f5fe7b 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
| @@ -494,7 +494,7 @@ skl_program_plane(struct intel_plane *plane, | |||
| 494 | 494 | ||
| 495 | keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); | 495 | keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); |
| 496 | 496 | ||
| 497 | keymsk = key->channel_mask & 0x3ffffff; | 497 | keymsk = key->channel_mask & 0x7ffffff; |
| 498 | if (alpha < 0xff) | 498 | if (alpha < 0xff) |
| 499 | keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; | 499 | keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; |
| 500 | 500 | ||
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 2c5bbe317353..e31e263cf86b 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c | |||
| @@ -643,8 +643,10 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) | |||
| 643 | int bus_format; | 643 | int bus_format; |
| 644 | 644 | ||
| 645 | ret = of_property_read_u32(child, "reg", &i); | 645 | ret = of_property_read_u32(child, "reg", &i); |
| 646 | if (ret || i < 0 || i > 1) | 646 | if (ret || i < 0 || i > 1) { |
| 647 | return -EINVAL; | 647 | ret = -EINVAL; |
| 648 | goto free_child; | ||
| 649 | } | ||
| 648 | 650 | ||
| 649 | if (!of_device_is_available(child)) | 651 | if (!of_device_is_available(child)) |
| 650 | continue; | 652 | continue; |
| @@ -657,7 +659,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) | |||
| 657 | channel = &imx_ldb->channel[i]; | 659 | channel = &imx_ldb->channel[i]; |
| 658 | channel->ldb = imx_ldb; | 660 | channel->ldb = imx_ldb; |
| 659 | channel->chno = i; | 661 | channel->chno = i; |
| 660 | channel->child = child; | ||
| 661 | 662 | ||
| 662 | /* | 663 | /* |
| 663 | * The output port is port@4 with an external 4-port mux or | 664 | * The output port is port@4 with an external 4-port mux or |
| @@ -667,13 +668,13 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) | |||
| 667 | imx_ldb->lvds_mux ? 4 : 2, 0, | 668 | imx_ldb->lvds_mux ? 4 : 2, 0, |
| 668 | &channel->panel, &channel->bridge); | 669 | &channel->panel, &channel->bridge); |
| 669 | if (ret && ret != -ENODEV) | 670 | if (ret && ret != -ENODEV) |
| 670 | return ret; | 671 | goto free_child; |
| 671 | 672 | ||
| 672 | /* panel ddc only if there is no bridge */ | 673 | /* panel ddc only if there is no bridge */ |
| 673 | if (!channel->bridge) { | 674 | if (!channel->bridge) { |
| 674 | ret = imx_ldb_panel_ddc(dev, channel, child); | 675 | ret = imx_ldb_panel_ddc(dev, channel, child); |
| 675 | if (ret) | 676 | if (ret) |
| 676 | return ret; | 677 | goto free_child; |
| 677 | } | 678 | } |
| 678 | 679 | ||
| 679 | bus_format = of_get_bus_format(dev, child); | 680 | bus_format = of_get_bus_format(dev, child); |
| @@ -689,18 +690,26 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) | |||
| 689 | if (bus_format < 0) { | 690 | if (bus_format < 0) { |
| 690 | dev_err(dev, "could not determine data mapping: %d\n", | 691 | dev_err(dev, "could not determine data mapping: %d\n", |
| 691 | bus_format); | 692 | bus_format); |
| 692 | return bus_format; | 693 | ret = bus_format; |
| 694 | goto free_child; | ||
| 693 | } | 695 | } |
| 694 | channel->bus_format = bus_format; | 696 | channel->bus_format = bus_format; |
| 697 | channel->child = child; | ||
| 695 | 698 | ||
| 696 | ret = imx_ldb_register(drm, channel); | 699 | ret = imx_ldb_register(drm, channel); |
| 697 | if (ret) | 700 | if (ret) { |
| 698 | return ret; | 701 | channel->child = NULL; |
| 702 | goto free_child; | ||
| 703 | } | ||
| 699 | } | 704 | } |
| 700 | 705 | ||
| 701 | dev_set_drvdata(dev, imx_ldb); | 706 | dev_set_drvdata(dev, imx_ldb); |
| 702 | 707 | ||
| 703 | return 0; | 708 | return 0; |
| 709 | |||
| 710 | free_child: | ||
| 711 | of_node_put(child); | ||
| 712 | return ret; | ||
| 704 | } | 713 | } |
| 705 | 714 | ||
| 706 | static void imx_ldb_unbind(struct device *dev, struct device *master, | 715 | static void imx_ldb_unbind(struct device *dev, struct device *master, |
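The imx-ldb changes above all serve one reference-counting rule: for_each_child_of_node() holds a reference on the node it hands to the loop body and drops it only when advancing to the next child, so any early exit must of_node_put() the current node itself. A minimal sketch of the rule (do_channel_setup() is a hypothetical stand-in for the per-child work):

    /* Sketch: drop the iterator's reference when leaving the loop early. */
    struct device_node *child;
    int ret;

    for_each_child_of_node(dev->of_node, child) {
            ret = do_channel_setup(dev, child);     /* hypothetical per-child setup */
            if (ret) {
                    of_node_put(child);             /* the loop won't advance and drop it */
                    return ret;
            }
    }
    return 0;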
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index c390924de93d..21e964f6ab5c 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c | |||
| @@ -370,9 +370,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, | |||
| 370 | if (ret) | 370 | if (ret) |
| 371 | return ret; | 371 | return ret; |
| 372 | 372 | ||
| 373 | /* CRTC should be enabled */ | 373 | /* nothing to check when disabling or disabled */ |
| 374 | if (!crtc_state->enable) | 374 | if (!crtc_state->enable) |
| 375 | return -EINVAL; | 375 | return 0; |
| 376 | 376 | ||
| 377 | switch (plane->type) { | 377 | switch (plane->type) { |
| 378 | case DRM_PLANE_TYPE_PRIMARY: | 378 | case DRM_PLANE_TYPE_PRIMARY: |
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c index 75d97f1b2e8f..4f5c67f70c4d 100644 --- a/drivers/gpu/drm/meson/meson_crtc.c +++ b/drivers/gpu/drm/meson/meson_crtc.c | |||
| @@ -46,7 +46,6 @@ struct meson_crtc { | |||
| 46 | struct drm_crtc base; | 46 | struct drm_crtc base; |
| 47 | struct drm_pending_vblank_event *event; | 47 | struct drm_pending_vblank_event *event; |
| 48 | struct meson_drm *priv; | 48 | struct meson_drm *priv; |
| 49 | bool enabled; | ||
| 50 | }; | 49 | }; |
| 51 | #define to_meson_crtc(x) container_of(x, struct meson_crtc, base) | 50 | #define to_meson_crtc(x) container_of(x, struct meson_crtc, base) |
| 52 | 51 | ||
| @@ -82,7 +81,8 @@ static const struct drm_crtc_funcs meson_crtc_funcs = { | |||
| 82 | 81 | ||
| 83 | }; | 82 | }; |
| 84 | 83 | ||
| 85 | static void meson_crtc_enable(struct drm_crtc *crtc) | 84 | static void meson_crtc_atomic_enable(struct drm_crtc *crtc, |
| 85 | struct drm_crtc_state *old_state) | ||
| 86 | { | 86 | { |
| 87 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); | 87 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); |
| 88 | struct drm_crtc_state *crtc_state = crtc->state; | 88 | struct drm_crtc_state *crtc_state = crtc->state; |
| @@ -108,20 +108,6 @@ static void meson_crtc_enable(struct drm_crtc *crtc) | |||
| 108 | 108 | ||
| 109 | drm_crtc_vblank_on(crtc); | 109 | drm_crtc_vblank_on(crtc); |
| 110 | 110 | ||
| 111 | meson_crtc->enabled = true; | ||
| 112 | } | ||
| 113 | |||
| 114 | static void meson_crtc_atomic_enable(struct drm_crtc *crtc, | ||
| 115 | struct drm_crtc_state *old_state) | ||
| 116 | { | ||
| 117 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); | ||
| 118 | struct meson_drm *priv = meson_crtc->priv; | ||
| 119 | |||
| 120 | DRM_DEBUG_DRIVER("\n"); | ||
| 121 | |||
| 122 | if (!meson_crtc->enabled) | ||
| 123 | meson_crtc_enable(crtc); | ||
| 124 | |||
| 125 | priv->viu.osd1_enabled = true; | 111 | priv->viu.osd1_enabled = true; |
| 126 | } | 112 | } |
| 127 | 113 | ||
| @@ -153,8 +139,6 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc, | |||
| 153 | 139 | ||
| 154 | crtc->state->event = NULL; | 140 | crtc->state->event = NULL; |
| 155 | } | 141 | } |
| 156 | |||
| 157 | meson_crtc->enabled = false; | ||
| 158 | } | 142 | } |
| 159 | 143 | ||
| 160 | static void meson_crtc_atomic_begin(struct drm_crtc *crtc, | 144 | static void meson_crtc_atomic_begin(struct drm_crtc *crtc, |
| @@ -163,9 +147,6 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc, | |||
| 163 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); | 147 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); |
| 164 | unsigned long flags; | 148 | unsigned long flags; |
| 165 | 149 | ||
| 166 | if (crtc->state->enable && !meson_crtc->enabled) | ||
| 167 | meson_crtc_enable(crtc); | ||
| 168 | |||
| 169 | if (crtc->state->event) { | 150 | if (crtc->state->event) { |
| 170 | WARN_ON(drm_crtc_vblank_get(crtc) != 0); | 151 | WARN_ON(drm_crtc_vblank_get(crtc) != 0); |
| 171 | 152 | ||
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 3ee4d4a4ecba..12ff47b13668 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c | |||
| @@ -75,6 +75,10 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = { | |||
| 75 | .fb_create = drm_gem_fb_create, | 75 | .fb_create = drm_gem_fb_create, |
| 76 | }; | 76 | }; |
| 77 | 77 | ||
| 78 | static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = { | ||
| 79 | .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, | ||
| 80 | }; | ||
| 81 | |||
| 78 | static irqreturn_t meson_irq(int irq, void *arg) | 82 | static irqreturn_t meson_irq(int irq, void *arg) |
| 79 | { | 83 | { |
| 80 | struct drm_device *dev = arg; | 84 | struct drm_device *dev = arg; |
| @@ -266,6 +270,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) | |||
| 266 | drm->mode_config.max_width = 3840; | 270 | drm->mode_config.max_width = 3840; |
| 267 | drm->mode_config.max_height = 2160; | 271 | drm->mode_config.max_height = 2160; |
| 268 | drm->mode_config.funcs = &meson_mode_config_funcs; | 272 | drm->mode_config.funcs = &meson_mode_config_funcs; |
| 273 | drm->mode_config.helper_private = &meson_mode_config_helpers; | ||
| 269 | 274 | ||
| 270 | /* Hardware Initialization */ | 275 | /* Hardware Initialization */ |
| 271 | 276 | ||
| @@ -388,8 +393,10 @@ static int meson_probe_remote(struct platform_device *pdev, | |||
| 388 | remote_node = of_graph_get_remote_port_parent(ep); | 393 | remote_node = of_graph_get_remote_port_parent(ep); |
| 389 | if (!remote_node || | 394 | if (!remote_node || |
| 390 | remote_node == parent || /* Ignore parent endpoint */ | 395 | remote_node == parent || /* Ignore parent endpoint */ |
| 391 | !of_device_is_available(remote_node)) | 396 | !of_device_is_available(remote_node)) { |
| 397 | of_node_put(remote_node); | ||
| 392 | continue; | 398 | continue; |
| 399 | } | ||
| 393 | 400 | ||
| 394 | count += meson_probe_remote(pdev, match, remote, remote_node); | 401 | count += meson_probe_remote(pdev, match, remote, remote_node); |
| 395 | 402 | ||
| @@ -408,10 +415,13 @@ static int meson_drv_probe(struct platform_device *pdev) | |||
| 408 | 415 | ||
| 409 | for_each_endpoint_of_node(np, ep) { | 416 | for_each_endpoint_of_node(np, ep) { |
| 410 | remote = of_graph_get_remote_port_parent(ep); | 417 | remote = of_graph_get_remote_port_parent(ep); |
| 411 | if (!remote || !of_device_is_available(remote)) | 418 | if (!remote || !of_device_is_available(remote)) { |
| 419 | of_node_put(remote); | ||
| 412 | continue; | 420 | continue; |
| 421 | } | ||
| 413 | 422 | ||
| 414 | count += meson_probe_remote(pdev, &match, np, remote); | 423 | count += meson_probe_remote(pdev, &match, np, remote); |
| 424 | of_node_put(remote); | ||
| 415 | } | 425 | } |
| 416 | 426 | ||
| 417 | if (count && !match) | 427 | if (count && !match) |
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 5beb83d1cf87..ce1b3cc4bf6d 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c | |||
| @@ -944,7 +944,7 @@ static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq) | |||
| 944 | np = dev_pm_opp_get_of_node(opp); | 944 | np = dev_pm_opp_get_of_node(opp); |
| 945 | 945 | ||
| 946 | if (np) { | 946 | if (np) { |
| 947 | of_property_read_u32(np, "qcom,level", &val); | 947 | of_property_read_u32(np, "opp-level", &val); |
| 948 | of_node_put(np); | 948 | of_node_put(np); |
| 949 | } | 949 | } |
| 950 | 950 | ||
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 2e4372ef17a3..2cfee1a4fe0b 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c | |||
| @@ -765,7 +765,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
| 765 | adreno_gpu->rev = config->rev; | 765 | adreno_gpu->rev = config->rev; |
| 766 | 766 | ||
| 767 | adreno_gpu_config.ioname = "kgsl_3d0_reg_memory"; | 767 | adreno_gpu_config.ioname = "kgsl_3d0_reg_memory"; |
| 768 | adreno_gpu_config.irqname = "kgsl_3d0_irq"; | ||
| 769 | 768 | ||
| 770 | adreno_gpu_config.va_start = SZ_16M; | 769 | adreno_gpu_config.va_start = SZ_16M; |
| 771 | adreno_gpu_config.va_end = 0xffffffff; | 770 | adreno_gpu_config.va_end = 0xffffffff; |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index fd75870eb17f..6aefcd6db46b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | |||
| @@ -365,19 +365,6 @@ static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane, | |||
| 365 | &pdpu->pipe_qos_cfg); | 365 | &pdpu->pipe_qos_cfg); |
| 366 | } | 366 | } |
| 367 | 367 | ||
| 368 | static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) | ||
| 369 | { | ||
| 370 | struct dpu_plane *pdpu = to_dpu_plane(plane); | ||
| 371 | struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); | ||
| 372 | |||
| 373 | if (!pdpu->is_rt_pipe) | ||
| 374 | return; | ||
| 375 | |||
| 376 | pm_runtime_get_sync(&dpu_kms->pdev->dev); | ||
| 377 | _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL); | ||
| 378 | pm_runtime_put_sync(&dpu_kms->pdev->dev); | ||
| 379 | } | ||
| 380 | |||
| 381 | /** | 368 | /** |
| 382 | * _dpu_plane_set_ot_limit - set OT limit for the given plane | 369 | * _dpu_plane_set_ot_limit - set OT limit for the given plane |
| 383 | * @plane: Pointer to drm plane | 370 | * @plane: Pointer to drm plane |
| @@ -1248,6 +1235,19 @@ static void dpu_plane_reset(struct drm_plane *plane) | |||
| 1248 | } | 1235 | } |
| 1249 | 1236 | ||
| 1250 | #ifdef CONFIG_DEBUG_FS | 1237 | #ifdef CONFIG_DEBUG_FS |
| 1238 | static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) | ||
| 1239 | { | ||
| 1240 | struct dpu_plane *pdpu = to_dpu_plane(plane); | ||
| 1241 | struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); | ||
| 1242 | |||
| 1243 | if (!pdpu->is_rt_pipe) | ||
| 1244 | return; | ||
| 1245 | |||
| 1246 | pm_runtime_get_sync(&dpu_kms->pdev->dev); | ||
| 1247 | _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL); | ||
| 1248 | pm_runtime_put_sync(&dpu_kms->pdev->dev); | ||
| 1249 | } | ||
| 1250 | |||
| 1251 | static ssize_t _dpu_plane_danger_read(struct file *file, | 1251 | static ssize_t _dpu_plane_danger_read(struct file *file, |
| 1252 | char __user *buff, size_t count, loff_t *ppos) | 1252 | char __user *buff, size_t count, loff_t *ppos) |
| 1253 | { | 1253 | { |
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 9cd6a96c6bf2..927e5d86f7c1 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h | |||
| @@ -250,7 +250,8 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace, | |||
| 250 | void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, | 250 | void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, |
| 251 | struct msm_gem_vma *vma); | 251 | struct msm_gem_vma *vma); |
| 252 | int msm_gem_map_vma(struct msm_gem_address_space *aspace, | 252 | int msm_gem_map_vma(struct msm_gem_address_space *aspace, |
| 253 | struct msm_gem_vma *vma, struct sg_table *sgt, int npages); | 253 | struct msm_gem_vma *vma, int prot, |
| 254 | struct sg_table *sgt, int npages); | ||
| 254 | void msm_gem_close_vma(struct msm_gem_address_space *aspace, | 255 | void msm_gem_close_vma(struct msm_gem_address_space *aspace, |
| 255 | struct msm_gem_vma *vma); | 256 | struct msm_gem_vma *vma); |
| 256 | 257 | ||
| @@ -333,6 +334,7 @@ void msm_gem_kernel_put(struct drm_gem_object *bo, | |||
| 333 | struct drm_gem_object *msm_gem_import(struct drm_device *dev, | 334 | struct drm_gem_object *msm_gem_import(struct drm_device *dev, |
| 334 | struct dma_buf *dmabuf, struct sg_table *sgt); | 335 | struct dma_buf *dmabuf, struct sg_table *sgt); |
| 335 | 336 | ||
| 337 | __printf(2, 3) | ||
| 336 | void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...); | 338 | void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...); |
| 337 | 339 | ||
| 338 | int msm_framebuffer_prepare(struct drm_framebuffer *fb, | 340 | int msm_framebuffer_prepare(struct drm_framebuffer *fb, |
| @@ -396,12 +398,14 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m); | |||
| 396 | int msm_debugfs_late_init(struct drm_device *dev); | 398 | int msm_debugfs_late_init(struct drm_device *dev); |
| 397 | int msm_rd_debugfs_init(struct drm_minor *minor); | 399 | int msm_rd_debugfs_init(struct drm_minor *minor); |
| 398 | void msm_rd_debugfs_cleanup(struct msm_drm_private *priv); | 400 | void msm_rd_debugfs_cleanup(struct msm_drm_private *priv); |
| 401 | __printf(3, 4) | ||
| 399 | void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, | 402 | void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, |
| 400 | const char *fmt, ...); | 403 | const char *fmt, ...); |
| 401 | int msm_perf_debugfs_init(struct drm_minor *minor); | 404 | int msm_perf_debugfs_init(struct drm_minor *minor); |
| 402 | void msm_perf_debugfs_cleanup(struct msm_drm_private *priv); | 405 | void msm_perf_debugfs_cleanup(struct msm_drm_private *priv); |
| 403 | #else | 406 | #else |
| 404 | static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; } | 407 | static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; } |
| 408 | __printf(3, 4) | ||
| 405 | static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, | 409 | static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, |
| 406 | const char *fmt, ...) {} | 410 | const char *fmt, ...) {} |
| 407 | static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {} | 411 | static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {} |
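The __printf(m, n) annotations added above expand to the compiler's format(printf, m, n) attribute, telling it which parameter is the format string and where the variadic arguments begin, so mismatched callers are flagged at build time; the stub under #else carries the same annotation so the check still runs when debugfs is disabled. A small illustration (the call is hypothetical; the prototype matches the header above):

    __printf(2, 3)
    void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);

    /* Now this mismatch is a compile-time warning instead of runtime garbage: */
    msm_gem_object_set_name(obj, "ring%d", "zero");   /* %d given a string -> -Wformat */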
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 51a95da694d8..c8886d3071fa 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
| @@ -391,6 +391,10 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj, | |||
| 391 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | 391 | struct msm_gem_object *msm_obj = to_msm_bo(obj); |
| 392 | struct msm_gem_vma *vma; | 392 | struct msm_gem_vma *vma; |
| 393 | struct page **pages; | 393 | struct page **pages; |
| 394 | int prot = IOMMU_READ; | ||
| 395 | |||
| 396 | if (!(msm_obj->flags & MSM_BO_GPU_READONLY)) | ||
| 397 | prot |= IOMMU_WRITE; | ||
| 394 | 398 | ||
| 395 | WARN_ON(!mutex_is_locked(&msm_obj->lock)); | 399 | WARN_ON(!mutex_is_locked(&msm_obj->lock)); |
| 396 | 400 | ||
| @@ -405,8 +409,8 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj, | |||
| 405 | if (IS_ERR(pages)) | 409 | if (IS_ERR(pages)) |
| 406 | return PTR_ERR(pages); | 410 | return PTR_ERR(pages); |
| 407 | 411 | ||
| 408 | return msm_gem_map_vma(aspace, vma, msm_obj->sgt, | 412 | return msm_gem_map_vma(aspace, vma, prot, |
| 409 | obj->size >> PAGE_SHIFT); | 413 | msm_obj->sgt, obj->size >> PAGE_SHIFT); |
| 410 | } | 414 | } |
| 411 | 415 | ||
| 412 | /* get iova and pin it. Should have a matching put */ | 416 | /* get iova and pin it. Should have a matching put */ |
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 557360788084..49c04829cf34 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c | |||
| @@ -68,7 +68,8 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, | |||
| 68 | 68 | ||
| 69 | int | 69 | int |
| 70 | msm_gem_map_vma(struct msm_gem_address_space *aspace, | 70 | msm_gem_map_vma(struct msm_gem_address_space *aspace, |
| 71 | struct msm_gem_vma *vma, struct sg_table *sgt, int npages) | 71 | struct msm_gem_vma *vma, int prot, |
| 72 | struct sg_table *sgt, int npages) | ||
| 72 | { | 73 | { |
| 73 | unsigned size = npages << PAGE_SHIFT; | 74 | unsigned size = npages << PAGE_SHIFT; |
| 74 | int ret = 0; | 75 | int ret = 0; |
| @@ -86,7 +87,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace, | |||
| 86 | 87 | ||
| 87 | if (aspace->mmu) | 88 | if (aspace->mmu) |
| 88 | ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, | 89 | ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, |
| 89 | size, IOMMU_READ | IOMMU_WRITE); | 90 | size, prot); |
| 90 | 91 | ||
| 91 | if (ret) | 92 | if (ret) |
| 92 | vma->mapped = false; | 93 | vma->mapped = false; |
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 5f3eff304355..10babd18e286 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c | |||
| @@ -900,7 +900,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
| 900 | } | 900 | } |
| 901 | 901 | ||
| 902 | /* Get Interrupt: */ | 902 | /* Get Interrupt: */ |
| 903 | gpu->irq = platform_get_irq_byname(pdev, config->irqname); | 903 | gpu->irq = platform_get_irq(pdev, 0); |
| 904 | if (gpu->irq < 0) { | 904 | if (gpu->irq < 0) { |
| 905 | ret = gpu->irq; | 905 | ret = gpu->irq; |
| 906 | DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret); | 906 | DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret); |
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index efb49bb64191..ca17086f72c9 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h | |||
| @@ -31,7 +31,6 @@ struct msm_gpu_state; | |||
| 31 | 31 | ||
| 32 | struct msm_gpu_config { | 32 | struct msm_gpu_config { |
| 33 | const char *ioname; | 33 | const char *ioname; |
| 34 | const char *irqname; | ||
| 35 | uint64_t va_start; | 34 | uint64_t va_start; |
| 36 | uint64_t va_end; | 35 | uint64_t va_end; |
| 37 | unsigned int nr_rings; | 36 | unsigned int nr_rings; |
| @@ -63,7 +62,7 @@ struct msm_gpu_funcs { | |||
| 63 | struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu); | 62 | struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu); |
| 64 | void (*recover)(struct msm_gpu *gpu); | 63 | void (*recover)(struct msm_gpu *gpu); |
| 65 | void (*destroy)(struct msm_gpu *gpu); | 64 | void (*destroy)(struct msm_gpu *gpu); |
| 66 | #ifdef CONFIG_DEBUG_FS | 65 | #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) |
| 67 | /* show GPU status in debugfs: */ | 66 | /* show GPU status in debugfs: */ |
| 68 | void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state, | 67 | void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state, |
| 69 | struct drm_printer *p); | 68 | struct drm_printer *p); |
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index 90e9d0a48dc0..d21172933d92 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c | |||
| @@ -115,7 +115,9 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz) | |||
| 115 | char *fptr = &fifo->buf[fifo->head]; | 115 | char *fptr = &fifo->buf[fifo->head]; |
| 116 | int n; | 116 | int n; |
| 117 | 117 | ||
| 118 | wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); | 118 | wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open); |
| 119 | if (!rd->open) | ||
| 120 | return; | ||
| 119 | 121 | ||
| 120 | /* Note that smp_load_acquire() is not strictly required | 122 | /* Note that smp_load_acquire() is not strictly required |
| 121 | * as CIRC_SPACE_TO_END() does not access the tail more | 123 | * as CIRC_SPACE_TO_END() does not access the tail more |
| @@ -213,7 +215,10 @@ out: | |||
| 213 | static int rd_release(struct inode *inode, struct file *file) | 215 | static int rd_release(struct inode *inode, struct file *file) |
| 214 | { | 216 | { |
| 215 | struct msm_rd_state *rd = inode->i_private; | 217 | struct msm_rd_state *rd = inode->i_private; |
| 218 | |||
| 216 | rd->open = false; | 219 | rd->open = false; |
| 220 | wake_up_all(&rd->fifo_event); | ||
| 221 | |||
| 217 | return 0; | 222 | return 0; |
| 218 | } | 223 | } |
| 219 | 224 | ||
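The msm_rd fix above follows the general rule that a wait_event() condition needs a shutdown clause paired with a wake-up in the teardown path: a writer blocked on fifo space would otherwise sleep forever once the debugfs reader closes the file and nothing drains the fifo again. In sketch form (symbols as in the hunks above):

    /* Writer side: also give up when the reader has gone away. */
    wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open);
    if (!rd->open)
            return;                         /* nobody will ever make space again */

    /* Reader's release(): flip the flag, then kick every sleeping writer. */
    rd->open = false;
    wake_up_all(&rd->fifo_event);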
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 5f5be6368aed..c7a94c94dbf3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c | |||
| @@ -253,6 +253,9 @@ nouveau_backlight_init(struct drm_connector *connector) | |||
| 253 | case NV_DEVICE_INFO_V0_FERMI: | 253 | case NV_DEVICE_INFO_V0_FERMI: |
| 254 | case NV_DEVICE_INFO_V0_KEPLER: | 254 | case NV_DEVICE_INFO_V0_KEPLER: |
| 255 | case NV_DEVICE_INFO_V0_MAXWELL: | 255 | case NV_DEVICE_INFO_V0_MAXWELL: |
| 256 | case NV_DEVICE_INFO_V0_PASCAL: | ||
| 257 | case NV_DEVICE_INFO_V0_VOLTA: | ||
| 258 | case NV_DEVICE_INFO_V0_TURING: | ||
| 256 | ret = nv50_backlight_init(nv_encoder, &props, &ops); | 259 | ret = nv50_backlight_init(nv_encoder, &props, &ops); |
| 257 | break; | 260 | break; |
| 258 | default: | 261 | default: |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index bfbc9341e0c2..d9edb5785813 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | |||
| @@ -2435,6 +2435,38 @@ nv140_chipset = { | |||
| 2435 | }; | 2435 | }; |
| 2436 | 2436 | ||
| 2437 | static const struct nvkm_device_chip | 2437 | static const struct nvkm_device_chip |
| 2438 | nv162_chipset = { | ||
| 2439 | .name = "TU102", | ||
| 2440 | .bar = tu104_bar_new, | ||
| 2441 | .bios = nvkm_bios_new, | ||
| 2442 | .bus = gf100_bus_new, | ||
| 2443 | .devinit = tu104_devinit_new, | ||
| 2444 | .fault = tu104_fault_new, | ||
| 2445 | .fb = gv100_fb_new, | ||
| 2446 | .fuse = gm107_fuse_new, | ||
| 2447 | .gpio = gk104_gpio_new, | ||
| 2448 | .i2c = gm200_i2c_new, | ||
| 2449 | .ibus = gm200_ibus_new, | ||
| 2450 | .imem = nv50_instmem_new, | ||
| 2451 | .ltc = gp102_ltc_new, | ||
| 2452 | .mc = tu104_mc_new, | ||
| 2453 | .mmu = tu104_mmu_new, | ||
| 2454 | .pci = gp100_pci_new, | ||
| 2455 | .pmu = gp102_pmu_new, | ||
| 2456 | .therm = gp100_therm_new, | ||
| 2457 | .timer = gk20a_timer_new, | ||
| 2458 | .top = gk104_top_new, | ||
| 2459 | .ce[0] = tu104_ce_new, | ||
| 2460 | .ce[1] = tu104_ce_new, | ||
| 2461 | .ce[2] = tu104_ce_new, | ||
| 2462 | .ce[3] = tu104_ce_new, | ||
| 2463 | .ce[4] = tu104_ce_new, | ||
| 2464 | .disp = tu104_disp_new, | ||
| 2465 | .dma = gv100_dma_new, | ||
| 2466 | .fifo = tu104_fifo_new, | ||
| 2467 | }; | ||
| 2468 | |||
| 2469 | static const struct nvkm_device_chip | ||
| 2438 | nv164_chipset = { | 2470 | nv164_chipset = { |
| 2439 | .name = "TU104", | 2471 | .name = "TU104", |
| 2440 | .bar = tu104_bar_new, | 2472 | .bar = tu104_bar_new, |
| @@ -2950,6 +2982,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, | |||
| 2950 | case 0x138: device->chip = &nv138_chipset; break; | 2982 | case 0x138: device->chip = &nv138_chipset; break; |
| 2951 | case 0x13b: device->chip = &nv13b_chipset; break; | 2983 | case 0x13b: device->chip = &nv13b_chipset; break; |
| 2952 | case 0x140: device->chip = &nv140_chipset; break; | 2984 | case 0x140: device->chip = &nv140_chipset; break; |
| 2985 | case 0x162: device->chip = &nv162_chipset; break; | ||
| 2953 | case 0x164: device->chip = &nv164_chipset; break; | 2986 | case 0x164: device->chip = &nv164_chipset; break; |
| 2954 | case 0x166: device->chip = &nv166_chipset; break; | 2987 | case 0x166: device->chip = &nv166_chipset; break; |
| 2955 | default: | 2988 | default: |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c index 816ccaedfc73..8675613e142b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <engine/falcon.h> | 22 | #include <engine/falcon.h> |
| 23 | 23 | ||
| 24 | #include <core/gpuobj.h> | 24 | #include <core/gpuobj.h> |
| 25 | #include <subdev/mc.h> | ||
| 25 | #include <subdev/timer.h> | 26 | #include <subdev/timer.h> |
| 26 | #include <engine/fifo.h> | 27 | #include <engine/fifo.h> |
| 27 | 28 | ||
| @@ -107,8 +108,10 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend) | |||
| 107 | } | 108 | } |
| 108 | } | 109 | } |
| 109 | 110 | ||
| 110 | nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000); | 111 | if (nvkm_mc_enabled(device, engine->subdev.index)) { |
| 111 | nvkm_wr32(device, base + 0x014, 0xffffffff); | 112 | nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000); |
| 113 | nvkm_wr32(device, base + 0x014, 0xffffffff); | ||
| 114 | } | ||
| 112 | return 0; | 115 | return 0; |
| 113 | } | 116 | } |
| 114 | 117 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c index 3695cde669f8..07914e36939e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c | |||
| @@ -132,11 +132,12 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode) | |||
| 132 | duty = nvkm_therm_update_linear(therm); | 132 | duty = nvkm_therm_update_linear(therm); |
| 133 | break; | 133 | break; |
| 134 | case NVBIOS_THERM_FAN_OTHER: | 134 | case NVBIOS_THERM_FAN_OTHER: |
| 135 | if (therm->cstate) | 135 | if (therm->cstate) { |
| 136 | duty = therm->cstate; | 136 | duty = therm->cstate; |
| 137 | else | 137 | poll = false; |
| 138 | } else { | ||
| 138 | duty = nvkm_therm_update_linear_fallback(therm); | 139 | duty = nvkm_therm_update_linear_fallback(therm); |
| 139 | poll = false; | 140 | } |
| 140 | break; | 141 | break; |
| 141 | } | 142 | } |
| 142 | immd = false; | 143 | immd = false; |
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 00a9c2ab9e6c..64fb788b6647 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c | |||
| @@ -1406,7 +1406,7 @@ static void dsi_pll_disable(struct dss_pll *pll) | |||
| 1406 | 1406 | ||
| 1407 | static int dsi_dump_dsi_clocks(struct seq_file *s, void *p) | 1407 | static int dsi_dump_dsi_clocks(struct seq_file *s, void *p) |
| 1408 | { | 1408 | { |
| 1409 | struct dsi_data *dsi = p; | 1409 | struct dsi_data *dsi = s->private; |
| 1410 | struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo; | 1410 | struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo; |
| 1411 | enum dss_clk_source dispc_clk_src, dsi_clk_src; | 1411 | enum dss_clk_source dispc_clk_src, dsi_clk_src; |
| 1412 | int dsi_module = dsi->module_id; | 1412 | int dsi_module = dsi->module_id; |
| @@ -1467,7 +1467,7 @@ static int dsi_dump_dsi_clocks(struct seq_file *s, void *p) | |||
| 1467 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | 1467 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS |
| 1468 | static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) | 1468 | static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) |
| 1469 | { | 1469 | { |
| 1470 | struct dsi_data *dsi = p; | 1470 | struct dsi_data *dsi = s->private; |
| 1471 | unsigned long flags; | 1471 | unsigned long flags; |
| 1472 | struct dsi_irq_stats stats; | 1472 | struct dsi_irq_stats stats; |
| 1473 | 1473 | ||
| @@ -1558,7 +1558,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) | |||
| 1558 | 1558 | ||
| 1559 | static int dsi_dump_dsi_regs(struct seq_file *s, void *p) | 1559 | static int dsi_dump_dsi_regs(struct seq_file *s, void *p) |
| 1560 | { | 1560 | { |
| 1561 | struct dsi_data *dsi = p; | 1561 | struct dsi_data *dsi = s->private; |
| 1562 | 1562 | ||
| 1563 | if (dsi_runtime_get(dsi)) | 1563 | if (dsi_runtime_get(dsi)) |
| 1564 | return 0; | 1564 | return 0; |
| @@ -4751,6 +4751,17 @@ static int dsi_set_config(struct omap_dss_device *dssdev, | |||
| 4751 | dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH; | 4751 | dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH; |
| 4752 | dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW; | 4752 | dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW; |
| 4753 | dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH; | 4753 | dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH; |
| 4754 | /* | ||
| 4755 | * HACK: These flags should be handled through the omap_dss_device bus | ||
| 4756 | * flags, but this will only be possible when the DSI encoder will be | ||
| 4757 | * converted to the omapdrm-managed encoder model. | ||
| 4758 | */ | ||
| 4759 | dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE; | ||
| 4760 | dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE; | ||
| 4761 | dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW; | ||
| 4762 | dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH; | ||
| 4763 | dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE; | ||
| 4764 | dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE; | ||
| 4754 | 4765 | ||
| 4755 | dss_mgr_set_timings(&dsi->output, &dsi->vm); | 4766 | dss_mgr_set_timings(&dsi->output, &dsi->vm); |
| 4756 | 4767 | ||
| @@ -5083,15 +5094,15 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) | |||
| 5083 | 5094 | ||
| 5084 | snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1); | 5095 | snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1); |
| 5085 | dsi->debugfs.regs = dss_debugfs_create_file(dss, name, | 5096 | dsi->debugfs.regs = dss_debugfs_create_file(dss, name, |
| 5086 | dsi_dump_dsi_regs, &dsi); | 5097 | dsi_dump_dsi_regs, dsi); |
| 5087 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | 5098 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS |
| 5088 | snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1); | 5099 | snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1); |
| 5089 | dsi->debugfs.irqs = dss_debugfs_create_file(dss, name, | 5100 | dsi->debugfs.irqs = dss_debugfs_create_file(dss, name, |
| 5090 | dsi_dump_dsi_irqs, &dsi); | 5101 | dsi_dump_dsi_irqs, dsi); |
| 5091 | #endif | 5102 | #endif |
| 5092 | snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1); | 5103 | snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1); |
| 5093 | dsi->debugfs.clks = dss_debugfs_create_file(dss, name, | 5104 | dsi->debugfs.clks = dss_debugfs_create_file(dss, name, |
| 5094 | dsi_dump_dsi_clocks, &dsi); | 5105 | dsi_dump_dsi_clocks, dsi); |
| 5095 | 5106 | ||
| 5096 | return 0; | 5107 | return 0; |
| 5097 | } | 5108 | } |
| @@ -5104,8 +5115,6 @@ static void dsi_unbind(struct device *dev, struct device *master, void *data) | |||
| 5104 | dss_debugfs_remove_file(dsi->debugfs.irqs); | 5115 | dss_debugfs_remove_file(dsi->debugfs.irqs); |
| 5105 | dss_debugfs_remove_file(dsi->debugfs.regs); | 5116 | dss_debugfs_remove_file(dsi->debugfs.regs); |
| 5106 | 5117 | ||
| 5107 | of_platform_depopulate(dev); | ||
| 5108 | |||
| 5109 | WARN_ON(dsi->scp_clk_refcount > 0); | 5118 | WARN_ON(dsi->scp_clk_refcount > 0); |
| 5110 | 5119 | ||
| 5111 | dss_pll_unregister(&dsi->pll); | 5120 | dss_pll_unregister(&dsi->pll); |
| @@ -5457,6 +5466,8 @@ static int dsi_remove(struct platform_device *pdev) | |||
| 5457 | 5466 | ||
| 5458 | dsi_uninit_output(dsi); | 5467 | dsi_uninit_output(dsi); |
| 5459 | 5468 | ||
| 5469 | of_platform_depopulate(&pdev->dev); | ||
| 5470 | |||
| 5460 | pm_runtime_disable(&pdev->dev); | 5471 | pm_runtime_disable(&pdev->dev); |
| 5461 | 5472 | ||
| 5462 | if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) { | 5473 | if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) { |
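The dsi debugfs changes above fix a paired mistake: the dump callbacks treated their void *p argument as the dsi pointer, while the pointer handed to dss_debugfs_create_file() is delivered through seq_file::private, and dsi_bind() was additionally registering &dsi, the address of a local pointer variable that is dead as soon as the function returns. A sketch of the corrected pairing (dss_debugfs_create_file() is the omapdrm helper used above; the seq_printf() line is illustrative):

    /* Registration: pass the object itself, never the address of a local. */
    dsi->debugfs.clks = dss_debugfs_create_file(dss, name,
                                                dsi_dump_dsi_clocks, dsi);

    /* Callback: the pointer given at registration comes back in s->private. */
    static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
    {
            struct dsi_data *dsi = s->private;

            seq_printf(s, "- DSI%d PLL -\n", dsi->module_id + 1);
            return 0;
    }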
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 13c8a662f9b4..ccb090f3ab30 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c | |||
| @@ -250,14 +250,10 @@ static struct drm_driver qxl_driver = { | |||
| 250 | #if defined(CONFIG_DEBUG_FS) | 250 | #if defined(CONFIG_DEBUG_FS) |
| 251 | .debugfs_init = qxl_debugfs_init, | 251 | .debugfs_init = qxl_debugfs_init, |
| 252 | #endif | 252 | #endif |
| 253 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | ||
| 254 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | ||
| 255 | .gem_prime_export = drm_gem_prime_export, | 253 | .gem_prime_export = drm_gem_prime_export, |
| 256 | .gem_prime_import = drm_gem_prime_import, | 254 | .gem_prime_import = drm_gem_prime_import, |
| 257 | .gem_prime_pin = qxl_gem_prime_pin, | 255 | .gem_prime_pin = qxl_gem_prime_pin, |
| 258 | .gem_prime_unpin = qxl_gem_prime_unpin, | 256 | .gem_prime_unpin = qxl_gem_prime_unpin, |
| 259 | .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table, | ||
| 260 | .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table, | ||
| 261 | .gem_prime_vmap = qxl_gem_prime_vmap, | 257 | .gem_prime_vmap = qxl_gem_prime_vmap, |
| 262 | .gem_prime_vunmap = qxl_gem_prime_vunmap, | 258 | .gem_prime_vunmap = qxl_gem_prime_vunmap, |
| 263 | .gem_prime_mmap = qxl_gem_prime_mmap, | 259 | .gem_prime_mmap = qxl_gem_prime_mmap, |
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c index a55dece118b2..df65d3c1a7b8 100644 --- a/drivers/gpu/drm/qxl/qxl_prime.c +++ b/drivers/gpu/drm/qxl/qxl_prime.c | |||
| @@ -38,20 +38,6 @@ void qxl_gem_prime_unpin(struct drm_gem_object *obj) | |||
| 38 | WARN_ONCE(1, "not implemented"); | 38 | WARN_ONCE(1, "not implemented"); |
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj) | ||
| 42 | { | ||
| 43 | WARN_ONCE(1, "not implemented"); | ||
| 44 | return ERR_PTR(-ENOSYS); | ||
| 45 | } | ||
| 46 | |||
| 47 | struct drm_gem_object *qxl_gem_prime_import_sg_table( | ||
| 48 | struct drm_device *dev, struct dma_buf_attachment *attach, | ||
| 49 | struct sg_table *table) | ||
| 50 | { | ||
| 51 | WARN_ONCE(1, "not implemented"); | ||
| 52 | return ERR_PTR(-ENOSYS); | ||
| 53 | } | ||
| 54 | |||
| 55 | void *qxl_gem_prime_vmap(struct drm_gem_object *obj) | 41 | void *qxl_gem_prime_vmap(struct drm_gem_object *obj) |
| 56 | { | 42 | { |
| 57 | WARN_ONCE(1, "not implemented"); | 43 | WARN_ONCE(1, "not implemented"); |
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index d587779a80b4..a97294ac96d5 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c | |||
| @@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev) | |||
| 5676 | u16 data_offset, size; | 5676 | u16 data_offset, size; |
| 5677 | u8 frev, crev; | 5677 | u8 frev, crev; |
| 5678 | struct ci_power_info *pi; | 5678 | struct ci_power_info *pi; |
| 5679 | enum pci_bus_speed speed_cap; | 5679 | enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN; |
| 5680 | struct pci_dev *root = rdev->pdev->bus->self; | 5680 | struct pci_dev *root = rdev->pdev->bus->self; |
| 5681 | int ret; | 5681 | int ret; |
| 5682 | 5682 | ||
| @@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev) | |||
| 5685 | return -ENOMEM; | 5685 | return -ENOMEM; |
| 5686 | rdev->pm.dpm.priv = pi; | 5686 | rdev->pm.dpm.priv = pi; |
| 5687 | 5687 | ||
| 5688 | speed_cap = pcie_get_speed_cap(root); | 5688 | if (!pci_is_root_bus(rdev->pdev->bus)) |
| 5689 | speed_cap = pcie_get_speed_cap(root); | ||
| 5689 | if (speed_cap == PCI_SPEED_UNKNOWN) { | 5690 | if (speed_cap == PCI_SPEED_UNKNOWN) { |
| 5690 | pi->sys_pcie_mask = 0; | 5691 | pi->sys_pcie_mask = 0; |
| 5691 | } else { | 5692 | } else { |
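Both radeon DPM hunks in this series guard the same corner case: when the GPU sits directly on a root bus (typical for a device passed through to a guest), pdev->bus->self is NULL, so pcie_get_speed_cap() must not be called on it; defaulting speed_cap to PCI_SPEED_UNKNOWN then falls through to the existing "no PCIe mask" path. In sketch form (symbols as in ci_dpm_init() above):

    struct pci_dev *root = rdev->pdev->bus->self;           /* NULL on a root bus */
    enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;

    if (!pci_is_root_bus(rdev->pdev->bus))
            speed_cap = pcie_get_speed_cap(root);           /* safe: root != NULL here */

    if (speed_cap == PCI_SPEED_UNKNOWN)
            pi->sys_pcie_mask = 0;                          /* conservative fallback */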
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index dec1e081f529..6a8fb6fd183c 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
| @@ -172,6 +172,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) | |||
| 172 | } | 172 | } |
| 173 | 173 | ||
| 174 | if (radeon_is_px(dev)) { | 174 | if (radeon_is_px(dev)) { |
| 175 | dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP); | ||
| 175 | pm_runtime_use_autosuspend(dev->dev); | 176 | pm_runtime_use_autosuspend(dev->dev); |
| 176 | pm_runtime_set_autosuspend_delay(dev->dev, 5000); | 177 | pm_runtime_set_autosuspend_delay(dev->dev, 5000); |
| 177 | pm_runtime_set_active(dev->dev); | 178 | pm_runtime_set_active(dev->dev); |
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 8fb60b3af015..0a785ef0ab66 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
| @@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev) | |||
| 6899 | struct ni_power_info *ni_pi; | 6899 | struct ni_power_info *ni_pi; |
| 6900 | struct si_power_info *si_pi; | 6900 | struct si_power_info *si_pi; |
| 6901 | struct atom_clock_dividers dividers; | 6901 | struct atom_clock_dividers dividers; |
| 6902 | enum pci_bus_speed speed_cap; | 6902 | enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN; |
| 6903 | struct pci_dev *root = rdev->pdev->bus->self; | 6903 | struct pci_dev *root = rdev->pdev->bus->self; |
| 6904 | int ret; | 6904 | int ret; |
| 6905 | 6905 | ||
| @@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev) | |||
| 6911 | eg_pi = &ni_pi->eg; | 6911 | eg_pi = &ni_pi->eg; |
| 6912 | pi = &eg_pi->rv7xx; | 6912 | pi = &eg_pi->rv7xx; |
| 6913 | 6913 | ||
| 6914 | speed_cap = pcie_get_speed_cap(root); | 6914 | if (!pci_is_root_bus(rdev->pdev->bus)) |
| 6915 | speed_cap = pcie_get_speed_cap(root); | ||
| 6915 | if (speed_cap == PCI_SPEED_UNKNOWN) { | 6916 | if (speed_cap == PCI_SPEED_UNKNOWN) { |
| 6916 | si_pi->sys_pcie_mask = 0; | 6917 | si_pi->sys_pcie_mask = 0; |
| 6917 | } else { | 6918 | } else { |
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c index 96ac1458a59c..c0351abf83a3 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.c +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c | |||
| @@ -1,17 +1,8 @@ | |||
| 1 | //SPDX-License-Identifier: GPL-2.0+ | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* | 2 | /* |
| 3 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd | 3 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd |
| 4 | * Author: | 4 | * Author: |
| 5 | * Sandy Huang <hjc@rock-chips.com> | 5 | * Sandy Huang <hjc@rock-chips.com> |
| 6 | * | ||
| 7 | * This software is licensed under the terms of the GNU General Public | ||
| 8 | * License version 2, as published by the Free Software Foundation, and | ||
| 9 | * may be copied, distributed, and modified under those terms. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, | ||
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | * GNU General Public License for more details. | ||
| 15 | */ | 6 | */ |
| 16 | 7 | ||
| 17 | #include <drm/drmP.h> | 8 | #include <drm/drmP.h> |
| @@ -113,8 +104,10 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev, | |||
| 113 | child_count++; | 104 | child_count++; |
| 114 | ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id, | 105 | ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id, |
| 115 | &panel, &bridge); | 106 | &panel, &bridge); |
| 116 | if (!ret) | 107 | if (!ret) { |
| 108 | of_node_put(endpoint); | ||
| 117 | break; | 109 | break; |
| 110 | } | ||
| 118 | } | 111 | } |
| 119 | 112 | ||
| 120 | of_node_put(port); | 113 | of_node_put(port); |
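The second rockchip_rgb hunk plugs an OF node leak: the surrounding loop is one of the for_each_*_of_node() iterators, which take a reference on the node they hand to the loop body and drop it again when advancing to the next iteration, so breaking out early leaves that last reference held unless the body releases it itself — hence the of_node_put(endpoint) added in front of the break. A small illustration of the rule with invented names; for_each_child_of_node(), of_device_is_compatible() and of_node_put() are the real APIs:

#include <linux/of.h>

/*
 * Early-break rule for OF iterators: the macro holds a reference on the
 * node it passes to the body, so a body that leaves the loop early must
 * drop that reference itself.
 */
static bool example_port_has_child(struct device_node *port,
                                   const char *compatible)
{
        struct device_node *child;
        bool found = false;

        for_each_child_of_node(port, child) {
                if (of_device_is_compatible(child, compatible)) {
                        found = true;
                        of_node_put(child);     /* balance the iterator's reference */
                        break;
                }
        }

        return found;
}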
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h index 38b52e63b2b0..27b9635124bc 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.h +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h | |||
| @@ -1,17 +1,8 @@ | |||
| 1 | //SPDX-License-Identifier: GPL-2.0+ | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | /* | 2 | /* |
| 3 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd | 3 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd |
| 4 | * Author: | 4 | * Author: |
| 5 | * Sandy Huang <hjc@rock-chips.com> | 5 | * Sandy Huang <hjc@rock-chips.com> |
| 6 | * | ||
| 7 | * This software is licensed under the terms of the GNU General Public | ||
| 8 | * License version 2, as published by the Free Software Foundation, and | ||
| 9 | * may be copied, distributed, and modified under those terms. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, | ||
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | * GNU General Public License for more details. | ||
| 15 | */ | 6 | */ |
| 16 | 7 | ||
| 17 | #ifdef CONFIG_ROCKCHIP_RGB | 8 | #ifdef CONFIG_ROCKCHIP_RGB |
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 4463d3826ecb..e2942c9a11a7 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c | |||
| @@ -440,13 +440,10 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) | |||
| 440 | 440 | ||
| 441 | while ((entity->dependency = | 441 | while ((entity->dependency = |
| 442 | sched->ops->dependency(sched_job, entity))) { | 442 | sched->ops->dependency(sched_job, entity))) { |
| 443 | trace_drm_sched_job_wait_dep(sched_job, entity->dependency); | ||
| 443 | 444 | ||
| 444 | if (drm_sched_entity_add_dependency_cb(entity)) { | 445 | if (drm_sched_entity_add_dependency_cb(entity)) |
| 445 | |||
| 446 | trace_drm_sched_job_wait_dep(sched_job, | ||
| 447 | entity->dependency); | ||
| 448 | return NULL; | 446 | return NULL; |
| 449 | } | ||
| 450 | } | 447 | } |
| 451 | 448 | ||
| 452 | /* skip jobs from entity that marked guilty */ | 449 | /* skip jobs from entity that marked guilty */ |
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index 9e9255ee59cd..a021bab11a4f 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c | |||
| @@ -786,17 +786,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv, | |||
| 786 | remote = of_graph_get_remote_port_parent(ep); | 786 | remote = of_graph_get_remote_port_parent(ep); |
| 787 | if (!remote) | 787 | if (!remote) |
| 788 | continue; | 788 | continue; |
| 789 | of_node_put(remote); | ||
| 789 | 790 | ||
| 790 | /* does this node match any registered engines? */ | 791 | /* does this node match any registered engines? */ |
| 791 | list_for_each_entry(frontend, &drv->frontend_list, list) { | 792 | list_for_each_entry(frontend, &drv->frontend_list, list) { |
| 792 | if (remote == frontend->node) { | 793 | if (remote == frontend->node) { |
| 793 | of_node_put(remote); | ||
| 794 | of_node_put(port); | 794 | of_node_put(port); |
| 795 | of_node_put(ep); | ||
| 795 | return frontend; | 796 | return frontend; |
| 796 | } | 797 | } |
| 797 | } | 798 | } |
| 798 | } | 799 | } |
| 799 | 800 | of_node_put(port); | |
| 800 | return ERR_PTR(-EINVAL); | 801 | return ERR_PTR(-EINVAL); |
| 801 | } | 802 | } |
| 802 | 803 | ||
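sun4i_backend_find_frontend() only ever compares the remote port parent against the registered frontends by pointer, so the reference taken by of_graph_get_remote_port_parent() can be dropped right after the NULL check instead of on every exit path; the hunk also releases the endpoint node on the successful return and the port node before the final error return, so every exit leaves the refcounts balanced. A short sketch of the "take, read the pointer, put, then compare" idea with invented names; of_graph_get_remote_port_parent() and of_node_put() are the real APIs:

#include <linux/of_graph.h>

/*
 * When a node reference is needed only for a pointer comparison, it can
 * be dropped as soon as the pointer has been obtained; the comparison
 * itself never dereferences the node.
 */
static bool example_remote_matches(struct device_node *endpoint,
                                   struct device_node *expected)
{
        struct device_node *remote = of_graph_get_remote_port_parent(endpoint);

        if (!remote)
                return false;

        of_node_put(remote);    /* only the pointer value is used below */

        return remote == expected;
}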
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index 061d2e0d9011..416da5376701 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c | |||
| @@ -92,6 +92,8 @@ static void sun4i_hdmi_disable(struct drm_encoder *encoder) | |||
| 92 | val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG); | 92 | val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG); |
| 93 | val &= ~SUN4I_HDMI_VID_CTRL_ENABLE; | 93 | val &= ~SUN4I_HDMI_VID_CTRL_ENABLE; |
| 94 | writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG); | 94 | writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG); |
| 95 | |||
| 96 | clk_disable_unprepare(hdmi->tmds_clk); | ||
| 95 | } | 97 | } |
| 96 | 98 | ||
| 97 | static void sun4i_hdmi_enable(struct drm_encoder *encoder) | 99 | static void sun4i_hdmi_enable(struct drm_encoder *encoder) |
| @@ -102,6 +104,8 @@ static void sun4i_hdmi_enable(struct drm_encoder *encoder) | |||
| 102 | 104 | ||
| 103 | DRM_DEBUG_DRIVER("Enabling the HDMI Output\n"); | 105 | DRM_DEBUG_DRIVER("Enabling the HDMI Output\n"); |
| 104 | 106 | ||
| 107 | clk_prepare_enable(hdmi->tmds_clk); | ||
| 108 | |||
| 105 | sun4i_hdmi_setup_avi_infoframes(hdmi, mode); | 109 | sun4i_hdmi_setup_avi_infoframes(hdmi, mode); |
| 106 | val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI); | 110 | val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI); |
| 107 | val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END); | 111 | val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END); |
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 0420f5c978b9..cf45d0f940f9 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c | |||
| @@ -761,6 +761,7 @@ static int sun4i_tcon_init_clocks(struct device *dev, | |||
| 761 | return PTR_ERR(tcon->sclk0); | 761 | return PTR_ERR(tcon->sclk0); |
| 762 | } | 762 | } |
| 763 | } | 763 | } |
| 764 | clk_prepare_enable(tcon->sclk0); | ||
| 764 | 765 | ||
| 765 | if (tcon->quirks->has_channel_1) { | 766 | if (tcon->quirks->has_channel_1) { |
| 766 | tcon->sclk1 = devm_clk_get(dev, "tcon-ch1"); | 767 | tcon->sclk1 = devm_clk_get(dev, "tcon-ch1"); |
| @@ -775,6 +776,7 @@ static int sun4i_tcon_init_clocks(struct device *dev, | |||
| 775 | 776 | ||
| 776 | static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon) | 777 | static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon) |
| 777 | { | 778 | { |
| 779 | clk_disable_unprepare(tcon->sclk0); | ||
| 778 | clk_disable_unprepare(tcon->clk); | 780 | clk_disable_unprepare(tcon->clk); |
| 779 | } | 781 | } |
| 780 | 782 | ||
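The sun4i_hdmi_enc and sun4i_tcon hunks are both about keeping clock enable counts balanced: the HDMI encoder now ungates its TMDS clock in the enable path and gates it again on disable, and the TCON enables the channel-0 clock it acquires in sun4i_tcon_init_clocks() and releases it in sun4i_tcon_free_clocks(). The underlying rule is simply that every clk_prepare_enable() needs a matching clk_disable_unprepare() on the opposite path. A deliberately minimal sketch of that pairing with invented names; clk_prepare_enable() and clk_disable_unprepare() are the real APIs:

#include <linux/clk.h>

struct example_encoder {
        struct clk *pixel_clk;
};

/* Enable path: prepare and ungate the clock before programming the block. */
static int example_encoder_enable(struct example_encoder *enc)
{
        return clk_prepare_enable(enc->pixel_clk);
}

/* Disable path: the exact mirror, keeping the prepare/enable counts balanced. */
static void example_encoder_disable(struct example_encoder *enc)
{
        clk_disable_unprepare(enc->pixel_clk);
}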
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index f7f32a885af7..2d1aaca49105 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c | |||
| @@ -127,14 +127,10 @@ static struct drm_driver driver = { | |||
| 127 | #if defined(CONFIG_DEBUG_FS) | 127 | #if defined(CONFIG_DEBUG_FS) |
| 128 | .debugfs_init = virtio_gpu_debugfs_init, | 128 | .debugfs_init = virtio_gpu_debugfs_init, |
| 129 | #endif | 129 | #endif |
| 130 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | ||
| 131 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | ||
| 132 | .gem_prime_export = drm_gem_prime_export, | 130 | .gem_prime_export = drm_gem_prime_export, |
| 133 | .gem_prime_import = drm_gem_prime_import, | 131 | .gem_prime_import = drm_gem_prime_import, |
| 134 | .gem_prime_pin = virtgpu_gem_prime_pin, | 132 | .gem_prime_pin = virtgpu_gem_prime_pin, |
| 135 | .gem_prime_unpin = virtgpu_gem_prime_unpin, | 133 | .gem_prime_unpin = virtgpu_gem_prime_unpin, |
| 136 | .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table, | ||
| 137 | .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table, | ||
| 138 | .gem_prime_vmap = virtgpu_gem_prime_vmap, | 134 | .gem_prime_vmap = virtgpu_gem_prime_vmap, |
| 139 | .gem_prime_vunmap = virtgpu_gem_prime_vunmap, | 135 | .gem_prime_vunmap = virtgpu_gem_prime_vunmap, |
| 140 | .gem_prime_mmap = virtgpu_gem_prime_mmap, | 136 | .gem_prime_mmap = virtgpu_gem_prime_mmap, |
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 1deb41d42ea4..0c15000f926e 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h | |||
| @@ -372,10 +372,6 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait); | |||
| 372 | /* virtgpu_prime.c */ | 372 | /* virtgpu_prime.c */ |
| 373 | int virtgpu_gem_prime_pin(struct drm_gem_object *obj); | 373 | int virtgpu_gem_prime_pin(struct drm_gem_object *obj); |
| 374 | void virtgpu_gem_prime_unpin(struct drm_gem_object *obj); | 374 | void virtgpu_gem_prime_unpin(struct drm_gem_object *obj); |
| 375 | struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); | ||
| 376 | struct drm_gem_object *virtgpu_gem_prime_import_sg_table( | ||
| 377 | struct drm_device *dev, struct dma_buf_attachment *attach, | ||
| 378 | struct sg_table *sgt); | ||
| 379 | void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); | 375 | void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); |
| 380 | void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); | 376 | void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); |
| 381 | int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, | 377 | int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, |
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index 86ce0ae93f59..c59ec34c80a5 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c | |||
| @@ -39,20 +39,6 @@ void virtgpu_gem_prime_unpin(struct drm_gem_object *obj) | |||
| 39 | WARN_ONCE(1, "not implemented"); | 39 | WARN_ONCE(1, "not implemented"); |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) | ||
| 43 | { | ||
| 44 | WARN_ONCE(1, "not implemented"); | ||
| 45 | return ERR_PTR(-ENODEV); | ||
| 46 | } | ||
| 47 | |||
| 48 | struct drm_gem_object *virtgpu_gem_prime_import_sg_table( | ||
| 49 | struct drm_device *dev, struct dma_buf_attachment *attach, | ||
| 50 | struct sg_table *table) | ||
| 51 | { | ||
| 52 | WARN_ONCE(1, "not implemented"); | ||
| 53 | return ERR_PTR(-ENODEV); | ||
| 54 | } | ||
| 55 | |||
| 56 | void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) | 42 | void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) |
| 57 | { | 43 | { |
| 58 | struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); | 44 | struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); |
diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c index 9d9e8146db90..d7b409a3c0f8 100644 --- a/drivers/gpu/drm/vkms/vkms_crc.c +++ b/drivers/gpu/drm/vkms/vkms_crc.c | |||
| @@ -1,4 +1,5 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
| 2 | |||
| 2 | #include "vkms_drv.h" | 3 | #include "vkms_drv.h" |
| 3 | #include <linux/crc32.h> | 4 | #include <linux/crc32.h> |
| 4 | #include <drm/drm_atomic.h> | 5 | #include <drm/drm_atomic.h> |
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index 177bbcb38306..eb56ee893761 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c | |||
| @@ -1,10 +1,4 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
| 2 | /* | ||
| 3 | * This program is free software; you can redistribute it and/or modify | ||
| 4 | * it under the terms of the GNU General Public License as published by | ||
| 5 | * the Free Software Foundation; either version 2 of the License, or | ||
| 6 | * (at your option) any later version. | ||
| 7 | */ | ||
| 8 | 2 | ||
| 9 | #include "vkms_drv.h" | 3 | #include "vkms_drv.h" |
| 10 | #include <drm/drm_atomic_helper.h> | 4 | #include <drm/drm_atomic_helper.h> |
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 83087877565c..7dcbecb5fac2 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c | |||
| @@ -1,9 +1,4 @@ | |||
| 1 | /* | 1 | // SPDX-License-Identifier: GPL-2.0+ |
| 2 | * This program is free software; you can redistribute it and/or modify | ||
| 3 | * it under the terms of the GNU General Public License as published by | ||
| 4 | * the Free Software Foundation; either version 2 of the License, or | ||
| 5 | * (at your option) any later version. | ||
| 6 | */ | ||
| 7 | 2 | ||
| 8 | /** | 3 | /** |
| 9 | * DOC: vkms (Virtual Kernel Modesetting) | 4 | * DOC: vkms (Virtual Kernel Modesetting) |
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index e4469cd3d254..81f1cfbeb936 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h | |||
| @@ -1,3 +1,5 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
| 2 | |||
| 1 | #ifndef _VKMS_DRV_H_ | 3 | #ifndef _VKMS_DRV_H_ |
| 2 | #define _VKMS_DRV_H_ | 4 | #define _VKMS_DRV_H_ |
| 3 | 5 | ||
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c index 80311daed47a..138b0bb325cf 100644 --- a/drivers/gpu/drm/vkms/vkms_gem.c +++ b/drivers/gpu/drm/vkms/vkms_gem.c | |||
| @@ -1,10 +1,4 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
| 2 | /* | ||
| 3 | * This program is free software; you can redistribute it and/or modify | ||
| 4 | * it under the terms of the GNU General Public License as published by | ||
| 5 | * the Free Software Foundation; either version 2 of the License, or | ||
| 6 | * (at your option) any later version. | ||
| 7 | */ | ||
| 8 | 2 | ||
| 9 | #include <linux/shmem_fs.h> | 3 | #include <linux/shmem_fs.h> |
| 10 | 4 | ||
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 271a0eb9042c..4173e4f48334 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c | |||
| @@ -1,10 +1,4 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
| 2 | /* | ||
| 3 | * This program is free software; you can redistribute it and/or modify | ||
| 4 | * it under the terms of the GNU General Public License as published by | ||
| 5 | * the Free Software Foundation; either version 2 of the License, or | ||
| 6 | * (at your option) any later version. | ||
| 7 | */ | ||
| 8 | 2 | ||
| 9 | #include "vkms_drv.h" | 3 | #include "vkms_drv.h" |
| 10 | #include <drm/drm_crtc_helper.h> | 4 | #include <drm/drm_crtc_helper.h> |
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index 418817600ad1..0e67d2d42f0c 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c | |||
| @@ -1,10 +1,4 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
| 2 | /* | ||
| 3 | * This program is free software; you can redistribute it and/or modify | ||
| 4 | * it under the terms of the GNU General Public License as published by | ||
| 5 | * the Free Software Foundation; either version 2 of the License, or | ||
| 6 | * (at your option) any later version. | ||
| 7 | */ | ||
| 8 | 2 | ||
| 9 | #include "vkms_drv.h" | 3 | #include "vkms_drv.h" |
| 10 | #include <drm/drm_plane_helper.h> | 4 | #include <drm/drm_plane_helper.h> |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 25afb1d594e3..7ef5dcb06104 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | **************************************************************************/ | 26 | **************************************************************************/ |
| 27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
| 28 | #include <linux/console.h> | 28 | #include <linux/console.h> |
| 29 | #include <linux/dma-mapping.h> | ||
| 29 | 30 | ||
| 30 | #include <drm/drmP.h> | 31 | #include <drm/drmP.h> |
| 31 | #include "vmwgfx_drv.h" | 32 | #include "vmwgfx_drv.h" |
| @@ -34,7 +35,6 @@ | |||
| 34 | #include <drm/ttm/ttm_placement.h> | 35 | #include <drm/ttm/ttm_placement.h> |
| 35 | #include <drm/ttm/ttm_bo_driver.h> | 36 | #include <drm/ttm/ttm_bo_driver.h> |
| 36 | #include <drm/ttm/ttm_module.h> | 37 | #include <drm/ttm/ttm_module.h> |
| 37 | #include <linux/intel-iommu.h> | ||
| 38 | 38 | ||
| 39 | #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" | 39 | #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" |
| 40 | #define VMWGFX_CHIP_SVGAII 0 | 40 | #define VMWGFX_CHIP_SVGAII 0 |
| @@ -546,6 +546,21 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv) | |||
| 546 | } | 546 | } |
| 547 | 547 | ||
| 548 | /** | 548 | /** |
| 549 | * vmw_assume_iommu - Figure out whether coherent dma-remapping might be | ||
| 550 | * taking place. | ||
| 551 | * @dev: Pointer to the struct drm_device. | ||
| 552 | * | ||
| 553 | * Return: true if iommu present, false otherwise. | ||
| 554 | */ | ||
| 555 | static bool vmw_assume_iommu(struct drm_device *dev) | ||
| 556 | { | ||
| 557 | const struct dma_map_ops *ops = get_dma_ops(dev->dev); | ||
| 558 | |||
| 559 | return !dma_is_direct(ops) && ops && | ||
| 560 | ops->map_page != dma_direct_map_page; | ||
| 561 | } | ||
| 562 | |||
| 563 | /** | ||
| 549 | * vmw_dma_select_mode - Determine how DMA mappings should be set up for this | 564 | * vmw_dma_select_mode - Determine how DMA mappings should be set up for this |
| 550 | * system. | 565 | * system. |
| 551 | * | 566 | * |
| @@ -565,55 +580,27 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv) | |||
| 565 | [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", | 580 | [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", |
| 566 | [vmw_dma_map_populate] = "Keeping DMA mappings.", | 581 | [vmw_dma_map_populate] = "Keeping DMA mappings.", |
| 567 | [vmw_dma_map_bind] = "Giving up DMA mappings early."}; | 582 | [vmw_dma_map_bind] = "Giving up DMA mappings early."}; |
| 568 | #ifdef CONFIG_X86 | ||
| 569 | const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev); | ||
| 570 | 583 | ||
| 571 | #ifdef CONFIG_INTEL_IOMMU | 584 | if (vmw_force_coherent) |
| 572 | if (intel_iommu_enabled) { | 585 | dev_priv->map_mode = vmw_dma_alloc_coherent; |
| 586 | else if (vmw_assume_iommu(dev_priv->dev)) | ||
| 573 | dev_priv->map_mode = vmw_dma_map_populate; | 587 | dev_priv->map_mode = vmw_dma_map_populate; |
| 574 | goto out_fixup; | 588 | else if (!vmw_force_iommu) |
| 575 | } | ||
| 576 | #endif | ||
| 577 | |||
| 578 | if (!(vmw_force_iommu || vmw_force_coherent)) { | ||
| 579 | dev_priv->map_mode = vmw_dma_phys; | 589 | dev_priv->map_mode = vmw_dma_phys; |
| 580 | DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); | 590 | else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl()) |
| 581 | return 0; | ||
| 582 | } | ||
| 583 | |||
| 584 | dev_priv->map_mode = vmw_dma_map_populate; | ||
| 585 | |||
| 586 | if (dma_ops && dma_ops->sync_single_for_cpu) | ||
| 587 | dev_priv->map_mode = vmw_dma_alloc_coherent; | 591 | dev_priv->map_mode = vmw_dma_alloc_coherent; |
| 588 | #ifdef CONFIG_SWIOTLB | 592 | else |
| 589 | if (swiotlb_nr_tbl() == 0) | ||
| 590 | dev_priv->map_mode = vmw_dma_map_populate; | 593 | dev_priv->map_mode = vmw_dma_map_populate; |
| 591 | #endif | ||
| 592 | 594 | ||
| 593 | #ifdef CONFIG_INTEL_IOMMU | 595 | if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu) |
| 594 | out_fixup: | ||
| 595 | #endif | ||
| 596 | if (dev_priv->map_mode == vmw_dma_map_populate && | ||
| 597 | vmw_restrict_iommu) | ||
| 598 | dev_priv->map_mode = vmw_dma_map_bind; | 596 | dev_priv->map_mode = vmw_dma_map_bind; |
| 599 | 597 | ||
| 600 | if (vmw_force_coherent) | 598 | /* No TTM coherent page pool? FIXME: Ask TTM instead! */ |
| 601 | dev_priv->map_mode = vmw_dma_alloc_coherent; | 599 | if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) && |
| 602 | 600 | (dev_priv->map_mode == vmw_dma_alloc_coherent)) | |
| 603 | #if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU) | ||
| 604 | /* | ||
| 605 | * No coherent page pool | ||
| 606 | */ | ||
| 607 | if (dev_priv->map_mode == vmw_dma_alloc_coherent) | ||
| 608 | return -EINVAL; | 601 | return -EINVAL; |
| 609 | #endif | ||
| 610 | |||
| 611 | #else /* CONFIG_X86 */ | ||
| 612 | dev_priv->map_mode = vmw_dma_map_populate; | ||
| 613 | #endif /* CONFIG_X86 */ | ||
| 614 | 602 | ||
| 615 | DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); | 603 | DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); |
| 616 | |||
| 617 | return 0; | 604 | return 0; |
| 618 | } | 605 | } |
| 619 | 606 | ||
| @@ -625,24 +612,20 @@ out_fixup: | |||
| 625 | * With 32-bit we can only handle 32 bit PFNs. Optionally set that | 612 | * With 32-bit we can only handle 32 bit PFNs. Optionally set that |
| 626 | * restriction also for 64-bit systems. | 613 | * restriction also for 64-bit systems. |
| 627 | */ | 614 | */ |
| 628 | #ifdef CONFIG_INTEL_IOMMU | ||
| 629 | static int vmw_dma_masks(struct vmw_private *dev_priv) | 615 | static int vmw_dma_masks(struct vmw_private *dev_priv) |
| 630 | { | 616 | { |
| 631 | struct drm_device *dev = dev_priv->dev; | 617 | struct drm_device *dev = dev_priv->dev; |
| 618 | int ret = 0; | ||
| 632 | 619 | ||
| 633 | if (intel_iommu_enabled && | 620 | ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)); |
| 621 | if (dev_priv->map_mode != vmw_dma_phys && | ||
| 634 | (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { | 622 | (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { |
| 635 | DRM_INFO("Restricting DMA addresses to 44 bits.\n"); | 623 | DRM_INFO("Restricting DMA addresses to 44 bits.\n"); |
| 636 | return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); | 624 | return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44)); |
| 637 | } | 625 | } |
| 638 | return 0; | 626 | |
| 639 | } | 627 | return ret; |
| 640 | #else | ||
| 641 | static int vmw_dma_masks(struct vmw_private *dev_priv) | ||
| 642 | { | ||
| 643 | return 0; | ||
| 644 | } | 628 | } |
| 645 | #endif | ||
| 646 | 629 | ||
| 647 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | 630 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) |
| 648 | { | 631 | { |
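The vmwgfx_drv.c rewrite drops the x86- and intel-iommu-specific branches from vmw_dma_select_mode() and replaces them with one heuristic, vmw_assume_iommu(), which inspects the device's dma_map_ops: if the ops exist, are not the direct-mapping ones and map_page is not dma_direct_map_page, some form of coherent DMA remapping is assumed. Mode selection then reads as a simple priority ladder — vmw_force_coherent wins, then the assumed-IOMMU case keeps vmw_dma_map_populate, then plain physical addressing is used unless vmw_force_iommu is set, then coherent TTM pages if SWIOTLB is built in and active, with map_populate as the fallback (optionally demoted to map_bind by vmw_restrict_iommu). vmw_dma_masks() likewise loses its CONFIG_INTEL_IOMMU split: it now always asks for 64-bit streaming and coherent masks and only narrows to 44 bits when DMA mappings are actually in use (map mode is not vmw_dma_phys) on a 32-bit kernel or when vmw_restrict_dma_mask is set. A hedged sketch of that mask-narrowing idea; the function name and the boolean knob are placeholders, while dma_set_mask_and_coherent() and DMA_BIT_MASK() are the real APIs:

#include <linux/dma-mapping.h>

/*
 * Start from the widest mask and narrow it only when a known limitation
 * applies, keeping the streaming and coherent masks in sync by always
 * going through dma_set_mask_and_coherent().
 */
static int example_set_dma_masks(struct device *dev, bool restrict_to_44bit)
{
        int ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));

        if (ret)
                return ret;

        if (restrict_to_44bit)
                ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(44));

        return ret;
}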
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index f2d13a72c05d..88b8178d4687 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
| @@ -3570,7 +3570,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv, | |||
| 3570 | *p_fence = NULL; | 3570 | *p_fence = NULL; |
| 3571 | } | 3571 | } |
| 3572 | 3572 | ||
| 3573 | return 0; | 3573 | return ret; |
| 3574 | } | 3574 | } |
| 3575 | 3575 | ||
| 3576 | /** | 3576 | /** |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index b351fb5214d3..ed2f67822f45 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
| @@ -1646,7 +1646,7 @@ static int vmw_kms_check_topology(struct drm_device *dev, | |||
| 1646 | struct drm_connector_state *conn_state; | 1646 | struct drm_connector_state *conn_state; |
| 1647 | struct vmw_connector_state *vmw_conn_state; | 1647 | struct vmw_connector_state *vmw_conn_state; |
| 1648 | 1648 | ||
| 1649 | if (!du->pref_active) { | 1649 | if (!du->pref_active && new_crtc_state->enable) { |
| 1650 | ret = -EINVAL; | 1650 | ret = -EINVAL; |
| 1651 | goto clean; | 1651 | goto clean; |
| 1652 | } | 1652 | } |
| @@ -2554,8 +2554,8 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv, | |||
| 2554 | user_fence_rep) | 2554 | user_fence_rep) |
| 2555 | { | 2555 | { |
| 2556 | struct vmw_fence_obj *fence = NULL; | 2556 | struct vmw_fence_obj *fence = NULL; |
| 2557 | uint32_t handle; | 2557 | uint32_t handle = 0; |
| 2558 | int ret; | 2558 | int ret = 0; |
| 2559 | 2559 | ||
| 2560 | if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) || | 2560 | if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) || |
| 2561 | out_fence) | 2561 | out_fence) |
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 474b00e19697..0a7d4395d427 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c | |||
| @@ -898,8 +898,8 @@ static struct ipu_devtype ipu_type_imx51 = { | |||
| 898 | .cpmem_ofs = 0x1f000000, | 898 | .cpmem_ofs = 0x1f000000, |
| 899 | .srm_ofs = 0x1f040000, | 899 | .srm_ofs = 0x1f040000, |
| 900 | .tpm_ofs = 0x1f060000, | 900 | .tpm_ofs = 0x1f060000, |
| 901 | .csi0_ofs = 0x1f030000, | 901 | .csi0_ofs = 0x1e030000, |
| 902 | .csi1_ofs = 0x1f038000, | 902 | .csi1_ofs = 0x1e038000, |
| 903 | .ic_ofs = 0x1e020000, | 903 | .ic_ofs = 0x1e020000, |
| 904 | .disp0_ofs = 0x1e040000, | 904 | .disp0_ofs = 0x1e040000, |
| 905 | .disp1_ofs = 0x1e048000, | 905 | .disp1_ofs = 0x1e048000, |
| @@ -914,8 +914,8 @@ static struct ipu_devtype ipu_type_imx53 = { | |||
| 914 | .cpmem_ofs = 0x07000000, | 914 | .cpmem_ofs = 0x07000000, |
| 915 | .srm_ofs = 0x07040000, | 915 | .srm_ofs = 0x07040000, |
| 916 | .tpm_ofs = 0x07060000, | 916 | .tpm_ofs = 0x07060000, |
| 917 | .csi0_ofs = 0x07030000, | 917 | .csi0_ofs = 0x06030000, |
| 918 | .csi1_ofs = 0x07038000, | 918 | .csi1_ofs = 0x06038000, |
| 919 | .ic_ofs = 0x06020000, | 919 | .ic_ofs = 0x06020000, |
| 920 | .disp0_ofs = 0x06040000, | 920 | .disp0_ofs = 0x06040000, |
| 921 | .disp1_ofs = 0x06048000, | 921 | .disp1_ofs = 0x06048000, |
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c index 2f8db9d62551..4a28f3fbb0a2 100644 --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/ipu-v3/ipu-pre.c | |||
| @@ -106,6 +106,7 @@ struct ipu_pre { | |||
| 106 | void *buffer_virt; | 106 | void *buffer_virt; |
| 107 | bool in_use; | 107 | bool in_use; |
| 108 | unsigned int safe_window_end; | 108 | unsigned int safe_window_end; |
| 109 | unsigned int last_bufaddr; | ||
| 109 | }; | 110 | }; |
| 110 | 111 | ||
| 111 | static DEFINE_MUTEX(ipu_pre_list_mutex); | 112 | static DEFINE_MUTEX(ipu_pre_list_mutex); |
| @@ -185,6 +186,7 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width, | |||
| 185 | 186 | ||
| 186 | writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); | 187 | writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); |
| 187 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); | 188 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); |
| 189 | pre->last_bufaddr = bufaddr; | ||
| 188 | 190 | ||
| 189 | val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) | | 191 | val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) | |
| 190 | IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) | | 192 | IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) | |
| @@ -242,7 +244,11 @@ void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) | |||
| 242 | unsigned short current_yblock; | 244 | unsigned short current_yblock; |
| 243 | u32 val; | 245 | u32 val; |
| 244 | 246 | ||
| 247 | if (bufaddr == pre->last_bufaddr) | ||
| 248 | return; | ||
| 249 | |||
| 245 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); | 250 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); |
| 251 | pre->last_bufaddr = bufaddr; | ||
| 246 | 252 | ||
| 247 | do { | 253 | do { |
| 248 | if (time_after(jiffies, timeout)) { | 254 | if (time_after(jiffies, timeout)) { |
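ipu_pre_update() now remembers the last buffer address it programmed and returns early when asked to switch to the same address again, so the PRE is not told to start a redundant double-buffer update (and the function skips the safe-window busy-wait that follows) when nothing actually changed; ipu_pre_configure() seeds the cached value so the first genuine update is never mistaken for a repeat. A hedged sketch of the cache-and-skip pattern with invented names; only writel() and the __iomem annotation are the real kernel interfaces:

#include <linux/io.h>

struct example_scanout {
        void __iomem    *next_buf_reg;  /* register latching the next buffer address */
        u32             last_bufaddr;   /* last address actually written */
};

/*
 * Skip the hardware update entirely when the new address equals the one
 * already latched, and record what was written so the next call can make
 * the same comparison.
 */
static void example_scanout_update(struct example_scanout *s, u32 bufaddr)
{
        if (bufaddr == s->last_bufaddr)
                return;

        writel(bufaddr, s->next_buf_reg);
        s->last_bufaddr = bufaddr;
}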
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig index b677e5d524e6..d5f1d8e1c6f8 100644 --- a/drivers/gpu/vga/Kconfig +++ b/drivers/gpu/vga/Kconfig | |||
| @@ -21,6 +21,7 @@ config VGA_SWITCHEROO | |||
| 21 | bool "Laptop Hybrid Graphics - GPU switching support" | 21 | bool "Laptop Hybrid Graphics - GPU switching support" |
| 22 | depends on X86 | 22 | depends on X86 |
| 23 | depends on ACPI | 23 | depends on ACPI |
| 24 | depends on PCI | ||
| 24 | select VGA_ARB | 25 | select VGA_ARB |
| 25 | help | 26 | help |
| 26 | Many laptops released in 2008/9/10 have two GPUs with a multiplexer | 27 | Many laptops released in 2008/9/10 have two GPUs with a multiplexer |
