Diffstat (limited to 'drivers/gpu')
22 files changed, 198 insertions, 110 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 6896dec97fc7..0ed41a9d2d77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1686,7 +1686,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 		effective_mode &= ~S_IWUSR;
 
 	if ((adev->flags & AMD_IS_APU) &&
-	    (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
+	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
+	     attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
 	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
 	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
 		return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 71913a18d142..a38e0fb4a6fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -38,6 +38,7 @@
 #include "amdgpu_gem.h"
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
+#include <linux/dma-fence-array.h>
 
 /**
  * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
@@ -187,6 +188,48 @@ error:
 	return ERR_PTR(ret);
 }
 
+static int
+__reservation_object_make_exclusive(struct reservation_object *obj)
+{
+	struct dma_fence **fences;
+	unsigned int count;
+	int r;
+
+	if (!reservation_object_get_list(obj)) /* no shared fences to convert */
+		return 0;
+
+	r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
+	if (r)
+		return r;
+
+	if (count == 0) {
+		/* Now that was unexpected. */
+	} else if (count == 1) {
+		reservation_object_add_excl_fence(obj, fences[0]);
+		dma_fence_put(fences[0]);
+		kfree(fences);
+	} else {
+		struct dma_fence_array *array;
+
+		array = dma_fence_array_create(count, fences,
+					       dma_fence_context_alloc(1), 0,
+					       false);
+		if (!array)
+			goto err_fences_put;
+
+		reservation_object_add_excl_fence(obj, &array->base);
+		dma_fence_put(&array->base);
+	}
+
+	return 0;
+
+err_fences_put:
+	while (count--)
+		dma_fence_put(fences[count]);
+	kfree(fences);
+	return -ENOMEM;
+}
+
 /**
  * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
  * @dma_buf: Shared DMA buffer
@@ -218,16 +261,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
 
 	if (attach->dev->driver != adev->dev->driver) {
 		/*
-		 * Wait for all shared fences to complete before we switch to future
-		 * use of exclusive fence on this prime shared bo.
+		 * We only create shared fences for internal use, but importers
+		 * of the dmabuf rely on exclusive fences for implicitly
+		 * tracking write hazards. As any of the current fences may
+		 * correspond to a write, we need to convert all existing
+		 * fences on the reservation object into a single exclusive
+		 * fence.
 		 */
-		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
-							true, false,
-							MAX_SCHEDULE_TIMEOUT);
-		if (unlikely(r < 0)) {
-			DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
+		r = __reservation_object_make_exclusive(bo->tbo.resv);
+		if (r)
 			goto error_unreserve;
-		}
 	}
 
 	/* pin buffer into GTT */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index d2ea5ce2cefb..7c108e687683 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -3363,14 +3363,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
 			 struct amdgpu_task_info *task_info)
 {
 	struct amdgpu_vm *vm;
+	unsigned long flags;
 
-	spin_lock(&adev->vm_manager.pasid_lock);
+	spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
 
 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
 	if (vm)
 		*task_info = vm->task_info;
 
-	spin_unlock(&adev->vm_manager.pasid_lock);
+	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 4cd31a276dcd..186db182f924 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -93,7 +93,20 @@ static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
 static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
 							 bool enable)
 {
+	u32 tmp = 0;
 
+	if (enable) {
+		tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
+		      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
+		      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);
+
+		WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
+			     lower_32_bits(adev->doorbell.base));
+		WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
+			     upper_32_bits(adev->doorbell.base));
+	}
+
+	WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
 }
 
 static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8849b74078d6..9b639974c70c 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -729,11 +729,13 @@ static int soc15_common_early_init(void *handle)
 	case CHIP_RAVEN:
 		adev->asic_funcs = &soc15_asic_funcs;
 		if (adev->rev_id >= 0x8)
-			adev->external_rev_id = adev->rev_id + 0x81;
+			adev->external_rev_id = adev->rev_id + 0x79;
 		else if (adev->pdev->device == 0x15d8)
 			adev->external_rev_id = adev->rev_id + 0x41;
+		else if (adev->rev_id == 1)
+			adev->external_rev_id = adev->rev_id + 0x20;
 		else
-			adev->external_rev_id = 0x1;
+			adev->external_rev_id = adev->rev_id + 0x01;
 
 		if (adev->rev_id >= 0x8) {
 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 5d85ff341385..2e7c44955f43 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -863,7 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
 	return 0;
 }
 
-#if CONFIG_X86_64
+#ifdef CONFIG_X86_64
 static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
 			uint32_t *num_entries,
 			struct crat_subtype_iolink *sub_type_hdr)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f4fa40c387d3..0b392bfca284 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4082,7 +4082,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 	}
 
 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
-	    connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+	    connector_type == DRM_MODE_CONNECTOR_eDP) {
 		drm_connector_attach_vrr_capable_property(
 			&aconnector->base);
 	}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
index afd287f08bc9..19801bdba0d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -591,7 +591,15 @@ static void dce11_pplib_apply_display_requirements(
 			dc,
 			context->bw.dce.sclk_khz);
 
-	pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz;
+	/*
+	 * As workaround for >4x4K lightup set dcfclock to min_engine_clock value.
+	 * This is not required for less than 5 displays,
+	 * thus don't request decfclk in dc to avoid impact
+	 * on power saving.
+	 *
+	 */
+	pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4)?
+			pp_display_cfg->min_engine_clock_khz : 0;
 
 	pp_display_cfg->min_engine_clock_deep_sleep_khz
 		= context->bw.dce.sclk_deep_sleep_khz;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index f95c5f50eb0f..5273de3c5b98 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1033,6 +1033,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
 		break;
 	case amd_pp_dpp_clock:
 		pclk_vol_table = pinfo->vdd_dep_on_dppclk;
+		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 24a750436559..f91e02c87fd8 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -758,7 +758,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode)
 	if (mode->hsync)
 		return mode->hsync;
 
-	if (mode->htotal < 0)
+	if (mode->htotal <= 0)
 		return 0;
 
 	calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f3e1d6a0b7dd..4079050f9d6c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1086,7 +1086,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
 			return DDI_CLK_SEL_TBT_810;
 		default:
 			MISSING_CASE(clock);
-			break;
+			return DDI_CLK_SEL_NONE;
 		}
 	case DPLL_ID_ICL_MGPLL1:
 	case DPLL_ID_ICL_MGPLL2:
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3da9c0f9e948..248128126422 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15415,16 +15415,45 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
 	}
 }
 
+static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+	/*
+	 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
+	 * the hardware when a high res displays plugged in. DPLL P
+	 * divider is zero, and the pipe timings are bonkers. We'll
+	 * try to disable everything in that case.
+	 *
+	 * FIXME would be nice to be able to sanitize this state
+	 * without several WARNs, but for now let's take the easy
+	 * road.
+	 */
+	return IS_GEN6(dev_priv) &&
+		crtc_state->base.active &&
+		crtc_state->shared_dpll &&
+		crtc_state->port_clock == 0;
+}
+
 static void intel_sanitize_encoder(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_connector *connector;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_crtc_state *crtc_state = crtc ?
+		to_intel_crtc_state(crtc->base.state) : NULL;
 
 	/* We need to check both for a crtc link (meaning that the
 	 * encoder is active and trying to read from a pipe) and the
 	 * pipe itself being active. */
-	bool has_active_crtc = encoder->base.crtc &&
-		to_intel_crtc(encoder->base.crtc)->active;
+	bool has_active_crtc = crtc_state &&
+		crtc_state->base.active;
+
+	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
+		DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
+			      pipe_name(crtc->pipe));
+		has_active_crtc = false;
+	}
 
 	connector = intel_encoder_find_connector(encoder);
 	if (connector && !has_active_crtc) {
@@ -15435,16 +15464,25 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 		/* Connector is active, but has no active pipe. This is
 		 * fallout from our resume register restoring. Disable
 		 * the encoder manually again. */
-		if (encoder->base.crtc) {
-			struct drm_crtc_state *crtc_state = encoder->base.crtc->state;
+		if (crtc_state) {
+			struct drm_encoder *best_encoder;
 
 			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
 				      encoder->base.base.id,
 				      encoder->base.name);
+
+			/* avoid oopsing in case the hooks consult best_encoder */
+			best_encoder = connector->base.state->best_encoder;
+			connector->base.state->best_encoder = &encoder->base;
+
 			if (encoder->disable)
-				encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
+				encoder->disable(encoder, crtc_state,
+						 connector->base.state);
 			if (encoder->post_disable)
-				encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
+				encoder->post_disable(encoder, crtc_state,
+						      connector->base.state);
+
+			connector->base.state->best_encoder = best_encoder;
 		}
 		encoder->base.crtc = NULL;
 
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index d2e003d8f3db..5170a0f5fe7b 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -494,7 +494,7 @@ skl_program_plane(struct intel_plane *plane,
 
 	keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
 
-	keymsk = key->channel_mask & 0x3ffffff;
+	keymsk = key->channel_mask & 0x7ffffff;
 	if (alpha < 0xff)
 		keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
 
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 00a9c2ab9e6c..64fb788b6647 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -1406,7 +1406,7 @@ static void dsi_pll_disable(struct dss_pll *pll)
 
 static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
 {
-	struct dsi_data *dsi = p;
+	struct dsi_data *dsi = s->private;
 	struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
 	enum dss_clk_source dispc_clk_src, dsi_clk_src;
 	int dsi_module = dsi->module_id;
@@ -1467,7 +1467,7 @@ static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
 static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
 {
-	struct dsi_data *dsi = p;
+	struct dsi_data *dsi = s->private;
 	unsigned long flags;
 	struct dsi_irq_stats stats;
 
@@ -1558,7 +1558,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
 
 static int dsi_dump_dsi_regs(struct seq_file *s, void *p)
 {
-	struct dsi_data *dsi = p;
+	struct dsi_data *dsi = s->private;
 
 	if (dsi_runtime_get(dsi))
 		return 0;
@@ -4751,6 +4751,17 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
 	dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
 	dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
 	dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
+	/*
+	 * HACK: These flags should be handled through the omap_dss_device bus
+	 * flags, but this will only be possible when the DSI encoder will be
+	 * converted to the omapdrm-managed encoder model.
+	 */
+	dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+	dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
+	dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
+	dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH;
+	dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
+	dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
 
 	dss_mgr_set_timings(&dsi->output, &dsi->vm);
 
@@ -5083,15 +5094,15 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
 
 	snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1);
 	dsi->debugfs.regs = dss_debugfs_create_file(dss, name,
-						    dsi_dump_dsi_regs, &dsi);
+						    dsi_dump_dsi_regs, dsi);
 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
 	snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1);
 	dsi->debugfs.irqs = dss_debugfs_create_file(dss, name,
-						    dsi_dump_dsi_irqs, &dsi);
+						    dsi_dump_dsi_irqs, dsi);
 #endif
 	snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1);
 	dsi->debugfs.clks = dss_debugfs_create_file(dss, name,
-						    dsi_dump_dsi_clocks, &dsi);
+						    dsi_dump_dsi_clocks, dsi);
 
 	return 0;
 }
@@ -5104,8 +5115,6 @@ static void dsi_unbind(struct device *dev, struct device *master, void *data)
 	dss_debugfs_remove_file(dsi->debugfs.irqs);
 	dss_debugfs_remove_file(dsi->debugfs.regs);
 
-	of_platform_depopulate(dev);
-
 	WARN_ON(dsi->scp_clk_refcount > 0);
 
 	dss_pll_unregister(&dsi->pll);
@@ -5457,6 +5466,8 @@ static int dsi_remove(struct platform_device *pdev)
 
 	dsi_uninit_output(dsi);
 
+	of_platform_depopulate(&pdev->dev);
+
 	pm_runtime_disable(&pdev->dev);
 
 	if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) {
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index d587779a80b4..a97294ac96d5 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev)
 	u16 data_offset, size;
 	u8 frev, crev;
 	struct ci_power_info *pi;
-	enum pci_bus_speed speed_cap;
+	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
 	struct pci_dev *root = rdev->pdev->bus->self;
 	int ret;
 
@@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev)
 		return -ENOMEM;
 	rdev->pm.dpm.priv = pi;
 
-	speed_cap = pcie_get_speed_cap(root);
+	if (!pci_is_root_bus(rdev->pdev->bus))
+		speed_cap = pcie_get_speed_cap(root);
 	if (speed_cap == PCI_SPEED_UNKNOWN) {
 		pi->sys_pcie_mask = 0;
 	} else {
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 8fb60b3af015..0a785ef0ab66 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev)
 	struct ni_power_info *ni_pi;
 	struct si_power_info *si_pi;
 	struct atom_clock_dividers dividers;
-	enum pci_bus_speed speed_cap;
+	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
 	struct pci_dev *root = rdev->pdev->bus->self;
 	int ret;
 
@@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev)
 	eg_pi = &ni_pi->eg;
 	pi = &eg_pi->rv7xx;
 
-	speed_cap = pcie_get_speed_cap(root);
+	if (!pci_is_root_bus(rdev->pdev->bus))
+		speed_cap = pcie_get_speed_cap(root);
 	if (speed_cap == PCI_SPEED_UNKNOWN) {
 		si_pi->sys_pcie_mask = 0;
 	} else {
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index 37f93022a106..c0351abf83a3 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -1,17 +1,8 @@
-//SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
  * Author:
  *      Sandy Huang <hjc@rock-chips.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
  */
 
 #include <drm/drmP.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h
index 38b52e63b2b0..27b9635124bc 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.h
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h
@@ -1,17 +1,8 @@
-//SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
  * Author:
  *      Sandy Huang <hjc@rock-chips.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
 */
 
 #ifdef CONFIG_ROCKCHIP_RGB
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 0420f5c978b9..cf45d0f940f9 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -761,6 +761,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
 			return PTR_ERR(tcon->sclk0);
 		}
 	}
+	clk_prepare_enable(tcon->sclk0);
 
 	if (tcon->quirks->has_channel_1) {
 		tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
@@ -775,6 +776,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
 
 static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
 {
+	clk_disable_unprepare(tcon->sclk0);
 	clk_disable_unprepare(tcon->clk);
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 25afb1d594e3..7ef5dcb06104 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -26,6 +26,7 @@
  **************************************************************************/
 #include <linux/module.h>
 #include <linux/console.h>
+#include <linux/dma-mapping.h>
 
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
@@ -34,7 +35,6 @@
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_module.h>
-#include <linux/intel-iommu.h>
 
 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
 #define VMWGFX_CHIP_SVGAII 0
@@ -546,6 +546,21 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
 }
 
 /**
+ * vmw_assume_iommu - Figure out whether coherent dma-remapping might be
+ * taking place.
+ * @dev: Pointer to the struct drm_device.
+ *
+ * Return: true if iommu present, false otherwise.
+ */
+static bool vmw_assume_iommu(struct drm_device *dev)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev->dev);
+
+	return !dma_is_direct(ops) && ops &&
+		ops->map_page != dma_direct_map_page;
+}
+
+/**
  * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
  * system.
  *
@@ -565,55 +580,27 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
 		[vmw_dma_map_populate] = "Keeping DMA mappings.",
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
-#ifdef CONFIG_X86
-	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
 
-#ifdef CONFIG_INTEL_IOMMU
-	if (intel_iommu_enabled) {
+	if (vmw_force_coherent)
+		dev_priv->map_mode = vmw_dma_alloc_coherent;
+	else if (vmw_assume_iommu(dev_priv->dev))
 		dev_priv->map_mode = vmw_dma_map_populate;
-		goto out_fixup;
-	}
-#endif
-
-	if (!(vmw_force_iommu || vmw_force_coherent)) {
+	else if (!vmw_force_iommu)
 		dev_priv->map_mode = vmw_dma_phys;
-		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
-		return 0;
-	}
-
-	dev_priv->map_mode = vmw_dma_map_populate;
-
-	if (dma_ops && dma_ops->sync_single_for_cpu)
+	else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl())
 		dev_priv->map_mode = vmw_dma_alloc_coherent;
-#ifdef CONFIG_SWIOTLB
-	if (swiotlb_nr_tbl() == 0)
+	else
 		dev_priv->map_mode = vmw_dma_map_populate;
-#endif
 
-#ifdef CONFIG_INTEL_IOMMU
-out_fixup:
-#endif
-	if (dev_priv->map_mode == vmw_dma_map_populate &&
-	    vmw_restrict_iommu)
+	if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu)
 		dev_priv->map_mode = vmw_dma_map_bind;
 
-	if (vmw_force_coherent)
-		dev_priv->map_mode = vmw_dma_alloc_coherent;
-
-#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
-	/*
-	 * No coherent page pool
-	 */
-	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+	/* No TTM coherent page pool? FIXME: Ask TTM instead! */
+	if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
+	    (dev_priv->map_mode == vmw_dma_alloc_coherent))
 		return -EINVAL;
-#endif
-
-#else /* CONFIG_X86 */
-	dev_priv->map_mode = vmw_dma_map_populate;
-#endif /* CONFIG_X86 */
 
 	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
-
 	return 0;
 }
 
@@ -625,24 +612,20 @@ out_fixup:
  * With 32-bit we can only handle 32 bit PFNs. Optionally set that
  * restriction also for 64-bit systems.
  */
-#ifdef CONFIG_INTEL_IOMMU
 static int vmw_dma_masks(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
+	int ret = 0;
 
-	if (intel_iommu_enabled &&
+	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
+	if (dev_priv->map_mode != vmw_dma_phys &&
 	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
 		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
-		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
+		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
 	}
-	return 0;
-}
-#else
-static int vmw_dma_masks(struct vmw_private *dev_priv)
-{
-	return 0;
+
+	return ret;
 }
-#endif
 
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index f2d13a72c05d..88b8178d4687 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3570,7 +3570,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
 		*p_fence = NULL;
 	}
 
-	return 0;
+	return ret;
 }
 
 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b351fb5214d3..ed2f67822f45 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1646,7 +1646,7 @@ static int vmw_kms_check_topology(struct drm_device *dev,
 		struct drm_connector_state *conn_state;
 		struct vmw_connector_state *vmw_conn_state;
 
-		if (!du->pref_active) {
+		if (!du->pref_active && new_crtc_state->enable) {
 			ret = -EINVAL;
 			goto clean;
 		}
@@ -2554,8 +2554,8 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
 				      user_fence_rep)
 {
 	struct vmw_fence_obj *fence = NULL;
-	uint32_t handle;
-	int ret;
+	uint32_t handle = 0;
+	int ret = 0;
 
 	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
 	    out_fence)
