Diffstat (limited to 'drivers/gpu/drm')
56 files changed, 779 insertions, 200 deletions
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index e930d4fe29c7..1ef5ab9c9d51 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -145,6 +145,7 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
 
 plane->crtc = crtc;
 plane->fb = crtc->primary->fb;
+drm_framebuffer_reference(plane->fb);
 
 return 0;
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index c786cd4f457b..2a3ad24276f8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -263,7 +263,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 buffer->sgt = sgt;
 exynos_gem_obj->base.import_attach = attach;
 
-DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
+DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
 buffer->size);
 
 return &exynos_gem_obj->base;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index eb73e3bf2a0c..4ac438187568 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1426,9 +1426,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 
 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 dsi->reg_base = devm_ioremap_resource(&pdev->dev, res);
-if (!dsi->reg_base) {
+if (IS_ERR(dsi->reg_base)) {
 dev_err(&pdev->dev, "failed to remap io region\n");
-return -EADDRNOTAVAIL;
+return PTR_ERR(dsi->reg_base);
 }
 
 dsi->phy = devm_phy_get(&pdev->dev, "dsim");
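The exynos_dsi_probe() fix above relies on the convention that devm_ioremap_resource() never returns NULL on failure; it returns an errno encoded into the pointer value, so the check must use IS_ERR()/PTR_ERR() rather than a NULL test. A minimal standalone sketch of that encoding follows; the err_ptr/is_err/ptr_err helpers are local stand-ins written for illustration, not the kernel's ERR_PTR macros themselves.

/* Standalone sketch (not kernel code) of ERR_PTR-style error returns. */
#include <stdio.h>

#define MAX_ERRNO 4095

static void *err_ptr(long error)      /* mimics ERR_PTR() */
{
	return (void *)error;
}

static int is_err(const void *ptr)    /* mimics IS_ERR() */
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static long ptr_err(const void *ptr)  /* mimics PTR_ERR() */
{
	return (long)ptr;
}

int main(void)
{
	void *reg_base = err_ptr(-12);   /* pretend an -ENOMEM-style code came back */

	if (is_err(reg_base))
		printf("mapping failed: %ld\n", ptr_err(reg_base));
	return 0;
}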
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 7afead9c3f30..852f2dadaebd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -220,7 +220,7 @@ static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos)
 
 win_data->enabled = true;
 
-DRM_DEBUG_KMS("dma_addr = 0x%x\n", win_data->dma_addr);
+DRM_DEBUG_KMS("dma_addr = %pad\n", &win_data->dma_addr);
 
 if (ctx->vblank_on)
 schedule_work(&ctx->work);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ec82f6bff122..108e1ec2fa4b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1954,6 +1954,9 @@ struct drm_i915_cmd_table {
 #define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
 #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
 ((dev)->pdev->device & 0x00F0) == 0x0020)
+/* ULX machines are also considered ULT. */
+#define IS_HSW_ULX(dev) ((dev)->pdev->device == 0x0A0E || \
+(dev)->pdev->device == 0x0A1E)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
 /*
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ab5e93c30aa2..154b0f8bb88d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -34,25 +34,35 @@ static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv);
 
 bool intel_enable_ppgtt(struct drm_device *dev, bool full)
 {
-if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+if (i915.enable_ppgtt == 0)
 return false;
 
 if (i915.enable_ppgtt == 1 && full)
 return false;
 
+return true;
+}
+
+static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
+{
+if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+return 0;
+
+if (enable_ppgtt == 1)
+return 1;
+
+if (enable_ppgtt == 2 && HAS_PPGTT(dev))
+return 2;
+
 #ifdef CONFIG_INTEL_IOMMU
 /* Disable ppgtt on SNB if VT-d is on. */
 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
 DRM_INFO("Disabling PPGTT because VT-d is on\n");
-return false;
+return 0;
 }
 #endif
 
-/* Full ppgtt disabled by default for now due to issues. */
-if (full)
-return false; /* HAS_PPGTT(dev) */
-else
-return HAS_ALIASING_PPGTT(dev);
+return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
 }
 
 #define GEN6_PPGTT_PD_ENTRIES 512
@@ -2031,6 +2041,14 @@ int i915_gem_gtt_init(struct drm_device *dev)
 gtt->base.total >> 20);
 DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
 DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
+/*
+ * i915.enable_ppgtt is read-only, so do an early pass to validate the
+ * user's requested state against the hardware/driver capabilities. We
+ * do this now so that we can print out any log messages once rather
+ * than every time we check intel_enable_ppgtt().
+ */
+i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
+DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
 
 return 0;
 }
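The comment added to i915_gem_gtt_init() explains why the enable_ppgtt module parameter is clamped once at init time. Below is a standalone sketch of the decision table sanitize_enable_ppgtt() implements; the VT-d/SNB special case is omitted, and has_aliasing/has_full are stand-ins for the HAS_ALIASING_PPGTT()/HAS_PPGTT() checks, so this is an illustration of the logic rather than the driver function itself.

#include <stdio.h>
#include <stdbool.h>

/* 0 = PPGTT disabled, 1 = aliasing PPGTT, 2 = full PPGTT. */
static int sanitize_enable_ppgtt(bool has_aliasing, bool has_full, int requested)
{
	if (requested == 0 || !has_aliasing)
		return 0;
	if (requested == 1)
		return 1;
	if (requested == 2 && has_full)
		return 2;
	/* Fall back to aliasing PPGTT when the request can't be honoured. */
	return has_aliasing ? 1 : 0;
}

int main(void)
{
	int requested;

	/* Platform with aliasing PPGTT but no full PPGTT support. */
	for (requested = 0; requested <= 2; requested++)
		printf("requested %d -> effective %d\n", requested,
		       sanitize_enable_ppgtt(true, false, requested));
	return 0;
}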
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7753249b3a95..f98ba4e6e70b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1362,10 +1362,20 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
 spin_lock(&dev_priv->irq_lock);
 for (i = 1; i < HPD_NUM_PINS; i++) {
 
-WARN_ONCE(hpd[i] & hotplug_trigger &&
-dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
-"Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
-hotplug_trigger, i, hpd[i]);
+if (hpd[i] & hotplug_trigger &&
+dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
+/*
+ * On GMCH platforms the interrupt mask bits only
+ * prevent irq generation, not the setting of the
+ * hotplug bits itself. So only WARN about unexpected
+ * interrupts on saner platforms.
+ */
+WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
+"Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
+hotplug_trigger, i, hpd[i]);
+
+continue;
+}
 
 if (!(hpd[i] & hotplug_trigger) ||
 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9f5b18d9d885..c77af69c2d8f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -827,6 +827,7 @@ enum punit_power_well {
 # define MI_FLUSH_ENABLE (1 << 12)
 # define ASYNC_FLIP_PERF_DISABLE (1 << 14)
 # define MODE_IDLE (1 << 9)
+# define STOP_RING (1 << 8)
 
 #define GEN6_GT_MODE 0x20d0
 #define GEN7_GT_MODE 0x7008
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index fa486c5fbb02..aff4a113cda3 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -560,47 +560,71 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 
 dev_priv->vbt.edp_pps = *edp_pps;
 
-dev_priv->vbt.edp_rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
-DP_LINK_BW_1_62;
+switch (edp_link_params->rate) {
+case EDP_RATE_1_62:
+dev_priv->vbt.edp_rate = DP_LINK_BW_1_62;
+break;
+case EDP_RATE_2_7:
+dev_priv->vbt.edp_rate = DP_LINK_BW_2_7;
+break;
+default:
+DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
+edp_link_params->rate);
+break;
+}
+
 switch (edp_link_params->lanes) {
-case 0:
+case EDP_LANE_1:
 dev_priv->vbt.edp_lanes = 1;
 break;
-case 1:
+case EDP_LANE_2:
 dev_priv->vbt.edp_lanes = 2;
 break;
-case 3:
-default:
+case EDP_LANE_4:
 dev_priv->vbt.edp_lanes = 4;
 break;
+default:
+DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
+edp_link_params->lanes);
+break;
 }
+
 switch (edp_link_params->preemphasis) {
-case 0:
+case EDP_PREEMPHASIS_NONE:
 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
 break;
-case 1:
+case EDP_PREEMPHASIS_3_5dB:
 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
 break;
-case 2:
+case EDP_PREEMPHASIS_6dB:
 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
 break;
-case 3:
+case EDP_PREEMPHASIS_9_5dB:
 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
 break;
+default:
+DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
+edp_link_params->preemphasis);
+break;
 }
+
 switch (edp_link_params->vswing) {
-case 0:
+case EDP_VSWING_0_4V:
 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400;
 break;
-case 1:
+case EDP_VSWING_0_6V:
 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600;
 break;
-case 2:
+case EDP_VSWING_0_8V:
 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800;
 break;
-case 3:
+case EDP_VSWING_1_2V:
 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200;
 break;
+default:
+DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
+edp_link_params->vswing);
+break;
 }
 }
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dae976f51d83..48aa516a1ac0 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9654,11 +9654,22 @@ intel_pipe_config_compare(struct drm_device *dev,
 PIPE_CONF_CHECK_I(pipe_src_w);
 PIPE_CONF_CHECK_I(pipe_src_h);
 
-PIPE_CONF_CHECK_I(gmch_pfit.control);
-/* pfit ratios are autocomputed by the hw on gen4+ */
-if (INTEL_INFO(dev)->gen < 4)
-PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
-PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+/*
+ * FIXME: BIOS likes to set up a cloned config with lvds+external
+ * screen. Since we don't yet re-compute the pipe config when moving
+ * just the lvds port away to another pipe the sw tracking won't match.
+ *
+ * Proper atomic modesets with recomputed global state will fix this.
+ * Until then just don't check gmch state for inherited modes.
+ */
+if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
+PIPE_CONF_CHECK_I(gmch_pfit.control);
+/* pfit ratios are autocomputed by the hw on gen4+ */
+if (INTEL_INFO(dev)->gen < 4)
+PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+}
+
 PIPE_CONF_CHECK_I(pch_pfit.enabled);
 if (current_config->pch_pfit.enabled) {
 PIPE_CONF_CHECK_I(pch_pfit.pos);
@@ -11384,15 +11395,6 @@ void intel_modeset_init(struct drm_device *dev)
 }
 }
 
-static void
-intel_connector_break_all_links(struct intel_connector *connector)
-{
-connector->base.dpms = DRM_MODE_DPMS_OFF;
-connector->base.encoder = NULL;
-connector->encoder->connectors_active = false;
-connector->encoder->base.crtc = NULL;
-}
-
 static void intel_enable_pipe_a(struct drm_device *dev)
 {
 struct intel_connector *connector;
@@ -11474,8 +11476,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 if (connector->encoder->base.crtc != &crtc->base)
 continue;
 
-intel_connector_break_all_links(connector);
+connector->base.dpms = DRM_MODE_DPMS_OFF;
+connector->base.encoder = NULL;
 }
+/* multiple connectors may have the same encoder:
+ * handle them and break crtc link separately */
+list_for_each_entry(connector, &dev->mode_config.connector_list,
+base.head)
+if (connector->encoder->base.crtc == &crtc->base) {
+connector->encoder->base.crtc = NULL;
+connector->encoder->connectors_active = false;
+}
 
 WARN_ON(crtc->active);
 crtc->base.enabled = false;
@@ -11557,6 +11568,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 drm_get_encoder_name(&encoder->base));
 encoder->disable(encoder);
 }
+encoder->base.crtc = NULL;
+encoder->connectors_active = false;
 
 /* Inconsistent output/port/pipe state happens presumably due to
 * a bug in one of the get_hw_state functions. Or someplace else
@@ -11567,8 +11580,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 base.head) {
 if (connector->encoder != encoder)
 continue;
-
-intel_connector_break_all_links(connector);
+connector->base.dpms = DRM_MODE_DPMS_OFF;
+connector->base.encoder = NULL;
 }
 }
 /* Enabled encoders without active connectors will be fixed in
@@ -11616,6 +11629,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 base.head) {
 memset(&crtc->config, 0, sizeof(crtc->config));
 
+crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
+
 crtc->active = dev_priv->display.get_pipe_config(crtc,
 &crtc->config);
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d2a55884ad52..2a00cb828d20 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -105,7 +105,8 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
 case DP_LINK_BW_2_7:
 break;
 case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
-if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) &&
+if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
+INTEL_INFO(dev)->gen >= 8) &&
 intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
 max_link_bw = DP_LINK_BW_5_4;
 else
@@ -120,6 +121,22 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
 return max_link_bw;
 }
 
+static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
+{
+struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+struct drm_device *dev = intel_dig_port->base.base.dev;
+u8 source_max, sink_max;
+
+source_max = 4;
+if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
+(intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
+source_max = 2;
+
+sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
+
+return min(source_max, sink_max);
+}
+
 /*
 * The units on the numbers in the next two are... bizarre. Examples will
 * make it clearer; this one parallels an example in the eDP spec.
@@ -170,7 +187,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
 }
 
 max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
-max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
+max_lanes = intel_dp_max_lane_count(intel_dp);
 
 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
 mode_rate = intel_dp_link_required(target_clock, 18);
@@ -750,8 +767,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 struct intel_crtc *intel_crtc = encoder->new_crtc;
 struct intel_connector *intel_connector = intel_dp->attached_connector;
 int lane_count, clock;
-int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+int min_lane_count = 1;
+int max_lane_count = intel_dp_max_lane_count(intel_dp);
 /* Conveniently, the link BW constants become indices with a shift...*/
+int min_clock = 0;
 int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
 int bpp, mode_rate;
 static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
@@ -784,19 +803,38 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
 * bpc in between. */
 bpp = pipe_config->pipe_bpp;
-if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
-dev_priv->vbt.edp_bpp < bpp) {
+if (is_edp(intel_dp)) {
+if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
 dev_priv->vbt.edp_bpp);
 bpp = dev_priv->vbt.edp_bpp;
+}
+
+if (IS_BROADWELL(dev)) {
+/* Yes, it's an ugly hack. */
+min_lane_count = max_lane_count;
+DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
+min_lane_count);
+} else if (dev_priv->vbt.edp_lanes) {
+min_lane_count = min(dev_priv->vbt.edp_lanes,
+max_lane_count);
+DRM_DEBUG_KMS("using min %u lanes per VBT\n",
+min_lane_count);
+}
+
+if (dev_priv->vbt.edp_rate) {
+min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
+DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
+bws[min_clock]);
+}
 }
 
 for (; bpp >= 6*3; bpp -= 2*3) {
 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
 bpp);
 
-for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
-for (clock = 0; clock <= max_clock; clock++) {
+for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
+for (clock = min_clock; clock <= max_clock; clock++) {
 link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
 link_avail = intel_dp_max_data_rate(link_clock,
 lane_count);
@@ -3619,7 +3657,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 {
 struct drm_connector *connector = &intel_connector->base;
 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-struct drm_device *dev = intel_dig_port->base.base.dev;
+struct intel_encoder *intel_encoder = &intel_dig_port->base;
+struct drm_device *dev = intel_encoder->base.dev;
 struct drm_i915_private *dev_priv = dev->dev_private;
 struct drm_display_mode *fixed_mode = NULL;
 bool has_dpcd;
@@ -3629,6 +3668,14 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 if (!is_edp(intel_dp))
 return true;
 
+/* The VDD bit needs a power domain reference, so if the bit is already
+ * enabled when we boot, grab this reference. */
+if (edp_have_panel_vdd(intel_dp)) {
+enum intel_display_power_domain power_domain;
+power_domain = intel_display_port_power_domain(intel_encoder);
+intel_display_power_get(dev_priv, power_domain);
+}
+
 /* Cache DPCD and EDID for edp. */
 intel_edp_panel_vdd_on(intel_dp);
 has_dpcd = intel_dp_get_dpcd(intel_dp);
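intel_dp_compute_config() above leans on the remark that the DP link-bandwidth constants "become indices with a shift": the codes 0x06, 0x0a and 0x14 map to 0, 1 and 2 when shifted right by three, which is how both min_clock and max_clock index the bws[] table. A small standalone check of that mapping (the constant values are the standard DisplayPort bandwidth codes used by drm):

#include <stdio.h>

#define DP_LINK_BW_1_62 0x06
#define DP_LINK_BW_2_7  0x0a
#define DP_LINK_BW_5_4  0x14

int main(void)
{
	static const int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
	int i;

	/* Each bandwidth code, shifted right by 3, is its own index in bws[]. */
	for (i = 0; i < 3; i++)
		printf("bw code 0x%02x -> index %d\n", bws[i], bws[i] >> 3);
	return 0;
}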
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0542de982260..328b1a70264b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -236,7 +236,8 @@ struct intel_crtc_config {
 * tracked with quirk flags so that fastboot and state checker can act
 * accordingly.
 */
 #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
+#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */
 unsigned long quirks;
 
 /* User requested mode, only valid as a starting point to
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index b4d44e62f0c7..f73ba5e6b7a8 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -132,6 +132,16 @@ static int intelfb_create(struct drm_fb_helper *helper,
 
 mutex_lock(&dev->struct_mutex);
 
+if (intel_fb &&
+(sizes->fb_width > intel_fb->base.width ||
+sizes->fb_height > intel_fb->base.height)) {
+DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
+" releasing it\n",
+intel_fb->base.width, intel_fb->base.height,
+sizes->fb_width, sizes->fb_height);
+drm_framebuffer_unreference(&intel_fb->base);
+intel_fb = ifbdev->fb = NULL;
+}
 if (!intel_fb || WARN_ON(!intel_fb->obj)) {
 DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
 ret = intelfb_alloc(helper, sizes);
@@ -377,6 +387,15 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
 height);
 }
 
+/* No preferred mode marked by the EDID? Are there any modes? */
+if (!modes[i] && !list_empty(&connector->modes)) {
+DRM_DEBUG_KMS("using first mode listed on connector %s\n",
+drm_get_connector_name(connector));
+modes[i] = list_first_entry(&connector->modes,
+struct drm_display_mode,
+head);
+}
+
 /* last resort: use current mode */
 if (!modes[i]) {
 /*
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index b0413e190625..157267aa3561 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -821,11 +821,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
 }
 }
 
-static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
 {
 struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 
-if (!hdmi->has_hdmi_sink || IS_G4X(dev))
+if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
 return 165000;
 else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
 return 300000;
@@ -837,7 +837,8 @@ static enum drm_mode_status
 intel_hdmi_mode_valid(struct drm_connector *connector,
 struct drm_display_mode *mode)
 {
-if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
+if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
+true))
 return MODE_CLOCK_HIGH;
 if (mode->clock < 20000)
 return MODE_CLOCK_LOW;
@@ -879,7 +880,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 struct drm_device *dev = encoder->base.dev;
 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
 int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
-int portclock_limit = hdmi_portclock_limit(intel_hdmi);
+int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
 int desired_bpp;
 
 if (intel_hdmi->color_range_auto) {
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 0eead16aeda7..cb8cfb7e0974 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -492,6 +492,7 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
 enum pipe pipe = intel_get_pipe_from_connector(connector);
 u32 freq;
 unsigned long flags;
+u64 n;
 
 if (!panel->backlight.present || pipe == INVALID_PIPE)
 return;
@@ -502,10 +503,9 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
 
 /* scale to hardware max, but be careful to not overflow */
 freq = panel->backlight.max;
-if (freq < max)
-level = level * freq / max;
-else
-level = freq / max * level;
+n = (u64)level * freq;
+do_div(n, max);
+level = n;
 
 panel->backlight.level = level;
 if (panel->backlight.device)
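The intel_panel.c change replaces the two 32-bit scaling paths with a single 64-bit multiply-then-divide via do_div(), since level * freq can exceed 32 bits before the division (and the old freq / max pre-division lost precision). A standalone sketch with made-up values chosen only so the 32-bit product wraps:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t level = 70000, freq = 80000, max = 100000;

	/* 32-bit product wraps before the division. */
	uint32_t wrong = level * freq / max;
	/* 64-bit intermediate, equivalent to what do_div() computes. */
	uint32_t right = (uint64_t)level * freq / max;

	printf("32-bit scaling: %u, 64-bit scaling: %u\n", wrong, right);
	return 0;
}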
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 19e94c3edc19..d93dcf683e8c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2095,6 +2095,43 @@ static void intel_print_wm_latency(struct drm_device *dev,
 }
 }
 
+static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
+uint16_t wm[5], uint16_t min)
+{
+int level, max_level = ilk_wm_max_level(dev_priv->dev);
+
+if (wm[0] >= min)
+return false;
+
+wm[0] = max(wm[0], min);
+for (level = 1; level <= max_level; level++)
+wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
+
+return true;
+}
+
+static void snb_wm_latency_quirk(struct drm_device *dev)
+{
+struct drm_i915_private *dev_priv = dev->dev_private;
+bool changed;
+
+/*
+ * The BIOS provided WM memory latency values are often
+ * inadequate for high resolution displays. Adjust them.
+ */
+changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
+ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
+ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
+
+if (!changed)
+return;
+
+DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
+intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
+intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
+intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+}
+
 static void ilk_setup_wm_latency(struct drm_device *dev)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2112,6 +2149,9 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+
+if (IS_GEN6(dev))
+snb_wm_latency_quirk(dev);
 }
 
 static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6bc68bdcf433..79fb4cc2137c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -437,32 +437,41 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
 I915_WRITE(HWS_PGA, addr);
 }
 
-static int init_ring_common(struct intel_ring_buffer *ring)
+static bool stop_ring(struct intel_ring_buffer *ring)
 {
-struct drm_device *dev = ring->dev;
-struct drm_i915_private *dev_priv = dev->dev_private;
-struct drm_i915_gem_object *obj = ring->obj;
-int ret = 0;
-u32 head;
+struct drm_i915_private *dev_priv = to_i915(ring->dev);
 
-gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+if (!IS_GEN2(ring->dev)) {
+I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
+if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
+DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
+return false;
+}
+}
 
-/* Stop the ring if it's running. */
 I915_WRITE_CTL(ring, 0);
 I915_WRITE_HEAD(ring, 0);
 ring->write_tail(ring, 0);
-if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000))
-DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
 
-if (I915_NEED_GFX_HWS(dev))
-intel_ring_setup_status_page(ring);
-else
-ring_setup_phys_status_page(ring);
+if (!IS_GEN2(ring->dev)) {
+(void)I915_READ_CTL(ring);
+I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+}
 
-head = I915_READ_HEAD(ring) & HEAD_ADDR;
+return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
+}
 
-/* G45 ring initialization fails to reset head to zero */
-if (head != 0) {
+static int init_ring_common(struct intel_ring_buffer *ring)
+{
+struct drm_device *dev = ring->dev;
+struct drm_i915_private *dev_priv = dev->dev_private;
+struct drm_i915_gem_object *obj = ring->obj;
+int ret = 0;
+
+gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+if (!stop_ring(ring)) {
+/* G45 ring initialization often fails to reset head to zero */
 DRM_DEBUG_KMS("%s head not reset to zero "
 "ctl %08x head %08x tail %08x start %08x\n",
 ring->name,
@@ -471,9 +480,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 I915_READ_TAIL(ring),
 I915_READ_START(ring));
 
-I915_WRITE_HEAD(ring, 0);
-
-if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+if (!stop_ring(ring)) {
 DRM_ERROR("failed to set %s head to zero "
 "ctl %08x head %08x tail %08x start %08x\n",
 ring->name,
@@ -481,9 +488,16 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 I915_READ_HEAD(ring),
 I915_READ_TAIL(ring),
 I915_READ_START(ring));
+ret = -EIO;
+goto out;
 }
 }
 
+if (I915_NEED_GFX_HWS(dev))
+intel_ring_setup_status_page(ring);
+else
+ring_setup_phys_status_page(ring);
+
 /* Initialize the ring. This must happen _after_ we've cleared the ring
 * registers with the above sequence (the readback of the HEAD registers
 * also enforces ordering), otherwise the hw might lose the new ring
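The new stop_ring() helper toggles STOP_RING through I915_WRITE_MODE() using masked-bit writes: in the i915 convention the upper 16 bits of the written value select which low bits the hardware actually updates. The sketch below models that behaviour with a plain variable standing in for the MMIO register; the MASKED_BIT_* macros are local reimplementations of the driver's _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE pattern (mask in the high half, value in the low half), assumed here for illustration.

#include <stdio.h>
#include <stdint.h>

#define STOP_RING              (1 << 8)
#define MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a)  ((a) << 16)

/* Model of how hardware applies a masked register write. */
static uint32_t masked_write(uint32_t reg, uint32_t val)
{
	uint32_t mask = val >> 16;

	return (reg & ~mask) | (val & mask & 0xffff);
}

int main(void)
{
	uint32_t mi_mode = 0;

	mi_mode = masked_write(mi_mode, MASKED_BIT_ENABLE(STOP_RING));
	printf("after enable:  0x%04x\n", mi_mode & 0xffff);   /* bit 8 set */

	mi_mode = masked_write(mi_mode, MASKED_BIT_DISABLE(STOP_RING));
	printf("after disable: 0x%04x\n", mi_mode & 0xffff);   /* bit 8 clear */
	return 0;
}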
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 270a6a973438..2b91c4b4d34b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -34,6 +34,7 @@ struct intel_hw_status_page {
 #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 
 #define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
+#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
 
 enum intel_ring_hangcheck_action {
 HANGCHECK_IDLE = 0,
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d27155adf5db..46be00d66df3 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2424,8 +2424,8 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
 if (ret < 0)
 goto err1;
 
-ret = sysfs_create_link(&encoder->ddc.dev.kobj,
-&drm_connector->kdev->kobj,
+ret = sysfs_create_link(&drm_connector->kdev->kobj,
+&encoder->ddc.dev.kobj,
 encoder->ddc.dev.kobj.name);
 if (ret < 0)
 goto err2;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index f729dc71d5be..d0c75779d3f6 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -185,6 +185,8 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
 {
 __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
 _MASKED_BIT_DISABLE(0xffff));
+__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+_MASKED_BIT_DISABLE(0xffff));
 /* something from same cacheline, but !FORCEWAKE_VLV */
 __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 3e6c0f3ed592..ef9957dbac94 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -510,9 +510,8 @@ static void update_cursor(struct drm_crtc *crtc)
 MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
 } else {
 /* disable cursor: */
-mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0);
-mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
-MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB));
+mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
+mdp4_kms->blank_cursor_iova);
 }
 
 /* and drop the iova ref + obj rev when done scanning out: */
@@ -574,11 +573,9 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
 
 if (old_bo) {
 /* drop our previous reference: */
-msm_gem_put_iova(old_bo, mdp4_kms->id);
-drm_gem_object_unreference_unlocked(old_bo);
+drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
 }
 
-crtc_flush(crtc);
 request_pending(crtc, PENDING_CURSOR);
 
 return 0;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index c740ccd1cc67..8edd531cb621 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -70,12 +70,12 @@ irqreturn_t mdp4_irq(struct msm_kms *kms)
 
 VERB("status=%08x", status);
 
+mdp_dispatch_irqs(mdp_kms, status);
+
 for (id = 0; id < priv->num_crtcs; id++)
 if (status & mdp4_crtc_vblank(priv->crtcs[id]))
 drm_handle_vblank(dev, id);
 
-mdp_dispatch_irqs(mdp_kms, status);
-
 return IRQ_HANDLED;
 }
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 272e707c9487..0bb4faa17523 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -144,6 +144,10 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
 static void mdp4_destroy(struct msm_kms *kms)
 {
 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+if (mdp4_kms->blank_cursor_iova)
+msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
+if (mdp4_kms->blank_cursor_bo)
+drm_gem_object_unreference(mdp4_kms->blank_cursor_bo);
 kfree(mdp4_kms);
 }
 
@@ -372,6 +376,23 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 goto fail;
 }
 
+mutex_lock(&dev->struct_mutex);
+mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
+mutex_unlock(&dev->struct_mutex);
+if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
+ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
+dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
+mdp4_kms->blank_cursor_bo = NULL;
+goto fail;
+}
+
+ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
+&mdp4_kms->blank_cursor_iova);
+if (ret) {
+dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
+goto fail;
+}
+
 return kms;
 
 fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 66a4d31aec80..715520c54cde 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -44,6 +44,10 @@ struct mdp4_kms {
 struct clk *lut_clk;
 
 struct mdp_irq error_handler;
+
+/* empty/blank cursor bo to use when cursor is "disabled" */
+struct drm_gem_object *blank_cursor_bo;
+uint32_t blank_cursor_iova;
 };
 #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 353d494a497f..f2b985bc2adf 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -71,11 +71,11 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
 
 VERB("status=%08x", status);
 
+mdp_dispatch_irqs(mdp_kms, status);
+
 for (id = 0; id < priv->num_crtcs; id++)
 if (status & mdp5_crtc_vblank(priv->crtcs[id]))
 drm_handle_vblank(dev, id);
-
-mdp_dispatch_irqs(mdp_kms, status);
 }
 
 irqreturn_t mdp5_irq(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 6c6d7d4c9b4e..a752ab83b810 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -62,11 +62,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 dma_addr_t paddr;
 int ret, size;
 
-/* only doing ARGB32 since this is what is needed to alpha-blend
- * with video overlays:
- */
 sizes->surface_bpp = 32;
-sizes->surface_depth = 32;
+sizes->surface_depth = 24;
 
 DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
 sizes->surface_height, sizes->surface_bpp,
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 3da8264d3039..bb8026daebc9 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -118,8 +118,10 @@ static void put_pages(struct drm_gem_object *obj)
 
 if (iommu_present(&platform_bus_type))
 drm_gem_put_pages(obj, msm_obj->pages, true, false);
-else
+else {
 drm_mm_remove_node(msm_obj->vram_node);
+drm_free_large(msm_obj->pages);
+}
 
 msm_obj->pages = NULL;
 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
index 1dc37b1ddbfa..b0d0fb2f4d08 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
@@ -863,7 +863,7 @@ gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
 {
 mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
 mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
-mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
+mmio_data(0x200000, 0x1000, NV_MEM_ACCESS_RW);
 
 mmio_list(0x40800c, 0x00000000, 8, 1);
 mmio_list(0x408010, 0x80000000, 0, 0);
@@ -877,6 +877,8 @@ gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
 mmio_list(0x418e24, 0x00000000, 8, 0);
 mmio_list(0x418e28, 0x80000030, 0, 0);
 
+mmio_list(0x4064c8, 0x018002c0, 0, 0);
+
 mmio_list(0x418810, 0x80000000, 12, 2);
 mmio_list(0x419848, 0x10000000, 12, 2);
 mmio_list(0x419c2c, 0x10000000, 12, 2);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index fb0b6b2d1427..222e8ebb669d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -168,7 +168,8 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
 */
 i = 16;
 do {
-if ((nv_rd32(bios, 0x300000) & 0xffff) == 0xaa55)
+u32 data = le32_to_cpu(nv_rd32(bios, 0x300000)) & 0xffff;
+if (data == 0xaa55)
 break;
 } while (i--);
 
@@ -176,14 +177,15 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
 goto out;
 
 /* read entire bios image to system memory */
-bios->size = ((nv_rd32(bios, 0x300000) >> 16) & 0xff) * 512;
+bios->size = (le32_to_cpu(nv_rd32(bios, 0x300000)) >> 16) & 0xff;
+bios->size = bios->size * 512;
 if (!bios->size)
 goto out;
 
 bios->data = kmalloc(bios->size, GFP_KERNEL);
 if (bios->data) {
-for (i = 0; i < bios->size; i+=4)
-nv_wo32(bios, i, nv_rd32(bios, 0x300000 + i));
+for (i = 0; i < bios->size; i += 4)
+((u32 *)bios->data)[i/4] = nv_rd32(bios, 0x300000 + i);
 }
 
 /* check the PCI record header */
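The nouveau change wraps the PROM reads in le32_to_cpu() because the option-ROM image is little-endian while the raw register read is not byte-swapped; on big-endian hosts the 0xaa55 signature test and the size byte would otherwise be misread. A standalone sketch of the same parsing done from an explicit little-endian byte buffer (the 0x55 0xaa signature and the size-in-512-byte-units field follow the standard PC option-ROM layout):

#include <stdio.h>
#include <stdint.h>

/* Assemble a host-order value from a little-endian byte stream,
 * independent of host endianness. */
static uint32_t le32_to_host(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* First bytes of a PC option ROM: 0x55, 0xaa, then size in 512-byte units. */
	const uint8_t prom[4] = { 0x55, 0xaa, 0x80, 0x00 };
	uint32_t data = le32_to_host(prom);

	if ((data & 0xffff) == 0xaa55)
		printf("ROM signature found, image size %u bytes\n",
		       ((data >> 16) & 0xff) * 512);
	return 0;
}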
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 83face3f608f..279206997e5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -389,9 +389,6 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
 acpi_status status;
 acpi_handle dhandle, rom_handle;
 
-if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected)
-return false;
-
 dhandle = ACPI_HANDLE(&pdev->dev);
 if (!dhandle)
 return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 3ff030dc1ee3..da764a4ed958 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -764,9 +764,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 }
 
 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
-mutex_unlock(&chan->cli->mutex);
 if (ret)
 goto fail_unreserve;
+mutex_unlock(&chan->cli->mutex);
 
 /* Update the crtc struct and cleanup */
 crtc->primary->fb = fb;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index fb187c78978f..c31c12b4e666 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -1177,27 +1177,43 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, | |||
1177 | 1177 | ||
1178 | /* Set NUM_BANKS. */ | 1178 | /* Set NUM_BANKS. */ |
1179 | if (rdev->family >= CHIP_TAHITI) { | 1179 | if (rdev->family >= CHIP_TAHITI) { |
1180 | unsigned tileb, index, num_banks, tile_split_bytes; | 1180 | unsigned index, num_banks; |
1181 | 1181 | ||
1182 | /* Calculate the macrotile mode index. */ | 1182 | if (rdev->family >= CHIP_BONAIRE) { |
1183 | tile_split_bytes = 64 << tile_split; | 1183 | unsigned tileb, tile_split_bytes; |
1184 | tileb = 8 * 8 * target_fb->bits_per_pixel / 8; | ||
1185 | tileb = min(tile_split_bytes, tileb); | ||
1186 | 1184 | ||
1187 | for (index = 0; tileb > 64; index++) { | 1185 | /* Calculate the macrotile mode index. */ |
1188 | tileb >>= 1; | 1186 | tile_split_bytes = 64 << tile_split; |
1189 | } | 1187 | tileb = 8 * 8 * target_fb->bits_per_pixel / 8; |
1188 | tileb = min(tile_split_bytes, tileb); | ||
1190 | 1189 | ||
1191 | if (index >= 16) { | 1190 | for (index = 0; tileb > 64; index++) |
1192 | DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", | 1191 | tileb >>= 1; |
1193 | target_fb->bits_per_pixel, tile_split); | 1192 | |
1194 | return -EINVAL; | 1193 | if (index >= 16) { |
1195 | } | 1194 | DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", |
1195 | target_fb->bits_per_pixel, tile_split); | ||
1196 | return -EINVAL; | ||
1197 | } | ||
1196 | 1198 | ||
1197 | if (rdev->family >= CHIP_BONAIRE) | ||
1198 | num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; | 1199 | num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; |
1199 | else | 1200 | } else { |
1201 | switch (target_fb->bits_per_pixel) { | ||
1202 | case 8: | ||
1203 | index = 10; | ||
1204 | break; | ||
1205 | case 16: | ||
1206 | index = SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP; | ||
1207 | break; | ||
1208 | default: | ||
1209 | case 32: | ||
1210 | index = SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP; | ||
1211 | break; | ||
1212 | } | ||
1213 | |||
1200 | num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3; | 1214 | num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3; |
1215 | } | ||
1216 | |||
1201 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); | 1217 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); |
1202 | } else { | 1218 | } else { |
1203 | /* NI and older. */ | 1219 | /* NI and older. */ |
@@ -1720,8 +1736,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) | |||
1720 | } | 1736 | } |
1721 | /* otherwise, pick one of the plls */ | 1737 | /* otherwise, pick one of the plls */ |
1722 | if ((rdev->family == CHIP_KAVERI) || | 1738 | if ((rdev->family == CHIP_KAVERI) || |
1723 | (rdev->family == CHIP_KABINI)) { | 1739 | (rdev->family == CHIP_KABINI) || |
1724 | /* KB/KV has PPLL1 and PPLL2 */ | 1740 | (rdev->family == CHIP_MULLINS)) { |
1741 | /* KB/KV/ML has PPLL1 and PPLL2 */ | ||
1725 | pll_in_use = radeon_get_pll_use_mask(crtc); | 1742 | pll_in_use = radeon_get_pll_use_mask(crtc); |
1726 | if (!(pll_in_use & (1 << ATOM_PPLL2))) | 1743 | if (!(pll_in_use & (1 << ATOM_PPLL2))) |
1727 | return ATOM_PPLL2; | 1744 | return ATOM_PPLL2; |
@@ -1885,6 +1902,9 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, | |||
1885 | (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) | 1902 | (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) |
1886 | is_tvcv = true; | 1903 | is_tvcv = true; |
1887 | 1904 | ||
1905 | if (!radeon_crtc->adjusted_clock) | ||
1906 | return -EINVAL; | ||
1907 | |||
1888 | atombios_crtc_set_pll(crtc, adjusted_mode); | 1908 | atombios_crtc_set_pll(crtc, adjusted_mode); |
1889 | 1909 | ||
1890 | if (ASIC_IS_DCE4(rdev)) | 1910 | if (ASIC_IS_DCE4(rdev)) |
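On SI parts the atombios_crtc.c hunk now derives the 2D scanout tile-mode index from the framebuffer depth via a switch (falling back to the 32bpp entry) and then pulls NUM_BANKS out of bits 20-21 of the selected tile_mode_array entry; only CIK+ keeps the computed macrotile index. A rough sketch of that select-then-extract step; the IDX_* values and the tile_mode_array contents below are placeholders, not the real register encodings:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder indices standing in for the SI_TILE_MODE_COLOR_2D_SCANOUT_*
     * defines; the real values live in the driver headers. */
    enum { IDX_8BPP = 10, IDX_16BPP = 11, IDX_32BPP = 12 };

    static unsigned pick_scanout_index(unsigned bpp)
    {
        switch (bpp) {
        case 8:
            return IDX_8BPP;
        case 16:
            return IDX_16BPP;
        default:    /* unknown depths fall back to the 32bpp mode, as the hunk does */
        case 32:
            return IDX_32BPP;
        }
    }

    int main(void)
    {
        /* Fake tile-mode array; bits 20-21 would encode NUM_BANKS. */
        uint32_t tile_mode_array[16] = {
            [IDX_8BPP] = 1u << 20, [IDX_16BPP] = 2u << 20, [IDX_32BPP] = 3u << 20,
        };
        unsigned idx = pick_scanout_index(32);
        unsigned num_banks = (tile_mode_array[idx] >> 20) & 0x3;

        printf("index %u -> num_banks field %u\n", idx, num_banks);
        return 0;
    }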
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index bc0119fb6c12..54e4f52549af 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -366,11 +366,11 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector) | |||
366 | if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) | 366 | if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) |
367 | return; | 367 | return; |
368 | 368 | ||
369 | if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3)) | 369 | if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3) |
370 | DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", | 370 | DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", |
371 | buf[0], buf[1], buf[2]); | 371 | buf[0], buf[1], buf[2]); |
372 | 372 | ||
373 | if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3)) | 373 | if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3) |
374 | DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", | 374 | DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", |
375 | buf[0], buf[1], buf[2]); | 375 | buf[0], buf[1], buf[2]); |
376 | } | 376 | } |
@@ -419,21 +419,23 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder, | |||
419 | 419 | ||
420 | if (dp_bridge != ENCODER_OBJECT_ID_NONE) { | 420 | if (dp_bridge != ENCODER_OBJECT_ID_NONE) { |
421 | /* DP bridge chips */ | 421 | /* DP bridge chips */ |
422 | drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, | 422 | if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, |
423 | DP_EDP_CONFIGURATION_CAP, &tmp); | 423 | DP_EDP_CONFIGURATION_CAP, &tmp) == 1) { |
424 | if (tmp & 1) | 424 | if (tmp & 1) |
425 | panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; | 425 | panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; |
426 | else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || | 426 | else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || |
427 | (dp_bridge == ENCODER_OBJECT_ID_TRAVIS)) | 427 | (dp_bridge == ENCODER_OBJECT_ID_TRAVIS)) |
428 | panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; | 428 | panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; |
429 | else | 429 | else |
430 | panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; | 430 | panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; |
431 | } | ||
431 | } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { | 432 | } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { |
432 | /* eDP */ | 433 | /* eDP */ |
433 | drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, | 434 | if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, |
434 | DP_EDP_CONFIGURATION_CAP, &tmp); | 435 | DP_EDP_CONFIGURATION_CAP, &tmp) == 1) { |
435 | if (tmp & 1) | 436 | if (tmp & 1) |
436 | panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; | 437 | panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; |
438 | } | ||
437 | } | 439 | } |
438 | 440 | ||
439 | return panel_mode; | 441 | return panel_mode; |
@@ -809,11 +811,15 @@ void radeon_dp_link_train(struct drm_encoder *encoder, | |||
809 | else | 811 | else |
810 | dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A; | 812 | dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A; |
811 | 813 | ||
812 | drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp); | 814 | if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp) |
813 | if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) | 815 | == 1) { |
814 | dp_info.tp3_supported = true; | 816 | if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) |
815 | else | 817 | dp_info.tp3_supported = true; |
818 | else | ||
819 | dp_info.tp3_supported = false; | ||
820 | } else { | ||
816 | dp_info.tp3_supported = false; | 821 | dp_info.tp3_supported = false; |
822 | } | ||
817 | 823 | ||
818 | memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE); | 824 | memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE); |
819 | dp_info.rdev = rdev; | 825 | dp_info.rdev = rdev; |
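The atombios_dp.c hunks stop trusting DPCD reads blindly: drm_dp_dpcd_read()/readb() return the number of bytes transferred, so the buffer is only used when the full requested length came back. A small sketch of that check, with read_dpcd() as a made-up helper following the same return convention:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Made-up stand-in for drm_dp_dpcd_read(): returns the number of bytes
     * transferred, or a negative errno on failure. */
    static int read_dpcd(uint8_t *buf, size_t len, int simulate_short)
    {
        if (simulate_short)
            return 1;              /* short transfer */
        memset(buf, 0xab, len);    /* pretend the sink answered */
        return (int)len;
    }

    int main(void)
    {
        uint8_t oui[3];

        /* Only trust the buffer when the full requested length came back. */
        if (read_dpcd(oui, sizeof(oui), 0) == (int)sizeof(oui))
            printf("OUI: %02x%02x%02x\n", oui[0], oui[1], oui[2]);

        if (read_dpcd(oui, sizeof(oui), 1) != (int)sizeof(oui))
            printf("short read, OUI ignored\n");

        return 0;
    }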
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 199eb194716f..d2fd98968085 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
@@ -63,6 +63,12 @@ MODULE_FIRMWARE("radeon/KABINI_ce.bin"); | |||
63 | MODULE_FIRMWARE("radeon/KABINI_mec.bin"); | 63 | MODULE_FIRMWARE("radeon/KABINI_mec.bin"); |
64 | MODULE_FIRMWARE("radeon/KABINI_rlc.bin"); | 64 | MODULE_FIRMWARE("radeon/KABINI_rlc.bin"); |
65 | MODULE_FIRMWARE("radeon/KABINI_sdma.bin"); | 65 | MODULE_FIRMWARE("radeon/KABINI_sdma.bin"); |
66 | MODULE_FIRMWARE("radeon/MULLINS_pfp.bin"); | ||
67 | MODULE_FIRMWARE("radeon/MULLINS_me.bin"); | ||
68 | MODULE_FIRMWARE("radeon/MULLINS_ce.bin"); | ||
69 | MODULE_FIRMWARE("radeon/MULLINS_mec.bin"); | ||
70 | MODULE_FIRMWARE("radeon/MULLINS_rlc.bin"); | ||
71 | MODULE_FIRMWARE("radeon/MULLINS_sdma.bin"); | ||
66 | 72 | ||
67 | extern int r600_ih_ring_alloc(struct radeon_device *rdev); | 73 | extern int r600_ih_ring_alloc(struct radeon_device *rdev); |
68 | extern void r600_ih_ring_fini(struct radeon_device *rdev); | 74 | extern void r600_ih_ring_fini(struct radeon_device *rdev); |
@@ -1473,6 +1479,43 @@ static const u32 hawaii_mgcg_cgcg_init[] = | |||
1473 | 0xd80c, 0xff000ff0, 0x00000100 | 1479 | 0xd80c, 0xff000ff0, 0x00000100 |
1474 | }; | 1480 | }; |
1475 | 1481 | ||
1482 | static const u32 godavari_golden_registers[] = | ||
1483 | { | ||
1484 | 0x55e4, 0xff607fff, 0xfc000100, | ||
1485 | 0x6ed8, 0x00010101, 0x00010000, | ||
1486 | 0x9830, 0xffffffff, 0x00000000, | ||
1487 | 0x98302, 0xf00fffff, 0x00000400, | ||
1488 | 0x6130, 0xffffffff, 0x00010000, | ||
1489 | 0x5bb0, 0x000000f0, 0x00000070, | ||
1490 | 0x5bc0, 0xf0311fff, 0x80300000, | ||
1491 | 0x98f8, 0x73773777, 0x12010001, | ||
1492 | 0x98fc, 0xffffffff, 0x00000010, | ||
1493 | 0x8030, 0x00001f0f, 0x0000100a, | ||
1494 | 0x2f48, 0x73773777, 0x12010001, | ||
1495 | 0x2408, 0x000fffff, 0x000c007f, | ||
1496 | 0x8a14, 0xf000003f, 0x00000007, | ||
1497 | 0x8b24, 0xffffffff, 0x00ff0fff, | ||
1498 | 0x30a04, 0x0000ff0f, 0x00000000, | ||
1499 | 0x28a4c, 0x07ffffff, 0x06000000, | ||
1500 | 0x4d8, 0x00000fff, 0x00000100, | ||
1501 | 0xd014, 0x00010000, 0x00810001, | ||
1502 | 0xd814, 0x00010000, 0x00810001, | ||
1503 | 0x3e78, 0x00000001, 0x00000002, | ||
1504 | 0xc768, 0x00000008, 0x00000008, | ||
1505 | 0xc770, 0x00000f00, 0x00000800, | ||
1506 | 0xc774, 0x00000f00, 0x00000800, | ||
1507 | 0xc798, 0x00ffffff, 0x00ff7fbf, | ||
1508 | 0xc79c, 0x00ffffff, 0x00ff7faf, | ||
1509 | 0x8c00, 0x000000ff, 0x00000001, | ||
1510 | 0x214f8, 0x01ff01ff, 0x00000002, | ||
1511 | 0x21498, 0x007ff800, 0x00200000, | ||
1512 | 0x2015c, 0xffffffff, 0x00000f40, | ||
1513 | 0x88c4, 0x001f3ae3, 0x00000082, | ||
1514 | 0x88d4, 0x0000001f, 0x00000010, | ||
1515 | 0x30934, 0xffffffff, 0x00000000 | ||
1516 | }; | ||
1517 | |||
1518 | |||
1476 | static void cik_init_golden_registers(struct radeon_device *rdev) | 1519 | static void cik_init_golden_registers(struct radeon_device *rdev) |
1477 | { | 1520 | { |
1478 | switch (rdev->family) { | 1521 | switch (rdev->family) { |
@@ -1504,6 +1547,20 @@ static void cik_init_golden_registers(struct radeon_device *rdev) | |||
1504 | kalindi_golden_spm_registers, | 1547 | kalindi_golden_spm_registers, |
1505 | (const u32)ARRAY_SIZE(kalindi_golden_spm_registers)); | 1548 | (const u32)ARRAY_SIZE(kalindi_golden_spm_registers)); |
1506 | break; | 1549 | break; |
1550 | case CHIP_MULLINS: | ||
1551 | radeon_program_register_sequence(rdev, | ||
1552 | kalindi_mgcg_cgcg_init, | ||
1553 | (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init)); | ||
1554 | radeon_program_register_sequence(rdev, | ||
1555 | godavari_golden_registers, | ||
1556 | (const u32)ARRAY_SIZE(godavari_golden_registers)); | ||
1557 | radeon_program_register_sequence(rdev, | ||
1558 | kalindi_golden_common_registers, | ||
1559 | (const u32)ARRAY_SIZE(kalindi_golden_common_registers)); | ||
1560 | radeon_program_register_sequence(rdev, | ||
1561 | kalindi_golden_spm_registers, | ||
1562 | (const u32)ARRAY_SIZE(kalindi_golden_spm_registers)); | ||
1563 | break; | ||
1507 | case CHIP_KAVERI: | 1564 | case CHIP_KAVERI: |
1508 | radeon_program_register_sequence(rdev, | 1565 | radeon_program_register_sequence(rdev, |
1509 | spectre_mgcg_cgcg_init, | 1566 | spectre_mgcg_cgcg_init, |
@@ -1834,6 +1891,15 @@ static int cik_init_microcode(struct radeon_device *rdev) | |||
1834 | rlc_req_size = KB_RLC_UCODE_SIZE * 4; | 1891 | rlc_req_size = KB_RLC_UCODE_SIZE * 4; |
1835 | sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; | 1892 | sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; |
1836 | break; | 1893 | break; |
1894 | case CHIP_MULLINS: | ||
1895 | chip_name = "MULLINS"; | ||
1896 | pfp_req_size = CIK_PFP_UCODE_SIZE * 4; | ||
1897 | me_req_size = CIK_ME_UCODE_SIZE * 4; | ||
1898 | ce_req_size = CIK_CE_UCODE_SIZE * 4; | ||
1899 | mec_req_size = CIK_MEC_UCODE_SIZE * 4; | ||
1900 | rlc_req_size = ML_RLC_UCODE_SIZE * 4; | ||
1901 | sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; | ||
1902 | break; | ||
1837 | default: BUG(); | 1903 | default: BUG(); |
1838 | } | 1904 | } |
1839 | 1905 | ||
@@ -3272,6 +3338,7 @@ static void cik_gpu_init(struct radeon_device *rdev) | |||
3272 | gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; | 3338 | gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; |
3273 | break; | 3339 | break; |
3274 | case CHIP_KABINI: | 3340 | case CHIP_KABINI: |
3341 | case CHIP_MULLINS: | ||
3275 | default: | 3342 | default: |
3276 | rdev->config.cik.max_shader_engines = 1; | 3343 | rdev->config.cik.max_shader_engines = 1; |
3277 | rdev->config.cik.max_tile_pipes = 2; | 3344 | rdev->config.cik.max_tile_pipes = 2; |
@@ -3702,6 +3769,7 @@ int cik_copy_cpdma(struct radeon_device *rdev, | |||
3702 | r = radeon_fence_emit(rdev, fence, ring->idx); | 3769 | r = radeon_fence_emit(rdev, fence, ring->idx); |
3703 | if (r) { | 3770 | if (r) { |
3704 | radeon_ring_unlock_undo(rdev, ring); | 3771 | radeon_ring_unlock_undo(rdev, ring); |
3772 | radeon_semaphore_free(rdev, &sem, NULL); | ||
3705 | return r; | 3773 | return r; |
3706 | } | 3774 | } |
3707 | 3775 | ||
@@ -5800,6 +5868,9 @@ static int cik_rlc_resume(struct radeon_device *rdev) | |||
5800 | case CHIP_KABINI: | 5868 | case CHIP_KABINI: |
5801 | size = KB_RLC_UCODE_SIZE; | 5869 | size = KB_RLC_UCODE_SIZE; |
5802 | break; | 5870 | break; |
5871 | case CHIP_MULLINS: | ||
5872 | size = ML_RLC_UCODE_SIZE; | ||
5873 | break; | ||
5803 | } | 5874 | } |
5804 | 5875 | ||
5805 | cik_rlc_stop(rdev); | 5876 | cik_rlc_stop(rdev); |
@@ -6548,6 +6619,7 @@ void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer) | |||
6548 | buffer[count++] = cpu_to_le32(0x00000000); | 6619 | buffer[count++] = cpu_to_le32(0x00000000); |
6549 | break; | 6620 | break; |
6550 | case CHIP_KABINI: | 6621 | case CHIP_KABINI: |
6622 | case CHIP_MULLINS: | ||
6551 | buffer[count++] = cpu_to_le32(0x00000000); /* XXX */ | 6623 | buffer[count++] = cpu_to_le32(0x00000000); /* XXX */ |
6552 | buffer[count++] = cpu_to_le32(0x00000000); | 6624 | buffer[count++] = cpu_to_le32(0x00000000); |
6553 | break; | 6625 | break; |
@@ -6693,6 +6765,19 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev) | |||
6693 | WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | 6765 | WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); |
6694 | WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | 6766 | WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); |
6695 | } | 6767 | } |
6768 | /* pflip */ | ||
6769 | if (rdev->num_crtc >= 2) { | ||
6770 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | ||
6771 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | ||
6772 | } | ||
6773 | if (rdev->num_crtc >= 4) { | ||
6774 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); | ||
6775 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); | ||
6776 | } | ||
6777 | if (rdev->num_crtc >= 6) { | ||
6778 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | ||
6779 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | ||
6780 | } | ||
6696 | 6781 | ||
6697 | /* dac hotplug */ | 6782 | /* dac hotplug */ |
6698 | WREG32(DAC_AUTODETECT_INT_CONTROL, 0); | 6783 | WREG32(DAC_AUTODETECT_INT_CONTROL, 0); |
@@ -7049,6 +7134,25 @@ int cik_irq_set(struct radeon_device *rdev) | |||
7049 | WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); | 7134 | WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); |
7050 | } | 7135 | } |
7051 | 7136 | ||
7137 | if (rdev->num_crtc >= 2) { | ||
7138 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, | ||
7139 | GRPH_PFLIP_INT_MASK); | ||
7140 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, | ||
7141 | GRPH_PFLIP_INT_MASK); | ||
7142 | } | ||
7143 | if (rdev->num_crtc >= 4) { | ||
7144 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, | ||
7145 | GRPH_PFLIP_INT_MASK); | ||
7146 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, | ||
7147 | GRPH_PFLIP_INT_MASK); | ||
7148 | } | ||
7149 | if (rdev->num_crtc >= 6) { | ||
7150 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, | ||
7151 | GRPH_PFLIP_INT_MASK); | ||
7152 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, | ||
7153 | GRPH_PFLIP_INT_MASK); | ||
7154 | } | ||
7155 | |||
7052 | WREG32(DC_HPD1_INT_CONTROL, hpd1); | 7156 | WREG32(DC_HPD1_INT_CONTROL, hpd1); |
7053 | WREG32(DC_HPD2_INT_CONTROL, hpd2); | 7157 | WREG32(DC_HPD2_INT_CONTROL, hpd2); |
7054 | WREG32(DC_HPD3_INT_CONTROL, hpd3); | 7158 | WREG32(DC_HPD3_INT_CONTROL, hpd3); |
@@ -7085,6 +7189,29 @@ static inline void cik_irq_ack(struct radeon_device *rdev) | |||
7085 | rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); | 7189 | rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); |
7086 | rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6); | 7190 | rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6); |
7087 | 7191 | ||
7192 | rdev->irq.stat_regs.cik.d1grph_int = RREG32(GRPH_INT_STATUS + | ||
7193 | EVERGREEN_CRTC0_REGISTER_OFFSET); | ||
7194 | rdev->irq.stat_regs.cik.d2grph_int = RREG32(GRPH_INT_STATUS + | ||
7195 | EVERGREEN_CRTC1_REGISTER_OFFSET); | ||
7196 | if (rdev->num_crtc >= 4) { | ||
7197 | rdev->irq.stat_regs.cik.d3grph_int = RREG32(GRPH_INT_STATUS + | ||
7198 | EVERGREEN_CRTC2_REGISTER_OFFSET); | ||
7199 | rdev->irq.stat_regs.cik.d4grph_int = RREG32(GRPH_INT_STATUS + | ||
7200 | EVERGREEN_CRTC3_REGISTER_OFFSET); | ||
7201 | } | ||
7202 | if (rdev->num_crtc >= 6) { | ||
7203 | rdev->irq.stat_regs.cik.d5grph_int = RREG32(GRPH_INT_STATUS + | ||
7204 | EVERGREEN_CRTC4_REGISTER_OFFSET); | ||
7205 | rdev->irq.stat_regs.cik.d6grph_int = RREG32(GRPH_INT_STATUS + | ||
7206 | EVERGREEN_CRTC5_REGISTER_OFFSET); | ||
7207 | } | ||
7208 | |||
7209 | if (rdev->irq.stat_regs.cik.d1grph_int & GRPH_PFLIP_INT_OCCURRED) | ||
7210 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, | ||
7211 | GRPH_PFLIP_INT_CLEAR); | ||
7212 | if (rdev->irq.stat_regs.cik.d2grph_int & GRPH_PFLIP_INT_OCCURRED) | ||
7213 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, | ||
7214 | GRPH_PFLIP_INT_CLEAR); | ||
7088 | if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) | 7215 | if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) |
7089 | WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); | 7216 | WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); |
7090 | if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) | 7217 | if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) |
@@ -7095,6 +7222,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev) | |||
7095 | WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); | 7222 | WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); |
7096 | 7223 | ||
7097 | if (rdev->num_crtc >= 4) { | 7224 | if (rdev->num_crtc >= 4) { |
7225 | if (rdev->irq.stat_regs.cik.d3grph_int & GRPH_PFLIP_INT_OCCURRED) | ||
7226 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, | ||
7227 | GRPH_PFLIP_INT_CLEAR); | ||
7228 | if (rdev->irq.stat_regs.cik.d4grph_int & GRPH_PFLIP_INT_OCCURRED) | ||
7229 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, | ||
7230 | GRPH_PFLIP_INT_CLEAR); | ||
7098 | if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) | 7231 | if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) |
7099 | WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); | 7232 | WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); |
7100 | if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) | 7233 | if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) |
@@ -7106,6 +7239,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev) | |||
7106 | } | 7239 | } |
7107 | 7240 | ||
7108 | if (rdev->num_crtc >= 6) { | 7241 | if (rdev->num_crtc >= 6) { |
7242 | if (rdev->irq.stat_regs.cik.d5grph_int & GRPH_PFLIP_INT_OCCURRED) | ||
7243 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, | ||
7244 | GRPH_PFLIP_INT_CLEAR); | ||
7245 | if (rdev->irq.stat_regs.cik.d6grph_int & GRPH_PFLIP_INT_OCCURRED) | ||
7246 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, | ||
7247 | GRPH_PFLIP_INT_CLEAR); | ||
7109 | if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) | 7248 | if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) |
7110 | WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); | 7249 | WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); |
7111 | if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) | 7250 | if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) |
@@ -7457,6 +7596,15 @@ restart_ih: | |||
7457 | break; | 7596 | break; |
7458 | } | 7597 | } |
7459 | break; | 7598 | break; |
7599 | case 8: /* D1 page flip */ | ||
7600 | case 10: /* D2 page flip */ | ||
7601 | case 12: /* D3 page flip */ | ||
7602 | case 14: /* D4 page flip */ | ||
7603 | case 16: /* D5 page flip */ | ||
7604 | case 18: /* D6 page flip */ | ||
7605 | DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); | ||
7606 | radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); | ||
7607 | break; | ||
7460 | case 42: /* HPD hotplug */ | 7608 | case 42: /* HPD hotplug */ |
7461 | switch (src_data) { | 7609 | switch (src_data) { |
7462 | case 0: | 7610 | case 0: |
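The cik.c hunks add graphics pageflip interrupt handling: the GRPH interrupt is unmasked per CRTC, the latched status is acknowledged by writing the CLEAR bit, and IH source ids 8, 10, ... 18 are decoded to D1..D6 flips. A toy sketch of the ack-and-decode steps; the register is faked with a plain variable, and the bit names mirror the defines added to cikd.h further down:

    #include <stdint.h>
    #include <stdio.h>

    #define GRPH_PFLIP_INT_OCCURRED    (1u << 0)
    #define GRPH_PFLIP_INT_CLEAR       (1u << 8)

    static uint32_t grph_int_status;    /* stands in for the MMIO register */

    static void ack_pflip(void)
    {
        uint32_t stat = grph_int_status;    /* RREG32(GRPH_INT_STATUS + offset) */

        if (stat & GRPH_PFLIP_INT_OCCURRED) {
            /* The driver writes GRPH_PFLIP_INT_CLEAR back to the register;
             * the fake register here just drops the pending bit. */
            grph_int_status &= ~GRPH_PFLIP_INT_OCCURRED;
            printf("page flip acknowledged\n");
        }
    }

    static int src_id_to_crtc(int src_id)
    {
        /* IH source ids 8, 10, 12, 14, 16, 18 map to D1..D6 page flips. */
        return (src_id - 8) >> 1;
    }

    int main(void)
    {
        grph_int_status = GRPH_PFLIP_INT_OCCURRED;
        ack_pflip();
        ack_pflip();    /* nothing pending, prints nothing */
        printf("src_id 14 -> crtc %d\n", src_id_to_crtc(14));
        return 0;
    }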
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index f7e46cf682af..72e464c79a88 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c | |||
@@ -562,6 +562,7 @@ int cik_copy_dma(struct radeon_device *rdev, | |||
562 | r = radeon_fence_emit(rdev, fence, ring->idx); | 562 | r = radeon_fence_emit(rdev, fence, ring->idx); |
563 | if (r) { | 563 | if (r) { |
564 | radeon_ring_unlock_undo(rdev, ring); | 564 | radeon_ring_unlock_undo(rdev, ring); |
565 | radeon_semaphore_free(rdev, &sem, NULL); | ||
565 | return r; | 566 | return r; |
566 | } | 567 | } |
567 | 568 | ||
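The cik_sdma.c hunk (like the matching ones in cik.c, evergreen_dma.c, r600.c, r600_dma.c, rv770_dma.c and si_dma.c) frees the semaphore when fence emission fails, so the early return no longer leaks it. A minimal sketch of that release-on-early-return pattern; emit_fence() and the malloc'd buffer are stand-ins for the real fence and semaphore objects:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Made-up stand-in for radeon_fence_emit(). */
    static int emit_fence(int fail)
    {
        return fail ? -ENOMEM : 0;
    }

    static int copy_dma(int fail)
    {
        void *sem = malloc(64);    /* stands in for the semaphore object */
        int r;

        if (!sem)
            return -ENOMEM;

        r = emit_fence(fail);
        if (r) {
            /* undo ring state ... then release the semaphore, the step
             * the hunks add to every copy routine's error path */
            free(sem);
            return r;
        }

        /* success path: the real code frees the semaphore against the fence */
        free(sem);
        return 0;
    }

    int main(void)
    {
        printf("ok=%d err=%d\n", copy_dma(0), copy_dma(1));
        return 0;
    }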
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 213873270d5f..dd7926394a8f 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h | |||
@@ -888,6 +888,15 @@ | |||
888 | # define DC_HPD6_RX_INTERRUPT (1 << 18) | 888 | # define DC_HPD6_RX_INTERRUPT (1 << 18) |
889 | #define DISP_INTERRUPT_STATUS_CONTINUE6 0x6780 | 889 | #define DISP_INTERRUPT_STATUS_CONTINUE6 0x6780 |
890 | 890 | ||
891 | /* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */ | ||
892 | #define GRPH_INT_STATUS 0x6858 | ||
893 | # define GRPH_PFLIP_INT_OCCURRED (1 << 0) | ||
894 | # define GRPH_PFLIP_INT_CLEAR (1 << 8) | ||
895 | /* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */ | ||
896 | #define GRPH_INT_CONTROL 0x685c | ||
897 | # define GRPH_PFLIP_INT_MASK (1 << 0) | ||
898 | # define GRPH_PFLIP_INT_TYPE (1 << 8) | ||
899 | |||
891 | #define DAC_AUTODETECT_INT_CONTROL 0x67c8 | 900 | #define DAC_AUTODETECT_INT_CONTROL 0x67c8 |
892 | 901 | ||
893 | #define DC_HPD1_INT_STATUS 0x601c | 902 | #define DC_HPD1_INT_STATUS 0x601c |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index b406546440da..0f7a51a3694f 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -4371,7 +4371,6 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
4371 | u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; | 4371 | u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; |
4372 | u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; | 4372 | u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; |
4373 | u32 grbm_int_cntl = 0; | 4373 | u32 grbm_int_cntl = 0; |
4374 | u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; | ||
4375 | u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0; | 4374 | u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0; |
4376 | u32 dma_cntl, dma_cntl1 = 0; | 4375 | u32 dma_cntl, dma_cntl1 = 0; |
4377 | u32 thermal_int = 0; | 4376 | u32 thermal_int = 0; |
@@ -4554,15 +4553,21 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
4554 | WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); | 4553 | WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); |
4555 | } | 4554 | } |
4556 | 4555 | ||
4557 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); | 4556 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, |
4558 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); | 4557 | GRPH_PFLIP_INT_MASK); |
4558 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, | ||
4559 | GRPH_PFLIP_INT_MASK); | ||
4559 | if (rdev->num_crtc >= 4) { | 4560 | if (rdev->num_crtc >= 4) { |
4560 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); | 4561 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, |
4561 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); | 4562 | GRPH_PFLIP_INT_MASK); |
4563 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, | ||
4564 | GRPH_PFLIP_INT_MASK); | ||
4562 | } | 4565 | } |
4563 | if (rdev->num_crtc >= 6) { | 4566 | if (rdev->num_crtc >= 6) { |
4564 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); | 4567 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, |
4565 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); | 4568 | GRPH_PFLIP_INT_MASK); |
4569 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, | ||
4570 | GRPH_PFLIP_INT_MASK); | ||
4566 | } | 4571 | } |
4567 | 4572 | ||
4568 | WREG32(DC_HPD1_INT_CONTROL, hpd1); | 4573 | WREG32(DC_HPD1_INT_CONTROL, hpd1); |
@@ -4951,6 +4956,15 @@ restart_ih: | |||
4951 | break; | 4956 | break; |
4952 | } | 4957 | } |
4953 | break; | 4958 | break; |
4959 | case 8: /* D1 page flip */ | ||
4960 | case 10: /* D2 page flip */ | ||
4961 | case 12: /* D3 page flip */ | ||
4962 | case 14: /* D4 page flip */ | ||
4963 | case 16: /* D5 page flip */ | ||
4964 | case 18: /* D6 page flip */ | ||
4965 | DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); | ||
4966 | radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); | ||
4967 | break; | ||
4954 | case 42: /* HPD hotplug */ | 4968 | case 42: /* HPD hotplug */ |
4955 | switch (src_data) { | 4969 | switch (src_data) { |
4956 | case 0: | 4970 | case 0: |
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c index 287fe966d7de..478caefe0fef 100644 --- a/drivers/gpu/drm/radeon/evergreen_dma.c +++ b/drivers/gpu/drm/radeon/evergreen_dma.c | |||
@@ -151,6 +151,7 @@ int evergreen_copy_dma(struct radeon_device *rdev, | |||
151 | r = radeon_fence_emit(rdev, fence, ring->idx); | 151 | r = radeon_fence_emit(rdev, fence, ring->idx); |
152 | if (r) { | 152 | if (r) { |
153 | radeon_ring_unlock_undo(rdev, ring); | 153 | radeon_ring_unlock_undo(rdev, ring); |
154 | radeon_semaphore_free(rdev, &sem, NULL); | ||
154 | return r; | 155 | return r; |
155 | } | 156 | } |
156 | 157 | ||
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 16ec9d56a234..3f6e817d97ee 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c | |||
@@ -546,6 +546,52 @@ static int kv_set_divider_value(struct radeon_device *rdev, | |||
546 | return 0; | 546 | return 0; |
547 | } | 547 | } |
548 | 548 | ||
549 | static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev, | ||
550 | struct sumo_vid_mapping_table *vid_mapping_table, | ||
551 | u32 vid_2bit) | ||
552 | { | ||
553 | struct radeon_clock_voltage_dependency_table *vddc_sclk_table = | ||
554 | &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; | ||
555 | u32 i; | ||
556 | |||
557 | if (vddc_sclk_table && vddc_sclk_table->count) { | ||
558 | if (vid_2bit < vddc_sclk_table->count) | ||
559 | return vddc_sclk_table->entries[vid_2bit].v; | ||
560 | else | ||
561 | return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v; | ||
562 | } else { | ||
563 | for (i = 0; i < vid_mapping_table->num_entries; i++) { | ||
564 | if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) | ||
565 | return vid_mapping_table->entries[i].vid_7bit; | ||
566 | } | ||
567 | return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; | ||
568 | } | ||
569 | } | ||
570 | |||
571 | static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev, | ||
572 | struct sumo_vid_mapping_table *vid_mapping_table, | ||
573 | u32 vid_7bit) | ||
574 | { | ||
575 | struct radeon_clock_voltage_dependency_table *vddc_sclk_table = | ||
576 | &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; | ||
577 | u32 i; | ||
578 | |||
579 | if (vddc_sclk_table && vddc_sclk_table->count) { | ||
580 | for (i = 0; i < vddc_sclk_table->count; i++) { | ||
581 | if (vddc_sclk_table->entries[i].v == vid_7bit) | ||
582 | return i; | ||
583 | } | ||
584 | return vddc_sclk_table->count - 1; | ||
585 | } else { | ||
586 | for (i = 0; i < vid_mapping_table->num_entries; i++) { | ||
587 | if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) | ||
588 | return vid_mapping_table->entries[i].vid_2bit; | ||
589 | } | ||
590 | |||
591 | return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; | ||
592 | } | ||
593 | } | ||
594 | |||
549 | static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev, | 595 | static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev, |
550 | u16 voltage) | 596 | u16 voltage) |
551 | { | 597 | { |
@@ -556,9 +602,9 @@ static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev, | |||
556 | u32 vid_2bit) | 602 | u32 vid_2bit) |
557 | { | 603 | { |
558 | struct kv_power_info *pi = kv_get_pi(rdev); | 604 | struct kv_power_info *pi = kv_get_pi(rdev); |
559 | u32 vid_8bit = sumo_convert_vid2_to_vid7(rdev, | 605 | u32 vid_8bit = kv_convert_vid2_to_vid7(rdev, |
560 | &pi->sys_info.vid_mapping_table, | 606 | &pi->sys_info.vid_mapping_table, |
561 | vid_2bit); | 607 | vid_2bit); |
562 | 608 | ||
563 | return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit); | 609 | return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit); |
564 | } | 610 | } |
@@ -639,7 +685,7 @@ static int kv_force_lowest_valid(struct radeon_device *rdev) | |||
639 | 685 | ||
640 | static int kv_unforce_levels(struct radeon_device *rdev) | 686 | static int kv_unforce_levels(struct radeon_device *rdev) |
641 | { | 687 | { |
642 | if (rdev->family == CHIP_KABINI) | 688 | if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) |
643 | return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel); | 689 | return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel); |
644 | else | 690 | else |
645 | return kv_set_enabled_levels(rdev); | 691 | return kv_set_enabled_levels(rdev); |
@@ -1362,13 +1408,20 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate) | |||
1362 | struct radeon_uvd_clock_voltage_dependency_table *table = | 1408 | struct radeon_uvd_clock_voltage_dependency_table *table = |
1363 | &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; | 1409 | &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; |
1364 | int ret; | 1410 | int ret; |
1411 | u32 mask; | ||
1365 | 1412 | ||
1366 | if (!gate) { | 1413 | if (!gate) { |
1367 | if (!pi->caps_uvd_dpm || table->count || pi->caps_stable_p_state) | 1414 | if (table->count) |
1368 | pi->uvd_boot_level = table->count - 1; | 1415 | pi->uvd_boot_level = table->count - 1; |
1369 | else | 1416 | else |
1370 | pi->uvd_boot_level = 0; | 1417 | pi->uvd_boot_level = 0; |
1371 | 1418 | ||
1419 | if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) { | ||
1420 | mask = 1 << pi->uvd_boot_level; | ||
1421 | } else { | ||
1422 | mask = 0x1f; | ||
1423 | } | ||
1424 | |||
1372 | ret = kv_copy_bytes_to_smc(rdev, | 1425 | ret = kv_copy_bytes_to_smc(rdev, |
1373 | pi->dpm_table_start + | 1426 | pi->dpm_table_start + |
1374 | offsetof(SMU7_Fusion_DpmTable, UvdBootLevel), | 1427 | offsetof(SMU7_Fusion_DpmTable, UvdBootLevel), |
@@ -1377,11 +1430,9 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate) | |||
1377 | if (ret) | 1430 | if (ret) |
1378 | return ret; | 1431 | return ret; |
1379 | 1432 | ||
1380 | if (!pi->caps_uvd_dpm || | 1433 | kv_send_msg_to_smc_with_parameter(rdev, |
1381 | pi->caps_stable_p_state) | 1434 | PPSMC_MSG_UVDDPM_SetEnabledMask, |
1382 | kv_send_msg_to_smc_with_parameter(rdev, | 1435 | mask); |
1383 | PPSMC_MSG_UVDDPM_SetEnabledMask, | ||
1384 | (1 << pi->uvd_boot_level)); | ||
1385 | } | 1436 | } |
1386 | 1437 | ||
1387 | return kv_enable_uvd_dpm(rdev, !gate); | 1438 | return kv_enable_uvd_dpm(rdev, !gate); |
@@ -1617,7 +1668,7 @@ static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate) | |||
1617 | if (pi->acp_power_gated == gate) | 1668 | if (pi->acp_power_gated == gate) |
1618 | return; | 1669 | return; |
1619 | 1670 | ||
1620 | if (rdev->family == CHIP_KABINI) | 1671 | if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) |
1621 | return; | 1672 | return; |
1622 | 1673 | ||
1623 | pi->acp_power_gated = gate; | 1674 | pi->acp_power_gated = gate; |
@@ -1786,7 +1837,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) | |||
1786 | } | 1837 | } |
1787 | } | 1838 | } |
1788 | 1839 | ||
1789 | if (rdev->family == CHIP_KABINI) { | 1840 | if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { |
1790 | if (pi->enable_dpm) { | 1841 | if (pi->enable_dpm) { |
1791 | kv_set_valid_clock_range(rdev, new_ps); | 1842 | kv_set_valid_clock_range(rdev, new_ps); |
1792 | kv_update_dfs_bypass_settings(rdev, new_ps); | 1843 | kv_update_dfs_bypass_settings(rdev, new_ps); |
@@ -1812,6 +1863,8 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) | |||
1812 | return ret; | 1863 | return ret; |
1813 | } | 1864 | } |
1814 | kv_update_sclk_t(rdev); | 1865 | kv_update_sclk_t(rdev); |
1866 | if (rdev->family == CHIP_MULLINS) | ||
1867 | kv_enable_nb_dpm(rdev); | ||
1815 | } | 1868 | } |
1816 | } else { | 1869 | } else { |
1817 | if (pi->enable_dpm) { | 1870 | if (pi->enable_dpm) { |
@@ -1862,7 +1915,7 @@ void kv_dpm_reset_asic(struct radeon_device *rdev) | |||
1862 | { | 1915 | { |
1863 | struct kv_power_info *pi = kv_get_pi(rdev); | 1916 | struct kv_power_info *pi = kv_get_pi(rdev); |
1864 | 1917 | ||
1865 | if (rdev->family == CHIP_KABINI) { | 1918 | if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { |
1866 | kv_force_lowest_valid(rdev); | 1919 | kv_force_lowest_valid(rdev); |
1867 | kv_init_graphics_levels(rdev); | 1920 | kv_init_graphics_levels(rdev); |
1868 | kv_program_bootup_state(rdev); | 1921 | kv_program_bootup_state(rdev); |
@@ -1901,14 +1954,41 @@ static void kv_construct_max_power_limits_table(struct radeon_device *rdev, | |||
1901 | static void kv_patch_voltage_values(struct radeon_device *rdev) | 1954 | static void kv_patch_voltage_values(struct radeon_device *rdev) |
1902 | { | 1955 | { |
1903 | int i; | 1956 | int i; |
1904 | struct radeon_uvd_clock_voltage_dependency_table *table = | 1957 | struct radeon_uvd_clock_voltage_dependency_table *uvd_table = |
1905 | &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; | 1958 | &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; |
1959 | struct radeon_vce_clock_voltage_dependency_table *vce_table = | ||
1960 | &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; | ||
1961 | struct radeon_clock_voltage_dependency_table *samu_table = | ||
1962 | &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; | ||
1963 | struct radeon_clock_voltage_dependency_table *acp_table = | ||
1964 | &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; | ||
1906 | 1965 | ||
1907 | if (table->count) { | 1966 | if (uvd_table->count) { |
1908 | for (i = 0; i < table->count; i++) | 1967 | for (i = 0; i < uvd_table->count; i++) |
1909 | table->entries[i].v = | 1968 | uvd_table->entries[i].v = |
1910 | kv_convert_8bit_index_to_voltage(rdev, | 1969 | kv_convert_8bit_index_to_voltage(rdev, |
1911 | table->entries[i].v); | 1970 | uvd_table->entries[i].v); |
1971 | } | ||
1972 | |||
1973 | if (vce_table->count) { | ||
1974 | for (i = 0; i < vce_table->count; i++) | ||
1975 | vce_table->entries[i].v = | ||
1976 | kv_convert_8bit_index_to_voltage(rdev, | ||
1977 | vce_table->entries[i].v); | ||
1978 | } | ||
1979 | |||
1980 | if (samu_table->count) { | ||
1981 | for (i = 0; i < samu_table->count; i++) | ||
1982 | samu_table->entries[i].v = | ||
1983 | kv_convert_8bit_index_to_voltage(rdev, | ||
1984 | samu_table->entries[i].v); | ||
1985 | } | ||
1986 | |||
1987 | if (acp_table->count) { | ||
1988 | for (i = 0; i < acp_table->count; i++) | ||
1989 | acp_table->entries[i].v = | ||
1990 | kv_convert_8bit_index_to_voltage(rdev, | ||
1991 | acp_table->entries[i].v); | ||
1912 | } | 1992 | } |
1913 | 1993 | ||
1914 | } | 1994 | } |
@@ -1941,7 +2021,7 @@ static int kv_force_dpm_highest(struct radeon_device *rdev) | |||
1941 | break; | 2021 | break; |
1942 | } | 2022 | } |
1943 | 2023 | ||
1944 | if (rdev->family == CHIP_KABINI) | 2024 | if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) |
1945 | return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); | 2025 | return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); |
1946 | else | 2026 | else |
1947 | return kv_set_enabled_level(rdev, i); | 2027 | return kv_set_enabled_level(rdev, i); |
@@ -1961,7 +2041,7 @@ static int kv_force_dpm_lowest(struct radeon_device *rdev) | |||
1961 | break; | 2041 | break; |
1962 | } | 2042 | } |
1963 | 2043 | ||
1964 | if (rdev->family == CHIP_KABINI) | 2044 | if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) |
1965 | return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); | 2045 | return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); |
1966 | else | 2046 | else |
1967 | return kv_set_enabled_level(rdev, i); | 2047 | return kv_set_enabled_level(rdev, i); |
@@ -2118,7 +2198,7 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev, | |||
2118 | else | 2198 | else |
2119 | pi->battery_state = false; | 2199 | pi->battery_state = false; |
2120 | 2200 | ||
2121 | if (rdev->family == CHIP_KABINI) { | 2201 | if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { |
2122 | ps->dpm0_pg_nb_ps_lo = 0x1; | 2202 | ps->dpm0_pg_nb_ps_lo = 0x1; |
2123 | ps->dpm0_pg_nb_ps_hi = 0x0; | 2203 | ps->dpm0_pg_nb_ps_hi = 0x0; |
2124 | ps->dpmx_nb_ps_lo = 0x1; | 2204 | ps->dpmx_nb_ps_lo = 0x1; |
@@ -2179,7 +2259,7 @@ static int kv_calculate_nbps_level_settings(struct radeon_device *rdev) | |||
2179 | if (pi->lowest_valid > pi->highest_valid) | 2259 | if (pi->lowest_valid > pi->highest_valid) |
2180 | return -EINVAL; | 2260 | return -EINVAL; |
2181 | 2261 | ||
2182 | if (rdev->family == CHIP_KABINI) { | 2262 | if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { |
2183 | for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { | 2263 | for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { |
2184 | pi->graphics_level[i].GnbSlow = 1; | 2264 | pi->graphics_level[i].GnbSlow = 1; |
2185 | pi->graphics_level[i].ForceNbPs1 = 0; | 2265 | pi->graphics_level[i].ForceNbPs1 = 0; |
@@ -2253,9 +2333,9 @@ static void kv_init_graphics_levels(struct radeon_device *rdev) | |||
2253 | break; | 2333 | break; |
2254 | 2334 | ||
2255 | kv_set_divider_value(rdev, i, table->entries[i].clk); | 2335 | kv_set_divider_value(rdev, i, table->entries[i].clk); |
2256 | vid_2bit = sumo_convert_vid7_to_vid2(rdev, | 2336 | vid_2bit = kv_convert_vid7_to_vid2(rdev, |
2257 | &pi->sys_info.vid_mapping_table, | 2337 | &pi->sys_info.vid_mapping_table, |
2258 | table->entries[i].v); | 2338 | table->entries[i].v); |
2259 | kv_set_vid(rdev, i, vid_2bit); | 2339 | kv_set_vid(rdev, i, vid_2bit); |
2260 | kv_set_at(rdev, i, pi->at[i]); | 2340 | kv_set_at(rdev, i, pi->at[i]); |
2261 | kv_dpm_power_level_enabled_for_throttle(rdev, i, true); | 2341 | kv_dpm_power_level_enabled_for_throttle(rdev, i, true); |
@@ -2324,7 +2404,7 @@ static void kv_program_nbps_index_settings(struct radeon_device *rdev, | |||
2324 | struct kv_power_info *pi = kv_get_pi(rdev); | 2404 | struct kv_power_info *pi = kv_get_pi(rdev); |
2325 | u32 nbdpmconfig1; | 2405 | u32 nbdpmconfig1; |
2326 | 2406 | ||
2327 | if (rdev->family == CHIP_KABINI) | 2407 | if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) |
2328 | return; | 2408 | return; |
2329 | 2409 | ||
2330 | if (pi->sys_info.nb_dpm_enable) { | 2410 | if (pi->sys_info.nb_dpm_enable) { |
@@ -2631,9 +2711,6 @@ int kv_dpm_init(struct radeon_device *rdev) | |||
2631 | 2711 | ||
2632 | pi->sram_end = SMC_RAM_END; | 2712 | pi->sram_end = SMC_RAM_END; |
2633 | 2713 | ||
2634 | if (rdev->family == CHIP_KABINI) | ||
2635 | pi->high_voltage_t = 4001; | ||
2636 | |||
2637 | pi->enable_nb_dpm = true; | 2714 | pi->enable_nb_dpm = true; |
2638 | 2715 | ||
2639 | pi->caps_power_containment = true; | 2716 | pi->caps_power_containment = true; |
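The kv_dpm.c hunks add kv_convert_vid2_to_vid7()/kv_convert_vid7_to_vid2(), which prefer the vddc-sclk dependency table and otherwise walk the BIOS vid mapping table, clamping to the last entry when nothing matches. A bare-bones sketch of that lookup-with-clamp idea; the mapping values here are invented:

    #include <stdio.h>

    struct vid_map { unsigned vid_2bit, vid_7bit; };

    /* Invented mapping; the driver builds this table from the BIOS instead. */
    static const struct vid_map table[] = {
        { 0, 0x40 }, { 1, 0x48 }, { 2, 0x50 }, { 3, 0x58 },
    };
    #define N (sizeof(table) / sizeof(table[0]))

    static unsigned vid2_to_vid7(unsigned vid_2bit)
    {
        for (unsigned i = 0; i < N; i++)
            if (table[i].vid_2bit == vid_2bit)
                return table[i].vid_7bit;
        return table[N - 1].vid_7bit;    /* clamp to the last entry */
    }

    static unsigned vid7_to_vid2(unsigned vid_7bit)
    {
        for (unsigned i = 0; i < N; i++)
            if (table[i].vid_7bit == vid_7bit)
                return table[i].vid_2bit;
        return table[N - 1].vid_2bit;    /* clamp to the last entry */
    }

    int main(void)
    {
        printf("2->7: %#x, 7->2: %u\n", vid2_to_vid7(2), vid7_to_vid2(0x48));
        return 0;
    }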
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 6e887d004eba..bbc189fd3ddc 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -2839,6 +2839,7 @@ int r600_copy_cpdma(struct radeon_device *rdev, | |||
2839 | r = radeon_fence_emit(rdev, fence, ring->idx); | 2839 | r = radeon_fence_emit(rdev, fence, ring->idx); |
2840 | if (r) { | 2840 | if (r) { |
2841 | radeon_ring_unlock_undo(rdev, ring); | 2841 | radeon_ring_unlock_undo(rdev, ring); |
2842 | radeon_semaphore_free(rdev, &sem, NULL); | ||
2842 | return r; | 2843 | return r; |
2843 | } | 2844 | } |
2844 | 2845 | ||
@@ -3505,7 +3506,6 @@ int r600_irq_set(struct radeon_device *rdev) | |||
3505 | u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; | 3506 | u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; |
3506 | u32 grbm_int_cntl = 0; | 3507 | u32 grbm_int_cntl = 0; |
3507 | u32 hdmi0, hdmi1; | 3508 | u32 hdmi0, hdmi1; |
3508 | u32 d1grph = 0, d2grph = 0; | ||
3509 | u32 dma_cntl; | 3509 | u32 dma_cntl; |
3510 | u32 thermal_int = 0; | 3510 | u32 thermal_int = 0; |
3511 | 3511 | ||
@@ -3614,8 +3614,8 @@ int r600_irq_set(struct radeon_device *rdev) | |||
3614 | WREG32(CP_INT_CNTL, cp_int_cntl); | 3614 | WREG32(CP_INT_CNTL, cp_int_cntl); |
3615 | WREG32(DMA_CNTL, dma_cntl); | 3615 | WREG32(DMA_CNTL, dma_cntl); |
3616 | WREG32(DxMODE_INT_MASK, mode_int); | 3616 | WREG32(DxMODE_INT_MASK, mode_int); |
3617 | WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); | 3617 | WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK); |
3618 | WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); | 3618 | WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK); |
3619 | WREG32(GRBM_INT_CNTL, grbm_int_cntl); | 3619 | WREG32(GRBM_INT_CNTL, grbm_int_cntl); |
3620 | if (ASIC_IS_DCE3(rdev)) { | 3620 | if (ASIC_IS_DCE3(rdev)) { |
3621 | WREG32(DC_HPD1_INT_CONTROL, hpd1); | 3621 | WREG32(DC_HPD1_INT_CONTROL, hpd1); |
@@ -3918,6 +3918,14 @@ restart_ih: | |||
3918 | break; | 3918 | break; |
3919 | } | 3919 | } |
3920 | break; | 3920 | break; |
3921 | case 9: /* D1 pflip */ | ||
3922 | DRM_DEBUG("IH: D1 flip\n"); | ||
3923 | radeon_crtc_handle_flip(rdev, 0); | ||
3924 | break; | ||
3925 | case 11: /* D2 pflip */ | ||
3926 | DRM_DEBUG("IH: D2 flip\n"); | ||
3927 | radeon_crtc_handle_flip(rdev, 1); | ||
3928 | break; | ||
3921 | case 19: /* HPD/DAC hotplug */ | 3929 | case 19: /* HPD/DAC hotplug */ |
3922 | switch (src_data) { | 3930 | switch (src_data) { |
3923 | case 0: | 3931 | case 0: |
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c index 53fcb28f5578..4969cef44a19 100644 --- a/drivers/gpu/drm/radeon/r600_dma.c +++ b/drivers/gpu/drm/radeon/r600_dma.c | |||
@@ -489,6 +489,7 @@ int r600_copy_dma(struct radeon_device *rdev, | |||
489 | r = radeon_fence_emit(rdev, fence, ring->idx); | 489 | r = radeon_fence_emit(rdev, fence, ring->idx); |
490 | if (r) { | 490 | if (r) { |
491 | radeon_ring_unlock_undo(rdev, ring); | 491 | radeon_ring_unlock_undo(rdev, ring); |
492 | radeon_semaphore_free(rdev, &sem, NULL); | ||
492 | return r; | 493 | return r; |
493 | } | 494 | } |
494 | 495 | ||
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b58e1afdda76..68528619834a 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -730,6 +730,12 @@ struct cik_irq_stat_regs { | |||
730 | u32 disp_int_cont4; | 730 | u32 disp_int_cont4; |
731 | u32 disp_int_cont5; | 731 | u32 disp_int_cont5; |
732 | u32 disp_int_cont6; | 732 | u32 disp_int_cont6; |
733 | u32 d1grph_int; | ||
734 | u32 d2grph_int; | ||
735 | u32 d3grph_int; | ||
736 | u32 d4grph_int; | ||
737 | u32 d5grph_int; | ||
738 | u32 d6grph_int; | ||
733 | }; | 739 | }; |
734 | 740 | ||
735 | union radeon_irq_stat_regs { | 741 | union radeon_irq_stat_regs { |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index b8a24a75d4ff..be20e62dac83 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -2516,6 +2516,7 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
2516 | break; | 2516 | break; |
2517 | case CHIP_KAVERI: | 2517 | case CHIP_KAVERI: |
2518 | case CHIP_KABINI: | 2518 | case CHIP_KABINI: |
2519 | case CHIP_MULLINS: | ||
2519 | rdev->asic = &kv_asic; | 2520 | rdev->asic = &kv_asic; |
2520 | /* set num crtcs */ | 2521 | /* set num crtcs */ |
2521 | if (rdev->family == CHIP_KAVERI) { | 2522 | if (rdev->family == CHIP_KAVERI) { |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 511fe26198e4..0e770bbf7e29 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -99,6 +99,7 @@ static const char radeon_family_name[][16] = { | |||
99 | "KAVERI", | 99 | "KAVERI", |
100 | "KABINI", | 100 | "KABINI", |
101 | "HAWAII", | 101 | "HAWAII", |
102 | "MULLINS", | ||
102 | "LAST", | 103 | "LAST", |
103 | }; | 104 | }; |
104 | 105 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 8d99d5ee8014..408b6ac53f0b 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -284,6 +284,10 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) | |||
284 | u32 update_pending; | 284 | u32 update_pending; |
285 | int vpos, hpos; | 285 | int vpos, hpos; |
286 | 286 | ||
287 | /* can happen during initialization */ | ||
288 | if (radeon_crtc == NULL) | ||
289 | return; | ||
290 | |||
287 | spin_lock_irqsave(&rdev->ddev->event_lock, flags); | 291 | spin_lock_irqsave(&rdev->ddev->event_lock, flags); |
288 | work = radeon_crtc->unpin_work; | 292 | work = radeon_crtc->unpin_work; |
289 | if (work == NULL || | 293 | if (work == NULL || |
@@ -826,14 +830,14 @@ static void avivo_reduce_ratio(unsigned *nom, unsigned *den, | |||
826 | 830 | ||
827 | /* make sure nominator is large enough */ | 831 | /* make sure nominator is large enough */ |
828 | if (*nom < nom_min) { | 832 | if (*nom < nom_min) { |
829 | tmp = (nom_min + *nom - 1) / *nom; | 833 | tmp = DIV_ROUND_UP(nom_min, *nom); |
830 | *nom *= tmp; | 834 | *nom *= tmp; |
831 | *den *= tmp; | 835 | *den *= tmp; |
832 | } | 836 | } |
833 | 837 | ||
834 | /* make sure the denominator is large enough */ | 838 | /* make sure the denominator is large enough */ |
835 | if (*den < den_min) { | 839 | if (*den < den_min) { |
836 | tmp = (den_min + *den - 1) / *den; | 840 | tmp = DIV_ROUND_UP(den_min, *den); |
837 | *nom *= tmp; | 841 | *nom *= tmp; |
838 | *den *= tmp; | 842 | *den *= tmp; |
839 | } | 843 | } |
@@ -858,7 +862,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div, | |||
858 | unsigned *fb_div, unsigned *ref_div) | 862 | unsigned *fb_div, unsigned *ref_div) |
859 | { | 863 | { |
860 | /* limit reference * post divider to a maximum */ | 864 | /* limit reference * post divider to a maximum */ |
861 | ref_div_max = min(210 / post_div, ref_div_max); | 865 | ref_div_max = min(128 / post_div, ref_div_max); |
862 | 866 | ||
863 | /* get matching reference and feedback divider */ | 867 | /* get matching reference and feedback divider */ |
864 | *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max); | 868 | *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max); |
@@ -993,6 +997,16 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll, | |||
993 | /* this also makes sure that the reference divider is large enough */ | 997 | /* this also makes sure that the reference divider is large enough */ |
994 | avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min); | 998 | avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min); |
995 | 999 | ||
1000 | /* avoid high jitter with small fractional dividers */ | ||
1001 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) { | ||
1002 | fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60); | ||
1003 | if (fb_div < fb_div_min) { | ||
1004 | unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div); | ||
1005 | fb_div *= tmp; | ||
1006 | ref_div *= tmp; | ||
1007 | } | ||
1008 | } | ||
1009 | |||
996 | /* and finally save the result */ | 1010 | /* and finally save the result */ |
997 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { | 1011 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { |
998 | *fb_div_p = fb_div / 10; | 1012 | *fb_div_p = fb_div / 10; |
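The radeon_display.c hunk replaces the open-coded ceiling division with DIV_ROUND_UP() when enlarging the nominator/denominator, multiplying both by the same factor so the ratio itself is unchanged; the new fractional-divider guard reuses the same trick. A tiny sketch of that step:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)    (((n) + (d) - 1) / (d))

    /* Scale a nom/den ratio up until nom reaches nom_min while keeping the
     * ratio identical -- the avivo_reduce_ratio() step shown in the hunk. */
    static void scale_ratio(unsigned *nom, unsigned *den, unsigned nom_min)
    {
        if (*nom < nom_min) {
            unsigned tmp = DIV_ROUND_UP(nom_min, *nom);
            *nom *= tmp;
            *den *= tmp;
        }
    }

    int main(void)
    {
        unsigned nom = 3, den = 2;

        scale_ratio(&nom, &den, 10);    /* needs a factor of ceil(10/3) = 4 */
        printf("%u/%u\n", nom, den);    /* 12/8, same ratio as 3/2 */
        return 0;
    }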
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h index 9da5da4ffd17..4b7b87f71a63 100644 --- a/drivers/gpu/drm/radeon/radeon_family.h +++ b/drivers/gpu/drm/radeon/radeon_family.h | |||
@@ -97,6 +97,7 @@ enum radeon_family { | |||
97 | CHIP_KAVERI, | 97 | CHIP_KAVERI, |
98 | CHIP_KABINI, | 98 | CHIP_KABINI, |
99 | CHIP_HAWAII, | 99 | CHIP_HAWAII, |
100 | CHIP_MULLINS, | ||
100 | CHIP_LAST, | 101 | CHIP_LAST, |
101 | }; | 102 | }; |
102 | 103 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 6fac8efe8340..f30b8426eee2 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -1300,6 +1300,7 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
1300 | case CHIP_KABINI: | 1300 | case CHIP_KABINI: |
1301 | case CHIP_KAVERI: | 1301 | case CHIP_KAVERI: |
1302 | case CHIP_HAWAII: | 1302 | case CHIP_HAWAII: |
1303 | case CHIP_MULLINS: | ||
1303 | /* DPM requires the RLC, RV770+ dGPU requires SMC */ | 1304 | /* DPM requires the RLC, RV770+ dGPU requires SMC */ |
1304 | if (!rdev->rlc_fw) | 1305 | if (!rdev->rlc_fw) |
1305 | rdev->pm.pm_method = PM_METHOD_PROFILE; | 1306 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h index 58d12938c0b8..4e7c3269b183 100644 --- a/drivers/gpu/drm/radeon/radeon_ucode.h +++ b/drivers/gpu/drm/radeon/radeon_ucode.h | |||
@@ -52,6 +52,7 @@ | |||
52 | #define BONAIRE_RLC_UCODE_SIZE 2048 | 52 | #define BONAIRE_RLC_UCODE_SIZE 2048 |
53 | #define KB_RLC_UCODE_SIZE 2560 | 53 | #define KB_RLC_UCODE_SIZE 2560 |
54 | #define KV_RLC_UCODE_SIZE 2560 | 54 | #define KV_RLC_UCODE_SIZE 2560 |
55 | #define ML_RLC_UCODE_SIZE 2560 | ||
55 | 56 | ||
56 | /* MC */ | 57 | /* MC */ |
57 | #define BTC_MC_UCODE_SIZE 6024 | 58 | #define BTC_MC_UCODE_SIZE 6024 |
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 5748bdaeacce..1b65ae2433cd 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
@@ -99,6 +99,7 @@ int radeon_uvd_init(struct radeon_device *rdev) | |||
99 | case CHIP_KABINI: | 99 | case CHIP_KABINI: |
100 | case CHIP_KAVERI: | 100 | case CHIP_KAVERI: |
101 | case CHIP_HAWAII: | 101 | case CHIP_HAWAII: |
102 | case CHIP_MULLINS: | ||
102 | fw_name = FIRMWARE_BONAIRE; | 103 | fw_name = FIRMWARE_BONAIRE; |
103 | break; | 104 | break; |
104 | 105 | ||
@@ -465,6 +466,10 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | |||
465 | cmd = radeon_get_ib_value(p, p->idx) >> 1; | 466 | cmd = radeon_get_ib_value(p, p->idx) >> 1; |
466 | 467 | ||
467 | if (cmd < 0x4) { | 468 | if (cmd < 0x4) { |
469 | if (end <= start) { | ||
470 | DRM_ERROR("invalid reloc offset %X!\n", offset); | ||
471 | return -EINVAL; | ||
472 | } | ||
468 | if ((end - start) < buf_sizes[cmd]) { | 473 | if ((end - start) < buf_sizes[cmd]) { |
469 | DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd, | 474 | DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd, |
470 | (unsigned)(end - start), buf_sizes[cmd]); | 475 | (unsigned)(end - start), buf_sizes[cmd]); |
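The radeon_uvd.c hunk rejects relocations whose end offset is not past the start before computing end - start, which would otherwise wrap with unsigned arithmetic and slip past the minimum-size check. A short sketch of that ordering; min_size stands in for buf_sizes[cmd]:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    static int check_reloc(uint64_t start, uint64_t end, uint64_t min_size)
    {
        /* Reject a reversed range first: with unsigned arithmetic,
         * end - start would wrap to a huge value and pass the size
         * check below. */
        if (end <= start)
            return -EINVAL;

        if (end - start < min_size)
            return -EINVAL;

        return 0;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               check_reloc(0x1000, 0x2000, 0x800),    /* ok */
               check_reloc(0x2000, 0x1000, 0x800),    /* reversed -> -EINVAL */
               check_reloc(0x1000, 0x1100, 0x800));   /* too small -> -EINVAL */
        return 0;
    }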
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index ced53dd03e7c..f73324c81491 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c | |||
@@ -66,6 +66,7 @@ int radeon_vce_init(struct radeon_device *rdev) | |||
66 | case CHIP_BONAIRE: | 66 | case CHIP_BONAIRE: |
67 | case CHIP_KAVERI: | 67 | case CHIP_KAVERI: |
68 | case CHIP_KABINI: | 68 | case CHIP_KABINI: |
69 | case CHIP_MULLINS: | ||
69 | fw_name = FIRMWARE_BONAIRE; | 70 | fw_name = FIRMWARE_BONAIRE; |
70 | break; | 71 | break; |
71 | 72 | ||
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c index aca8cbe8a335..bbf2e076ee45 100644 --- a/drivers/gpu/drm/radeon/rv770_dma.c +++ b/drivers/gpu/drm/radeon/rv770_dma.c | |||
@@ -86,6 +86,7 @@ int rv770_copy_dma(struct radeon_device *rdev, | |||
86 | r = radeon_fence_emit(rdev, fence, ring->idx); | 86 | r = radeon_fence_emit(rdev, fence, ring->idx); |
87 | if (r) { | 87 | if (r) { |
88 | radeon_ring_unlock_undo(rdev, ring); | 88 | radeon_ring_unlock_undo(rdev, ring); |
89 | radeon_semaphore_free(rdev, &sem, NULL); | ||
89 | return r; | 90 | return r; |
90 | } | 91 | } |
91 | 92 | ||
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index ac708e006180..22a63c98ba14 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -5780,7 +5780,6 @@ int si_irq_set(struct radeon_device *rdev) | |||
5780 | u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; | 5780 | u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; |
5781 | u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; | 5781 | u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; |
5782 | u32 grbm_int_cntl = 0; | 5782 | u32 grbm_int_cntl = 0; |
5783 | u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; | ||
5784 | u32 dma_cntl, dma_cntl1; | 5783 | u32 dma_cntl, dma_cntl1; |
5785 | u32 thermal_int = 0; | 5784 | u32 thermal_int = 0; |
5786 | 5785 | ||
@@ -5919,16 +5918,22 @@ int si_irq_set(struct radeon_device *rdev) | |||
5919 | } | 5918 | } |
5920 | 5919 | ||
5921 | if (rdev->num_crtc >= 2) { | 5920 | if (rdev->num_crtc >= 2) { |
5922 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); | 5921 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, |
5923 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); | 5922 | GRPH_PFLIP_INT_MASK); |
5923 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, | ||
5924 | GRPH_PFLIP_INT_MASK); | ||
5924 | } | 5925 | } |
5925 | if (rdev->num_crtc >= 4) { | 5926 | if (rdev->num_crtc >= 4) { |
5926 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); | 5927 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, |
5927 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); | 5928 | GRPH_PFLIP_INT_MASK); |
5929 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, | ||
5930 | GRPH_PFLIP_INT_MASK); | ||
5928 | } | 5931 | } |
5929 | if (rdev->num_crtc >= 6) { | 5932 | if (rdev->num_crtc >= 6) { |
5930 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); | 5933 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, |
5931 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); | 5934 | GRPH_PFLIP_INT_MASK); |
5935 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, | ||
5936 | GRPH_PFLIP_INT_MASK); | ||
5932 | } | 5937 | } |
5933 | 5938 | ||
5934 | if (!ASIC_IS_NODCE(rdev)) { | 5939 | if (!ASIC_IS_NODCE(rdev)) { |
@@ -6292,6 +6297,15 @@ restart_ih: | |||
6292 | break; | 6297 | break; |
6293 | } | 6298 | } |
6294 | break; | 6299 | break; |
6300 | case 8: /* D1 page flip */ | ||
6301 | case 10: /* D2 page flip */ | ||
6302 | case 12: /* D3 page flip */ | ||
6303 | case 14: /* D4 page flip */ | ||
6304 | case 16: /* D5 page flip */ | ||
6305 | case 18: /* D6 page flip */ | ||
6306 | DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); | ||
6307 | radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); | ||
6308 | break; | ||
6295 | case 42: /* HPD hotplug */ | 6309 | case 42: /* HPD hotplug */ |
6296 | switch (src_data) { | 6310 | switch (src_data) { |
6297 | case 0: | 6311 | case 0: |
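The new interrupt-handler cases cover the D1-D6 page-flip source IDs. Flip sources are numbered 8, 10, 12, 14, 16 and 18, so the zero-based CRTC index falls out of (src_id - 8) >> 1; for example src_id 12 maps to CRTC 2, i.e. D3. A minimal sketch of that mapping, assuming the src_id values from the hunk:

        /* Sketch: translate a page-flip interrupt source ID into a CRTC index.
         * Even source IDs 8..18 correspond to D1..D6 in the hunk above. */
        if (src_id >= 8 && src_id <= 18 && !(src_id & 1)) {
                int crtc_id = (src_id - 8) >> 1;  /* 8 -> 0, 10 -> 1, ..., 18 -> 5 */

                DRM_DEBUG("IH: D%d flip\n", crtc_id + 1);
                radeon_crtc_handle_flip(rdev, crtc_id);
        }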
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index cf0fdad8c278..de0ca070122f 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c | |||
@@ -213,6 +213,7 @@ int si_copy_dma(struct radeon_device *rdev, | |||
213 | r = radeon_fence_emit(rdev, fence, ring->idx); | 213 | r = radeon_fence_emit(rdev, fence, ring->idx); |
214 | if (r) { | 214 | if (r) { |
215 | radeon_ring_unlock_undo(rdev, ring); | 215 | radeon_ring_unlock_undo(rdev, ring); |
216 | radeon_semaphore_free(rdev, &sem, NULL); | ||
216 | return r; | 217 | return r; |
217 | } | 218 | } |
218 | 219 | ||
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index 0a243f0e5d68..be42c8125203 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c | |||
@@ -83,7 +83,10 @@ int uvd_v1_0_init(struct radeon_device *rdev) | |||
83 | int r; | 83 | int r; |
84 | 84 | ||
85 | /* raise clocks while booting up the VCPU */ | 85 | /* raise clocks while booting up the VCPU */ |
86 | radeon_set_uvd_clocks(rdev, 53300, 40000); | 86 | if (rdev->family < CHIP_RV740) |
87 | radeon_set_uvd_clocks(rdev, 10000, 10000); | ||
88 | else | ||
89 | radeon_set_uvd_clocks(rdev, 53300, 40000); | ||
87 | 90 | ||
88 | r = uvd_v1_0_start(rdev); | 91 | r = uvd_v1_0_start(rdev); |
89 | if (r) | 92 | if (r) |
@@ -407,7 +410,10 @@ int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) | |||
407 | struct radeon_fence *fence = NULL; | 410 | struct radeon_fence *fence = NULL; |
408 | int r; | 411 | int r; |
409 | 412 | ||
410 | r = radeon_set_uvd_clocks(rdev, 53300, 40000); | 413 | if (rdev->family < CHIP_RV740) |
414 | r = radeon_set_uvd_clocks(rdev, 10000, 10000); | ||
415 | else | ||
416 | r = radeon_set_uvd_clocks(rdev, 53300, 40000); | ||
411 | if (r) { | 417 | if (r) { |
412 | DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r); | 418 | DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r); |
413 | return r; | 419 | return r; |
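Both uvd_v1_0_init() and uvd_v1_0_ib_test() now pick the UVD boot clocks by ASIC family: pre-RV740 parts are limited to the conservative 10000/10000 request, while RV740 and newer keep the full 53300/40000 setting. Since the same if/else appears in both call sites, the check could be captured in a small helper; the sketch below is a hypothetical factoring, not code from the patch:

        /* Hypothetical helper mirroring the family check added in both hunks
         * above; uvd_v1_0_init() and uvd_v1_0_ib_test() would call this
         * instead of open-coding the if/else. */
        static int uvd_v1_0_raise_clocks(struct radeon_device *rdev)
        {
                /* Older families get the low 10000/10000 request; RV740 and
                 * newer use the full 53300/40000 boot clocks. */
                if (rdev->family < CHIP_RV740)
                        return radeon_set_uvd_clocks(rdev, 10000, 10000);

                return radeon_set_uvd_clocks(rdev, 53300, 40000);
        }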
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 36c717af6cf9..edb871d7d395 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c | |||
@@ -312,7 +312,7 @@ static void tegra_crtc_disable(struct drm_crtc *crtc) | |||
312 | struct drm_device *drm = crtc->dev; | 312 | struct drm_device *drm = crtc->dev; |
313 | struct drm_plane *plane; | 313 | struct drm_plane *plane; |
314 | 314 | ||
315 | list_for_each_entry(plane, &drm->mode_config.plane_list, head) { | 315 | drm_for_each_legacy_plane(plane, &drm->mode_config.plane_list) { |
316 | if (plane->crtc == crtc) { | 316 | if (plane->crtc == crtc) { |
317 | tegra_plane_disable(plane); | 317 | tegra_plane_disable(plane); |
318 | plane->crtc = NULL; | 318 | plane->crtc = NULL; |
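tegra_crtc_disable() previously walked the whole mode_config.plane_list, which with universal planes can also contain primary and cursor planes, so the CRTC's own primary plane could be disabled underneath it. drm_for_each_legacy_plane() restricts the walk to overlay (legacy) planes. Roughly, the macro is a filtered list walk along these lines (sketch based on the drm_crtc.h of this era; field names assumed):

        /* Sketch of the filtering the macro performs; the real definition
         * lives in include/drm/drm_crtc.h. Only overlay-type planes are
         * visited, skipping primary and cursor planes. */
        #define drm_for_each_legacy_plane(plane, planelist) \
                list_for_each_entry(plane, planelist, head) \
                        if (plane->type == DRM_PLANE_TYPE_OVERLAY)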
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 931490b9cfed..87df0b3674fd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -1214,14 +1214,36 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
1214 | SVGA3dCmdSurfaceDMA dma; | 1214 | SVGA3dCmdSurfaceDMA dma; |
1215 | } *cmd; | 1215 | } *cmd; |
1216 | int ret; | 1216 | int ret; |
1217 | SVGA3dCmdSurfaceDMASuffix *suffix; | ||
1218 | uint32_t bo_size; | ||
1217 | 1219 | ||
1218 | cmd = container_of(header, struct vmw_dma_cmd, header); | 1220 | cmd = container_of(header, struct vmw_dma_cmd, header); |
1221 | suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma + | ||
1222 | header->size - sizeof(*suffix)); | ||
1223 | |||
1224 | /* Make sure device and verifier stays in sync. */ | ||
1225 | if (unlikely(suffix->suffixSize != sizeof(*suffix))) { | ||
1226 | DRM_ERROR("Invalid DMA suffix size.\n"); | ||
1227 | return -EINVAL; | ||
1228 | } | ||
1229 | |||
1219 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | 1230 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, |
1220 | &cmd->dma.guest.ptr, | 1231 | &cmd->dma.guest.ptr, |
1221 | &vmw_bo); | 1232 | &vmw_bo); |
1222 | if (unlikely(ret != 0)) | 1233 | if (unlikely(ret != 0)) |
1223 | return ret; | 1234 | return ret; |
1224 | 1235 | ||
1236 | /* Make sure DMA doesn't cross BO boundaries. */ | ||
1237 | bo_size = vmw_bo->base.num_pages * PAGE_SIZE; | ||
1238 | if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) { | ||
1239 | DRM_ERROR("Invalid DMA offset.\n"); | ||
1240 | return -EINVAL; | ||
1241 | } | ||
1242 | |||
1243 | bo_size -= cmd->dma.guest.ptr.offset; | ||
1244 | if (unlikely(suffix->maximumOffset > bo_size)) | ||
1245 | suffix->maximumOffset = bo_size; | ||
1246 | |||
1225 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 1247 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1226 | user_surface_converter, &cmd->dma.host.sid, | 1248 | user_surface_converter, &cmd->dma.host.sid, |
1227 | NULL); | 1249 | NULL); |
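The verifier change does two things: it checks that the SVGA3dCmdSurfaceDMASuffix appended to the command really has the size the driver expects, and it clamps the transfer so a hostile suffix cannot reach past the backing buffer object. The suffix sits at the tail of the command, so it is located from header->size; the clamp then caps suffix->maximumOffset at whatever space remains in the BO after the guest pointer offset. A condensed sketch of the added validation, assuming the structures named in the hunk (not a drop-in replacement for vmw_cmd_dma()):

        /* Locate the suffix at the end of the SurfaceDMA command. */
        suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long)&cmd->dma +
                                               header->size - sizeof(*suffix));

        if (suffix->suffixSize != sizeof(*suffix))
                return -EINVAL;                 /* device and verifier disagree on layout */

        /* Bound the DMA against the guest buffer object. */
        bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
        if (cmd->dma.guest.ptr.offset > bo_size)
                return -EINVAL;                 /* start already past the end of the BO */

        bo_size -= cmd->dma.guest.ptr.offset;
        if (suffix->maximumOffset > bo_size)
                suffix->maximumOffset = bo_size; /* never let the DMA cross the BO boundary */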