Diffstat (limited to 'drivers/gpu')
118 files changed, 2682 insertions, 1292 deletions
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 98d670825a1a..6e8887fe6c1b 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -323,6 +323,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
 
     astbo->gem.driver_private = NULL;
     astbo->bo.bdev = &ast->ttm.bdev;
+    astbo->bo.bdev->dev_mapping = dev->dev_mapping;
 
     ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
 
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 0047012045c2..69fd8f1ac8df 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -328,6 +328,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
 
     cirrusbo->gem.driver_private = NULL;
     cirrusbo->bo.bdev = &cirrus->ttm.bdev;
+    cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;
 
     cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
 
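Both hunks above wire up bo.bdev->dev_mapping in the bo_create path. TTM uses that address_space to zap stale userspace PTEs when a buffer moves; with the field left NULL, the invalidation is silently skipped. A rough sketch of the consumer, simplified from the TTM core of this era (reconstructed from memory, so treat the exact body as illustrative rather than authoritative):

    void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
    {
        struct ttm_bo_device *bdev = bo->bdev;
        loff_t offset = (loff_t)bo->addr_space_offset;
        loff_t holelen = ((loff_t)bo->num_pages) << PAGE_SHIFT;

        /* A NULL dev_mapping means CPU mappings are never torn down. */
        if (!bdev->dev_mapping)
            return;
        unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
    }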
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 738a4294d820..6a647493ca7f 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -677,6 +677,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                     /* don't break so fail path works correct */
                     fail = 1;
                 break;
+
+                if (connector->dpms != DRM_MODE_DPMS_ON) {
+                    DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
+                    mode_changed = true;
+                }
             }
         }
 
@@ -754,6 +759,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                 ret = -EINVAL;
                 goto fail;
             }
+            DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+            for (i = 0; i < set->num_connectors; i++) {
+                DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+                              drm_get_connector_name(set->connectors[i]));
+                set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
+            }
         }
         drm_helper_disable_unused_functions(dev);
     } else if (fb_changed) {
@@ -771,22 +782,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
         }
     }
 
-    /*
-     * crtc set_config helpers implicit set the crtc and all connected
-     * encoders to DPMS on for a full mode set. But for just an fb update it
-     * doesn't do that. To not confuse userspace, do an explicit DPMS_ON
-     * unconditionally. This will also ensure driver internal dpms state is
-     * consistent again.
-     */
-    if (set->crtc->enabled) {
-        DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
-        for (i = 0; i < set->num_connectors; i++) {
-            DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
-                          drm_get_connector_name(set->connectors[i]));
-            set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
-        }
-    }
-
     kfree(save_connectors);
     kfree(save_encoders);
     kfree(save_crtcs);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 8bcce7866d36..f92da0a32f0d 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -708,7 +708,10 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
     /* Subtract time delta from raw timestamp to get final
      * vblank_time timestamp for end of vblank.
      */
-    etime = ktime_sub_ns(etime, delta_ns);
+    if (delta_ns < 0)
+        etime = ktime_add_ns(etime, -delta_ns);
+    else
+        etime = ktime_sub_ns(etime, delta_ns);
     *vblank_time = ktime_to_timeval(etime);
 
     DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
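The new branch exists because delta_ns is signed while ktime_sub_ns() takes an unsigned nanosecond count: when the scanout position is already past the vblank point, delta_ns goes negative and the old unconditional subtract wrapped it into a huge value. A userspace toy model of the wraparound (illustration only, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t delta_ns = -1000;              /* end of vblank lies in the future */
        uint64_t as_seen = (uint64_t)delta_ns; /* the implicit conversion at the call */

        /* Prints 18446744073709550616: the timestamp would jump by ~584 years. */
        printf("%llu\n", (unsigned long long)as_seen);
        return 0;
    }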
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 95c75edef01a..30ef41bcd7b8 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -15,7 +15,6 @@
 
 #include <linux/kernel.h>
 #include <linux/i2c.h>
-#include <linux/module.h>
 
 
 #include "exynos_drm_drv.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 61b094f689a7..6e047bd53e2f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -12,7 +12,6 @@
  *
  */
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 3e106beca5b6..1c263dac3c1c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -14,7 +14,6 @@
 #include <drm/drmP.h>
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/of_device.h>
@@ -130,7 +129,6 @@ static const struct of_device_id fimd_driver_dt_match[] = {
       .data = &exynos5_fimd_driver_data },
     {},
 };
-MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
 #endif
 
 static inline struct fimd_driver_data *drm_fimd_get_driver_data(
@@ -1082,7 +1080,6 @@ static struct platform_device_id fimd_driver_ids[] = {
     },
     {},
 };
-MODULE_DEVICE_TABLE(platform, fimd_driver_ids);
 
 static const struct dev_pm_ops fimd_pm_ops = {
     SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 42a5a5466075..eddea4941483 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -8,7 +8,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
@@ -806,9 +805,20 @@ static void g2d_dma_start(struct g2d_data *g2d,
     struct g2d_cmdlist_node *node =
         list_first_entry(&runqueue_node->run_cmdlist,
                          struct g2d_cmdlist_node, list);
+    int ret;
+
+    ret = pm_runtime_get_sync(g2d->dev);
+    if (ret < 0) {
+        dev_warn(g2d->dev, "failed pm power on.\n");
+        return;
+    }
 
-    pm_runtime_get_sync(g2d->dev);
-    clk_enable(g2d->gate_clk);
+    ret = clk_prepare_enable(g2d->gate_clk);
+    if (ret < 0) {
+        dev_warn(g2d->dev, "failed to enable clock.\n");
+        pm_runtime_put_sync(g2d->dev);
+        return;
+    }
 
     writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
     writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
@@ -861,7 +871,7 @@ static void g2d_runqueue_worker(struct work_struct *work)
                         runqueue_work);
 
     mutex_lock(&g2d->runqueue_mutex);
-    clk_disable(g2d->gate_clk);
+    clk_disable_unprepare(g2d->gate_clk);
     pm_runtime_put_sync(g2d->dev);
 
     complete(&g2d->runqueue_node->complete);
@@ -1521,7 +1531,6 @@ static const struct of_device_id exynos_g2d_match[] = {
     { .compatible = "samsung,exynos5250-g2d" },
     {},
 };
-MODULE_DEVICE_TABLE(of, exynos_g2d_match);
 #endif
 
 struct platform_driver g2d_driver = {
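Besides dropping the module boilerplate, the g2d hunks convert clk_enable() to clk_prepare_enable() (required since the platform moved to the common clock framework) and stop ignoring failures when powering up for a DMA start. The acquire/rollback shape, as a self-contained sketch (example_power_on is a hypothetical helper, not part of the patch):

    #include <linux/clk.h>
    #include <linux/pm_runtime.h>

    static int example_power_on(struct device *dev, struct clk *gate_clk)
    {
        int ret;

        ret = pm_runtime_get_sync(dev);     /* bring the power domain up */
        if (ret < 0)
            return ret;

        ret = clk_prepare_enable(gate_clk); /* prepare + enable in one call */
        if (ret < 0) {
            pm_runtime_put_sync(dev);       /* undo in reverse order on failure */
            return ret;
        }

        return 0;
    }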
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 472e3b25e7f2..90b8a1a5344c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -12,7 +12,6 @@
  *
  */
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index aaa550d622f0..8d3bc01d6834 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -15,7 +15,6 @@
 
 #include <linux/kernel.h>
 #include <linux/wait.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index b1ef8e7ff9c9..d2b6ab4def93 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -12,7 +12,6 @@
  *
  */
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/types.h>
 #include <linux/clk.h>
@@ -342,10 +341,10 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
      */
     ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
                           prop_list->ipp_id);
-    if (!ippdrv) {
+    if (IS_ERR(ippdrv)) {
         DRM_ERROR("not found ipp%d driver.\n",
                   prop_list->ipp_id);
-        return -EINVAL;
+        return PTR_ERR(ippdrv);
     }
 
     prop_list = ippdrv->prop_list;
@@ -970,9 +969,9 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
     /* find command node */
     c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
                           qbuf->prop_id);
-    if (!c_node) {
+    if (IS_ERR(c_node)) {
         DRM_ERROR("failed to get command node.\n");
-        return -EFAULT;
+        return PTR_ERR(c_node);
     }
 
     /* buffer control */
@@ -1106,9 +1105,9 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
 
     c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
                           cmd_ctrl->prop_id);
-    if (!c_node) {
+    if (IS_ERR(c_node)) {
         DRM_ERROR("invalid command node list.\n");
-        return -EINVAL;
+        return PTR_ERR(c_node);
     }
 
     if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
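All three ipp call sites move from NULL checks to the ERR_PTR convention, which implies ipp_find_obj() now encodes an errno in the returned pointer. The idiom in miniature (example_* names are hypothetical, not the real lookup body):

    #include <linux/err.h>
    #include <linux/idr.h>

    static void *example_find_obj(struct idr *idr, int id)
    {
        void *obj = idr_find(idr, id);

        return obj ? obj : ERR_PTR(-ENODEV); /* the errno travels in the pointer */
    }

    static int example_use(struct idr *idr, int id)
    {
        void *obj = example_find_obj(idr, id);

        if (IS_ERR(obj))
            return PTR_ERR(obj); /* propagate the encoded errno, as above */
        return 0;
    }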
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 427640aa5148..49669aa24c45 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -10,7 +10,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 41cc74d83e4e..c57c56519add 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -13,7 +13,6 @@
 #include <drm/drmP.h>
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 
 #include <drm/exynos_drm.h>
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 62ef5971ac3c..2f5c6942c968 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -24,7 +24,6 @@
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/i2c.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index ef04255076c7..6e320ae9afed 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -15,7 +15,6 @@
 
 #include <linux/kernel.h>
 #include <linux/i2c.h>
-#include <linux/module.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_hdmi.h"
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 42ffb71c63bc..c9a137caea41 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -23,7 +23,6 @@
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/i2c.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index adb319b53ecd..f4669802a0fb 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1495,6 +1495,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
     dev_priv->dev = dev;
     dev_priv->info = info;
 
+    spin_lock_init(&dev_priv->irq_lock);
+    spin_lock_init(&dev_priv->gpu_error.lock);
+    spin_lock_init(&dev_priv->rps.lock);
+    spin_lock_init(&dev_priv->gt_lock);
+    spin_lock_init(&dev_priv->backlight.lock);
+    mutex_init(&dev_priv->dpio_lock);
+    mutex_init(&dev_priv->rps.hw_lock);
+    mutex_init(&dev_priv->modeset_restore_lock);
+
     i915_dump_device_info(dev_priv);
 
     if (i915_get_bridge_dev(dev)) {
@@ -1585,6 +1594,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
     intel_detect_pch(dev);
 
     intel_irq_init(dev);
+    intel_pm_init(dev);
+    intel_gt_sanitize(dev);
     intel_gt_init(dev);
 
     /* Try to make sure MCHBAR is enabled before poking at it */
@@ -1610,15 +1621,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
     if (!IS_I945G(dev) && !IS_I945GM(dev))
         pci_enable_msi(dev->pdev);
 
-    spin_lock_init(&dev_priv->irq_lock);
-    spin_lock_init(&dev_priv->gpu_error.lock);
-    spin_lock_init(&dev_priv->rps.lock);
-    spin_lock_init(&dev_priv->backlight.lock);
-    mutex_init(&dev_priv->dpio_lock);
-
-    mutex_init(&dev_priv->rps.hw_lock);
-    mutex_init(&dev_priv->modeset_restore_lock);
-
     dev_priv->num_plane = 1;
     if (IS_VALLEYVIEW(dev))
         dev_priv->num_plane = 2;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 062cbda1bf4a..45b3c030f483 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -123,10 +123,10 @@ module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 060
 MODULE_PARM_DESC(preliminary_hw_support,
         "Enable preliminary hardware support. (default: false)");
 
-int i915_disable_power_well __read_mostly = 0;
+int i915_disable_power_well __read_mostly = 1;
 module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
 MODULE_PARM_DESC(disable_power_well,
-        "Disable the power well when possible (default: false)");
+        "Disable the power well when possible (default: true)");
 
 int i915_enable_ips __read_mostly = 1;
 module_param_named(enable_ips, i915_enable_ips, int, 0600);
@@ -706,7 +706,7 @@ static int i915_drm_thaw(struct drm_device *dev)
 {
     int error = 0;
 
-    intel_gt_reset(dev);
+    intel_gt_sanitize(dev);
 
     if (drm_core_check_feature(dev, DRIVER_MODESET)) {
         mutex_lock(&dev->struct_mutex);
@@ -732,7 +732,7 @@ int i915_resume(struct drm_device *dev)
 
     pci_set_master(dev->pdev);
 
-    intel_gt_reset(dev);
+    intel_gt_sanitize(dev);
 
     /*
      * Platforms with opregion should have sane BIOS, older ones (gen3 and
@@ -1253,21 +1253,21 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
 
 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+    unsigned long irqflags; \
     u##x val = 0; \
+    spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
     if (IS_GEN5(dev_priv->dev)) \
         ilk_dummy_write(dev_priv); \
     if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
-        unsigned long irqflags; \
-        spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
         if (dev_priv->forcewake_count == 0) \
             dev_priv->gt.force_wake_get(dev_priv); \
         val = read##y(dev_priv->regs + reg); \
         if (dev_priv->forcewake_count == 0) \
             dev_priv->gt.force_wake_put(dev_priv); \
-        spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
     } else { \
         val = read##y(dev_priv->regs + reg); \
     } \
+    spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
     trace_i915_reg_rw(false, reg, val, sizeof(val)); \
     return val; \
 }
@@ -1280,8 +1280,10 @@ __i915_read(64, q)
 
 #define __i915_write(x, y) \
 void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+    unsigned long irqflags; \
     u32 __fifo_ret = 0; \
     trace_i915_reg_rw(true, reg, val, sizeof(val)); \
+    spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
     if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
         __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
     } \
@@ -1293,6 +1295,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
         gen6_gt_check_fifodbg(dev_priv); \
     } \
     hsw_unclaimed_reg_check(dev_priv, reg); \
+    spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
 }
 __i915_write(8, b)
 __i915_write(16, w)
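The __i915_read/__i915_write rework widens the gt_lock critical section to cover the whole register access, not just the forcewake branch, so a plain read can no longer race a concurrent forcewake put. This is also why the i915_dma.c hunk above hoists the lock initializers: the macros now take gt_lock on every MMIO access, including ones issued early in i915_driver_load(). De-macroized, the read path now has roughly this shape (a simplified sketch, leaving out forcewake and the gen5 dummy write):

    static u32 example_read32(struct drm_i915_private *dev_priv, u32 reg)
    {
        unsigned long irqflags;
        u32 val;

        spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
        val = readl(dev_priv->regs + reg); /* every access is serialized now */
        spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
        return val;
    }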
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a416645bcd23..1929bffc1c77 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -555,6 +555,7 @@ enum intel_sbi_destination {
 #define QUIRK_PIPEA_FORCE (1<<0)
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
+#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
 
 struct intel_fbdev;
 struct intel_fbc_work;
@@ -1581,9 +1582,10 @@ void i915_hangcheck_elapsed(unsigned long data);
 void i915_handle_error(struct drm_device *dev, bool wedged);
 
 extern void intel_irq_init(struct drm_device *dev);
+extern void intel_pm_init(struct drm_device *dev);
 extern void intel_hpd_init(struct drm_device *dev);
 extern void intel_gt_init(struct drm_device *dev);
-extern void intel_gt_reset(struct drm_device *dev);
+extern void intel_gt_sanitize(struct drm_device *dev);
 
 void i915_error_state_free(struct kref *error_ref);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4200c32407ec..d9e2208cfe98 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1880,6 +1880,10 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
     u32 seqno = intel_ring_get_seqno(ring);
 
     BUG_ON(ring == NULL);
+    if (obj->ring != ring && obj->last_write_seqno) {
+        /* Keep the seqno relative to the current ring */
+        obj->last_write_seqno = seqno;
+    }
     obj->ring = ring;
 
     /* Add a reference if we're newly entering the active list. */
@@ -2254,7 +2258,17 @@ void i915_gem_restore_fences(struct drm_device *dev)
 
     for (i = 0; i < dev_priv->num_fence_regs; i++) {
         struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
-        i915_gem_write_fence(dev, i, reg->obj);
+
+        /*
+         * Commit delayed tiling changes if we have an object still
+         * attached to the fence, otherwise just clear the fence.
+         */
+        if (reg->obj) {
+            i915_gem_object_update_fence(reg->obj, reg,
+                                         reg->obj->tiling_mode);
+        } else {
+            i915_gem_write_fence(dev, i, NULL);
+        }
     }
 }
 
@@ -2653,7 +2667,6 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
     drm_i915_private_t *dev_priv = dev->dev_private;
     int fence_reg;
     int fence_pitch_shift;
-    uint64_t val;
 
     if (INTEL_INFO(dev)->gen >= 6) {
         fence_reg = FENCE_REG_SANDYBRIDGE_0;
@@ -2663,8 +2676,23 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
         fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
     }
 
+    fence_reg += reg * 8;
+
+    /* To w/a incoherency with non-atomic 64-bit register updates,
+     * we split the 64-bit update into two 32-bit writes. In order
+     * for a partial fence not to be evaluated between writes, we
+     * precede the update with write to turn off the fence register,
+     * and only enable the fence as the last step.
+     *
+     * For extra levels of paranoia, we make sure each step lands
+     * before applying the next step.
+     */
+    I915_WRITE(fence_reg, 0);
+    POSTING_READ(fence_reg);
+
     if (obj) {
         u32 size = obj->gtt_space->size;
+        uint64_t val;
 
         val = (uint64_t)((obj->gtt_offset + size - 4096) &
                          0xfffff000) << 32;
@@ -2673,12 +2701,16 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
         if (obj->tiling_mode == I915_TILING_Y)
             val |= 1 << I965_FENCE_TILING_Y_SHIFT;
         val |= I965_FENCE_REG_VALID;
-    } else
-        val = 0;
 
-    fence_reg += reg * 8;
-    I915_WRITE64(fence_reg, val);
-    POSTING_READ(fence_reg);
+        I915_WRITE(fence_reg + 4, val >> 32);
+        POSTING_READ(fence_reg + 4);
+
+        I915_WRITE(fence_reg + 0, val);
+        POSTING_READ(fence_reg);
+    } else {
+        I915_WRITE(fence_reg + 4, 0);
+        POSTING_READ(fence_reg + 4);
+    }
 }
 
 static void i915_write_fence_reg(struct drm_device *dev, int reg,
@@ -2773,6 +2805,10 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
     if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
         mb();
 
+    WARN(obj && (!obj->stride || !obj->tiling_mode),
+         "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+         obj->stride, obj->tiling_mode);
+
     switch (INTEL_INFO(dev)->gen) {
     case 7:
     case 6:
@@ -2796,56 +2832,17 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
     return fence - dev_priv->fence_regs;
 }
 
-struct write_fence {
-    struct drm_device *dev;
-    struct drm_i915_gem_object *obj;
-    int fence;
-};
-
-static void i915_gem_write_fence__ipi(void *data)
-{
-    struct write_fence *args = data;
-
-    /* Required for SNB+ with LLC */
-    wbinvd();
-
-    /* Required for VLV */
-    i915_gem_write_fence(args->dev, args->fence, args->obj);
-}
-
 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                          struct drm_i915_fence_reg *fence,
                                          bool enable)
 {
     struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-    struct write_fence args = {
-        .dev = obj->base.dev,
-        .fence = fence_number(dev_priv, fence),
-        .obj = enable ? obj : NULL,
-    };
-
-    /* In order to fully serialize access to the fenced region and
-     * the update to the fence register we need to take extreme
-     * measures on SNB+. In theory, the write to the fence register
-     * flushes all memory transactions before, and coupled with the
-     * mb() placed around the register write we serialise all memory
-     * operations with respect to the changes in the tiler. Yet, on
-     * SNB+ we need to take a step further and emit an explicit wbinvd()
-     * on each processor in order to manually flush all memory
-     * transactions before updating the fence register.
-     *
-     * However, Valleyview complicates matter. There the wbinvd is
-     * insufficient and unlike SNB/IVB requires the serialising
-     * register write. (Note that that register write by itself is
-     * conversely not sufficient for SNB+.) To compromise, we do both.
-     */
-    if (INTEL_INFO(args.dev)->gen >= 6)
-        on_each_cpu(i915_gem_write_fence__ipi, &args, 1);
-    else
-        i915_gem_write_fence(args.dev, args.fence, args.obj);
+    int reg = fence_number(dev_priv, fence);
+
+    i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
 
     if (enable) {
-        obj->fence_reg = args.fence;
+        obj->fence_reg = reg;
         fence->obj = obj;
         list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
     } else {
@@ -2853,6 +2850,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
         fence->obj = NULL;
         list_del_init(&fence->lru_list);
     }
+    obj->fence_dirty = false;
 }
 
 static int
@@ -2982,7 +2980,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
         return 0;
 
     i915_gem_object_update_fence(obj, reg, enable);
-    obj->fence_dirty = false;
 
     return 0;
 }
@@ -4611,7 +4608,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
     list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
         if (obj->pages_pin_count == 0)
             cnt += obj->base.size >> PAGE_SHIFT;
-    list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list)
+    list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
         if (obj->pin_count == 0 && obj->pages_pin_count == 0)
             cnt += obj->base.size >> PAGE_SHIFT;
 
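The key i915_gem.c change is in i965_write_fence_reg(): the single I915_WRITE64() is gone because the 64-bit MMIO update is not atomic, so the GPU could sample a half-written fence. The new ordering, restated with comments (same calls as in the hunk above; comments added for illustration):

    /* 1. Disable the old fence first, so no partial value is ever valid. */
    I915_WRITE(fence_reg, 0);
    POSTING_READ(fence_reg);              /* make sure the disable landed */

    /* 2. Write the upper dword while the fence is still off. */
    I915_WRITE(fence_reg + 4, val >> 32);
    POSTING_READ(fence_reg + 4);

    /* 3. Write the lower dword last; it carries I965_FENCE_REG_VALID. */
    I915_WRITE(fence_reg + 0, val);
    POSTING_READ(fence_reg);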
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f2326fc60ac9..6f514297c483 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1856,10 +1856,16 @@
 #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
 
 #define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114)
-/* HDMI/DP bits are gen4+ */
-#define PORTB_HOTPLUG_LIVE_STATUS (1 << 29)
+/*
+ * HDMI/DP bits are gen4+
+ *
+ * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
+ * Please check the detailed lore in the commit message for experimental
+ * evidence.
+ */
+#define PORTD_HOTPLUG_LIVE_STATUS (1 << 29)
 #define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
-#define PORTD_HOTPLUG_LIVE_STATUS (1 << 27)
+#define PORTB_HOTPLUG_LIVE_STATUS (1 << 27)
 #define PORTD_HOTPLUG_INT_STATUS (3 << 21)
 #define PORTC_HOTPLUG_INT_STATUS (3 << 19)
 #define PORTB_HOTPLUG_INT_STATUS (3 << 17)
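The swap only relabels the defines; the gen4 hardware always had port D on bit 29 and port B on bit 27, and the old names were backwards. Consumers keep the usual read-and-mask pattern (sketch):

    u32 stat = I915_READ(PORT_HOTPLUG_STAT);

    /* With the corrected defines: bit 29 is port D, bit 27 is port B. */
    bool portd_present = stat & PORTD_HOTPLUG_LIVE_STATUS;
    bool portb_present = stat & PORTB_HOTPLUG_LIVE_STATUS;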
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 324211ac9c55..b042ee5c4070 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -301,7 +301,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
     struct intel_digital_port *intel_dig_port =
         enc_to_dig_port(encoder);
 
-    intel_dp->DP = intel_dig_port->port_reversal |
+    intel_dp->DP = intel_dig_port->saved_port_bits |
                    DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
     intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
 
@@ -1109,7 +1109,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
          * enabling the port.
          */
         I915_WRITE(DDI_BUF_CTL(port),
-                   intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE);
+                   intel_dig_port->saved_port_bits |
+                   DDI_BUF_CTL_ENABLE);
     } else if (type == INTEL_OUTPUT_EDP) {
         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
@@ -1347,8 +1348,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
     intel_encoder->get_config = intel_ddi_get_config;
 
     intel_dig_port->port = port;
-    intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
-                                    DDI_BUF_PORT_REVERSAL;
+    intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+                                      (DDI_BUF_PORT_REVERSAL |
+                                       DDI_A_4_LANES);
     intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
 
     intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 85f3eb74d2b7..e38b45786653 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4913,22 +4913,19 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
     uint32_t tmp;
 
     tmp = I915_READ(PFIT_CONTROL);
+    if (!(tmp & PFIT_ENABLE))
+        return;
 
+    /* Check whether the pfit is attached to our pipe. */
     if (INTEL_INFO(dev)->gen < 4) {
         if (crtc->pipe != PIPE_B)
             return;
-
-        /* gen2/3 store dither state in pfit control, needs to match */
-        pipe_config->gmch_pfit.control = tmp & PANEL_8TO6_DITHER_ENABLE;
     } else {
         if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
             return;
     }
 
-    if (!(tmp & PFIT_ENABLE))
-        return;
-
-    pipe_config->gmch_pfit.control = I915_READ(PFIT_CONTROL);
+    pipe_config->gmch_pfit.control = tmp;
     pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
     if (INTEL_INFO(dev)->gen < 5)
         pipe_config->gmch_pfit.lvds_border_bits =
@@ -8272,9 +8269,11 @@ check_crtc_state(struct drm_device *dev)
 
         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                             base.head) {
+            enum pipe pipe;
             if (encoder->base.crtc != &crtc->base)
                 continue;
-            if (encoder->get_config)
+            if (encoder->get_config &&
+                encoder->get_hw_state(encoder, &pipe))
                 encoder->get_config(encoder, &pipe_config);
         }
 
@@ -8317,6 +8316,8 @@ check_shared_dpll_state(struct drm_device *dev)
              pll->active, pll->refcount);
         WARN(pll->active && !pll->on,
              "pll in active use but not on in sw tracking\n");
+        WARN(pll->on && !pll->active,
+             "pll in on but not on in use in sw tracking\n");
         WARN(pll->on != active,
              "pll on state mismatch (expected %i, found %i)\n",
              pll->on, active);
@@ -8541,15 +8542,20 @@ static void intel_set_config_restore_state(struct drm_device *dev,
 }
 
 static bool
-is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors,
-                      int num_connectors)
+is_crtc_connector_off(struct drm_mode_set *set)
 {
     int i;
 
-    for (i = 0; i < num_connectors; i++)
-        if (connectors[i].encoder &&
-            connectors[i].encoder->crtc == crtc &&
-            connectors[i].dpms != DRM_MODE_DPMS_ON)
+    if (set->num_connectors == 0)
+        return false;
+
+    if (WARN_ON(set->connectors == NULL))
+        return false;
+
+    for (i = 0; i < set->num_connectors; i++)
+        if (set->connectors[i]->encoder &&
+            set->connectors[i]->encoder->crtc == set->crtc &&
+            set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
             return true;
 
     return false;
@@ -8562,10 +8568,8 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
 
     /* We should be able to check here if the fb has the same properties
      * and then just flip_or_move it */
-    if (set->connectors != NULL &&
-        is_crtc_connector_off(set->crtc, *set->connectors,
-                              set->num_connectors)) {
+    if (is_crtc_connector_off(set)) {
         config->mode_changed = true;
     } else if (set->crtc->fb != set->fb) {
         /* If we have no fb then treat it as a full mode set */
         if (set->crtc->fb == NULL) {
@@ -9398,6 +9402,17 @@ static void quirk_invert_brightness(struct drm_device *dev)
     DRM_INFO("applying inverted panel brightness quirk\n");
 }
 
+/*
+ * Some machines (Dell XPS13) suffer broken backlight controls if
+ * BLM_PCH_PWM_ENABLE is set.
+ */
+static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
+{
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
+    DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
+}
+
 struct intel_quirk {
     int device;
     int subsystem_vendor;
@@ -9467,6 +9482,11 @@ static struct intel_quirk intel_quirks[] = {
 
     /* Acer Aspire 4736Z */
     { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+    /* Dell XPS13 HD Sandy Bridge */
+    { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
+    /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
+    { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
@@ -9817,8 +9837,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
         }
         pll->refcount = pll->active;
 
-        DRM_DEBUG_KMS("%s hw state readout: refcount %i\n",
-                      pll->name, pll->refcount);
+        DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
+                      pll->name, pll->refcount, pll->on);
     }
 
     list_for_each_entry(encoder, &dev->mode_config.encoder_list,
@@ -9869,6 +9889,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
     struct drm_plane *plane;
     struct intel_crtc *crtc;
     struct intel_encoder *encoder;
+    int i;
 
     intel_modeset_readout_hw_state(dev);
 
@@ -9884,6 +9905,18 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
         intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
     }
 
+    for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+        struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+        if (!pll->on || pll->active)
+            continue;
+
+        DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
+
+        pll->disable(dev_priv, pll);
+        pll->on = false;
+    }
+
     if (force_restore) {
         /*
          * We need to use raw interfaces for restoring state to avoid
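On the quirk side, the two new table entries are matched against the PCI device id plus the subsystem vendor/device pair, which is how the two Dell XPS13 variants are singled out. A self-contained sketch of the matching logic (example_* names are hypothetical; the real handling lives in intel_init_quirks()):

    #include <linux/pci.h>

    struct drm_device;

    struct example_quirk {
        int device;
        int subsystem_vendor;
        int subsystem_device;
        void (*hook)(struct drm_device *dev);
    };

    static bool example_quirk_matches(const struct example_quirk *q,
                                      const struct pci_dev *pdev)
    {
        return pdev->device == q->device &&
               pdev->subsystem_vendor == q->subsystem_vendor &&
               pdev->subsystem_device == q->subsystem_device;
    }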
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index b73971234013..26e162bb3a51 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -75,7 +75,12 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
     case DP_LINK_BW_1_62:
     case DP_LINK_BW_2_7:
         break;
+    case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
+        max_link_bw = DP_LINK_BW_2_7;
+        break;
     default:
+        WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
+             max_link_bw);
         max_link_bw = DP_LINK_BW_1_62;
         break;
     }
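DP 1.2 sinks may report DP_LINK_BW_5_4 (HBR2), which this code cannot drive yet, so it is clamped to 2.7 Gbps instead of falling into the (now warning) default case. The DPCD codes are simply the per-lane rate in 0.27 Gbps units, which makes the values easy to sanity-check (illustrative helper, not from the patch):

    /* DPCD max-link-rate code -> per-lane rate in units of 10 Mbps. */
    static int link_bw_code_to_rate(unsigned int code)
    {
        /* 0x06 -> 162 (1.62 Gbps), 0x0a -> 270 (2.7), 0x14 -> 540 (5.4) */
        return code * 27;
    }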
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index c8c9b6f48230..b7d6e09456ce 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -504,7 +504,7 @@ struct intel_dp {
 struct intel_digital_port {
     struct intel_encoder base;
     enum port port;
-    u32 port_reversal;
+    u32 saved_port_bits;
     struct intel_dp dp;
     struct intel_hdmi hdmi;
 };
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 98df2a0c85bd..2fd3fd5b943e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -785,10 +785,22 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
     }
 }
 
+static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+{
+    struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+
+    if (IS_G4X(dev))
+        return 165000;
+    else if (IS_HASWELL(dev))
+        return 300000;
+    else
+        return 225000;
+}
+
 static int intel_hdmi_mode_valid(struct drm_connector *connector,
                                  struct drm_display_mode *mode)
 {
-    if (mode->clock > 165000)
+    if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
         return MODE_CLOCK_HIGH;
     if (mode->clock < 20000)
         return MODE_CLOCK_LOW;
@@ -806,6 +818,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
     struct drm_device *dev = encoder->base.dev;
     struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
     int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
+    int portclock_limit = hdmi_portclock_limit(intel_hdmi);
     int desired_bpp;
 
     if (intel_hdmi->color_range_auto) {
@@ -829,7 +842,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
      * outputs. We also need to check that the higher clock still fits
      * within limits.
      */
-    if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= 225000
+    if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
         && HAS_PCH_SPLIT(dev)) {
         DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
         desired_bpp = 12*3;
@@ -846,7 +859,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
         pipe_config->pipe_bpp = desired_bpp;
     }
 
-    if (adjusted_mode->clock > 225000) {
+    if (adjusted_mode->clock > portclock_limit) {
         DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
         return false;
     }
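hdmi_portclock_limit() replaces the hardcoded 165/225 MHz checks: G4X keeps 165 MHz, Haswell is raised to 300 MHz, everything else stays at 225 MHz. A worked example with real mode pixel clocks in kHz (clock_ok is an illustrative stand-in for the mode_valid check, not driver code):

    static bool clock_ok(int clock_khz, int limit_khz)
    {
        return clock_khz >= 20000 && clock_khz <= limit_khz;
    }

    /*
     * clock_ok(148500, 165000) -> true   1080p@60 fits even on G4X
     * clock_ok(297000, 225000) -> false  4K@30 still rejected pre-Haswell
     * clock_ok(297000, 300000) -> true   ...but now accepted on Haswell
     */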
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 021e8daa022d..61348eae2f04 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -109,6 +109,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
         flags |= DRM_MODE_FLAG_PVSYNC;
 
     pipe_config->adjusted_mode.flags |= flags;
+
+    /* gen2/3 store dither state in pfit control, needs to match */
+    if (INTEL_INFO(dev)->gen < 4) {
+        tmp = I915_READ(PFIT_CONTROL);
+
+        pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+    }
 }
 
 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -290,14 +297,11 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
 
         intel_pch_panel_fitting(intel_crtc, pipe_config,
                                 intel_connector->panel.fitting_mode);
-        return true;
     } else {
         intel_gmch_panel_fitting(intel_crtc, pipe_config,
                                 intel_connector->panel.fitting_mode);
-    }
 
-    drm_mode_set_crtcinfo(adjusted_mode, 0);
-    pipe_config->timings_set = true;
+    }
 
     /*
      * XXX: It would be nice to support lower refresh rates on the
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 80bea1d3209f..5950888ae1d0 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
| @@ -194,6 +194,9 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc, | |||
| 194 | adjusted_mode->vdisplay == mode->vdisplay) | 194 | adjusted_mode->vdisplay == mode->vdisplay) |
| 195 | goto out; | 195 | goto out; |
| 196 | 196 | ||
| 197 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
| 198 | pipe_config->timings_set = true; | ||
| 199 | |||
| 197 | switch (fitting_mode) { | 200 | switch (fitting_mode) { |
| 198 | case DRM_MODE_SCALE_CENTER: | 201 | case DRM_MODE_SCALE_CENTER: |
| 199 | /* | 202 | /* |
| @@ -494,8 +497,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max) | |||
| 494 | goto out; | 497 | goto out; |
| 495 | } | 498 | } |
| 496 | 499 | ||
| 497 | /* scale to hardware */ | 500 | /* scale to hardware, but be careful to not overflow */ |
| 498 | level = level * freq / max; | 501 | if (freq < max) |
| 502 | level = level * freq / max; | ||
| 503 | else | ||
| 504 | level = freq / max * level; | ||
| 499 | 505 | ||
| 500 | dev_priv->backlight.level = level; | 506 | dev_priv->backlight.level = level; |
| 501 | if (dev_priv->backlight.device) | 507 | if (dev_priv->backlight.device) |
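
The reordered backlight scaling above is an overflow guard: level, freq and max are 32-bit values, and with a PWM frequency near the top of the range the product level * freq wraps before the division by max. Dividing first loses low-order precision but keeps the intermediate in range, which is the trade-off the patch accepts. A standalone demonstration with illustrative values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t level = 200, freq = 0x40000000, max = 255;

        /* 200 * 0x40000000 is a multiple of 2^32, so the naive form wraps to 0. */
        uint32_t naive = level * freq / max;
        uint32_t safe  = (freq < max) ? level * freq / max
                                      : freq / max * level;

        printf("naive=%u safe=%u\n", naive, safe); /* naive=0 safe=842150400 */
        return 0;
    }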
| @@ -512,6 +518,17 @@ void intel_panel_disable_backlight(struct drm_device *dev) | |||
| 512 | struct drm_i915_private *dev_priv = dev->dev_private; | 518 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 513 | unsigned long flags; | 519 | unsigned long flags; |
| 514 | 520 | ||
| 521 | /* | ||
| 522 | * Do not disable backlight on the vgaswitcheroo path. When switching | ||
| 523 | * away from i915, the other client may depend on i915 to handle the | ||
| 524 | * backlight. This will leave the backlight on unnecessarily when | ||
| 525 | * another client is not activated. | ||
| 526 | */ | ||
| 527 | if (dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) { | ||
| 528 | DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n"); | ||
| 529 | return; | ||
| 530 | } | ||
| 531 | |||
| 515 | spin_lock_irqsave(&dev_priv->backlight.lock, flags); | 532 | spin_lock_irqsave(&dev_priv->backlight.lock, flags); |
| 516 | 533 | ||
| 517 | dev_priv->backlight.enabled = false; | 534 | dev_priv->backlight.enabled = false; |
| @@ -580,7 +597,8 @@ void intel_panel_enable_backlight(struct drm_device *dev, | |||
| 580 | POSTING_READ(reg); | 597 | POSTING_READ(reg); |
| 581 | I915_WRITE(reg, tmp | BLM_PWM_ENABLE); | 598 | I915_WRITE(reg, tmp | BLM_PWM_ENABLE); |
| 582 | 599 | ||
| 583 | if (HAS_PCH_SPLIT(dev)) { | 600 | if (HAS_PCH_SPLIT(dev) && |
| 601 | !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) { | ||
| 584 | tmp = I915_READ(BLC_PWM_PCH_CTL1); | 602 | tmp = I915_READ(BLC_PWM_PCH_CTL1); |
| 585 | tmp |= BLM_PCH_PWM_ENABLE; | 603 | tmp |= BLM_PCH_PWM_ENABLE; |
| 586 | tmp &= ~BLM_PCH_OVERRIDE_ENABLE; | 604 | tmp &= ~BLM_PCH_OVERRIDE_ENABLE; |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ccbdd83f5220..b0e4a0bd1313 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
| @@ -5063,8 +5063,26 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) | |||
| 5063 | } | 5063 | } |
| 5064 | } else { | 5064 | } else { |
| 5065 | if (enable_requested) { | 5065 | if (enable_requested) { |
| 5066 | unsigned long irqflags; | ||
| 5067 | enum pipe p; | ||
| 5068 | |||
| 5066 | I915_WRITE(HSW_PWR_WELL_DRIVER, 0); | 5069 | I915_WRITE(HSW_PWR_WELL_DRIVER, 0); |
| 5070 | POSTING_READ(HSW_PWR_WELL_DRIVER); | ||
| 5067 | DRM_DEBUG_KMS("Requesting to disable the power well\n"); | 5071 | DRM_DEBUG_KMS("Requesting to disable the power well\n"); |
| 5072 | |||
| 5073 | /* | ||
| 5074 | * After this, the registers on the pipes that are part | ||
| 5075 | * of the power well will become zero, so we have to | ||
| 5076 | * adjust our counters according to that. | ||
| 5077 | * | ||
| 5078 | * FIXME: Should we do this in general in | ||
| 5079 | * drm_vblank_post_modeset? | ||
| 5080 | */ | ||
| 5081 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | ||
| 5082 | for_each_pipe(p) | ||
| 5083 | if (p != PIPE_A) | ||
| 5084 | dev->last_vblank[p] = 0; | ||
| 5085 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
| 5068 | } | 5086 | } |
| 5069 | } | 5087 | } |
| 5070 | } | 5088 | } |
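
The counter fixup above matters because the DRM core computes vblank progress as a masked difference between the hardware frame counter and a cached dev->last_vblank; once the power well zeroes the pipe registers, a stale cache makes the next difference absurd (the FIXME in the hunk asks whether drm_vblank_post_modeset should own this). A toy model of the bookkeeping; the 24-bit counter width is an assumption:

    #include <stdint.h>
    #include <stdio.h>

    #define VBL_MASK 0x00ffffff /* assumed hardware frame-counter width */

    static uint32_t last_vblank;

    static uint32_t vblank_delta(uint32_t hw_count)
    {
        uint32_t diff = (hw_count - last_vblank) & VBL_MASK;
        last_vblank = hw_count;
        return diff;
    }

    int main(void)
    {
        last_vblank = 100;                       /* cached before the well went down */
        printf("stale: %u\n", vblank_delta(0));  /* hw reset to 0: ~16.7M bogus frames */

        last_vblank = 0;                         /* what the patch does per pipe */
        printf("clean: %u\n", vblank_delta(0));  /* 0, as expected */
        return 0;
    }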
| @@ -5476,7 +5494,7 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv) | |||
| 5476 | gen6_gt_check_fifodbg(dev_priv); | 5494 | gen6_gt_check_fifodbg(dev_priv); |
| 5477 | } | 5495 | } |
| 5478 | 5496 | ||
| 5479 | void intel_gt_reset(struct drm_device *dev) | 5497 | void intel_gt_sanitize(struct drm_device *dev) |
| 5480 | { | 5498 | { |
| 5481 | struct drm_i915_private *dev_priv = dev->dev_private; | 5499 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5482 | 5500 | ||
| @@ -5487,26 +5505,61 @@ void intel_gt_reset(struct drm_device *dev) | |||
| 5487 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) | 5505 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) |
| 5488 | __gen6_gt_force_wake_mt_reset(dev_priv); | 5506 | __gen6_gt_force_wake_mt_reset(dev_priv); |
| 5489 | } | 5507 | } |
| 5508 | |||
| 5509 | /* BIOS often leaves RC6 enabled, but disable it for hw init */ | ||
| 5510 | if (INTEL_INFO(dev)->gen >= 6) | ||
| 5511 | intel_disable_gt_powersave(dev); | ||
| 5490 | } | 5512 | } |
| 5491 | 5513 | ||
| 5492 | void intel_gt_init(struct drm_device *dev) | 5514 | void intel_gt_init(struct drm_device *dev) |
| 5493 | { | 5515 | { |
| 5494 | struct drm_i915_private *dev_priv = dev->dev_private; | 5516 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5495 | 5517 | ||
| 5496 | spin_lock_init(&dev_priv->gt_lock); | ||
| 5497 | |||
| 5498 | intel_gt_reset(dev); | ||
| 5499 | |||
| 5500 | if (IS_VALLEYVIEW(dev)) { | 5518 | if (IS_VALLEYVIEW(dev)) { |
| 5501 | dev_priv->gt.force_wake_get = vlv_force_wake_get; | 5519 | dev_priv->gt.force_wake_get = vlv_force_wake_get; |
| 5502 | dev_priv->gt.force_wake_put = vlv_force_wake_put; | 5520 | dev_priv->gt.force_wake_put = vlv_force_wake_put; |
| 5503 | } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { | 5521 | } else if (IS_HASWELL(dev)) { |
| 5504 | dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get; | 5522 | dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get; |
| 5505 | dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put; | 5523 | dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put; |
| 5524 | } else if (IS_IVYBRIDGE(dev)) { | ||
| 5525 | u32 ecobus; | ||
| 5526 | |||
| 5527 | /* IVB configs may use multi-threaded forcewake */ | ||
| 5528 | |||
| 5529 | /* A small trick here - if the bios hasn't configured | ||
| 5530 | * MT forcewake, and if the device is in RC6, then | ||
| 5531 | * force_wake_mt_get will not wake the device and the | ||
| 5532 | * ECOBUS read will return zero. Which will be | ||
| 5533 | * (correctly) interpreted by the test below as MT | ||
| 5534 | * forcewake being disabled. | ||
| 5535 | */ | ||
| 5536 | mutex_lock(&dev->struct_mutex); | ||
| 5537 | __gen6_gt_force_wake_mt_get(dev_priv); | ||
| 5538 | ecobus = I915_READ_NOTRACE(ECOBUS); | ||
| 5539 | __gen6_gt_force_wake_mt_put(dev_priv); | ||
| 5540 | mutex_unlock(&dev->struct_mutex); | ||
| 5541 | |||
| 5542 | if (ecobus & FORCEWAKE_MT_ENABLE) { | ||
| 5543 | dev_priv->gt.force_wake_get = | ||
| 5544 | __gen6_gt_force_wake_mt_get; | ||
| 5545 | dev_priv->gt.force_wake_put = | ||
| 5546 | __gen6_gt_force_wake_mt_put; | ||
| 5547 | } else { | ||
| 5548 | DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); | ||
| 5549 | DRM_INFO("when using vblank-synced partial screen updates.\n"); | ||
| 5550 | dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; | ||
| 5551 | dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; | ||
| 5552 | } | ||
| 5506 | } else if (IS_GEN6(dev)) { | 5553 | } else if (IS_GEN6(dev)) { |
| 5507 | dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; | 5554 | dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; |
| 5508 | dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; | 5555 | dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; |
| 5509 | } | 5556 | } |
| 5557 | } | ||
| 5558 | |||
| 5559 | void intel_pm_init(struct drm_device *dev) | ||
| 5560 | { | ||
| 5561 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 5562 | |||
| 5510 | INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, | 5563 | INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, |
| 5511 | intel_gen6_powersave_work); | 5564 | intel_gen6_powersave_work); |
| 5512 | } | 5565 | } |
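
The new Ivybridge branch picks forcewake callbacks by probing rather than trusting the BIOS: try an MT wake, read ECOBUS back, and treat a zero read as MT forcewake never having been configured (the comment in the hunk explains why the zero read is the correct negative signal). The shape of that probe, with all register access reduced to stand-ins:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MT_ENABLE_BIT (1u << 5)      /* stand-in for FORCEWAKE_MT_ENABLE */

    static uint32_t fake_ecobus;         /* stays 0 if the MT wake had no effect */

    static void mt_get(void)          { /* would write FORCEWAKE_MT here */ }
    static void mt_put(void)          { /* would release it here */ }
    static uint32_t read_ecobus(void) { return fake_ecobus; }

    int main(void)
    {
        mt_get();
        uint32_t ecobus = read_ecobus();
        mt_put();

        bool use_mt = ecobus & MT_ENABLE_BIT;
        printf("using %s forcewake\n", use_mt ? "MT" : "legacy");
        return 0;
    }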
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index e51ab552046c..664118d8c1d6 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
| @@ -379,6 +379,17 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) | |||
| 379 | return I915_READ(acthd_reg); | 379 | return I915_READ(acthd_reg); |
| 380 | } | 380 | } |
| 381 | 381 | ||
| 382 | static void ring_setup_phys_status_page(struct intel_ring_buffer *ring) | ||
| 383 | { | ||
| 384 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | ||
| 385 | u32 addr; | ||
| 386 | |||
| 387 | addr = dev_priv->status_page_dmah->busaddr; | ||
| 388 | if (INTEL_INFO(ring->dev)->gen >= 4) | ||
| 389 | addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; | ||
| 390 | I915_WRITE(HWS_PGA, addr); | ||
| 391 | } | ||
| 392 | |||
| 382 | static int init_ring_common(struct intel_ring_buffer *ring) | 393 | static int init_ring_common(struct intel_ring_buffer *ring) |
| 383 | { | 394 | { |
| 384 | struct drm_device *dev = ring->dev; | 395 | struct drm_device *dev = ring->dev; |
| @@ -390,6 +401,11 @@ static int init_ring_common(struct intel_ring_buffer *ring) | |||
| 390 | if (HAS_FORCE_WAKE(dev)) | 401 | if (HAS_FORCE_WAKE(dev)) |
| 391 | gen6_gt_force_wake_get(dev_priv); | 402 | gen6_gt_force_wake_get(dev_priv); |
| 392 | 403 | ||
| 404 | if (I915_NEED_GFX_HWS(dev)) | ||
| 405 | intel_ring_setup_status_page(ring); | ||
| 406 | else | ||
| 407 | ring_setup_phys_status_page(ring); | ||
| 408 | |||
| 393 | /* Stop the ring if it's running. */ | 409 | /* Stop the ring if it's running. */ |
| 394 | I915_WRITE_CTL(ring, 0); | 410 | I915_WRITE_CTL(ring, 0); |
| 395 | I915_WRITE_HEAD(ring, 0); | 411 | I915_WRITE_HEAD(ring, 0); |
| @@ -518,9 +534,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring) | |||
| 518 | struct pipe_control *pc = ring->private; | 534 | struct pipe_control *pc = ring->private; |
| 519 | struct drm_i915_gem_object *obj; | 535 | struct drm_i915_gem_object *obj; |
| 520 | 536 | ||
| 521 | if (!ring->private) | ||
| 522 | return; | ||
| 523 | |||
| 524 | obj = pc->obj; | 537 | obj = pc->obj; |
| 525 | 538 | ||
| 526 | kunmap(sg_page(obj->pages->sgl)); | 539 | kunmap(sg_page(obj->pages->sgl)); |
| @@ -528,7 +541,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring) | |||
| 528 | drm_gem_object_unreference(&obj->base); | 541 | drm_gem_object_unreference(&obj->base); |
| 529 | 542 | ||
| 530 | kfree(pc); | 543 | kfree(pc); |
| 531 | ring->private = NULL; | ||
| 532 | } | 544 | } |
| 533 | 545 | ||
| 534 | static int init_render_ring(struct intel_ring_buffer *ring) | 546 | static int init_render_ring(struct intel_ring_buffer *ring) |
| @@ -601,7 +613,10 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring) | |||
| 601 | if (HAS_BROKEN_CS_TLB(dev)) | 613 | if (HAS_BROKEN_CS_TLB(dev)) |
| 602 | drm_gem_object_unreference(to_gem_object(ring->private)); | 614 | drm_gem_object_unreference(to_gem_object(ring->private)); |
| 603 | 615 | ||
| 604 | cleanup_pipe_control(ring); | 616 | if (INTEL_INFO(dev)->gen >= 5) |
| 617 | cleanup_pipe_control(ring); | ||
| 618 | |||
| 619 | ring->private = NULL; | ||
| 605 | } | 620 | } |
| 606 | 621 | ||
| 607 | static void | 622 | static void |
| @@ -1223,7 +1238,6 @@ static int init_status_page(struct intel_ring_buffer *ring) | |||
| 1223 | ring->status_page.obj = obj; | 1238 | ring->status_page.obj = obj; |
| 1224 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); | 1239 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); |
| 1225 | 1240 | ||
| 1226 | intel_ring_setup_status_page(ring); | ||
| 1227 | DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", | 1241 | DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", |
| 1228 | ring->name, ring->status_page.gfx_addr); | 1242 | ring->name, ring->status_page.gfx_addr); |
| 1229 | 1243 | ||
| @@ -1237,10 +1251,9 @@ err: | |||
| 1237 | return ret; | 1251 | return ret; |
| 1238 | } | 1252 | } |
| 1239 | 1253 | ||
| 1240 | static int init_phys_hws_pga(struct intel_ring_buffer *ring) | 1254 | static int init_phys_status_page(struct intel_ring_buffer *ring) |
| 1241 | { | 1255 | { |
| 1242 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 1256 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
| 1243 | u32 addr; | ||
| 1244 | 1257 | ||
| 1245 | if (!dev_priv->status_page_dmah) { | 1258 | if (!dev_priv->status_page_dmah) { |
| 1246 | dev_priv->status_page_dmah = | 1259 | dev_priv->status_page_dmah = |
| @@ -1249,11 +1262,6 @@ static int init_phys_hws_pga(struct intel_ring_buffer *ring) | |||
| 1249 | return -ENOMEM; | 1262 | return -ENOMEM; |
| 1250 | } | 1263 | } |
| 1251 | 1264 | ||
| 1252 | addr = dev_priv->status_page_dmah->busaddr; | ||
| 1253 | if (INTEL_INFO(ring->dev)->gen >= 4) | ||
| 1254 | addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; | ||
| 1255 | I915_WRITE(HWS_PGA, addr); | ||
| 1256 | |||
| 1257 | ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; | 1265 | ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; |
| 1258 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); | 1266 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); |
| 1259 | 1267 | ||
| @@ -1281,7 +1289,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, | |||
| 1281 | return ret; | 1289 | return ret; |
| 1282 | } else { | 1290 | } else { |
| 1283 | BUG_ON(ring->id != RCS); | 1291 | BUG_ON(ring->id != RCS); |
| 1284 | ret = init_phys_hws_pga(ring); | 1292 | ret = init_phys_status_page(ring); |
| 1285 | if (ret) | 1293 | if (ret) |
| 1286 | return ret; | 1294 | return ret; |
| 1287 | } | 1295 | } |
| @@ -1893,7 +1901,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) | |||
| 1893 | } | 1901 | } |
| 1894 | 1902 | ||
| 1895 | if (!I915_NEED_GFX_HWS(dev)) { | 1903 | if (!I915_NEED_GFX_HWS(dev)) { |
| 1896 | ret = init_phys_hws_pga(ring); | 1904 | ret = init_phys_status_page(ring); |
| 1897 | if (ret) | 1905 | if (ret) |
| 1898 | return ret; | 1906 | return ret; |
| 1899 | } | 1907 | } |
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 251784aa2225..503a414cbdad 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c | |||
| @@ -29,6 +29,7 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc) | |||
| 29 | struct mga_crtc *mga_crtc = to_mga_crtc(crtc); | 29 | struct mga_crtc *mga_crtc = to_mga_crtc(crtc); |
| 30 | struct drm_device *dev = crtc->dev; | 30 | struct drm_device *dev = crtc->dev; |
| 31 | struct mga_device *mdev = dev->dev_private; | 31 | struct mga_device *mdev = dev->dev_private; |
| 32 | struct drm_framebuffer *fb = crtc->fb; | ||
| 32 | int i; | 33 | int i; |
| 33 | 34 | ||
| 34 | if (!crtc->enabled) | 35 | if (!crtc->enabled) |
| @@ -36,6 +37,28 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc) | |||
| 36 | 37 | ||
| 37 | WREG8(DAC_INDEX + MGA1064_INDEX, 0); | 38 | WREG8(DAC_INDEX + MGA1064_INDEX, 0); |
| 38 | 39 | ||
| 40 | if (fb && fb->bits_per_pixel == 16) { | ||
| 41 | int inc = (fb->depth == 15) ? 8 : 4; | ||
| 42 | u8 r, b; | ||
| 43 | for (i = 0; i < MGAG200_LUT_SIZE; i += inc) { | ||
| 44 | if (fb->depth == 16) { | ||
| 45 | if (i > (MGAG200_LUT_SIZE >> 1)) { | ||
| 46 | r = b = 0; | ||
| 47 | } else { | ||
| 48 | r = mga_crtc->lut_r[i << 1]; | ||
| 49 | b = mga_crtc->lut_b[i << 1]; | ||
| 50 | } | ||
| 51 | } else { | ||
| 52 | r = mga_crtc->lut_r[i]; | ||
| 53 | b = mga_crtc->lut_b[i]; | ||
| 54 | } | ||
| 55 | /* VGA registers */ | ||
| 56 | WREG8(DAC_INDEX + MGA1064_COL_PAL, r); | ||
| 57 | WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]); | ||
| 58 | WREG8(DAC_INDEX + MGA1064_COL_PAL, b); | ||
| 59 | } | ||
| 60 | return; | ||
| 61 | } | ||
| 39 | for (i = 0; i < MGAG200_LUT_SIZE; i++) { | 62 | for (i = 0; i < MGAG200_LUT_SIZE; i++) { |
| 40 | /* VGA registers */ | 63 | /* VGA registers */ |
| 41 | WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]); | 64 | WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]); |
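
The new 16bpp branch exists because the palette DAC always expects 256 RGB triplets, while an RGB565 framebuffer can only index 64 green and 32 red/blue levels (555 uses 32 for all three, hence the larger stride). Programming every 4th or 8th entry, and sampling red/blue at twice the green stride, keeps the component ramps aligned. A sketch of the same expansion for a plain linear ramp; the driver samples its stored lut_r/g/b tables instead:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t pal[256][3] = {{0}};  /* the DAC takes 256 triplets regardless of depth */

        /* RGB565: 64 green levels, 32 red/blue levels, widened to 8 bits. */
        for (int i = 0; i < 64; i++) {
            pal[i * 4][0] = (uint8_t)((i >> 1) * 255 / 31); /* red   */
            pal[i * 4][1] = (uint8_t)(i * 255 / 63);        /* green */
            pal[i * 4][2] = (uint8_t)((i >> 1) * 255 / 31); /* blue  */
        }

        printf("entry 252: r=%u g=%u b=%u\n",
               pal[252][0], pal[252][1], pal[252][2]);      /* 255 255 255 */
        return 0;
    }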
| @@ -877,7 +900,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, | |||
| 877 | 900 | ||
| 878 | pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8); | 901 | pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8); |
| 879 | if (crtc->fb->bits_per_pixel == 24) | 902 | if (crtc->fb->bits_per_pixel == 24) |
| 880 | pitch = pitch >> (4 - bppshift); | 903 | pitch = (pitch * 3) >> (4 - bppshift); |
| 881 | else | 904 | else |
| 882 | pitch = pitch >> (4 - bppshift); | 905 | pitch = pitch >> (4 - bppshift); |
| 883 | 906 | ||
| @@ -1251,6 +1274,24 @@ static void mga_crtc_destroy(struct drm_crtc *crtc) | |||
| 1251 | kfree(mga_crtc); | 1274 | kfree(mga_crtc); |
| 1252 | } | 1275 | } |
| 1253 | 1276 | ||
| 1277 | static void mga_crtc_disable(struct drm_crtc *crtc) | ||
| 1278 | { | ||
| 1279 | int ret; | ||
| 1280 | DRM_DEBUG_KMS("\n"); | ||
| 1281 | mga_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); | ||
| 1282 | if (crtc->fb) { | ||
| 1283 | struct mga_framebuffer *mga_fb = to_mga_framebuffer(crtc->fb); | ||
| 1284 | struct drm_gem_object *obj = mga_fb->obj; | ||
| 1285 | struct mgag200_bo *bo = gem_to_mga_bo(obj); | ||
| 1286 | ret = mgag200_bo_reserve(bo, false); | ||
| 1287 | if (ret) | ||
| 1288 | return; | ||
| 1289 | mgag200_bo_push_sysram(bo); | ||
| 1290 | mgag200_bo_unreserve(bo); | ||
| 1291 | } | ||
| 1292 | crtc->fb = NULL; | ||
| 1293 | } | ||
| 1294 | |||
| 1254 | /* These provide the minimum set of functions required to handle a CRTC */ | 1295 | /* These provide the minimum set of functions required to handle a CRTC */ |
| 1255 | static const struct drm_crtc_funcs mga_crtc_funcs = { | 1296 | static const struct drm_crtc_funcs mga_crtc_funcs = { |
| 1256 | .cursor_set = mga_crtc_cursor_set, | 1297 | .cursor_set = mga_crtc_cursor_set, |
| @@ -1261,6 +1302,7 @@ static const struct drm_crtc_funcs mga_crtc_funcs = { | |||
| 1261 | }; | 1302 | }; |
| 1262 | 1303 | ||
| 1263 | static const struct drm_crtc_helper_funcs mga_helper_funcs = { | 1304 | static const struct drm_crtc_helper_funcs mga_helper_funcs = { |
| 1305 | .disable = mga_crtc_disable, | ||
| 1264 | .dpms = mga_crtc_dpms, | 1306 | .dpms = mga_crtc_dpms, |
| 1265 | .mode_fixup = mga_crtc_mode_fixup, | 1307 | .mode_fixup = mga_crtc_mode_fixup, |
| 1266 | .mode_set = mga_crtc_mode_set, | 1308 | .mode_set = mga_crtc_mode_set, |
| @@ -1581,6 +1623,8 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev) | |||
| 1581 | 1623 | ||
| 1582 | drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs); | 1624 | drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs); |
| 1583 | 1625 | ||
| 1626 | drm_sysfs_connector_add(connector); | ||
| 1627 | |||
| 1584 | mga_connector->i2c = mgag200_i2c_create(dev); | 1628 | mga_connector->i2c = mgag200_i2c_create(dev); |
| 1585 | if (!mga_connector->i2c) | 1629 | if (!mga_connector->i2c) |
| 1586 | DRM_ERROR("failed to add ddc bus\n"); | 1630 | DRM_ERROR("failed to add ddc bus\n"); |
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 3acb2b044c7b..d70e4a92773b 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c | |||
| @@ -323,6 +323,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align, | |||
| 323 | 323 | ||
| 324 | mgabo->gem.driver_private = NULL; | 324 | mgabo->gem.driver_private = NULL; |
| 325 | mgabo->bo.bdev = &mdev->ttm.bdev; | 325 | mgabo->bo.bdev = &mdev->ttm.bdev; |
| 326 | mgabo->bo.bdev->dev_mapping = dev->dev_mapping; | ||
| 326 | 327 | ||
| 327 | mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); | 328 | mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); |
| 328 | 329 | ||
| @@ -353,6 +354,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr) | |||
| 353 | bo->pin_count++; | 354 | bo->pin_count++; |
| 354 | if (gpu_addr) | 355 | if (gpu_addr) |
| 355 | *gpu_addr = mgag200_bo_gpu_offset(bo); | 356 | *gpu_addr = mgag200_bo_gpu_offset(bo); |
| 357 | return 0; | ||
| 356 | } | 358 | } |
| 357 | 359 | ||
| 358 | mgag200_ttm_placement(bo, pl_flag); | 360 | mgag200_ttm_placement(bo, pl_flag); |
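
The one-line return 0 above is the entire mgag200_bo_pin() fix: without it, pinning an already-pinned object bumped the refcount and then fell through into placement setup and validation anyway. A minimal model of the pin-refcount pattern with the early-out made explicit; everything TTM-specific is elided:

    #include <stdio.h>

    struct bo { int pin_count; };

    static int bo_pin(struct bo *bo)
    {
        if (bo->pin_count) {
            bo->pin_count++;
            return 0;          /* already resident: just count the reference */
        }
        /* ...set placement, validate, report the GPU offset... */
        bo->pin_count = 1;
        return 0;
    }

    int main(void)
    {
        struct bo bo = { 0 };
        bo_pin(&bo);
        bo_pin(&bo);
        printf("pin_count=%d\n", bo.pin_count); /* 2 */
        return 0;
    }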
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c index 262c9f5f5f60..ce860de43e61 100644 --- a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c | |||
| @@ -90,6 +90,7 @@ nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 90 | return ret; | 90 | return ret; |
| 91 | 91 | ||
| 92 | nv_subdev(priv)->unit = 0x00008000; | 92 | nv_subdev(priv)->unit = 0x00008000; |
| 93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
| 93 | nv_engine(priv)->cclass = &nvc0_bsp_cclass; | 94 | nv_engine(priv)->cclass = &nvc0_bsp_cclass; |
| 94 | nv_engine(priv)->sclass = nvc0_bsp_sclass; | 95 | nv_engine(priv)->sclass = nvc0_bsp_sclass; |
| 95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c index c46882c83982..ba6aeca0285e 100644 --- a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c | |||
| @@ -90,6 +90,7 @@ nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 90 | return ret; | 90 | return ret; |
| 91 | 91 | ||
| 92 | nv_subdev(priv)->unit = 0x00008000; | 92 | nv_subdev(priv)->unit = 0x00008000; |
| 93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
| 93 | nv_engine(priv)->cclass = &nve0_bsp_cclass; | 94 | nv_engine(priv)->cclass = &nve0_bsp_cclass; |
| 94 | nv_engine(priv)->sclass = nve0_bsp_sclass; | 95 | nv_engine(priv)->sclass = nve0_bsp_sclass; |
| 95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c index 373dbcc523b2..a19e7d79b847 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c | |||
| @@ -36,6 +36,8 @@ nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size) | |||
| 36 | if (data && data[0]) { | 36 | if (data && data[0]) { |
| 37 | for (i = 0; i < size; i++) | 37 | for (i = 0; i < size; i++) |
| 38 | nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]); | 38 | nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]); |
| 39 | for (; i < 0x60; i++) | ||
| 40 | nv_wr32(priv, 0x61c440 + soff, (i << 8)); | ||
| 39 | nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003); | 41 | nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003); |
| 40 | } else | 42 | } else |
| 41 | if (data) { | 43 | if (data) { |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c index dc57e24fc1df..717639386ced 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c | |||
| @@ -41,6 +41,8 @@ nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size) | |||
| 41 | if (data && data[0]) { | 41 | if (data && data[0]) { |
| 42 | for (i = 0; i < size; i++) | 42 | for (i = 0; i < size; i++) |
| 43 | nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]); | 43 | nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]); |
| 44 | for (; i < 0x60; i++) | ||
| 45 | nv_wr32(priv, 0x10ec00 + soff, (i << 8)); | ||
| 44 | nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003); | 46 | nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003); |
| 45 | } else | 47 | } else |
| 46 | if (data) { | 48 | if (data) { |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c index ab1e918469a8..526b75242899 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c | |||
| @@ -47,14 +47,8 @@ int | |||
| 47 | nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) | 47 | nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) |
| 48 | { | 48 | { |
| 49 | struct nv50_disp_priv *priv = (void *)object->engine; | 49 | struct nv50_disp_priv *priv = (void *)object->engine; |
| 50 | struct nouveau_bios *bios = nouveau_bios(priv); | ||
| 51 | const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12; | ||
| 52 | const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3; | 50 | const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3; |
| 53 | const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2; | ||
| 54 | const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR); | 51 | const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR); |
| 55 | const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or); | ||
| 56 | struct dcb_output outp; | ||
| 57 | u8 ver, hdr; | ||
| 58 | u32 data; | 52 | u32 data; |
| 59 | int ret = -EINVAL; | 53 | int ret = -EINVAL; |
| 60 | 54 | ||
| @@ -62,8 +56,6 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) | |||
| 62 | return -EINVAL; | 56 | return -EINVAL; |
| 63 | data = *(u32 *)args; | 57 | data = *(u32 *)args; |
| 64 | 58 | ||
| 65 | if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp)) | ||
| 66 | return -ENODEV; | ||
| 67 | 59 | ||
| 68 | switch (mthd & ~0x3f) { | 60 | switch (mthd & ~0x3f) { |
| 69 | case NV50_DISP_SOR_PWR: | 61 | case NV50_DISP_SOR_PWR: |
diff --git a/drivers/gpu/drm/nouveau/core/engine/falcon.c b/drivers/gpu/drm/nouveau/core/engine/falcon.c index 3c7a31f7590e..e03fc8e4dc1d 100644 --- a/drivers/gpu/drm/nouveau/core/engine/falcon.c +++ b/drivers/gpu/drm/nouveau/core/engine/falcon.c | |||
| @@ -23,6 +23,25 @@ | |||
| 23 | #include <engine/falcon.h> | 23 | #include <engine/falcon.h> |
| 24 | #include <subdev/timer.h> | 24 | #include <subdev/timer.h> |
| 25 | 25 | ||
| 26 | void | ||
| 27 | nouveau_falcon_intr(struct nouveau_subdev *subdev) | ||
| 28 | { | ||
| 29 | struct nouveau_falcon *falcon = (void *)subdev; | ||
| 30 | u32 dispatch = nv_ro32(falcon, 0x01c); | ||
| 31 | u32 intr = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16); | ||
| 32 | |||
| 33 | if (intr & 0x00000010) { | ||
| 34 | nv_debug(falcon, "ucode halted\n"); | ||
| 35 | nv_wo32(falcon, 0x004, 0x00000010); | ||
| 36 | intr &= ~0x00000010; | ||
| 37 | } | ||
| 38 | |||
| 39 | if (intr) { | ||
| 40 | nv_error(falcon, "unhandled intr 0x%08x\n", intr); | ||
| 41 | nv_wo32(falcon, 0x004, intr); | ||
| 42 | } | ||
| 43 | } | ||
| 44 | |||
| 26 | u32 | 45 | u32 |
| 27 | _nouveau_falcon_rd32(struct nouveau_object *object, u64 addr) | 46 | _nouveau_falcon_rd32(struct nouveau_object *object, u64 addr) |
| 28 | { | 47 | { |
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c index 49ecbb859b25..c19004301309 100644 --- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c +++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c | |||
| @@ -265,8 +265,8 @@ nv31_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 265 | int | 265 | int |
| 266 | nv31_mpeg_init(struct nouveau_object *object) | 266 | nv31_mpeg_init(struct nouveau_object *object) |
| 267 | { | 267 | { |
| 268 | struct nouveau_engine *engine = nv_engine(object->engine); | 268 | struct nouveau_engine *engine = nv_engine(object); |
| 269 | struct nv31_mpeg_priv *priv = (void *)engine; | 269 | struct nv31_mpeg_priv *priv = (void *)object; |
| 270 | struct nouveau_fb *pfb = nouveau_fb(object); | 270 | struct nouveau_fb *pfb = nouveau_fb(object); |
| 271 | int ret, i; | 271 | int ret, i; |
| 272 | 272 | ||
| @@ -284,7 +284,10 @@ nv31_mpeg_init(struct nouveau_object *object) | |||
| 284 | /* PMPEG init */ | 284 | /* PMPEG init */ |
| 285 | nv_wr32(priv, 0x00b32c, 0x00000000); | 285 | nv_wr32(priv, 0x00b32c, 0x00000000); |
| 286 | nv_wr32(priv, 0x00b314, 0x00000100); | 286 | nv_wr32(priv, 0x00b314, 0x00000100); |
| 287 | nv_wr32(priv, 0x00b220, nv44_graph_class(priv) ? 0x00000044 : 0x00000031); | 287 | if (nv_device(priv)->chipset >= 0x40 && nv44_graph_class(priv)) |
| 288 | nv_wr32(priv, 0x00b220, 0x00000044); | ||
| 289 | else | ||
| 290 | nv_wr32(priv, 0x00b220, 0x00000031); | ||
| 288 | nv_wr32(priv, 0x00b300, 0x02001ec1); | 291 | nv_wr32(priv, 0x00b300, 0x02001ec1); |
| 289 | nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001); | 292 | nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001); |
| 290 | 293 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c index f7c581ad1991..dd6196072e9c 100644 --- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c | |||
| @@ -61,6 +61,7 @@ nv40_mpeg_context_ctor(struct nouveau_object *parent, | |||
| 61 | if (ret) | 61 | if (ret) |
| 62 | return ret; | 62 | return ret; |
| 63 | 63 | ||
| 64 | nv_wo32(&chan->base.base, 0x78, 0x02001ec1); | ||
| 64 | return 0; | 65 | return 0; |
| 65 | } | 66 | } |
| 66 | 67 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c index 98072c1ff360..73719aaa62d6 100644 --- a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c | |||
| @@ -90,6 +90,7 @@ nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 90 | return ret; | 90 | return ret; |
| 91 | 91 | ||
| 92 | nv_subdev(priv)->unit = 0x00000002; | 92 | nv_subdev(priv)->unit = 0x00000002; |
| 93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
| 93 | nv_engine(priv)->cclass = &nvc0_ppp_cclass; | 94 | nv_engine(priv)->cclass = &nvc0_ppp_cclass; |
| 94 | nv_engine(priv)->sclass = nvc0_ppp_sclass; | 95 | nv_engine(priv)->sclass = nvc0_ppp_sclass; |
| 95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c index 1879229b60eb..ac1f62aace72 100644 --- a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c | |||
| @@ -90,6 +90,7 @@ nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 90 | return ret; | 90 | return ret; |
| 91 | 91 | ||
| 92 | nv_subdev(priv)->unit = 0x00020000; | 92 | nv_subdev(priv)->unit = 0x00020000; |
| 93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
| 93 | nv_engine(priv)->cclass = &nvc0_vp_cclass; | 94 | nv_engine(priv)->cclass = &nvc0_vp_cclass; |
| 94 | nv_engine(priv)->sclass = nvc0_vp_sclass; | 95 | nv_engine(priv)->sclass = nvc0_vp_sclass; |
| 95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c index d28ecbf7bc49..d4c3108479c9 100644 --- a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c | |||
| @@ -90,6 +90,7 @@ nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 90 | return ret; | 90 | return ret; |
| 91 | 91 | ||
| 92 | nv_subdev(priv)->unit = 0x00020000; | 92 | nv_subdev(priv)->unit = 0x00020000; |
| 93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
| 93 | nv_engine(priv)->cclass = &nve0_vp_cclass; | 94 | nv_engine(priv)->cclass = &nve0_vp_cclass; |
| 94 | nv_engine(priv)->sclass = nve0_vp_sclass; | 95 | nv_engine(priv)->sclass = nve0_vp_sclass; |
| 95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/xtensa.c b/drivers/gpu/drm/nouveau/core/engine/xtensa.c index 0639bc59d0a5..5f6ede7c4892 100644 --- a/drivers/gpu/drm/nouveau/core/engine/xtensa.c +++ b/drivers/gpu/drm/nouveau/core/engine/xtensa.c | |||
| @@ -118,7 +118,13 @@ _nouveau_xtensa_init(struct nouveau_object *object) | |||
| 118 | return ret; | 118 | return ret; |
| 119 | } | 119 | } |
| 120 | 120 | ||
| 121 | ret = nouveau_gpuobj_new(object, NULL, fw->size, 0x1000, 0, | 121 | if (fw->size > 0x40000) { |
| 122 | nv_warn(xtensa, "firmware %s too large\n", name); | ||
| 123 | release_firmware(fw); | ||
| 124 | return -EINVAL; | ||
| 125 | } | ||
| 126 | |||
| 127 | ret = nouveau_gpuobj_new(object, NULL, 0x40000, 0x1000, 0, | ||
| 122 | &xtensa->gpu_fw); | 128 | &xtensa->gpu_fw); |
| 123 | if (ret) { | 129 | if (ret) { |
| 124 | release_firmware(fw); | 130 | release_firmware(fw); |
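
The xtensa change pairs a fixed 0x40000-byte GPU object with an explicit size check, since copying an oversized firmware blob into a fixed-size destination would run past it. The shape is plain validate-before-copy:

    #include <stdio.h>
    #include <string.h>

    #define FW_MAX 0x40000          /* fixed-size destination, as in the patch */

    static char gpu_fw[FW_MAX];

    static int load_fw(const void *blob, size_t size)
    {
        if (size > FW_MAX)
            return -1;              /* reject rather than truncate or overflow */
        memcpy(gpu_fw, blob, size);
        return 0;
    }

    int main(void)
    {
        char blob[16] = "ucode";
        printf("load: %d\n", load_fw(blob, sizeof(blob)));
        return 0;
    }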
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h b/drivers/gpu/drm/nouveau/core/include/engine/falcon.h index 1edec386ab36..181aa7da524d 100644 --- a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h +++ b/drivers/gpu/drm/nouveau/core/include/engine/falcon.h | |||
| @@ -72,6 +72,8 @@ int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *, | |||
| 72 | struct nouveau_oclass *, u32, bool, const char *, | 72 | struct nouveau_oclass *, u32, bool, const char *, |
| 73 | const char *, int, void **); | 73 | const char *, int, void **); |
| 74 | 74 | ||
| 75 | void nouveau_falcon_intr(struct nouveau_subdev *subdev); | ||
| 76 | |||
| 75 | #define _nouveau_falcon_dtor _nouveau_engine_dtor | 77 | #define _nouveau_falcon_dtor _nouveau_engine_dtor |
| 76 | int _nouveau_falcon_init(struct nouveau_object *); | 78 | int _nouveau_falcon_init(struct nouveau_object *); |
| 77 | int _nouveau_falcon_fini(struct nouveau_object *, bool); | 79 | int _nouveau_falcon_fini(struct nouveau_object *, bool); |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h index f2e87b105666..fcf57fa309bf 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h | |||
| @@ -55,7 +55,7 @@ struct nouveau_vma { | |||
| 55 | struct nouveau_vm { | 55 | struct nouveau_vm { |
| 56 | struct nouveau_vmmgr *vmm; | 56 | struct nouveau_vmmgr *vmm; |
| 57 | struct nouveau_mm mm; | 57 | struct nouveau_mm mm; |
| 58 | int refcount; | 58 | struct kref refcount; |
| 59 | 59 | ||
| 60 | struct list_head pgd_list; | 60 | struct list_head pgd_list; |
| 61 | atomic_t engref[NVDEV_SUBDEV_NR]; | 61 | atomic_t engref[NVDEV_SUBDEV_NR]; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h index 6c974dd83e8b..db9d6ddde52c 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h | |||
| @@ -81,7 +81,7 @@ void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *); | |||
| 81 | void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size, | 81 | void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size, |
| 82 | u32 pitch, u32 flags, struct nouveau_fb_tile *); | 82 | u32 pitch, u32 flags, struct nouveau_fb_tile *); |
| 83 | 83 | ||
| 84 | void nv50_ram_put(struct nouveau_fb *, struct nouveau_mem **); | 84 | void __nv50_ram_put(struct nouveau_fb *, struct nouveau_mem *); |
| 85 | extern int nv50_fb_memtype[0x80]; | 85 | extern int nv50_fb_memtype[0x80]; |
| 86 | 86 | ||
| 87 | #endif | 87 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c index af5aa7ee8ad9..903baff77fdd 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c | |||
| @@ -27,17 +27,10 @@ | |||
| 27 | #include "priv.h" | 27 | #include "priv.h" |
| 28 | 28 | ||
| 29 | void | 29 | void |
| 30 | nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) | 30 | __nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem *mem) |
| 31 | { | 31 | { |
| 32 | struct nouveau_mm_node *this; | 32 | struct nouveau_mm_node *this; |
| 33 | struct nouveau_mem *mem; | ||
| 34 | 33 | ||
| 35 | mem = *pmem; | ||
| 36 | *pmem = NULL; | ||
| 37 | if (unlikely(mem == NULL)) | ||
| 38 | return; | ||
| 39 | |||
| 40 | mutex_lock(&pfb->base.mutex); | ||
| 41 | while (!list_empty(&mem->regions)) { | 34 | while (!list_empty(&mem->regions)) { |
| 42 | this = list_first_entry(&mem->regions, typeof(*this), rl_entry); | 35 | this = list_first_entry(&mem->regions, typeof(*this), rl_entry); |
| 43 | 36 | ||
| @@ -46,6 +39,19 @@ nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) | |||
| 46 | } | 39 | } |
| 47 | 40 | ||
| 48 | nouveau_mm_free(&pfb->tags, &mem->tag); | 41 | nouveau_mm_free(&pfb->tags, &mem->tag); |
| 42 | } | ||
| 43 | |||
| 44 | void | ||
| 45 | nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) | ||
| 46 | { | ||
| 47 | struct nouveau_mem *mem = *pmem; | ||
| 48 | |||
| 49 | *pmem = NULL; | ||
| 50 | if (unlikely(mem == NULL)) | ||
| 51 | return; | ||
| 52 | |||
| 53 | mutex_lock(&pfb->base.mutex); | ||
| 54 | __nv50_ram_put(pfb, mem); | ||
| 49 | mutex_unlock(&pfb->base.mutex); | 55 | mutex_unlock(&pfb->base.mutex); |
| 50 | 56 | ||
| 51 | kfree(mem); | 57 | kfree(mem); |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c index 9c3634acbb9d..cf97c4de4a6b 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c | |||
| @@ -33,11 +33,19 @@ void | |||
| 33 | nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) | 33 | nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) |
| 34 | { | 34 | { |
| 35 | struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb); | 35 | struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb); |
| 36 | struct nouveau_mem *mem = *pmem; | ||
| 36 | 37 | ||
| 37 | if ((*pmem)->tag) | 38 | *pmem = NULL; |
| 38 | ltcg->tags_free(ltcg, &(*pmem)->tag); | 39 | if (unlikely(mem == NULL)) |
| 40 | return; | ||
| 39 | 41 | ||
| 40 | nv50_ram_put(pfb, pmem); | 42 | mutex_lock(&pfb->base.mutex); |
| 43 | if (mem->tag) | ||
| 44 | ltcg->tags_free(ltcg, &mem->tag); | ||
| 45 | __nv50_ram_put(pfb, mem); | ||
| 46 | mutex_unlock(&pfb->base.mutex); | ||
| 47 | |||
| 48 | kfree(mem); | ||
| 41 | } | 49 | } |
| 42 | 50 | ||
| 43 | int | 51 | int |
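
Both ram_put implementations now follow a common split: a bare __nv50_ram_put() that assumes the caller already holds the fb mutex, plus wrappers that take the lock once, do their type-specific extras (nvc0 frees its LTC tags under the same lock), and free the node. A distilled userspace version of the pattern, with a pthread mutex standing in for the kernel one and the locked helper renamed, since double-underscore names are reserved in user code:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t fb_mutex = PTHREAD_MUTEX_INITIALIZER;

    struct mem { int tag; };

    /* Caller must hold fb_mutex; mirrors __nv50_ram_put(). */
    static void ram_put_locked(struct mem *mem)
    {
        (void)mem;
        /* ...return regions and tags to the allocator... */
    }

    static void ram_put(struct mem **pmem)
    {
        struct mem *mem = *pmem;

        *pmem = NULL;
        if (!mem)
            return;

        pthread_mutex_lock(&fb_mutex);
        /* a variant can release per-type extras here, under the same lock */
        ram_put_locked(mem);
        pthread_mutex_unlock(&fb_mutex);

        free(mem);
    }

    int main(void)
    {
        struct mem *m = calloc(1, sizeof(*m));
        ram_put(&m);
        printf("mem=%p\n", (void *)m);  /* (nil): pointer cleared for the caller */
        return 0;
    }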
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c index bf489dcf46e2..c4c1d415e7fe 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c | |||
| @@ -103,7 +103,7 @@ nv50_gpio_intr(struct nouveau_subdev *subdev) | |||
| 103 | int i; | 103 | int i; |
| 104 | 104 | ||
| 105 | intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050); | 105 | intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050); |
| 106 | if (nv_device(priv)->chipset >= 0x90) | 106 | if (nv_device(priv)->chipset > 0x92) |
| 107 | intr1 = nv_rd32(priv, 0xe074) & nv_rd32(priv, 0xe070); | 107 | intr1 = nv_rd32(priv, 0xe074) & nv_rd32(priv, 0xe070); |
| 108 | 108 | ||
| 109 | hi = (intr0 & 0x0000ffff) | (intr1 << 16); | 109 | hi = (intr0 & 0x0000ffff) | (intr1 << 16); |
| @@ -115,7 +115,7 @@ nv50_gpio_intr(struct nouveau_subdev *subdev) | |||
| 115 | } | 115 | } |
| 116 | 116 | ||
| 117 | nv_wr32(priv, 0xe054, intr0); | 117 | nv_wr32(priv, 0xe054, intr0); |
| 118 | if (nv_device(priv)->chipset >= 0x90) | 118 | if (nv_device(priv)->chipset > 0x92) |
| 119 | nv_wr32(priv, 0xe074, intr1); | 119 | nv_wr32(priv, 0xe074, intr1); |
| 120 | } | 120 | } |
| 121 | 121 | ||
| @@ -146,7 +146,7 @@ nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
| 146 | int ret; | 146 | int ret; |
| 147 | 147 | ||
| 148 | ret = nouveau_gpio_create(parent, engine, oclass, | 148 | ret = nouveau_gpio_create(parent, engine, oclass, |
| 149 | nv_device(parent)->chipset >= 0x90 ? 32 : 16, | 149 | nv_device(parent)->chipset > 0x92 ? 32 : 16, |
| 150 | &priv); | 150 | &priv); |
| 151 | *pobject = nv_object(priv); | 151 | *pobject = nv_object(priv); |
| 152 | if (ret) | 152 | if (ret) |
| @@ -182,7 +182,7 @@ nv50_gpio_init(struct nouveau_object *object) | |||
| 182 | /* disable, and ack any pending gpio interrupts */ | 182 | /* disable, and ack any pending gpio interrupts */ |
| 183 | nv_wr32(priv, 0xe050, 0x00000000); | 183 | nv_wr32(priv, 0xe050, 0x00000000); |
| 184 | nv_wr32(priv, 0xe054, 0xffffffff); | 184 | nv_wr32(priv, 0xe054, 0xffffffff); |
| 185 | if (nv_device(priv)->chipset >= 0x90) { | 185 | if (nv_device(priv)->chipset > 0x92) { |
| 186 | nv_wr32(priv, 0xe070, 0x00000000); | 186 | nv_wr32(priv, 0xe070, 0x00000000); |
| 187 | nv_wr32(priv, 0xe074, 0xffffffff); | 187 | nv_wr32(priv, 0xe074, 0xffffffff); |
| 188 | } | 188 | } |
| @@ -195,7 +195,7 @@ nv50_gpio_fini(struct nouveau_object *object, bool suspend) | |||
| 195 | { | 195 | { |
| 196 | struct nv50_gpio_priv *priv = (void *)object; | 196 | struct nv50_gpio_priv *priv = (void *)object; |
| 197 | nv_wr32(priv, 0xe050, 0x00000000); | 197 | nv_wr32(priv, 0xe050, 0x00000000); |
| 198 | if (nv_device(priv)->chipset >= 0x90) | 198 | if (nv_device(priv)->chipset > 0x92) |
| 199 | nv_wr32(priv, 0xe070, 0x00000000); | 199 | nv_wr32(priv, 0xe070, 0x00000000); |
| 200 | return nouveau_gpio_fini(&priv->base, suspend); | 200 | return nouveau_gpio_fini(&priv->base, suspend); |
| 201 | } | 201 | } |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c index 0cb322a5e72c..f25fc5fc7dd1 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c | |||
| @@ -41,7 +41,7 @@ nv50_mc_intr[] = { | |||
| 41 | { 0x04000000, NVDEV_ENGINE_DISP }, | 41 | { 0x04000000, NVDEV_ENGINE_DISP }, |
| 42 | { 0x10000000, NVDEV_SUBDEV_BUS }, | 42 | { 0x10000000, NVDEV_SUBDEV_BUS }, |
| 43 | { 0x80000000, NVDEV_ENGINE_SW }, | 43 | { 0x80000000, NVDEV_ENGINE_SW }, |
| 44 | { 0x0000d101, NVDEV_SUBDEV_FB }, | 44 | { 0x0002d101, NVDEV_SUBDEV_FB }, |
| 45 | {}, | 45 | {}, |
| 46 | }; | 46 | }; |
| 47 | 47 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c index 67fcb6c852ac..ef3133e7575c 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c | |||
| @@ -361,7 +361,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, | |||
| 361 | 361 | ||
| 362 | INIT_LIST_HEAD(&vm->pgd_list); | 362 | INIT_LIST_HEAD(&vm->pgd_list); |
| 363 | vm->vmm = vmm; | 363 | vm->vmm = vmm; |
| 364 | vm->refcount = 1; | 364 | kref_init(&vm->refcount); |
| 365 | vm->fpde = offset >> (vmm->pgt_bits + 12); | 365 | vm->fpde = offset >> (vmm->pgt_bits + 12); |
| 366 | vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12); | 366 | vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12); |
| 367 | 367 | ||
| @@ -441,8 +441,9 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd) | |||
| 441 | } | 441 | } |
| 442 | 442 | ||
| 443 | static void | 443 | static void |
| 444 | nouveau_vm_del(struct nouveau_vm *vm) | 444 | nouveau_vm_del(struct kref *kref) |
| 445 | { | 445 | { |
| 446 | struct nouveau_vm *vm = container_of(kref, typeof(*vm), refcount); | ||
| 446 | struct nouveau_vm_pgd *vpgd, *tmp; | 447 | struct nouveau_vm_pgd *vpgd, *tmp; |
| 447 | 448 | ||
| 448 | list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { | 449 | list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { |
| @@ -458,27 +459,19 @@ int | |||
| 458 | nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr, | 459 | nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr, |
| 459 | struct nouveau_gpuobj *pgd) | 460 | struct nouveau_gpuobj *pgd) |
| 460 | { | 461 | { |
| 461 | struct nouveau_vm *vm; | 462 | if (ref) { |
| 462 | int ret; | 463 | int ret = nouveau_vm_link(ref, pgd); |
| 463 | |||
| 464 | vm = ref; | ||
| 465 | if (vm) { | ||
| 466 | ret = nouveau_vm_link(vm, pgd); | ||
| 467 | if (ret) | 464 | if (ret) |
| 468 | return ret; | 465 | return ret; |
| 469 | 466 | ||
| 470 | vm->refcount++; | 467 | kref_get(&ref->refcount); |
| 471 | } | 468 | } |
| 472 | 469 | ||
| 473 | vm = *ptr; | 470 | if (*ptr) { |
| 474 | *ptr = ref; | 471 | nouveau_vm_unlink(*ptr, pgd); |
| 475 | 472 | kref_put(&(*ptr)->refcount, nouveau_vm_del); | |
| 476 | if (vm) { | ||
| 477 | nouveau_vm_unlink(vm, pgd); | ||
| 478 | |||
| 479 | if (--vm->refcount == 0) | ||
| 480 | nouveau_vm_del(vm); | ||
| 481 | } | 473 | } |
| 482 | 474 | ||
| 475 | *ptr = ref; | ||
| 483 | return 0; | 476 | return 0; |
| 484 | } | 477 | } |
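
The vm refcounting conversion swaps a bare integer for a kref, which ties the final decrement to a release callback and removes the open-coded if (--refcount == 0) logic from nouveau_vm_ref(). A userspace miniature of the same protocol; a plain int stands in for the kernel's atomic kref:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct kref { int count; };     /* the real kref is atomic */

    static void kref_init(struct kref *k) { k->count = 1; }
    static void kref_get(struct kref *k)  { k->count++; }
    static void kref_put(struct kref *k, void (*release)(struct kref *))
    {
        if (--k->count == 0)
            release(k);             /* the last put runs the destructor */
    }

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct vm { struct kref refcount; };

    static void vm_del(struct kref *kref)
    {
        struct vm *vm = container_of(kref, struct vm, refcount);
        printf("freeing vm %p\n", (void *)vm);
        free(vm);
    }

    int main(void)
    {
        struct vm *vm = malloc(sizeof(*vm));
        kref_init(&vm->refcount);
        kref_get(&vm->refcount);             /* second owner takes a reference */
        kref_put(&vm->refcount, vm_del);     /* first owner drops: vm survives */
        kref_put(&vm->refcount, vm_del);     /* last owner drops: vm_del runs */
        return 0;
    }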
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 4b1afb131380..af20fba3a1a4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
| @@ -148,6 +148,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) | |||
| 148 | 148 | ||
| 149 | if (unlikely(nvbo->gem)) | 149 | if (unlikely(nvbo->gem)) |
| 150 | DRM_ERROR("bo %p still attached to GEM object\n", bo); | 150 | DRM_ERROR("bo %p still attached to GEM object\n", bo); |
| 151 | WARN_ON(nvbo->pin_refcnt > 0); | ||
| 151 | nv10_bo_put_tile_region(dev, nvbo->tile, NULL); | 152 | nv10_bo_put_tile_region(dev, nvbo->tile, NULL); |
| 152 | kfree(nvbo); | 153 | kfree(nvbo); |
| 153 | } | 154 | } |
| @@ -197,6 +198,17 @@ nouveau_bo_new(struct drm_device *dev, int size, int align, | |||
| 197 | size_t acc_size; | 198 | size_t acc_size; |
| 198 | int ret; | 199 | int ret; |
| 199 | int type = ttm_bo_type_device; | 200 | int type = ttm_bo_type_device; |
| 201 | int lpg_shift = 12; | ||
| 202 | int max_size; | ||
| 203 | |||
| 204 | if (drm->client.base.vm) | ||
| 205 | lpg_shift = drm->client.base.vm->vmm->lpg_shift; | ||
| 206 | max_size = INT_MAX & ~((1 << lpg_shift) - 1); | ||
| 207 | |||
| 208 | if (size <= 0 || size > max_size) { | ||
| 209 | nv_warn(drm, "skipped size %x\n", (u32)size); | ||
| 210 | return -EINVAL; | ||
| 211 | } | ||
| 200 | 212 | ||
| 201 | if (sg) | 213 | if (sg) |
| 202 | type = ttm_bo_type_sg; | 214 | type = ttm_bo_type_sg; |
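
The new size guard in nouveau_bo_new() computes max_size as INT_MAX & ~((1 << lpg_shift) - 1), i.e. INT_MAX rounded down to a large-page multiple, so any accepted size can still be padded up to large-page granularity without overflowing a signed int. The mask arithmetic in isolation; the shift value is an example, not a claim about any particular chipset:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        int lpg_shift = 17;  /* e.g. 128 KiB large pages */
        int max_size  = INT_MAX & ~((1 << lpg_shift) - 1);

        printf("max_size = 0x%x\n", max_size);                       /* 0x7ffe0000 */
        printf("aligned  = %d\n", max_size % (1 << lpg_shift) == 0); /* 1 */
        return 0;
    }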
| @@ -340,13 +352,15 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) | |||
| 340 | { | 352 | { |
| 341 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); | 353 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
| 342 | struct ttm_buffer_object *bo = &nvbo->bo; | 354 | struct ttm_buffer_object *bo = &nvbo->bo; |
| 343 | int ret; | 355 | int ret, ref; |
| 344 | 356 | ||
| 345 | ret = ttm_bo_reserve(bo, false, false, false, 0); | 357 | ret = ttm_bo_reserve(bo, false, false, false, 0); |
| 346 | if (ret) | 358 | if (ret) |
| 347 | return ret; | 359 | return ret; |
| 348 | 360 | ||
| 349 | if (--nvbo->pin_refcnt) | 361 | ref = --nvbo->pin_refcnt; |
| 362 | WARN_ON_ONCE(ref < 0); | ||
| 363 | if (ref) | ||
| 350 | goto out; | 364 | goto out; |
| 351 | 365 | ||
| 352 | nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); | 366 | nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); |
| @@ -578,7 +592,7 @@ nve0_bo_move_init(struct nouveau_channel *chan, u32 handle) | |||
| 578 | int ret = RING_SPACE(chan, 2); | 592 | int ret = RING_SPACE(chan, 2); |
| 579 | if (ret == 0) { | 593 | if (ret == 0) { |
| 580 | BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); | 594 | BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); |
| 581 | OUT_RING (chan, handle); | 595 | OUT_RING (chan, handle & 0x0000ffff); |
| 582 | FIRE_RING (chan); | 596 | FIRE_RING (chan); |
| 583 | } | 597 | } |
| 584 | return ret; | 598 | return ret; |
| @@ -973,7 +987,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, | |||
| 973 | struct ttm_mem_reg *old_mem = &bo->mem; | 987 | struct ttm_mem_reg *old_mem = &bo->mem; |
| 974 | int ret; | 988 | int ret; |
| 975 | 989 | ||
| 976 | mutex_lock(&chan->cli->mutex); | 990 | mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING); |
| 977 | 991 | ||
| 978 | /* create temporary vmas for the transfer and attach them to the | 992 | /* create temporary vmas for the transfer and attach them to the |
| 979 | * old nouveau_mem node, these will get cleaned up after ttm has | 993 | * old nouveau_mem node, these will get cleaned up after ttm has |
| @@ -1014,7 +1028,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) | |||
| 1014 | struct ttm_mem_reg *, struct ttm_mem_reg *); | 1028 | struct ttm_mem_reg *, struct ttm_mem_reg *); |
| 1015 | int (*init)(struct nouveau_channel *, u32 handle); | 1029 | int (*init)(struct nouveau_channel *, u32 handle); |
| 1016 | } _methods[] = { | 1030 | } _methods[] = { |
| 1017 | { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init }, | 1031 | { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init }, |
| 1018 | { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init }, | 1032 | { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init }, |
| 1019 | { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init }, | 1033 | { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init }, |
| 1020 | { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init }, | 1034 | { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init }, |
| @@ -1034,7 +1048,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) | |||
| 1034 | struct nouveau_channel *chan; | 1048 | struct nouveau_channel *chan; |
| 1035 | u32 handle = (mthd->engine << 16) | mthd->oclass; | 1049 | u32 handle = (mthd->engine << 16) | mthd->oclass; |
| 1036 | 1050 | ||
| 1037 | if (mthd->init == nve0_bo_move_init) | 1051 | if (mthd->engine) |
| 1038 | chan = drm->cechan; | 1052 | chan = drm->cechan; |
| 1039 | else | 1053 | else |
| 1040 | chan = drm->channel; | 1054 | chan = drm->channel; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 708b2d1c0037..907d20ef6d4d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
| @@ -138,7 +138,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev, | |||
| 138 | { | 138 | { |
| 139 | struct nouveau_framebuffer *nouveau_fb; | 139 | struct nouveau_framebuffer *nouveau_fb; |
| 140 | struct drm_gem_object *gem; | 140 | struct drm_gem_object *gem; |
| 141 | int ret; | 141 | int ret = -ENOMEM; |
| 142 | 142 | ||
| 143 | gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); | 143 | gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); |
| 144 | if (!gem) | 144 | if (!gem) |
| @@ -146,15 +146,19 @@ nouveau_user_framebuffer_create(struct drm_device *dev, | |||
| 146 | 146 | ||
| 147 | nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL); | 147 | nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL); |
| 148 | if (!nouveau_fb) | 148 | if (!nouveau_fb) |
| 149 | return ERR_PTR(-ENOMEM); | 149 | goto err_unref; |
| 150 | 150 | ||
| 151 | ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem)); | 151 | ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem)); |
| 152 | if (ret) { | 152 | if (ret) |
| 153 | drm_gem_object_unreference(gem); | 153 | goto err; |
| 154 | return ERR_PTR(ret); | ||
| 155 | } | ||
| 156 | 154 | ||
| 157 | return &nouveau_fb->base; | 155 | return &nouveau_fb->base; |
| 156 | |||
| 157 | err: | ||
| 158 | kfree(nouveau_fb); | ||
| 159 | err_unref: | ||
| 160 | drm_gem_object_unreference(gem); | ||
| 161 | return ERR_PTR(ret); | ||
| 158 | } | 162 | } |
| 159 | 163 | ||
| 160 | static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { | 164 | static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { |
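
The framebuffer-create rework above turns two ad-hoc failure branches into the usual reverse-order goto ladder, which also fixes the original leak of nouveau_fb when nouveau_framebuffer_init() failed (the struct was never freed on that path). The skeleton of the idiom, with the lookup/init steps reduced to stand-ins:

    #include <stdio.h>
    #include <stdlib.h>

    static void *lookup(void)   { return malloc(1); }
    static void  unref(void *o) { free(o); }
    static int   init(void *fb) { (void)fb; return 0; }

    static void *create(void)
    {
        void *obj, *fb;

        obj = lookup();
        if (!obj)
            return NULL;

        fb = calloc(1, 64);
        if (!fb)
            goto err_unref;     /* undo only what has succeeded so far */

        if (init(fb))
            goto err_free;

        return fb;              /* on success, fb owns the obj reference */

    err_free:
        free(fb);
    err_unref:
        unref(obj);
        return NULL;
    }

    int main(void)
    {
        void *fb = create();
        printf("fb=%p\n", fb);
        free(fb);               /* sketch only; obj's release is elided */
        return 0;
    }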
| @@ -524,9 +528,12 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 524 | struct nouveau_page_flip_state *s; | 528 | struct nouveau_page_flip_state *s; |
| 525 | struct nouveau_channel *chan = NULL; | 529 | struct nouveau_channel *chan = NULL; |
| 526 | struct nouveau_fence *fence; | 530 | struct nouveau_fence *fence; |
| 527 | struct list_head res; | 531 | struct ttm_validate_buffer resv[2] = { |
| 528 | struct ttm_validate_buffer res_val[2]; | 532 | { .bo = &old_bo->bo }, |
| 533 | { .bo = &new_bo->bo }, | ||
| 534 | }; | ||
| 529 | struct ww_acquire_ctx ticket; | 535 | struct ww_acquire_ctx ticket; |
| 536 | LIST_HEAD(res); | ||
| 530 | int ret; | 537 | int ret; |
| 531 | 538 | ||
| 532 | if (!drm->channel) | 539 | if (!drm->channel) |
| @@ -545,27 +552,19 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 545 | chan = drm->channel; | 552 | chan = drm->channel; |
| 546 | spin_unlock(&old_bo->bo.bdev->fence_lock); | 553 | spin_unlock(&old_bo->bo.bdev->fence_lock); |
| 547 | 554 | ||
| 548 | mutex_lock(&chan->cli->mutex); | ||
| 549 | |||
| 550 | if (new_bo != old_bo) { | 555 | if (new_bo != old_bo) { |
| 551 | ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); | 556 | ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); |
| 552 | if (likely(!ret)) { | 557 | if (ret) |
| 553 | res_val[0].bo = &old_bo->bo; | 558 | goto fail_free; |
| 554 | res_val[1].bo = &new_bo->bo; | ||
| 555 | INIT_LIST_HEAD(&res); | ||
| 556 | list_add_tail(&res_val[0].head, &res); | ||
| 557 | list_add_tail(&res_val[1].head, &res); | ||
| 558 | ret = ttm_eu_reserve_buffers(&ticket, &res); | ||
| 559 | if (ret) | ||
| 560 | nouveau_bo_unpin(new_bo); | ||
| 561 | } | ||
| 562 | } else | ||
| 563 | ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0); | ||
| 564 | 559 | ||
| 565 | if (ret) { | 560 | list_add(&resv[1].head, &res); |
| 566 | mutex_unlock(&chan->cli->mutex); | ||
| 567 | goto fail_free; | ||
| 568 | } | 561 | } |
| 562 | list_add(&resv[0].head, &res); | ||
| 563 | |||
| 564 | mutex_lock(&chan->cli->mutex); | ||
| 565 | ret = ttm_eu_reserve_buffers(&ticket, &res); | ||
| 566 | if (ret) | ||
| 567 | goto fail_unpin; | ||
| 569 | 568 | ||
| 570 | /* Initialize a page flip struct */ | 569 | /* Initialize a page flip struct */ |
| 571 | *s = (struct nouveau_page_flip_state) | 570 | *s = (struct nouveau_page_flip_state) |
| @@ -576,10 +575,8 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 576 | /* Emit a page flip */ | 575 | /* Emit a page flip */ |
| 577 | if (nv_device(drm->device)->card_type >= NV_50) { | 576 | if (nv_device(drm->device)->card_type >= NV_50) { |
| 578 | ret = nv50_display_flip_next(crtc, fb, chan, 0); | 577 | ret = nv50_display_flip_next(crtc, fb, chan, 0); |
| 579 | if (ret) { | 578 | if (ret) |
| 580 | mutex_unlock(&chan->cli->mutex); | ||
| 581 | goto fail_unreserve; | 579 | goto fail_unreserve; |
| 582 | } | ||
| 583 | } | 580 | } |
| 584 | 581 | ||
| 585 | ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); | 582 | ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); |
| @@ -590,22 +587,18 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 590 | /* Update the crtc struct and cleanup */ | 587 | /* Update the crtc struct and cleanup */ |
| 591 | crtc->fb = fb; | 588 | crtc->fb = fb; |
| 592 | 589 | ||
| 593 | if (old_bo != new_bo) { | 590 | ttm_eu_fence_buffer_objects(&ticket, &res, fence); |
| 594 | ttm_eu_fence_buffer_objects(&ticket, &res, fence); | 591 | if (old_bo != new_bo) |
| 595 | nouveau_bo_unpin(old_bo); | 592 | nouveau_bo_unpin(old_bo); |
| 596 | } else { | ||
| 597 | nouveau_bo_fence(new_bo, fence); | ||
| 598 | ttm_bo_unreserve(&new_bo->bo); | ||
| 599 | } | ||
| 600 | nouveau_fence_unref(&fence); | 593 | nouveau_fence_unref(&fence); |
| 601 | return 0; | 594 | return 0; |
| 602 | 595 | ||
| 603 | fail_unreserve: | 596 | fail_unreserve: |
| 604 | if (old_bo != new_bo) { | 597 | ttm_eu_backoff_reservation(&ticket, &res); |
| 605 | ttm_eu_backoff_reservation(&ticket, &res); | 598 | fail_unpin: |
| 599 | mutex_unlock(&chan->cli->mutex); | ||
| 600 | if (old_bo != new_bo) | ||
| 606 | nouveau_bo_unpin(new_bo); | 601 | nouveau_bo_unpin(new_bo); |
| 607 | } else | ||
| 608 | ttm_bo_unreserve(&new_bo->bo); | ||
| 609 | fail_free: | 602 | fail_free: |
| 610 | kfree(s); | 603 | kfree(s); |
| 611 | return ret; | 604 | return ret; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 218a4b522fe5..61972668fd05 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
| @@ -192,6 +192,18 @@ nouveau_accel_init(struct nouveau_drm *drm) | |||
| 192 | 192 | ||
| 193 | arg0 = NVE0_CHANNEL_IND_ENGINE_GR; | 193 | arg0 = NVE0_CHANNEL_IND_ENGINE_GR; |
| 194 | arg1 = 1; | 194 | arg1 = 1; |
| 195 | } else | ||
| 196 | if (device->chipset >= 0xa3 && | ||
| 197 | device->chipset != 0xaa && | ||
| 198 | device->chipset != 0xac) { | ||
| 199 | ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, | ||
| 200 | NVDRM_CHAN + 1, NvDmaFB, NvDmaTT, | ||
| 201 | &drm->cechan); | ||
| 202 | if (ret) | ||
| 203 | NV_ERROR(drm, "failed to create ce channel, %d\n", ret); | ||
| 204 | |||
| 205 | arg0 = NvDmaFB; | ||
| 206 | arg1 = NvDmaTT; | ||
| 195 | } else { | 207 | } else { |
| 196 | arg0 = NvDmaFB; | 208 | arg0 = NvDmaFB; |
| 197 | arg1 = NvDmaTT; | 209 | arg1 = NvDmaTT; |
| @@ -284,8 +296,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev, | |||
| 284 | return 0; | 296 | return 0; |
| 285 | } | 297 | } |
| 286 | 298 | ||
| 287 | static struct lock_class_key drm_client_lock_class_key; | ||
| 288 | |||
| 289 | static int | 299 | static int |
| 290 | nouveau_drm_load(struct drm_device *dev, unsigned long flags) | 300 | nouveau_drm_load(struct drm_device *dev, unsigned long flags) |
| 291 | { | 301 | { |
| @@ -297,7 +307,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | |||
| 297 | ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm); | 307 | ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm); |
| 298 | if (ret) | 308 | if (ret) |
| 299 | return ret; | 309 | return ret; |
| 300 | lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key); | ||
| 301 | 310 | ||
| 302 | dev->dev_private = drm; | 311 | dev->dev_private = drm; |
| 303 | drm->dev = dev; | 312 | drm->dev = dev; |
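Two independent changes to nouveau_drm.c: accel init now creates a separate channel for the copy engine on nva3-and-newer chips (the nvaa/nvac IGPs are excluded) so copies no longer share the graphics channel, and the dedicated lockdep class key for the client mutex is dropped as part of the same locking rework. The chipset gate, written out as a predicate (helper name illustrative):

    /* Matches the test added above: nva3+ get a copy-engine channel,
     * nvaa/nvac fall through to the single-channel path. */
    static bool nouveau_want_cechan(u32 chipset)
    {
            return chipset >= 0xa3 && chipset != 0xaa && chipset != 0xac;
    }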
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 9352010030e9..8f6d63d7edd3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
| @@ -385,6 +385,7 @@ out_unlock: | |||
| 385 | mutex_unlock(&dev->struct_mutex); | 385 | mutex_unlock(&dev->struct_mutex); |
| 386 | if (chan) | 386 | if (chan) |
| 387 | nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma); | 387 | nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma); |
| 388 | nouveau_bo_unmap(nvbo); | ||
| 388 | out_unpin: | 389 | out_unpin: |
| 389 | nouveau_bo_unpin(nvbo); | 390 | nouveau_bo_unpin(nvbo); |
| 390 | out_unref: | 391 | out_unref: |
| @@ -397,7 +398,8 @@ void | |||
| 397 | nouveau_fbcon_output_poll_changed(struct drm_device *dev) | 398 | nouveau_fbcon_output_poll_changed(struct drm_device *dev) |
| 398 | { | 399 | { |
| 399 | struct nouveau_drm *drm = nouveau_drm(dev); | 400 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 400 | drm_fb_helper_hotplug_event(&drm->fbcon->helper); | 401 | if (drm->fbcon) |
| 402 | drm_fb_helper_hotplug_event(&drm->fbcon->helper); | ||
| 401 | } | 403 | } |
| 402 | 404 | ||
| 403 | static int | 405 | static int |
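Two small hardenings in nouveau_fbcon.c: the error path now unmaps the BO it had mapped before unpinning it, and the output-poll handler tolerates drm->fbcon being NULL, since fbcon setup may have failed or not run yet. The guard, as in the hunk:

    if (drm->fbcon) /* NULL when fbcon init failed or never ran */
            drm_fb_helper_hotplug_event(&drm->fbcon->helper);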
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 1680d9187bab..be3149932c2d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c | |||
| @@ -143,7 +143,7 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan) | |||
| 143 | int ret; | 143 | int ret; |
| 144 | 144 | ||
| 145 | fence->channel = chan; | 145 | fence->channel = chan; |
| 146 | fence->timeout = jiffies + (3 * DRM_HZ); | 146 | fence->timeout = jiffies + (15 * DRM_HZ); |
| 147 | fence->sequence = ++fctx->sequence; | 147 | fence->sequence = ++fctx->sequence; |
| 148 | 148 | ||
| 149 | ret = fctx->emit(fence); | 149 | ret = fctx->emit(fence); |
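The fence change simply arms each fence for 15 seconds of wall-clock time instead of 3, giving slow channels more headroom before a timeout is declared. Spelled out, assuming DRM_HZ equals the kernel tick rate HZ:

    fence->timeout = jiffies + (15 * DRM_HZ);  /* now + 15 s, in ticks */
    /* Expiry is later tested with time_after(jiffies, fence->timeout),
     * which handles jiffies wraparound. */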
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index e72d09c068a8..830cb7bad922 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
| @@ -50,12 +50,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem) | |||
| 50 | return; | 50 | return; |
| 51 | nvbo->gem = NULL; | 51 | nvbo->gem = NULL; |
| 52 | 52 | ||
| 53 | /* Lockdep hates you for doing reserve with gem object lock held */ | ||
| 54 | if (WARN_ON_ONCE(nvbo->pin_refcnt)) { | ||
| 55 | nvbo->pin_refcnt = 1; | ||
| 56 | nouveau_bo_unpin(nvbo); | ||
| 57 | } | ||
| 58 | |||
| 59 | if (gem->import_attach) | 53 | if (gem->import_attach) |
| 60 | drm_prime_gem_destroy(gem, nvbo->bo.sg); | 54 | drm_prime_gem_destroy(gem, nvbo->bo.sg); |
| 61 | 55 | ||
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c index 8e47a9bae8c3..22aa9963ea6f 100644 --- a/drivers/gpu/drm/nouveau/nv17_fence.c +++ b/drivers/gpu/drm/nouveau/nv17_fence.c | |||
| @@ -76,7 +76,7 @@ nv17_fence_context_new(struct nouveau_channel *chan) | |||
| 76 | struct ttm_mem_reg *mem = &priv->bo->bo.mem; | 76 | struct ttm_mem_reg *mem = &priv->bo->bo.mem; |
| 77 | struct nouveau_object *object; | 77 | struct nouveau_object *object; |
| 78 | u32 start = mem->start * PAGE_SIZE; | 78 | u32 start = mem->start * PAGE_SIZE; |
| 79 | u32 limit = mem->start + mem->size - 1; | 79 | u32 limit = start + mem->size - 1; |
| 80 | int ret = 0; | 80 | int ret = 0; |
| 81 | 81 | ||
| 82 | fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); | 82 | fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); |
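The nv17_fence.c change is a units fix: in TTM here, mem->start counts pages while mem->size counts bytes, so the old limit added a page number to a byte count. A worked example assuming 4 KiB pages and a one-page semaphore BO placed at page 2:

    u32 start = mem->start * PAGE_SIZE;    /* 2 * 0x1000 = 0x2000 */
    /* old: limit = mem->start + mem->size - 1 = 2 + 0x1000 - 1 = 0x1001,
     *      which lies below start and yields a bogus DMA window;
     * new: */
    u32 limit = start + mem->size - 1;     /* 0x2000 + 0x1000 - 1 = 0x2fff */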
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 54dc6355b0c2..8b40a36c1b57 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
| @@ -355,6 +355,7 @@ struct nv50_oimm { | |||
| 355 | 355 | ||
| 356 | struct nv50_head { | 356 | struct nv50_head { |
| 357 | struct nouveau_crtc base; | 357 | struct nouveau_crtc base; |
| 358 | struct nouveau_bo *image; | ||
| 358 | struct nv50_curs curs; | 359 | struct nv50_curs curs; |
| 359 | struct nv50_sync sync; | 360 | struct nv50_sync sync; |
| 360 | struct nv50_ovly ovly; | 361 | struct nv50_ovly ovly; |
| @@ -517,9 +518,10 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 517 | { | 518 | { |
| 518 | struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); | 519 | struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); |
| 519 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 520 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
| 521 | struct nv50_head *head = nv50_head(crtc); | ||
| 520 | struct nv50_sync *sync = nv50_sync(crtc); | 522 | struct nv50_sync *sync = nv50_sync(crtc); |
| 521 | int head = nv_crtc->index, ret; | ||
| 522 | u32 *push; | 523 | u32 *push; |
| 524 | int ret; | ||
| 523 | 525 | ||
| 524 | swap_interval <<= 4; | 526 | swap_interval <<= 4; |
| 525 | if (swap_interval == 0) | 527 | if (swap_interval == 0) |
| @@ -537,7 +539,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 537 | return ret; | 539 | return ret; |
| 538 | 540 | ||
| 539 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2); | 541 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2); |
| 540 | OUT_RING (chan, NvEvoSema0 + head); | 542 | OUT_RING (chan, NvEvoSema0 + nv_crtc->index); |
| 541 | OUT_RING (chan, sync->addr ^ 0x10); | 543 | OUT_RING (chan, sync->addr ^ 0x10); |
| 542 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1); | 544 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1); |
| 543 | OUT_RING (chan, sync->data + 1); | 545 | OUT_RING (chan, sync->data + 1); |
| @@ -546,7 +548,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 546 | OUT_RING (chan, sync->data); | 548 | OUT_RING (chan, sync->data); |
| 547 | } else | 549 | } else |
| 548 | if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { | 550 | if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { |
| 549 | u64 addr = nv84_fence_crtc(chan, head) + sync->addr; | 551 | u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr; |
| 550 | ret = RING_SPACE(chan, 12); | 552 | ret = RING_SPACE(chan, 12); |
| 551 | if (ret) | 553 | if (ret) |
| 552 | return ret; | 554 | return ret; |
| @@ -565,7 +567,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 565 | OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL); | 567 | OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL); |
| 566 | } else | 568 | } else |
| 567 | if (chan) { | 569 | if (chan) { |
| 568 | u64 addr = nv84_fence_crtc(chan, head) + sync->addr; | 570 | u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr; |
| 569 | ret = RING_SPACE(chan, 10); | 571 | ret = RING_SPACE(chan, 10); |
| 570 | if (ret) | 572 | if (ret) |
| 571 | return ret; | 573 | return ret; |
| @@ -630,6 +632,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
| 630 | evo_mthd(push, 0x0080, 1); | 632 | evo_mthd(push, 0x0080, 1); |
| 631 | evo_data(push, 0x00000000); | 633 | evo_data(push, 0x00000000); |
| 632 | evo_kick(push, sync); | 634 | evo_kick(push, sync); |
| 635 | |||
| 636 | nouveau_bo_ref(nv_fb->nvbo, &head->image); | ||
| 633 | return 0; | 637 | return 0; |
| 634 | } | 638 | } |
| 635 | 639 | ||
| @@ -1038,18 +1042,17 @@ static int | |||
| 1038 | nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) | 1042 | nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) |
| 1039 | { | 1043 | { |
| 1040 | struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); | 1044 | struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); |
| 1045 | struct nv50_head *head = nv50_head(crtc); | ||
| 1041 | int ret; | 1046 | int ret; |
| 1042 | 1047 | ||
| 1043 | ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); | 1048 | ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); |
| 1044 | if (ret) | 1049 | if (ret == 0) { |
| 1045 | return ret; | 1050 | if (head->image) |
| 1046 | 1051 | nouveau_bo_unpin(head->image); | |
| 1047 | if (old_fb) { | 1052 | nouveau_bo_ref(nvfb->nvbo, &head->image); |
| 1048 | nvfb = nouveau_framebuffer(old_fb); | ||
| 1049 | nouveau_bo_unpin(nvfb->nvbo); | ||
| 1050 | } | 1053 | } |
| 1051 | 1054 | ||
| 1052 | return 0; | 1055 | return ret; |
| 1053 | } | 1056 | } |
| 1054 | 1057 | ||
| 1055 | static int | 1058 | static int |
| @@ -1198,6 +1201,15 @@ nv50_crtc_lut_load(struct drm_crtc *crtc) | |||
| 1198 | } | 1201 | } |
| 1199 | } | 1202 | } |
| 1200 | 1203 | ||
| 1204 | static void | ||
| 1205 | nv50_crtc_disable(struct drm_crtc *crtc) | ||
| 1206 | { | ||
| 1207 | struct nv50_head *head = nv50_head(crtc); | ||
| 1208 | if (head->image) | ||
| 1209 | nouveau_bo_unpin(head->image); | ||
| 1210 | nouveau_bo_ref(NULL, &head->image); | ||
| 1211 | } | ||
| 1212 | |||
| 1201 | static int | 1213 | static int |
| 1202 | nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | 1214 | nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, |
| 1203 | uint32_t handle, uint32_t width, uint32_t height) | 1215 | uint32_t handle, uint32_t width, uint32_t height) |
| @@ -1271,18 +1283,29 @@ nv50_crtc_destroy(struct drm_crtc *crtc) | |||
| 1271 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 1283 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
| 1272 | struct nv50_disp *disp = nv50_disp(crtc->dev); | 1284 | struct nv50_disp *disp = nv50_disp(crtc->dev); |
| 1273 | struct nv50_head *head = nv50_head(crtc); | 1285 | struct nv50_head *head = nv50_head(crtc); |
| 1286 | |||
| 1274 | nv50_dmac_destroy(disp->core, &head->ovly.base); | 1287 | nv50_dmac_destroy(disp->core, &head->ovly.base); |
| 1275 | nv50_pioc_destroy(disp->core, &head->oimm.base); | 1288 | nv50_pioc_destroy(disp->core, &head->oimm.base); |
| 1276 | nv50_dmac_destroy(disp->core, &head->sync.base); | 1289 | nv50_dmac_destroy(disp->core, &head->sync.base); |
| 1277 | nv50_pioc_destroy(disp->core, &head->curs.base); | 1290 | nv50_pioc_destroy(disp->core, &head->curs.base); |
| 1291 | |||
| 1292 | /*XXX: this shouldn't be necessary, but the core doesn't call | ||
| 1293 | * disconnect() during the cleanup paths | ||
| 1294 | */ | ||
| 1295 | if (head->image) | ||
| 1296 | nouveau_bo_unpin(head->image); | ||
| 1297 | nouveau_bo_ref(NULL, &head->image); | ||
| 1298 | |||
| 1278 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); | 1299 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); |
| 1279 | if (nv_crtc->cursor.nvbo) | 1300 | if (nv_crtc->cursor.nvbo) |
| 1280 | nouveau_bo_unpin(nv_crtc->cursor.nvbo); | 1301 | nouveau_bo_unpin(nv_crtc->cursor.nvbo); |
| 1281 | nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); | 1302 | nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); |
| 1303 | |||
| 1282 | nouveau_bo_unmap(nv_crtc->lut.nvbo); | 1304 | nouveau_bo_unmap(nv_crtc->lut.nvbo); |
| 1283 | if (nv_crtc->lut.nvbo) | 1305 | if (nv_crtc->lut.nvbo) |
| 1284 | nouveau_bo_unpin(nv_crtc->lut.nvbo); | 1306 | nouveau_bo_unpin(nv_crtc->lut.nvbo); |
| 1285 | nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); | 1307 | nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); |
| 1308 | |||
| 1286 | drm_crtc_cleanup(crtc); | 1309 | drm_crtc_cleanup(crtc); |
| 1287 | kfree(crtc); | 1310 | kfree(crtc); |
| 1288 | } | 1311 | } |
| @@ -1296,6 +1319,7 @@ static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = { | |||
| 1296 | .mode_set_base = nv50_crtc_mode_set_base, | 1319 | .mode_set_base = nv50_crtc_mode_set_base, |
| 1297 | .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic, | 1320 | .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic, |
| 1298 | .load_lut = nv50_crtc_lut_load, | 1321 | .load_lut = nv50_crtc_lut_load, |
| 1322 | .disable = nv50_crtc_disable, | ||
| 1299 | }; | 1323 | }; |
| 1300 | 1324 | ||
| 1301 | static const struct drm_crtc_funcs nv50_crtc_func = { | 1325 | static const struct drm_crtc_funcs nv50_crtc_func = { |
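The nv50_display.c hunks make scanout pinning explicit: each head keeps a reference to the BO it is actually scanning out (head->image), nv50_display_flip_next() and nv50_crtc_swap_fbs() move that reference to the new framebuffer's BO, and the new .disable helper plus the destroy path drop it. Unpinning therefore depends on what the hardware really displays, not on the old_fb argument. The swap, as a sketch assuming the caller has already pinned nvbo (helper name illustrative):

    /* Release the previous scanout BO and track the new one. */
    static void nv50_head_track_image(struct nv50_head *head,
                                      struct nouveau_bo *nvbo)
    {
            if (head->image)
                    nouveau_bo_unpin(head->image);  /* previous scanout */
            nouveau_bo_ref(nvbo, &head->image);     /* NULL clears the slot */
    }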
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c index f9701e567db8..0ee363840035 100644 --- a/drivers/gpu/drm/nouveau/nv50_fence.c +++ b/drivers/gpu/drm/nouveau/nv50_fence.c | |||
| @@ -39,6 +39,8 @@ nv50_fence_context_new(struct nouveau_channel *chan) | |||
| 39 | struct nv10_fence_chan *fctx; | 39 | struct nv10_fence_chan *fctx; |
| 40 | struct ttm_mem_reg *mem = &priv->bo->bo.mem; | 40 | struct ttm_mem_reg *mem = &priv->bo->bo.mem; |
| 41 | struct nouveau_object *object; | 41 | struct nouveau_object *object; |
| 42 | u32 start = mem->start * PAGE_SIZE; | ||
| 43 | u32 limit = start + mem->size - 1; | ||
| 42 | int ret, i; | 44 | int ret, i; |
| 43 | 45 | ||
| 44 | fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); | 46 | fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); |
| @@ -51,26 +53,28 @@ nv50_fence_context_new(struct nouveau_channel *chan) | |||
| 51 | fctx->base.sync = nv17_fence_sync; | 53 | fctx->base.sync = nv17_fence_sync; |
| 52 | 54 | ||
| 53 | ret = nouveau_object_new(nv_object(chan->cli), chan->handle, | 55 | ret = nouveau_object_new(nv_object(chan->cli), chan->handle, |
| 54 | NvSema, 0x0002, | 56 | NvSema, 0x003d, |
| 55 | &(struct nv_dma_class) { | 57 | &(struct nv_dma_class) { |
| 56 | .flags = NV_DMA_TARGET_VRAM | | 58 | .flags = NV_DMA_TARGET_VRAM | |
| 57 | NV_DMA_ACCESS_RDWR, | 59 | NV_DMA_ACCESS_RDWR, |
| 58 | .start = mem->start * PAGE_SIZE, | 60 | .start = start, |
| 59 | .limit = mem->size - 1, | 61 | .limit = limit, |
| 60 | }, sizeof(struct nv_dma_class), | 62 | }, sizeof(struct nv_dma_class), |
| 61 | &object); | 63 | &object); |
| 62 | 64 | ||
| 63 | /* dma objects for display sync channel semaphore blocks */ | 65 | /* dma objects for display sync channel semaphore blocks */ |
| 64 | for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) { | 66 | for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) { |
| 65 | struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i); | 67 | struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i); |
| 68 | u32 start = bo->bo.mem.start * PAGE_SIZE; | ||
| 69 | u32 limit = start + bo->bo.mem.size - 1; | ||
| 66 | 70 | ||
| 67 | ret = nouveau_object_new(nv_object(chan->cli), chan->handle, | 71 | ret = nouveau_object_new(nv_object(chan->cli), chan->handle, |
| 68 | NvEvoSema0 + i, 0x003d, | 72 | NvEvoSema0 + i, 0x003d, |
| 69 | &(struct nv_dma_class) { | 73 | &(struct nv_dma_class) { |
| 70 | .flags = NV_DMA_TARGET_VRAM | | 74 | .flags = NV_DMA_TARGET_VRAM | |
| 71 | NV_DMA_ACCESS_RDWR, | 75 | NV_DMA_ACCESS_RDWR, |
| 72 | .start = bo->bo.offset, | 76 | .start = start, |
| 73 | .limit = bo->bo.offset + 0xfff, | 77 | .limit = limit, |
| 74 | }, sizeof(struct nv_dma_class), | 78 | }, sizeof(struct nv_dma_class), |
| 75 | &object); | 79 | &object); |
| 76 | } | 80 | } |
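nv50_fence.c receives the same byte-versus-page fix, and the per-CRTC EVO semaphore DMA objects are now sized from each BO's actual placement instead of bo.offset with a fixed 0xfff span; the object class for the main semaphore also changes from 0x0002 to 0x003d. A hedged sketch of the window computation both call sites now share (helper illustrative):

    /* VRAM DMA window from a BO's TTM placement:
     * mem->start is in pages, mem->size in bytes. */
    static void nv50_vram_window(struct ttm_mem_reg *mem,
                                 u32 *start, u32 *limit)
    {
            *start = mem->start * PAGE_SIZE;
            *limit = *start + mem->size - 1;
    }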
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index 93c2f2cceb51..eb89653a7a17 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c | |||
| @@ -179,9 +179,10 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *relea | |||
| 179 | uint32_t type, bool interruptible) | 179 | uint32_t type, bool interruptible) |
| 180 | { | 180 | { |
| 181 | struct qxl_command cmd; | 181 | struct qxl_command cmd; |
| 182 | struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); | ||
| 182 | 183 | ||
| 183 | cmd.type = type; | 184 | cmd.type = type; |
| 184 | cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset); | 185 | cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); |
| 185 | 186 | ||
| 186 | return qxl_ring_push(qdev->command_ring, &cmd, interruptible); | 187 | return qxl_ring_push(qdev->command_ring, &cmd, interruptible); |
| 187 | } | 188 | } |
| @@ -191,9 +192,10 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas | |||
| 191 | uint32_t type, bool interruptible) | 192 | uint32_t type, bool interruptible) |
| 192 | { | 193 | { |
| 193 | struct qxl_command cmd; | 194 | struct qxl_command cmd; |
| 195 | struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); | ||
| 194 | 196 | ||
| 195 | cmd.type = type; | 197 | cmd.type = type; |
| 196 | cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset); | 198 | cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); |
| 197 | 199 | ||
| 198 | return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); | 200 | return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); |
| 199 | } | 201 | } |
| @@ -214,7 +216,6 @@ int qxl_garbage_collect(struct qxl_device *qdev) | |||
| 214 | struct qxl_release *release; | 216 | struct qxl_release *release; |
| 215 | uint64_t id, next_id; | 217 | uint64_t id, next_id; |
| 216 | int i = 0; | 218 | int i = 0; |
| 217 | int ret; | ||
| 218 | union qxl_release_info *info; | 219 | union qxl_release_info *info; |
| 219 | 220 | ||
| 220 | while (qxl_ring_pop(qdev->release_ring, &id)) { | 221 | while (qxl_ring_pop(qdev->release_ring, &id)) { |
| @@ -224,17 +225,10 @@ int qxl_garbage_collect(struct qxl_device *qdev) | |||
| 224 | if (release == NULL) | 225 | if (release == NULL) |
| 225 | break; | 226 | break; |
| 226 | 227 | ||
| 227 | ret = qxl_release_reserve(qdev, release, false); | ||
| 228 | if (ret) { | ||
| 229 | qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id); | ||
| 230 | DRM_ERROR("failed to reserve release %lld\n", id); | ||
| 231 | } | ||
| 232 | |||
| 233 | info = qxl_release_map(qdev, release); | 228 | info = qxl_release_map(qdev, release); |
| 234 | next_id = info->next; | 229 | next_id = info->next; |
| 235 | qxl_release_unmap(qdev, release, info); | 230 | qxl_release_unmap(qdev, release, info); |
| 236 | 231 | ||
| 237 | qxl_release_unreserve(qdev, release); | ||
| 238 | QXL_INFO(qdev, "popped %lld, next %lld\n", id, | 232 | QXL_INFO(qdev, "popped %lld, next %lld\n", id, |
| 239 | next_id); | 233 | next_id); |
| 240 | 234 | ||
| @@ -259,27 +253,29 @@ int qxl_garbage_collect(struct qxl_device *qdev) | |||
| 259 | return i; | 253 | return i; |
| 260 | } | 254 | } |
| 261 | 255 | ||
| 262 | int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size, | 256 | int qxl_alloc_bo_reserved(struct qxl_device *qdev, |
| 257 | struct qxl_release *release, | ||
| 258 | unsigned long size, | ||
| 263 | struct qxl_bo **_bo) | 259 | struct qxl_bo **_bo) |
| 264 | { | 260 | { |
| 265 | struct qxl_bo *bo; | 261 | struct qxl_bo *bo; |
| 266 | int ret; | 262 | int ret; |
| 267 | 263 | ||
| 268 | ret = qxl_bo_create(qdev, size, false /* not kernel - device */, | 264 | ret = qxl_bo_create(qdev, size, false /* not kernel - device */, |
| 269 | QXL_GEM_DOMAIN_VRAM, NULL, &bo); | 265 | false, QXL_GEM_DOMAIN_VRAM, NULL, &bo); |
| 270 | if (ret) { | 266 | if (ret) { |
| 271 | DRM_ERROR("failed to allocate VRAM BO\n"); | 267 | DRM_ERROR("failed to allocate VRAM BO\n"); |
| 272 | return ret; | 268 | return ret; |
| 273 | } | 269 | } |
| 274 | ret = qxl_bo_reserve(bo, false); | 270 | ret = qxl_release_list_add(release, bo); |
| 275 | if (unlikely(ret != 0)) | 271 | if (ret) |
| 276 | goto out_unref; | 272 | goto out_unref; |
| 277 | 273 | ||
| 278 | *_bo = bo; | 274 | *_bo = bo; |
| 279 | return 0; | 275 | return 0; |
| 280 | out_unref: | 276 | out_unref: |
| 281 | qxl_bo_unref(&bo); | 277 | qxl_bo_unref(&bo); |
| 282 | return 0; | 278 | return ret; |
| 283 | } | 279 | } |
| 284 | 280 | ||
| 285 | static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr) | 281 | static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr) |
| @@ -503,6 +499,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev, | |||
| 503 | if (ret) | 499 | if (ret) |
| 504 | return ret; | 500 | return ret; |
| 505 | 501 | ||
| 502 | ret = qxl_release_reserve_list(release, true); | ||
| 503 | if (ret) | ||
| 504 | return ret; | ||
| 505 | |||
| 506 | cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); | 506 | cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); |
| 507 | cmd->type = QXL_SURFACE_CMD_CREATE; | 507 | cmd->type = QXL_SURFACE_CMD_CREATE; |
| 508 | cmd->u.surface_create.format = surf->surf.format; | 508 | cmd->u.surface_create.format = surf->surf.format; |
| @@ -524,14 +524,11 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev, | |||
| 524 | 524 | ||
| 525 | surf->surf_create = release; | 525 | surf->surf_create = release; |
| 526 | 526 | ||
| 527 | /* no need to add a release to the fence for this bo, | 527 | /* no need to add a release to the fence for this surface bo, |
| 528 | since it is only released when we ask to destroy the surface | 528 | since it is only released when we ask to destroy the surface |
| 529 | and it would never signal otherwise */ | 529 | and it would never signal otherwise */ |
| 530 | qxl_fence_releaseable(qdev, release); | ||
| 531 | |||
| 532 | qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); | 530 | qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); |
| 533 | 531 | qxl_release_fence_buffer_objects(release); | |
| 534 | qxl_release_unreserve(qdev, release); | ||
| 535 | 532 | ||
| 536 | surf->hw_surf_alloc = true; | 533 | surf->hw_surf_alloc = true; |
| 537 | spin_lock(&qdev->surf_id_idr_lock); | 534 | spin_lock(&qdev->surf_id_idr_lock); |
| @@ -573,12 +570,9 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev, | |||
| 573 | cmd->surface_id = id; | 570 | cmd->surface_id = id; |
| 574 | qxl_release_unmap(qdev, release, &cmd->release_info); | 571 | qxl_release_unmap(qdev, release, &cmd->release_info); |
| 575 | 572 | ||
| 576 | qxl_fence_releaseable(qdev, release); | ||
| 577 | |||
| 578 | qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); | 573 | qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); |
| 579 | 574 | ||
| 580 | qxl_release_unreserve(qdev, release); | 575 | qxl_release_fence_buffer_objects(release); |
| 581 | |||
| 582 | 576 | ||
| 583 | return 0; | 577 | return 0; |
| 584 | } | 578 | } |
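The qxl rework starts here: struct qxl_release no longer carries a fixed bos[QXL_MAX_RES] array but strings ttm_validate_buffer wrappers on a list (see the qxl_drv.h hunks below), so the ring-push helpers recover the command BO from the head of that list. The lookup both helpers now use, as a sketch:

    /* First BO on a release's validate list; to_qxl_bo() maps the
     * embedded ttm_buffer_object back to its qxl_bo. */
    static struct qxl_bo *release_first_bo(struct qxl_release *release)
    {
            struct qxl_bo_list *entry =
                    list_first_entry(&release->bos, struct qxl_bo_list,
                                     tv.head);

            return to_qxl_bo(entry->tv.bo);
    }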
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index f76f5dd7bfc4..835caba026d3 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c | |||
| @@ -179,7 +179,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc) | |||
| 179 | kfree(qxl_crtc); | 179 | kfree(qxl_crtc); |
| 180 | } | 180 | } |
| 181 | 181 | ||
| 182 | static void | 182 | static int |
| 183 | qxl_hide_cursor(struct qxl_device *qdev) | 183 | qxl_hide_cursor(struct qxl_device *qdev) |
| 184 | { | 184 | { |
| 185 | struct qxl_release *release; | 185 | struct qxl_release *release; |
| @@ -188,14 +188,22 @@ qxl_hide_cursor(struct qxl_device *qdev) | |||
| 188 | 188 | ||
| 189 | ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, | 189 | ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, |
| 190 | &release, NULL); | 190 | &release, NULL); |
| 191 | if (ret) | ||
| 192 | return ret; | ||
| 193 | |||
| 194 | ret = qxl_release_reserve_list(release, true); | ||
| 195 | if (ret) { | ||
| 196 | qxl_release_free(qdev, release); | ||
| 197 | return ret; | ||
| 198 | } | ||
| 191 | 199 | ||
| 192 | cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); | 200 | cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); |
| 193 | cmd->type = QXL_CURSOR_HIDE; | 201 | cmd->type = QXL_CURSOR_HIDE; |
| 194 | qxl_release_unmap(qdev, release, &cmd->release_info); | 202 | qxl_release_unmap(qdev, release, &cmd->release_info); |
| 195 | 203 | ||
| 196 | qxl_fence_releaseable(qdev, release); | ||
| 197 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); | 204 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); |
| 198 | qxl_release_unreserve(qdev, release); | 205 | qxl_release_fence_buffer_objects(release); |
| 206 | return 0; | ||
| 199 | } | 207 | } |
| 200 | 208 | ||
| 201 | static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | 209 | static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, |
| @@ -216,10 +224,8 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | |||
| 216 | 224 | ||
| 217 | int size = 64*64*4; | 225 | int size = 64*64*4; |
| 218 | int ret = 0; | 226 | int ret = 0; |
| 219 | if (!handle) { | 227 | if (!handle) |
| 220 | qxl_hide_cursor(qdev); | 228 | return qxl_hide_cursor(qdev); |
| 221 | return 0; | ||
| 222 | } | ||
| 223 | 229 | ||
| 224 | obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); | 230 | obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); |
| 225 | if (!obj) { | 231 | if (!obj) { |
| @@ -234,8 +240,9 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | |||
| 234 | goto out_unref; | 240 | goto out_unref; |
| 235 | 241 | ||
| 236 | ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL); | 242 | ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL); |
| 243 | qxl_bo_unreserve(user_bo); | ||
| 237 | if (ret) | 244 | if (ret) |
| 238 | goto out_unreserve; | 245 | goto out_unref; |
| 239 | 246 | ||
| 240 | ret = qxl_bo_kmap(user_bo, &user_ptr); | 247 | ret = qxl_bo_kmap(user_bo, &user_ptr); |
| 241 | if (ret) | 248 | if (ret) |
| @@ -246,14 +253,20 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | |||
| 246 | &release, NULL); | 253 | &release, NULL); |
| 247 | if (ret) | 254 | if (ret) |
| 248 | goto out_kunmap; | 255 | goto out_kunmap; |
| 249 | ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size, | 256 | |
| 250 | &cursor_bo); | 257 | ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_cursor) + size, |
| 258 | &cursor_bo); | ||
| 251 | if (ret) | 259 | if (ret) |
| 252 | goto out_free_release; | 260 | goto out_free_release; |
| 253 | ret = qxl_bo_kmap(cursor_bo, (void **)&cursor); | 261 | |
| 262 | ret = qxl_release_reserve_list(release, false); | ||
| 254 | if (ret) | 263 | if (ret) |
| 255 | goto out_free_bo; | 264 | goto out_free_bo; |
| 256 | 265 | ||
| 266 | ret = qxl_bo_kmap(cursor_bo, (void **)&cursor); | ||
| 267 | if (ret) | ||
| 268 | goto out_backoff; | ||
| 269 | |||
| 257 | cursor->header.unique = 0; | 270 | cursor->header.unique = 0; |
| 258 | cursor->header.type = SPICE_CURSOR_TYPE_ALPHA; | 271 | cursor->header.type = SPICE_CURSOR_TYPE_ALPHA; |
| 259 | cursor->header.width = 64; | 272 | cursor->header.width = 64; |
| @@ -269,11 +282,7 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | |||
| 269 | 282 | ||
| 270 | qxl_bo_kunmap(cursor_bo); | 283 | qxl_bo_kunmap(cursor_bo); |
| 271 | 284 | ||
| 272 | /* finish with the userspace bo */ | ||
| 273 | qxl_bo_kunmap(user_bo); | 285 | qxl_bo_kunmap(user_bo); |
| 274 | qxl_bo_unpin(user_bo); | ||
| 275 | qxl_bo_unreserve(user_bo); | ||
| 276 | drm_gem_object_unreference_unlocked(obj); | ||
| 277 | 286 | ||
| 278 | cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); | 287 | cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); |
| 279 | cmd->type = QXL_CURSOR_SET; | 288 | cmd->type = QXL_CURSOR_SET; |
| @@ -281,30 +290,35 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | |||
| 281 | cmd->u.set.position.y = qcrtc->cur_y; | 290 | cmd->u.set.position.y = qcrtc->cur_y; |
| 282 | 291 | ||
| 283 | cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); | 292 | cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); |
| 284 | qxl_release_add_res(qdev, release, cursor_bo); | ||
| 285 | 293 | ||
| 286 | cmd->u.set.visible = 1; | 294 | cmd->u.set.visible = 1; |
| 287 | qxl_release_unmap(qdev, release, &cmd->release_info); | 295 | qxl_release_unmap(qdev, release, &cmd->release_info); |
| 288 | 296 | ||
| 289 | qxl_fence_releaseable(qdev, release); | ||
| 290 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); | 297 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); |
| 291 | qxl_release_unreserve(qdev, release); | 298 | qxl_release_fence_buffer_objects(release); |
| 299 | |||
| 300 | /* finish with the userspace bo */ | ||
| 301 | ret = qxl_bo_reserve(user_bo, false); | ||
| 302 | if (!ret) { | ||
| 303 | qxl_bo_unpin(user_bo); | ||
| 304 | qxl_bo_unreserve(user_bo); | ||
| 305 | } | ||
| 306 | drm_gem_object_unreference_unlocked(obj); | ||
| 292 | 307 | ||
| 293 | qxl_bo_unreserve(cursor_bo); | ||
| 294 | qxl_bo_unref(&cursor_bo); | 308 | qxl_bo_unref(&cursor_bo); |
| 295 | 309 | ||
| 296 | return ret; | 310 | return ret; |
| 311 | |||
| 312 | out_backoff: | ||
| 313 | qxl_release_backoff_reserve_list(release); | ||
| 297 | out_free_bo: | 314 | out_free_bo: |
| 298 | qxl_bo_unref(&cursor_bo); | 315 | qxl_bo_unref(&cursor_bo); |
| 299 | out_free_release: | 316 | out_free_release: |
| 300 | qxl_release_unreserve(qdev, release); | ||
| 301 | qxl_release_free(qdev, release); | 317 | qxl_release_free(qdev, release); |
| 302 | out_kunmap: | 318 | out_kunmap: |
| 303 | qxl_bo_kunmap(user_bo); | 319 | qxl_bo_kunmap(user_bo); |
| 304 | out_unpin: | 320 | out_unpin: |
| 305 | qxl_bo_unpin(user_bo); | 321 | qxl_bo_unpin(user_bo); |
| 306 | out_unreserve: | ||
| 307 | qxl_bo_unreserve(user_bo); | ||
| 308 | out_unref: | 322 | out_unref: |
| 309 | drm_gem_object_unreference_unlocked(obj); | 323 | drm_gem_object_unreference_unlocked(obj); |
| 310 | return ret; | 324 | return ret; |
| @@ -322,6 +336,14 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc, | |||
| 322 | 336 | ||
| 323 | ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, | 337 | ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, |
| 324 | &release, NULL); | 338 | &release, NULL); |
| 339 | if (ret) | ||
| 340 | return ret; | ||
| 341 | |||
| 342 | ret = qxl_release_reserve_list(release, true); | ||
| 343 | if (ret) { | ||
| 344 | qxl_release_free(qdev, release); | ||
| 345 | return ret; | ||
| 346 | } | ||
| 325 | 347 | ||
| 326 | qcrtc->cur_x = x; | 348 | qcrtc->cur_x = x; |
| 327 | qcrtc->cur_y = y; | 349 | qcrtc->cur_y = y; |
| @@ -332,9 +354,9 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc, | |||
| 332 | cmd->u.position.y = qcrtc->cur_y; | 354 | cmd->u.position.y = qcrtc->cur_y; |
| 333 | qxl_release_unmap(qdev, release, &cmd->release_info); | 355 | qxl_release_unmap(qdev, release, &cmd->release_info); |
| 334 | 356 | ||
| 335 | qxl_fence_releaseable(qdev, release); | ||
| 336 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); | 357 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); |
| 337 | qxl_release_unreserve(qdev, release); | 358 | qxl_release_fence_buffer_objects(release); |
| 359 | |||
| 338 | return 0; | 360 | return 0; |
| 339 | } | 361 | } |
| 340 | 362 | ||
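Every cursor path in qxl_display.c now follows one release lifecycle: allocate the release, reserve its whole BO list up front, map and fill the command, push it, then fence and unreserve in a single step; qxl_hide_cursor() grows an int return so cursor_set2 can propagate failures. The sequence, sketched with the helpers named in the hunks:

    ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
                                     QXL_RELEASE_CURSOR_CMD, &release, NULL);
    if (ret)
            return ret;

    ret = qxl_release_reserve_list(release, true);  /* reserve all BOs */
    if (ret) {
            qxl_release_free(qdev, release);
            return ret;
    }

    /* qxl_release_map(), fill the qxl_cursor_cmd, qxl_release_unmap() */

    qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
    qxl_release_fence_buffer_objects(release);      /* fence + unreserve */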
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c index 3c8c3dbf9378..56e1d633875e 100644 --- a/drivers/gpu/drm/qxl/qxl_draw.c +++ b/drivers/gpu/drm/qxl/qxl_draw.c | |||
| @@ -23,25 +23,29 @@ | |||
| 23 | #include "qxl_drv.h" | 23 | #include "qxl_drv.h" |
| 24 | #include "qxl_object.h" | 24 | #include "qxl_object.h" |
| 25 | 25 | ||
| 26 | static int alloc_clips(struct qxl_device *qdev, | ||
| 27 | struct qxl_release *release, | ||
| 28 | unsigned num_clips, | ||
| 29 | struct qxl_bo **clips_bo) | ||
| 30 | { | ||
| 31 | int size = sizeof(struct qxl_clip_rects) + sizeof(struct qxl_rect) * num_clips; | ||
| 32 | |||
| 33 | return qxl_alloc_bo_reserved(qdev, release, size, clips_bo); | ||
| 34 | } | ||
| 35 | |||
| 26 | /* returns a pointer to the already allocated qxl_rect array inside | 36 | /* returns a pointer to the already allocated qxl_rect array inside |
| 27 | * the qxl_clip_rects. This is *not* the same as the memory allocated | 37 | * the qxl_clip_rects. This is *not* the same as the memory allocated |
| 28 | * on the device, it is offset to qxl_clip_rects.chunk.data */ | 38 | * on the device, it is offset to qxl_clip_rects.chunk.data */ |
| 29 | static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, | 39 | static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, |
| 30 | struct qxl_drawable *drawable, | 40 | struct qxl_drawable *drawable, |
| 31 | unsigned num_clips, | 41 | unsigned num_clips, |
| 32 | struct qxl_bo **clips_bo, | 42 | struct qxl_bo *clips_bo) |
| 33 | struct qxl_release *release) | ||
| 34 | { | 43 | { |
| 35 | struct qxl_clip_rects *dev_clips; | 44 | struct qxl_clip_rects *dev_clips; |
| 36 | int ret; | 45 | int ret; |
| 37 | int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips; | ||
| 38 | ret = qxl_alloc_bo_reserved(qdev, size, clips_bo); | ||
| 39 | if (ret) | ||
| 40 | return NULL; | ||
| 41 | 46 | ||
| 42 | ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips); | 47 | ret = qxl_bo_kmap(clips_bo, (void **)&dev_clips); |
| 43 | if (ret) { | 48 | if (ret) { |
| 44 | qxl_bo_unref(clips_bo); | ||
| 45 | return NULL; | 49 | return NULL; |
| 46 | } | 50 | } |
| 47 | dev_clips->num_rects = num_clips; | 51 | dev_clips->num_rects = num_clips; |
| @@ -52,20 +56,34 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, | |||
| 52 | } | 56 | } |
| 53 | 57 | ||
| 54 | static int | 58 | static int |
| 59 | alloc_drawable(struct qxl_device *qdev, struct qxl_release **release) | ||
| 60 | { | ||
| 61 | int ret; | ||
| 62 | ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable), | ||
| 63 | QXL_RELEASE_DRAWABLE, release, | ||
| 64 | NULL); | ||
| 65 | return ret; | ||
| 66 | } | ||
| 67 | |||
| 68 | static void | ||
| 69 | free_drawable(struct qxl_device *qdev, struct qxl_release *release) | ||
| 70 | { | ||
| 71 | qxl_release_free(qdev, release); | ||
| 72 | } | ||
| 73 | |||
| 74 | /* release needs to be reserved at this point */ | ||
| 75 | static int | ||
| 55 | make_drawable(struct qxl_device *qdev, int surface, uint8_t type, | 76 | make_drawable(struct qxl_device *qdev, int surface, uint8_t type, |
| 56 | const struct qxl_rect *rect, | 77 | const struct qxl_rect *rect, |
| 57 | struct qxl_release **release) | 78 | struct qxl_release *release) |
| 58 | { | 79 | { |
| 59 | struct qxl_drawable *drawable; | 80 | struct qxl_drawable *drawable; |
| 60 | int i, ret; | 81 | int i; |
| 61 | 82 | ||
| 62 | ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable), | 83 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); |
| 63 | QXL_RELEASE_DRAWABLE, release, | 84 | if (!drawable) |
| 64 | NULL); | 85 | return -ENOMEM; |
| 65 | if (ret) | ||
| 66 | return ret; | ||
| 67 | 86 | ||
| 68 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release); | ||
| 69 | drawable->type = type; | 87 | drawable->type = type; |
| 70 | 88 | ||
| 71 | drawable->surface_id = surface; /* Only primary for now */ | 89 | drawable->surface_id = surface; /* Only primary for now */ |
| @@ -91,14 +109,23 @@ make_drawable(struct qxl_device *qdev, int surface, uint8_t type, | |||
| 91 | drawable->bbox = *rect; | 109 | drawable->bbox = *rect; |
| 92 | 110 | ||
| 93 | drawable->mm_time = qdev->rom->mm_clock; | 111 | drawable->mm_time = qdev->rom->mm_clock; |
| 94 | qxl_release_unmap(qdev, *release, &drawable->release_info); | 112 | qxl_release_unmap(qdev, release, &drawable->release_info); |
| 95 | return 0; | 113 | return 0; |
| 96 | } | 114 | } |
| 97 | 115 | ||
| 98 | static int qxl_palette_create_1bit(struct qxl_bo **palette_bo, | 116 | static int alloc_palette_object(struct qxl_device *qdev, |
| 117 | struct qxl_release *release, | ||
| 118 | struct qxl_bo **palette_bo) | ||
| 119 | { | ||
| 120 | return qxl_alloc_bo_reserved(qdev, release, | ||
| 121 | sizeof(struct qxl_palette) + sizeof(uint32_t) * 2, | ||
| 122 | palette_bo); | ||
| 123 | } | ||
| 124 | |||
| 125 | static int qxl_palette_create_1bit(struct qxl_bo *palette_bo, | ||
| 126 | struct qxl_release *release, | ||
| 99 | const struct qxl_fb_image *qxl_fb_image) | 127 | const struct qxl_fb_image *qxl_fb_image) |
| 100 | { | 128 | { |
| 101 | struct qxl_device *qdev = qxl_fb_image->qdev; | ||
| 102 | const struct fb_image *fb_image = &qxl_fb_image->fb_image; | 129 | const struct fb_image *fb_image = &qxl_fb_image->fb_image; |
| 103 | uint32_t visual = qxl_fb_image->visual; | 130 | uint32_t visual = qxl_fb_image->visual; |
| 104 | const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette; | 131 | const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette; |
| @@ -108,12 +135,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo, | |||
| 108 | static uint64_t unique; /* we make no attempt to actually set this | 135 | static uint64_t unique; /* we make no attempt to actually set this |
| 109 | * correctly globaly, since that would require | 136 | * correctly globaly, since that would require |
| 110 | * tracking all of our palettes. */ | 137 | * tracking all of our palettes. */ |
| 111 | 138 | ret = qxl_bo_kmap(palette_bo, (void **)&pal); | |
| 112 | ret = qxl_alloc_bo_reserved(qdev, | ||
| 113 | sizeof(struct qxl_palette) + sizeof(uint32_t) * 2, | ||
| 114 | palette_bo); | ||
| 115 | |||
| 116 | ret = qxl_bo_kmap(*palette_bo, (void **)&pal); | ||
| 117 | pal->num_ents = 2; | 139 | pal->num_ents = 2; |
| 118 | pal->unique = unique++; | 140 | pal->unique = unique++; |
| 119 | if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) { | 141 | if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) { |
| @@ -126,7 +148,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo, | |||
| 126 | } | 148 | } |
| 127 | pal->ents[0] = bgcolor; | 149 | pal->ents[0] = bgcolor; |
| 128 | pal->ents[1] = fgcolor; | 150 | pal->ents[1] = fgcolor; |
| 129 | qxl_bo_kunmap(*palette_bo); | 151 | qxl_bo_kunmap(palette_bo); |
| 130 | return 0; | 152 | return 0; |
| 131 | } | 153 | } |
| 132 | 154 | ||
| @@ -144,44 +166,63 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image, | |||
| 144 | const char *src = fb_image->data; | 166 | const char *src = fb_image->data; |
| 145 | int depth = fb_image->depth; | 167 | int depth = fb_image->depth; |
| 146 | struct qxl_release *release; | 168 | struct qxl_release *release; |
| 147 | struct qxl_bo *image_bo; | ||
| 148 | struct qxl_image *image; | 169 | struct qxl_image *image; |
| 149 | int ret; | 170 | int ret; |
| 150 | 171 | struct qxl_drm_image *dimage; | |
| 172 | struct qxl_bo *palette_bo = NULL; | ||
| 151 | if (stride == 0) | 173 | if (stride == 0) |
| 152 | stride = depth * width / 8; | 174 | stride = depth * width / 8; |
| 153 | 175 | ||
| 176 | ret = alloc_drawable(qdev, &release); | ||
| 177 | if (ret) | ||
| 178 | return; | ||
| 179 | |||
| 180 | ret = qxl_image_alloc_objects(qdev, release, | ||
| 181 | &dimage, | ||
| 182 | height, stride); | ||
| 183 | if (ret) | ||
| 184 | goto out_free_drawable; | ||
| 185 | |||
| 186 | if (depth == 1) { | ||
| 187 | ret = alloc_palette_object(qdev, release, &palette_bo); | ||
| 188 | if (ret) | ||
| 189 | goto out_free_image; | ||
| 190 | } | ||
| 191 | |||
| 192 | /* do a reservation run over all the objects we just allocated */ | ||
| 193 | ret = qxl_release_reserve_list(release, true); | ||
| 194 | if (ret) | ||
| 195 | goto out_free_palette; | ||
| 196 | |||
| 154 | rect.left = x; | 197 | rect.left = x; |
| 155 | rect.right = x + width; | 198 | rect.right = x + width; |
| 156 | rect.top = y; | 199 | rect.top = y; |
| 157 | rect.bottom = y + height; | 200 | rect.bottom = y + height; |
| 158 | 201 | ||
| 159 | ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release); | 202 | ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, release); |
| 160 | if (ret) | 203 | if (ret) { |
| 161 | return; | 204 | qxl_release_backoff_reserve_list(release); |
| 205 | goto out_free_palette; | ||
| 206 | } | ||
| 162 | 207 | ||
| 163 | ret = qxl_image_create(qdev, release, &image_bo, | 208 | ret = qxl_image_init(qdev, release, dimage, |
| 164 | (const uint8_t *)src, 0, 0, | 209 | (const uint8_t *)src, 0, 0, |
| 165 | width, height, depth, stride); | 210 | width, height, depth, stride); |
| 166 | if (ret) { | 211 | if (ret) { |
| 167 | qxl_release_unreserve(qdev, release); | 212 | qxl_release_backoff_reserve_list(release); |
| 168 | qxl_release_free(qdev, release); | 213 | qxl_release_free(qdev, release); |
| 169 | return; | 214 | return; |
| 170 | } | 215 | } |
| 171 | 216 | ||
| 172 | if (depth == 1) { | 217 | if (depth == 1) { |
| 173 | struct qxl_bo *palette_bo; | ||
| 174 | void *ptr; | 218 | void *ptr; |
| 175 | ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image); | 219 | ret = qxl_palette_create_1bit(palette_bo, release, qxl_fb_image); |
| 176 | qxl_release_add_res(qdev, release, palette_bo); | ||
| 177 | 220 | ||
| 178 | ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0); | 221 | ptr = qxl_bo_kmap_atomic_page(qdev, dimage->bo, 0); |
| 179 | image = ptr; | 222 | image = ptr; |
| 180 | image->u.bitmap.palette = | 223 | image->u.bitmap.palette = |
| 181 | qxl_bo_physical_address(qdev, palette_bo, 0); | 224 | qxl_bo_physical_address(qdev, palette_bo, 0); |
| 182 | qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); | 225 | qxl_bo_kunmap_atomic_page(qdev, dimage->bo, ptr); |
| 183 | qxl_bo_unreserve(palette_bo); | ||
| 184 | qxl_bo_unref(&palette_bo); | ||
| 185 | } | 226 | } |
| 186 | 227 | ||
| 187 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); | 228 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); |
| @@ -199,16 +240,20 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image, | |||
| 199 | drawable->u.copy.mask.bitmap = 0; | 240 | drawable->u.copy.mask.bitmap = 0; |
| 200 | 241 | ||
| 201 | drawable->u.copy.src_bitmap = | 242 | drawable->u.copy.src_bitmap = |
| 202 | qxl_bo_physical_address(qdev, image_bo, 0); | 243 | qxl_bo_physical_address(qdev, dimage->bo, 0); |
| 203 | qxl_release_unmap(qdev, release, &drawable->release_info); | 244 | qxl_release_unmap(qdev, release, &drawable->release_info); |
| 204 | 245 | ||
| 205 | qxl_release_add_res(qdev, release, image_bo); | ||
| 206 | qxl_bo_unreserve(image_bo); | ||
| 207 | qxl_bo_unref(&image_bo); | ||
| 208 | |||
| 209 | qxl_fence_releaseable(qdev, release); | ||
| 210 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); | 246 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); |
| 211 | qxl_release_unreserve(qdev, release); | 247 | qxl_release_fence_buffer_objects(release); |
| 248 | |||
| 249 | out_free_palette: | ||
| 250 | if (palette_bo) | ||
| 251 | qxl_bo_unref(&palette_bo); | ||
| 252 | out_free_image: | ||
| 253 | qxl_image_free_objects(qdev, dimage); | ||
| 254 | out_free_drawable: | ||
| 255 | if (ret) | ||
| 256 | free_drawable(qdev, release); | ||
| 212 | } | 257 | } |
| 213 | 258 | ||
| 214 | /* push a draw command using the given clipping rectangles as | 259 | /* push a draw command using the given clipping rectangles as |
| @@ -243,10 +288,14 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, | |||
| 243 | int depth = qxl_fb->base.bits_per_pixel; | 288 | int depth = qxl_fb->base.bits_per_pixel; |
| 244 | uint8_t *surface_base; | 289 | uint8_t *surface_base; |
| 245 | struct qxl_release *release; | 290 | struct qxl_release *release; |
| 246 | struct qxl_bo *image_bo; | ||
| 247 | struct qxl_bo *clips_bo; | 291 | struct qxl_bo *clips_bo; |
| 292 | struct qxl_drm_image *dimage; | ||
| 248 | int ret; | 293 | int ret; |
| 249 | 294 | ||
| 295 | ret = alloc_drawable(qdev, &release); | ||
| 296 | if (ret) | ||
| 297 | return; | ||
| 298 | |||
| 250 | left = clips->x1; | 299 | left = clips->x1; |
| 251 | right = clips->x2; | 300 | right = clips->x2; |
| 252 | top = clips->y1; | 301 | top = clips->y1; |
| @@ -263,36 +312,52 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, | |||
| 263 | 312 | ||
| 264 | width = right - left; | 313 | width = right - left; |
| 265 | height = bottom - top; | 314 | height = bottom - top; |
| 315 | |||
| 316 | ret = alloc_clips(qdev, release, num_clips, &clips_bo); | ||
| 317 | if (ret) | ||
| 318 | goto out_free_drawable; | ||
| 319 | |||
| 320 | ret = qxl_image_alloc_objects(qdev, release, | ||
| 321 | &dimage, | ||
| 322 | height, stride); | ||
| 323 | if (ret) | ||
| 324 | goto out_free_clips; | ||
| 325 | |||
| 326 | /* do a reservation run over all the objects we just allocated */ | ||
| 327 | ret = qxl_release_reserve_list(release, true); | ||
| 328 | if (ret) | ||
| 329 | goto out_free_image; | ||
| 330 | |||
| 266 | drawable_rect.left = left; | 331 | drawable_rect.left = left; |
| 267 | drawable_rect.right = right; | 332 | drawable_rect.right = right; |
| 268 | drawable_rect.top = top; | 333 | drawable_rect.top = top; |
| 269 | drawable_rect.bottom = bottom; | 334 | drawable_rect.bottom = bottom; |
| 335 | |||
| 270 | ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect, | 336 | ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect, |
| 271 | &release); | 337 | release); |
| 272 | if (ret) | 338 | if (ret) |
| 273 | return; | 339 | goto out_release_backoff; |
| 274 | 340 | ||
| 275 | ret = qxl_bo_kmap(bo, (void **)&surface_base); | 341 | ret = qxl_bo_kmap(bo, (void **)&surface_base); |
| 276 | if (ret) | 342 | if (ret) |
| 277 | goto out_unref; | 343 | goto out_release_backoff; |
| 278 | 344 | ||
| 279 | ret = qxl_image_create(qdev, release, &image_bo, surface_base, | 345 | |
| 280 | left, top, width, height, depth, stride); | 346 | ret = qxl_image_init(qdev, release, dimage, surface_base, |
| 347 | left, top, width, height, depth, stride); | ||
| 281 | qxl_bo_kunmap(bo); | 348 | qxl_bo_kunmap(bo); |
| 282 | if (ret) | 349 | if (ret) |
| 283 | goto out_unref; | 350 | goto out_release_backoff; |
| 351 | |||
| 352 | rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo); | ||
| 353 | if (!rects) | ||
| 354 | goto out_release_backoff; | ||
| 284 | 355 | ||
| 285 | rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release); | ||
| 286 | if (!rects) { | ||
| 287 | qxl_bo_unref(&image_bo); | ||
| 288 | goto out_unref; | ||
| 289 | } | ||
| 290 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); | 356 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); |
| 291 | 357 | ||
| 292 | drawable->clip.type = SPICE_CLIP_TYPE_RECTS; | 358 | drawable->clip.type = SPICE_CLIP_TYPE_RECTS; |
| 293 | drawable->clip.data = qxl_bo_physical_address(qdev, | 359 | drawable->clip.data = qxl_bo_physical_address(qdev, |
| 294 | clips_bo, 0); | 360 | clips_bo, 0); |
| 295 | qxl_release_add_res(qdev, release, clips_bo); | ||
| 296 | 361 | ||
| 297 | drawable->u.copy.src_area.top = 0; | 362 | drawable->u.copy.src_area.top = 0; |
| 298 | drawable->u.copy.src_area.bottom = height; | 363 | drawable->u.copy.src_area.bottom = height; |
| @@ -306,11 +371,9 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, | |||
| 306 | drawable->u.copy.mask.pos.y = 0; | 371 | drawable->u.copy.mask.pos.y = 0; |
| 307 | drawable->u.copy.mask.bitmap = 0; | 372 | drawable->u.copy.mask.bitmap = 0; |
| 308 | 373 | ||
| 309 | drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0); | 374 | drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, dimage->bo, 0); |
| 310 | qxl_release_unmap(qdev, release, &drawable->release_info); | 375 | qxl_release_unmap(qdev, release, &drawable->release_info); |
| 311 | qxl_release_add_res(qdev, release, image_bo); | 376 | |
| 312 | qxl_bo_unreserve(image_bo); | ||
| 313 | qxl_bo_unref(&image_bo); | ||
| 314 | clips_ptr = clips; | 377 | clips_ptr = clips; |
| 315 | for (i = 0; i < num_clips; i++, clips_ptr += inc) { | 378 | for (i = 0; i < num_clips; i++, clips_ptr += inc) { |
| 316 | rects[i].left = clips_ptr->x1; | 379 | rects[i].left = clips_ptr->x1; |
| @@ -319,17 +382,22 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, | |||
| 319 | rects[i].bottom = clips_ptr->y2; | 382 | rects[i].bottom = clips_ptr->y2; |
| 320 | } | 383 | } |
| 321 | qxl_bo_kunmap(clips_bo); | 384 | qxl_bo_kunmap(clips_bo); |
| 322 | qxl_bo_unreserve(clips_bo); | ||
| 323 | qxl_bo_unref(&clips_bo); | ||
| 324 | 385 | ||
| 325 | qxl_fence_releaseable(qdev, release); | ||
| 326 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); | 386 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); |
| 327 | qxl_release_unreserve(qdev, release); | 387 | qxl_release_fence_buffer_objects(release); |
| 328 | return; | 388 | |
| 389 | out_release_backoff: | ||
| 390 | if (ret) | ||
| 391 | qxl_release_backoff_reserve_list(release); | ||
| 392 | out_free_image: | ||
| 393 | qxl_image_free_objects(qdev, dimage); | ||
| 394 | out_free_clips: | ||
| 395 | qxl_bo_unref(&clips_bo); | ||
| 396 | out_free_drawable: | ||
| 397 | /* only free drawable on error */ | ||
| 398 | if (ret) | ||
| 399 | free_drawable(qdev, release); | ||
| 329 | 400 | ||
| 330 | out_unref: | ||
| 331 | qxl_release_unreserve(qdev, release); | ||
| 332 | qxl_release_free(qdev, release); | ||
| 333 | } | 401 | } |
| 334 | 402 | ||
| 335 | void qxl_draw_copyarea(struct qxl_device *qdev, | 403 | void qxl_draw_copyarea(struct qxl_device *qdev, |
| @@ -342,22 +410,36 @@ void qxl_draw_copyarea(struct qxl_device *qdev, | |||
| 342 | struct qxl_release *release; | 410 | struct qxl_release *release; |
| 343 | int ret; | 411 | int ret; |
| 344 | 412 | ||
| 413 | ret = alloc_drawable(qdev, &release); | ||
| 414 | if (ret) | ||
| 415 | return; | ||
| 416 | |||
| 417 | /* do a reservation run over all the objects we just allocated */ | ||
| 418 | ret = qxl_release_reserve_list(release, true); | ||
| 419 | if (ret) | ||
| 420 | goto out_free_release; | ||
| 421 | |||
| 345 | rect.left = dx; | 422 | rect.left = dx; |
| 346 | rect.top = dy; | 423 | rect.top = dy; |
| 347 | rect.right = dx + width; | 424 | rect.right = dx + width; |
| 348 | rect.bottom = dy + height; | 425 | rect.bottom = dy + height; |
| 349 | ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release); | 426 | ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, release); |
| 350 | if (ret) | 427 | if (ret) { |
| 351 | return; | 428 | qxl_release_backoff_reserve_list(release); |
| 429 | goto out_free_release; | ||
| 430 | } | ||
| 352 | 431 | ||
| 353 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); | 432 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); |
| 354 | drawable->u.copy_bits.src_pos.x = sx; | 433 | drawable->u.copy_bits.src_pos.x = sx; |
| 355 | drawable->u.copy_bits.src_pos.y = sy; | 434 | drawable->u.copy_bits.src_pos.y = sy; |
| 356 | |||
| 357 | qxl_release_unmap(qdev, release, &drawable->release_info); | 435 | qxl_release_unmap(qdev, release, &drawable->release_info); |
| 358 | qxl_fence_releaseable(qdev, release); | 436 | |
| 359 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); | 437 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); |
| 360 | qxl_release_unreserve(qdev, release); | 438 | qxl_release_fence_buffer_objects(release); |
| 439 | |||
| 440 | out_free_release: | ||
| 441 | if (ret) | ||
| 442 | free_drawable(qdev, release); | ||
| 361 | } | 443 | } |
| 362 | 444 | ||
| 363 | void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) | 445 | void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) |
| @@ -370,10 +452,21 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) | |||
| 370 | struct qxl_release *release; | 452 | struct qxl_release *release; |
| 371 | int ret; | 453 | int ret; |
| 372 | 454 | ||
| 373 | ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release); | 455 | ret = alloc_drawable(qdev, &release); |
| 374 | if (ret) | 456 | if (ret) |
| 375 | return; | 457 | return; |
| 376 | 458 | ||
| 459 | /* do a reservation run over all the objects we just allocated */ | ||
| 460 | ret = qxl_release_reserve_list(release, true); | ||
| 461 | if (ret) | ||
| 462 | goto out_free_release; | ||
| 463 | |||
| 464 | ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, release); | ||
| 465 | if (ret) { | ||
| 466 | qxl_release_backoff_reserve_list(release); | ||
| 467 | goto out_free_release; | ||
| 468 | } | ||
| 469 | |||
| 377 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); | 470 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); |
| 378 | drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID; | 471 | drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID; |
| 379 | drawable->u.fill.brush.u.color = color; | 472 | drawable->u.fill.brush.u.color = color; |
| @@ -384,7 +477,11 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) | |||
| 384 | drawable->u.fill.mask.bitmap = 0; | 477 | drawable->u.fill.mask.bitmap = 0; |
| 385 | 478 | ||
| 386 | qxl_release_unmap(qdev, release, &drawable->release_info); | 479 | qxl_release_unmap(qdev, release, &drawable->release_info); |
| 387 | qxl_fence_releaseable(qdev, release); | 480 | |
| 388 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); | 481 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); |
| 389 | qxl_release_unreserve(qdev, release); | 482 | qxl_release_fence_buffer_objects(release); |
| 483 | |||
| 484 | out_free_release: | ||
| 485 | if (ret) | ||
| 486 | free_drawable(qdev, release); | ||
| 390 | } | 487 | } |
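qxl_draw.c is restructured around a two-phase rule: first allocate every object a command will touch (the drawable release, image chunks, clip rects, palette), then reserve the whole list in one qxl_release_reserve_list() pass before anything is mapped, backing off with qxl_release_backoff_reserve_list() when a later step fails. A condensed sketch of the shape all four draw paths now share, using the helper names from the hunks (error labels abbreviated):

    ret = alloc_drawable(qdev, &release);           /* phase 1: allocate */
    if (ret)
            return;
    ret = qxl_image_alloc_objects(qdev, release, &dimage, height, stride);
    if (ret)
            goto out_free_drawable;

    ret = qxl_release_reserve_list(release, true);  /* phase 2: reserve all */
    if (ret)
            goto out_free_image;

    /* build the drawable; qxl_release_backoff_reserve_list() on error */

    qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
    qxl_release_fence_buffer_objects(release);

    out_free_image:
            qxl_image_free_objects(qdev, dimage);   /* wrapper freed either way */
    out_free_drawable:
            if (ret)
                    free_drawable(qdev, release);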
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index aacb791464a3..7e96f4f11738 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h | |||
| @@ -42,6 +42,9 @@ | |||
| 42 | #include <ttm/ttm_placement.h> | 42 | #include <ttm/ttm_placement.h> |
| 43 | #include <ttm/ttm_module.h> | 43 | #include <ttm/ttm_module.h> |
| 44 | 44 | ||
| 45 | /* just for ttm_validate_buffer */ | ||
| 46 | #include <ttm/ttm_execbuf_util.h> | ||
| 47 | |||
| 45 | #include <drm/qxl_drm.h> | 48 | #include <drm/qxl_drm.h> |
| 46 | #include "qxl_dev.h" | 49 | #include "qxl_dev.h" |
| 47 | 50 | ||
| @@ -118,9 +121,9 @@ struct qxl_bo { | |||
| 118 | uint32_t surface_id; | 121 | uint32_t surface_id; |
| 119 | struct qxl_fence fence; /* per bo fence - list of releases */ | 122 | struct qxl_fence fence; /* per bo fence - list of releases */ |
| 120 | struct qxl_release *surf_create; | 123 | struct qxl_release *surf_create; |
| 121 | atomic_t reserve_count; | ||
| 122 | }; | 124 | }; |
| 123 | #define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base) | 125 | #define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base) |
| 126 | #define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo) | ||
| 124 | 127 | ||
| 125 | struct qxl_gem { | 128 | struct qxl_gem { |
| 126 | struct mutex mutex; | 129 | struct mutex mutex; |
| @@ -128,12 +131,7 @@ struct qxl_gem { | |||
| 128 | }; | 131 | }; |
| 129 | 132 | ||
| 130 | struct qxl_bo_list { | 133 | struct qxl_bo_list { |
| 131 | struct list_head lhead; | 134 | struct ttm_validate_buffer tv; |
| 132 | struct qxl_bo *bo; | ||
| 133 | }; | ||
| 134 | |||
| 135 | struct qxl_reloc_list { | ||
| 136 | struct list_head bos; | ||
| 137 | }; | 135 | }; |
| 138 | 136 | ||
| 139 | struct qxl_crtc { | 137 | struct qxl_crtc { |
| @@ -195,10 +193,20 @@ enum { | |||
| 195 | struct qxl_release { | 193 | struct qxl_release { |
| 196 | int id; | 194 | int id; |
| 197 | int type; | 195 | int type; |
| 198 | int bo_count; | ||
| 199 | uint32_t release_offset; | 196 | uint32_t release_offset; |
| 200 | uint32_t surface_release_id; | 197 | uint32_t surface_release_id; |
| 201 | struct qxl_bo *bos[QXL_MAX_RES]; | 198 | struct ww_acquire_ctx ticket; |
| 199 | struct list_head bos; | ||
| 200 | }; | ||
| 201 | |||
| 202 | struct qxl_drm_chunk { | ||
| 203 | struct list_head head; | ||
| 204 | struct qxl_bo *bo; | ||
| 205 | }; | ||
| 206 | |||
| 207 | struct qxl_drm_image { | ||
| 208 | struct qxl_bo *bo; | ||
| 209 | struct list_head chunk_list; | ||
| 202 | }; | 210 | }; |
| 203 | 211 | ||
| 204 | struct qxl_fb_image { | 212 | struct qxl_fb_image { |
| @@ -314,6 +322,7 @@ struct qxl_device { | |||
| 314 | struct workqueue_struct *gc_queue; | 322 | struct workqueue_struct *gc_queue; |
| 315 | struct work_struct gc_work; | 323 | struct work_struct gc_work; |
| 316 | 324 | ||
| 325 | struct work_struct fb_work; | ||
| 317 | }; | 326 | }; |
| 318 | 327 | ||
| 319 | /* forward declaration for QXL_INFO_IO */ | 328 | /* forward declaration for QXL_INFO_IO */ |
| @@ -433,12 +442,19 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma); | |||
| 433 | 442 | ||
| 434 | /* qxl image */ | 443 | /* qxl image */ |
| 435 | 444 | ||
| 436 | int qxl_image_create(struct qxl_device *qdev, | 445 | int qxl_image_init(struct qxl_device *qdev, |
| 437 | struct qxl_release *release, | 446 | struct qxl_release *release, |
| 438 | struct qxl_bo **image_bo, | 447 | struct qxl_drm_image *dimage, |
| 439 | const uint8_t *data, | 448 | const uint8_t *data, |
| 440 | int x, int y, int width, int height, | 449 | int x, int y, int width, int height, |
| 441 | int depth, int stride); | 450 | int depth, int stride); |
| 451 | int | ||
| 452 | qxl_image_alloc_objects(struct qxl_device *qdev, | ||
| 453 | struct qxl_release *release, | ||
| 454 | struct qxl_drm_image **image_ptr, | ||
| 455 | int height, int stride); | ||
| 456 | void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage); | ||
| 457 | |||
| 442 | void qxl_update_screen(struct qxl_device *qxl); | 458 | void qxl_update_screen(struct qxl_device *qxl); |
| 443 | 459 | ||
| 444 | /* qxl io operations (qxl_cmd.c) */ | 460 | /* qxl io operations (qxl_cmd.c) */ |
| @@ -459,20 +475,15 @@ int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible | |||
| 459 | void qxl_io_flush_release(struct qxl_device *qdev); | 475 | void qxl_io_flush_release(struct qxl_device *qdev); |
| 460 | void qxl_io_flush_surfaces(struct qxl_device *qdev); | 476 | void qxl_io_flush_surfaces(struct qxl_device *qdev); |
| 461 | 477 | ||
| 462 | int qxl_release_reserve(struct qxl_device *qdev, | ||
| 463 | struct qxl_release *release, bool no_wait); | ||
| 464 | void qxl_release_unreserve(struct qxl_device *qdev, | ||
| 465 | struct qxl_release *release); | ||
| 466 | union qxl_release_info *qxl_release_map(struct qxl_device *qdev, | 478 | union qxl_release_info *qxl_release_map(struct qxl_device *qdev, |
| 467 | struct qxl_release *release); | 479 | struct qxl_release *release); |
| 468 | void qxl_release_unmap(struct qxl_device *qdev, | 480 | void qxl_release_unmap(struct qxl_device *qdev, |
| 469 | struct qxl_release *release, | 481 | struct qxl_release *release, |
| 470 | union qxl_release_info *info); | 482 | union qxl_release_info *info); |
| 471 | /* | 483 | int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo); |
| 472 | * qxl_bo_add_resource. | 484 | int qxl_release_reserve_list(struct qxl_release *release, bool no_intr); |
| 473 | * | 485 | void qxl_release_backoff_reserve_list(struct qxl_release *release); |
| 474 | */ | 486 | void qxl_release_fence_buffer_objects(struct qxl_release *release); |
| 475 | void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource); | ||
| 476 | 487 | ||
| 477 | int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, | 488 | int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, |
| 478 | enum qxl_surface_cmd_type surface_cmd_type, | 489 | enum qxl_surface_cmd_type surface_cmd_type, |
| @@ -481,15 +492,16 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, | |||
| 481 | int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, | 492 | int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, |
| 482 | int type, struct qxl_release **release, | 493 | int type, struct qxl_release **release, |
| 483 | struct qxl_bo **rbo); | 494 | struct qxl_bo **rbo); |
| 484 | int qxl_fence_releaseable(struct qxl_device *qdev, | 495 | |
| 485 | struct qxl_release *release); | ||
| 486 | int | 496 | int |
| 487 | qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release, | 497 | qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release, |
| 488 | uint32_t type, bool interruptible); | 498 | uint32_t type, bool interruptible); |
| 489 | int | 499 | int |
| 490 | qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release, | 500 | qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release, |
| 491 | uint32_t type, bool interruptible); | 501 | uint32_t type, bool interruptible); |
| 492 | int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size, | 502 | int qxl_alloc_bo_reserved(struct qxl_device *qdev, |
| 503 | struct qxl_release *release, | ||
| 504 | unsigned long size, | ||
| 493 | struct qxl_bo **_bo); | 505 | struct qxl_bo **_bo); |
| 494 | /* qxl drawing commands */ | 506 | /* qxl drawing commands */ |
| 495 | 507 | ||
| @@ -510,15 +522,9 @@ void qxl_draw_copyarea(struct qxl_device *qdev, | |||
| 510 | u32 sx, u32 sy, | 522 | u32 sx, u32 sy, |
| 511 | u32 dx, u32 dy); | 523 | u32 dx, u32 dy); |
| 512 | 524 | ||
| 513 | uint64_t | ||
| 514 | qxl_release_alloc(struct qxl_device *qdev, int type, | ||
| 515 | struct qxl_release **ret); | ||
| 516 | |||
| 517 | void qxl_release_free(struct qxl_device *qdev, | 525 | void qxl_release_free(struct qxl_device *qdev, |
| 518 | struct qxl_release *release); | 526 | struct qxl_release *release); |
| 519 | void qxl_release_add_res(struct qxl_device *qdev, | 527 | |
| 520 | struct qxl_release *release, | ||
| 521 | struct qxl_bo *bo); | ||
| 522 | /* used by qxl_debugfs_release */ | 528 | /* used by qxl_debugfs_release */ |
| 523 | struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, | 529 | struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, |
| 524 | uint64_t id); | 530 | uint64_t id); |
| @@ -561,7 +567,7 @@ void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freein | |||
| 561 | int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf); | 567 | int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf); |
| 562 | 568 | ||
| 563 | /* qxl_fence.c */ | 569 | /* qxl_fence.c */ |
| 564 | int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id); | 570 | void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id); |
| 565 | int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id); | 571 | int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id); |
| 566 | int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence); | 572 | int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence); |
| 567 | void qxl_fence_fini(struct qxl_fence *qfence); | 573 | void qxl_fence_fini(struct qxl_fence *qfence); |
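With struct qxl_bo_list reduced to a wrapper around ttm_validate_buffer, and struct qxl_release carrying a ww_acquire_ctx ticket plus a plain list head, the release's BO list plugs directly into the ttm_execbuf_util reservation helpers. A short illustrative walker (not a driver function) showing how a list entry maps back to the qxl_bo through the embedded ttm_buffer_object:

	/* Illustrative only: recover each qxl_bo behind the release's
	 * validate list via the embedded ttm_buffer_object. */
	static void qxl_release_dump_bos(struct qxl_device *qdev,
					 struct qxl_release *release)
	{
		struct qxl_bo_list *entry;

		list_for_each_entry(entry, &release->bos, tv.head) {
			struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

			QXL_INFO(qdev, "release %d holds bo %p\n",
				 release->id, bo);
		}
	}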
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index 76f39d88d684..88722f233430 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c | |||
| @@ -37,12 +37,29 @@ | |||
| 37 | 37 | ||
| 38 | #define QXL_DIRTY_DELAY (HZ / 30) | 38 | #define QXL_DIRTY_DELAY (HZ / 30) |
| 39 | 39 | ||
| 40 | #define QXL_FB_OP_FILLRECT 1 | ||
| 41 | #define QXL_FB_OP_COPYAREA 2 | ||
| 42 | #define QXL_FB_OP_IMAGEBLIT 3 | ||
| 43 | |||
| 44 | struct qxl_fb_op { | ||
| 45 | struct list_head head; | ||
| 46 | int op_type; | ||
| 47 | union { | ||
| 48 | struct fb_fillrect fr; | ||
| 49 | struct fb_copyarea ca; | ||
| 50 | struct fb_image ib; | ||
| 51 | } op; | ||
| 52 | void *img_data; | ||
| 53 | }; | ||
| 54 | |||
| 40 | struct qxl_fbdev { | 55 | struct qxl_fbdev { |
| 41 | struct drm_fb_helper helper; | 56 | struct drm_fb_helper helper; |
| 42 | struct qxl_framebuffer qfb; | 57 | struct qxl_framebuffer qfb; |
| 43 | struct list_head fbdev_list; | 58 | struct list_head fbdev_list; |
| 44 | struct qxl_device *qdev; | 59 | struct qxl_device *qdev; |
| 45 | 60 | ||
| 61 | spinlock_t delayed_ops_lock; | ||
| 62 | struct list_head delayed_ops; | ||
| 46 | void *shadow; | 63 | void *shadow; |
| 47 | int size; | 64 | int size; |
| 48 | 65 | ||
| @@ -164,8 +181,69 @@ static struct fb_deferred_io qxl_defio = { | |||
| 164 | .deferred_io = qxl_deferred_io, | 181 | .deferred_io = qxl_deferred_io, |
| 165 | }; | 182 | }; |
| 166 | 183 | ||
| 167 | static void qxl_fb_fillrect(struct fb_info *info, | 184 | static void qxl_fb_delayed_fillrect(struct qxl_fbdev *qfbdev, |
| 168 | const struct fb_fillrect *fb_rect) | 185 | const struct fb_fillrect *fb_rect) |
| 186 | { | ||
| 187 | struct qxl_fb_op *op; | ||
| 188 | unsigned long flags; | ||
| 189 | |||
| 190 | op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN); | ||
| 191 | if (!op) | ||
| 192 | return; | ||
| 193 | |||
| 194 | op->op.fr = *fb_rect; | ||
| 195 | op->img_data = NULL; | ||
| 196 | op->op_type = QXL_FB_OP_FILLRECT; | ||
| 197 | |||
| 198 | spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); | ||
| 199 | list_add_tail(&op->head, &qfbdev->delayed_ops); | ||
| 200 | spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); | ||
| 201 | } | ||
| 202 | |||
| 203 | static void qxl_fb_delayed_copyarea(struct qxl_fbdev *qfbdev, | ||
| 204 | const struct fb_copyarea *fb_copy) | ||
| 205 | { | ||
| 206 | struct qxl_fb_op *op; | ||
| 207 | unsigned long flags; | ||
| 208 | |||
| 209 | op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN); | ||
| 210 | if (!op) | ||
| 211 | return; | ||
| 212 | |||
| 213 | op->op.ca = *fb_copy; | ||
| 214 | op->img_data = NULL; | ||
| 215 | op->op_type = QXL_FB_OP_COPYAREA; | ||
| 216 | |||
| 217 | spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); | ||
| 218 | list_add_tail(&op->head, &qfbdev->delayed_ops); | ||
| 219 | spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); | ||
| 220 | } | ||
| 221 | |||
| 222 | static void qxl_fb_delayed_imageblit(struct qxl_fbdev *qfbdev, | ||
| 223 | const struct fb_image *fb_image) | ||
| 224 | { | ||
| 225 | struct qxl_fb_op *op; | ||
| 226 | unsigned long flags; | ||
| 227 | uint32_t size = fb_image->width * fb_image->height * (fb_image->depth >= 8 ? fb_image->depth / 8 : 1); | ||
| 228 | |||
| 229 | op = kmalloc(sizeof(struct qxl_fb_op) + size, GFP_ATOMIC | __GFP_NOWARN); | ||
| 230 | if (!op) | ||
| 231 | return; | ||
| 232 | |||
| 233 | op->op.ib = *fb_image; | ||
| 234 | op->img_data = (void *)(op + 1); | ||
| 235 | op->op_type = QXL_FB_OP_IMAGEBLIT; | ||
| 236 | |||
| 237 | memcpy(op->img_data, fb_image->data, size); | ||
| 238 | |||
| 239 | op->op.ib.data = op->img_data; | ||
| 240 | spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); | ||
| 241 | list_add_tail(&op->head, &qfbdev->delayed_ops); | ||
| 242 | spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); | ||
| 243 | } | ||
| 244 | |||
| 245 | static void qxl_fb_fillrect_internal(struct fb_info *info, | ||
| 246 | const struct fb_fillrect *fb_rect) | ||
| 169 | { | 247 | { |
| 170 | struct qxl_fbdev *qfbdev = info->par; | 248 | struct qxl_fbdev *qfbdev = info->par; |
| 171 | struct qxl_device *qdev = qfbdev->qdev; | 249 | struct qxl_device *qdev = qfbdev->qdev; |
| @@ -203,17 +281,28 @@ static void qxl_fb_fillrect(struct fb_info *info, | |||
| 203 | qxl_draw_fill_rec.rect = rect; | 281 | qxl_draw_fill_rec.rect = rect; |
| 204 | qxl_draw_fill_rec.color = color; | 282 | qxl_draw_fill_rec.color = color; |
| 205 | qxl_draw_fill_rec.rop = rop; | 283 | qxl_draw_fill_rec.rop = rop; |
| 284 | |||
| 285 | qxl_draw_fill(&qxl_draw_fill_rec); | ||
| 286 | } | ||
| 287 | |||
| 288 | static void qxl_fb_fillrect(struct fb_info *info, | ||
| 289 | const struct fb_fillrect *fb_rect) | ||
| 290 | { | ||
| 291 | struct qxl_fbdev *qfbdev = info->par; | ||
| 292 | struct qxl_device *qdev = qfbdev->qdev; | ||
| 293 | |||
| 206 | if (!drm_can_sleep()) { | 294 | if (!drm_can_sleep()) { |
| 207 | qxl_io_log(qdev, | 295 | qxl_fb_delayed_fillrect(qfbdev, fb_rect); |
| 208 | "%s: TODO use RCU, mysterious locks with spin_lock\n", | 296 | schedule_work(&qdev->fb_work); |
| 209 | __func__); | ||
| 210 | return; | 297 | return; |
| 211 | } | 298 | } |
| 212 | qxl_draw_fill(&qxl_draw_fill_rec); | 299 | /* make sure any previous work is done */ |
| 300 | flush_work(&qdev->fb_work); | ||
| 301 | qxl_fb_fillrect_internal(info, fb_rect); | ||
| 213 | } | 302 | } |
| 214 | 303 | ||
| 215 | static void qxl_fb_copyarea(struct fb_info *info, | 304 | static void qxl_fb_copyarea_internal(struct fb_info *info, |
| 216 | const struct fb_copyarea *region) | 305 | const struct fb_copyarea *region) |
| 217 | { | 306 | { |
| 218 | struct qxl_fbdev *qfbdev = info->par; | 307 | struct qxl_fbdev *qfbdev = info->par; |
| 219 | 308 | ||
| @@ -223,37 +312,89 @@ static void qxl_fb_copyarea(struct fb_info *info, | |||
| 223 | region->dx, region->dy); | 312 | region->dx, region->dy); |
| 224 | } | 313 | } |
| 225 | 314 | ||
| 315 | static void qxl_fb_copyarea(struct fb_info *info, | ||
| 316 | const struct fb_copyarea *region) | ||
| 317 | { | ||
| 318 | struct qxl_fbdev *qfbdev = info->par; | ||
| 319 | struct qxl_device *qdev = qfbdev->qdev; | ||
| 320 | |||
| 321 | if (!drm_can_sleep()) { | ||
| 322 | qxl_fb_delayed_copyarea(qfbdev, region); | ||
| 323 | schedule_work(&qdev->fb_work); | ||
| 324 | return; | ||
| 325 | } | ||
| 326 | /* make sure any previous work is done */ | ||
| 327 | flush_work(&qdev->fb_work); | ||
| 328 | qxl_fb_copyarea_internal(info, region); | ||
| 329 | } | ||
| 330 | |||
| 226 | static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image) | 331 | static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image) |
| 227 | { | 332 | { |
| 228 | qxl_draw_opaque_fb(qxl_fb_image, 0); | 333 | qxl_draw_opaque_fb(qxl_fb_image, 0); |
| 229 | } | 334 | } |
| 230 | 335 | ||
| 336 | static void qxl_fb_imageblit_internal(struct fb_info *info, | ||
| 337 | const struct fb_image *image) | ||
| 338 | { | ||
| 339 | struct qxl_fbdev *qfbdev = info->par; | ||
| 340 | struct qxl_fb_image qxl_fb_image; | ||
| 341 | |||
| 342 | /* ensure proper order of rendering operations - TODO: must do this | ||
| 343 | * for everything. */ | ||
| 344 | qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image); | ||
| 345 | qxl_fb_imageblit_safe(&qxl_fb_image); | ||
| 346 | } | ||
| 347 | |||
| 231 | static void qxl_fb_imageblit(struct fb_info *info, | 348 | static void qxl_fb_imageblit(struct fb_info *info, |
| 232 | const struct fb_image *image) | 349 | const struct fb_image *image) |
| 233 | { | 350 | { |
| 234 | struct qxl_fbdev *qfbdev = info->par; | 351 | struct qxl_fbdev *qfbdev = info->par; |
| 235 | struct qxl_device *qdev = qfbdev->qdev; | 352 | struct qxl_device *qdev = qfbdev->qdev; |
| 236 | struct qxl_fb_image qxl_fb_image; | ||
| 237 | 353 | ||
| 238 | if (!drm_can_sleep()) { | 354 | if (!drm_can_sleep()) { |
| 239 | /* we cannot do any ttm_bo allocation since that will fail on | 355 | qxl_fb_delayed_imageblit(qfbdev, image); |
| 240 | * ioremap_wc..__get_vm_area_node, so queue the work item | 356 | schedule_work(&qdev->fb_work); |
| 241 | * instead This can happen from printk inside an interrupt | ||
| 242 | * context, i.e.: smp_apic_timer_interrupt..check_cpu_stall */ | ||
| 243 | qxl_io_log(qdev, | ||
| 244 | "%s: TODO use RCU, mysterious locks with spin_lock\n", | ||
| 245 | __func__); | ||
| 246 | return; | 357 | return; |
| 247 | } | 358 | } |
| 359 | /* make sure any previous work is done */ | ||
| 360 | flush_work(&qdev->fb_work); | ||
| 361 | qxl_fb_imageblit_internal(info, image); | ||
| 362 | } | ||
| 248 | 363 | ||
| 249 | /* ensure proper order of rendering operations - TODO: must do this | 364 | static void qxl_fb_work(struct work_struct *work) |
| 250 | * for everything. */ | 365 | { |
| 251 | qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image); | 366 | struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work); |
| 252 | qxl_fb_imageblit_safe(&qxl_fb_image); | 367 | unsigned long flags; |
| 368 | struct qxl_fb_op *entry, *tmp; | ||
| 369 | struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev; | ||
| 370 | |||
| 371 | /* since the irq context only adds entries to the end of the | ||
| 372 | list, dropping the lock inside the loop is fine, as an entry isn't | ||
| 373 | modified by the operation code */ | ||
| 374 | spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); | ||
| 375 | list_for_each_entry_safe(entry, tmp, &qfbdev->delayed_ops, head) { | ||
| 376 | spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); | ||
| 377 | switch (entry->op_type) { | ||
| 378 | case QXL_FB_OP_FILLRECT: | ||
| 379 | qxl_fb_fillrect_internal(qfbdev->helper.fbdev, &entry->op.fr); | ||
| 380 | break; | ||
| 381 | case QXL_FB_OP_COPYAREA: | ||
| 382 | qxl_fb_copyarea_internal(qfbdev->helper.fbdev, &entry->op.ca); | ||
| 383 | break; | ||
| 384 | case QXL_FB_OP_IMAGEBLIT: | ||
| 385 | qxl_fb_imageblit_internal(qfbdev->helper.fbdev, &entry->op.ib); | ||
| 386 | break; | ||
| 387 | } | ||
| 388 | spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); | ||
| 389 | list_del(&entry->head); | ||
| 390 | kfree(entry); | ||
| 391 | } | ||
| 392 | spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); | ||
| 253 | } | 393 | } |
| 254 | 394 | ||
| 255 | int qxl_fb_init(struct qxl_device *qdev) | 395 | int qxl_fb_init(struct qxl_device *qdev) |
| 256 | { | 396 | { |
| 397 | INIT_WORK(&qdev->fb_work, qxl_fb_work); | ||
| 257 | return 0; | 398 | return 0; |
| 258 | } | 399 | } |
| 259 | 400 | ||
| @@ -536,7 +677,8 @@ int qxl_fbdev_init(struct qxl_device *qdev) | |||
| 536 | qfbdev->qdev = qdev; | 677 | qfbdev->qdev = qdev; |
| 537 | qdev->mode_info.qfbdev = qfbdev; | 678 | qdev->mode_info.qfbdev = qfbdev; |
| 538 | qfbdev->helper.funcs = &qxl_fb_helper_funcs; | 679 | qfbdev->helper.funcs = &qxl_fb_helper_funcs; |
| 539 | 680 | spin_lock_init(&qfbdev->delayed_ops_lock); | |
| 681 | INIT_LIST_HEAD(&qfbdev->delayed_ops); | ||
| 540 | ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper, | 682 | ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper, |
| 541 | qxl_num_crtc /* num_crtc - QXL supports just 1 */, | 683 | qxl_num_crtc /* num_crtc - QXL supports just 1 */, |
| 542 | QXLFB_CONN_LIMIT); | 684 | QXLFB_CONN_LIMIT); |
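All three fbdev entry points above now share one guard: in atomic context the operation is snapshotted onto the delayed-ops list and the worker is kicked; in sleeping context the worker is flushed first so deferred operations never render after a direct one. A condensed sketch of the guard, where qxl_fb_delayed_queue() and qxl_fb_op_internal() are placeholder names for the per-operation helper pairs:

	/* Condensed guard shared by qxl_fb_fillrect/copyarea/imageblit;
	 * the two helpers named below are placeholders for the real
	 * per-operation pairs above. */
	static void qxl_fb_op_entry(struct fb_info *info, const void *args)
	{
		struct qxl_fbdev *qfbdev = info->par;
		struct qxl_device *qdev = qfbdev->qdev;

		if (!drm_can_sleep()) {
			/* atomic context, e.g. a printk from IRQ: defer */
			qxl_fb_delayed_queue(qfbdev, args);
			schedule_work(&qdev->fb_work);
			return;
		}
		/* sleeping context: drain deferred ops to preserve order */
		flush_work(&qdev->fb_work);
		qxl_fb_op_internal(info, args);
	}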
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c index 63c6715ad385..ae59e91cfb9a 100644 --- a/drivers/gpu/drm/qxl/qxl_fence.c +++ b/drivers/gpu/drm/qxl/qxl_fence.c | |||
| @@ -49,17 +49,11 @@ | |||
| 49 | 49 | ||
| 50 | For some reason every so often qxl hw fails to release, things go wrong. | 50 | For some reason every so often qxl hw fails to release, things go wrong. |
| 51 | */ | 51 | */ |
| 52 | 52 | /* must be called with the fence lock held */ | |
| 53 | 53 | void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id) | |
| 54 | int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id) | ||
| 55 | { | 54 | { |
| 56 | struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence); | ||
| 57 | |||
| 58 | spin_lock(&bo->tbo.bdev->fence_lock); | ||
| 59 | radix_tree_insert(&qfence->tree, rel_id, qfence); | 55 | radix_tree_insert(&qfence->tree, rel_id, qfence); |
| 60 | qfence->num_active_releases++; | 56 | qfence->num_active_releases++; |
| 61 | spin_unlock(&bo->tbo.bdev->fence_lock); | ||
| 62 | return 0; | ||
| 63 | } | 57 | } |
| 64 | 58 | ||
| 65 | int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id) | 59 | int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id) |
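The _locked suffix moves the fence_lock acquisition out to the caller, so a single lock round trip can cover every buffer object on a release. A caller-side sketch of the contract, using the same bdev fence_lock the removed body took:

	/* Caller-side sketch: qxl_fence_add_release_locked() must run
	 * under bdev->fence_lock; callers can batch many adds under
	 * one acquisition. */
	static void qxl_fence_add_one(struct qxl_bo *bo, uint32_t rel_id)
	{
		struct ttm_bo_device *bdev = bo->tbo.bdev;

		spin_lock(&bdev->fence_lock);
		qxl_fence_add_release_locked(&bo->fence, rel_id);
		spin_unlock(&bdev->fence_lock);
	}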
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c index a235693aabba..25e1777fb0a2 100644 --- a/drivers/gpu/drm/qxl/qxl_gem.c +++ b/drivers/gpu/drm/qxl/qxl_gem.c | |||
| @@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size, | |||
| 55 | /* At least align on page size */ | 55 | /* At least align on page size */ |
| 56 | if (alignment < PAGE_SIZE) | 56 | if (alignment < PAGE_SIZE) |
| 57 | alignment = PAGE_SIZE; | 57 | alignment = PAGE_SIZE; |
| 58 | r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo); | 58 | r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo); |
| 59 | if (r) { | 59 | if (r) { |
| 60 | if (r != -ERESTARTSYS) | 60 | if (r != -ERESTARTSYS) |
| 61 | DRM_ERROR( | 61 | DRM_ERROR( |
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c index cf856206996b..7fbcc35e8ad3 100644 --- a/drivers/gpu/drm/qxl/qxl_image.c +++ b/drivers/gpu/drm/qxl/qxl_image.c | |||
| @@ -30,31 +30,100 @@ | |||
| 30 | #include "qxl_object.h" | 30 | #include "qxl_object.h" |
| 31 | 31 | ||
| 32 | static int | 32 | static int |
| 33 | qxl_image_create_helper(struct qxl_device *qdev, | 33 | qxl_allocate_chunk(struct qxl_device *qdev, |
| 34 | struct qxl_release *release, | ||
| 35 | struct qxl_drm_image *image, | ||
| 36 | unsigned int chunk_size) | ||
| 37 | { | ||
| 38 | struct qxl_drm_chunk *chunk; | ||
| 39 | int ret; | ||
| 40 | |||
| 41 | chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL); | ||
| 42 | if (!chunk) | ||
| 43 | return -ENOMEM; | ||
| 44 | |||
| 45 | ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo); | ||
| 46 | if (ret) { | ||
| 47 | kfree(chunk); | ||
| 48 | return ret; | ||
| 49 | } | ||
| 50 | |||
| 51 | list_add_tail(&chunk->head, &image->chunk_list); | ||
| 52 | return 0; | ||
| 53 | } | ||
| 54 | |||
| 55 | int | ||
| 56 | qxl_image_alloc_objects(struct qxl_device *qdev, | ||
| 34 | struct qxl_release *release, | 57 | struct qxl_release *release, |
| 35 | struct qxl_bo **image_bo, | 58 | struct qxl_drm_image **image_ptr, |
| 36 | const uint8_t *data, | 59 | int height, int stride) |
| 37 | int width, int height, | 60 | { |
| 38 | int depth, unsigned int hash, | 61 | struct qxl_drm_image *image; |
| 39 | int stride) | 62 | int ret; |
| 63 | |||
| 64 | image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL); | ||
| 65 | if (!image) | ||
| 66 | return -ENOMEM; | ||
| 67 | |||
| 68 | INIT_LIST_HEAD(&image->chunk_list); | ||
| 69 | |||
| 70 | ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo); | ||
| 71 | if (ret) { | ||
| 72 | kfree(image); | ||
| 73 | return ret; | ||
| 74 | } | ||
| 75 | |||
| 76 | ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height); | ||
| 77 | if (ret) { | ||
| 78 | qxl_bo_unref(&image->bo); | ||
| 79 | kfree(image); | ||
| 80 | return ret; | ||
| 81 | } | ||
| 82 | *image_ptr = image; | ||
| 83 | return 0; | ||
| 84 | } | ||
| 85 | |||
| 86 | void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage) | ||
| 40 | { | 87 | { |
| 88 | struct qxl_drm_chunk *chunk, *tmp; | ||
| 89 | |||
| 90 | list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) { | ||
| 91 | qxl_bo_unref(&chunk->bo); | ||
| 92 | kfree(chunk); | ||
| 93 | } | ||
| 94 | |||
| 95 | qxl_bo_unref(&dimage->bo); | ||
| 96 | kfree(dimage); | ||
| 97 | } | ||
| 98 | |||
| 99 | static int | ||
| 100 | qxl_image_init_helper(struct qxl_device *qdev, | ||
| 101 | struct qxl_release *release, | ||
| 102 | struct qxl_drm_image *dimage, | ||
| 103 | const uint8_t *data, | ||
| 104 | int width, int height, | ||
| 105 | int depth, unsigned int hash, | ||
| 106 | int stride) | ||
| 107 | { | ||
| 108 | struct qxl_drm_chunk *drv_chunk; | ||
| 41 | struct qxl_image *image; | 109 | struct qxl_image *image; |
| 42 | struct qxl_data_chunk *chunk; | 110 | struct qxl_data_chunk *chunk; |
| 43 | int i; | 111 | int i; |
| 44 | int chunk_stride; | 112 | int chunk_stride; |
| 45 | int linesize = width * depth / 8; | 113 | int linesize = width * depth / 8; |
| 46 | struct qxl_bo *chunk_bo; | 114 | struct qxl_bo *chunk_bo, *image_bo; |
| 47 | int ret; | ||
| 48 | void *ptr; | 115 | void *ptr; |
| 49 | /* Chunk */ | 116 | /* Chunk */ |
| 50 | /* FIXME: Check integer overflow */ | 117 | /* FIXME: Check integer overflow */ |
| 51 | /* TODO: variable number of chunks */ | 118 | /* TODO: variable number of chunks */ |
| 119 | |||
| 120 | drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head); | ||
| 121 | |||
| 122 | chunk_bo = drv_chunk->bo; | ||
| 52 | chunk_stride = stride; /* TODO: should use linesize, but it renders | 123 | chunk_stride = stride; /* TODO: should use linesize, but it renders |
| 53 | wrong (check the bitmaps are sent correctly | 124 | wrong (check the bitmaps are sent correctly |
| 54 | first) */ | 125 | first) */ |
| 55 | ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride, | 126 | |
| 56 | &chunk_bo); | ||
| 57 | |||
| 58 | ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0); | 127 | ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0); |
| 59 | chunk = ptr; | 128 | chunk = ptr; |
| 60 | chunk->data_size = height * chunk_stride; | 129 | chunk->data_size = height * chunk_stride; |
| @@ -102,7 +171,6 @@ qxl_image_create_helper(struct qxl_device *qdev, | |||
| 102 | while (remain > 0) { | 171 | while (remain > 0) { |
| 103 | page_base = out_offset & PAGE_MASK; | 172 | page_base = out_offset & PAGE_MASK; |
| 104 | page_offset = offset_in_page(out_offset); | 173 | page_offset = offset_in_page(out_offset); |
| 105 | |||
| 106 | size = min((int)(PAGE_SIZE - page_offset), remain); | 174 | size = min((int)(PAGE_SIZE - page_offset), remain); |
| 107 | 175 | ||
| 108 | ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base); | 176 | ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base); |
| @@ -116,14 +184,10 @@ qxl_image_create_helper(struct qxl_device *qdev, | |||
| 116 | } | 184 | } |
| 117 | } | 185 | } |
| 118 | } | 186 | } |
| 119 | |||
| 120 | |||
| 121 | qxl_bo_kunmap(chunk_bo); | 187 | qxl_bo_kunmap(chunk_bo); |
| 122 | 188 | ||
| 123 | /* Image */ | 189 | image_bo = dimage->bo; |
| 124 | ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo); | 190 | ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0); |
| 125 | |||
| 126 | ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0); | ||
| 127 | image = ptr; | 191 | image = ptr; |
| 128 | 192 | ||
| 129 | image->descriptor.id = 0; | 193 | image->descriptor.id = 0; |
| @@ -154,23 +218,20 @@ qxl_image_create_helper(struct qxl_device *qdev, | |||
| 154 | image->u.bitmap.stride = chunk_stride; | 218 | image->u.bitmap.stride = chunk_stride; |
| 155 | image->u.bitmap.palette = 0; | 219 | image->u.bitmap.palette = 0; |
| 156 | image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0); | 220 | image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0); |
| 157 | qxl_release_add_res(qdev, release, chunk_bo); | ||
| 158 | qxl_bo_unreserve(chunk_bo); | ||
| 159 | qxl_bo_unref(&chunk_bo); | ||
| 160 | 221 | ||
| 161 | qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr); | 222 | qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); |
| 162 | 223 | ||
| 163 | return 0; | 224 | return 0; |
| 164 | } | 225 | } |
| 165 | 226 | ||
| 166 | int qxl_image_create(struct qxl_device *qdev, | 227 | int qxl_image_init(struct qxl_device *qdev, |
| 167 | struct qxl_release *release, | 228 | struct qxl_release *release, |
| 168 | struct qxl_bo **image_bo, | 229 | struct qxl_drm_image *dimage, |
| 169 | const uint8_t *data, | 230 | const uint8_t *data, |
| 170 | int x, int y, int width, int height, | 231 | int x, int y, int width, int height, |
| 171 | int depth, int stride) | 232 | int depth, int stride) |
| 172 | { | 233 | { |
| 173 | data += y * stride + x * (depth / 8); | 234 | data += y * stride + x * (depth / 8); |
| 174 | return qxl_image_create_helper(qdev, release, image_bo, data, | 235 | return qxl_image_init_helper(qdev, release, dimage, data, |
| 175 | width, height, depth, 0, stride); | 236 | width, height, depth, 0, stride); |
| 176 | } | 237 | } |
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 27f45e49250d..6de33563d6f1 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c | |||
| @@ -68,55 +68,60 @@ static int qxl_map_ioctl(struct drm_device *dev, void *data, | |||
| 68 | &qxl_map->offset); | 68 | &qxl_map->offset); |
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | struct qxl_reloc_info { | ||
| 72 | int type; | ||
| 73 | struct qxl_bo *dst_bo; | ||
| 74 | uint32_t dst_offset; | ||
| 75 | struct qxl_bo *src_bo; | ||
| 76 | int src_offset; | ||
| 77 | }; | ||
| 78 | |||
| 71 | /* | 79 | /* |
| 72 | * dst must be validated, i.e. whole bo on vram/surfaces ram (right now all bo's | 80 | * dst must be validated, i.e. whole bo on vram/surfaces ram (right now all bo's |
| 73 | * are on vram). | 81 | * are on vram). |
| 74 | * *(dst + dst_off) = qxl_bo_physical_address(src, src_off) | 82 | * *(dst + dst_off) = qxl_bo_physical_address(src, src_off) |
| 75 | */ | 83 | */ |
| 76 | static void | 84 | static void |
| 77 | apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, | 85 | apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) |
| 78 | struct qxl_bo *src, uint64_t src_off) | ||
| 79 | { | 86 | { |
| 80 | void *reloc_page; | 87 | void *reloc_page; |
| 81 | 88 | reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); | |
| 82 | reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); | 89 | *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, |
| 83 | *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, | 90 | info->src_bo, |
| 84 | src, src_off); | 91 | info->src_offset); |
| 85 | qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); | 92 | qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page); |
| 86 | } | 93 | } |
| 87 | 94 | ||
| 88 | static void | 95 | static void |
| 89 | apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, | 96 | apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) |
| 90 | struct qxl_bo *src) | ||
| 91 | { | 97 | { |
| 92 | uint32_t id = 0; | 98 | uint32_t id = 0; |
| 93 | void *reloc_page; | 99 | void *reloc_page; |
| 94 | 100 | ||
| 95 | if (src && !src->is_primary) | 101 | if (info->src_bo && !info->src_bo->is_primary) |
| 96 | id = src->surface_id; | 102 | id = info->src_bo->surface_id; |
| 97 | 103 | ||
| 98 | reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); | 104 | reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); |
| 99 | *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id; | 105 | *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id; |
| 100 | qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); | 106 | qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page); |
| 101 | } | 107 | } |
| 102 | 108 | ||
| 103 | /* return holding the reference to this object */ | 109 | /* return holding the reference to this object */ |
| 104 | static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, | 110 | static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, |
| 105 | struct drm_file *file_priv, uint64_t handle, | 111 | struct drm_file *file_priv, uint64_t handle, |
| 106 | struct qxl_reloc_list *reloc_list) | 112 | struct qxl_release *release) |
| 107 | { | 113 | { |
| 108 | struct drm_gem_object *gobj; | 114 | struct drm_gem_object *gobj; |
| 109 | struct qxl_bo *qobj; | 115 | struct qxl_bo *qobj; |
| 110 | int ret; | 116 | int ret; |
| 111 | 117 | ||
| 112 | gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle); | 118 | gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle); |
| 113 | if (!gobj) { | 119 | if (!gobj) |
| 114 | DRM_ERROR("bad bo handle %lld\n", handle); | ||
| 115 | return NULL; | 120 | return NULL; |
| 116 | } | 121 | |
| 117 | qobj = gem_to_qxl_bo(gobj); | 122 | qobj = gem_to_qxl_bo(gobj); |
| 118 | 123 | ||
| 119 | ret = qxl_bo_list_add(reloc_list, qobj); | 124 | ret = qxl_release_list_add(release, qobj); |
| 120 | if (ret) | 125 | if (ret) |
| 121 | return NULL; | 126 | return NULL; |
| 122 | 127 | ||
| @@ -129,151 +134,177 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, | |||
| 129 | * However, the command as passed from user space must *not* contain the initial | 134 | * However, the command as passed from user space must *not* contain the initial |
| 130 | * QXLReleaseInfo struct (first XXX bytes) | 135 | * QXLReleaseInfo struct (first XXX bytes) |
| 131 | */ | 136 | */ |
| 132 | static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, | 137 | static int qxl_process_single_command(struct qxl_device *qdev, |
| 133 | struct drm_file *file_priv) | 138 | struct drm_qxl_command *cmd, |
| 139 | struct drm_file *file_priv) | ||
| 134 | { | 140 | { |
| 135 | struct qxl_device *qdev = dev->dev_private; | 141 | struct qxl_reloc_info *reloc_info; |
| 136 | struct drm_qxl_execbuffer *execbuffer = data; | 142 | int release_type; |
| 137 | struct drm_qxl_command user_cmd; | 143 | struct qxl_release *release; |
| 138 | int cmd_num; | 144 | struct qxl_bo *cmd_bo; |
| 139 | struct qxl_bo *reloc_src_bo; | ||
| 140 | struct qxl_bo *reloc_dst_bo; | ||
| 141 | struct drm_qxl_reloc reloc; | ||
| 142 | void *fb_cmd; | 145 | void *fb_cmd; |
| 143 | int i, ret; | 146 | int i, j, ret, num_relocs; |
| 144 | struct qxl_reloc_list reloc_list; | ||
| 145 | int unwritten; | 147 | int unwritten; |
| 146 | uint32_t reloc_dst_offset; | ||
| 147 | INIT_LIST_HEAD(&reloc_list.bos); | ||
| 148 | 148 | ||
| 149 | for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) { | 149 | switch (cmd->type) { |
| 150 | struct qxl_release *release; | 150 | case QXL_CMD_DRAW: |
| 151 | struct qxl_bo *cmd_bo; | 151 | release_type = QXL_RELEASE_DRAWABLE; |
| 152 | int release_type; | 152 | break; |
| 153 | struct drm_qxl_command *commands = | 153 | case QXL_CMD_SURFACE: |
| 154 | (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; | 154 | case QXL_CMD_CURSOR: |
| 155 | default: | ||
| 156 | DRM_DEBUG("Only draw commands in execbuffers\n"); | ||
| 157 | return -EINVAL; | ||
| 158 | break; | ||
| 159 | } | ||
| 155 | 160 | ||
| 156 | if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num], | 161 | if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info)) |
| 157 | sizeof(user_cmd))) | 162 | return -EINVAL; |
| 158 | return -EFAULT; | ||
| 159 | switch (user_cmd.type) { | ||
| 160 | case QXL_CMD_DRAW: | ||
| 161 | release_type = QXL_RELEASE_DRAWABLE; | ||
| 162 | break; | ||
| 163 | case QXL_CMD_SURFACE: | ||
| 164 | case QXL_CMD_CURSOR: | ||
| 165 | default: | ||
| 166 | DRM_DEBUG("Only draw commands in execbuffers\n"); | ||
| 167 | return -EINVAL; | ||
| 168 | break; | ||
| 169 | } | ||
| 170 | 163 | ||
| 171 | if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info)) | 164 | if (!access_ok(VERIFY_READ, |
| 172 | return -EINVAL; | 165 | (void *)(unsigned long)cmd->command, |
| 166 | cmd->command_size)) | ||
| 167 | return -EFAULT; | ||
| 173 | 168 | ||
| 174 | if (!access_ok(VERIFY_READ, | 169 | reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL); |
| 175 | (void *)(unsigned long)user_cmd.command, | 170 | if (!reloc_info) |
| 176 | user_cmd.command_size)) | 171 | return -ENOMEM; |
| 177 | return -EFAULT; | ||
| 178 | 172 | ||
| 179 | ret = qxl_alloc_release_reserved(qdev, | 173 | ret = qxl_alloc_release_reserved(qdev, |
| 180 | sizeof(union qxl_release_info) + | 174 | sizeof(union qxl_release_info) + |
| 181 | user_cmd.command_size, | 175 | cmd->command_size, |
| 182 | release_type, | 176 | release_type, |
| 183 | &release, | 177 | &release, |
| 184 | &cmd_bo); | 178 | &cmd_bo); |
| 185 | if (ret) | 179 | if (ret) |
| 186 | return ret; | 180 | goto out_free_reloc; |
| 187 | 181 | ||
| 188 | /* TODO copy slow path code from i915 */ | 182 | /* TODO copy slow path code from i915 */ |
| 189 | fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); | 183 | fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); |
| 190 | unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size); | 184 | unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size); |
| 191 | 185 | ||
| 192 | { | 186 | { |
| 193 | struct qxl_drawable *draw = fb_cmd; | 187 | struct qxl_drawable *draw = fb_cmd; |
| 188 | draw->mm_time = qdev->rom->mm_clock; | ||
| 189 | } | ||
| 194 | 190 | ||
| 195 | draw->mm_time = qdev->rom->mm_clock; | 191 | qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd); |
| 196 | } | 192 | if (unwritten) { |
| 197 | qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd); | 193 | DRM_ERROR("got unwritten %d\n", unwritten); |
| 198 | if (unwritten) { | 194 | ret = -EFAULT; |
| 199 | DRM_ERROR("got unwritten %d\n", unwritten); | 195 | goto out_free_release; |
| 200 | qxl_release_unreserve(qdev, release); | 196 | } |
| 201 | qxl_release_free(qdev, release); | 197 | |
| 202 | return -EFAULT; | 198 | /* fill out reloc info structs */ |
| 199 | num_relocs = 0; | ||
| 200 | for (i = 0; i < cmd->relocs_num; ++i) { | ||
| 201 | struct drm_qxl_reloc reloc; | ||
| 202 | |||
| 203 | if (DRM_COPY_FROM_USER(&reloc, | ||
| 204 | &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i], | ||
| 205 | sizeof(reloc))) { | ||
| 206 | ret = -EFAULT; | ||
| 207 | goto out_free_bos; | ||
| 203 | } | 208 | } |
| 204 | 209 | ||
| 205 | for (i = 0 ; i < user_cmd.relocs_num; ++i) { | 210 | /* add the bos to the list of bos to validate - |
| 206 | if (DRM_COPY_FROM_USER(&reloc, | 211 | need to validate first then process relocs? */ |
| 207 | &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i], | 212 | if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) { |
| 208 | sizeof(reloc))) { | 213 | DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type); | ||
| 209 | qxl_bo_list_unreserve(&reloc_list, true); | ||
| 210 | qxl_release_unreserve(qdev, release); | ||
| 211 | qxl_release_free(qdev, release); | ||
| 212 | return -EFAULT; | ||
| 213 | } | ||
| 214 | 214 | ||
| 215 | /* add the bos to the list of bos to validate - | 215 | ret = -EINVAL; |
| 216 | need to validate first then process relocs? */ | 216 | goto out_free_bos; |
| 217 | if (reloc.dst_handle) { | 217 | } |
| 218 | reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv, | 218 | reloc_info[i].type = reloc.reloc_type; |
| 219 | reloc.dst_handle, &reloc_list); | 219 | |
| 220 | if (!reloc_dst_bo) { | 220 | if (reloc.dst_handle) { |
| 221 | qxl_bo_list_unreserve(&reloc_list, true); | 221 | reloc_info[i].dst_bo = qxlhw_handle_to_bo(qdev, file_priv, |
| 222 | qxl_release_unreserve(qdev, release); | 222 | reloc.dst_handle, release); |
| 223 | qxl_release_free(qdev, release); | 223 | if (!reloc_info[i].dst_bo) { |
| 224 | return -EINVAL; | 224 | ret = -EINVAL; |
| 225 | } | 225 | reloc_info[i].src_bo = NULL; |
| 226 | reloc_dst_offset = 0; | 226 | goto out_free_bos; |
| 227 | } else { | ||
| 228 | reloc_dst_bo = cmd_bo; | ||
| 229 | reloc_dst_offset = release->release_offset; | ||
| 230 | } | 227 | } |
| 231 | 228 | reloc_info[i].dst_offset = reloc.dst_offset; | |
| 232 | /* reserve and validate the reloc dst bo */ | 229 | } else { |
| 233 | if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) { | 230 | reloc_info[i].dst_bo = cmd_bo; |
| 234 | reloc_src_bo = | 231 | reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset; |
| 235 | qxlhw_handle_to_bo(qdev, file_priv, | 232 | } |
| 236 | reloc.src_handle, &reloc_list); | 233 | num_relocs++; |
| 237 | if (!reloc_src_bo) { | 234 | |
| 238 | if (reloc_dst_bo != cmd_bo) | 235 | /* reserve and validate the reloc dst bo */ |
| 239 | drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); | 236 | if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) { |
| 240 | qxl_bo_list_unreserve(&reloc_list, true); | 237 | reloc_info[i].src_bo = |
| 241 | qxl_release_unreserve(qdev, release); | 238 | qxlhw_handle_to_bo(qdev, file_priv, |
| 242 | qxl_release_free(qdev, release); | 239 | reloc.src_handle, release); |
| 243 | return -EINVAL; | 240 | if (!reloc_info[i].src_bo) { |
| 244 | } | 241 | if (reloc_info[i].dst_bo != cmd_bo) |
| 245 | } else | 242 | drm_gem_object_unreference_unlocked(&reloc_info[i].dst_bo->gem_base); |
| 246 | reloc_src_bo = NULL; | 243 | ret = -EINVAL; |
| 247 | if (reloc.reloc_type == QXL_RELOC_TYPE_BO) { | 244 | goto out_free_bos; |
| 248 | apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, | ||
| 249 | reloc_src_bo, reloc.src_offset); | ||
| 250 | } else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) { | ||
| 251 | apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo); | ||
| 252 | } else { | ||
| 253 | DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type); | ||
| 254 | return -EINVAL; | ||
| 255 | } | 245 | } |
| 246 | reloc_info[i].src_offset = reloc.src_offset; | ||
| 247 | } else { | ||
| 248 | reloc_info[i].src_bo = NULL; | ||
| 249 | reloc_info[i].src_offset = 0; | ||
| 250 | } | ||
| 251 | } | ||
| 256 | 252 | ||
| 257 | if (reloc_src_bo && reloc_src_bo != cmd_bo) { | 253 | /* validate all buffers */ |
| 258 | qxl_release_add_res(qdev, release, reloc_src_bo); | 254 | ret = qxl_release_reserve_list(release, false); |
| 259 | drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base); | 255 | if (ret) |
| 260 | } | 256 | goto out_free_bos; |
| 261 | 257 | ||
| 262 | if (reloc_dst_bo != cmd_bo) | 258 | for (i = 0; i < cmd->relocs_num; ++i) { |
| 263 | drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); | 259 | if (reloc_info[i].type == QXL_RELOC_TYPE_BO) |
| 264 | } | 260 | apply_reloc(qdev, &reloc_info[i]); |
| 265 | qxl_fence_releaseable(qdev, release); | 261 | else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF) |
| 262 | apply_surf_reloc(qdev, &reloc_info[i]); | ||
| 263 | } | ||
| 266 | 264 | ||
| 267 | ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true); | 265 | ret = qxl_push_command_ring_release(qdev, release, cmd->type, true); |
| 268 | if (ret == -ERESTARTSYS) { | 266 | if (ret) |
| 269 | qxl_release_unreserve(qdev, release); | 267 | qxl_release_backoff_reserve_list(release); |
| 270 | qxl_release_free(qdev, release); | 268 | else |
| 271 | qxl_bo_list_unreserve(&reloc_list, true); | 269 | qxl_release_fence_buffer_objects(release); |
| 270 | |||
| 271 | out_free_bos: | ||
| 272 | for (j = 0; j < num_relocs; j++) { | ||
| 273 | if (reloc_info[j].dst_bo != cmd_bo) | ||
| 274 | drm_gem_object_unreference_unlocked(&reloc_info[j].dst_bo->gem_base); | ||
| 275 | if (reloc_info[j].src_bo && reloc_info[j].src_bo != cmd_bo) | ||
| 276 | drm_gem_object_unreference_unlocked(&reloc_info[j].src_bo->gem_base); | ||
| 277 | } | ||
| 278 | out_free_release: | ||
| 279 | if (ret) | ||
| 280 | qxl_release_free(qdev, release); | ||
| 281 | out_free_reloc: | ||
| 282 | kfree(reloc_info); | ||
| 283 | return ret; | ||
| 284 | } | ||
| 285 | |||
| 286 | static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, | ||
| 287 | struct drm_file *file_priv) | ||
| 288 | { | ||
| 289 | struct qxl_device *qdev = dev->dev_private; | ||
| 290 | struct drm_qxl_execbuffer *execbuffer = data; | ||
| 291 | struct drm_qxl_command user_cmd; | ||
| 292 | int cmd_num; | ||
| 293 | int ret; | ||
| 294 | |||
| 295 | for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) { | ||
| 296 | |||
| 297 | struct drm_qxl_command *commands = | ||
| 298 | (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; | ||
| 299 | |||
| 300 | if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num], | ||
| 301 | sizeof(user_cmd))) | ||
| 302 | return -EFAULT; | ||
| 303 | |||
| 304 | ret = qxl_process_single_command(qdev, &user_cmd, file_priv); | ||
| 305 | if (ret) | ||
| 272 | return ret; | 306 | return ret; |
| 273 | } | ||
| 274 | qxl_release_unreserve(qdev, release); | ||
| 275 | } | 307 | } |
| 276 | qxl_bo_list_unreserve(&reloc_list, 0); | ||
| 277 | return 0; | 308 | return 0; |
| 278 | } | 309 | } |
| 279 | 310 | ||
| @@ -305,7 +336,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data, | |||
| 305 | goto out; | 336 | goto out; |
| 306 | 337 | ||
| 307 | if (!qobj->pin_count) { | 338 | if (!qobj->pin_count) { |
| 308 | qxl_ttm_placement_from_domain(qobj, qobj->type); | 339 | qxl_ttm_placement_from_domain(qobj, qobj->type, false); |
| 309 | ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, | 340 | ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, |
| 310 | true, false); | 341 | true, false); |
| 311 | if (unlikely(ret)) | 342 | if (unlikely(ret)) |
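qxl_process_single_command() now runs in strict phases: gather relocations and BO references into reloc_info[] and the release's validate list, reserve and validate the whole list once, apply the relocations, push the command, then fence on success or back off on failure. A compressed sketch of the tail (the gather phase is elided):

	/* Compressed sketch of the tail of qxl_process_single_command();
	 * reloc_info[] is assumed already gathered. */
	static int qxl_cmd_submit_tail(struct qxl_device *qdev,
				       struct qxl_release *release,
				       struct qxl_reloc_info *reloc_info,
				       int num_relocs, uint32_t cmd_type)
	{
		int i, ret;

		ret = qxl_release_reserve_list(release, false);
		if (ret)
			return ret;

		for (i = 0; i < num_relocs; ++i) {
			if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
				apply_reloc(qdev, &reloc_info[i]);
			else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
				apply_surf_reloc(qdev, &reloc_info[i]);
		}

		ret = qxl_push_command_ring_release(qdev, release, cmd_type, true);
		if (ret)
			qxl_release_backoff_reserve_list(release);
		else
			qxl_release_fence_buffer_objects(release);
		return ret;
	}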
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index 1191fe7788c9..aa161cddd87e 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c | |||
| @@ -51,20 +51,21 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo) | |||
| 51 | return false; | 51 | return false; |
| 52 | } | 52 | } |
| 53 | 53 | ||
| 54 | void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain) | 54 | void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned) |
| 55 | { | 55 | { |
| 56 | u32 c = 0; | 56 | u32 c = 0; |
| 57 | u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0; | ||
| 57 | 58 | ||
| 58 | qbo->placement.fpfn = 0; | 59 | qbo->placement.fpfn = 0; |
| 59 | qbo->placement.lpfn = 0; | 60 | qbo->placement.lpfn = 0; |
| 60 | qbo->placement.placement = qbo->placements; | 61 | qbo->placement.placement = qbo->placements; |
| 61 | qbo->placement.busy_placement = qbo->placements; | 62 | qbo->placement.busy_placement = qbo->placements; |
| 62 | if (domain == QXL_GEM_DOMAIN_VRAM) | 63 | if (domain == QXL_GEM_DOMAIN_VRAM) |
| 63 | qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM; | 64 | qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag; |
| 64 | if (domain == QXL_GEM_DOMAIN_SURFACE) | 65 | if (domain == QXL_GEM_DOMAIN_SURFACE) |
| 65 | qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0; | 66 | qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag; |
| 66 | if (domain == QXL_GEM_DOMAIN_CPU) | 67 | if (domain == QXL_GEM_DOMAIN_CPU) |
| 67 | qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; | 68 | qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag; |
| 68 | if (!c) | 69 | if (!c) |
| 69 | qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; | 70 | qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; |
| 70 | qbo->placement.num_placement = c; | 71 | qbo->placement.num_placement = c; |
| @@ -73,7 +74,7 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain) | |||
| 73 | 74 | ||
| 74 | 75 | ||
| 75 | int qxl_bo_create(struct qxl_device *qdev, | 76 | int qxl_bo_create(struct qxl_device *qdev, |
| 76 | unsigned long size, bool kernel, u32 domain, | 77 | unsigned long size, bool kernel, bool pinned, u32 domain, |
| 77 | struct qxl_surface *surf, | 78 | struct qxl_surface *surf, |
| 78 | struct qxl_bo **bo_ptr) | 79 | struct qxl_bo **bo_ptr) |
| 79 | { | 80 | { |
| @@ -99,15 +100,15 @@ int qxl_bo_create(struct qxl_device *qdev, | |||
| 99 | } | 100 | } |
| 100 | bo->gem_base.driver_private = NULL; | 101 | bo->gem_base.driver_private = NULL; |
| 101 | bo->type = domain; | 102 | bo->type = domain; |
| 102 | bo->pin_count = 0; | 103 | bo->pin_count = pinned ? 1 : 0; |
| 103 | bo->surface_id = 0; | 104 | bo->surface_id = 0; |
| 104 | qxl_fence_init(qdev, &bo->fence); | 105 | qxl_fence_init(qdev, &bo->fence); |
| 105 | INIT_LIST_HEAD(&bo->list); | 106 | INIT_LIST_HEAD(&bo->list); |
| 106 | atomic_set(&bo->reserve_count, 0); | 107 | |
| 107 | if (surf) | 108 | if (surf) |
| 108 | bo->surf = *surf; | 109 | bo->surf = *surf; |
| 109 | 110 | ||
| 110 | qxl_ttm_placement_from_domain(bo, domain); | 111 | qxl_ttm_placement_from_domain(bo, domain, pinned); |
| 111 | 112 | ||
| 112 | r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, | 113 | r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, |
| 113 | &bo->placement, 0, !kernel, NULL, size, | 114 | &bo->placement, 0, !kernel, NULL, size, |
| @@ -228,7 +229,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo) | |||
| 228 | int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) | 229 | int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) |
| 229 | { | 230 | { |
| 230 | struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; | 231 | struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; |
| 231 | int r, i; | 232 | int r; |
| 232 | 233 | ||
| 233 | if (bo->pin_count) { | 234 | if (bo->pin_count) { |
| 234 | bo->pin_count++; | 235 | bo->pin_count++; |
| @@ -236,9 +237,7 @@ int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) | |||
| 236 | *gpu_addr = qxl_bo_gpu_offset(bo); | 237 | *gpu_addr = qxl_bo_gpu_offset(bo); |
| 237 | return 0; | 238 | return 0; |
| 238 | } | 239 | } |
| 239 | qxl_ttm_placement_from_domain(bo, domain); | 240 | qxl_ttm_placement_from_domain(bo, domain, true); |
| 240 | for (i = 0; i < bo->placement.num_placement; i++) | ||
| 241 | bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; | ||
| 242 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | 241 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); |
| 243 | if (likely(r == 0)) { | 242 | if (likely(r == 0)) { |
| 244 | bo->pin_count = 1; | 243 | bo->pin_count = 1; |
| @@ -317,53 +316,6 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo) | |||
| 317 | return 0; | 316 | return 0; |
| 318 | } | 317 | } |
| 319 | 318 | ||
| 320 | void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed) | ||
| 321 | { | ||
| 322 | struct qxl_bo_list *entry, *sf; | ||
| 323 | |||
| 324 | list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) { | ||
| 325 | qxl_bo_unreserve(entry->bo); | ||
| 326 | list_del(&entry->lhead); | ||
| 327 | kfree(entry); | ||
| 328 | } | ||
| 329 | } | ||
| 330 | |||
| 331 | int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo) | ||
| 332 | { | ||
| 333 | struct qxl_bo_list *entry; | ||
| 334 | int ret; | ||
| 335 | |||
| 336 | list_for_each_entry(entry, &reloc_list->bos, lhead) { | ||
| 337 | if (entry->bo == bo) | ||
| 338 | return 0; | ||
| 339 | } | ||
| 340 | |||
| 341 | entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL); | ||
| 342 | if (!entry) | ||
| 343 | return -ENOMEM; | ||
| 344 | |||
| 345 | entry->bo = bo; | ||
| 346 | list_add(&entry->lhead, &reloc_list->bos); | ||
| 347 | |||
| 348 | ret = qxl_bo_reserve(bo, false); | ||
| 349 | if (ret) | ||
| 350 | return ret; | ||
| 351 | |||
| 352 | if (!bo->pin_count) { | ||
| 353 | qxl_ttm_placement_from_domain(bo, bo->type); | ||
| 354 | ret = ttm_bo_validate(&bo->tbo, &bo->placement, | ||
| 355 | true, false); | ||
| 356 | if (ret) | ||
| 357 | return ret; | ||
| 358 | } | ||
| 359 | |||
| 360 | /* allocate a surface for reserved + validated buffers */ | ||
| 361 | ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo); | ||
| 362 | if (ret) | ||
| 363 | return ret; | ||
| 364 | return 0; | ||
| 365 | } | ||
| 366 | |||
| 367 | int qxl_surf_evict(struct qxl_device *qdev) | 319 | int qxl_surf_evict(struct qxl_device *qdev) |
| 368 | { | 320 | { |
| 369 | return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0); | 321 | return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0); |
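Passing pinned into qxl_ttm_placement_from_domain() folds TTM_PL_FLAG_NO_EVICT into every placement entry up front, replacing the loop that previously OR'ed it in after the fact at each pin site. A sketch of the resulting call pattern (the interruptible flag mirrors the two sites in this hunk):

	/* Sketch of the post-change validate pattern for any qxl_bo. */
	static int qxl_validate_sketch(struct qxl_bo *bo, bool pin)
	{
		/* NO_EVICT rides along in the placement when pin is set */
		qxl_ttm_placement_from_domain(bo, bo->type, pin);
		return ttm_bo_validate(&bo->tbo, &bo->placement, !pin, false);
	}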
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h index ee7ad79ce781..8cb6167038e5 100644 --- a/drivers/gpu/drm/qxl/qxl_object.h +++ b/drivers/gpu/drm/qxl/qxl_object.h | |||
| @@ -88,7 +88,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type, | |||
| 88 | 88 | ||
| 89 | extern int qxl_bo_create(struct qxl_device *qdev, | 89 | extern int qxl_bo_create(struct qxl_device *qdev, |
| 90 | unsigned long size, | 90 | unsigned long size, |
| 91 | bool kernel, u32 domain, | 91 | bool kernel, bool pinned, u32 domain, |
| 92 | struct qxl_surface *surf, | 92 | struct qxl_surface *surf, |
| 93 | struct qxl_bo **bo_ptr); | 93 | struct qxl_bo **bo_ptr); |
| 94 | extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); | 94 | extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); |
| @@ -99,9 +99,7 @@ extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo); | |||
| 99 | extern void qxl_bo_unref(struct qxl_bo **bo); | 99 | extern void qxl_bo_unref(struct qxl_bo **bo); |
| 100 | extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr); | 100 | extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr); |
| 101 | extern int qxl_bo_unpin(struct qxl_bo *bo); | 101 | extern int qxl_bo_unpin(struct qxl_bo *bo); |
| 102 | extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain); | 102 | extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned); |
| 103 | extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo); | 103 | extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo); |
| 104 | 104 | ||
| 105 | extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo); | ||
| 106 | extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed); | ||
| 107 | #endif | 105 | #endif |
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index b443d6751d5f..b61449e52cd5 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c | |||
| @@ -38,7 +38,8 @@ | |||
| 38 | 38 | ||
| 39 | static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; | 39 | static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; |
| 40 | static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; | 40 | static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; |
| 41 | uint64_t | 41 | |
| 42 | static uint64_t | ||
| 42 | qxl_release_alloc(struct qxl_device *qdev, int type, | 43 | qxl_release_alloc(struct qxl_device *qdev, int type, |
| 43 | struct qxl_release **ret) | 44 | struct qxl_release **ret) |
| 44 | { | 45 | { |
| @@ -53,9 +54,9 @@ qxl_release_alloc(struct qxl_device *qdev, int type, | |||
| 53 | return 0; | 54 | return 0; |
| 54 | } | 55 | } |
| 55 | release->type = type; | 56 | release->type = type; |
| 56 | release->bo_count = 0; | ||
| 57 | release->release_offset = 0; | 57 | release->release_offset = 0; |
| 58 | release->surface_release_id = 0; | 58 | release->surface_release_id = 0; |
| 59 | INIT_LIST_HEAD(&release->bos); | ||
| 59 | 60 | ||
| 60 | idr_preload(GFP_KERNEL); | 61 | idr_preload(GFP_KERNEL); |
| 61 | spin_lock(&qdev->release_idr_lock); | 62 | spin_lock(&qdev->release_idr_lock); |
| @@ -77,20 +78,20 @@ void | |||
| 77 | qxl_release_free(struct qxl_device *qdev, | 78 | qxl_release_free(struct qxl_device *qdev, |
| 78 | struct qxl_release *release) | 79 | struct qxl_release *release) |
| 79 | { | 80 | { |
| 80 | int i; | 81 | struct qxl_bo_list *entry, *tmp; |
| 81 | 82 | QXL_INFO(qdev, "release %d, type %d\n", release->id, | |
| 82 | QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id, | 83 | release->type); |
| 83 | release->type, release->bo_count); | ||
| 84 | 84 | ||
| 85 | if (release->surface_release_id) | 85 | if (release->surface_release_id) |
| 86 | qxl_surface_id_dealloc(qdev, release->surface_release_id); | 86 | qxl_surface_id_dealloc(qdev, release->surface_release_id); |
| 87 | 87 | ||
| 88 | for (i = 0 ; i < release->bo_count; ++i) { | 88 | list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) { |
| 89 | struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); | ||
| 89 | QXL_INFO(qdev, "release %llx\n", | 90 | QXL_INFO(qdev, "release %llx\n", |
| 90 | release->bos[i]->tbo.addr_space_offset | 91 | entry->tv.bo->addr_space_offset |
| 91 | - DRM_FILE_OFFSET); | 92 | - DRM_FILE_OFFSET); |
| 92 | qxl_fence_remove_release(&release->bos[i]->fence, release->id); | 93 | qxl_fence_remove_release(&bo->fence, release->id); |
| 93 | qxl_bo_unref(&release->bos[i]); | 94 | qxl_bo_unref(&bo); |
| 94 | } | 95 | } |
| 95 | spin_lock(&qdev->release_idr_lock); | 96 | spin_lock(&qdev->release_idr_lock); |
| 96 | idr_remove(&qdev->release_idr, release->id); | 97 | idr_remove(&qdev->release_idr, release->id); |
| @@ -98,83 +99,117 @@ qxl_release_free(struct qxl_device *qdev, | |||
| 98 | kfree(release); | 99 | kfree(release); |
| 99 | } | 100 | } |
| 100 | 101 | ||
| 101 | void | ||
| 102 | qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release, | ||
| 103 | struct qxl_bo *bo) | ||
| 104 | { | ||
| 105 | int i; | ||
| 106 | for (i = 0; i < release->bo_count; i++) | ||
| 107 | if (release->bos[i] == bo) | ||
| 108 | return; | ||
| 109 | |||
| 110 | if (release->bo_count >= QXL_MAX_RES) { | ||
| 111 | DRM_ERROR("exceeded max resource on a qxl_release item\n"); | ||
| 112 | return; | ||
| 113 | } | ||
| 114 | release->bos[release->bo_count++] = qxl_bo_ref(bo); | ||
| 115 | } | ||
| 116 | |||
| 117 | static int qxl_release_bo_alloc(struct qxl_device *qdev, | 102 | static int qxl_release_bo_alloc(struct qxl_device *qdev, |
| 118 | struct qxl_bo **bo) | 103 | struct qxl_bo **bo) |
| 119 | { | 104 | { |
| 120 | int ret; | 105 | int ret; |
| 121 | ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL, | 106 | /* pin release BOs; they are too messy to evict */ |
| 107 | ret = qxl_bo_create(qdev, PAGE_SIZE, false, true, | ||
| 108 | QXL_GEM_DOMAIN_VRAM, NULL, | ||
| 122 | bo); | 109 | bo); |
| 123 | return ret; | 110 | return ret; |
| 124 | } | 111 | } |
| 125 | 112 | ||
| 126 | int qxl_release_reserve(struct qxl_device *qdev, | 113 | int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo) |
| 127 | struct qxl_release *release, bool no_wait) | 114 | { |
| 115 | struct qxl_bo_list *entry; | ||
| 116 | |||
| 117 | list_for_each_entry(entry, &release->bos, tv.head) { | ||
| 118 | if (entry->tv.bo == &bo->tbo) | ||
| 119 | return 0; | ||
| 120 | } | ||
| 121 | |||
| 122 | entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL); | ||
| 123 | if (!entry) | ||
| 124 | return -ENOMEM; | ||
| 125 | |||
| 126 | qxl_bo_ref(bo); | ||
| 127 | entry->tv.bo = &bo->tbo; | ||
| 128 | list_add_tail(&entry->tv.head, &release->bos); | ||
| 129 | return 0; | ||
| 130 | } | ||
| 131 | |||
| 132 | static int qxl_release_validate_bo(struct qxl_bo *bo) | ||
| 128 | { | 133 | { |
| 129 | int ret; | 134 | int ret; |
| 130 | if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) { | 135 | |
| 131 | ret = qxl_bo_reserve(release->bos[0], no_wait); | 136 | if (!bo->pin_count) { |
| 137 | qxl_ttm_placement_from_domain(bo, bo->type, false); | ||
| 138 | ret = ttm_bo_validate(&bo->tbo, &bo->placement, | ||
| 139 | true, false); | ||
| 132 | if (ret) | 140 | if (ret) |
| 133 | return ret; | 141 | return ret; |
| 134 | } | 142 | } |
| 143 | |||
| 144 | /* allocate a surface for reserved + validated buffers */ | ||
| 145 | ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo); | ||
| 146 | if (ret) | ||
| 147 | return ret; | ||
| 148 | return 0; | ||
| 149 | } | ||
| 150 | |||
| 151 | int qxl_release_reserve_list(struct qxl_release *release, bool no_intr) | ||
| 152 | { | ||
| 153 | int ret; | ||
| 154 | struct qxl_bo_list *entry; | ||
| 155 | |||
| 156 | /* if only one object is on the release, it is the release itself; | ||
| 157 | since these objects are pinned there is no need to reserve */ | ||
| 158 | if (list_is_singular(&release->bos)) | ||
| 159 | return 0; | ||
| 160 | |||
| 161 | ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos); | ||
| 162 | if (ret) | ||
| 163 | return ret; | ||
| 164 | |||
| 165 | list_for_each_entry(entry, &release->bos, tv.head) { | ||
| 166 | struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); | ||
| 167 | |||
| 168 | ret = qxl_release_validate_bo(bo); | ||
| 169 | if (ret) { | ||
| 170 | ttm_eu_backoff_reservation(&release->ticket, &release->bos); | ||
| 171 | return ret; | ||
| 172 | } | ||
| 173 | } | ||
| 135 | return 0; | 174 | return 0; |
| 136 | } | 175 | } |
| 137 | 176 | ||
| 138 | void qxl_release_unreserve(struct qxl_device *qdev, | 177 | void qxl_release_backoff_reserve_list(struct qxl_release *release) |
| 139 | struct qxl_release *release) | ||
| 140 | { | 178 | { |
| 141 | if (atomic_dec_and_test(&release->bos[0]->reserve_count)) | 179 | /* if only one object is on the release, it is the release itself; |
| 142 | qxl_bo_unreserve(release->bos[0]); | 180 | since these objects are pinned there is no need to reserve */ |
| 181 | if (list_is_singular(&release->bos)) | ||
| 182 | return; | ||
| 183 | |||
| 184 | ttm_eu_backoff_reservation(&release->ticket, &release->bos); | ||
| 143 | } | 185 | } |
| 144 | 186 | ||
| 187 | |||
| 145 | int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, | 188 | int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, |
| 146 | enum qxl_surface_cmd_type surface_cmd_type, | 189 | enum qxl_surface_cmd_type surface_cmd_type, |
| 147 | struct qxl_release *create_rel, | 190 | struct qxl_release *create_rel, |
| 148 | struct qxl_release **release) | 191 | struct qxl_release **release) |
| 149 | { | 192 | { |
| 150 | int ret; | ||
| 151 | |||
| 152 | if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { | 193 | if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { |
| 153 | int idr_ret; | 194 | int idr_ret; |
| 195 | struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head); | ||
| 154 | struct qxl_bo *bo; | 196 | struct qxl_bo *bo; |
| 155 | union qxl_release_info *info; | 197 | union qxl_release_info *info; |
| 156 | 198 | ||
| 157 | /* stash the release after the create command */ | 199 | /* stash the release after the create command */ |
| 158 | idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); | 200 | idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); |
| 159 | bo = qxl_bo_ref(create_rel->bos[0]); | 201 | bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo)); |
| 160 | 202 | ||
| 161 | (*release)->release_offset = create_rel->release_offset + 64; | 203 | (*release)->release_offset = create_rel->release_offset + 64; |
| 162 | 204 | ||
| 163 | qxl_release_add_res(qdev, *release, bo); | 205 | qxl_release_list_add(*release, bo); |
| 164 | 206 | ||
| 165 | ret = qxl_release_reserve(qdev, *release, false); | ||
| 166 | if (ret) { | ||
| 167 | DRM_ERROR("release reserve failed\n"); | ||
| 168 | goto out_unref; | ||
| 169 | } | ||
| 170 | info = qxl_release_map(qdev, *release); | 207 | info = qxl_release_map(qdev, *release); |
| 171 | info->id = idr_ret; | 208 | info->id = idr_ret; |
| 172 | qxl_release_unmap(qdev, *release, info); | 209 | qxl_release_unmap(qdev, *release, info); |
| 173 | 210 | ||
| 174 | |||
| 175 | out_unref: | ||
| 176 | qxl_bo_unref(&bo); | 211 | qxl_bo_unref(&bo); |
| 177 | return ret; | 212 | return 0; |
| 178 | } | 213 | } |
| 179 | 214 | ||
| 180 | return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd), | 215 | return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd), |
| @@ -187,7 +222,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, | |||
| 187 | { | 222 | { |
| 188 | struct qxl_bo *bo; | 223 | struct qxl_bo *bo; |
| 189 | int idr_ret; | 224 | int idr_ret; |
| 190 | int ret; | 225 | int ret = 0; |
| 191 | union qxl_release_info *info; | 226 | union qxl_release_info *info; |
| 192 | int cur_idx; | 227 | int cur_idx; |
| 193 | 228 | ||
| @@ -216,11 +251,6 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, | |||
| 216 | mutex_unlock(&qdev->release_mutex); | 251 | mutex_unlock(&qdev->release_mutex); |
| 217 | return ret; | 252 | return ret; |
| 218 | } | 253 | } |
| 219 | |||
| 220 | /* pin releases bo's they are too messy to evict */ | ||
| 221 | ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false); | ||
| 222 | qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL); | ||
| 223 | qxl_bo_unreserve(qdev->current_release_bo[cur_idx]); | ||
| 224 | } | 254 | } |
| 225 | 255 | ||
| 226 | bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); | 256 | bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); |
| @@ -231,36 +261,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, | |||
| 231 | if (rbo) | 261 | if (rbo) |
| 232 | *rbo = bo; | 262 | *rbo = bo; |
| 233 | 263 | ||
| 234 | qxl_release_add_res(qdev, *release, bo); | ||
| 235 | |||
| 236 | ret = qxl_release_reserve(qdev, *release, false); | ||
| 237 | mutex_unlock(&qdev->release_mutex); | 264 | mutex_unlock(&qdev->release_mutex); |
| 238 | if (ret) | 265 | |
| 239 | goto out_unref; | 266 | qxl_release_list_add(*release, bo); |
| 240 | 267 | ||
| 241 | info = qxl_release_map(qdev, *release); | 268 | info = qxl_release_map(qdev, *release); |
| 242 | info->id = idr_ret; | 269 | info->id = idr_ret; |
| 243 | qxl_release_unmap(qdev, *release, info); | 270 | qxl_release_unmap(qdev, *release, info); |
| 244 | 271 | ||
| 245 | out_unref: | ||
| 246 | qxl_bo_unref(&bo); | 272 | qxl_bo_unref(&bo); |
| 247 | return ret; | 273 | return ret; |
| 248 | } | 274 | } |
| 249 | 275 | ||
| 250 | int qxl_fence_releaseable(struct qxl_device *qdev, | ||
| 251 | struct qxl_release *release) | ||
| 252 | { | ||
| 253 | int i, ret; | ||
| 254 | for (i = 0; i < release->bo_count; i++) { | ||
| 255 | if (!release->bos[i]->tbo.sync_obj) | ||
| 256 | release->bos[i]->tbo.sync_obj = &release->bos[i]->fence; | ||
| 257 | ret = qxl_fence_add_release(&release->bos[i]->fence, release->id); | ||
| 258 | if (ret) | ||
| 259 | return ret; | ||
| 260 | } | ||
| 261 | return 0; | ||
| 262 | } | ||
| 263 | |||
| 264 | struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, | 276 | struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, |
| 265 | uint64_t id) | 277 | uint64_t id) |
| 266 | { | 278 | { |
| @@ -273,10 +285,7 @@ struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, | |||
| 273 | DRM_ERROR("failed to find id in release_idr\n"); | 285 | DRM_ERROR("failed to find id in release_idr\n"); |
| 274 | return NULL; | 286 | return NULL; |
| 275 | } | 287 | } |
| 276 | if (release->bo_count < 1) { | 288 | |
| 277 | DRM_ERROR("read a released resource with 0 bos\n"); | ||
| 278 | return NULL; | ||
| 279 | } | ||
| 280 | return release; | 289 | return release; |
| 281 | } | 290 | } |
| 282 | 291 | ||
| @@ -285,9 +294,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev, | |||
| 285 | { | 294 | { |
| 286 | void *ptr; | 295 | void *ptr; |
| 287 | union qxl_release_info *info; | 296 | union qxl_release_info *info; |
| 288 | struct qxl_bo *bo = release->bos[0]; | 297 | struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); |
| 298 | struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); | ||
| 289 | 299 | ||
| 290 | ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); | 300 | ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); |
| 301 | if (!ptr) | ||
| 302 | return NULL; | ||
| 291 | info = ptr + (release->release_offset & ~PAGE_SIZE); | 303 | info = ptr + (release->release_offset & ~PAGE_SIZE); |
| 292 | return info; | 304 | return info; |
| 293 | } | 305 | } |
| @@ -296,9 +308,51 @@ void qxl_release_unmap(struct qxl_device *qdev, | |||
| 296 | struct qxl_release *release, | 308 | struct qxl_release *release, |
| 297 | union qxl_release_info *info) | 309 | union qxl_release_info *info) |
| 298 | { | 310 | { |
| 299 | struct qxl_bo *bo = release->bos[0]; | 311 | struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); |
| 312 | struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); | ||
| 300 | void *ptr; | 313 | void *ptr; |
| 301 | 314 | ||
| 302 | ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE); | 315 | ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE); |
| 303 | qxl_bo_kunmap_atomic_page(qdev, bo, ptr); | 316 | qxl_bo_kunmap_atomic_page(qdev, bo, ptr); |
| 304 | } | 317 | } |
| 318 | |||
| 319 | void qxl_release_fence_buffer_objects(struct qxl_release *release) | ||
| 320 | { | ||
| 321 | struct ttm_validate_buffer *entry; | ||
| 322 | struct ttm_buffer_object *bo; | ||
| 323 | struct ttm_bo_global *glob; | ||
| 324 | struct ttm_bo_device *bdev; | ||
| 325 | struct ttm_bo_driver *driver; | ||
| 326 | struct qxl_bo *qbo; | ||
| 327 | |||
| 328 | /* if only one object is on the release, it is the release itself; | ||
| 329 | since these objects are pinned, nothing was reserved and there is nothing to fence */ | ||
| 330 | if (list_is_singular(&release->bos)) | ||
| 331 | return; | ||
| 332 | |||
| 333 | bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo; | ||
| 334 | bdev = bo->bdev; | ||
| 335 | driver = bdev->driver; | ||
| 336 | glob = bo->glob; | ||
| 337 | |||
| 338 | spin_lock(&glob->lru_lock); | ||
| 339 | spin_lock(&bdev->fence_lock); | ||
| 340 | |||
| 341 | list_for_each_entry(entry, &release->bos, head) { | ||
| 342 | bo = entry->bo; | ||
| 343 | qbo = to_qxl_bo(bo); | ||
| 344 | |||
| 345 | if (!entry->bo->sync_obj) | ||
| 346 | entry->bo->sync_obj = &qbo->fence; | ||
| 347 | |||
| 348 | qxl_fence_add_release_locked(&qbo->fence, release->id); | ||
| 349 | |||
| 350 | ttm_bo_add_to_lru(bo); | ||
| 351 | ww_mutex_unlock(&bo->resv->lock); | ||
| 352 | entry->reserved = false; | ||
| 353 | } | ||
| 354 | spin_unlock(&bdev->fence_lock); | ||
| 355 | spin_unlock(&glob->lru_lock); | ||
| 356 | ww_acquire_fini(&release->ticket); | ||
| 357 | } | ||
| 358 | |||
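Taken together, the qxl_release.c changes above replace per-BO reserve/unreserve bookkeeping with a ttm_eu execbuf-style list. A minimal sketch of the caller ordering this implies (hedged: argument lists are abbreviated, and some_bo plus the submit step are illustrative placeholders, not taken from this diff):

/* Sketch of the new release life cycle. Function names come from this
 * diff; some_bo is a hypothetical placeholder for any BO the command
 * references. */
struct qxl_release *release;
struct qxl_bo *cmd_bo;
int ret;

ret = qxl_alloc_release_reserved(qdev, size, type, &release, &cmd_bo);
if (ret)
	return ret;

/* every BO the command touches goes on the list before reserving */
ret = qxl_release_list_add(release, some_bo);
if (ret)
	goto out_free;

/* ww-mutex reserve + validate the whole list in one pass;
 * on failure it backs the reservations off itself */
ret = qxl_release_reserve_list(release, true);
if (ret)
	goto out_free;

/* ... fill the command via qxl_release_map()/qxl_release_unmap(),
 * then submit it to the ring (driver-specific, not shown) ... */

/* fence every BO on the list and drop the reservations */
qxl_release_fence_buffer_objects(release);
return 0;

out_free:
	qxl_release_free(qdev, release);
	return ret;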
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 489cb8cece4d..1dfd84cda2a1 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c | |||
| @@ -206,7 +206,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo, | |||
| 206 | return; | 206 | return; |
| 207 | } | 207 | } |
| 208 | qbo = container_of(bo, struct qxl_bo, tbo); | 208 | qbo = container_of(bo, struct qxl_bo, tbo); |
| 209 | qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU); | 209 | qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false); |
| 210 | *placement = qbo->placement; | 210 | *placement = qbo->placement; |
| 211 | } | 211 | } |
| 212 | 212 | ||
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index fb441a790f3d..15da7ef344a4 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
| @@ -1222,12 +1222,17 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) | |||
| 1222 | int r; | 1222 | int r; |
| 1223 | 1223 | ||
| 1224 | mutex_lock(&ctx->mutex); | 1224 | mutex_lock(&ctx->mutex); |
| 1225 | /* reset data block */ | ||
| 1226 | ctx->data_block = 0; | ||
| 1225 | /* reset reg block */ | 1227 | /* reset reg block */ |
| 1226 | ctx->reg_block = 0; | 1228 | ctx->reg_block = 0; |
| 1227 | /* reset fb window */ | 1229 | /* reset fb window */ |
| 1228 | ctx->fb_base = 0; | 1230 | ctx->fb_base = 0; |
| 1229 | /* reset io mode */ | 1231 | /* reset io mode */ |
| 1230 | ctx->io_mode = ATOM_IO_MM; | 1232 | ctx->io_mode = ATOM_IO_MM; |
| 1233 | /* reset divmul */ | ||
| 1234 | ctx->divmul[0] = 0; | ||
| 1235 | ctx->divmul[1] = 0; | ||
| 1231 | r = atom_execute_table_locked(ctx, index, params); | 1236 | r = atom_execute_table_locked(ctx, index, params); |
| 1232 | mutex_unlock(&ctx->mutex); | 1237 | mutex_unlock(&ctx->mutex); |
| 1233 | return r; | 1238 | return r; |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 064023bed480..32501f6ec991 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
| @@ -44,6 +44,41 @@ static char *pre_emph_names[] = { | |||
| 44 | }; | 44 | }; |
| 45 | 45 | ||
| 46 | /***** radeon AUX functions *****/ | 46 | /***** radeon AUX functions *****/ |
| 47 | |||
| 48 | /* Atom needs data in little-endian format, | ||
| 49 | * so swap as appropriate when copying data to | ||
| 50 | * or from atom. Note that atom operates on | ||
| 51 | * dword (dw) units. | ||
| 52 | */ | ||
| 53 | static void radeon_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) | ||
| 54 | { | ||
| 55 | #ifdef __BIG_ENDIAN | ||
| 56 | u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */ | ||
| 57 | u32 *dst32, *src32; | ||
| 58 | int i; | ||
| 59 | |||
| 60 | memcpy(src_tmp, src, num_bytes); | ||
| 61 | src32 = (u32 *)src_tmp; | ||
| 62 | dst32 = (u32 *)dst_tmp; | ||
| 63 | if (to_le) { | ||
| 64 | for (i = 0; i < ((num_bytes + 3) / 4); i++) | ||
| 65 | dst32[i] = cpu_to_le32(src32[i]); | ||
| 66 | memcpy(dst, dst_tmp, num_bytes); | ||
| 67 | } else { | ||
| 68 | u8 dws = num_bytes & ~3; | ||
| 69 | for (i = 0; i < ((num_bytes + 3) / 4); i++) | ||
| 70 | dst32[i] = le32_to_cpu(src32[i]); | ||
| 71 | memcpy(dst, dst_tmp, dws); | ||
| 72 | if (num_bytes % 4) { | ||
| 73 | for (i = 0; i < (num_bytes % 4); i++) | ||
| 74 | dst[dws+i] = dst_tmp[dws+i]; | ||
| 75 | } | ||
| 76 | } | ||
| 77 | #else | ||
| 78 | memcpy(dst, src, num_bytes); | ||
| 79 | #endif | ||
| 80 | } | ||
| 81 | |||
| 47 | union aux_channel_transaction { | 82 | union aux_channel_transaction { |
| 48 | PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; | 83 | PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; |
| 49 | PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; | 84 | PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; |
| @@ -65,10 +100,10 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, | |||
| 65 | 100 | ||
| 66 | base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); | 101 | base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); |
| 67 | 102 | ||
| 68 | memcpy(base, send, send_bytes); | 103 | radeon_copy_swap(base, send, send_bytes, true); |
| 69 | 104 | ||
| 70 | args.v1.lpAuxRequest = 0 + 4; | 105 | args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4)); |
| 71 | args.v1.lpDataOut = 16 + 4; | 106 | args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4)); |
| 72 | args.v1.ucDataOutLen = 0; | 107 | args.v1.ucDataOutLen = 0; |
| 73 | args.v1.ucChannelID = chan->rec.i2c_id; | 108 | args.v1.ucChannelID = chan->rec.i2c_id; |
| 74 | args.v1.ucDelay = delay / 10; | 109 | args.v1.ucDelay = delay / 10; |
| @@ -102,7 +137,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, | |||
| 102 | recv_bytes = recv_size; | 137 | recv_bytes = recv_size; |
| 103 | 138 | ||
| 104 | if (recv && recv_size) | 139 | if (recv && recv_size) |
| 105 | memcpy(recv, base + 16, recv_bytes); | 140 | radeon_copy_swap(recv, base + 16, recv_bytes, false); |
| 106 | 141 | ||
| 107 | return recv_bytes; | 142 | return recv_bytes; |
| 108 | } | 143 | } |
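The swap above only matters on big-endian hosts, where atom's little-endian dwords must be converted on the way in and out. A small stand-alone model of the whole-dword case (hedged: swap32() stands in for cpu_to_le32()/le32_to_cpu() on a big-endian machine, and the 8-byte size keeps the example exact):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in for cpu_to_le32()/le32_to_cpu() on a big-endian host */
static uint32_t swap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00) |
	       ((v << 8) & 0x00ff0000) | (v << 24);
}

/* model of radeon_copy_swap() for sizes that are a multiple of 4;
 * mirrors the kernel helper (the kernel builds with -fno-strict-aliasing) */
static void copy_swap(uint8_t *dst, const uint8_t *src, uint8_t num_bytes)
{
	uint8_t src_tmp[20], dst_tmp[20];
	uint32_t *src32 = (uint32_t *)src_tmp;
	uint32_t *dst32 = (uint32_t *)dst_tmp;
	int i;

	memcpy(src_tmp, src, num_bytes);
	for (i = 0; i < (num_bytes + 3) / 4; i++)
		dst32[i] = swap32(src32[i]);
	memcpy(dst, dst_tmp, num_bytes);
}

int main(void)
{
	uint8_t in[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
	uint8_t out[8];
	int i;

	copy_swap(out, in, sizeof(in));
	for (i = 0; i < 8; i++)
		printf("%02x ", out[i]);	/* 44 33 22 11 88 77 66 55 */
	printf("\n");
	return 0;
}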
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index 0bfd55e08820..9953e1fbc46d 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c | |||
| @@ -2548,9 +2548,6 @@ int btc_dpm_init(struct radeon_device *rdev) | |||
| 2548 | { | 2548 | { |
| 2549 | struct rv7xx_power_info *pi; | 2549 | struct rv7xx_power_info *pi; |
| 2550 | struct evergreen_power_info *eg_pi; | 2550 | struct evergreen_power_info *eg_pi; |
| 2551 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
| 2552 | u16 data_offset, size; | ||
| 2553 | u8 frev, crev; | ||
| 2554 | struct atom_clock_dividers dividers; | 2551 | struct atom_clock_dividers dividers; |
| 2555 | int ret; | 2552 | int ret; |
| 2556 | 2553 | ||
| @@ -2633,16 +2630,7 @@ int btc_dpm_init(struct radeon_device *rdev) | |||
| 2633 | eg_pi->vddci_control = | 2630 | eg_pi->vddci_control = |
| 2634 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); | 2631 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); |
| 2635 | 2632 | ||
| 2636 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 2633 | rv770_get_engine_memory_ss(rdev); |
| 2637 | &frev, &crev, &data_offset)) { | ||
| 2638 | pi->sclk_ss = true; | ||
| 2639 | pi->mclk_ss = true; | ||
| 2640 | pi->dynamic_ss = true; | ||
| 2641 | } else { | ||
| 2642 | pi->sclk_ss = false; | ||
| 2643 | pi->mclk_ss = false; | ||
| 2644 | pi->dynamic_ss = true; | ||
| 2645 | } | ||
| 2646 | 2634 | ||
| 2647 | pi->asi = RV770_ASI_DFLT; | 2635 | pi->asi = RV770_ASI_DFLT; |
| 2648 | pi->pasi = CYPRESS_HASI_DFLT; | 2636 | pi->pasi = CYPRESS_HASI_DFLT; |
| @@ -2659,8 +2647,7 @@ int btc_dpm_init(struct radeon_device *rdev) | |||
| 2659 | 2647 | ||
| 2660 | pi->dynamic_pcie_gen2 = true; | 2648 | pi->dynamic_pcie_gen2 = true; |
| 2661 | 2649 | ||
| 2662 | if (pi->gfx_clock_gating && | 2650 | if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) |
| 2663 | (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) | ||
| 2664 | pi->thermal_protection = true; | 2651 | pi->thermal_protection = true; |
| 2665 | else | 2652 | else |
| 2666 | pi->thermal_protection = false; | 2653 | pi->thermal_protection = false; |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index ed1d91025928..8928bd109c16 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
| @@ -22,7 +22,6 @@ | |||
| 22 | * Authors: Alex Deucher | 22 | * Authors: Alex Deucher |
| 23 | */ | 23 | */ |
| 24 | #include <linux/firmware.h> | 24 | #include <linux/firmware.h> |
| 25 | #include <linux/platform_device.h> | ||
| 26 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
| 27 | #include <linux/module.h> | 26 | #include <linux/module.h> |
| 28 | #include "drmP.h" | 27 | #include "drmP.h" |
| @@ -742,7 +741,6 @@ static int ci_mc_load_microcode(struct radeon_device *rdev) | |||
| 742 | */ | 741 | */ |
| 743 | static int cik_init_microcode(struct radeon_device *rdev) | 742 | static int cik_init_microcode(struct radeon_device *rdev) |
| 744 | { | 743 | { |
| 745 | struct platform_device *pdev; | ||
| 746 | const char *chip_name; | 744 | const char *chip_name; |
| 747 | size_t pfp_req_size, me_req_size, ce_req_size, | 745 | size_t pfp_req_size, me_req_size, ce_req_size, |
| 748 | mec_req_size, rlc_req_size, mc_req_size, | 746 | mec_req_size, rlc_req_size, mc_req_size, |
| @@ -752,13 +750,6 @@ static int cik_init_microcode(struct radeon_device *rdev) | |||
| 752 | 750 | ||
| 753 | DRM_DEBUG("\n"); | 751 | DRM_DEBUG("\n"); |
| 754 | 752 | ||
| 755 | pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); | ||
| 756 | err = IS_ERR(pdev); | ||
| 757 | if (err) { | ||
| 758 | printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); | ||
| 759 | return -EINVAL; | ||
| 760 | } | ||
| 761 | |||
| 762 | switch (rdev->family) { | 753 | switch (rdev->family) { |
| 763 | case CHIP_BONAIRE: | 754 | case CHIP_BONAIRE: |
| 764 | chip_name = "BONAIRE"; | 755 | chip_name = "BONAIRE"; |
| @@ -794,7 +785,7 @@ static int cik_init_microcode(struct radeon_device *rdev) | |||
| 794 | DRM_INFO("Loading %s Microcode\n", chip_name); | 785 | DRM_INFO("Loading %s Microcode\n", chip_name); |
| 795 | 786 | ||
| 796 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); | 787 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); |
| 797 | err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); | 788 | err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); |
| 798 | if (err) | 789 | if (err) |
| 799 | goto out; | 790 | goto out; |
| 800 | if (rdev->pfp_fw->size != pfp_req_size) { | 791 | if (rdev->pfp_fw->size != pfp_req_size) { |
| @@ -806,7 +797,7 @@ static int cik_init_microcode(struct radeon_device *rdev) | |||
| 806 | } | 797 | } |
| 807 | 798 | ||
| 808 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); | 799 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); |
| 809 | err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); | 800 | err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); |
| 810 | if (err) | 801 | if (err) |
| 811 | goto out; | 802 | goto out; |
| 812 | if (rdev->me_fw->size != me_req_size) { | 803 | if (rdev->me_fw->size != me_req_size) { |
| @@ -817,7 +808,7 @@ static int cik_init_microcode(struct radeon_device *rdev) | |||
| 817 | } | 808 | } |
| 818 | 809 | ||
| 819 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); | 810 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); |
| 820 | err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev); | 811 | err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev); |
| 821 | if (err) | 812 | if (err) |
| 822 | goto out; | 813 | goto out; |
| 823 | if (rdev->ce_fw->size != ce_req_size) { | 814 | if (rdev->ce_fw->size != ce_req_size) { |
| @@ -828,7 +819,7 @@ static int cik_init_microcode(struct radeon_device *rdev) | |||
| 828 | } | 819 | } |
| 829 | 820 | ||
| 830 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name); | 821 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name); |
| 831 | err = request_firmware(&rdev->mec_fw, fw_name, &pdev->dev); | 822 | err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev); |
| 832 | if (err) | 823 | if (err) |
| 833 | goto out; | 824 | goto out; |
| 834 | if (rdev->mec_fw->size != mec_req_size) { | 825 | if (rdev->mec_fw->size != mec_req_size) { |
| @@ -839,7 +830,7 @@ static int cik_init_microcode(struct radeon_device *rdev) | |||
| 839 | } | 830 | } |
| 840 | 831 | ||
| 841 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name); | 832 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name); |
| 842 | err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); | 833 | err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); |
| 843 | if (err) | 834 | if (err) |
| 844 | goto out; | 835 | goto out; |
| 845 | if (rdev->rlc_fw->size != rlc_req_size) { | 836 | if (rdev->rlc_fw->size != rlc_req_size) { |
| @@ -850,7 +841,7 @@ static int cik_init_microcode(struct radeon_device *rdev) | |||
| 850 | } | 841 | } |
| 851 | 842 | ||
| 852 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name); | 843 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name); |
| 853 | err = request_firmware(&rdev->sdma_fw, fw_name, &pdev->dev); | 844 | err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev); |
| 854 | if (err) | 845 | if (err) |
| 855 | goto out; | 846 | goto out; |
| 856 | if (rdev->sdma_fw->size != sdma_req_size) { | 847 | if (rdev->sdma_fw->size != sdma_req_size) { |
| @@ -863,7 +854,7 @@ static int cik_init_microcode(struct radeon_device *rdev) | |||
| 863 | /* No MC ucode on APUs */ | 854 | /* No MC ucode on APUs */ |
| 864 | if (!(rdev->flags & RADEON_IS_IGP)) { | 855 | if (!(rdev->flags & RADEON_IS_IGP)) { |
| 865 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); | 856 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); |
| 866 | err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev); | 857 | err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); |
| 867 | if (err) | 858 | if (err) |
| 868 | goto out; | 859 | goto out; |
| 869 | if (rdev->mc_fw->size != mc_req_size) { | 860 | if (rdev->mc_fw->size != mc_req_size) { |
| @@ -875,8 +866,6 @@ static int cik_init_microcode(struct radeon_device *rdev) | |||
| 875 | } | 866 | } |
| 876 | 867 | ||
| 877 | out: | 868 | out: |
| 878 | platform_device_unregister(pdev); | ||
| 879 | |||
| 880 | if (err) { | 869 | if (err) { |
| 881 | if (err != -EINVAL) | 870 | if (err != -EINVAL) |
| 882 | printk(KERN_ERR | 871 | printk(KERN_ERR |
| @@ -2598,9 +2587,11 @@ u32 cik_compute_ring_get_rptr(struct radeon_device *rdev, | |||
| 2598 | if (rdev->wb.enabled) { | 2587 | if (rdev->wb.enabled) { |
| 2599 | rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); | 2588 | rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); |
| 2600 | } else { | 2589 | } else { |
| 2590 | mutex_lock(&rdev->srbm_mutex); | ||
| 2601 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); | 2591 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); |
| 2602 | rptr = RREG32(CP_HQD_PQ_RPTR); | 2592 | rptr = RREG32(CP_HQD_PQ_RPTR); |
| 2603 | cik_srbm_select(rdev, 0, 0, 0, 0); | 2593 | cik_srbm_select(rdev, 0, 0, 0, 0); |
| 2594 | mutex_unlock(&rdev->srbm_mutex); | ||
| 2604 | } | 2595 | } |
| 2605 | rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; | 2596 | rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; |
| 2606 | 2597 | ||
| @@ -2615,9 +2606,11 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev, | |||
| 2615 | if (rdev->wb.enabled) { | 2606 | if (rdev->wb.enabled) { |
| 2616 | wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]); | 2607 | wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]); |
| 2617 | } else { | 2608 | } else { |
| 2609 | mutex_lock(&rdev->srbm_mutex); | ||
| 2618 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); | 2610 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); |
| 2619 | wptr = RREG32(CP_HQD_PQ_WPTR); | 2611 | wptr = RREG32(CP_HQD_PQ_WPTR); |
| 2620 | cik_srbm_select(rdev, 0, 0, 0, 0); | 2612 | cik_srbm_select(rdev, 0, 0, 0, 0); |
| 2613 | mutex_unlock(&rdev->srbm_mutex); | ||
| 2621 | } | 2614 | } |
| 2622 | wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; | 2615 | wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; |
| 2623 | 2616 | ||
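The two hunks above (and the cik.c hunks that follow) all enforce the same rule: a cik_srbm_select()/register-access/deselect sequence must run under srbm_mutex, because the SRBM register bank is global to the GPU. A sketch of the locked access pattern, with a hypothetical helper name:

/* Sketch only; cik_read_hqd_reg_locked() is illustrative, not from
 * this diff. The select/deselect pair must stay inside the critical
 * section, or a concurrent caller could address the wrong
 * me/pipe/queue bank. */
static u32 cik_read_hqd_reg_locked(struct radeon_device *rdev,
				   struct radeon_ring *ring, u32 reg)
{
	u32 val;

	mutex_lock(&rdev->srbm_mutex);
	cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
	val = RREG32(reg);
	cik_srbm_select(rdev, 0, 0, 0, 0);	/* restore the default bank */
	mutex_unlock(&rdev->srbm_mutex);

	return val;
}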
| @@ -2908,6 +2901,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) | |||
| 2908 | WREG32(CP_CPF_DEBUG, tmp); | 2901 | WREG32(CP_CPF_DEBUG, tmp); |
| 2909 | 2902 | ||
| 2910 | /* init the pipes */ | 2903 | /* init the pipes */ |
| 2904 | mutex_lock(&rdev->srbm_mutex); | ||
| 2911 | for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) { | 2905 | for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) { |
| 2912 | int me = (i < 4) ? 1 : 2; | 2906 | int me = (i < 4) ? 1 : 2; |
| 2913 | int pipe = (i < 4) ? i : (i - 4); | 2907 | int pipe = (i < 4) ? i : (i - 4); |
| @@ -2930,6 +2924,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) | |||
| 2930 | WREG32(CP_HPD_EOP_CONTROL, tmp); | 2924 | WREG32(CP_HPD_EOP_CONTROL, tmp); |
| 2931 | } | 2925 | } |
| 2932 | cik_srbm_select(rdev, 0, 0, 0, 0); | 2926 | cik_srbm_select(rdev, 0, 0, 0, 0); |
| 2927 | mutex_unlock(&rdev->srbm_mutex); | ||
| 2933 | 2928 | ||
| 2934 | /* init the queues. Just two for now. */ | 2929 | /* init the queues. Just two for now. */ |
| 2935 | for (i = 0; i < 2; i++) { | 2930 | for (i = 0; i < 2; i++) { |
| @@ -2983,6 +2978,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) | |||
| 2983 | mqd->static_thread_mgmt23[0] = 0xffffffff; | 2978 | mqd->static_thread_mgmt23[0] = 0xffffffff; |
| 2984 | mqd->static_thread_mgmt23[1] = 0xffffffff; | 2979 | mqd->static_thread_mgmt23[1] = 0xffffffff; |
| 2985 | 2980 | ||
| 2981 | mutex_lock(&rdev->srbm_mutex); | ||
| 2986 | cik_srbm_select(rdev, rdev->ring[idx].me, | 2982 | cik_srbm_select(rdev, rdev->ring[idx].me, |
| 2987 | rdev->ring[idx].pipe, | 2983 | rdev->ring[idx].pipe, |
| 2988 | rdev->ring[idx].queue, 0); | 2984 | rdev->ring[idx].queue, 0); |
| @@ -3110,6 +3106,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) | |||
| 3110 | WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active); | 3106 | WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active); |
| 3111 | 3107 | ||
| 3112 | cik_srbm_select(rdev, 0, 0, 0, 0); | 3108 | cik_srbm_select(rdev, 0, 0, 0, 0); |
| 3109 | mutex_unlock(&rdev->srbm_mutex); | ||
| 3113 | 3110 | ||
| 3114 | radeon_bo_kunmap(rdev->ring[idx].mqd_obj); | 3111 | radeon_bo_kunmap(rdev->ring[idx].mqd_obj); |
| 3115 | radeon_bo_unreserve(rdev->ring[idx].mqd_obj); | 3112 | radeon_bo_unreserve(rdev->ring[idx].mqd_obj); |
| @@ -4331,6 +4328,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) | |||
| 4331 | 4328 | ||
| 4332 | /* XXX SH_MEM regs */ | 4329 | /* XXX SH_MEM regs */ |
| 4333 | /* where to put LDS, scratch, GPUVM in FSA64 space */ | 4330 | /* where to put LDS, scratch, GPUVM in FSA64 space */ |
| 4331 | mutex_lock(&rdev->srbm_mutex); | ||
| 4334 | for (i = 0; i < 16; i++) { | 4332 | for (i = 0; i < 16; i++) { |
| 4335 | cik_srbm_select(rdev, 0, 0, 0, i); | 4333 | cik_srbm_select(rdev, 0, 0, 0, i); |
| 4336 | /* CP and shaders */ | 4334 | /* CP and shaders */ |
| @@ -4346,6 +4344,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) | |||
| 4346 | /* XXX SDMA RLC - todo */ | 4344 | /* XXX SDMA RLC - todo */ |
| 4347 | } | 4345 | } |
| 4348 | cik_srbm_select(rdev, 0, 0, 0, 0); | 4346 | cik_srbm_select(rdev, 0, 0, 0, 0); |
| 4347 | mutex_unlock(&rdev->srbm_mutex); | ||
| 4349 | 4348 | ||
| 4350 | cik_pcie_gart_tlb_flush(rdev); | 4349 | cik_pcie_gart_tlb_flush(rdev); |
| 4351 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", | 4350 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", |
| @@ -4453,6 +4452,29 @@ void cik_vm_fini(struct radeon_device *rdev) | |||
| 4453 | } | 4452 | } |
| 4454 | 4453 | ||
| 4455 | /** | 4454 | /** |
| 4455 | * cik_vm_decode_fault - print human readable fault info | ||
| 4456 | * | ||
| 4457 | * @rdev: radeon_device pointer | ||
| 4458 | * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value | ||
| 4459 | * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value | ||
| 4460 | * | ||
| 4461 | * Print human readable fault information (CIK). | ||
| 4462 | */ | ||
| 4463 | static void cik_vm_decode_fault(struct radeon_device *rdev, | ||
| 4464 | u32 status, u32 addr, u32 mc_client) | ||
| 4465 | { | ||
| 4466 | u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT; | ||
| 4467 | u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT; | ||
| 4468 | u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT; | ||
| 4469 | char *block = (char *)&mc_client; | ||
| 4470 | |||
| 4471 | printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n", | ||
| 4472 | protections, vmid, addr, | ||
| 4473 | (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read", | ||
| 4474 | block, mc_id); | ||
| 4475 | } | ||
| 4476 | |||
| 4477 | /** | ||
| 4456 | * cik_vm_flush - cik vm flush using the CP | 4478 | * cik_vm_flush - cik vm flush using the CP |
| 4457 | * | 4479 | * |
| 4458 | * @rdev: radeon_device pointer | 4480 | * @rdev: radeon_device pointer |
| @@ -5507,6 +5529,7 @@ int cik_irq_process(struct radeon_device *rdev) | |||
| 5507 | u32 ring_index; | 5529 | u32 ring_index; |
| 5508 | bool queue_hotplug = false; | 5530 | bool queue_hotplug = false; |
| 5509 | bool queue_reset = false; | 5531 | bool queue_reset = false; |
| 5532 | u32 addr, status, mc_client; | ||
| 5510 | 5533 | ||
| 5511 | if (!rdev->ih.enabled || rdev->shutdown) | 5534 | if (!rdev->ih.enabled || rdev->shutdown) |
| 5512 | return IRQ_NONE; | 5535 | return IRQ_NONE; |
| @@ -5742,11 +5765,15 @@ restart_ih: | |||
| 5742 | break; | 5765 | break; |
| 5743 | case 146: | 5766 | case 146: |
| 5744 | case 147: | 5767 | case 147: |
| 5768 | addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); | ||
| 5769 | status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS); | ||
| 5770 | mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); | ||
| 5745 | dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); | 5771 | dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); |
| 5746 | dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", | 5772 | dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", |
| 5747 | RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); | 5773 | addr); |
| 5748 | dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", | 5774 | dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", |
| 5749 | RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); | 5775 | status); |
| 5776 | cik_vm_decode_fault(rdev, status, addr, mc_client); | ||
| 5750 | /* reset addr and status */ | 5777 | /* reset addr and status */ |
| 5751 | WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); | 5778 | WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); |
| 5752 | break; | 5779 | break; |
| @@ -5937,6 +5964,8 @@ static int cik_startup(struct radeon_device *rdev) | |||
| 5937 | struct radeon_ring *ring; | 5964 | struct radeon_ring *ring; |
| 5938 | int r; | 5965 | int r; |
| 5939 | 5966 | ||
| 5967 | cik_mc_program(rdev); | ||
| 5968 | |||
| 5940 | if (rdev->flags & RADEON_IS_IGP) { | 5969 | if (rdev->flags & RADEON_IS_IGP) { |
| 5941 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || | 5970 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || |
| 5942 | !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) { | 5971 | !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) { |
| @@ -5968,7 +5997,6 @@ static int cik_startup(struct radeon_device *rdev) | |||
| 5968 | if (r) | 5997 | if (r) |
| 5969 | return r; | 5998 | return r; |
| 5970 | 5999 | ||
| 5971 | cik_mc_program(rdev); | ||
| 5972 | r = cik_pcie_gart_enable(rdev); | 6000 | r = cik_pcie_gart_enable(rdev); |
| 5973 | if (r) | 6001 | if (r) |
| 5974 | return r; | 6002 | return r; |
| @@ -6177,7 +6205,7 @@ int cik_suspend(struct radeon_device *rdev) | |||
| 6177 | radeon_vm_manager_fini(rdev); | 6205 | radeon_vm_manager_fini(rdev); |
| 6178 | cik_cp_enable(rdev, false); | 6206 | cik_cp_enable(rdev, false); |
| 6179 | cik_sdma_enable(rdev, false); | 6207 | cik_sdma_enable(rdev, false); |
| 6180 | r600_uvd_rbc_stop(rdev); | 6208 | r600_uvd_stop(rdev); |
| 6181 | radeon_uvd_suspend(rdev); | 6209 | radeon_uvd_suspend(rdev); |
| 6182 | cik_irq_suspend(rdev); | 6210 | cik_irq_suspend(rdev); |
| 6183 | radeon_wb_disable(rdev); | 6211 | radeon_wb_disable(rdev); |
| @@ -6341,6 +6369,7 @@ void cik_fini(struct radeon_device *rdev) | |||
| 6341 | radeon_vm_manager_fini(rdev); | 6369 | radeon_vm_manager_fini(rdev); |
| 6342 | radeon_ib_pool_fini(rdev); | 6370 | radeon_ib_pool_fini(rdev); |
| 6343 | radeon_irq_kms_fini(rdev); | 6371 | radeon_irq_kms_fini(rdev); |
| 6372 | r600_uvd_stop(rdev); | ||
| 6344 | radeon_uvd_fini(rdev); | 6373 | radeon_uvd_fini(rdev); |
| 6345 | cik_pcie_gart_fini(rdev); | 6374 | cik_pcie_gart_fini(rdev); |
| 6346 | r600_vram_scratch_fini(rdev); | 6375 | r600_vram_scratch_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 63514b95889a..7e9275eaef80 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h | |||
| @@ -136,6 +136,22 @@ | |||
| 136 | #define VM_INVALIDATE_RESPONSE 0x147c | 136 | #define VM_INVALIDATE_RESPONSE 0x147c |
| 137 | 137 | ||
| 138 | #define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC | 138 | #define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC |
| 139 | #define PROTECTIONS_MASK (0xf << 0) | ||
| 140 | #define PROTECTIONS_SHIFT 0 | ||
| 141 | /* bit 0: range | ||
| 142 | * bit 1: pde0 | ||
| 143 | * bit 2: valid | ||
| 144 | * bit 3: read | ||
| 145 | * bit 4: write | ||
| 146 | */ | ||
| 147 | #define MEMORY_CLIENT_ID_MASK (0xff << 12) | ||
| 148 | #define MEMORY_CLIENT_ID_SHIFT 12 | ||
| 149 | #define MEMORY_CLIENT_RW_MASK (1 << 24) | ||
| 150 | #define MEMORY_CLIENT_RW_SHIFT 24 | ||
| 151 | #define FAULT_VMID_MASK (0xf << 25) | ||
| 152 | #define FAULT_VMID_SHIFT 25 | ||
| 153 | |||
| 154 | #define VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT 0x14E4 | ||
| 139 | 155 | ||
| 140 | #define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC | 156 | #define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC |
| 141 | 157 | ||
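As a worked example of the new fault-status fields, decoding a made-up register value with the masks above (user-space sketch; the status value is purely illustrative):

#include <stdint.h>
#include <stdio.h>

#define PROTECTIONS_MASK	(0xf << 0)
#define PROTECTIONS_SHIFT	0
#define MEMORY_CLIENT_ID_MASK	(0xff << 12)
#define MEMORY_CLIENT_ID_SHIFT	12
#define MEMORY_CLIENT_RW_MASK	(1 << 24)
#define FAULT_VMID_MASK		(0xf << 25)
#define FAULT_VMID_SHIFT	25

int main(void)
{
	uint32_t status = 0x0300400c;	/* hypothetical fault status */

	/* 0xc = valid|read per the bit list above;
	 * prints: protections 0xc, client 0x04, write, vmid 1 */
	printf("protections 0x%x, client 0x%02x, %s, vmid %u\n",
	       (unsigned int)((status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT),
	       (unsigned int)((status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT),
	       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
	       (unsigned int)((status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT));
	return 0;
}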
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c index 9bcdd174780f..7e5d0b570a30 100644 --- a/drivers/gpu/drm/radeon/cypress_dpm.c +++ b/drivers/gpu/drm/radeon/cypress_dpm.c | |||
| @@ -2038,9 +2038,6 @@ int cypress_dpm_init(struct radeon_device *rdev) | |||
| 2038 | { | 2038 | { |
| 2039 | struct rv7xx_power_info *pi; | 2039 | struct rv7xx_power_info *pi; |
| 2040 | struct evergreen_power_info *eg_pi; | 2040 | struct evergreen_power_info *eg_pi; |
| 2041 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
| 2042 | uint16_t data_offset, size; | ||
| 2043 | uint8_t frev, crev; | ||
| 2044 | struct atom_clock_dividers dividers; | 2041 | struct atom_clock_dividers dividers; |
| 2045 | int ret; | 2042 | int ret; |
| 2046 | 2043 | ||
| @@ -2092,16 +2089,7 @@ int cypress_dpm_init(struct radeon_device *rdev) | |||
| 2092 | eg_pi->vddci_control = | 2089 | eg_pi->vddci_control = |
| 2093 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); | 2090 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); |
| 2094 | 2091 | ||
| 2095 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 2092 | rv770_get_engine_memory_ss(rdev); |
| 2096 | &frev, &crev, &data_offset)) { | ||
| 2097 | pi->sclk_ss = true; | ||
| 2098 | pi->mclk_ss = true; | ||
| 2099 | pi->dynamic_ss = true; | ||
| 2100 | } else { | ||
| 2101 | pi->sclk_ss = false; | ||
| 2102 | pi->mclk_ss = false; | ||
| 2103 | pi->dynamic_ss = true; | ||
| 2104 | } | ||
| 2105 | 2093 | ||
| 2106 | pi->asi = RV770_ASI_DFLT; | 2094 | pi->asi = RV770_ASI_DFLT; |
| 2107 | pi->pasi = CYPRESS_HASI_DFLT; | 2095 | pi->pasi = CYPRESS_HASI_DFLT; |
| @@ -2122,8 +2110,7 @@ int cypress_dpm_init(struct radeon_device *rdev) | |||
| 2122 | 2110 | ||
| 2123 | pi->dynamic_pcie_gen2 = true; | 2111 | pi->dynamic_pcie_gen2 = true; |
| 2124 | 2112 | ||
| 2125 | if (pi->gfx_clock_gating && | 2113 | if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) |
| 2126 | (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) | ||
| 2127 | pi->thermal_protection = true; | 2114 | pi->thermal_protection = true; |
| 2128 | else | 2115 | else |
| 2129 | pi->thermal_protection = false; | 2116 | pi->thermal_protection = false; |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index e49059dc9b8f..d5b49e33315e 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
| @@ -139,6 +139,8 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev); | |||
| 139 | void evergreen_program_aspm(struct radeon_device *rdev); | 139 | void evergreen_program_aspm(struct radeon_device *rdev); |
| 140 | extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev, | 140 | extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev, |
| 141 | int ring, u32 cp_int_cntl); | 141 | int ring, u32 cp_int_cntl); |
| 142 | extern void cayman_vm_decode_fault(struct radeon_device *rdev, | ||
| 143 | u32 status, u32 addr); | ||
| 142 | 144 | ||
| 143 | static const u32 evergreen_golden_registers[] = | 145 | static const u32 evergreen_golden_registers[] = |
| 144 | { | 146 | { |
| @@ -4586,6 +4588,7 @@ int evergreen_irq_process(struct radeon_device *rdev) | |||
| 4586 | bool queue_hotplug = false; | 4588 | bool queue_hotplug = false; |
| 4587 | bool queue_hdmi = false; | 4589 | bool queue_hdmi = false; |
| 4588 | bool queue_thermal = false; | 4590 | bool queue_thermal = false; |
| 4591 | u32 status, addr; | ||
| 4589 | 4592 | ||
| 4590 | if (!rdev->ih.enabled || rdev->shutdown) | 4593 | if (!rdev->ih.enabled || rdev->shutdown) |
| 4591 | return IRQ_NONE; | 4594 | return IRQ_NONE; |
| @@ -4872,11 +4875,14 @@ restart_ih: | |||
| 4872 | break; | 4875 | break; |
| 4873 | case 146: | 4876 | case 146: |
| 4874 | case 147: | 4877 | case 147: |
| 4878 | addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); | ||
| 4879 | status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS); | ||
| 4875 | dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); | 4880 | dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); |
| 4876 | dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", | 4881 | dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", |
| 4877 | RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); | 4882 | addr); |
| 4878 | dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", | 4883 | dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", |
| 4879 | RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); | 4884 | status); |
| 4885 | cayman_vm_decode_fault(rdev, status, addr); | ||
| 4880 | /* reset addr and status */ | 4886 | /* reset addr and status */ |
| 4881 | WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); | 4887 | WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); |
| 4882 | break; | 4888 | break; |
| @@ -5100,6 +5106,8 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
| 5100 | /* enable aspm */ | 5106 | /* enable aspm */ |
| 5101 | evergreen_program_aspm(rdev); | 5107 | evergreen_program_aspm(rdev); |
| 5102 | 5108 | ||
| 5109 | evergreen_mc_program(rdev); | ||
| 5110 | |||
| 5103 | if (ASIC_IS_DCE5(rdev)) { | 5111 | if (ASIC_IS_DCE5(rdev)) { |
| 5104 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { | 5112 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { |
| 5105 | r = ni_init_microcode(rdev); | 5113 | r = ni_init_microcode(rdev); |
| @@ -5127,7 +5135,6 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
| 5127 | if (r) | 5135 | if (r) |
| 5128 | return r; | 5136 | return r; |
| 5129 | 5137 | ||
| 5130 | evergreen_mc_program(rdev); | ||
| 5131 | if (rdev->flags & RADEON_IS_AGP) { | 5138 | if (rdev->flags & RADEON_IS_AGP) { |
| 5132 | evergreen_agp_enable(rdev); | 5139 | evergreen_agp_enable(rdev); |
| 5133 | } else { | 5140 | } else { |
| @@ -5285,10 +5292,10 @@ int evergreen_resume(struct radeon_device *rdev) | |||
| 5285 | int evergreen_suspend(struct radeon_device *rdev) | 5292 | int evergreen_suspend(struct radeon_device *rdev) |
| 5286 | { | 5293 | { |
| 5287 | r600_audio_fini(rdev); | 5294 | r600_audio_fini(rdev); |
| 5295 | r600_uvd_stop(rdev); | ||
| 5288 | radeon_uvd_suspend(rdev); | 5296 | radeon_uvd_suspend(rdev); |
| 5289 | r700_cp_stop(rdev); | 5297 | r700_cp_stop(rdev); |
| 5290 | r600_dma_stop(rdev); | 5298 | r600_dma_stop(rdev); |
| 5291 | r600_uvd_rbc_stop(rdev); | ||
| 5292 | evergreen_irq_suspend(rdev); | 5299 | evergreen_irq_suspend(rdev); |
| 5293 | radeon_wb_disable(rdev); | 5300 | radeon_wb_disable(rdev); |
| 5294 | evergreen_pcie_gart_disable(rdev); | 5301 | evergreen_pcie_gart_disable(rdev); |
| @@ -5423,6 +5430,7 @@ void evergreen_fini(struct radeon_device *rdev) | |||
| 5423 | radeon_ib_pool_fini(rdev); | 5430 | radeon_ib_pool_fini(rdev); |
| 5424 | radeon_irq_kms_fini(rdev); | 5431 | radeon_irq_kms_fini(rdev); |
| 5425 | evergreen_pcie_gart_fini(rdev); | 5432 | evergreen_pcie_gart_fini(rdev); |
| 5433 | r600_uvd_stop(rdev); | ||
| 5426 | radeon_uvd_fini(rdev); | 5434 | radeon_uvd_fini(rdev); |
| 5427 | r600_vram_scratch_fini(rdev); | 5435 | r600_vram_scratch_fini(rdev); |
| 5428 | radeon_gem_fini(rdev); | 5436 | radeon_gem_fini(rdev); |
| @@ -5509,6 +5517,9 @@ void evergreen_program_aspm(struct radeon_device *rdev) | |||
| 5509 | */ | 5517 | */ |
| 5510 | bool fusion_platform = false; | 5518 | bool fusion_platform = false; |
| 5511 | 5519 | ||
| 5520 | if (radeon_aspm == 0) | ||
| 5521 | return; | ||
| 5522 | |||
| 5512 | if (!(rdev->flags & RADEON_IS_PCIE)) | 5523 | if (!(rdev->flags & RADEON_IS_PCIE)) |
| 5513 | return; | 5524 | return; |
| 5514 | 5525 | ||
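The radeon_aspm check added above also gives users an escape hatch: assuming the module parameter is wired up elsewhere in this series with the driver's usual naming (not shown in this hunk), ASPM handling can be turned off from the kernel command line with radeon.aspm=0, or at load time with modprobe radeon aspm=0.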
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index b9c6f7675e59..b0e280058b9b 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c | |||
| @@ -148,18 +148,40 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock) | |||
| 148 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 148 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
| 149 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | 149 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
| 150 | u32 base_rate = 24000; | 150 | u32 base_rate = 24000; |
| 151 | u32 max_ratio = clock / base_rate; | ||
| 152 | u32 dto_phase; | ||
| 153 | u32 dto_modulo = clock; | ||
| 154 | u32 wallclock_ratio; | ||
| 155 | u32 dto_cntl; | ||
| 151 | 156 | ||
| 152 | if (!dig || !dig->afmt) | 157 | if (!dig || !dig->afmt) |
| 153 | return; | 158 | return; |
| 154 | 159 | ||
| 160 | if (max_ratio >= 8) { | ||
| 161 | dto_phase = 192 * 1000; | ||
| 162 | wallclock_ratio = 3; | ||
| 163 | } else if (max_ratio >= 4) { | ||
| 164 | dto_phase = 96 * 1000; | ||
| 165 | wallclock_ratio = 2; | ||
| 166 | } else if (max_ratio >= 2) { | ||
| 167 | dto_phase = 48 * 1000; | ||
| 168 | wallclock_ratio = 1; | ||
| 169 | } else { | ||
| 170 | dto_phase = 24 * 1000; | ||
| 171 | wallclock_ratio = 0; | ||
| 172 | } | ||
| 173 | dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; | ||
| 174 | dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); | ||
| 175 | WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl); | ||
| 176 | |||
| 155 | /* XXX two dtos; generally use dto0 for hdmi */ | 177 | /* XXX two dtos; generally use dto0 for hdmi */ |
| 156 | /* Express [24MHz / target pixel clock] as an exact rational | 178 | /* Express [24MHz / target pixel clock] as an exact rational |
| 157 | * number, i.e. the ratio of two integers: DCCG_AUDIO_DTOx_PHASE | 179 | * number, i.e. the ratio of two integers: DCCG_AUDIO_DTOx_PHASE |
| 158 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | 180 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
| 159 | */ | 181 | */ |
| 160 | WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); | ||
| 161 | WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); | ||
| 162 | WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id)); | 182 | WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id)); |
| 183 | WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase); | ||
| 184 | WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo); | ||
| 163 | } | 185 | } |
| 164 | 186 | ||
| 165 | 187 | ||
| @@ -177,6 +199,9 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode | |||
| 177 | uint32_t offset; | 199 | uint32_t offset; |
| 178 | ssize_t err; | 200 | ssize_t err; |
| 179 | 201 | ||
| 202 | if (!dig || !dig->afmt) | ||
| 203 | return; | ||
| 204 | |||
| 180 | /* Silent, r600_hdmi_enable will raise WARN for us */ | 205 | /* Silent, r600_hdmi_enable will raise WARN for us */ |
| 181 | if (!dig->afmt->enabled) | 206 | if (!dig->afmt->enabled) |
| 182 | return; | 207 | return; |
| @@ -280,6 +305,9 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) | |||
| 280 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 305 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 281 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 306 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
| 282 | 307 | ||
| 308 | if (!dig || !dig->afmt) | ||
| 309 | return; | ||
| 310 | |||
| 283 | /* Silent, r600_hdmi_enable will raise WARN for us */ | 311 | /* Silent, r600_hdmi_enable will raise WARN for us */ |
| 284 | if (enable && dig->afmt->enabled) | 312 | if (enable && dig->afmt->enabled) |
| 285 | return; | 313 | return; |
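To make the new DTO programming concrete, a worked example under the same units as the function above (clock in kHz; 148500 kHz is the standard 1080p pixel clock and is used here only for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int base_rate = 24000;			/* 24 MHz, in kHz */
	unsigned int clock = 148500;			/* 1080p pixel clock, kHz */
	unsigned int max_ratio = clock / base_rate;	/* 148500/24000 = 6 */
	unsigned int dto_phase, wallclock_ratio;

	/* same ladder as evergreen_audio_set_dto() above */
	if (max_ratio >= 8) {
		dto_phase = 192 * 1000;
		wallclock_ratio = 3;
	} else if (max_ratio >= 4) {
		dto_phase = 96 * 1000;
		wallclock_ratio = 2;
	} else if (max_ratio >= 2) {
		dto_phase = 48 * 1000;
		wallclock_ratio = 1;
	} else {
		dto_phase = 24 * 1000;
		wallclock_ratio = 0;
	}

	/* prints: phase 96000, module 148500, wallclock ratio 2 */
	printf("phase %u, module %u, wallclock ratio %u\n",
	       dto_phase, clock, wallclock_ratio);
	return 0;
}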
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index a7baf67aef6c..0d582ac1dc31 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
| @@ -497,6 +497,9 @@ | |||
| 497 | #define DCCG_AUDIO_DTO0_MODULE 0x05b4 | 497 | #define DCCG_AUDIO_DTO0_MODULE 0x05b4 |
| 498 | #define DCCG_AUDIO_DTO0_LOAD 0x05b8 | 498 | #define DCCG_AUDIO_DTO0_LOAD 0x05b8 |
| 499 | #define DCCG_AUDIO_DTO0_CNTL 0x05bc | 499 | #define DCCG_AUDIO_DTO0_CNTL 0x05bc |
| 500 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0) | ||
| 501 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7 | ||
| 502 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0 | ||
| 500 | 503 | ||
| 501 | #define DCCG_AUDIO_DTO1_PHASE 0x05c0 | 504 | #define DCCG_AUDIO_DTO1_PHASE 0x05c0 |
| 502 | #define DCCG_AUDIO_DTO1_MODULE 0x05c4 | 505 | #define DCCG_AUDIO_DTO1_MODULE 0x05c4 |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index f30127cb30ef..ccb4f8b54852 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
| @@ -22,7 +22,6 @@ | |||
| 22 | * Authors: Alex Deucher | 22 | * Authors: Alex Deucher |
| 23 | */ | 23 | */ |
| 24 | #include <linux/firmware.h> | 24 | #include <linux/firmware.h> |
| 25 | #include <linux/platform_device.h> | ||
| 26 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
| 27 | #include <linux/module.h> | 26 | #include <linux/module.h> |
| 28 | #include <drm/drmP.h> | 27 | #include <drm/drmP.h> |
| @@ -684,7 +683,6 @@ int ni_mc_load_microcode(struct radeon_device *rdev) | |||
| 684 | 683 | ||
| 685 | int ni_init_microcode(struct radeon_device *rdev) | 684 | int ni_init_microcode(struct radeon_device *rdev) |
| 686 | { | 685 | { |
| 687 | struct platform_device *pdev; | ||
| 688 | const char *chip_name; | 686 | const char *chip_name; |
| 689 | const char *rlc_chip_name; | 687 | const char *rlc_chip_name; |
| 690 | size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size; | 688 | size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size; |
| @@ -694,13 +692,6 @@ int ni_init_microcode(struct radeon_device *rdev) | |||
| 694 | 692 | ||
| 695 | DRM_DEBUG("\n"); | 693 | DRM_DEBUG("\n"); |
| 696 | 694 | ||
| 697 | pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); | ||
| 698 | err = IS_ERR(pdev); | ||
| 699 | if (err) { | ||
| 700 | printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); | ||
| 701 | return -EINVAL; | ||
| 702 | } | ||
| 703 | |||
| 704 | switch (rdev->family) { | 695 | switch (rdev->family) { |
| 705 | case CHIP_BARTS: | 696 | case CHIP_BARTS: |
| 706 | chip_name = "BARTS"; | 697 | chip_name = "BARTS"; |
| @@ -753,7 +744,7 @@ int ni_init_microcode(struct radeon_device *rdev) | |||
| 753 | DRM_INFO("Loading %s Microcode\n", chip_name); | 744 | DRM_INFO("Loading %s Microcode\n", chip_name); |
| 754 | 745 | ||
| 755 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); | 746 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); |
| 756 | err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); | 747 | err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); |
| 757 | if (err) | 748 | if (err) |
| 758 | goto out; | 749 | goto out; |
| 759 | if (rdev->pfp_fw->size != pfp_req_size) { | 750 | if (rdev->pfp_fw->size != pfp_req_size) { |
| @@ -765,7 +756,7 @@ int ni_init_microcode(struct radeon_device *rdev) | |||
| 765 | } | 756 | } |
| 766 | 757 | ||
| 767 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); | 758 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); |
| 768 | err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); | 759 | err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); |
| 769 | if (err) | 760 | if (err) |
| 770 | goto out; | 761 | goto out; |
| 771 | if (rdev->me_fw->size != me_req_size) { | 762 | if (rdev->me_fw->size != me_req_size) { |
| @@ -776,7 +767,7 @@ int ni_init_microcode(struct radeon_device *rdev) | |||
| 776 | } | 767 | } |
| 777 | 768 | ||
| 778 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); | 769 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); |
| 779 | err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); | 770 | err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); |
| 780 | if (err) | 771 | if (err) |
| 781 | goto out; | 772 | goto out; |
| 782 | if (rdev->rlc_fw->size != rlc_req_size) { | 773 | if (rdev->rlc_fw->size != rlc_req_size) { |
| @@ -789,7 +780,7 @@ int ni_init_microcode(struct radeon_device *rdev) | |||
| 789 | /* no MC ucode on TN */ | 780 | /* no MC ucode on TN */ |
| 790 | if (!(rdev->flags & RADEON_IS_IGP)) { | 781 | if (!(rdev->flags & RADEON_IS_IGP)) { |
| 791 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); | 782 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); |
| 792 | err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev); | 783 | err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); |
| 793 | if (err) | 784 | if (err) |
| 794 | goto out; | 785 | goto out; |
| 795 | if (rdev->mc_fw->size != mc_req_size) { | 786 | if (rdev->mc_fw->size != mc_req_size) { |
| @@ -802,10 +793,14 @@ int ni_init_microcode(struct radeon_device *rdev) | |||
| 802 | 793 | ||
| 803 | if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) { | 794 | if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) { |
| 804 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); | 795 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); |
| 805 | err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev); | 796 | err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); |
| 806 | if (err) | 797 | if (err) { |
| 807 | goto out; | 798 | printk(KERN_ERR |
| 808 | if (rdev->smc_fw->size != smc_req_size) { | 799 | "smc: error loading firmware \"%s\"\n", |
| 800 | fw_name); | ||
| 801 | release_firmware(rdev->smc_fw); | ||
| 802 | rdev->smc_fw = NULL; | ||
| 803 | } else if (rdev->smc_fw->size != smc_req_size) { | ||
| 809 | printk(KERN_ERR | 804 | printk(KERN_ERR |
| 810 | "ni_mc: Bogus length %zu in firmware \"%s\"\n", | 805 | "ni_mc: Bogus length %zu in firmware \"%s\"\n", |
| 811 | rdev->mc_fw->size, fw_name); | 806 | rdev->mc_fw->size, fw_name); |
| @@ -814,8 +809,6 @@ int ni_init_microcode(struct radeon_device *rdev) | |||
| 814 | } | 809 | } |
| 815 | 810 | ||
| 816 | out: | 811 | out: |
| 817 | platform_device_unregister(pdev); | ||
| 818 | |||
| 819 | if (err) { | 812 | if (err) { |
| 820 | if (err != -EINVAL) | 813 | if (err != -EINVAL) |
| 821 | printk(KERN_ERR | 814 | printk(KERN_ERR |
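The SMC hunk above makes SMC microcode best-effort on NI parts: a failed request_firmware() now logs, drops the handle, and leaves rdev->smc_fw NULL instead of aborting ni_init_microcode(). Consumers then only need a pointer check; a minimal sketch of a hypothetical caller (not part of this patch):

    /* illustrative: skip SMC-based DPM setup when the ucode is absent */
    if (rdev->smc_fw == NULL) {
        DRM_INFO("no SMC firmware, falling back to non-DPM power management\n");
        return 0;
    }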
| @@ -2090,6 +2083,8 @@ static int cayman_startup(struct radeon_device *rdev) | |||
| 2090 | /* enable aspm */ | 2083 | /* enable aspm */ |
| 2091 | evergreen_program_aspm(rdev); | 2084 | evergreen_program_aspm(rdev); |
| 2092 | 2085 | ||
| 2086 | evergreen_mc_program(rdev); | ||
| 2087 | |||
| 2093 | if (rdev->flags & RADEON_IS_IGP) { | 2088 | if (rdev->flags & RADEON_IS_IGP) { |
| 2094 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | 2089 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
| 2095 | r = ni_init_microcode(rdev); | 2090 | r = ni_init_microcode(rdev); |
| @@ -2118,7 +2113,6 @@ static int cayman_startup(struct radeon_device *rdev) | |||
| 2118 | if (r) | 2113 | if (r) |
| 2119 | return r; | 2114 | return r; |
| 2120 | 2115 | ||
| 2121 | evergreen_mc_program(rdev); | ||
| 2122 | r = cayman_pcie_gart_enable(rdev); | 2116 | r = cayman_pcie_gart_enable(rdev); |
| 2123 | if (r) | 2117 | if (r) |
| 2124 | return r; | 2118 | return r; |
| @@ -2297,7 +2291,7 @@ int cayman_suspend(struct radeon_device *rdev) | |||
| 2297 | radeon_vm_manager_fini(rdev); | 2291 | radeon_vm_manager_fini(rdev); |
| 2298 | cayman_cp_enable(rdev, false); | 2292 | cayman_cp_enable(rdev, false); |
| 2299 | cayman_dma_stop(rdev); | 2293 | cayman_dma_stop(rdev); |
| 2300 | r600_uvd_rbc_stop(rdev); | 2294 | r600_uvd_stop(rdev); |
| 2301 | radeon_uvd_suspend(rdev); | 2295 | radeon_uvd_suspend(rdev); |
| 2302 | evergreen_irq_suspend(rdev); | 2296 | evergreen_irq_suspend(rdev); |
| 2303 | radeon_wb_disable(rdev); | 2297 | radeon_wb_disable(rdev); |
| @@ -2429,6 +2423,7 @@ void cayman_fini(struct radeon_device *rdev) | |||
| 2429 | radeon_vm_manager_fini(rdev); | 2423 | radeon_vm_manager_fini(rdev); |
| 2430 | radeon_ib_pool_fini(rdev); | 2424 | radeon_ib_pool_fini(rdev); |
| 2431 | radeon_irq_kms_fini(rdev); | 2425 | radeon_irq_kms_fini(rdev); |
| 2426 | r600_uvd_stop(rdev); | ||
| 2432 | radeon_uvd_fini(rdev); | 2427 | radeon_uvd_fini(rdev); |
| 2433 | cayman_pcie_gart_fini(rdev); | 2428 | cayman_pcie_gart_fini(rdev); |
| 2434 | r600_vram_scratch_fini(rdev); | 2429 | r600_vram_scratch_fini(rdev); |
| @@ -2461,6 +2456,167 @@ void cayman_vm_fini(struct radeon_device *rdev) | |||
| 2461 | { | 2456 | { |
| 2462 | } | 2457 | } |
| 2463 | 2458 | ||
| 2459 | /** | ||
| 2460 | * cayman_vm_decode_fault - print human readable fault info | ||
| 2461 | * | ||
| 2462 | * @rdev: radeon_device pointer | ||
| 2463 | * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value | ||
| 2464 | * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value | ||
| 2465 | * | ||
| 2466 | * Print human readable fault information (cayman/TN). | ||
| 2467 | */ | ||
| 2468 | void cayman_vm_decode_fault(struct radeon_device *rdev, | ||
| 2469 | u32 status, u32 addr) | ||
| 2470 | { | ||
| 2471 | u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT; | ||
| 2472 | u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT; | ||
| 2473 | u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT; | ||
| 2474 | char *block; | ||
| 2475 | |||
| 2476 | switch (mc_id) { | ||
| 2477 | case 32: | ||
| 2478 | case 16: | ||
| 2479 | case 96: | ||
| 2480 | case 80: | ||
| 2481 | case 160: | ||
| 2482 | case 144: | ||
| 2483 | case 224: | ||
| 2484 | case 208: | ||
| 2485 | block = "CB"; | ||
| 2486 | break; | ||
| 2487 | case 33: | ||
| 2488 | case 17: | ||
| 2489 | case 97: | ||
| 2490 | case 81: | ||
| 2491 | case 161: | ||
| 2492 | case 145: | ||
| 2493 | case 225: | ||
| 2494 | case 209: | ||
| 2495 | block = "CB_FMASK"; | ||
| 2496 | break; | ||
| 2497 | case 34: | ||
| 2498 | case 18: | ||
| 2499 | case 98: | ||
| 2500 | case 82: | ||
| 2501 | case 162: | ||
| 2502 | case 146: | ||
| 2503 | case 226: | ||
| 2504 | case 210: | ||
| 2505 | block = "CB_CMASK"; | ||
| 2506 | break; | ||
| 2507 | case 35: | ||
| 2508 | case 19: | ||
| 2509 | case 99: | ||
| 2510 | case 83: | ||
| 2511 | case 163: | ||
| 2512 | case 147: | ||
| 2513 | case 227: | ||
| 2514 | case 211: | ||
| 2515 | block = "CB_IMMED"; | ||
| 2516 | break; | ||
| 2517 | case 36: | ||
| 2518 | case 20: | ||
| 2519 | case 100: | ||
| 2520 | case 84: | ||
| 2521 | case 164: | ||
| 2522 | case 148: | ||
| 2523 | case 228: | ||
| 2524 | case 212: | ||
| 2525 | block = "DB"; | ||
| 2526 | break; | ||
| 2527 | case 37: | ||
| 2528 | case 21: | ||
| 2529 | case 101: | ||
| 2530 | case 85: | ||
| 2531 | case 165: | ||
| 2532 | case 149: | ||
| 2533 | case 229: | ||
| 2534 | case 213: | ||
| 2535 | block = "DB_HTILE"; | ||
| 2536 | break; | ||
| 2537 | case 38: | ||
| 2538 | case 22: | ||
| 2539 | case 102: | ||
| 2540 | case 86: | ||
| 2541 | case 166: | ||
| 2542 | case 150: | ||
| 2543 | case 230: | ||
| 2544 | case 214: | ||
| 2545 | block = "SX"; | ||
| 2546 | break; | ||
| 2547 | case 39: | ||
| 2548 | case 23: | ||
| 2549 | case 103: | ||
| 2550 | case 87: | ||
| 2551 | case 167: | ||
| 2552 | case 151: | ||
| 2553 | case 231: | ||
| 2554 | case 215: | ||
| 2555 | block = "DB_STEN"; | ||
| 2556 | break; | ||
| 2557 | case 40: | ||
| 2558 | case 24: | ||
| 2559 | case 104: | ||
| 2560 | case 88: | ||
| 2561 | case 232: | ||
| 2562 | case 216: | ||
| 2563 | case 168: | ||
| 2564 | case 152: | ||
| 2565 | block = "TC_TFETCH"; | ||
| 2566 | break; | ||
| 2567 | case 41: | ||
| 2568 | case 25: | ||
| 2569 | case 105: | ||
| 2570 | case 89: | ||
| 2571 | case 233: | ||
| 2572 | case 217: | ||
| 2573 | case 169: | ||
| 2574 | case 153: | ||
| 2575 | block = "TC_VFETCH"; | ||
| 2576 | break; | ||
| 2577 | case 42: | ||
| 2578 | case 26: | ||
| 2579 | case 106: | ||
| 2580 | case 90: | ||
| 2581 | case 234: | ||
| 2582 | case 218: | ||
| 2583 | case 170: | ||
| 2584 | case 154: | ||
| 2585 | block = "VC"; | ||
| 2586 | break; | ||
| 2587 | case 112: | ||
| 2588 | block = "CP"; | ||
| 2589 | break; | ||
| 2590 | case 113: | ||
| 2591 | case 114: | ||
| 2592 | block = "SH"; | ||
| 2593 | break; | ||
| 2594 | case 115: | ||
| 2595 | block = "VGT"; | ||
| 2596 | break; | ||
| 2597 | case 178: | ||
| 2598 | block = "IH"; | ||
| 2599 | break; | ||
| 2600 | case 51: | ||
| 2601 | block = "RLC"; | ||
| 2602 | break; | ||
| 2603 | case 55: | ||
| 2604 | block = "DMA"; | ||
| 2605 | break; | ||
| 2606 | case 56: | ||
| 2607 | block = "HDP"; | ||
| 2608 | break; | ||
| 2609 | default: | ||
| 2610 | block = "unknown"; | ||
| 2611 | break; | ||
| 2612 | } | ||
| 2613 | |||
| 2614 | printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n", | ||
| 2615 | protections, vmid, addr, | ||
| 2616 | (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read", | ||
| 2617 | block, mc_id); | ||
| 2618 | } | ||
| 2619 | |||
| 2464 | #define R600_ENTRY_VALID (1 << 0) | 2620 | #define R600_ENTRY_VALID (1 << 0) |
| 2465 | #define R600_PTE_SYSTEM (1 << 1) | 2621 | #define R600_PTE_SYSTEM (1 << 1) |
| 2466 | #define R600_PTE_SNOOPED (1 << 2) | 2622 | #define R600_PTE_SNOOPED (1 << 2) |
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 559cf24d51af..f0f5f748938a 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c | |||
| @@ -1054,10 +1054,6 @@ static int ni_restrict_performance_levels_before_switch(struct radeon_device *rd | |||
| 1054 | int ni_dpm_force_performance_level(struct radeon_device *rdev, | 1054 | int ni_dpm_force_performance_level(struct radeon_device *rdev, |
| 1055 | enum radeon_dpm_forced_level level) | 1055 | enum radeon_dpm_forced_level level) |
| 1056 | { | 1056 | { |
| 1057 | struct radeon_ps *rps = rdev->pm.dpm.current_ps; | ||
| 1058 | struct ni_ps *ps = ni_get_ps(rps); | ||
| 1059 | u32 levels; | ||
| 1060 | |||
| 1061 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { | 1057 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { |
| 1062 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK) | 1058 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK) |
| 1063 | return -EINVAL; | 1059 | return -EINVAL; |
| @@ -1068,8 +1064,7 @@ int ni_dpm_force_performance_level(struct radeon_device *rdev, | |||
| 1068 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) | 1064 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) |
| 1069 | return -EINVAL; | 1065 | return -EINVAL; |
| 1070 | 1066 | ||
| 1071 | levels = ps->performance_level_count - 1; | 1067 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK) |
| 1072 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) | ||
| 1073 | return -EINVAL; | 1068 | return -EINVAL; |
| 1074 | } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { | 1069 | } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { |
| 1075 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) | 1070 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) |
| @@ -4072,9 +4067,6 @@ int ni_dpm_init(struct radeon_device *rdev) | |||
| 4072 | struct rv7xx_power_info *pi; | 4067 | struct rv7xx_power_info *pi; |
| 4073 | struct evergreen_power_info *eg_pi; | 4068 | struct evergreen_power_info *eg_pi; |
| 4074 | struct ni_power_info *ni_pi; | 4069 | struct ni_power_info *ni_pi; |
| 4075 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
| 4076 | u16 data_offset, size; | ||
| 4077 | u8 frev, crev; | ||
| 4078 | struct atom_clock_dividers dividers; | 4070 | struct atom_clock_dividers dividers; |
| 4079 | int ret; | 4071 | int ret; |
| 4080 | 4072 | ||
| @@ -4167,16 +4159,7 @@ int ni_dpm_init(struct radeon_device *rdev) | |||
| 4167 | eg_pi->vddci_control = | 4159 | eg_pi->vddci_control = |
| 4168 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); | 4160 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); |
| 4169 | 4161 | ||
| 4170 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 4162 | rv770_get_engine_memory_ss(rdev); |
| 4171 | &frev, &crev, &data_offset)) { | ||
| 4172 | pi->sclk_ss = true; | ||
| 4173 | pi->mclk_ss = true; | ||
| 4174 | pi->dynamic_ss = true; | ||
| 4175 | } else { | ||
| 4176 | pi->sclk_ss = false; | ||
| 4177 | pi->mclk_ss = false; | ||
| 4178 | pi->dynamic_ss = true; | ||
| 4179 | } | ||
| 4180 | 4163 | ||
| 4181 | pi->asi = RV770_ASI_DFLT; | 4164 | pi->asi = RV770_ASI_DFLT; |
| 4182 | pi->pasi = CYPRESS_HASI_DFLT; | 4165 | pi->pasi = CYPRESS_HASI_DFLT; |
| @@ -4193,8 +4176,7 @@ int ni_dpm_init(struct radeon_device *rdev) | |||
| 4193 | 4176 | ||
| 4194 | pi->dynamic_pcie_gen2 = true; | 4177 | pi->dynamic_pcie_gen2 = true; |
| 4195 | 4178 | ||
| 4196 | if (pi->gfx_clock_gating && | 4179 | if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) |
| 4197 | (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) | ||
| 4198 | pi->thermal_protection = true; | 4180 | pi->thermal_protection = true; |
| 4199 | else | 4181 | else |
| 4200 | pi->thermal_protection = false; | 4182 | pi->thermal_protection = false; |
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h index fe24a93542ec..22421bc80c0d 100644 --- a/drivers/gpu/drm/radeon/nid.h +++ b/drivers/gpu/drm/radeon/nid.h | |||
| @@ -133,6 +133,22 @@ | |||
| 133 | #define VM_CONTEXT1_CNTL2 0x1434 | 133 | #define VM_CONTEXT1_CNTL2 0x1434 |
| 134 | #define VM_INVALIDATE_REQUEST 0x1478 | 134 | #define VM_INVALIDATE_REQUEST 0x1478 |
| 135 | #define VM_INVALIDATE_RESPONSE 0x147c | 135 | #define VM_INVALIDATE_RESPONSE 0x147c |
| 136 | #define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC | ||
| 137 | #define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC | ||
| 138 | #define PROTECTIONS_MASK (0xf << 0) | ||
| 139 | #define PROTECTIONS_SHIFT 0 | ||
| 140 | /* bit 0: range | ||
| 141 | * bit 2: pde0 | ||
| 142 | * bit 3: valid | ||
| 143 | * bit 4: read | ||
| 144 | * bit 5: write | ||
| 145 | */ | ||
| 146 | #define MEMORY_CLIENT_ID_MASK (0xff << 12) | ||
| 147 | #define MEMORY_CLIENT_ID_SHIFT 12 | ||
| 148 | #define MEMORY_CLIENT_RW_MASK (1 << 24) | ||
| 149 | #define MEMORY_CLIENT_RW_SHIFT 24 | ||
| 150 | #define FAULT_VMID_MASK (0x7 << 25) | ||
| 151 | #define FAULT_VMID_SHIFT 25 | ||
| 136 | #define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518 | 152 | #define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518 |
| 137 | #define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x151c | 153 | #define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x151c |
| 138 | #define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C | 154 | #define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C |
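A worked decode tying these fields to cayman_vm_decode_fault() in ni.c above; the status value is made up for illustration:

    u32 status = 0x0307000c;                                  /* hypothetical register value */
    u32 prot   = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;           /* 0xc = valid|pde0 */
    u32 mc_id  = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT; /* 0x70 = 112 -> "CP" */
    u32 vmid   = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;             /* 1 */
    bool write = status & MEMORY_CLIENT_RW_MASK;                             /* bit 24 set -> write */

which cayman_vm_decode_fault() would report as "VM fault (0x0c, vmid 1) at page ..., write from CP (112)".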
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index c9affefd79f6..75349cdaa84b 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
| @@ -39,7 +39,6 @@ | |||
| 39 | #include "atom.h" | 39 | #include "atom.h" |
| 40 | 40 | ||
| 41 | #include <linux/firmware.h> | 41 | #include <linux/firmware.h> |
| 42 | #include <linux/platform_device.h> | ||
| 43 | #include <linux/module.h> | 42 | #include <linux/module.h> |
| 44 | 43 | ||
| 45 | #include "r100_reg_safe.h" | 44 | #include "r100_reg_safe.h" |
| @@ -989,18 +988,11 @@ void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) | |||
| 989 | /* Load the microcode for the CP */ | 988 | /* Load the microcode for the CP */ |
| 990 | static int r100_cp_init_microcode(struct radeon_device *rdev) | 989 | static int r100_cp_init_microcode(struct radeon_device *rdev) |
| 991 | { | 990 | { |
| 992 | struct platform_device *pdev; | ||
| 993 | const char *fw_name = NULL; | 991 | const char *fw_name = NULL; |
| 994 | int err; | 992 | int err; |
| 995 | 993 | ||
| 996 | DRM_DEBUG_KMS("\n"); | 994 | DRM_DEBUG_KMS("\n"); |
| 997 | 995 | ||
| 998 | pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); | ||
| 999 | err = IS_ERR(pdev); | ||
| 1000 | if (err) { | ||
| 1001 | printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); | ||
| 1002 | return -EINVAL; | ||
| 1003 | } | ||
| 1004 | if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) || | 996 | if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) || |
| 1005 | (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) || | 997 | (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) || |
| 1006 | (rdev->family == CHIP_RS200)) { | 998 | (rdev->family == CHIP_RS200)) { |
| @@ -1042,8 +1034,7 @@ static int r100_cp_init_microcode(struct radeon_device *rdev) | |||
| 1042 | fw_name = FIRMWARE_R520; | 1034 | fw_name = FIRMWARE_R520; |
| 1043 | } | 1035 | } |
| 1044 | 1036 | ||
| 1045 | err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); | 1037 | err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); |
| 1046 | platform_device_unregister(pdev); | ||
| 1047 | if (err) { | 1038 | if (err) { |
| 1048 | printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n", | 1039 | printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n", |
| 1049 | fw_name); | 1040 | fw_name); |
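The r100 hunk shows the shape of the whole series: the throwaway "radeon_cp" platform device existed only to hand request_firmware() a struct device, and the device the driver already owns serves the same purpose. Condensed before/after, with the firmware name picked for illustration:

    /* before: dummy platform device just to satisfy the firmware core */
    pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
    err = request_firmware(&rdev->me_fw, "radeon/R520_cp.bin", &pdev->dev);
    platform_device_unregister(pdev);

    /* after: reuse the real device */
    err = request_firmware(&rdev->me_fw, "radeon/R520_cp.bin", rdev->dev);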
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 2d3655f7f41e..e66e72077350 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -28,7 +28,6 @@ | |||
| 28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
| 29 | #include <linux/seq_file.h> | 29 | #include <linux/seq_file.h> |
| 30 | #include <linux/firmware.h> | 30 | #include <linux/firmware.h> |
| 31 | #include <linux/platform_device.h> | ||
| 32 | #include <linux/module.h> | 31 | #include <linux/module.h> |
| 33 | #include <drm/drmP.h> | 32 | #include <drm/drmP.h> |
| 34 | #include <drm/radeon_drm.h> | 33 | #include <drm/radeon_drm.h> |
| @@ -2144,7 +2143,6 @@ void r600_cp_stop(struct radeon_device *rdev) | |||
| 2144 | 2143 | ||
| 2145 | int r600_init_microcode(struct radeon_device *rdev) | 2144 | int r600_init_microcode(struct radeon_device *rdev) |
| 2146 | { | 2145 | { |
| 2147 | struct platform_device *pdev; | ||
| 2148 | const char *chip_name; | 2146 | const char *chip_name; |
| 2149 | const char *rlc_chip_name; | 2147 | const char *rlc_chip_name; |
| 2150 | const char *smc_chip_name = "RV770"; | 2148 | const char *smc_chip_name = "RV770"; |
| @@ -2154,13 +2152,6 @@ int r600_init_microcode(struct radeon_device *rdev) | |||
| 2154 | 2152 | ||
| 2155 | DRM_DEBUG("\n"); | 2153 | DRM_DEBUG("\n"); |
| 2156 | 2154 | ||
| 2157 | pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); | ||
| 2158 | err = IS_ERR(pdev); | ||
| 2159 | if (err) { | ||
| 2160 | printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); | ||
| 2161 | return -EINVAL; | ||
| 2162 | } | ||
| 2163 | |||
| 2164 | switch (rdev->family) { | 2155 | switch (rdev->family) { |
| 2165 | case CHIP_R600: | 2156 | case CHIP_R600: |
| 2166 | chip_name = "R600"; | 2157 | chip_name = "R600"; |
| @@ -2272,7 +2263,7 @@ int r600_init_microcode(struct radeon_device *rdev) | |||
| 2272 | DRM_INFO("Loading %s Microcode\n", chip_name); | 2263 | DRM_INFO("Loading %s Microcode\n", chip_name); |
| 2273 | 2264 | ||
| 2274 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); | 2265 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); |
| 2275 | err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); | 2266 | err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); |
| 2276 | if (err) | 2267 | if (err) |
| 2277 | goto out; | 2268 | goto out; |
| 2278 | if (rdev->pfp_fw->size != pfp_req_size) { | 2269 | if (rdev->pfp_fw->size != pfp_req_size) { |
| @@ -2284,7 +2275,7 @@ int r600_init_microcode(struct radeon_device *rdev) | |||
| 2284 | } | 2275 | } |
| 2285 | 2276 | ||
| 2286 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); | 2277 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); |
| 2287 | err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); | 2278 | err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); |
| 2288 | if (err) | 2279 | if (err) |
| 2289 | goto out; | 2280 | goto out; |
| 2290 | if (rdev->me_fw->size != me_req_size) { | 2281 | if (rdev->me_fw->size != me_req_size) { |
| @@ -2295,7 +2286,7 @@ int r600_init_microcode(struct radeon_device *rdev) | |||
| 2295 | } | 2286 | } |
| 2296 | 2287 | ||
| 2297 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); | 2288 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); |
| 2298 | err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); | 2289 | err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); |
| 2299 | if (err) | 2290 | if (err) |
| 2300 | goto out; | 2291 | goto out; |
| 2301 | if (rdev->rlc_fw->size != rlc_req_size) { | 2292 | if (rdev->rlc_fw->size != rlc_req_size) { |
| @@ -2307,10 +2298,14 @@ int r600_init_microcode(struct radeon_device *rdev) | |||
| 2307 | 2298 | ||
| 2308 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) { | 2299 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) { |
| 2309 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name); | 2300 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name); |
| 2310 | err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev); | 2301 | err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); |
| 2311 | if (err) | 2302 | if (err) { |
| 2312 | goto out; | 2303 | printk(KERN_ERR |
| 2313 | if (rdev->smc_fw->size != smc_req_size) { | 2304 | "smc: error loading firmware \"%s\"\n", |
| 2305 | fw_name); | ||
| 2306 | release_firmware(rdev->smc_fw); | ||
| 2307 | rdev->smc_fw = NULL; | ||
| 2308 | } else if (rdev->smc_fw->size != smc_req_size) { | ||
| 2314 | printk(KERN_ERR | 2309 | printk(KERN_ERR |
| 2315 | "smc: Bogus length %zu in firmware \"%s\"\n", | 2310 | "smc: Bogus length %zu in firmware \"%s\"\n", |
| 2316 | rdev->smc_fw->size, fw_name); | 2311 | rdev->smc_fw->size, fw_name); |
| @@ -2319,8 +2314,6 @@ int r600_init_microcode(struct radeon_device *rdev) | |||
| 2319 | } | 2314 | } |
| 2320 | 2315 | ||
| 2321 | out: | 2316 | out: |
| 2322 | platform_device_unregister(pdev); | ||
| 2323 | |||
| 2324 | if (err) { | 2317 | if (err) { |
| 2325 | if (err != -EINVAL) | 2318 | if (err != -EINVAL) |
| 2326 | printk(KERN_ERR | 2319 | printk(KERN_ERR |
| @@ -2708,12 +2701,29 @@ int r600_uvd_rbc_start(struct radeon_device *rdev) | |||
| 2708 | return 0; | 2701 | return 0; |
| 2709 | } | 2702 | } |
| 2710 | 2703 | ||
| 2711 | void r600_uvd_rbc_stop(struct radeon_device *rdev) | 2704 | void r600_uvd_stop(struct radeon_device *rdev) |
| 2712 | { | 2705 | { |
| 2713 | struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; | 2706 | struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; |
| 2714 | 2707 | ||
| 2715 | /* force RBC into idle state */ | 2708 | /* force RBC into idle state */ |
| 2716 | WREG32(UVD_RBC_RB_CNTL, 0x11010101); | 2709 | WREG32(UVD_RBC_RB_CNTL, 0x11010101); |
| 2710 | |||
| 2711 | /* Stall UMC and register bus before resetting VCPU */ | ||
| 2712 | WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); | ||
| 2713 | WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); | ||
| 2714 | mdelay(1); | ||
| 2715 | |||
| 2716 | /* put VCPU into reset */ | ||
| 2717 | WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); | ||
| 2718 | mdelay(5); | ||
| 2719 | |||
| 2720 | /* disable VCPU clock */ | ||
| 2721 | WREG32(UVD_VCPU_CNTL, 0x0); | ||
| 2722 | |||
| 2723 | /* Unstall UMC and register bus */ | ||
| 2724 | WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8)); | ||
| 2725 | WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3)); | ||
| 2726 | |||
| 2717 | ring->ready = false; | 2727 | ring->ready = false; |
| 2718 | } | 2728 | } |
| 2719 | 2729 | ||
| @@ -2733,6 +2743,11 @@ int r600_uvd_init(struct radeon_device *rdev) | |||
| 2733 | /* disable interrupt */ | 2743 | /* disable interrupt */ |
| 2734 | WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1)); | 2744 | WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1)); |
| 2735 | 2745 | ||
| 2746 | /* Stall UMC and register bus before resetting VCPU */ | ||
| 2747 | WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); | ||
| 2748 | WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); | ||
| 2749 | mdelay(1); | ||
| 2750 | |||
| 2736 | /* put LMI, VCPU, RBC etc... into reset */ | 2751 | /* put LMI, VCPU, RBC etc... into reset */ |
| 2737 | WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET | | 2752 | WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET | |
| 2738 | LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET | | 2753 | LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET | |
| @@ -2762,10 +2777,6 @@ int r600_uvd_init(struct radeon_device *rdev) | |||
| 2762 | WREG32(UVD_MPC_SET_ALU, 0); | 2777 | WREG32(UVD_MPC_SET_ALU, 0); |
| 2763 | WREG32(UVD_MPC_SET_MUX, 0x88); | 2778 | WREG32(UVD_MPC_SET_MUX, 0x88); |
| 2764 | 2779 | ||
| 2765 | /* Stall UMC */ | ||
| 2766 | WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); | ||
| 2767 | WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); | ||
| 2768 | |||
| 2769 | /* take all subblocks out of reset, except VCPU */ | 2780 | /* take all subblocks out of reset, except VCPU */ |
| 2770 | WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); | 2781 | WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); |
| 2771 | mdelay(5); | 2782 | mdelay(5); |
| @@ -3019,7 +3030,7 @@ void r600_uvd_fence_emit(struct radeon_device *rdev, | |||
| 3019 | struct radeon_fence *fence) | 3030 | struct radeon_fence *fence) |
| 3020 | { | 3031 | { |
| 3021 | struct radeon_ring *ring = &rdev->ring[fence->ring]; | 3032 | struct radeon_ring *ring = &rdev->ring[fence->ring]; |
| 3022 | uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr; | 3033 | uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr; |
| 3023 | 3034 | ||
| 3024 | radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); | 3035 | radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); |
| 3025 | radeon_ring_write(ring, fence->seq); | 3036 | radeon_ring_write(ring, fence->seq); |
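The addr type change above is a real fix, not churn: fence_drv[].gpu_addr is a 64-bit GPU address, and assigning it to a uint32_t silently drops the high bits once the fence BO sits above 4GB. A sketch of the failure mode (addresses hypothetical):

    uint64_t gpu_addr = 0x100001000ULL;  /* fence page above the 4GB mark */
    uint32_t addr32   = gpu_addr;        /* == 0x1000: UVD fence writes land at the wrong page */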
| @@ -3145,6 +3156,90 @@ int r600_copy_blit(struct radeon_device *rdev, | |||
| 3145 | } | 3156 | } |
| 3146 | 3157 | ||
| 3147 | /** | 3158 | /** |
| 3159 | * r600_copy_cpdma - copy pages using the CP DMA engine | ||
| 3160 | * | ||
| 3161 | * @rdev: radeon_device pointer | ||
| 3162 | * @src_offset: src GPU address | ||
| 3163 | * @dst_offset: dst GPU address | ||
| 3164 | * @num_gpu_pages: number of GPU pages to xfer | ||
| 3165 | * @fence: radeon fence object | ||
| 3166 | * | ||
| 3167 | * Copy GPU pages using the CP DMA engine (r6xx+). ||
| 3168 | * Used by the radeon ttm implementation to move pages if | ||
| 3169 | * registered as the asic copy callback. | ||
| 3170 | */ | ||
| 3171 | int r600_copy_cpdma(struct radeon_device *rdev, | ||
| 3172 | uint64_t src_offset, uint64_t dst_offset, | ||
| 3173 | unsigned num_gpu_pages, | ||
| 3174 | struct radeon_fence **fence) | ||
| 3175 | { | ||
| 3176 | struct radeon_semaphore *sem = NULL; | ||
| 3177 | int ring_index = rdev->asic->copy.blit_ring_index; | ||
| 3178 | struct radeon_ring *ring = &rdev->ring[ring_index]; | ||
| 3179 | u32 size_in_bytes, cur_size_in_bytes, tmp; | ||
| 3180 | int i, num_loops; | ||
| 3181 | int r = 0; | ||
| 3182 | |||
| 3183 | r = radeon_semaphore_create(rdev, &sem); | ||
| 3184 | if (r) { | ||
| 3185 | DRM_ERROR("radeon: moving bo (%d).\n", r); | ||
| 3186 | return r; | ||
| 3187 | } | ||
| 3188 | |||
| 3189 | size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); | ||
| 3190 | num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); | ||
| 3191 | r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24); | ||
| 3192 | if (r) { | ||
| 3193 | DRM_ERROR("radeon: moving bo (%d).\n", r); | ||
| 3194 | radeon_semaphore_free(rdev, &sem, NULL); | ||
| 3195 | return r; | ||
| 3196 | } | ||
| 3197 | |||
| 3198 | if (radeon_fence_need_sync(*fence, ring->idx)) { | ||
| 3199 | radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, | ||
| 3200 | ring->idx); | ||
| 3201 | radeon_fence_note_sync(*fence, ring->idx); | ||
| 3202 | } else { | ||
| 3203 | radeon_semaphore_free(rdev, &sem, NULL); | ||
| 3204 | } | ||
| 3205 | |||
| 3206 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | ||
| 3207 | radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); | ||
| 3208 | radeon_ring_write(ring, WAIT_3D_IDLE_bit); | ||
| 3209 | for (i = 0; i < num_loops; i++) { | ||
| 3210 | cur_size_in_bytes = size_in_bytes; | ||
| 3211 | if (cur_size_in_bytes > 0x1fffff) | ||
| 3212 | cur_size_in_bytes = 0x1fffff; | ||
| 3213 | size_in_bytes -= cur_size_in_bytes; | ||
| 3214 | tmp = upper_32_bits(src_offset) & 0xff; | ||
| 3215 | if (size_in_bytes == 0) | ||
| 3216 | tmp |= PACKET3_CP_DMA_CP_SYNC; | ||
| 3217 | radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4)); | ||
| 3218 | radeon_ring_write(ring, src_offset & 0xffffffff); | ||
| 3219 | radeon_ring_write(ring, tmp); | ||
| 3220 | radeon_ring_write(ring, dst_offset & 0xffffffff); | ||
| 3221 | radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); | ||
| 3222 | radeon_ring_write(ring, cur_size_in_bytes); | ||
| 3223 | src_offset += cur_size_in_bytes; | ||
| 3224 | dst_offset += cur_size_in_bytes; | ||
| 3225 | } | ||
| 3226 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | ||
| 3227 | radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); | ||
| 3228 | radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit); | ||
| 3229 | |||
| 3230 | r = radeon_fence_emit(rdev, fence, ring->idx); | ||
| 3231 | if (r) { | ||
| 3232 | radeon_ring_unlock_undo(rdev, ring); | ||
| 3233 | return r; | ||
| 3234 | } | ||
| 3235 | |||
| 3236 | radeon_ring_unlock_commit(rdev, ring); | ||
| 3237 | radeon_semaphore_free(rdev, &sem, *fence); | ||
| 3238 | |||
| 3239 | return r; | ||
| 3240 | } | ||
| 3241 | |||
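r600_copy_cpdma() is only reached through the asic copy table (the radeon_asic.c hunk further down makes it the default .copy for r6xx). A hedged sketch of the caller side, roughly what the ttm move path does; src_gpu_addr, dst_gpu_addr and num_pages are placeholders:

    struct radeon_fence *fence = NULL;
    int r;

    r = rdev->asic->copy.copy(rdev, src_gpu_addr, dst_gpu_addr,
                              num_pages, &fence);  /* -> r600_copy_cpdma on r6xx */
    if (!r)
        r = radeon_fence_wait(fence, false);       /* block until the CP DMA completes */
    radeon_fence_unref(&fence);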
| 3242 | /** | ||
| 3148 | * r600_copy_dma - copy pages using the DMA engine | 3243 | * r600_copy_dma - copy pages using the DMA engine |
| 3149 | * | 3244 | * |
| 3150 | * @rdev: radeon_device pointer | 3245 | * @rdev: radeon_device pointer |
| @@ -3239,6 +3334,8 @@ static int r600_startup(struct radeon_device *rdev) | |||
| 3239 | /* enable pcie gen2 link */ | 3334 | /* enable pcie gen2 link */ |
| 3240 | r600_pcie_gen2_enable(rdev); | 3335 | r600_pcie_gen2_enable(rdev); |
| 3241 | 3336 | ||
| 3337 | r600_mc_program(rdev); | ||
| 3338 | |||
| 3242 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | 3339 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
| 3243 | r = r600_init_microcode(rdev); | 3340 | r = r600_init_microcode(rdev); |
| 3244 | if (r) { | 3341 | if (r) { |
| @@ -3251,7 +3348,6 @@ static int r600_startup(struct radeon_device *rdev) | |||
| 3251 | if (r) | 3348 | if (r) |
| 3252 | return r; | 3349 | return r; |
| 3253 | 3350 | ||
| 3254 | r600_mc_program(rdev); | ||
| 3255 | if (rdev->flags & RADEON_IS_AGP) { | 3351 | if (rdev->flags & RADEON_IS_AGP) { |
| 3256 | r600_agp_enable(rdev); | 3352 | r600_agp_enable(rdev); |
| 3257 | } else { | 3353 | } else { |
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index b88f54b134ab..e5c860f4ccbe 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c | |||
| @@ -278,9 +278,9 @@ bool r600_dynamicpm_enabled(struct radeon_device *rdev) | |||
| 278 | void r600_enable_sclk_control(struct radeon_device *rdev, bool enable) | 278 | void r600_enable_sclk_control(struct radeon_device *rdev, bool enable) |
| 279 | { | 279 | { |
| 280 | if (enable) | 280 | if (enable) |
| 281 | WREG32_P(GENERAL_PWRMGT, 0, ~SCLK_PWRMGT_OFF); | 281 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF); |
| 282 | else | 282 | else |
| 283 | WREG32_P(GENERAL_PWRMGT, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); | 283 | WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); |
| 284 | } | 284 | } |
| 285 | 285 | ||
| 286 | void r600_enable_mclk_control(struct radeon_device *rdev, bool enable) | 286 | void r600_enable_mclk_control(struct radeon_device *rdev, bool enable) |
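The one-line fix above moves SCLK_PWRMGT_OFF to the register that actually holds it, SCLK_PWRMGT_CNTL. For reference, WREG32_P(reg, val, mask) is radeon's masked read-modify-write; expanded by hand (a sketch of the macro's effect as defined in radeon.h):

    tmp  = RREG32(SCLK_PWRMGT_CNTL);
    tmp &= ~SCLK_PWRMGT_OFF;                 /* mask = ~SCLK_PWRMGT_OFF preserves all other bits */
    tmp |= enable ? 0 : SCLK_PWRMGT_OFF;     /* val = 0 clears the bit, val = the bit sets it */
    WREG32(SCLK_PWRMGT_CNTL, tmp);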
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index e73b2a73494a..f264df5470f7 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
| @@ -226,10 +226,29 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) | |||
| 226 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 226 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 227 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 227 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
| 228 | u32 base_rate = 24000; | 228 | u32 base_rate = 24000; |
| 229 | u32 max_ratio = clock / base_rate; | ||
| 230 | u32 dto_phase; | ||
| 231 | u32 dto_modulo = clock; | ||
| 232 | u32 wallclock_ratio; | ||
| 233 | u32 dto_cntl; | ||
| 229 | 234 | ||
| 230 | if (!dig || !dig->afmt) | 235 | if (!dig || !dig->afmt) |
| 231 | return; | 236 | return; |
| 232 | 237 | ||
| 238 | if (max_ratio >= 8) { | ||
| 239 | dto_phase = 192 * 1000; | ||
| 240 | wallclock_ratio = 3; | ||
| 241 | } else if (max_ratio >= 4) { | ||
| 242 | dto_phase = 96 * 1000; | ||
| 243 | wallclock_ratio = 2; | ||
| 244 | } else if (max_ratio >= 2) { | ||
| 245 | dto_phase = 48 * 1000; | ||
| 246 | wallclock_ratio = 1; | ||
| 247 | } else { | ||
| 248 | dto_phase = 24 * 1000; | ||
| 249 | wallclock_ratio = 0; | ||
| 250 | } | ||
| 251 | |||
| 233 | /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT. | 252 | /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT. |
| 234 | * doesn't matter which one you use. Just use the first one. | 253 | * doesn't matter which one you use. Just use the first one. |
| 235 | */ | 254 | */ |
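Worked numbers for the new ratio selection, clocks in kHz against the 24000 kHz base rate:

    /* clock = 148500 (e.g. a 148.5 MHz pixel clock) */
    max_ratio = 148500 / 24000;   /* = 6 by integer division */
    /* 4 <= 6 < 8 -> dto_phase = 96 * 1000, wallclock_ratio = 2 */

    /* clock = 193000 */
    max_ratio = 193000 / 24000;   /* = 8 -> dto_phase = 192 * 1000, wallclock_ratio = 3 */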
| @@ -242,9 +261,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) | |||
| 242 | /* according to the reg specs, this should be DCE3.2 only, but in | 261 | /* according to the reg specs, this should be DCE3.2 only, but in |
| 243 | * practice it seems to cover DCE3.0 as well. | 262 | * practice it seems to cover DCE3.0 as well. |
| 244 | */ | 263 | */ |
| 245 | WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); | 264 | if (dig->dig_encoder == 0) { |
| 246 | WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); | 265 | dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; |
| 247 | WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ | 266 | dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); |
| 267 | WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl); | ||
| 268 | WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase); | ||
| 269 | WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo); | ||
| 270 | WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ | ||
| 271 | } else { | ||
| 272 | dto_cntl = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; | ||
| 273 | dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); | ||
| 274 | WREG32(DCCG_AUDIO_DTO1_CNTL, dto_cntl); | ||
| 275 | WREG32(DCCG_AUDIO_DTO1_PHASE, dto_phase); | ||
| 276 | WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo); | ||
| 277 | WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ | ||
| 278 | } | ||
| 248 | } else { | 279 | } else { |
| 249 | /* according to the reg specs, this should be DCE2.0 and DCE3.0 */ | 280 | /* according to the reg specs, this should be DCE2.0 and DCE3.0 */ |
| 250 | WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | | 281 | WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | |
| @@ -266,6 +297,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod | |||
| 266 | uint32_t offset; | 297 | uint32_t offset; |
| 267 | ssize_t err; | 298 | ssize_t err; |
| 268 | 299 | ||
| 300 | if (!dig || !dig->afmt) | ||
| 301 | return; | ||
| 302 | |||
| 269 | /* Silent, r600_hdmi_enable will raise WARN for us */ | 303 | /* Silent, r600_hdmi_enable will raise WARN for us */ |
| 270 | if (!dig->afmt->enabled) | 304 | if (!dig->afmt->enabled) |
| 271 | return; | 305 | return; |
| @@ -448,6 +482,9 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable) | |||
| 448 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 482 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
| 449 | u32 hdmi = HDMI0_ERROR_ACK; | 483 | u32 hdmi = HDMI0_ERROR_ACK; |
| 450 | 484 | ||
| 485 | if (!dig || !dig->afmt) | ||
| 486 | return; | ||
| 487 | |||
| 451 | /* Silent, r600_hdmi_enable will raise WARN for us */ | 488 | /* Silent, r600_hdmi_enable will raise WARN for us */ |
| 452 | if (enable && dig->afmt->enabled) | 489 | if (enable && dig->afmt->enabled) |
| 453 | return; | 490 | return; |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index f1b3084d8f51..7c780839a7f4 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
| @@ -602,6 +602,7 @@ | |||
| 602 | #define L2_BUSY (1 << 0) | 602 | #define L2_BUSY (1 << 0) |
| 603 | 603 | ||
| 604 | #define WAIT_UNTIL 0x8040 | 604 | #define WAIT_UNTIL 0x8040 |
| 605 | #define WAIT_CP_DMA_IDLE_bit (1 << 8) | ||
| 605 | #define WAIT_2D_IDLE_bit (1 << 14) | 606 | #define WAIT_2D_IDLE_bit (1 << 14) |
| 606 | #define WAIT_3D_IDLE_bit (1 << 15) | 607 | #define WAIT_3D_IDLE_bit (1 << 15) |
| 607 | #define WAIT_2D_IDLECLEAN_bit (1 << 16) | 608 | #define WAIT_2D_IDLECLEAN_bit (1 << 16) |
| @@ -932,6 +933,9 @@ | |||
| 932 | #define DCCG_AUDIO_DTO0_LOAD 0x051c | 933 | #define DCCG_AUDIO_DTO0_LOAD 0x051c |
| 933 | # define DTO_LOAD (1 << 31) | 934 | # define DTO_LOAD (1 << 31) |
| 934 | #define DCCG_AUDIO_DTO0_CNTL 0x0520 | 935 | #define DCCG_AUDIO_DTO0_CNTL 0x0520 |
| 936 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0) | ||
| 937 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7 | ||
| 938 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0 | ||
| 935 | 939 | ||
| 936 | #define DCCG_AUDIO_DTO1_PHASE 0x0524 | 940 | #define DCCG_AUDIO_DTO1_PHASE 0x0524 |
| 937 | #define DCCG_AUDIO_DTO1_MODULE 0x0528 | 941 | #define DCCG_AUDIO_DTO1_MODULE 0x0528 |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 9b7025d02cd0..274b8e1b889f 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -97,6 +97,7 @@ extern int radeon_msi; | |||
| 97 | extern int radeon_lockup_timeout; | 97 | extern int radeon_lockup_timeout; |
| 98 | extern int radeon_fastfb; | 98 | extern int radeon_fastfb; |
| 99 | extern int radeon_dpm; | 99 | extern int radeon_dpm; |
| 100 | extern int radeon_aspm; | ||
| 100 | 101 | ||
| 101 | /* | 102 | /* |
| 102 | * Copy from radeon_drv.h so we don't have to include both and have conflicting | 103 | * Copy from radeon_drv.h so we don't have to include both and have conflicting |
| @@ -455,6 +456,7 @@ struct radeon_sa_manager { | |||
| 455 | uint64_t gpu_addr; | 456 | uint64_t gpu_addr; |
| 456 | void *cpu_ptr; | 457 | void *cpu_ptr; |
| 457 | uint32_t domain; | 458 | uint32_t domain; |
| 459 | uint32_t align; | ||
| 458 | }; | 460 | }; |
| 459 | 461 | ||
| 460 | struct radeon_sa_bo; | 462 | struct radeon_sa_bo; |
| @@ -783,6 +785,11 @@ struct radeon_mec { | |||
| 783 | /* number of entries in page table */ | 785 | /* number of entries in page table */ |
| 784 | #define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE) | 786 | #define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE) |
| 785 | 787 | ||
| 788 | /* PTBs (Page Table Blocks) need to be aligned to 32K */ | ||
| 789 | #define RADEON_VM_PTB_ALIGN_SIZE 32768 | ||
| 790 | #define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1) | ||
| 791 | #define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK) | ||
| 792 | |||
| 786 | struct radeon_vm { | 793 | struct radeon_vm { |
| 787 | struct list_head list; | 794 | struct list_head list; |
| 788 | struct list_head va; | 795 | struct list_head va; |
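RADEON_VM_PTB_ALIGN() is the standard round-up-to-a-power-of-two idiom; with the 32K (0x8000) alignment:

    RADEON_VM_PTB_ALIGN(0x0000)  /* = 0x0000 */
    RADEON_VM_PTB_ALIGN(0x0001)  /* = 0x8000 */
    RADEON_VM_PTB_ALIGN(0x9000)  /* = 0x10000, i.e. (0x9000 + 0x7fff) & ~0x7fff */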
| @@ -1460,6 +1467,7 @@ struct radeon_uvd { | |||
| 1460 | struct radeon_bo *vcpu_bo; | 1467 | struct radeon_bo *vcpu_bo; |
| 1461 | void *cpu_addr; | 1468 | void *cpu_addr; |
| 1462 | uint64_t gpu_addr; | 1469 | uint64_t gpu_addr; |
| 1470 | void *saved_bo; | ||
| 1463 | atomic_t handles[RADEON_MAX_UVD_HANDLES]; | 1471 | atomic_t handles[RADEON_MAX_UVD_HANDLES]; |
| 1464 | struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; | 1472 | struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; |
| 1465 | struct delayed_work idle_work; | 1473 | struct delayed_work idle_work; |
| @@ -2054,10 +2062,10 @@ struct radeon_device { | |||
| 2054 | const struct firmware *rlc_fw; /* r6/700 RLC firmware */ | 2062 | const struct firmware *rlc_fw; /* r6/700 RLC firmware */ |
| 2055 | const struct firmware *mc_fw; /* NI MC firmware */ | 2063 | const struct firmware *mc_fw; /* NI MC firmware */ |
| 2056 | const struct firmware *ce_fw; /* SI CE firmware */ | 2064 | const struct firmware *ce_fw; /* SI CE firmware */ |
| 2057 | const struct firmware *uvd_fw; /* UVD firmware */ | ||
| 2058 | const struct firmware *mec_fw; /* CIK MEC firmware */ | 2065 | const struct firmware *mec_fw; /* CIK MEC firmware */ |
| 2059 | const struct firmware *sdma_fw; /* CIK SDMA firmware */ | 2066 | const struct firmware *sdma_fw; /* CIK SDMA firmware */ |
| 2060 | const struct firmware *smc_fw; /* SMC firmware */ | 2067 | const struct firmware *smc_fw; /* SMC firmware */ |
| 2068 | const struct firmware *uvd_fw; /* UVD firmware */ | ||
| 2061 | struct r600_blit r600_blit; | 2069 | struct r600_blit r600_blit; |
| 2062 | struct r600_vram_scratch vram_scratch; | 2070 | struct r600_vram_scratch vram_scratch; |
| 2063 | int msi_enabled; /* msi enabled */ | 2071 | int msi_enabled; /* msi enabled */ |
| @@ -2087,6 +2095,8 @@ struct radeon_device { | |||
| 2087 | /* ACPI interface */ | 2095 | /* ACPI interface */ |
| 2088 | struct radeon_atif atif; | 2096 | struct radeon_atif atif; |
| 2089 | struct radeon_atcs atcs; | 2097 | struct radeon_atcs atcs; |
| 2098 | /* srbm instance registers */ | ||
| 2099 | struct mutex srbm_mutex; | ||
| 2090 | }; | 2100 | }; |
| 2091 | 2101 | ||
| 2092 | int radeon_device_init(struct radeon_device *rdev, | 2102 | int radeon_device_init(struct radeon_device *rdev, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 097077499cc6..f8f8b3113ddd 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
| @@ -1026,8 +1026,8 @@ static struct radeon_asic r600_asic = { | |||
| 1026 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, | 1026 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
| 1027 | .dma = &r600_copy_dma, | 1027 | .dma = &r600_copy_dma, |
| 1028 | .dma_ring_index = R600_RING_TYPE_DMA_INDEX, | 1028 | .dma_ring_index = R600_RING_TYPE_DMA_INDEX, |
| 1029 | .copy = &r600_copy_dma, | 1029 | .copy = &r600_copy_cpdma, |
| 1030 | .copy_ring_index = R600_RING_TYPE_DMA_INDEX, | 1030 | .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
| 1031 | }, | 1031 | }, |
| 1032 | .surface = { | 1032 | .surface = { |
| 1033 | .set_reg = r600_set_surface_reg, | 1033 | .set_reg = r600_set_surface_reg, |
| @@ -1119,8 +1119,8 @@ static struct radeon_asic rv6xx_asic = { | |||
| 1119 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, | 1119 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
| 1120 | .dma = &r600_copy_dma, | 1120 | .dma = &r600_copy_dma, |
| 1121 | .dma_ring_index = R600_RING_TYPE_DMA_INDEX, | 1121 | .dma_ring_index = R600_RING_TYPE_DMA_INDEX, |
| 1122 | .copy = &r600_copy_dma, | 1122 | .copy = &r600_copy_cpdma, |
| 1123 | .copy_ring_index = R600_RING_TYPE_DMA_INDEX, | 1123 | .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
| 1124 | }, | 1124 | }, |
| 1125 | .surface = { | 1125 | .surface = { |
| 1126 | .set_reg = r600_set_surface_reg, | 1126 | .set_reg = r600_set_surface_reg, |
| @@ -1161,6 +1161,7 @@ static struct radeon_asic rv6xx_asic = { | |||
| 1161 | .get_mclk = &rv6xx_dpm_get_mclk, | 1161 | .get_mclk = &rv6xx_dpm_get_mclk, |
| 1162 | .print_power_state = &rv6xx_dpm_print_power_state, | 1162 | .print_power_state = &rv6xx_dpm_print_power_state, |
| 1163 | .debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level, | 1163 | .debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level, |
| 1164 | .force_performance_level = &rv6xx_dpm_force_performance_level, | ||
| 1164 | }, | 1165 | }, |
| 1165 | .pflip = { | 1166 | .pflip = { |
| 1166 | .pre_page_flip = &rs600_pre_page_flip, | 1167 | .pre_page_flip = &rs600_pre_page_flip, |
| @@ -1229,8 +1230,8 @@ static struct radeon_asic rs780_asic = { | |||
| 1229 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, | 1230 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
| 1230 | .dma = &r600_copy_dma, | 1231 | .dma = &r600_copy_dma, |
| 1231 | .dma_ring_index = R600_RING_TYPE_DMA_INDEX, | 1232 | .dma_ring_index = R600_RING_TYPE_DMA_INDEX, |
| 1232 | .copy = &r600_copy_dma, | 1233 | .copy = &r600_copy_cpdma, |
| 1233 | .copy_ring_index = R600_RING_TYPE_DMA_INDEX, | 1234 | .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
| 1234 | }, | 1235 | }, |
| 1235 | .surface = { | 1236 | .surface = { |
| 1236 | .set_reg = r600_set_surface_reg, | 1237 | .set_reg = r600_set_surface_reg, |
| @@ -1270,6 +1271,7 @@ static struct radeon_asic rs780_asic = { | |||
| 1270 | .get_sclk = &rs780_dpm_get_sclk, | 1271 | .get_sclk = &rs780_dpm_get_sclk, |
| 1271 | .get_mclk = &rs780_dpm_get_mclk, | 1272 | .get_mclk = &rs780_dpm_get_mclk, |
| 1272 | .print_power_state = &rs780_dpm_print_power_state, | 1273 | .print_power_state = &rs780_dpm_print_power_state, |
| 1274 | .debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level, | ||
| 1273 | }, | 1275 | }, |
| 1274 | .pflip = { | 1276 | .pflip = { |
| 1275 | .pre_page_flip = &rs600_pre_page_flip, | 1277 | .pre_page_flip = &rs600_pre_page_flip, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 45d0693cddd5..3d61d5aac18f 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
| @@ -340,6 +340,9 @@ int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); | |||
| 340 | int r600_copy_blit(struct radeon_device *rdev, | 340 | int r600_copy_blit(struct radeon_device *rdev, |
| 341 | uint64_t src_offset, uint64_t dst_offset, | 341 | uint64_t src_offset, uint64_t dst_offset, |
| 342 | unsigned num_gpu_pages, struct radeon_fence **fence); | 342 | unsigned num_gpu_pages, struct radeon_fence **fence); |
| 343 | int r600_copy_cpdma(struct radeon_device *rdev, | ||
| 344 | uint64_t src_offset, uint64_t dst_offset, | ||
| 345 | unsigned num_gpu_pages, struct radeon_fence **fence); | ||
| 343 | int r600_copy_dma(struct radeon_device *rdev, | 346 | int r600_copy_dma(struct radeon_device *rdev, |
| 344 | uint64_t src_offset, uint64_t dst_offset, | 347 | uint64_t src_offset, uint64_t dst_offset, |
| 345 | unsigned num_gpu_pages, struct radeon_fence **fence); | 348 | unsigned num_gpu_pages, struct radeon_fence **fence); |
| @@ -418,6 +421,8 @@ void rv6xx_dpm_print_power_state(struct radeon_device *rdev, | |||
| 418 | struct radeon_ps *ps); | 421 | struct radeon_ps *ps); |
| 419 | void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, | 422 | void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, |
| 420 | struct seq_file *m); | 423 | struct seq_file *m); |
| 424 | int rv6xx_dpm_force_performance_level(struct radeon_device *rdev, | ||
| 425 | enum radeon_dpm_forced_level level); | ||
| 421 | /* rs780 dpm */ | 426 | /* rs780 dpm */ |
| 422 | int rs780_dpm_init(struct radeon_device *rdev); | 427 | int rs780_dpm_init(struct radeon_device *rdev); |
| 423 | int rs780_dpm_enable(struct radeon_device *rdev); | 428 | int rs780_dpm_enable(struct radeon_device *rdev); |
| @@ -430,11 +435,13 @@ u32 rs780_dpm_get_sclk(struct radeon_device *rdev, bool low); | |||
| 430 | u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low); | 435 | u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low); |
| 431 | void rs780_dpm_print_power_state(struct radeon_device *rdev, | 436 | void rs780_dpm_print_power_state(struct radeon_device *rdev, |
| 432 | struct radeon_ps *ps); | 437 | struct radeon_ps *ps); |
| 438 | void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, | ||
| 439 | struct seq_file *m); | ||
| 433 | 440 | ||
| 434 | /* uvd */ | 441 | /* uvd */ |
| 435 | int r600_uvd_init(struct radeon_device *rdev); | 442 | int r600_uvd_init(struct radeon_device *rdev); |
| 436 | int r600_uvd_rbc_start(struct radeon_device *rdev); | 443 | int r600_uvd_rbc_start(struct radeon_device *rdev); |
| 437 | void r600_uvd_rbc_stop(struct radeon_device *rdev); | 444 | void r600_uvd_stop(struct radeon_device *rdev); |
| 438 | int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); | 445 | int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); |
| 439 | void r600_uvd_fence_emit(struct radeon_device *rdev, | 446 | void r600_uvd_fence_emit(struct radeon_device *rdev, |
| 440 | struct radeon_fence *fence); | 447 | struct radeon_fence *fence); |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index fbdaff55556b..4ccd61f60eb6 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
| @@ -2782,7 +2782,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev, | |||
| 2782 | ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false; | 2782 | ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false; |
| 2783 | dividers->enable_dithen = (args.v3.ucCntlFlag & | 2783 | dividers->enable_dithen = (args.v3.ucCntlFlag & |
| 2784 | ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true; | 2784 | ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true; |
| 2785 | dividers->fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv); | 2785 | dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv); |
| 2786 | dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac); | 2786 | dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac); |
| 2787 | dividers->ref_div = args.v3.ucRefDiv; | 2787 | dividers->ref_div = args.v3.ucRefDiv; |
| 2788 | dividers->vco_mode = (args.v3.ucCntlFlag & | 2788 | dividers->vco_mode = (args.v3.ucCntlFlag & |
| @@ -3513,7 +3513,6 @@ int radeon_atom_get_memory_info(struct radeon_device *rdev, | |||
| 3513 | u8 frev, crev, i; | 3513 | u8 frev, crev, i; |
| 3514 | u16 data_offset, size; | 3514 | u16 data_offset, size; |
| 3515 | union vram_info *vram_info; | 3515 | union vram_info *vram_info; |
| 3516 | u8 *p; | ||
| 3517 | 3516 | ||
| 3518 | memset(mem_info, 0, sizeof(struct atom_memory_info)); | 3517 | memset(mem_info, 0, sizeof(struct atom_memory_info)); |
| 3519 | 3518 | ||
| @@ -3529,13 +3528,12 @@ int radeon_atom_get_memory_info(struct radeon_device *rdev, | |||
| 3529 | if (module_index < vram_info->v1_3.ucNumOfVRAMModule) { | 3528 | if (module_index < vram_info->v1_3.ucNumOfVRAMModule) { |
| 3530 | ATOM_VRAM_MODULE_V3 *vram_module = | 3529 | ATOM_VRAM_MODULE_V3 *vram_module = |
| 3531 | (ATOM_VRAM_MODULE_V3 *)vram_info->v1_3.aVramInfo; | 3530 | (ATOM_VRAM_MODULE_V3 *)vram_info->v1_3.aVramInfo; |
| 3532 | p = (u8 *)vram_info->v1_3.aVramInfo; | ||
| 3533 | 3531 | ||
| 3534 | for (i = 0; i < module_index; i++) { | 3532 | for (i = 0; i < module_index; i++) { |
| 3535 | vram_module = (ATOM_VRAM_MODULE_V3 *)p; | ||
| 3536 | if (le16_to_cpu(vram_module->usSize) == 0) | 3533 | if (le16_to_cpu(vram_module->usSize) == 0) |
| 3537 | return -EINVAL; | 3534 | return -EINVAL; |
| 3538 | p += le16_to_cpu(vram_module->usSize); | 3535 | vram_module = (ATOM_VRAM_MODULE_V3 *) |
| 3536 | ((u8 *)vram_module + le16_to_cpu(vram_module->usSize)); | ||
| 3539 | } | 3537 | } |
| 3540 | mem_info->mem_vendor = vram_module->asMemory.ucMemoryVenderID & 0xf; | 3538 | mem_info->mem_vendor = vram_module->asMemory.ucMemoryVenderID & 0xf; |
| 3541 | mem_info->mem_type = vram_module->asMemory.ucMemoryType & 0xf0; | 3539 | mem_info->mem_type = vram_module->asMemory.ucMemoryType & 0xf0; |
| @@ -3547,13 +3545,12 @@ int radeon_atom_get_memory_info(struct radeon_device *rdev, | |||
| 3547 | if (module_index < vram_info->v1_4.ucNumOfVRAMModule) { | 3545 | if (module_index < vram_info->v1_4.ucNumOfVRAMModule) { |
| 3548 | ATOM_VRAM_MODULE_V4 *vram_module = | 3546 | ATOM_VRAM_MODULE_V4 *vram_module = |
| 3549 | (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo; | 3547 | (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo; |
| 3550 | p = (u8 *)vram_info->v1_4.aVramInfo; | ||
| 3551 | 3548 | ||
| 3552 | for (i = 0; i < module_index; i++) { | 3549 | for (i = 0; i < module_index; i++) { |
| 3553 | vram_module = (ATOM_VRAM_MODULE_V4 *)p; | ||
| 3554 | if (le16_to_cpu(vram_module->usModuleSize) == 0) | 3550 | if (le16_to_cpu(vram_module->usModuleSize) == 0) |
| 3555 | return -EINVAL; | 3551 | return -EINVAL; |
| 3556 | p += le16_to_cpu(vram_module->usModuleSize); | 3552 | vram_module = (ATOM_VRAM_MODULE_V4 *) |
| 3553 | ((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize)); | ||
| 3557 | } | 3554 | } |
| 3558 | mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf; | 3555 | mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf; |
| 3559 | mem_info->mem_type = vram_module->ucMemoryType & 0xf0; | 3556 | mem_info->mem_type = vram_module->ucMemoryType & 0xf0; |
| @@ -3572,13 +3569,12 @@ int radeon_atom_get_memory_info(struct radeon_device *rdev, | |||
| 3572 | if (module_index < vram_info->v2_1.ucNumOfVRAMModule) { | 3569 | if (module_index < vram_info->v2_1.ucNumOfVRAMModule) { |
| 3573 | ATOM_VRAM_MODULE_V7 *vram_module = | 3570 | ATOM_VRAM_MODULE_V7 *vram_module = |
| 3574 | (ATOM_VRAM_MODULE_V7 *)vram_info->v2_1.aVramInfo; | 3571 | (ATOM_VRAM_MODULE_V7 *)vram_info->v2_1.aVramInfo; |
| 3575 | p = (u8 *)vram_info->v2_1.aVramInfo; | ||
| 3576 | 3572 | ||
| 3577 | for (i = 0; i < module_index; i++) { | 3573 | for (i = 0; i < module_index; i++) { |
| 3578 | vram_module = (ATOM_VRAM_MODULE_V7 *)p; | ||
| 3579 | if (le16_to_cpu(vram_module->usModuleSize) == 0) | 3574 | if (le16_to_cpu(vram_module->usModuleSize) == 0) |
| 3580 | return -EINVAL; | 3575 | return -EINVAL; |
| 3581 | p += le16_to_cpu(vram_module->usModuleSize); | 3576 | vram_module = (ATOM_VRAM_MODULE_V7 *) |
| 3577 | ((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize)); | ||
| 3582 | } | 3578 | } |
| 3583 | mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf; | 3579 | mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf; |
| 3584 | mem_info->mem_type = vram_module->ucMemoryType & 0xf0; | 3580 | mem_info->mem_type = vram_module->ucMemoryType & 0xf0; |
| @@ -3628,21 +3624,19 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev, | |||
| 3628 | if (module_index < vram_info->v1_4.ucNumOfVRAMModule) { | 3624 | if (module_index < vram_info->v1_4.ucNumOfVRAMModule) { |
| 3629 | ATOM_VRAM_MODULE_V4 *vram_module = | 3625 | ATOM_VRAM_MODULE_V4 *vram_module = |
| 3630 | (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo; | 3626 | (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo; |
| 3631 | ATOM_MEMORY_TIMING_FORMAT *format; | ||
| 3632 | p = (u8 *)vram_info->v1_4.aVramInfo; | ||
| 3633 | 3627 | ||
| 3634 | for (i = 0; i < module_index; i++) { | 3628 | for (i = 0; i < module_index; i++) { |
| 3635 | vram_module = (ATOM_VRAM_MODULE_V4 *)p; | ||
| 3636 | if (le16_to_cpu(vram_module->usModuleSize) == 0) | 3629 | if (le16_to_cpu(vram_module->usModuleSize) == 0) |
| 3637 | return -EINVAL; | 3630 | return -EINVAL; |
| 3638 | p += le16_to_cpu(vram_module->usModuleSize); | 3631 | vram_module = (ATOM_VRAM_MODULE_V4 *) |
| 3632 | ((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize)); | ||
| 3639 | } | 3633 | } |
| 3640 | mclk_range_table->num_entries = (u8) | 3634 | mclk_range_table->num_entries = (u8) |
| 3641 | ((vram_module->usModuleSize - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) / | 3635 | ((le16_to_cpu(vram_module->usModuleSize) - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) / |
| 3642 | mem_timing_size); | 3636 | mem_timing_size); |
| 3643 | p = (u8 *)vram_module->asMemTiming; | 3637 | p = (u8 *)&vram_module->asMemTiming[0]; |
| 3644 | for (i = 0; i < mclk_range_table->num_entries; i++) { | 3638 | for (i = 0; i < mclk_range_table->num_entries; i++) { |
| 3645 | format = (ATOM_MEMORY_TIMING_FORMAT *)p; | 3639 | ATOM_MEMORY_TIMING_FORMAT *format = (ATOM_MEMORY_TIMING_FORMAT *)p; |
| 3646 | mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange); | 3640 | mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange); |
| 3647 | p += mem_timing_size; | 3641 | p += mem_timing_size; |
| 3648 | } | 3642 | } |
| @@ -3705,17 +3699,21 @@ int radeon_atom_init_mc_reg_table(struct radeon_device *rdev, | |||
| 3705 | (ATOM_MEMORY_SETTING_DATA_BLOCK *) | 3699 | (ATOM_MEMORY_SETTING_DATA_BLOCK *) |
| 3706 | ((u8 *)reg_block + (2 * sizeof(u16)) + | 3700 | ((u8 *)reg_block + (2 * sizeof(u16)) + |
| 3707 | le16_to_cpu(reg_block->usRegIndexTblSize)); | 3701 | le16_to_cpu(reg_block->usRegIndexTblSize)); |
| 3702 | ATOM_INIT_REG_INDEX_FORMAT *format = ®_block->asRegIndexBuf[0]; | ||
| 3708 | num_entries = (u8)((le16_to_cpu(reg_block->usRegIndexTblSize)) / | 3703 | num_entries = (u8)((le16_to_cpu(reg_block->usRegIndexTblSize)) / |
| 3709 | sizeof(ATOM_INIT_REG_INDEX_FORMAT)) - 1; | 3704 | sizeof(ATOM_INIT_REG_INDEX_FORMAT)) - 1; |
| 3710 | if (num_entries > VBIOS_MC_REGISTER_ARRAY_SIZE) | 3705 | if (num_entries > VBIOS_MC_REGISTER_ARRAY_SIZE) |
| 3711 | return -EINVAL; | 3706 | return -EINVAL; |
| 3712 | while (!(reg_block->asRegIndexBuf[i].ucPreRegDataLength & ACCESS_PLACEHOLDER) && | 3707 | while (i < num_entries) { |
| 3713 | (i < num_entries)) { | 3708 | if (format->ucPreRegDataLength & ACCESS_PLACEHOLDER) |
| 3709 | break; | ||
| 3714 | reg_table->mc_reg_address[i].s1 = | 3710 | reg_table->mc_reg_address[i].s1 = |
| 3715 | (u16)(le16_to_cpu(reg_block->asRegIndexBuf[i].usRegIndex)); | 3711 | (u16)(le16_to_cpu(format->usRegIndex)); |
| 3716 | reg_table->mc_reg_address[i].pre_reg_data = | 3712 | reg_table->mc_reg_address[i].pre_reg_data = |
| 3717 | (u8)(reg_block->asRegIndexBuf[i].ucPreRegDataLength); | 3713 | (u8)(format->ucPreRegDataLength); |
| 3718 | i++; | 3714 | i++; |
| 3715 | format = (ATOM_INIT_REG_INDEX_FORMAT *) | ||
| 3716 | ((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT)); | ||
| 3719 | } | 3717 | } |
| 3720 | reg_table->last = i; | 3718 | reg_table->last = i; |
| 3721 | while ((*(u32 *)reg_data != END_OF_REG_DATA_BLOCK) && | 3719 | while ((*(u32 *)reg_data != END_OF_REG_DATA_BLOCK) && |
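[Editor's sketch] The register-index loop in the second hunk is restructured the same way: a cursor of the entry type replaces repeated array indexing, and the placeholder test moves inside the loop body. The shape it converges on, using the names from the hunk:

    ATOM_INIT_REG_INDEX_FORMAT *format = &reg_block->asRegIndexBuf[0];
    u8 i = 0;

    while (i < num_entries) {
            if (format->ucPreRegDataLength & ACCESS_PLACEHOLDER)
                    break; /* a placeholder entry terminates the index table early */
            reg_table->mc_reg_address[i].s1 = le16_to_cpu(format->usRegIndex);
            reg_table->mc_reg_address[i].pre_reg_data = format->ucPreRegDataLength;
            i++;
            format++; /* fixed-size entries, equivalent to the explicit byte math */
    }
    reg_table->last = i;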
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 78edadc9e86b..68ce36056019 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
| @@ -147,7 +147,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, | |||
| 147 | enum radeon_combios_table_offset table) | 147 | enum radeon_combios_table_offset table) |
| 148 | { | 148 | { |
| 149 | struct radeon_device *rdev = dev->dev_private; | 149 | struct radeon_device *rdev = dev->dev_private; |
| 150 | int rev; | 150 | int rev, size; |
| 151 | uint16_t offset = 0, check_offset; | 151 | uint16_t offset = 0, check_offset; |
| 152 | 152 | ||
| 153 | if (!rdev->bios) | 153 | if (!rdev->bios) |
| @@ -156,174 +156,106 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, | |||
| 156 | switch (table) { | 156 | switch (table) { |
| 157 | /* absolute offset tables */ | 157 | /* absolute offset tables */ |
| 158 | case COMBIOS_ASIC_INIT_1_TABLE: | 158 | case COMBIOS_ASIC_INIT_1_TABLE: |
| 159 | check_offset = RBIOS16(rdev->bios_header_start + 0xc); | 159 | check_offset = 0xc; |
| 160 | if (check_offset) | ||
| 161 | offset = check_offset; | ||
| 162 | break; | 160 | break; |
| 163 | case COMBIOS_BIOS_SUPPORT_TABLE: | 161 | case COMBIOS_BIOS_SUPPORT_TABLE: |
| 164 | check_offset = RBIOS16(rdev->bios_header_start + 0x14); | 162 | check_offset = 0x14; |
| 165 | if (check_offset) | ||
| 166 | offset = check_offset; | ||
| 167 | break; | 163 | break; |
| 168 | case COMBIOS_DAC_PROGRAMMING_TABLE: | 164 | case COMBIOS_DAC_PROGRAMMING_TABLE: |
| 169 | check_offset = RBIOS16(rdev->bios_header_start + 0x2a); | 165 | check_offset = 0x2a; |
| 170 | if (check_offset) | ||
| 171 | offset = check_offset; | ||
| 172 | break; | 166 | break; |
| 173 | case COMBIOS_MAX_COLOR_DEPTH_TABLE: | 167 | case COMBIOS_MAX_COLOR_DEPTH_TABLE: |
| 174 | check_offset = RBIOS16(rdev->bios_header_start + 0x2c); | 168 | check_offset = 0x2c; |
| 175 | if (check_offset) | ||
| 176 | offset = check_offset; | ||
| 177 | break; | 169 | break; |
| 178 | case COMBIOS_CRTC_INFO_TABLE: | 170 | case COMBIOS_CRTC_INFO_TABLE: |
| 179 | check_offset = RBIOS16(rdev->bios_header_start + 0x2e); | 171 | check_offset = 0x2e; |
| 180 | if (check_offset) | ||
| 181 | offset = check_offset; | ||
| 182 | break; | 172 | break; |
| 183 | case COMBIOS_PLL_INFO_TABLE: | 173 | case COMBIOS_PLL_INFO_TABLE: |
| 184 | check_offset = RBIOS16(rdev->bios_header_start + 0x30); | 174 | check_offset = 0x30; |
| 185 | if (check_offset) | ||
| 186 | offset = check_offset; | ||
| 187 | break; | 175 | break; |
| 188 | case COMBIOS_TV_INFO_TABLE: | 176 | case COMBIOS_TV_INFO_TABLE: |
| 189 | check_offset = RBIOS16(rdev->bios_header_start + 0x32); | 177 | check_offset = 0x32; |
| 190 | if (check_offset) | ||
| 191 | offset = check_offset; | ||
| 192 | break; | 178 | break; |
| 193 | case COMBIOS_DFP_INFO_TABLE: | 179 | case COMBIOS_DFP_INFO_TABLE: |
| 194 | check_offset = RBIOS16(rdev->bios_header_start + 0x34); | 180 | check_offset = 0x34; |
| 195 | if (check_offset) | ||
| 196 | offset = check_offset; | ||
| 197 | break; | 181 | break; |
| 198 | case COMBIOS_HW_CONFIG_INFO_TABLE: | 182 | case COMBIOS_HW_CONFIG_INFO_TABLE: |
| 199 | check_offset = RBIOS16(rdev->bios_header_start + 0x36); | 183 | check_offset = 0x36; |
| 200 | if (check_offset) | ||
| 201 | offset = check_offset; | ||
| 202 | break; | 184 | break; |
| 203 | case COMBIOS_MULTIMEDIA_INFO_TABLE: | 185 | case COMBIOS_MULTIMEDIA_INFO_TABLE: |
| 204 | check_offset = RBIOS16(rdev->bios_header_start + 0x38); | 186 | check_offset = 0x38; |
| 205 | if (check_offset) | ||
| 206 | offset = check_offset; | ||
| 207 | break; | 187 | break; |
| 208 | case COMBIOS_TV_STD_PATCH_TABLE: | 188 | case COMBIOS_TV_STD_PATCH_TABLE: |
| 209 | check_offset = RBIOS16(rdev->bios_header_start + 0x3e); | 189 | check_offset = 0x3e; |
| 210 | if (check_offset) | ||
| 211 | offset = check_offset; | ||
| 212 | break; | 190 | break; |
| 213 | case COMBIOS_LCD_INFO_TABLE: | 191 | case COMBIOS_LCD_INFO_TABLE: |
| 214 | check_offset = RBIOS16(rdev->bios_header_start + 0x40); | 192 | check_offset = 0x40; |
| 215 | if (check_offset) | ||
| 216 | offset = check_offset; | ||
| 217 | break; | 193 | break; |
| 218 | case COMBIOS_MOBILE_INFO_TABLE: | 194 | case COMBIOS_MOBILE_INFO_TABLE: |
| 219 | check_offset = RBIOS16(rdev->bios_header_start + 0x42); | 195 | check_offset = 0x42; |
| 220 | if (check_offset) | ||
| 221 | offset = check_offset; | ||
| 222 | break; | 196 | break; |
| 223 | case COMBIOS_PLL_INIT_TABLE: | 197 | case COMBIOS_PLL_INIT_TABLE: |
| 224 | check_offset = RBIOS16(rdev->bios_header_start + 0x46); | 198 | check_offset = 0x46; |
| 225 | if (check_offset) | ||
| 226 | offset = check_offset; | ||
| 227 | break; | 199 | break; |
| 228 | case COMBIOS_MEM_CONFIG_TABLE: | 200 | case COMBIOS_MEM_CONFIG_TABLE: |
| 229 | check_offset = RBIOS16(rdev->bios_header_start + 0x48); | 201 | check_offset = 0x48; |
| 230 | if (check_offset) | ||
| 231 | offset = check_offset; | ||
| 232 | break; | 202 | break; |
| 233 | case COMBIOS_SAVE_MASK_TABLE: | 203 | case COMBIOS_SAVE_MASK_TABLE: |
| 234 | check_offset = RBIOS16(rdev->bios_header_start + 0x4a); | 204 | check_offset = 0x4a; |
| 235 | if (check_offset) | ||
| 236 | offset = check_offset; | ||
| 237 | break; | 205 | break; |
| 238 | case COMBIOS_HARDCODED_EDID_TABLE: | 206 | case COMBIOS_HARDCODED_EDID_TABLE: |
| 239 | check_offset = RBIOS16(rdev->bios_header_start + 0x4c); | 207 | check_offset = 0x4c; |
| 240 | if (check_offset) | ||
| 241 | offset = check_offset; | ||
| 242 | break; | 208 | break; |
| 243 | case COMBIOS_ASIC_INIT_2_TABLE: | 209 | case COMBIOS_ASIC_INIT_2_TABLE: |
| 244 | check_offset = RBIOS16(rdev->bios_header_start + 0x4e); | 210 | check_offset = 0x4e; |
| 245 | if (check_offset) | ||
| 246 | offset = check_offset; | ||
| 247 | break; | 211 | break; |
| 248 | case COMBIOS_CONNECTOR_INFO_TABLE: | 212 | case COMBIOS_CONNECTOR_INFO_TABLE: |
| 249 | check_offset = RBIOS16(rdev->bios_header_start + 0x50); | 213 | check_offset = 0x50; |
| 250 | if (check_offset) | ||
| 251 | offset = check_offset; | ||
| 252 | break; | 214 | break; |
| 253 | case COMBIOS_DYN_CLK_1_TABLE: | 215 | case COMBIOS_DYN_CLK_1_TABLE: |
| 254 | check_offset = RBIOS16(rdev->bios_header_start + 0x52); | 216 | check_offset = 0x52; |
| 255 | if (check_offset) | ||
| 256 | offset = check_offset; | ||
| 257 | break; | 217 | break; |
| 258 | case COMBIOS_RESERVED_MEM_TABLE: | 218 | case COMBIOS_RESERVED_MEM_TABLE: |
| 259 | check_offset = RBIOS16(rdev->bios_header_start + 0x54); | 219 | check_offset = 0x54; |
| 260 | if (check_offset) | ||
| 261 | offset = check_offset; | ||
| 262 | break; | 220 | break; |
| 263 | case COMBIOS_EXT_TMDS_INFO_TABLE: | 221 | case COMBIOS_EXT_TMDS_INFO_TABLE: |
| 264 | check_offset = RBIOS16(rdev->bios_header_start + 0x58); | 222 | check_offset = 0x58; |
| 265 | if (check_offset) | ||
| 266 | offset = check_offset; | ||
| 267 | break; | 223 | break; |
| 268 | case COMBIOS_MEM_CLK_INFO_TABLE: | 224 | case COMBIOS_MEM_CLK_INFO_TABLE: |
| 269 | check_offset = RBIOS16(rdev->bios_header_start + 0x5a); | 225 | check_offset = 0x5a; |
| 270 | if (check_offset) | ||
| 271 | offset = check_offset; | ||
| 272 | break; | 226 | break; |
| 273 | case COMBIOS_EXT_DAC_INFO_TABLE: | 227 | case COMBIOS_EXT_DAC_INFO_TABLE: |
| 274 | check_offset = RBIOS16(rdev->bios_header_start + 0x5c); | 228 | check_offset = 0x5c; |
| 275 | if (check_offset) | ||
| 276 | offset = check_offset; | ||
| 277 | break; | 229 | break; |
| 278 | case COMBIOS_MISC_INFO_TABLE: | 230 | case COMBIOS_MISC_INFO_TABLE: |
| 279 | check_offset = RBIOS16(rdev->bios_header_start + 0x5e); | 231 | check_offset = 0x5e; |
| 280 | if (check_offset) | ||
| 281 | offset = check_offset; | ||
| 282 | break; | 232 | break; |
| 283 | case COMBIOS_CRT_INFO_TABLE: | 233 | case COMBIOS_CRT_INFO_TABLE: |
| 284 | check_offset = RBIOS16(rdev->bios_header_start + 0x60); | 234 | check_offset = 0x60; |
| 285 | if (check_offset) | ||
| 286 | offset = check_offset; | ||
| 287 | break; | 235 | break; |
| 288 | case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE: | 236 | case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE: |
| 289 | check_offset = RBIOS16(rdev->bios_header_start + 0x62); | 237 | check_offset = 0x62; |
| 290 | if (check_offset) | ||
| 291 | offset = check_offset; | ||
| 292 | break; | 238 | break; |
| 293 | case COMBIOS_COMPONENT_VIDEO_INFO_TABLE: | 239 | case COMBIOS_COMPONENT_VIDEO_INFO_TABLE: |
| 294 | check_offset = RBIOS16(rdev->bios_header_start + 0x64); | 240 | check_offset = 0x64; |
| 295 | if (check_offset) | ||
| 296 | offset = check_offset; | ||
| 297 | break; | 241 | break; |
| 298 | case COMBIOS_FAN_SPEED_INFO_TABLE: | 242 | case COMBIOS_FAN_SPEED_INFO_TABLE: |
| 299 | check_offset = RBIOS16(rdev->bios_header_start + 0x66); | 243 | check_offset = 0x66; |
| 300 | if (check_offset) | ||
| 301 | offset = check_offset; | ||
| 302 | break; | 244 | break; |
| 303 | case COMBIOS_OVERDRIVE_INFO_TABLE: | 245 | case COMBIOS_OVERDRIVE_INFO_TABLE: |
| 304 | check_offset = RBIOS16(rdev->bios_header_start + 0x68); | 246 | check_offset = 0x68; |
| 305 | if (check_offset) | ||
| 306 | offset = check_offset; | ||
| 307 | break; | 247 | break; |
| 308 | case COMBIOS_OEM_INFO_TABLE: | 248 | case COMBIOS_OEM_INFO_TABLE: |
| 309 | check_offset = RBIOS16(rdev->bios_header_start + 0x6a); | 249 | check_offset = 0x6a; |
| 310 | if (check_offset) | ||
| 311 | offset = check_offset; | ||
| 312 | break; | 250 | break; |
| 313 | case COMBIOS_DYN_CLK_2_TABLE: | 251 | case COMBIOS_DYN_CLK_2_TABLE: |
| 314 | check_offset = RBIOS16(rdev->bios_header_start + 0x6c); | 252 | check_offset = 0x6c; |
| 315 | if (check_offset) | ||
| 316 | offset = check_offset; | ||
| 317 | break; | 253 | break; |
| 318 | case COMBIOS_POWER_CONNECTOR_INFO_TABLE: | 254 | case COMBIOS_POWER_CONNECTOR_INFO_TABLE: |
| 319 | check_offset = RBIOS16(rdev->bios_header_start + 0x6e); | 255 | check_offset = 0x6e; |
| 320 | if (check_offset) | ||
| 321 | offset = check_offset; | ||
| 322 | break; | 256 | break; |
| 323 | case COMBIOS_I2C_INFO_TABLE: | 257 | case COMBIOS_I2C_INFO_TABLE: |
| 324 | check_offset = RBIOS16(rdev->bios_header_start + 0x70); | 258 | check_offset = 0x70; |
| 325 | if (check_offset) | ||
| 326 | offset = check_offset; | ||
| 327 | break; | 259 | break; |
| 328 | /* relative offset tables */ | 260 | /* relative offset tables */ |
| 329 | case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */ | 261 | case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */ |
| @@ -439,11 +371,16 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, | |||
| 439 | } | 371 | } |
| 440 | break; | 372 | break; |
| 441 | default: | 373 | default: |
| 374 | check_offset = 0; | ||
| 442 | break; | 375 | break; |
| 443 | } | 376 | } |
| 444 | 377 | ||
| 445 | return offset; | 378 | size = RBIOS8(rdev->bios_header_start + 0x6); |
| 379 | /* check absolute offset tables */ | ||
| 380 | if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size) | ||
| 381 | offset = RBIOS16(rdev->bios_header_start + check_offset); | ||
| 446 | 382 | ||
| 383 | return offset; | ||
| 447 | } | 384 | } |
| 448 | 385 | ||
| 449 | bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) | 386 | bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) |
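[Editor's sketch] The combios refactor above replaces roughly thirty copies of the read-and-test idiom with one: each absolute-offset case now records only its slot in the COMBIOS header, and a single bounds check against the header length — absent before — guards the actual read, so the lookup can no longer fetch an offset from past the header. The consolidated tail the hunk ends up with:

    size = RBIOS8(rdev->bios_header_start + 0x6); /* COMBIOS header length */
    /* absolute-offset tables only; the relative cases set offset directly */
    if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
            offset = RBIOS16(rdev->bios_header_start + check_offset);

    return offset;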
| @@ -965,16 +902,22 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct | |||
| 965 | dac = RBIOS8(dac_info + 0x3) & 0xf; | 902 | dac = RBIOS8(dac_info + 0x3) & 0xf; |
| 966 | p_dac->ps2_pdac_adj = (bg << 8) | (dac); | 903 | p_dac->ps2_pdac_adj = (bg << 8) | (dac); |
| 967 | } | 904 | } |
| 968 | /* if the values are all zeros, use the table */ | 905 | /* if either value is zero, use the table */ |
| 969 | if (p_dac->ps2_pdac_adj) | 906 | if ((dac == 0) || (bg == 0)) |
| 907 | found = 0; | ||
| 908 | else | ||
| 970 | found = 1; | 909 | found = 1; |
| 971 | } | 910 | } |
| 972 | 911 | ||
| 973 | /* quirks */ | 912 | /* quirks */ |
| 913 | /* Radeon 7000 (RV100) */ | ||
| 914 | if (((dev->pdev->device == 0x5159) && | ||
| 915 | (dev->pdev->subsystem_vendor == 0x174B) && | ||
| 916 | (dev->pdev->subsystem_device == 0x7c28)) || | ||
| 974 | /* Radeon 9100 (R200) */ | 917 | /* Radeon 9100 (R200) */ |
| 975 | if ((dev->pdev->device == 0x514D) && | 918 | ((dev->pdev->device == 0x514D) && |
| 976 | (dev->pdev->subsystem_vendor == 0x174B) && | 919 | (dev->pdev->subsystem_vendor == 0x174B) && |
| 977 | (dev->pdev->subsystem_device == 0x7149)) { | 920 | (dev->pdev->subsystem_device == 0x7149))) { |
| 978 | /* vbios value is bad, use the default */ | 921 | /* vbios value is bad, use the default */ |
| 979 | found = 0; | 922 | found = 0; |
| 980 | } | 923 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 82335e38ec4f..63398ae1dbf5 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -1163,6 +1163,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
| 1163 | mutex_init(&rdev->gem.mutex); | 1163 | mutex_init(&rdev->gem.mutex); |
| 1164 | mutex_init(&rdev->pm.mutex); | 1164 | mutex_init(&rdev->pm.mutex); |
| 1165 | mutex_init(&rdev->gpu_clock_mutex); | 1165 | mutex_init(&rdev->gpu_clock_mutex); |
| 1166 | mutex_init(&rdev->srbm_mutex); | ||
| 1166 | init_rwsem(&rdev->pm.mclk_lock); | 1167 | init_rwsem(&rdev->pm.mclk_lock); |
| 1167 | init_rwsem(&rdev->exclusive_lock); | 1168 | init_rwsem(&rdev->exclusive_lock); |
| 1168 | init_waitqueue_head(&rdev->irq.vblank_queue); | 1169 | init_waitqueue_head(&rdev->irq.vblank_queue); |
| @@ -1519,6 +1520,7 @@ int radeon_gpu_reset(struct radeon_device *rdev) | |||
| 1519 | radeon_save_bios_scratch_regs(rdev); | 1520 | radeon_save_bios_scratch_regs(rdev); |
| 1520 | /* block TTM */ | 1521 | /* block TTM */ |
| 1521 | resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); | 1522 | resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); |
| 1523 | radeon_pm_suspend(rdev); | ||
| 1522 | radeon_suspend(rdev); | 1524 | radeon_suspend(rdev); |
| 1523 | 1525 | ||
| 1524 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { | 1526 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
| @@ -1564,6 +1566,7 @@ retry: | |||
| 1564 | } | 1566 | } |
| 1565 | } | 1567 | } |
| 1566 | 1568 | ||
| 1569 | radeon_pm_resume(rdev); | ||
| 1567 | drm_helper_resume_force_mode(rdev->ddev); | 1570 | drm_helper_resume_force_mode(rdev->ddev); |
| 1568 | 1571 | ||
| 1569 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); | 1572 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); |
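[Editor's sketch] Taken together, the two radeon_device.c hunks bracket the ASIC reset with power-management teardown and bring-up, so dynamic power management is never live while the GPU is being reset. An ordering sketch of the reset path after this change, intermediate steps elided:

    resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
    radeon_pm_suspend(rdev);   /* park dpm before touching the ASIC */
    radeon_suspend(rdev);
    /* ... save rings, reset the ASIC, restore rings (retry loop) ... */
    radeon_pm_resume(rdev);    /* re-arm dpm once the rings are back */
    drm_helper_resume_force_mode(rdev->ddev);
    ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);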
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index e5419b350170..29876b1be8ec 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
| @@ -167,6 +167,7 @@ int radeon_msi = -1; | |||
| 167 | int radeon_lockup_timeout = 10000; | 167 | int radeon_lockup_timeout = 10000; |
| 168 | int radeon_fastfb = 0; | 168 | int radeon_fastfb = 0; |
| 169 | int radeon_dpm = -1; | 169 | int radeon_dpm = -1; |
| 170 | int radeon_aspm = -1; | ||
| 170 | 171 | ||
| 171 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); | 172 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); |
| 172 | module_param_named(no_wb, radeon_no_wb, int, 0444); | 173 | module_param_named(no_wb, radeon_no_wb, int, 0444); |
| @@ -225,6 +226,9 @@ module_param_named(fastfb, radeon_fastfb, int, 0444); | |||
| 225 | MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)"); | 226 | MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)"); |
| 226 | module_param_named(dpm, radeon_dpm, int, 0444); | 227 | module_param_named(dpm, radeon_dpm, int, 0444); |
| 227 | 228 | ||
| 229 | MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)"); | ||
| 230 | module_param_named(aspm, radeon_aspm, int, 0444); | ||
| 231 | |||
| 228 | static struct pci_device_id pciidlist[] = { | 232 | static struct pci_device_id pciidlist[] = { |
| 229 | radeon_PCI_IDS | 233 | radeon_PCI_IDS |
| 230 | }; | 234 | }; |
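[Editor's note] The new radeon_aspm parameter follows the same tri-state convention as radeon_dpm just above it, and the rv6xx/rv770 setup hunks further down gate their ASPM enables on it. A brief usage sketch; the command-line forms are the standard module-parameter syntax, shown here as comments:

    /* boot-time:  radeon.aspm=0          (force ASPM off)
     * load-time:  modprobe radeon aspm=-1  (auto, the default) */
    int radeon_aspm = -1;

    MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
    module_param_named(aspm, radeon_aspm, int, 0444); /* read-only via sysfs */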
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 43ec4a401f07..b990b1a2bd50 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
| @@ -207,7 +207,6 @@ void radeon_gart_table_vram_free(struct radeon_device *rdev) | |||
| 207 | if (rdev->gart.robj == NULL) { | 207 | if (rdev->gart.robj == NULL) { |
| 208 | return; | 208 | return; |
| 209 | } | 209 | } |
| 210 | radeon_gart_table_vram_unpin(rdev); | ||
| 211 | radeon_bo_unref(&rdev->gart.robj); | 210 | radeon_bo_unref(&rdev->gart.robj); |
| 212 | } | 211 | } |
| 213 | 212 | ||
| @@ -467,6 +466,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev) | |||
| 467 | size *= 2; | 466 | size *= 2; |
| 468 | r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, | 467 | r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, |
| 469 | RADEON_GPU_PAGE_ALIGN(size), | 468 | RADEON_GPU_PAGE_ALIGN(size), |
| 469 | RADEON_VM_PTB_ALIGN_SIZE, | ||
| 470 | RADEON_GEM_DOMAIN_VRAM); | 470 | RADEON_GEM_DOMAIN_VRAM); |
| 471 | if (r) { | 471 | if (r) { |
| 472 | dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n", | 472 | dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n", |
| @@ -620,10 +620,10 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm) | |||
| 620 | } | 620 | } |
| 621 | 621 | ||
| 622 | retry: | 622 | retry: |
| 623 | pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev)); | 623 | pd_size = radeon_vm_directory_size(rdev); |
| 624 | r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, | 624 | r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, |
| 625 | &vm->page_directory, pd_size, | 625 | &vm->page_directory, pd_size, |
| 626 | RADEON_GPU_PAGE_SIZE, false); | 626 | RADEON_VM_PTB_ALIGN_SIZE, false); |
| 627 | if (r == -ENOMEM) { | 627 | if (r == -ENOMEM) { |
| 628 | r = radeon_vm_evict(rdev, vm); | 628 | r = radeon_vm_evict(rdev, vm); |
| 629 | if (r) | 629 | if (r) |
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index bcdefd1dcd43..081886b0642d 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
| @@ -260,10 +260,6 @@ int radeon_irq_kms_init(struct radeon_device *rdev) | |||
| 260 | { | 260 | { |
| 261 | int r = 0; | 261 | int r = 0; |
| 262 | 262 | ||
| 263 | INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); | ||
| 264 | INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); | ||
| 265 | INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func); | ||
| 266 | |||
| 267 | spin_lock_init(&rdev->irq.lock); | 263 | spin_lock_init(&rdev->irq.lock); |
| 268 | r = drm_vblank_init(rdev->ddev, rdev->num_crtc); | 264 | r = drm_vblank_init(rdev->ddev, rdev->num_crtc); |
| 269 | if (r) { | 265 | if (r) { |
| @@ -285,6 +281,11 @@ int radeon_irq_kms_init(struct radeon_device *rdev) | |||
| 285 | rdev->irq.installed = false; | 281 | rdev->irq.installed = false; |
| 286 | return r; | 282 | return r; |
| 287 | } | 283 | } |
| 284 | |||
| 285 | INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); | ||
| 286 | INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); | ||
| 287 | INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func); | ||
| 288 | |||
| 288 | DRM_INFO("radeon: irq initialized.\n"); | 289 | DRM_INFO("radeon: irq initialized.\n"); |
| 289 | return 0; | 290 | return 0; |
| 290 | } | 291 | } |
| @@ -304,8 +305,8 @@ void radeon_irq_kms_fini(struct radeon_device *rdev) | |||
| 304 | rdev->irq.installed = false; | 305 | rdev->irq.installed = false; |
| 305 | if (rdev->msi_enabled) | 306 | if (rdev->msi_enabled) |
| 306 | pci_disable_msi(rdev->pdev); | 307 | pci_disable_msi(rdev->pdev); |
| 308 | flush_work(&rdev->hotplug_work); | ||
| 307 | } | 309 | } |
| 308 | flush_work(&rdev->hotplug_work); | ||
| 309 | } | 310 | } |
| 310 | 311 | ||
| 311 | /** | 312 | /** |
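[Editor's sketch] Read together, the two irq_kms hunks close an init/teardown race: the work items are created only after vblank setup and IRQ installation have succeeded, so an early-failure return can no longer leave queued work pointing at a half-initialized device, and the fini-side flush_work() now runs whether or not MSI was in use. The resulting init shape; the MSI/IRQ install steps, elided by the hunk, sit where the comment is:

    spin_lock_init(&rdev->irq.lock);
    r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
    if (r)
            return r;
    /* ... MSI enable and IRQ install; any failure returns before a
     * single work item exists ... */
    INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
    INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
    INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);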
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 0219d263e2df..2020bf4a3830 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
| @@ -377,6 +377,7 @@ int radeon_bo_list_validate(struct ww_acquire_ctx *ticket, | |||
| 377 | domain = lobj->alt_domain; | 377 | domain = lobj->alt_domain; |
| 378 | goto retry; | 378 | goto retry; |
| 379 | } | 379 | } |
| 380 | ttm_eu_backoff_reservation(ticket, head); | ||
| 380 | return r; | 381 | return r; |
| 381 | } | 382 | } |
| 382 | } | 383 | } |
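[Editor's sketch] The one-line radeon_object.c fix plugs a reservation leak: once the alternate-domain retry is exhausted, every buffer already reserved on the ticket has to be released before the error is returned. The corrected error path, with the validate call and retry condition reconstructed from context (they are not part of the hunk):

    r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
    if (r) {
            if (r != -ERESTARTSYS && domain != lobj->alt_domain) {
                    domain = lobj->alt_domain;
                    goto retry;  /* try the fallback placement once */
            }
            ttm_eu_backoff_reservation(ticket, head); /* the added line */
            return r;
    }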
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 91519a5622b4..49c82c480013 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h | |||
| @@ -174,7 +174,7 @@ static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo) | |||
| 174 | 174 | ||
| 175 | extern int radeon_sa_bo_manager_init(struct radeon_device *rdev, | 175 | extern int radeon_sa_bo_manager_init(struct radeon_device *rdev, |
| 176 | struct radeon_sa_manager *sa_manager, | 176 | struct radeon_sa_manager *sa_manager, |
| 177 | unsigned size, u32 domain); | 177 | unsigned size, u32 align, u32 domain); |
| 178 | extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev, | 178 | extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev, |
| 179 | struct radeon_sa_manager *sa_manager); | 179 | struct radeon_sa_manager *sa_manager); |
| 180 | extern int radeon_sa_bo_manager_start(struct radeon_device *rdev, | 180 | extern int radeon_sa_bo_manager_start(struct radeon_device *rdev, |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index f374c467aaca..c557850cd345 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
| @@ -1176,7 +1176,14 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
| 1176 | case CHIP_VERDE: | 1176 | case CHIP_VERDE: |
| 1177 | case CHIP_OLAND: | 1177 | case CHIP_OLAND: |
| 1178 | case CHIP_HAINAN: | 1178 | case CHIP_HAINAN: |
| 1179 | if (radeon_dpm == 1) | 1179 | /* DPM requires the RLC, RV770+ dGPU requires SMC */ |
| 1180 | if (!rdev->rlc_fw) | ||
| 1181 | rdev->pm.pm_method = PM_METHOD_PROFILE; | ||
| 1182 | else if ((rdev->family >= CHIP_RV770) && | ||
| 1183 | (!(rdev->flags & RADEON_IS_IGP)) && | ||
| 1184 | (!rdev->smc_fw)) | ||
| 1185 | rdev->pm.pm_method = PM_METHOD_PROFILE; | ||
| 1186 | else if (radeon_dpm == 1) | ||
| 1180 | rdev->pm.pm_method = PM_METHOD_DPM; | 1187 | rdev->pm.pm_method = PM_METHOD_DPM; |
| 1181 | else | 1188 | else |
| 1182 | rdev->pm.pm_method = PM_METHOD_PROFILE; | 1189 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
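[Editor's sketch] The pm_method selection above now degrades to the profile method when required firmware is missing, rather than attempting DPM and failing later. Condensed, the decision ladder is:

    /* DPM needs the RLC everywhere, plus the SMC on RV770+ discrete parts */
    if (!rdev->rlc_fw)
            rdev->pm.pm_method = PM_METHOD_PROFILE;
    else if (rdev->family >= CHIP_RV770 &&
             !(rdev->flags & RADEON_IS_IGP) && !rdev->smc_fw)
            rdev->pm.pm_method = PM_METHOD_PROFILE;
    else if (radeon_dpm == 1)
            rdev->pm.pm_method = PM_METHOD_DPM;
    else
            rdev->pm.pm_method = PM_METHOD_PROFILE;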
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 5f1c51a776ed..fb5ea6208970 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
| @@ -224,6 +224,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev) | |||
| 224 | } | 224 | } |
| 225 | r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo, | 225 | r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo, |
| 226 | RADEON_IB_POOL_SIZE*64*1024, | 226 | RADEON_IB_POOL_SIZE*64*1024, |
| 227 | RADEON_GPU_PAGE_SIZE, | ||
| 227 | RADEON_GEM_DOMAIN_GTT); | 228 | RADEON_GEM_DOMAIN_GTT); |
| 228 | if (r) { | 229 | if (r) { |
| 229 | return r; | 230 | return r; |
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c index 0abe5a9431bb..f0bac68254b7 100644 --- a/drivers/gpu/drm/radeon/radeon_sa.c +++ b/drivers/gpu/drm/radeon/radeon_sa.c | |||
| @@ -49,7 +49,7 @@ static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager); | |||
| 49 | 49 | ||
| 50 | int radeon_sa_bo_manager_init(struct radeon_device *rdev, | 50 | int radeon_sa_bo_manager_init(struct radeon_device *rdev, |
| 51 | struct radeon_sa_manager *sa_manager, | 51 | struct radeon_sa_manager *sa_manager, |
| 52 | unsigned size, u32 domain) | 52 | unsigned size, u32 align, u32 domain) |
| 53 | { | 53 | { |
| 54 | int i, r; | 54 | int i, r; |
| 55 | 55 | ||
| @@ -57,13 +57,14 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev, | |||
| 57 | sa_manager->bo = NULL; | 57 | sa_manager->bo = NULL; |
| 58 | sa_manager->size = size; | 58 | sa_manager->size = size; |
| 59 | sa_manager->domain = domain; | 59 | sa_manager->domain = domain; |
| 60 | sa_manager->align = align; | ||
| 60 | sa_manager->hole = &sa_manager->olist; | 61 | sa_manager->hole = &sa_manager->olist; |
| 61 | INIT_LIST_HEAD(&sa_manager->olist); | 62 | INIT_LIST_HEAD(&sa_manager->olist); |
| 62 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { | 63 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
| 63 | INIT_LIST_HEAD(&sa_manager->flist[i]); | 64 | INIT_LIST_HEAD(&sa_manager->flist[i]); |
| 64 | } | 65 | } |
| 65 | 66 | ||
| 66 | r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true, | 67 | r = radeon_bo_create(rdev, size, align, true, |
| 67 | domain, NULL, &sa_manager->bo); | 68 | domain, NULL, &sa_manager->bo); |
| 68 | if (r) { | 69 | if (r) { |
| 69 | dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r); | 70 | dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r); |
| @@ -317,7 +318,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev, | |||
| 317 | unsigned tries[RADEON_NUM_RINGS]; | 318 | unsigned tries[RADEON_NUM_RINGS]; |
| 318 | int i, r; | 319 | int i, r; |
| 319 | 320 | ||
| 320 | BUG_ON(align > RADEON_GPU_PAGE_SIZE); | 321 | BUG_ON(align > sa_manager->align); |
| 321 | BUG_ON(size > sa_manager->size); | 322 | BUG_ON(size > sa_manager->size); |
| 322 | 323 | ||
| 323 | *sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL); | 324 | *sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL); |
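[Editor's sketch] The suballocator now takes its alignment from the caller instead of hard-wiring RADEON_GPU_PAGE_SIZE: the manager remembers it, sizes its backing BO with it, and turns it into the upper bound on per-allocation alignment. The callers were updated above — the IB pool keeps the page-size alignment, while the VM manager moves to RADEON_VM_PTB_ALIGN_SIZE. The invariant in brief:

    /* init: record the manager-wide alignment and allocate the BO with it */
    sa_manager->align = align;
    r = radeon_bo_create(rdev, size, align, true, domain, NULL, &sa_manager->bo);

    /* alloc: a suballocation may never ask for more than the manager has */
    BUG_ON(align > sa_manager->align);
    BUG_ON(size > sa_manager->size);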
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 41efcec28cd8..f1c15754e73c 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
| @@ -56,20 +56,12 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work); | |||
| 56 | 56 | ||
| 57 | int radeon_uvd_init(struct radeon_device *rdev) | 57 | int radeon_uvd_init(struct radeon_device *rdev) |
| 58 | { | 58 | { |
| 59 | struct platform_device *pdev; | ||
| 60 | unsigned long bo_size; | 59 | unsigned long bo_size; |
| 61 | const char *fw_name; | 60 | const char *fw_name; |
| 62 | int i, r; | 61 | int i, r; |
| 63 | 62 | ||
| 64 | INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler); | 63 | INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler); |
| 65 | 64 | ||
| 66 | pdev = platform_device_register_simple("radeon_uvd", 0, NULL, 0); | ||
| 67 | r = IS_ERR(pdev); | ||
| 68 | if (r) { | ||
| 69 | dev_err(rdev->dev, "radeon_uvd: Failed to register firmware\n"); | ||
| 70 | return -EINVAL; | ||
| 71 | } | ||
| 72 | |||
| 73 | switch (rdev->family) { | 65 | switch (rdev->family) { |
| 74 | case CHIP_RV710: | 66 | case CHIP_RV710: |
| 75 | case CHIP_RV730: | 67 | case CHIP_RV730: |
| @@ -112,16 +104,13 @@ int radeon_uvd_init(struct radeon_device *rdev) | |||
| 112 | return -EINVAL; | 104 | return -EINVAL; |
| 113 | } | 105 | } |
| 114 | 106 | ||
| 115 | r = request_firmware(&rdev->uvd_fw, fw_name, &pdev->dev); | 107 | r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev); |
| 116 | if (r) { | 108 | if (r) { |
| 117 | dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n", | 109 | dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n", |
| 118 | fw_name); | 110 | fw_name); |
| 119 | platform_device_unregister(pdev); | ||
| 120 | return r; | 111 | return r; |
| 121 | } | 112 | } |
| 122 | 113 | ||
| 123 | platform_device_unregister(pdev); | ||
| 124 | |||
| 125 | bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) + | 114 | bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) + |
| 126 | RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE; | 115 | RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE; |
| 127 | r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true, | 116 | r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true, |
| @@ -131,16 +120,29 @@ int radeon_uvd_init(struct radeon_device *rdev) | |||
| 131 | return r; | 120 | return r; |
| 132 | } | 121 | } |
| 133 | 122 | ||
| 134 | r = radeon_uvd_resume(rdev); | 123 | r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); |
| 135 | if (r) | 124 | if (r) { |
| 125 | radeon_bo_unref(&rdev->uvd.vcpu_bo); | ||
| 126 | dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r); | ||
| 136 | return r; | 127 | return r; |
| 128 | } | ||
| 137 | 129 | ||
| 138 | memset(rdev->uvd.cpu_addr, 0, bo_size); | 130 | r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, |
| 139 | memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size); | 131 | &rdev->uvd.gpu_addr); |
| 132 | if (r) { | ||
| 133 | radeon_bo_unreserve(rdev->uvd.vcpu_bo); | ||
| 134 | radeon_bo_unref(&rdev->uvd.vcpu_bo); | ||
| 135 | dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r); | ||
| 136 | return r; | ||
| 137 | } | ||
| 140 | 138 | ||
| 141 | r = radeon_uvd_suspend(rdev); | 139 | r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr); |
| 142 | if (r) | 140 | if (r) { |
| 141 | dev_err(rdev->dev, "(%d) UVD map failed\n", r); | ||
| 143 | return r; | 142 | return r; |
| 143 | } | ||
| 144 | |||
| 145 | radeon_bo_unreserve(rdev->uvd.vcpu_bo); | ||
| 144 | 146 | ||
| 145 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { | 147 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { |
| 146 | atomic_set(&rdev->uvd.handles[i], 0); | 148 | atomic_set(&rdev->uvd.handles[i], 0); |
| @@ -152,70 +154,73 @@ int radeon_uvd_init(struct radeon_device *rdev) | |||
| 152 | 154 | ||
| 153 | void radeon_uvd_fini(struct radeon_device *rdev) | 155 | void radeon_uvd_fini(struct radeon_device *rdev) |
| 154 | { | 156 | { |
| 155 | radeon_uvd_suspend(rdev); | ||
| 156 | radeon_bo_unref(&rdev->uvd.vcpu_bo); | ||
| 157 | } | ||
| 158 | |||
| 159 | int radeon_uvd_suspend(struct radeon_device *rdev) | ||
| 160 | { | ||
| 161 | int r; | 157 | int r; |
| 162 | 158 | ||
| 163 | if (rdev->uvd.vcpu_bo == NULL) | 159 | if (rdev->uvd.vcpu_bo == NULL) |
| 164 | return 0; | 160 | return; |
| 165 | 161 | ||
| 166 | r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); | 162 | r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); |
| 167 | if (!r) { | 163 | if (!r) { |
| 168 | radeon_bo_kunmap(rdev->uvd.vcpu_bo); | 164 | radeon_bo_kunmap(rdev->uvd.vcpu_bo); |
| 169 | radeon_bo_unpin(rdev->uvd.vcpu_bo); | 165 | radeon_bo_unpin(rdev->uvd.vcpu_bo); |
| 170 | rdev->uvd.cpu_addr = NULL; | ||
| 171 | if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) { | ||
| 172 | radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr); | ||
| 173 | } | ||
| 174 | radeon_bo_unreserve(rdev->uvd.vcpu_bo); | 166 | radeon_bo_unreserve(rdev->uvd.vcpu_bo); |
| 175 | |||
| 176 | if (rdev->uvd.cpu_addr) { | ||
| 177 | radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX); | ||
| 178 | } else { | ||
| 179 | rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL; | ||
| 180 | } | ||
| 181 | } | 167 | } |
| 182 | return r; | 168 | |
| 169 | radeon_bo_unref(&rdev->uvd.vcpu_bo); | ||
| 170 | |||
| 171 | release_firmware(rdev->uvd_fw); | ||
| 172 | } | ||
| 173 | |||
| 174 | int radeon_uvd_suspend(struct radeon_device *rdev) | ||
| 175 | { | ||
| 176 | unsigned size; | ||
| 177 | void *ptr; | ||
| 178 | int i; | ||
| 179 | |||
| 180 | if (rdev->uvd.vcpu_bo == NULL) | ||
| 181 | return 0; | ||
| 182 | |||
| 183 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) | ||
| 184 | if (atomic_read(&rdev->uvd.handles[i])) | ||
| 185 | break; | ||
| 186 | |||
| 187 | if (i == RADEON_MAX_UVD_HANDLES) | ||
| 188 | return 0; | ||
| 189 | |||
| 190 | size = radeon_bo_size(rdev->uvd.vcpu_bo); | ||
| 191 | size -= rdev->uvd_fw->size; | ||
| 192 | |||
| 193 | ptr = rdev->uvd.cpu_addr; | ||
| 194 | ptr += rdev->uvd_fw->size; | ||
| 195 | |||
| 196 | rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); | ||
| 197 | memcpy(rdev->uvd.saved_bo, ptr, size); | ||
| 198 | |||
| 199 | return 0; | ||
| 183 | } | 200 | } |
| 184 | 201 | ||
| 185 | int radeon_uvd_resume(struct radeon_device *rdev) | 202 | int radeon_uvd_resume(struct radeon_device *rdev) |
| 186 | { | 203 | { |
| 187 | int r; | 204 | unsigned size; |
| 205 | void *ptr; | ||
| 188 | 206 | ||
| 189 | if (rdev->uvd.vcpu_bo == NULL) | 207 | if (rdev->uvd.vcpu_bo == NULL) |
| 190 | return -EINVAL; | 208 | return -EINVAL; |
| 191 | 209 | ||
| 192 | r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); | 210 | memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size); |
| 193 | if (r) { | ||
| 194 | radeon_bo_unref(&rdev->uvd.vcpu_bo); | ||
| 195 | dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r); | ||
| 196 | return r; | ||
| 197 | } | ||
| 198 | 211 | ||
| 199 | /* Have been pin in cpu unmap unpin */ | 212 | size = radeon_bo_size(rdev->uvd.vcpu_bo); |
| 200 | radeon_bo_kunmap(rdev->uvd.vcpu_bo); | 213 | size -= rdev->uvd_fw->size; |
| 201 | radeon_bo_unpin(rdev->uvd.vcpu_bo); | ||
| 202 | 214 | ||
| 203 | r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, | 215 | ptr = rdev->uvd.cpu_addr; |
| 204 | &rdev->uvd.gpu_addr); | 216 | ptr += rdev->uvd_fw->size; |
| 205 | if (r) { | ||
| 206 | radeon_bo_unreserve(rdev->uvd.vcpu_bo); | ||
| 207 | radeon_bo_unref(&rdev->uvd.vcpu_bo); | ||
| 208 | dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r); | ||
| 209 | return r; | ||
| 210 | } | ||
| 211 | 217 | ||
| 212 | r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr); | 218 | if (rdev->uvd.saved_bo != NULL) { |
| 213 | if (r) { | 219 | memcpy(ptr, rdev->uvd.saved_bo, size); |
| 214 | dev_err(rdev->dev, "(%d) UVD map failed\n", r); | 220 | kfree(rdev->uvd.saved_bo); |
| 215 | return r; | 221 | rdev->uvd.saved_bo = NULL; |
| 216 | } | 222 | } else |
| 217 | 223 | memset(ptr, 0, size); | |
| 218 | radeon_bo_unreserve(rdev->uvd.vcpu_bo); | ||
| 219 | 224 | ||
| 220 | return 0; | 225 | return 0; |
| 221 | } | 226 | } |
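[Editor's sketch] After the UVD rework, the firmware image is no longer part of what suspend must preserve: init pins the BO once and resume re-copies the firmware from rdev->uvd_fw, so suspend snapshots only the session area past the image, and only while a handle is still open. The save/restore pair, condensed from the two functions above:

    /* suspend: snapshot everything past the firmware image */
    size = radeon_bo_size(rdev->uvd.vcpu_bo) - rdev->uvd_fw->size;
    ptr = rdev->uvd.cpu_addr + rdev->uvd_fw->size;
    rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); /* not NULL-checked in the hunk */
    memcpy(rdev->uvd.saved_bo, ptr, size);

    /* resume: firmware first, then the saved session area (or zeros) */
    memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
    if (rdev->uvd.saved_bo) {
            memcpy(ptr, rdev->uvd.saved_bo, size);
            kfree(rdev->uvd.saved_bo);
            rdev->uvd.saved_bo = NULL;
    } else {
            memset(ptr, 0, size);
    }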
| @@ -230,8 +235,8 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp) | |||
| 230 | { | 235 | { |
| 231 | int i, r; | 236 | int i, r; |
| 232 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { | 237 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { |
| 233 | if (rdev->uvd.filp[i] == filp) { | 238 | uint32_t handle = atomic_read(&rdev->uvd.handles[i]); |
| 234 | uint32_t handle = atomic_read(&rdev->uvd.handles[i]); | 239 | if (handle != 0 && rdev->uvd.filp[i] == filp) { |
| 235 | struct radeon_fence *fence; | 240 | struct radeon_fence *fence; |
| 236 | 241 | ||
| 237 | r = radeon_uvd_get_destroy_msg(rdev, | 242 | r = radeon_uvd_get_destroy_msg(rdev, |
| @@ -352,8 +357,10 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, | |||
| 352 | } | 357 | } |
| 353 | 358 | ||
| 354 | r = radeon_bo_kmap(bo, &ptr); | 359 | r = radeon_bo_kmap(bo, &ptr); |
| 355 | if (r) | 360 | if (r) { |
| 361 | DRM_ERROR("Failed mapping the UVD message (%d)!\n", r); | ||
| 356 | return r; | 362 | return r; |
| 363 | } | ||
| 357 | 364 | ||
| 358 | msg = ptr + offset; | 365 | msg = ptr + offset; |
| 359 | 366 | ||
| @@ -379,8 +386,14 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, | |||
| 379 | radeon_bo_kunmap(bo); | 386 | radeon_bo_kunmap(bo); |
| 380 | return 0; | 387 | return 0; |
| 381 | } else { | 388 | } else { |
| 382 | /* it's a create msg, no special handling needed */ | ||
| 383 | radeon_bo_kunmap(bo); | 389 | radeon_bo_kunmap(bo); |
| 390 | |||
| 391 | if (msg_type != 0) { | ||
| 392 | DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); | ||
| 393 | return -EINVAL; | ||
| 394 | } | ||
| 395 | |||
| 396 | /* it's a create msg, no special handling needed */ | ||
| 384 | } | 397 | } |
| 385 | 398 | ||
| 386 | /* create or decode, validate the handle */ | 399 | /* create or decode, validate the handle */ |
| @@ -403,7 +416,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, | |||
| 403 | 416 | ||
| 404 | static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | 417 | static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, |
| 405 | int data0, int data1, | 418 | int data0, int data1, |
| 406 | unsigned buf_sizes[]) | 419 | unsigned buf_sizes[], bool *has_msg_cmd) |
| 407 | { | 420 | { |
| 408 | struct radeon_cs_chunk *relocs_chunk; | 421 | struct radeon_cs_chunk *relocs_chunk; |
| 409 | struct radeon_cs_reloc *reloc; | 422 | struct radeon_cs_reloc *reloc; |
| @@ -432,7 +445,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | |||
| 432 | 445 | ||
| 433 | if (cmd < 0x4) { | 446 | if (cmd < 0x4) { |
| 434 | if ((end - start) < buf_sizes[cmd]) { | 447 | if ((end - start) < buf_sizes[cmd]) { |
| 435 | DRM_ERROR("buffer to small (%d / %d)!\n", | 448 | DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd, |
| 436 | (unsigned)(end - start), buf_sizes[cmd]); | 449 | (unsigned)(end - start), buf_sizes[cmd]); |
| 437 | return -EINVAL; | 450 | return -EINVAL; |
| 438 | } | 451 | } |
| @@ -457,9 +470,17 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | |||
| 457 | } | 470 | } |
| 458 | 471 | ||
| 459 | if (cmd == 0) { | 472 | if (cmd == 0) { |
| 473 | if (*has_msg_cmd) { | ||
| 474 | DRM_ERROR("More than one message in a UVD-IB!\n"); | ||
| 475 | return -EINVAL; | ||
| 476 | } | ||
| 477 | *has_msg_cmd = true; | ||
| 460 | r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes); | 478 | r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes); |
| 461 | if (r) | 479 | if (r) |
| 462 | return r; | 480 | return r; |
| 481 | } else if (!*has_msg_cmd) { | ||
| 482 | DRM_ERROR("Message needed before other commands are send!\n"); | ||
| 483 | return -EINVAL; | ||
| 463 | } | 484 | } |
| 464 | 485 | ||
| 465 | return 0; | 486 | return 0; |
| @@ -468,7 +489,8 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | |||
| 468 | static int radeon_uvd_cs_reg(struct radeon_cs_parser *p, | 489 | static int radeon_uvd_cs_reg(struct radeon_cs_parser *p, |
| 469 | struct radeon_cs_packet *pkt, | 490 | struct radeon_cs_packet *pkt, |
| 470 | int *data0, int *data1, | 491 | int *data0, int *data1, |
| 471 | unsigned buf_sizes[]) | 492 | unsigned buf_sizes[], |
| 493 | bool *has_msg_cmd) | ||
| 472 | { | 494 | { |
| 473 | int i, r; | 495 | int i, r; |
| 474 | 496 | ||
| @@ -482,7 +504,8 @@ static int radeon_uvd_cs_reg(struct radeon_cs_parser *p, | |||
| 482 | *data1 = p->idx; | 504 | *data1 = p->idx; |
| 483 | break; | 505 | break; |
| 484 | case UVD_GPCOM_VCPU_CMD: | 506 | case UVD_GPCOM_VCPU_CMD: |
| 485 | r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes); | 507 | r = radeon_uvd_cs_reloc(p, *data0, *data1, |
| 508 | buf_sizes, has_msg_cmd); | ||
| 486 | if (r) | 509 | if (r) |
| 487 | return r; | 510 | return r; |
| 488 | break; | 511 | break; |
| @@ -503,6 +526,9 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p) | |||
| 503 | struct radeon_cs_packet pkt; | 526 | struct radeon_cs_packet pkt; |
| 504 | int r, data0 = 0, data1 = 0; | 527 | int r, data0 = 0, data1 = 0; |
| 505 | 528 | ||
| 529 | /* does the IB have a msg command */ ||
| 530 | bool has_msg_cmd = false; | ||
| 531 | |||
| 506 | /* minimum buffer sizes */ | 532 | /* minimum buffer sizes */ |
| 507 | unsigned buf_sizes[] = { | 533 | unsigned buf_sizes[] = { |
| 508 | [0x00000000] = 2048, | 534 | [0x00000000] = 2048, |
| @@ -529,8 +555,8 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p) | |||
| 529 | return r; | 555 | return r; |
| 530 | switch (pkt.type) { | 556 | switch (pkt.type) { |
| 531 | case RADEON_PACKET_TYPE0: | 557 | case RADEON_PACKET_TYPE0: |
| 532 | r = radeon_uvd_cs_reg(p, &pkt, &data0, | 558 | r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1, |
| 533 | &data1, buf_sizes); | 559 | buf_sizes, &has_msg_cmd); |
| 534 | if (r) | 560 | if (r) |
| 535 | return r; | 561 | return r; |
| 536 | break; | 562 | break; |
| @@ -542,6 +568,12 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p) | |||
| 542 | return -EINVAL; | 568 | return -EINVAL; |
| 543 | } | 569 | } |
| 544 | } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); | 570 | } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); |
| 571 | |||
| 572 | if (!has_msg_cmd) { | ||
| 573 | DRM_ERROR("UVD-IBs need a msg command!\n"); | ||
| 574 | return -EINVAL; | ||
| 575 | } | ||
| 576 | |||
| 545 | return 0; | 577 | return 0; |
| 546 | } | 578 | } |
| 547 | 579 | ||
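[Editor's sketch] The parser changes enforce a per-IB protocol: exactly one message command, and it must precede every other command. Stripped down to the checks threaded through via *has_msg_cmd:

    bool has_msg_cmd = false;

    /* for each UVD_GPCOM_VCPU_CMD relocation: */
    if (cmd == 0) {                  /* message buffer */
            if (has_msg_cmd)
                    return -EINVAL;  /* a second message in one UVD-IB */
            has_msg_cmd = true;
    } else if (!has_msg_cmd) {
            return -EINVAL;          /* a data command before the message */
    }

    /* after the packet loop: */
    if (!has_msg_cmd)
            return -EINVAL;          /* an IB without any message is rejected too */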
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c index bef832a62fee..d1a1ce73bd45 100644 --- a/drivers/gpu/drm/radeon/rs780_dpm.c +++ b/drivers/gpu/drm/radeon/rs780_dpm.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include "r600_dpm.h" | 28 | #include "r600_dpm.h" |
| 29 | #include "rs780_dpm.h" | 29 | #include "rs780_dpm.h" |
| 30 | #include "atom.h" | 30 | #include "atom.h" |
| 31 | #include <linux/seq_file.h> | ||
| 31 | 32 | ||
| 32 | static struct igp_ps *rs780_get_ps(struct radeon_ps *rps) | 33 | static struct igp_ps *rs780_get_ps(struct radeon_ps *rps) |
| 33 | { | 34 | { |
| @@ -961,3 +962,27 @@ u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low) | |||
| 961 | 962 | ||
| 962 | return pi->bootup_uma_clk; | 963 | return pi->bootup_uma_clk; |
| 963 | } | 964 | } |
| 965 | |||
| 966 | void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, | ||
| 967 | struct seq_file *m) | ||
| 968 | { | ||
| 969 | struct radeon_ps *rps = rdev->pm.dpm.current_ps; | ||
| 970 | struct igp_ps *ps = rs780_get_ps(rps); | ||
| 971 | u32 current_fb_div = RREG32(FVTHROT_STATUS_REG0) & CURRENT_FEEDBACK_DIV_MASK; | ||
| 972 | u32 func_cntl = RREG32(CG_SPLL_FUNC_CNTL); | ||
| 973 | u32 ref_div = ((func_cntl & SPLL_REF_DIV_MASK) >> SPLL_REF_DIV_SHIFT) + 1; | ||
| 974 | u32 post_div = ((func_cntl & SPLL_SW_HILEN_MASK) >> SPLL_SW_HILEN_SHIFT) + 1 + | ||
| 975 | ((func_cntl & SPLL_SW_LOLEN_MASK) >> SPLL_SW_LOLEN_SHIFT) + 1; | ||
| 976 | u32 sclk = (rdev->clock.spll.reference_freq * current_fb_div) / | ||
| 977 | (post_div * ref_div); | ||
| 978 | |||
| 979 | seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); | ||
| 980 | |||
| 981 | /* guess based on the current sclk */ | ||
| 982 | if (sclk < (ps->sclk_low + 500)) | ||
| 983 | seq_printf(m, "power level 0 sclk: %u vddc_index: %d\n", | ||
| 984 | ps->sclk_low, ps->min_voltage); | ||
| 985 | else | ||
| 986 | seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n", | ||
| 987 | ps->sclk_high, ps->max_voltage); | ||
| 988 | } | ||
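[Editor's note] The new rs780 debugfs hook reconstructs the running engine clock from the SPLL divider fields; the +1 terms suggest the hardware encodes each divider minus one, and the post divider is the sum of the switch high/low pulse lengths. As a formula:

    sclk = (spll.reference_freq * CURRENT_FEEDBACK_DIV) / (post_div * ref_div)
    where ref_div  = SPLL_REF_DIV field + 1
          post_div = (SPLL_SW_HILEN field + 1) + (SPLL_SW_LOLEN field + 1)

The reported power level is then an estimate, as the in-hunk comment says: any sclk below sclk_low + 500 prints level 0, everything else prints level 1.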
diff --git a/drivers/gpu/drm/radeon/rs780d.h b/drivers/gpu/drm/radeon/rs780d.h index b1142ed1c628..cfbe9a43d97b 100644 --- a/drivers/gpu/drm/radeon/rs780d.h +++ b/drivers/gpu/drm/radeon/rs780d.h | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | # define SPLL_SLEEP (1 << 1) | 28 | # define SPLL_SLEEP (1 << 1) |
| 29 | # define SPLL_REF_DIV(x) ((x) << 2) | 29 | # define SPLL_REF_DIV(x) ((x) << 2) |
| 30 | # define SPLL_REF_DIV_MASK (7 << 2) | 30 | # define SPLL_REF_DIV_MASK (7 << 2) |
| 31 | # define SPLL_REF_DIV_SHIFT 2 | ||
| 31 | # define SPLL_FB_DIV(x) ((x) << 5) | 32 | # define SPLL_FB_DIV(x) ((x) << 5) |
| 32 | # define SPLL_FB_DIV_MASK (0xff << 2) | 33 | # define SPLL_FB_DIV_MASK (0xff << 2) |
| 33 | # define SPLL_FB_DIV_SHIFT 2 | 34 | # define SPLL_FB_DIV_SHIFT 2 |
| @@ -36,8 +37,10 @@ | |||
| 36 | # define SPLL_PULSENUM_MASK (3 << 14) | 37 | # define SPLL_PULSENUM_MASK (3 << 14) |
| 37 | # define SPLL_SW_HILEN(x) ((x) << 16) | 38 | # define SPLL_SW_HILEN(x) ((x) << 16) |
| 38 | # define SPLL_SW_HILEN_MASK (0xf << 16) | 39 | # define SPLL_SW_HILEN_MASK (0xf << 16) |
| 40 | # define SPLL_SW_HILEN_SHIFT 16 | ||
| 39 | # define SPLL_SW_LOLEN(x) ((x) << 20) | 41 | # define SPLL_SW_LOLEN(x) ((x) << 20) |
| 40 | # define SPLL_SW_LOLEN_MASK (0xf << 20) | 42 | # define SPLL_SW_LOLEN_MASK (0xf << 20) |
| 43 | # define SPLL_SW_LOLEN_SHIFT 20 | ||
| 41 | # define SPLL_DIVEN (1 << 24) | 44 | # define SPLL_DIVEN (1 << 24) |
| 42 | # define SPLL_BYPASS_EN (1 << 25) | 45 | # define SPLL_BYPASS_EN (1 << 25) |
| 43 | # define SPLL_CHG_STATUS (1 << 29) | 46 | # define SPLL_CHG_STATUS (1 << 29) |
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c index 8303de267ee5..bdd888b4db2b 100644 --- a/drivers/gpu/drm/radeon/rv6xx_dpm.c +++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c | |||
| @@ -819,7 +819,7 @@ static void rv6xx_program_memory_timing_parameters(struct radeon_device *rdev) | |||
| 819 | POWERMODE1(calculate_memory_refresh_rate(rdev, | 819 | POWERMODE1(calculate_memory_refresh_rate(rdev, |
| 820 | pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) | | 820 | pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) | |
| 821 | POWERMODE2(calculate_memory_refresh_rate(rdev, | 821 | POWERMODE2(calculate_memory_refresh_rate(rdev, |
| 822 | pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) | | 822 | pi->hw.sclks[R600_POWER_LEVEL_HIGH])) | |
| 823 | POWERMODE3(calculate_memory_refresh_rate(rdev, | 823 | POWERMODE3(calculate_memory_refresh_rate(rdev, |
| 824 | pi->hw.sclks[R600_POWER_LEVEL_HIGH]))); | 824 | pi->hw.sclks[R600_POWER_LEVEL_HIGH]))); |
| 825 | WREG32(ARB_RFSH_RATE, arb_refresh_rate); | 825 | WREG32(ARB_RFSH_RATE, arb_refresh_rate); |
| @@ -1182,10 +1182,10 @@ static void rv6xx_program_display_gap(struct radeon_device *rdev) | |||
| 1182 | u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); | 1182 | u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); |
| 1183 | 1183 | ||
| 1184 | tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); | 1184 | tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); |
| 1185 | if (RREG32(AVIVO_D1CRTC_CONTROL) & AVIVO_CRTC_EN) { | 1185 | if (rdev->pm.dpm.new_active_crtcs & 1) { |
| 1186 | tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); | 1186 | tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); |
| 1187 | tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); | 1187 | tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); |
| 1188 | } else if (RREG32(AVIVO_D2CRTC_CONTROL) & AVIVO_CRTC_EN) { | 1188 | } else if (rdev->pm.dpm.new_active_crtcs & 2) { |
| 1189 | tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); | 1189 | tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); |
| 1190 | tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); | 1190 | tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); |
| 1191 | } else { | 1191 | } else { |
| @@ -1670,6 +1670,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev) | |||
| 1670 | struct radeon_ps *old_ps = rdev->pm.dpm.current_ps; | 1670 | struct radeon_ps *old_ps = rdev->pm.dpm.current_ps; |
| 1671 | int ret; | 1671 | int ret; |
| 1672 | 1672 | ||
| 1673 | pi->restricted_levels = 0; | ||
| 1674 | |||
| 1673 | rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); | 1675 | rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); |
| 1674 | 1676 | ||
| 1675 | rv6xx_clear_vc(rdev); | 1677 | rv6xx_clear_vc(rdev); |
| @@ -1756,6 +1758,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev) | |||
| 1756 | 1758 | ||
| 1757 | rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); | 1759 | rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); |
| 1758 | 1760 | ||
| 1761 | rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; | ||
| 1762 | |||
| 1759 | return 0; | 1763 | return 0; |
| 1760 | } | 1764 | } |
| 1761 | 1765 | ||
| @@ -1763,12 +1767,14 @@ void rv6xx_setup_asic(struct radeon_device *rdev) | |||
| 1763 | { | 1767 | { |
| 1764 | r600_enable_acpi_pm(rdev); | 1768 | r600_enable_acpi_pm(rdev); |
| 1765 | 1769 | ||
| 1766 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s) | 1770 | if (radeon_aspm != 0) { |
| 1767 | rv6xx_enable_l0s(rdev); | 1771 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s) |
| 1768 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1) | 1772 | rv6xx_enable_l0s(rdev); |
| 1769 | rv6xx_enable_l1(rdev); | 1773 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1) |
| 1770 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1) | 1774 | rv6xx_enable_l1(rdev); |
| 1771 | rv6xx_enable_pll_sleep_in_l1(rdev); | 1775 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1) |
| 1776 | rv6xx_enable_pll_sleep_in_l1(rdev); | ||
| 1777 | } | ||
| 1772 | } | 1778 | } |
| 1773 | 1779 | ||
| 1774 | void rv6xx_dpm_display_configuration_changed(struct radeon_device *rdev) | 1780 | void rv6xx_dpm_display_configuration_changed(struct radeon_device *rdev) |
| @@ -1938,9 +1944,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev) | |||
| 1938 | 1944 | ||
| 1939 | int rv6xx_dpm_init(struct radeon_device *rdev) | 1945 | int rv6xx_dpm_init(struct radeon_device *rdev) |
| 1940 | { | 1946 | { |
| 1941 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | 1947 | struct radeon_atom_ss ss; |
| 1942 | uint16_t data_offset, size; | ||
| 1943 | uint8_t frev, crev; | ||
| 1944 | struct atom_clock_dividers dividers; | 1948 | struct atom_clock_dividers dividers; |
| 1945 | struct rv6xx_power_info *pi; | 1949 | struct rv6xx_power_info *pi; |
| 1946 | int ret; | 1950 | int ret; |
| @@ -1983,16 +1987,18 @@ int rv6xx_dpm_init(struct radeon_device *rdev) | |||
| 1983 | 1987 | ||
| 1984 | pi->gfx_clock_gating = true; | 1988 | pi->gfx_clock_gating = true; |
| 1985 | 1989 | ||
| 1986 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 1990 | pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, |
| 1987 | &frev, &crev, &data_offset)) { | 1991 | ASIC_INTERNAL_ENGINE_SS, 0); |
| 1988 | pi->sclk_ss = true; | 1992 | pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, |
| 1989 | pi->mclk_ss = true; | 1993 | ASIC_INTERNAL_MEMORY_SS, 0); |
| 1994 | |||
| 1995 | /* Disable sclk ss, causes hangs on a lot of systems */ | ||
| 1996 | pi->sclk_ss = false; | ||
| 1997 | |||
| 1998 | if (pi->sclk_ss || pi->mclk_ss) | ||
| 1990 | pi->dynamic_ss = true; | 1999 | pi->dynamic_ss = true; |
| 1991 | } else { | 2000 | else |
| 1992 | pi->sclk_ss = false; | ||
| 1993 | pi->mclk_ss = false; | ||
| 1994 | pi->dynamic_ss = false; | 2001 | pi->dynamic_ss = false; |
| 1995 | } | ||
| 1996 | 2002 | ||
| 1997 | pi->dynamic_pcie_gen2 = true; | 2003 | pi->dynamic_pcie_gen2 = true; |
| 1998 | 2004 | ||
| @@ -2083,3 +2089,34 @@ u32 rv6xx_dpm_get_mclk(struct radeon_device *rdev, bool low) | |||
| 2083 | else | 2089 | else |
| 2084 | return requested_state->high.mclk; | 2090 | return requested_state->high.mclk; |
| 2085 | } | 2091 | } |
| 2092 | |||
| 2093 | int rv6xx_dpm_force_performance_level(struct radeon_device *rdev, | ||
| 2094 | enum radeon_dpm_forced_level level) | ||
| 2095 | { | ||
| 2096 | struct rv6xx_power_info *pi = rv6xx_get_pi(rdev); | ||
| 2097 | |||
| 2098 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { | ||
| 2099 | pi->restricted_levels = 3; | ||
| 2100 | } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) { | ||
| 2101 | pi->restricted_levels = 2; | ||
| 2102 | } else { | ||
| 2103 | pi->restricted_levels = 0; | ||
| 2104 | } | ||
| 2105 | |||
| 2106 | rv6xx_clear_vc(rdev); | ||
| 2107 | r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true); | ||
| 2108 | r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF); | ||
| 2109 | r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW); | ||
| 2110 | r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false); | ||
| 2111 | r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false); | ||
| 2112 | rv6xx_enable_medium(rdev); | ||
| 2113 | rv6xx_enable_high(rdev); | ||
| 2114 | if (pi->restricted_levels == 3) | ||
| 2115 | r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, false); | ||
| 2116 | rv6xx_program_vc(rdev); | ||
| 2117 | rv6xx_program_at(rdev); | ||
| 2118 | |||
| 2119 | rdev->pm.dpm.forced_level = level; | ||
| 2120 | |||
| 2121 | return 0; | ||
| 2122 | } | ||
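[Editor's note] rv6xx gains a force-performance-level hook: restricted_levels masks power levels out of the rotation (3 leaves only high usable, 2 covers the low case, 0 restores automatic selection — read off the enable sequence, so treat hedgedly), and the set_power_state hunk clears both the mask and rdev->pm.dpm.forced_level on every normal state switch, so forcing does not survive a state change. Presumably the hook is reached through the standard dpm sysfs control; the wiring itself is outside this diff:

    /* hypothetical wiring, not shown in this series: */
    /* rdev->asic->dpm.force_performance_level = rv6xx_dpm_force_performance_level; */

    /* then, from userspace, via the usual radeon dpm attribute:
     *   echo high > /sys/class/drm/card0/device/power_dpm_force_performance_level
     *   echo low  > .../power_dpm_force_performance_level
     *   echo auto > .../power_dpm_force_performance_level
     */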
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 4a62ad2e5399..bcc68ec204ad 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
| @@ -1829,6 +1829,8 @@ static int rv770_startup(struct radeon_device *rdev) | |||
| 1829 | /* enable pcie gen2 link */ | 1829 | /* enable pcie gen2 link */ |
| 1830 | rv770_pcie_gen2_enable(rdev); | 1830 | rv770_pcie_gen2_enable(rdev); |
| 1831 | 1831 | ||
| 1832 | rv770_mc_program(rdev); | ||
| 1833 | |||
| 1832 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | 1834 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
| 1833 | r = r600_init_microcode(rdev); | 1835 | r = r600_init_microcode(rdev); |
| 1834 | if (r) { | 1836 | if (r) { |
| @@ -1841,7 +1843,6 @@ static int rv770_startup(struct radeon_device *rdev) | |||
| 1841 | if (r) | 1843 | if (r) |
| 1842 | return r; | 1844 | return r; |
| 1843 | 1845 | ||
| 1844 | rv770_mc_program(rdev); | ||
| 1845 | if (rdev->flags & RADEON_IS_AGP) { | 1846 | if (rdev->flags & RADEON_IS_AGP) { |
| 1846 | rv770_agp_enable(rdev); | 1847 | rv770_agp_enable(rdev); |
| 1847 | } else { | 1848 | } else { |
| @@ -1983,6 +1984,7 @@ int rv770_resume(struct radeon_device *rdev) | |||
| 1983 | int rv770_suspend(struct radeon_device *rdev) | 1984 | int rv770_suspend(struct radeon_device *rdev) |
| 1984 | { | 1985 | { |
| 1985 | r600_audio_fini(rdev); | 1986 | r600_audio_fini(rdev); |
| 1987 | r600_uvd_stop(rdev); | ||
| 1986 | radeon_uvd_suspend(rdev); | 1988 | radeon_uvd_suspend(rdev); |
| 1987 | r700_cp_stop(rdev); | 1989 | r700_cp_stop(rdev); |
| 1988 | r600_dma_stop(rdev); | 1990 | r600_dma_stop(rdev); |
| @@ -2098,6 +2100,7 @@ void rv770_fini(struct radeon_device *rdev) | |||
| 2098 | radeon_ib_pool_fini(rdev); | 2100 | radeon_ib_pool_fini(rdev); |
| 2099 | radeon_irq_kms_fini(rdev); | 2101 | radeon_irq_kms_fini(rdev); |
| 2100 | rv770_pcie_gart_fini(rdev); | 2102 | rv770_pcie_gart_fini(rdev); |
| 2103 | r600_uvd_stop(rdev); | ||
| 2101 | radeon_uvd_fini(rdev); | 2104 | radeon_uvd_fini(rdev); |
| 2102 | r600_vram_scratch_fini(rdev); | 2105 | r600_vram_scratch_fini(rdev); |
| 2103 | radeon_gem_fini(rdev); | 2106 | radeon_gem_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index d914e04ea39a..094c67a29d0d 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c | |||
| @@ -2099,12 +2099,14 @@ void rv770_dpm_setup_asic(struct radeon_device *rdev) | |||
| 2099 | 2099 | ||
| 2100 | rv770_enable_acpi_pm(rdev); | 2100 | rv770_enable_acpi_pm(rdev); |
| 2101 | 2101 | ||
| 2102 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s) | 2102 | if (radeon_aspm != 0) { |
| 2103 | rv770_enable_l0s(rdev); | 2103 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s) |
| 2104 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1) | 2104 | rv770_enable_l0s(rdev); |
| 2105 | rv770_enable_l1(rdev); | 2105 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1) |
| 2106 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1) | 2106 | rv770_enable_l1(rdev); |
| 2107 | rv770_enable_pll_sleep_in_l1(rdev); | 2107 | if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1) |
| 2108 | rv770_enable_pll_sleep_in_l1(rdev); | ||
| 2109 | } | ||
| 2108 | } | 2110 | } |
| 2109 | 2111 | ||
| 2110 | void rv770_dpm_display_configuration_changed(struct radeon_device *rdev) | 2112 | void rv770_dpm_display_configuration_changed(struct radeon_device *rdev) |
| @@ -2317,12 +2319,25 @@ int rv7xx_parse_power_table(struct radeon_device *rdev) | |||
| 2317 | return 0; | 2319 | return 0; |
| 2318 | } | 2320 | } |
| 2319 | 2321 | ||
| 2322 | void rv770_get_engine_memory_ss(struct radeon_device *rdev) | ||
| 2323 | { | ||
| 2324 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); | ||
| 2325 | struct radeon_atom_ss ss; | ||
| 2326 | |||
| 2327 | pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, | ||
| 2328 | ASIC_INTERNAL_ENGINE_SS, 0); | ||
| 2329 | pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, | ||
| 2330 | ASIC_INTERNAL_MEMORY_SS, 0); | ||
| 2331 | |||
| 2332 | if (pi->sclk_ss || pi->mclk_ss) | ||
| 2333 | pi->dynamic_ss = true; | ||
| 2334 | else | ||
| 2335 | pi->dynamic_ss = false; | ||
| 2336 | } | ||
| 2337 | |||
| 2320 | int rv770_dpm_init(struct radeon_device *rdev) | 2338 | int rv770_dpm_init(struct radeon_device *rdev) |
| 2321 | { | 2339 | { |
| 2322 | struct rv7xx_power_info *pi; | 2340 | struct rv7xx_power_info *pi; |
| 2323 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
| 2324 | uint16_t data_offset, size; | ||
| 2325 | uint8_t frev, crev; | ||
| 2326 | struct atom_clock_dividers dividers; | 2341 | struct atom_clock_dividers dividers; |
| 2327 | int ret; | 2342 | int ret; |
| 2328 | 2343 | ||
| @@ -2367,16 +2382,7 @@ int rv770_dpm_init(struct radeon_device *rdev) | |||
| 2367 | pi->mvdd_control = | 2382 | pi->mvdd_control = |
| 2368 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0); | 2383 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0); |
| 2369 | 2384 | ||
| 2370 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 2385 | rv770_get_engine_memory_ss(rdev); |
| 2371 | &frev, &crev, &data_offset)) { | ||
| 2372 | pi->sclk_ss = true; | ||
| 2373 | pi->mclk_ss = true; | ||
| 2374 | pi->dynamic_ss = true; | ||
| 2375 | } else { | ||
| 2376 | pi->sclk_ss = false; | ||
| 2377 | pi->mclk_ss = false; | ||
| 2378 | pi->dynamic_ss = false; | ||
| 2379 | } | ||
| 2380 | 2386 | ||
| 2381 | pi->asi = RV770_ASI_DFLT; | 2387 | pi->asi = RV770_ASI_DFLT; |
| 2382 | pi->pasi = RV770_HASI_DFLT; | 2388 | pi->pasi = RV770_HASI_DFLT; |
| @@ -2391,8 +2397,7 @@ int rv770_dpm_init(struct radeon_device *rdev) | |||
| 2391 | 2397 | ||
| 2392 | pi->dynamic_pcie_gen2 = true; | 2398 | pi->dynamic_pcie_gen2 = true; |
| 2393 | 2399 | ||
| 2394 | if (pi->gfx_clock_gating && | 2400 | if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) |
| 2395 | (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) | ||
| 2396 | pi->thermal_protection = true; | 2401 | pi->thermal_protection = true; |
| 2397 | else | 2402 | else |
| 2398 | pi->thermal_protection = false; | 2403 | pi->thermal_protection = false; |
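rv770_get_engine_memory_ss() replaces the old check that merely tested whether the ASIC_InternalSS_Info table parses; the support flags now come from the actual engine and memory SS entries. A hedged usage sketch (pi fields as in the hunk above):

    struct rv7xx_power_info *pi = rv770_get_pi(rdev);

    rv770_get_engine_memory_ss(rdev);  /* fills pi->sclk_ss and pi->mclk_ss */
    if (pi->dynamic_ss)                /* set when either SS kind is present */
        DRM_DEBUG("engine/memory spread spectrum available\n");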
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h index 96b1b2a62a8a..9244effc6b59 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.h +++ b/drivers/gpu/drm/radeon/rv770_dpm.h | |||
| @@ -275,6 +275,7 @@ void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev, | |||
| 275 | void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev, | 275 | void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev, |
| 276 | struct radeon_ps *new_ps, | 276 | struct radeon_ps *new_ps, |
| 277 | struct radeon_ps *old_ps); | 277 | struct radeon_ps *old_ps); |
| 278 | void rv770_get_engine_memory_ss(struct radeon_device *rdev); | ||
| 278 | 279 | ||
| 279 | /* smc */ | 280 | /* smc */ |
| 280 | int rv770_read_smc_soft_register(struct radeon_device *rdev, | 281 | int rv770_read_smc_soft_register(struct radeon_device *rdev, |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 234906709067..daa8d2df8ec5 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
| @@ -22,7 +22,6 @@ | |||
| 22 | * Authors: Alex Deucher | 22 | * Authors: Alex Deucher |
| 23 | */ | 23 | */ |
| 24 | #include <linux/firmware.h> | 24 | #include <linux/firmware.h> |
| 25 | #include <linux/platform_device.h> | ||
| 26 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
| 27 | #include <linux/module.h> | 26 | #include <linux/module.h> |
| 28 | #include <drm/drmP.h> | 27 | #include <drm/drmP.h> |
| @@ -1541,7 +1540,6 @@ static int si_mc_load_microcode(struct radeon_device *rdev) | |||
| 1541 | 1540 | ||
| 1542 | static int si_init_microcode(struct radeon_device *rdev) | 1541 | static int si_init_microcode(struct radeon_device *rdev) |
| 1543 | { | 1542 | { |
| 1544 | struct platform_device *pdev; | ||
| 1545 | const char *chip_name; | 1543 | const char *chip_name; |
| 1546 | const char *rlc_chip_name; | 1544 | const char *rlc_chip_name; |
| 1547 | size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size; | 1545 | size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size; |
| @@ -1551,13 +1549,6 @@ static int si_init_microcode(struct radeon_device *rdev) | |||
| 1551 | 1549 | ||
| 1552 | DRM_DEBUG("\n"); | 1550 | DRM_DEBUG("\n"); |
| 1553 | 1551 | ||
| 1554 | pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); | ||
| 1555 | err = IS_ERR(pdev); | ||
| 1556 | if (err) { | ||
| 1557 | printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); | ||
| 1558 | return -EINVAL; | ||
| 1559 | } | ||
| 1560 | |||
| 1561 | switch (rdev->family) { | 1552 | switch (rdev->family) { |
| 1562 | case CHIP_TAHITI: | 1553 | case CHIP_TAHITI: |
| 1563 | chip_name = "TAHITI"; | 1554 | chip_name = "TAHITI"; |
| @@ -1615,7 +1606,7 @@ static int si_init_microcode(struct radeon_device *rdev) | |||
| 1615 | DRM_INFO("Loading %s Microcode\n", chip_name); | 1606 | DRM_INFO("Loading %s Microcode\n", chip_name); |
| 1616 | 1607 | ||
| 1617 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); | 1608 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); |
| 1618 | err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); | 1609 | err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); |
| 1619 | if (err) | 1610 | if (err) |
| 1620 | goto out; | 1611 | goto out; |
| 1621 | if (rdev->pfp_fw->size != pfp_req_size) { | 1612 | if (rdev->pfp_fw->size != pfp_req_size) { |
| @@ -1627,7 +1618,7 @@ static int si_init_microcode(struct radeon_device *rdev) | |||
| 1627 | } | 1618 | } |
| 1628 | 1619 | ||
| 1629 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); | 1620 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); |
| 1630 | err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); | 1621 | err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); |
| 1631 | if (err) | 1622 | if (err) |
| 1632 | goto out; | 1623 | goto out; |
| 1633 | if (rdev->me_fw->size != me_req_size) { | 1624 | if (rdev->me_fw->size != me_req_size) { |
| @@ -1638,7 +1629,7 @@ static int si_init_microcode(struct radeon_device *rdev) | |||
| 1638 | } | 1629 | } |
| 1639 | 1630 | ||
| 1640 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); | 1631 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); |
| 1641 | err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev); | 1632 | err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev); |
| 1642 | if (err) | 1633 | if (err) |
| 1643 | goto out; | 1634 | goto out; |
| 1644 | if (rdev->ce_fw->size != ce_req_size) { | 1635 | if (rdev->ce_fw->size != ce_req_size) { |
| @@ -1649,7 +1640,7 @@ static int si_init_microcode(struct radeon_device *rdev) | |||
| 1649 | } | 1640 | } |
| 1650 | 1641 | ||
| 1651 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); | 1642 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); |
| 1652 | err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); | 1643 | err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); |
| 1653 | if (err) | 1644 | if (err) |
| 1654 | goto out; | 1645 | goto out; |
| 1655 | if (rdev->rlc_fw->size != rlc_req_size) { | 1646 | if (rdev->rlc_fw->size != rlc_req_size) { |
| @@ -1660,7 +1651,7 @@ static int si_init_microcode(struct radeon_device *rdev) | |||
| 1660 | } | 1651 | } |
| 1661 | 1652 | ||
| 1662 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); | 1653 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); |
| 1663 | err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev); | 1654 | err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); |
| 1664 | if (err) | 1655 | if (err) |
| 1665 | goto out; | 1656 | goto out; |
| 1666 | if (rdev->mc_fw->size != mc_req_size) { | 1657 | if (rdev->mc_fw->size != mc_req_size) { |
| @@ -1671,10 +1662,14 @@ static int si_init_microcode(struct radeon_device *rdev) | |||
| 1671 | } | 1662 | } |
| 1672 | 1663 | ||
| 1673 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); | 1664 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); |
| 1674 | err = request_firmware(&rdev->smc_fw, fw_name, &pdev->dev); | 1665 | err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); |
| 1675 | if (err) | 1666 | if (err) { |
| 1676 | goto out; | 1667 | printk(KERN_ERR |
| 1677 | if (rdev->smc_fw->size != smc_req_size) { | 1668 | "smc: error loading firmware \"%s\"\n", |
| 1669 | fw_name); | ||
| 1670 | release_firmware(rdev->smc_fw); | ||
| 1671 | rdev->smc_fw = NULL; | ||
| 1672 | } else if (rdev->smc_fw->size != smc_req_size) { | ||
| 1678 | printk(KERN_ERR | 1673 | printk(KERN_ERR |
| 1679 | "si_smc: Bogus length %zu in firmware \"%s\"\n", | 1674 | "si_smc: Bogus length %zu in firmware \"%s\"\n", |
| 1680 | rdev->smc_fw->size, fw_name); | 1675 | rdev->smc_fw->size, fw_name); |
| @@ -1682,8 +1677,6 @@ static int si_init_microcode(struct radeon_device *rdev) | |||
| 1682 | } | 1677 | } |
| 1683 | 1678 | ||
| 1684 | out: | 1679 | out: |
| 1685 | platform_device_unregister(pdev); | ||
| 1686 | |||
| 1687 | if (err) { | 1680 | if (err) { |
| 1688 | if (err != -EINVAL) | 1681 | if (err != -EINVAL) |
| 1689 | printk(KERN_ERR | 1682 | printk(KERN_ERR |
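si_init_microcode() now requests firmware against the radeon device itself instead of registering a throwaway "radeon_cp" platform device, and a missing SMC image is downgraded from a fatal error to a warning. A condensed sketch of the new SMC path (the err bookkeeping of the real hunk is simplified here):

    snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
    err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
    if (err) {
        /* no SMC ucode: warn and carry on; DPM setup presumably
         * falls back when rdev->smc_fw is left NULL */
        printk(KERN_ERR "smc: error loading firmware \"%s\"\n", fw_name);
        release_firmware(rdev->smc_fw);
        rdev->smc_fw = NULL;
    }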
| @@ -4401,6 +4394,270 @@ void si_vm_fini(struct radeon_device *rdev) | |||
| 4401 | } | 4394 | } |
| 4402 | 4395 | ||
| 4403 | /** | 4396 | /** |
| 4397 | * si_vm_decode_fault - print human readable fault info | ||
| 4398 | * | ||
| 4399 | * @rdev: radeon_device pointer | ||
| 4400 | * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value | ||
| 4401 | * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value | ||
| 4402 | * | ||
| 4403 | * Print human readable fault information (SI). | ||
| 4404 | */ | ||
| 4405 | static void si_vm_decode_fault(struct radeon_device *rdev, | ||
| 4406 | u32 status, u32 addr) | ||
| 4407 | { | ||
| 4408 | u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT; | ||
| 4409 | u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT; | ||
| 4410 | u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT; | ||
| 4411 | char *block; | ||
| 4412 | |||
| 4413 | if (rdev->family == CHIP_TAHITI) { | ||
| 4414 | switch (mc_id) { | ||
| 4415 | case 160: | ||
| 4416 | case 144: | ||
| 4417 | case 96: | ||
| 4418 | case 80: | ||
| 4419 | case 224: | ||
| 4420 | case 208: | ||
| 4421 | case 32: | ||
| 4422 | case 16: | ||
| 4423 | block = "CB"; | ||
| 4424 | break; | ||
| 4425 | case 161: | ||
| 4426 | case 145: | ||
| 4427 | case 97: | ||
| 4428 | case 81: | ||
| 4429 | case 225: | ||
| 4430 | case 209: | ||
| 4431 | case 33: | ||
| 4432 | case 17: | ||
| 4433 | block = "CB_FMASK"; | ||
| 4434 | break; | ||
| 4435 | case 162: | ||
| 4436 | case 146: | ||
| 4437 | case 98: | ||
| 4438 | case 82: | ||
| 4439 | case 226: | ||
| 4440 | case 210: | ||
| 4441 | case 34: | ||
| 4442 | case 18: | ||
| 4443 | block = "CB_CMASK"; | ||
| 4444 | break; | ||
| 4445 | case 163: | ||
| 4446 | case 147: | ||
| 4447 | case 99: | ||
| 4448 | case 83: | ||
| 4449 | case 227: | ||
| 4450 | case 211: | ||
| 4451 | case 35: | ||
| 4452 | case 19: | ||
| 4453 | block = "CB_IMMED"; | ||
| 4454 | break; | ||
| 4455 | case 164: | ||
| 4456 | case 148: | ||
| 4457 | case 100: | ||
| 4458 | case 84: | ||
| 4459 | case 228: | ||
| 4460 | case 212: | ||
| 4461 | case 36: | ||
| 4462 | case 20: | ||
| 4463 | block = "DB"; | ||
| 4464 | break; | ||
| 4465 | case 165: | ||
| 4466 | case 149: | ||
| 4467 | case 101: | ||
| 4468 | case 85: | ||
| 4469 | case 229: | ||
| 4470 | case 213: | ||
| 4471 | case 37: | ||
| 4472 | case 21: | ||
| 4473 | block = "DB_HTILE"; | ||
| 4474 | break; | ||
| 4475 | case 167: | ||
| 4476 | case 151: | ||
| 4477 | case 103: | ||
| 4478 | case 87: | ||
| 4479 | case 231: | ||
| 4480 | case 215: | ||
| 4481 | case 39: | ||
| 4482 | case 23: | ||
| 4483 | block = "DB_STEN"; | ||
| 4484 | break; | ||
| 4485 | case 72: | ||
| 4486 | case 68: | ||
| 4487 | case 64: | ||
| 4488 | case 8: | ||
| 4489 | case 4: | ||
| 4490 | case 0: | ||
| 4491 | case 136: | ||
| 4492 | case 132: | ||
| 4493 | case 128: | ||
| 4494 | case 200: | ||
| 4495 | case 196: | ||
| 4496 | case 192: | ||
| 4497 | block = "TC"; | ||
| 4498 | break; | ||
| 4499 | case 112: | ||
| 4500 | case 48: | ||
| 4501 | block = "CP"; | ||
| 4502 | break; | ||
| 4503 | case 49: | ||
| 4504 | case 177: | ||
| 4505 | case 50: | ||
| 4506 | case 178: | ||
| 4507 | block = "SH"; | ||
| 4508 | break; | ||
| 4509 | case 53: | ||
| 4510 | case 190: | ||
| 4511 | block = "VGT"; | ||
| 4512 | break; | ||
| 4513 | case 117: | ||
| 4514 | block = "IH"; | ||
| 4515 | break; | ||
| 4516 | case 51: | ||
| 4517 | case 115: | ||
| 4518 | block = "RLC"; | ||
| 4519 | break; | ||
| 4520 | case 119: | ||
| 4521 | case 183: | ||
| 4522 | block = "DMA0"; | ||
| 4523 | break; | ||
| 4524 | case 61: | ||
| 4525 | block = "DMA1"; | ||
| 4526 | break; | ||
| 4527 | case 248: | ||
| 4528 | case 120: | ||
| 4529 | block = "HDP"; | ||
| 4530 | break; | ||
| 4531 | default: | ||
| 4532 | block = "unknown"; | ||
| 4533 | break; | ||
| 4534 | } | ||
| 4535 | } else { | ||
| 4536 | switch (mc_id) { | ||
| 4537 | case 32: | ||
| 4538 | case 16: | ||
| 4539 | case 96: | ||
| 4540 | case 80: | ||
| 4541 | case 160: | ||
| 4542 | case 144: | ||
| 4543 | case 224: | ||
| 4544 | case 208: | ||
| 4545 | block = "CB"; | ||
| 4546 | break; | ||
| 4547 | case 33: | ||
| 4548 | case 17: | ||
| 4549 | case 97: | ||
| 4550 | case 81: | ||
| 4551 | case 161: | ||
| 4552 | case 145: | ||
| 4553 | case 225: | ||
| 4554 | case 209: | ||
| 4555 | block = "CB_FMASK"; | ||
| 4556 | break; | ||
| 4557 | case 34: | ||
| 4558 | case 18: | ||
| 4559 | case 98: | ||
| 4560 | case 82: | ||
| 4561 | case 162: | ||
| 4562 | case 146: | ||
| 4563 | case 226: | ||
| 4564 | case 210: | ||
| 4565 | block = "CB_CMASK"; | ||
| 4566 | break; | ||
| 4567 | case 35: | ||
| 4568 | case 19: | ||
| 4569 | case 99: | ||
| 4570 | case 83: | ||
| 4571 | case 163: | ||
| 4572 | case 147: | ||
| 4573 | case 227: | ||
| 4574 | case 211: | ||
| 4575 | block = "CB_IMMED"; | ||
| 4576 | break; | ||
| 4577 | case 36: | ||
| 4578 | case 20: | ||
| 4579 | case 100: | ||
| 4580 | case 84: | ||
| 4581 | case 164: | ||
| 4582 | case 148: | ||
| 4583 | case 228: | ||
| 4584 | case 212: | ||
| 4585 | block = "DB"; | ||
| 4586 | break; | ||
| 4587 | case 37: | ||
| 4588 | case 21: | ||
| 4589 | case 101: | ||
| 4590 | case 85: | ||
| 4591 | case 165: | ||
| 4592 | case 149: | ||
| 4593 | case 229: | ||
| 4594 | case 213: | ||
| 4595 | block = "DB_HTILE"; | ||
| 4596 | break; | ||
| 4597 | case 39: | ||
| 4598 | case 23: | ||
| 4599 | case 103: | ||
| 4600 | case 87: | ||
| 4601 | case 167: | ||
| 4602 | case 151: | ||
| 4603 | case 231: | ||
| 4604 | case 215: | ||
| 4605 | block = "DB_STEN"; | ||
| 4606 | break; | ||
| 4607 | case 72: | ||
| 4608 | case 68: | ||
| 4609 | case 8: | ||
| 4610 | case 4: | ||
| 4611 | case 136: | ||
| 4612 | case 132: | ||
| 4613 | case 200: | ||
| 4614 | case 196: | ||
| 4615 | block = "TC"; | ||
| 4616 | break; | ||
| 4617 | case 112: | ||
| 4618 | case 48: | ||
| 4619 | block = "CP"; | ||
| 4620 | break; | ||
| 4621 | case 49: | ||
| 4622 | case 177: | ||
| 4623 | case 50: | ||
| 4624 | case 178: | ||
| 4625 | block = "SH"; | ||
| 4626 | break; | ||
| 4627 | case 53: | ||
| 4628 | block = "VGT"; | ||
| 4629 | break; | ||
| 4630 | case 117: | ||
| 4631 | block = "IH"; | ||
| 4632 | break; | ||
| 4633 | case 51: | ||
| 4634 | case 115: | ||
| 4635 | block = "RLC"; | ||
| 4636 | break; | ||
| 4637 | case 119: | ||
| 4638 | case 183: | ||
| 4639 | block = "DMA0"; | ||
| 4640 | break; | ||
| 4641 | case 61: | ||
| 4642 | block = "DMA1"; | ||
| 4643 | break; | ||
| 4644 | case 248: | ||
| 4645 | case 120: | ||
| 4646 | block = "HDP"; | ||
| 4647 | break; | ||
| 4648 | default: | ||
| 4649 | block = "unknown"; | ||
| 4650 | break; | ||
| 4651 | } | ||
| 4652 | } | ||
| 4653 | |||
| 4654 | printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n", | ||
| 4655 | protections, vmid, addr, | ||
| 4656 | (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read", | ||
| 4657 | block, mc_id); | ||
| 4658 | } | ||
| 4659 | |||
| 4660 | /** | ||
| 4404 | * si_vm_set_page - update the page tables using the CP | 4661 | * si_vm_set_page - update the page tables using the CP |
| 4405 | * | 4662 | * |
| 4406 | * @rdev: radeon_device pointer | 4663 | * @rdev: radeon_device pointer |
| @@ -4962,14 +5219,12 @@ static void si_enable_mc_ls(struct radeon_device *rdev, | |||
| 4962 | 5219 | ||
| 4963 | static void si_init_cg(struct radeon_device *rdev) | 5220 | static void si_init_cg(struct radeon_device *rdev) |
| 4964 | { | 5221 | { |
| 4965 | bool has_uvd = true; | ||
| 4966 | |||
| 4967 | si_enable_mgcg(rdev, true); | 5222 | si_enable_mgcg(rdev, true); |
| 4968 | si_enable_cgcg(rdev, true); | 5223 | si_enable_cgcg(rdev, false); |
| 4969 | /* disable MC LS on Tahiti */ | 5224 | /* disable MC LS on Tahiti */ |
| 4970 | if (rdev->family == CHIP_TAHITI) | 5225 | if (rdev->family == CHIP_TAHITI) |
| 4971 | si_enable_mc_ls(rdev, false); | 5226 | si_enable_mc_ls(rdev, false); |
| 4972 | if (has_uvd) { | 5227 | if (rdev->has_uvd) { |
| 4973 | si_enable_uvd_mgcg(rdev, true); | 5228 | si_enable_uvd_mgcg(rdev, true); |
| 4974 | si_init_uvd_internal_cg(rdev); | 5229 | si_init_uvd_internal_cg(rdev); |
| 4975 | } | 5230 | } |
| @@ -4977,9 +5232,7 @@ static void si_init_cg(struct radeon_device *rdev) | |||
| 4977 | 5232 | ||
| 4978 | static void si_fini_cg(struct radeon_device *rdev) | 5233 | static void si_fini_cg(struct radeon_device *rdev) |
| 4979 | { | 5234 | { |
| 4980 | bool has_uvd = true; | 5235 | if (rdev->has_uvd) |
| 4981 | |||
| 4982 | if (has_uvd) | ||
| 4983 | si_enable_uvd_mgcg(rdev, false); | 5236 | si_enable_uvd_mgcg(rdev, false); |
| 4984 | si_enable_cgcg(rdev, false); | 5237 | si_enable_cgcg(rdev, false); |
| 4985 | si_enable_mgcg(rdev, false); | 5238 | si_enable_mgcg(rdev, false); |
| @@ -4988,11 +5241,11 @@ static void si_fini_cg(struct radeon_device *rdev) | |||
| 4988 | static void si_init_pg(struct radeon_device *rdev) | 5241 | static void si_init_pg(struct radeon_device *rdev) |
| 4989 | { | 5242 | { |
| 4990 | bool has_pg = false; | 5243 | bool has_pg = false; |
| 4991 | 5244 | #if 0 | |
| 4992 | /* only cape verde supports PG */ | 5245 | /* only cape verde supports PG */ |
| 4993 | if (rdev->family == CHIP_VERDE) | 5246 | if (rdev->family == CHIP_VERDE) |
| 4994 | has_pg = true; | 5247 | has_pg = true; |
| 4995 | 5248 | #endif | |
| 4996 | if (has_pg) { | 5249 | if (has_pg) { |
| 4997 | si_init_ao_cu_mask(rdev); | 5250 | si_init_ao_cu_mask(rdev); |
| 4998 | si_init_dma_pg(rdev); | 5251 | si_init_dma_pg(rdev); |
| @@ -5766,6 +6019,7 @@ int si_irq_process(struct radeon_device *rdev) | |||
| 5766 | u32 ring_index; | 6019 | u32 ring_index; |
| 5767 | bool queue_hotplug = false; | 6020 | bool queue_hotplug = false; |
| 5768 | bool queue_thermal = false; | 6021 | bool queue_thermal = false; |
| 6022 | u32 status, addr; | ||
| 5769 | 6023 | ||
| 5770 | if (!rdev->ih.enabled || rdev->shutdown) | 6024 | if (!rdev->ih.enabled || rdev->shutdown) |
| 5771 | return IRQ_NONE; | 6025 | return IRQ_NONE; |
| @@ -6001,11 +6255,14 @@ restart_ih: | |||
| 6001 | break; | 6255 | break; |
| 6002 | case 146: | 6256 | case 146: |
| 6003 | case 147: | 6257 | case 147: |
| 6258 | addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); | ||
| 6259 | status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS); | ||
| 6004 | dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); | 6260 | dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); |
| 6005 | dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", | 6261 | dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", |
| 6006 | RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); | 6262 | addr); |
| 6007 | dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", | 6263 | dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", |
| 6008 | RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); | 6264 | status); |
| 6265 | si_vm_decode_fault(rdev, status, addr); | ||
| 6009 | /* reset addr and status */ | 6266 | /* reset addr and status */ |
| 6010 | WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); | 6267 | WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); |
| 6011 | break; | 6268 | break; |
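The handler latches the fault registers into locals before printing, decoding, and acknowledging, so the decode works on one consistent snapshot rather than re-reading registers that a subsequent fault may re-latch. The resulting pattern, condensed:

    addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
    status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
    /* report and decode using the snapshot */
    si_vm_decode_fault(rdev, status, addr);
    /* only then clear the latched address/status */
    WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);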
| @@ -6165,6 +6422,8 @@ static int si_startup(struct radeon_device *rdev) | |||
| 6165 | /* enable aspm */ | 6422 | /* enable aspm */ |
| 6166 | si_program_aspm(rdev); | 6423 | si_program_aspm(rdev); |
| 6167 | 6424 | ||
| 6425 | si_mc_program(rdev); | ||
| 6426 | |||
| 6168 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || | 6427 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || |
| 6169 | !rdev->rlc_fw || !rdev->mc_fw) { | 6428 | !rdev->rlc_fw || !rdev->mc_fw) { |
| 6170 | r = si_init_microcode(rdev); | 6429 | r = si_init_microcode(rdev); |
| @@ -6184,7 +6443,6 @@ static int si_startup(struct radeon_device *rdev) | |||
| 6184 | if (r) | 6443 | if (r) |
| 6185 | return r; | 6444 | return r; |
| 6186 | 6445 | ||
| 6187 | si_mc_program(rdev); | ||
| 6188 | r = si_pcie_gart_enable(rdev); | 6446 | r = si_pcie_gart_enable(rdev); |
| 6189 | if (r) | 6447 | if (r) |
| 6190 | return r; | 6448 | return r; |
| @@ -6368,7 +6626,7 @@ int si_suspend(struct radeon_device *rdev) | |||
| 6368 | si_cp_enable(rdev, false); | 6626 | si_cp_enable(rdev, false); |
| 6369 | cayman_dma_stop(rdev); | 6627 | cayman_dma_stop(rdev); |
| 6370 | if (rdev->has_uvd) { | 6628 | if (rdev->has_uvd) { |
| 6371 | r600_uvd_rbc_stop(rdev); | 6629 | r600_uvd_stop(rdev); |
| 6372 | radeon_uvd_suspend(rdev); | 6630 | radeon_uvd_suspend(rdev); |
| 6373 | } | 6631 | } |
| 6374 | si_irq_suspend(rdev); | 6632 | si_irq_suspend(rdev); |
| @@ -6510,8 +6768,10 @@ void si_fini(struct radeon_device *rdev) | |||
| 6510 | radeon_vm_manager_fini(rdev); | 6768 | radeon_vm_manager_fini(rdev); |
| 6511 | radeon_ib_pool_fini(rdev); | 6769 | radeon_ib_pool_fini(rdev); |
| 6512 | radeon_irq_kms_fini(rdev); | 6770 | radeon_irq_kms_fini(rdev); |
| 6513 | if (rdev->has_uvd) | 6771 | if (rdev->has_uvd) { |
| 6772 | r600_uvd_stop(rdev); | ||
| 6514 | radeon_uvd_fini(rdev); | 6773 | radeon_uvd_fini(rdev); |
| 6774 | } | ||
| 6515 | si_pcie_gart_fini(rdev); | 6775 | si_pcie_gart_fini(rdev); |
| 6516 | r600_vram_scratch_fini(rdev); | 6776 | r600_vram_scratch_fini(rdev); |
| 6517 | radeon_gem_fini(rdev); | 6777 | radeon_gem_fini(rdev); |
| @@ -6796,6 +7056,9 @@ static void si_program_aspm(struct radeon_device *rdev) | |||
| 6796 | bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false; | 7056 | bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false; |
| 6797 | bool disable_clkreq = false; | 7057 | bool disable_clkreq = false; |
| 6798 | 7058 | ||
| 7059 | if (radeon_aspm == 0) | ||
| 7060 | return; | ||
| 7061 | |||
| 6799 | if (!(rdev->flags & RADEON_IS_PCIE)) | 7062 | if (!(rdev->flags & RADEON_IS_PCIE)) |
| 6800 | return; | 7063 | return; |
| 6801 | 7064 | ||
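si_program_aspm() is now gated on the radeon_aspm module parameter, assuming the usual radeon tri-state convention (-1 auto, 0 disabled, 1 enabled), so booting with radeon.aspm=0 turns the whole routine into a no-op:

    if (radeon_aspm == 0)   /* user disabled ASPM on the kernel command line */
        return;
    if (!(rdev->flags & RADEON_IS_PCIE))
        return;             /* ASPM only applies to PCIe parts */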
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 73aaa2e4c312..88699e3cd868 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
| @@ -37,8 +37,6 @@ | |||
| 37 | 37 | ||
| 38 | #define SMC_RAM_END 0x20000 | 38 | #define SMC_RAM_END 0x20000 |
| 39 | 39 | ||
| 40 | #define DDR3_DRAM_ROWS 0x2000 | ||
| 41 | |||
| 42 | #define SCLK_MIN_DEEPSLEEP_FREQ 1350 | 40 | #define SCLK_MIN_DEEPSLEEP_FREQ 1350 |
| 43 | 41 | ||
| 44 | static const struct si_cac_config_reg cac_weights_tahiti[] = | 42 | static const struct si_cac_config_reg cac_weights_tahiti[] = |
| @@ -1767,8 +1765,9 @@ static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coe | |||
| 1767 | { | 1765 | { |
| 1768 | s64 kt, kv, leakage_w, i_leakage, vddc; | 1766 | s64 kt, kv, leakage_w, i_leakage, vddc; |
| 1769 | s64 temperature, t_slope, t_intercept, av, bv, t_ref; | 1767 | s64 temperature, t_slope, t_intercept, av, bv, t_ref; |
| 1768 | s64 tmp; | ||
| 1770 | 1769 | ||
| 1771 | i_leakage = drm_int2fixp(ileakage / 100); | 1770 | i_leakage = div64_s64(drm_int2fixp(ileakage), 100); |
| 1772 | vddc = div64_s64(drm_int2fixp(v), 1000); | 1771 | vddc = div64_s64(drm_int2fixp(v), 1000); |
| 1773 | temperature = div64_s64(drm_int2fixp(t), 1000); | 1772 | temperature = div64_s64(drm_int2fixp(t), 1000); |
| 1774 | 1773 | ||
| @@ -1778,8 +1777,9 @@ static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coe | |||
| 1778 | bv = div64_s64(drm_int2fixp(coeff->bv), 100000000); | 1777 | bv = div64_s64(drm_int2fixp(coeff->bv), 100000000); |
| 1779 | t_ref = drm_int2fixp(coeff->t_ref); | 1778 | t_ref = drm_int2fixp(coeff->t_ref); |
| 1780 | 1779 | ||
| 1781 | kt = drm_fixp_div(drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, temperature)), | 1780 | tmp = drm_fixp_mul(t_slope, vddc) + t_intercept; |
| 1782 | drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, t_ref))); | 1781 | kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature)); |
| 1782 | kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref))); | ||
| 1783 | kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc))); | 1783 | kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc))); |
| 1784 | 1784 | ||
| 1785 | leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc); | 1785 | leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc); |
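The leakage fix converts to 32.32 fixed point before dividing: drm_int2fixp(x) is just ((s64)x) << 32, so the old drm_int2fixp(ileakage / 100) threw away the fractional part in integer math. A worked example under that definition:

    s64 old_val = drm_int2fixp(150 / 100);           /* 150/100 = 1 -> 1.0  */
    s64 new_val = div64_s64(drm_int2fixp(150), 100); /* (150 << 32)/100 -> 1.5 */

Splitting the kt expression through tmp also computes drm_fixp_mul(t_slope, vddc) + t_intercept once instead of twice.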
| @@ -1931,6 +1931,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev) | |||
| 1931 | si_pi->cac_override = cac_override_pitcairn; | 1931 | si_pi->cac_override = cac_override_pitcairn; |
| 1932 | si_pi->powertune_data = &powertune_data_pitcairn; | 1932 | si_pi->powertune_data = &powertune_data_pitcairn; |
| 1933 | si_pi->dte_data = dte_data_pitcairn; | 1933 | si_pi->dte_data = dte_data_pitcairn; |
| 1934 | break; | ||
| 1934 | } | 1935 | } |
| 1935 | } else if (rdev->family == CHIP_VERDE) { | 1936 | } else if (rdev->family == CHIP_VERDE) { |
| 1936 | si_pi->lcac_config = lcac_cape_verde; | 1937 | si_pi->lcac_config = lcac_cape_verde; |
| @@ -1941,6 +1942,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev) | |||
| 1941 | case 0x683B: | 1942 | case 0x683B: |
| 1942 | case 0x683F: | 1943 | case 0x683F: |
| 1943 | case 0x6829: | 1944 | case 0x6829: |
| 1945 | case 0x6835: | ||
| 1944 | si_pi->cac_weights = cac_weights_cape_verde_pro; | 1946 | si_pi->cac_weights = cac_weights_cape_verde_pro; |
| 1945 | si_pi->dte_data = dte_data_cape_verde; | 1947 | si_pi->dte_data = dte_data_cape_verde; |
| 1946 | break; | 1948 | break; |
| @@ -2901,7 +2903,8 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
| 2901 | { | 2903 | { |
| 2902 | struct ni_ps *ps = ni_get_ps(rps); | 2904 | struct ni_ps *ps = ni_get_ps(rps); |
| 2903 | struct radeon_clock_and_voltage_limits *max_limits; | 2905 | struct radeon_clock_and_voltage_limits *max_limits; |
| 2904 | bool disable_mclk_switching; | 2906 | bool disable_mclk_switching = false; |
| 2907 | bool disable_sclk_switching = false; | ||
| 2905 | u32 mclk, sclk; | 2908 | u32 mclk, sclk; |
| 2906 | u16 vddc, vddci; | 2909 | u16 vddc, vddci; |
| 2907 | int i; | 2910 | int i; |
| @@ -2909,8 +2912,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
| 2909 | if ((rdev->pm.dpm.new_active_crtc_count > 1) || | 2912 | if ((rdev->pm.dpm.new_active_crtc_count > 1) || |
| 2910 | ni_dpm_vblank_too_short(rdev)) | 2913 | ni_dpm_vblank_too_short(rdev)) |
| 2911 | disable_mclk_switching = true; | 2914 | disable_mclk_switching = true; |
| 2912 | else | 2915 | |
| 2913 | disable_mclk_switching = false; | 2916 | if (rps->vclk || rps->dclk) { |
| 2917 | disable_mclk_switching = true; | ||
| 2918 | disable_sclk_switching = true; | ||
| 2919 | } | ||
| 2914 | 2920 | ||
| 2915 | if (rdev->pm.dpm.ac_power) | 2921 | if (rdev->pm.dpm.ac_power) |
| 2916 | max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; | 2922 | max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; |
| @@ -2938,27 +2944,43 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
| 2938 | 2944 | ||
| 2939 | if (disable_mclk_switching) { | 2945 | if (disable_mclk_switching) { |
| 2940 | mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; | 2946 | mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; |
| 2941 | sclk = ps->performance_levels[0].sclk; | ||
| 2942 | vddc = ps->performance_levels[0].vddc; | ||
| 2943 | vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; | 2947 | vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; |
| 2944 | } else { | 2948 | } else { |
| 2945 | sclk = ps->performance_levels[0].sclk; | ||
| 2946 | mclk = ps->performance_levels[0].mclk; | 2949 | mclk = ps->performance_levels[0].mclk; |
| 2947 | vddc = ps->performance_levels[0].vddc; | ||
| 2948 | vddci = ps->performance_levels[0].vddci; | 2950 | vddci = ps->performance_levels[0].vddci; |
| 2949 | } | 2951 | } |
| 2950 | 2952 | ||
| 2953 | if (disable_sclk_switching) { | ||
| 2954 | sclk = ps->performance_levels[ps->performance_level_count - 1].sclk; | ||
| 2955 | vddc = ps->performance_levels[ps->performance_level_count - 1].vddc; | ||
| 2956 | } else { | ||
| 2957 | sclk = ps->performance_levels[0].sclk; | ||
| 2958 | vddc = ps->performance_levels[0].vddc; | ||
| 2959 | } | ||
| 2960 | |||
| 2951 | /* adjusted low state */ | 2961 | /* adjusted low state */ |
| 2952 | ps->performance_levels[0].sclk = sclk; | 2962 | ps->performance_levels[0].sclk = sclk; |
| 2953 | ps->performance_levels[0].mclk = mclk; | 2963 | ps->performance_levels[0].mclk = mclk; |
| 2954 | ps->performance_levels[0].vddc = vddc; | 2964 | ps->performance_levels[0].vddc = vddc; |
| 2955 | ps->performance_levels[0].vddci = vddci; | 2965 | ps->performance_levels[0].vddci = vddci; |
| 2956 | 2966 | ||
| 2957 | for (i = 1; i < ps->performance_level_count; i++) { | 2967 | if (disable_sclk_switching) { |
| 2958 | if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk) | 2968 | sclk = ps->performance_levels[0].sclk; |
| 2959 | ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk; | 2969 | for (i = 1; i < ps->performance_level_count; i++) { |
| 2960 | if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc) | 2970 | if (sclk < ps->performance_levels[i].sclk) |
| 2961 | ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; | 2971 | sclk = ps->performance_levels[i].sclk; |
| 2972 | } | ||
| 2973 | for (i = 0; i < ps->performance_level_count; i++) { | ||
| 2974 | ps->performance_levels[i].sclk = sclk; | ||
| 2975 | ps->performance_levels[i].vddc = vddc; | ||
| 2976 | } | ||
| 2977 | } else { | ||
| 2978 | for (i = 1; i < ps->performance_level_count; i++) { | ||
| 2979 | if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk) | ||
| 2980 | ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk; | ||
| 2981 | if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc) | ||
| 2982 | ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; | ||
| 2983 | } | ||
| 2962 | } | 2984 | } |
| 2963 | 2985 | ||
| 2964 | if (disable_mclk_switching) { | 2986 | if (disable_mclk_switching) { |
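States that carry UVD clocks (non-zero vclk/dclk) now pin both memory and engine clock switching; sclk and vddc are then taken from the state's highest level and applied to every level. A condensed sketch (count stands for ps->performance_level_count; the real hunk also takes the max sclk across levels in case they are not monotonic):

    if (rps->vclk || rps->dclk) {
        disable_mclk_switching = true;   /* don't disturb UVD decode */
        disable_sclk_switching = true;
    }
    if (disable_sclk_switching) {
        sclk = ps->performance_levels[count - 1].sclk;
        vddc = ps->performance_levels[count - 1].vddc;
        for (i = 0; i < count; i++) {
            ps->performance_levels[i].sclk = sclk;  /* flatten: one sclk */
            ps->performance_levels[i].vddc = vddc;  /* and one vddc */
        }
    }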
| @@ -3237,10 +3259,10 @@ int si_dpm_force_performance_level(struct radeon_device *rdev, | |||
| 3237 | { | 3259 | { |
| 3238 | struct radeon_ps *rps = rdev->pm.dpm.current_ps; | 3260 | struct radeon_ps *rps = rdev->pm.dpm.current_ps; |
| 3239 | struct ni_ps *ps = ni_get_ps(rps); | 3261 | struct ni_ps *ps = ni_get_ps(rps); |
| 3240 | u32 levels; | 3262 | u32 levels = ps->performance_level_count; |
| 3241 | 3263 | ||
| 3242 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { | 3264 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { |
| 3243 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK) | 3265 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) |
| 3244 | return -EINVAL; | 3266 | return -EINVAL; |
| 3245 | 3267 | ||
| 3246 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK) | 3268 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK) |
| @@ -3249,14 +3271,13 @@ int si_dpm_force_performance_level(struct radeon_device *rdev, | |||
| 3249 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) | 3271 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) |
| 3250 | return -EINVAL; | 3272 | return -EINVAL; |
| 3251 | 3273 | ||
| 3252 | levels = ps->performance_level_count - 1; | 3274 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK) |
| 3253 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) | ||
| 3254 | return -EINVAL; | 3275 | return -EINVAL; |
| 3255 | } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { | 3276 | } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { |
| 3256 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) | 3277 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) |
| 3257 | return -EINVAL; | 3278 | return -EINVAL; |
| 3258 | 3279 | ||
| 3259 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK) | 3280 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) |
| 3260 | return -EINVAL; | 3281 | return -EINVAL; |
| 3261 | } | 3282 | } |
| 3262 | 3283 | ||
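The forced-level fix treats the PPSMC_MSG_SetEnabledLevels parameter as the number of levels the SMC may use (the exact SMC semantics are not spelled out in this patch); the old code passed 0 for HIGH and AUTO, and count-1 for LOW. The corrected message sequences, restated:

    u32 levels = ps->performance_level_count;

    /* HIGH: all levels enabled, forcing engaged */
    si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels);
    si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 1);

    /* LOW: forcing released, only the lowest level enabled */
    si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0);
    si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1);

    /* AUTO: forcing released, all levels enabled again */
    si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0);
    si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels);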
| @@ -3620,8 +3641,12 @@ static void si_enable_display_gap(struct radeon_device *rdev) | |||
| 3620 | { | 3641 | { |
| 3621 | u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); | 3642 | u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); |
| 3622 | 3643 | ||
| 3644 | tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK); | ||
| 3645 | tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) | | ||
| 3646 | DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE)); | ||
| 3647 | |||
| 3623 | tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); | 3648 | tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); |
| 3624 | tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) | | 3649 | tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) | |
| 3625 | DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE)); | 3650 | DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE)); |
| 3626 | WREG32(CG_DISPLAY_GAP_CNTL, tmp); | 3651 | WREG32(CG_DISPLAY_GAP_CNTL, tmp); |
| 3627 | } | 3652 | } |
| @@ -4036,16 +4061,15 @@ static int si_force_switch_to_arb_f0(struct radeon_device *rdev) | |||
| 4036 | static u32 si_calculate_memory_refresh_rate(struct radeon_device *rdev, | 4061 | static u32 si_calculate_memory_refresh_rate(struct radeon_device *rdev, |
| 4037 | u32 engine_clock) | 4062 | u32 engine_clock) |
| 4038 | { | 4063 | { |
| 4039 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); | ||
| 4040 | u32 dram_rows; | 4064 | u32 dram_rows; |
| 4041 | u32 dram_refresh_rate; | 4065 | u32 dram_refresh_rate; |
| 4042 | u32 mc_arb_rfsh_rate; | 4066 | u32 mc_arb_rfsh_rate; |
| 4043 | u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT; | 4067 | u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT; |
| 4044 | 4068 | ||
| 4045 | if (pi->mem_gddr5) | 4069 | if (tmp >= 4) |
| 4046 | dram_rows = 1 << (tmp + 10); | 4070 | dram_rows = 16384; |
| 4047 | else | 4071 | else |
| 4048 | dram_rows = DDR3_DRAM_ROWS; | 4072 | dram_rows = 1 << (tmp + 10); |
| 4049 | 4073 | ||
| 4050 | dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3); | 4074 | dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3); |
| 4051 | mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64; | 4075 | mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64; |
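si_calculate_memory_refresh_rate() now derives the row count from the NOOFROWS field for all memory types, clamping at 16384 rows, instead of hardcoding DDR3_DRAM_ROWS. A worked example with illustrative register values (engine_clock appears to be in 10 kHz units):

    u32 tmp = 3;                                  /* NOOFROWS field */
    u32 dram_rows = (tmp >= 4) ? 16384 : (1 << (tmp + 10));  /* 8192 */
    u32 dram_refresh_rate = 1 << (2 + 3);         /* MC_SEQ_MISC0 bits = 2 -> 32 */
    u32 engine_clock = 47500;                     /* 475 MHz */
    u32 rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
    /* = (475000 * 32 / 8192 - 32) / 64 = (1855 - 32) / 64 = 28 */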
| @@ -6013,16 +6037,11 @@ int si_dpm_set_power_state(struct radeon_device *rdev) | |||
| 6013 | return ret; | 6037 | return ret; |
| 6014 | } | 6038 | } |
| 6015 | 6039 | ||
| 6016 | #if 0 | ||
| 6017 | /* XXX */ | ||
| 6018 | ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); | 6040 | ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); |
| 6019 | if (ret) { | 6041 | if (ret) { |
| 6020 | DRM_ERROR("si_dpm_force_performance_level failed\n"); | 6042 | DRM_ERROR("si_dpm_force_performance_level failed\n"); |
| 6021 | return ret; | 6043 | return ret; |
| 6022 | } | 6044 | } |
| 6023 | #else | ||
| 6024 | rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; | ||
| 6025 | #endif | ||
| 6026 | 6045 | ||
| 6027 | return 0; | 6046 | return 0; |
| 6028 | } | 6047 | } |
| @@ -6254,9 +6273,6 @@ int si_dpm_init(struct radeon_device *rdev) | |||
| 6254 | struct evergreen_power_info *eg_pi; | 6273 | struct evergreen_power_info *eg_pi; |
| 6255 | struct ni_power_info *ni_pi; | 6274 | struct ni_power_info *ni_pi; |
| 6256 | struct si_power_info *si_pi; | 6275 | struct si_power_info *si_pi; |
| 6257 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
| 6258 | u16 data_offset, size; | ||
| 6259 | u8 frev, crev; | ||
| 6260 | struct atom_clock_dividers dividers; | 6276 | struct atom_clock_dividers dividers; |
| 6261 | int ret; | 6277 | int ret; |
| 6262 | u32 mask; | 6278 | u32 mask; |
| @@ -6347,16 +6363,7 @@ int si_dpm_init(struct radeon_device *rdev) | |||
| 6347 | si_pi->vddc_phase_shed_control = | 6363 | si_pi->vddc_phase_shed_control = |
| 6348 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT); | 6364 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT); |
| 6349 | 6365 | ||
| 6350 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 6366 | rv770_get_engine_memory_ss(rdev); |
| 6351 | &frev, &crev, &data_offset)) { | ||
| 6352 | pi->sclk_ss = true; | ||
| 6353 | pi->mclk_ss = true; | ||
| 6354 | pi->dynamic_ss = true; | ||
| 6355 | } else { | ||
| 6356 | pi->sclk_ss = false; | ||
| 6357 | pi->mclk_ss = false; | ||
| 6358 | pi->dynamic_ss = true; | ||
| 6359 | } | ||
| 6360 | 6367 | ||
| 6361 | pi->asi = RV770_ASI_DFLT; | 6368 | pi->asi = RV770_ASI_DFLT; |
| 6362 | pi->pasi = CYPRESS_HASI_DFLT; | 6369 | pi->pasi = CYPRESS_HASI_DFLT; |
| @@ -6367,8 +6374,7 @@ int si_dpm_init(struct radeon_device *rdev) | |||
| 6367 | eg_pi->sclk_deep_sleep = true; | 6374 | eg_pi->sclk_deep_sleep = true; |
| 6368 | si_pi->sclk_deep_sleep_above_low = false; | 6375 | si_pi->sclk_deep_sleep_above_low = false; |
| 6369 | 6376 | ||
| 6370 | if (pi->gfx_clock_gating && | 6377 | if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) |
| 6371 | (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) | ||
| 6372 | pi->thermal_protection = true; | 6378 | pi->thermal_protection = true; |
| 6373 | else | 6379 | else |
| 6374 | pi->thermal_protection = false; | 6380 | pi->thermal_protection = false; |
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 12a20eb77d0c..2c8da27a929f 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h | |||
| @@ -367,6 +367,20 @@ | |||
| 367 | 367 | ||
| 368 | #define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC | 368 | #define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC |
| 369 | #define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC | 369 | #define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC |
| 370 | #define PROTECTIONS_MASK (0xf << 0) | ||
| 371 | #define PROTECTIONS_SHIFT 0 | ||
| 372 | /* bit 0: range | ||
| 373 | * bit 1: pde0 | ||
| 374 | * bit 2: valid | ||
| 375 | * bit 3: read | ||
| 376 | * bit 4: write | ||
| 377 | */ | ||
| 378 | #define MEMORY_CLIENT_ID_MASK (0xff << 12) | ||
| 379 | #define MEMORY_CLIENT_ID_SHIFT 12 | ||
| 380 | #define MEMORY_CLIENT_RW_MASK (1 << 24) | ||
| 381 | #define MEMORY_CLIENT_RW_SHIFT 24 | ||
| 382 | #define FAULT_VMID_MASK (0xf << 25) | ||
| 383 | #define FAULT_VMID_SHIFT 25 | ||
| 370 | 384 | ||
| 371 | #define VM_INVALIDATE_REQUEST 0x1478 | 385 | #define VM_INVALIDATE_REQUEST 0x1478 |
| 372 | #define VM_INVALIDATE_RESPONSE 0x147c | 386 | #define VM_INVALIDATE_RESPONSE 0x147c |
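The new VM_CONTEXT1_PROTECTION_FAULT_STATUS field definitions are what make si_vm_decode_fault() possible. A worked decode of a hypothetical status word, using the masks above:

    /* status = 0x03054004
     *
     * protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT
     *             = 0x4   (valid bit)
     * mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT
     *       = 0x54 = 84   (decoded as "DB" on Tahiti)
     * rw   = (status & MEMORY_CLIENT_RW_MASK) >> MEMORY_CLIENT_RW_SHIFT
     *      = 1            (write fault)
     * vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT
     *      = 1
     */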
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c index 11b6b9924f1b..c0a850319908 100644 --- a/drivers/gpu/drm/radeon/sumo_dpm.c +++ b/drivers/gpu/drm/radeon/sumo_dpm.c | |||
| @@ -1732,7 +1732,13 @@ int sumo_dpm_init(struct radeon_device *rdev) | |||
| 1732 | pi->enable_sclk_ds = true; | 1732 | pi->enable_sclk_ds = true; |
| 1733 | pi->enable_dynamic_m3_arbiter = false; | 1733 | pi->enable_dynamic_m3_arbiter = false; |
| 1734 | pi->enable_dynamic_patch_ps = true; | 1734 | pi->enable_dynamic_patch_ps = true; |
| 1735 | pi->enable_gfx_power_gating = true; | 1735 | /* Some PALM chips don't seem to properly ungate gfx when UVD is in use; |
| 1736 | * for now just disable gfx PG. | ||
| 1737 | */ | ||
| 1738 | if (rdev->family == CHIP_PALM) | ||
| 1739 | pi->enable_gfx_power_gating = false; | ||
| 1740 | else | ||
| 1741 | pi->enable_gfx_power_gating = true; | ||
| 1736 | pi->enable_gfx_clock_gating = true; | 1742 | pi->enable_gfx_clock_gating = true; |
| 1737 | pi->enable_mg_clock_gating = true; | 1743 | pi->enable_mg_clock_gating = true; |
| 1738 | pi->enable_auto_thermal_throttling = true; | 1744 | pi->enable_auto_thermal_throttling = true; |
| @@ -1845,6 +1851,8 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev, | |||
| 1845 | return 0; | 1851 | return 0; |
| 1846 | 1852 | ||
| 1847 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { | 1853 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { |
| 1854 | if (pi->enable_boost) | ||
| 1855 | sumo_enable_boost(rdev, rps, false); | ||
| 1848 | sumo_power_level_enable(rdev, ps->num_levels - 1, true); | 1856 | sumo_power_level_enable(rdev, ps->num_levels - 1, true); |
| 1849 | sumo_set_forced_level(rdev, ps->num_levels - 1); | 1857 | sumo_set_forced_level(rdev, ps->num_levels - 1); |
| 1850 | sumo_set_forced_mode_enabled(rdev); | 1858 | sumo_set_forced_mode_enabled(rdev); |
| @@ -1855,6 +1863,8 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev, | |||
| 1855 | sumo_set_forced_mode_enabled(rdev); | 1863 | sumo_set_forced_mode_enabled(rdev); |
| 1856 | sumo_set_forced_mode(rdev, false); | 1864 | sumo_set_forced_mode(rdev, false); |
| 1857 | } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) { | 1865 | } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) { |
| 1866 | if (pi->enable_boost) | ||
| 1867 | sumo_enable_boost(rdev, rps, false); | ||
| 1858 | sumo_power_level_enable(rdev, 0, true); | 1868 | sumo_power_level_enable(rdev, 0, true); |
| 1859 | sumo_set_forced_level(rdev, 0); | 1869 | sumo_set_forced_level(rdev, 0); |
| 1860 | sumo_set_forced_mode_enabled(rdev); | 1870 | sumo_set_forced_mode_enabled(rdev); |
| @@ -1868,6 +1878,8 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev, | |||
| 1868 | for (i = 0; i < ps->num_levels; i++) { | 1878 | for (i = 0; i < ps->num_levels; i++) { |
| 1869 | sumo_power_level_enable(rdev, i, true); | 1879 | sumo_power_level_enable(rdev, i, true); |
| 1870 | } | 1880 | } |
| 1881 | if (pi->enable_boost) | ||
| 1882 | sumo_enable_boost(rdev, rps, true); | ||
| 1871 | } | 1883 | } |
| 1872 | 1884 | ||
| 1873 | rdev->pm.dpm.forced_level = level; | 1885 | rdev->pm.dpm.forced_level = level; |
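Boost is disabled before pinning a forced level and re-enabled only for AUTO, on the apparent assumption that boosting would otherwise override the forced selection. Condensed:

    if (level == RADEON_DPM_FORCED_LEVEL_HIGH ||
        level == RADEON_DPM_FORCED_LEVEL_LOW) {
        if (pi->enable_boost)
            sumo_enable_boost(rdev, rps, false);  /* no boosting while forced */
        /* pin the chosen level as in the hunks above */
    } else { /* AUTO */
        /* re-enable all levels, then allow boosting again */
        if (pi->enable_boost)
            sumo_enable_boost(rdev, rps, true);
    }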
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index ff82877de876..dc0fe09b2ba1 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c | |||
| @@ -249,8 +249,13 @@ static struct drm_driver rcar_du_driver = { | |||
| 249 | .gem_vm_ops = &drm_gem_cma_vm_ops, | 249 | .gem_vm_ops = &drm_gem_cma_vm_ops, |
| 250 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | 250 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, |
| 251 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | 251 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, |
| 252 | .gem_prime_import = drm_gem_cma_dmabuf_import, | 252 | .gem_prime_import = drm_gem_prime_import, |
| 253 | .gem_prime_export = drm_gem_cma_dmabuf_export, | 253 | .gem_prime_export = drm_gem_prime_export, |
| 254 | .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, | ||
| 255 | .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, | ||
| 256 | .gem_prime_vmap = drm_gem_cma_prime_vmap, | ||
| 257 | .gem_prime_vunmap = drm_gem_cma_prime_vunmap, | ||
| 258 | .gem_prime_mmap = drm_gem_cma_prime_mmap, | ||
| 254 | .dumb_create = rcar_du_dumb_create, | 259 | .dumb_create = rcar_du_dumb_create, |
| 255 | .dumb_map_offset = drm_gem_cma_dumb_map_offset, | 260 | .dumb_map_offset = drm_gem_cma_dumb_map_offset, |
| 256 | .dumb_destroy = drm_gem_cma_dumb_destroy, | 261 | .dumb_destroy = drm_gem_cma_dumb_destroy, |
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index edc10181f551..5f83f9a3ef59 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c | |||
| @@ -276,8 +276,13 @@ static struct drm_driver shmob_drm_driver = { | |||
| 276 | .gem_vm_ops = &drm_gem_cma_vm_ops, | 276 | .gem_vm_ops = &drm_gem_cma_vm_ops, |
| 277 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | 277 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, |
| 278 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | 278 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, |
| 279 | .gem_prime_import = drm_gem_cma_dmabuf_import, | 279 | .gem_prime_import = drm_gem_prime_import, |
| 280 | .gem_prime_export = drm_gem_cma_dmabuf_export, | 280 | .gem_prime_export = drm_gem_prime_export, |
| 281 | .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, | ||
| 282 | .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, | ||
| 283 | .gem_prime_vmap = drm_gem_cma_prime_vmap, | ||
| 284 | .gem_prime_vunmap = drm_gem_cma_prime_vunmap, | ||
| 285 | .gem_prime_mmap = drm_gem_cma_prime_mmap, | ||
| 281 | .dumb_create = drm_gem_cma_dumb_create, | 286 | .dumb_create = drm_gem_cma_dumb_create, |
| 282 | .dumb_map_offset = drm_gem_cma_dumb_map_offset, | 287 | .dumb_map_offset = drm_gem_cma_dumb_map_offset, |
| 283 | .dumb_destroy = drm_gem_cma_dumb_destroy, | 288 | .dumb_destroy = drm_gem_cma_dumb_destroy, |
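Both CMA-based KMS drivers above drop the drm_gem_cma_dmabuf_* wrappers in favor of the generic PRIME core plus the CMA sg-table backend. Any driver using the CMA GEM helpers wires it up the same way (example_cma_driver is hypothetical; the callbacks mirror the two hunks):

    static struct drm_driver example_cma_driver = {
        /* generic PRIME entry points */
        .gem_prime_import          = drm_gem_prime_import,
        .gem_prime_export          = drm_gem_prime_export,
        /* CMA hooks the generic PRIME code calls into */
        .gem_prime_get_sg_table    = drm_gem_cma_prime_get_sg_table,
        .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
        .gem_prime_vmap            = drm_gem_cma_prime_vmap,
        .gem_prime_vunmap          = drm_gem_cma_prime_vunmap,
        .gem_prime_mmap            = drm_gem_cma_prime_mmap,
    };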
