Diffstat (limited to 'drivers/gpu/drm')
118 files changed, 1990 insertions, 1132 deletions
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 98d670825a1a..6e8887fe6c1b 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -323,6 +323,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,

         astbo->gem.driver_private = NULL;
         astbo->bo.bdev = &ast->ttm.bdev;
+        astbo->bo.bdev->dev_mapping = dev->dev_mapping;

         ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 0047012045c2..69fd8f1ac8df 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -328,6 +328,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,

         cirrusbo->gem.driver_private = NULL;
         cirrusbo->bo.bdev = &cirrus->ttm.bdev;
+        cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;

         cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 738a4294d820..6a647493ca7f 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -677,6 +677,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                                 /* don't break so fail path works correct */
                                 fail = 1;
                                 break;
+
+                        if (connector->dpms != DRM_MODE_DPMS_ON) {
+                                DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
+                                mode_changed = true;
+                        }
                 }
         }

@@ -754,6 +759,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                                 ret = -EINVAL;
                                 goto fail;
                         }
+                        DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+                        for (i = 0; i < set->num_connectors; i++) {
+                                DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+                                              drm_get_connector_name(set->connectors[i]));
+                                set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
+                        }
                 }
                 drm_helper_disable_unused_functions(dev);
         } else if (fb_changed) {
@@ -771,22 +782,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                 }
         }

-        /*
-         * crtc set_config helpers implicit set the crtc and all connected
-         * encoders to DPMS on for a full mode set. But for just an fb update it
-         * doesn't do that. To not confuse userspace, do an explicit DPMS_ON
-         * unconditionally. This will also ensure driver internal dpms state is
-         * consistent again.
-         */
-        if (set->crtc->enabled) {
-                DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
-                for (i = 0; i < set->num_connectors; i++) {
-                        DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
-                                      drm_get_connector_name(set->connectors[i]));
-                        set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
-                }
-        }
-
         kfree(save_connectors);
         kfree(save_encoders);
         kfree(save_crtcs);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 8bcce7866d36..f92da0a32f0d 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -708,7 +708,10 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
         /* Subtract time delta from raw timestamp to get final
          * vblank_time timestamp for end of vblank.
          */
-        etime = ktime_sub_ns(etime, delta_ns);
+        if (delta_ns < 0)
+                etime = ktime_add_ns(etime, -delta_ns);
+        else
+                etime = ktime_sub_ns(etime, delta_ns);
         *vblank_time = ktime_to_timeval(etime);

         DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
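The drm_irq.c change is needed because ktime_sub_ns() takes an unsigned nanosecond count, so a negative delta_ns cannot simply be subtracted. A minimal sketch of the same sign-aware adjustment as a hypothetical standalone helper (adjust_by_delta is illustrative, not a kernel API):

static inline ktime_t adjust_by_delta(ktime_t etime, s64 delta_ns)
{
        /* Negative deltas are negated and added; ktime_add_ns() and
         * ktime_sub_ns() both expect an unsigned nanosecond argument. */
        if (delta_ns < 0)
                return ktime_add_ns(etime, -delta_ns);
        return ktime_sub_ns(etime, delta_ns);
}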
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 95c75edef01a..30ef41bcd7b8 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -15,7 +15,6 @@

 #include <linux/kernel.h>
 #include <linux/i2c.h>
-#include <linux/module.h>


 #include "exynos_drm_drv.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 61b094f689a7..6e047bd53e2f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -12,7 +12,6 @@
  *
  */
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 3e106beca5b6..1c263dac3c1c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -14,7 +14,6 @@
 #include <drm/drmP.h>

 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/of_device.h>
@@ -130,7 +129,6 @@ static const struct of_device_id fimd_driver_dt_match[] = {
           .data = &exynos5_fimd_driver_data },
         {},
 };
-MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
 #endif

 static inline struct fimd_driver_data *drm_fimd_get_driver_data(
@@ -1082,7 +1080,6 @@ static struct platform_device_id fimd_driver_ids[] = {
         },
         {},
 };
-MODULE_DEVICE_TABLE(platform, fimd_driver_ids);

 static const struct dev_pm_ops fimd_pm_ops = {
         SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 42a5a5466075..eddea4941483 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -8,7 +8,6 @@
  */

 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
@@ -806,9 +805,20 @@ static void g2d_dma_start(struct g2d_data *g2d,
         struct g2d_cmdlist_node *node =
                                 list_first_entry(&runqueue_node->run_cmdlist,
                                                 struct g2d_cmdlist_node, list);
+        int ret;
+
+        ret = pm_runtime_get_sync(g2d->dev);
+        if (ret < 0) {
+                dev_warn(g2d->dev, "failed pm power on.\n");
+                return;
+        }

-        pm_runtime_get_sync(g2d->dev);
-        clk_enable(g2d->gate_clk);
+        ret = clk_prepare_enable(g2d->gate_clk);
+        if (ret < 0) {
+                dev_warn(g2d->dev, "failed to enable clock.\n");
+                pm_runtime_put_sync(g2d->dev);
+                return;
+        }

         writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
         writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
@@ -861,7 +871,7 @@ static void g2d_runqueue_worker(struct work_struct *work)
                                                 runqueue_work);

         mutex_lock(&g2d->runqueue_mutex);
-        clk_disable(g2d->gate_clk);
+        clk_disable_unprepare(g2d->gate_clk);
         pm_runtime_put_sync(g2d->dev);

         complete(&g2d->runqueue_node->complete);
@@ -1521,7 +1531,6 @@ static const struct of_device_id exynos_g2d_match[] = {
         { .compatible = "samsung,exynos5250-g2d" },
         {},
 };
-MODULE_DEVICE_TABLE(of, exynos_g2d_match);
 #endif

 struct platform_driver g2d_driver = {
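The g2d_dma_start() hunk above follows the usual acquire-and-unwind pattern: check the return value of pm_runtime_get_sync(), then of clk_prepare_enable(), and release the earlier resource if a later step fails. A hedged sketch of that pattern with a made-up device structure (struct my_g2d and its fields are illustrative only; needs <linux/pm_runtime.h> and <linux/clk.h>):

struct my_g2d {
        struct device *dev;
        struct clk *gate_clk;
};

static int my_g2d_power_on(struct my_g2d *g2d)
{
        int ret;

        ret = pm_runtime_get_sync(g2d->dev);
        if (ret < 0)
                return ret;

        ret = clk_prepare_enable(g2d->gate_clk);
        if (ret < 0) {
                pm_runtime_put_sync(g2d->dev);  /* undo the runtime PM get */
                return ret;
        }

        return 0;
}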
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 472e3b25e7f2..90b8a1a5344c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -12,7 +12,6 @@
  *
  */
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index aaa550d622f0..8d3bc01d6834 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -15,7 +15,6 @@

 #include <linux/kernel.h>
 #include <linux/wait.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>

diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index b1ef8e7ff9c9..d2b6ab4def93 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -12,7 +12,6 @@
  *
  */
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/types.h>
 #include <linux/clk.h>
@@ -342,10 +341,10 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
          */
         ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
                                 prop_list->ipp_id);
-        if (!ippdrv) {
+        if (IS_ERR(ippdrv)) {
                 DRM_ERROR("not found ipp%d driver.\n",
                                 prop_list->ipp_id);
-                return -EINVAL;
+                return PTR_ERR(ippdrv);
         }

         prop_list = ippdrv->prop_list;
@@ -970,9 +969,9 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
         /* find command node */
         c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
                 qbuf->prop_id);
-        if (!c_node) {
+        if (IS_ERR(c_node)) {
                 DRM_ERROR("failed to get command node.\n");
-                return -EFAULT;
+                return PTR_ERR(c_node);
         }

         /* buffer control */
@@ -1106,9 +1105,9 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,

         c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
                 cmd_ctrl->prop_id);
-        if (!c_node) {
+        if (IS_ERR(c_node)) {
                 DRM_ERROR("invalid command node list.\n");
-                return -EINVAL;
+                return PTR_ERR(c_node);
         }

         if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
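The exynos_drm_ipp.c hunks adapt the callers to the ERR_PTR convention: ipp_find_obj() now returns an error encoded in the pointer, so callers test with IS_ERR() and propagate PTR_ERR() instead of picking an errno of their own. A minimal sketch of that convention (struct thing, lookup_thing() and find_thing() are hypothetical names; the macros come from <linux/err.h>):

struct thing;
struct thing *lookup_thing(u32 id);     /* hypothetical raw lookup */

static struct thing *find_thing(u32 id)
{
        struct thing *t = lookup_thing(id);

        if (!t)
                return ERR_PTR(-ENODEV);        /* error travels in the pointer */
        return t;
}

static int use_thing(u32 id)
{
        struct thing *t = find_thing(id);

        if (IS_ERR(t))
                return PTR_ERR(t);      /* propagate the encoded errno */
        /* ... use t ... */
        return 0;
}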
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 427640aa5148..49669aa24c45 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -10,7 +10,6 @@
  */

 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 41cc74d83e4e..c57c56519add 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -13,7 +13,6 @@
 #include <drm/drmP.h>

 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>

 #include <drm/exynos_drm.h>
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 62ef5971ac3c..2f5c6942c968 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -24,7 +24,6 @@
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/i2c.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index ef04255076c7..6e320ae9afed 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -15,7 +15,6 @@

 #include <linux/kernel.h>
 #include <linux/i2c.h>
-#include <linux/module.h>

 #include "exynos_drm_drv.h"
 #include "exynos_hdmi.h"
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 42ffb71c63bc..c9a137caea41 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -23,7 +23,6 @@
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/i2c.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 19e36603b23b..3bc8414533c9 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -500,7 +500,8 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
                                   &status))
                 goto log_fail;

-        while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+        while ((status == SDVO_CMD_STATUS_PENDING ||
+                status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && retry--) {
                 udelay(15);
                 if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
                                               SDVO_I2C_CMD_STATUS,
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index cf188ab7051a..f4669802a0fb 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1495,6 +1495,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
         dev_priv->dev = dev;
         dev_priv->info = info;

+        spin_lock_init(&dev_priv->irq_lock);
+        spin_lock_init(&dev_priv->gpu_error.lock);
+        spin_lock_init(&dev_priv->rps.lock);
+        spin_lock_init(&dev_priv->gt_lock);
+        spin_lock_init(&dev_priv->backlight.lock);
+        mutex_init(&dev_priv->dpio_lock);
+        mutex_init(&dev_priv->rps.hw_lock);
+        mutex_init(&dev_priv->modeset_restore_lock);
+
         i915_dump_device_info(dev_priv);

         if (i915_get_bridge_dev(dev)) {
@@ -1585,6 +1594,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
         intel_detect_pch(dev);

         intel_irq_init(dev);
+        intel_pm_init(dev);
+        intel_gt_sanitize(dev);
         intel_gt_init(dev);

         /* Try to make sure MCHBAR is enabled before poking at it */
@@ -1610,15 +1621,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
         if (!IS_I945G(dev) && !IS_I945GM(dev))
                 pci_enable_msi(dev->pdev);

-        spin_lock_init(&dev_priv->irq_lock);
-        spin_lock_init(&dev_priv->gpu_error.lock);
-        spin_lock_init(&dev_priv->rps.lock);
-        spin_lock_init(&dev_priv->backlight.lock);
-        mutex_init(&dev_priv->dpio_lock);
-
-        mutex_init(&dev_priv->rps.hw_lock);
-        mutex_init(&dev_priv->modeset_restore_lock);
-
         dev_priv->num_plane = 1;
         if (IS_VALLEYVIEW(dev))
                 dev_priv->num_plane = 2;
@@ -1648,7 +1650,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
         if (INTEL_INFO(dev)->num_pipes) {
                 /* Must be done after probing outputs */
                 intel_opregion_init(dev);
-                acpi_video_register_with_quirks();
+                acpi_video_register();
         }

         if (IS_GEN5(dev))
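The i915_dma.c hunks hoist all of the spin_lock_init()/mutex_init() calls to the top of i915_driver_load(); with the register accessors now taking dev_priv->gt_lock on every access (see the i915_drv.c hunks below), the locks have to exist before the first MMIO read. A trivial ordering sketch (struct dev_state and driver_load_sketch are illustrative names only):

struct dev_state {
        spinlock_t gt_lock;
};

static void driver_load_sketch(struct dev_state *s)
{
        /* Initialise locks first, before any path that may take them. */
        spin_lock_init(&s->gt_lock);

        /* ... only afterwards touch hardware through the locked accessors ... */
}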
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f4af1ca0fb62..45b3c030f483 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -706,7 +706,7 @@ static int i915_drm_thaw(struct drm_device *dev)
 {
         int error = 0;

-        intel_gt_reset(dev);
+        intel_gt_sanitize(dev);

         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                 mutex_lock(&dev->struct_mutex);
@@ -732,7 +732,7 @@ int i915_resume(struct drm_device *dev)

         pci_set_master(dev->pdev);

-        intel_gt_reset(dev);
+        intel_gt_sanitize(dev);

         /*
          * Platforms with opregion should have sane BIOS, older ones (gen3 and
@@ -1253,21 +1253,21 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)

 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+        unsigned long irqflags; \
         u##x val = 0; \
+        spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
         if (IS_GEN5(dev_priv->dev)) \
                 ilk_dummy_write(dev_priv); \
         if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
-                unsigned long irqflags; \
-                spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
                 if (dev_priv->forcewake_count == 0) \
                         dev_priv->gt.force_wake_get(dev_priv); \
                 val = read##y(dev_priv->regs + reg); \
                 if (dev_priv->forcewake_count == 0) \
                         dev_priv->gt.force_wake_put(dev_priv); \
-                spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
         } else { \
                 val = read##y(dev_priv->regs + reg); \
         } \
+        spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
         trace_i915_reg_rw(false, reg, val, sizeof(val)); \
         return val; \
 }
@@ -1280,8 +1280,10 @@ __i915_read(64, q)

 #define __i915_write(x, y) \
 void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+        unsigned long irqflags; \
         u32 __fifo_ret = 0; \
         trace_i915_reg_rw(true, reg, val, sizeof(val)); \
+        spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
         if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
         } \
@@ -1293,6 +1295,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
                 gen6_gt_check_fifodbg(dev_priv); \
         } \
         hsw_unclaimed_reg_check(dev_priv, reg); \
+        spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
 }
 __i915_write(8, b)
 __i915_write(16, w)
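Written out by hand, the read macro after this change expands to roughly the following: dev_priv->gt_lock is now held across the whole MMIO access rather than only around the forcewake get/put. This is a sketch only; the real macro also does a dummy write on gen5, records a tracepoint, and is generated for 8/16/32/64-bit widths:

static u32 i915_read32_sketch(struct drm_i915_private *dev_priv, u32 reg)
{
        unsigned long irqflags;
        u32 val;

        /* The lock now covers the register read itself, not just forcewake. */
        spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
        if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
                if (dev_priv->forcewake_count == 0)
                        dev_priv->gt.force_wake_get(dev_priv);
                val = readl(dev_priv->regs + reg);
                if (dev_priv->forcewake_count == 0)
                        dev_priv->gt.force_wake_put(dev_priv);
        } else {
                val = readl(dev_priv->regs + reg);
        }
        spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);

        return val;
}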
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a416645bcd23..1929bffc1c77 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -555,6 +555,7 @@ enum intel_sbi_destination {
 #define QUIRK_PIPEA_FORCE (1<<0)
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
+#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)

 struct intel_fbdev;
 struct intel_fbc_work;
@@ -1581,9 +1582,10 @@ void i915_hangcheck_elapsed(unsigned long data);
 void i915_handle_error(struct drm_device *dev, bool wedged);

 extern void intel_irq_init(struct drm_device *dev);
+extern void intel_pm_init(struct drm_device *dev);
 extern void intel_hpd_init(struct drm_device *dev);
 extern void intel_gt_init(struct drm_device *dev);
-extern void intel_gt_reset(struct drm_device *dev);
+extern void intel_gt_sanitize(struct drm_device *dev);

 void i915_error_state_free(struct kref *error_ref);

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 97afd2639fb6..d9e2208cfe98 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2258,7 +2258,17 @@ void i915_gem_restore_fences(struct drm_device *dev)

         for (i = 0; i < dev_priv->num_fence_regs; i++) {
                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
-                i915_gem_write_fence(dev, i, reg->obj);
+
+                /*
+                 * Commit delayed tiling changes if we have an object still
+                 * attached to the fence, otherwise just clear the fence.
+                 */
+                if (reg->obj) {
+                        i915_gem_object_update_fence(reg->obj, reg,
+                                                     reg->obj->tiling_mode);
+                } else {
+                        i915_gem_write_fence(dev, i, NULL);
+                }
         }
 }

@@ -2795,6 +2805,10 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
         if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
                 mb();

+        WARN(obj && (!obj->stride || !obj->tiling_mode),
+             "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+             obj->stride, obj->tiling_mode);
+
         switch (INTEL_INFO(dev)->gen) {
         case 7:
         case 6:
@@ -2836,6 +2850,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                 fence->obj = NULL;
                 list_del_init(&fence->lru_list);
         }
+        obj->fence_dirty = false;
 }

 static int
@@ -2965,7 +2980,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
                 return 0;

         i915_gem_object_update_fence(obj, reg, enable);
-        obj->fence_dirty = false;

         return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index dc53a527126b..9e6578330801 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -85,9 +85,17 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                    struct sg_table *sg,
                                    enum dma_data_direction dir)
 {
+        struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+
+        mutex_lock(&obj->base.dev->struct_mutex);
+
         dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
         sg_free_table(sg);
         kfree(sg);
+
+        i915_gem_object_unpin_pages(obj);
+
+        mutex_unlock(&obj->base.dev->struct_mutex);
 }

 static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f2326fc60ac9..53cddd985406 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -752,6 +752,8 @@
                                         will not assert AGPBUSY# and will only
                                         be delivered when out of C3. */
 #define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
+#define INSTPM_TLB_INVALIDATE (1<<9)
+#define INSTPM_SYNC_FLUSH (1<<5)
 #define ACTHD 0x020c8
 #define FW_BLC 0x020d8
 #define FW_BLC2 0x020dc
@@ -1856,10 +1858,16 @@
 #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)

 #define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114)
-/* HDMI/DP bits are gen4+ */
-#define PORTB_HOTPLUG_LIVE_STATUS (1 << 29)
+/*
+ * HDMI/DP bits are gen4+
+ *
+ * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
+ * Please check the detailed lore in the commit message for for experimental
+ * evidence.
+ */
+#define PORTD_HOTPLUG_LIVE_STATUS (1 << 29)
 #define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
-#define PORTD_HOTPLUG_LIVE_STATUS (1 << 27)
+#define PORTB_HOTPLUG_LIVE_STATUS (1 << 27)
 #define PORTD_HOTPLUG_INT_STATUS (3 << 21)
 #define PORTC_HOTPLUG_INT_STATUS (3 << 19)
 #define PORTB_HOTPLUG_INT_STATUS (3 << 17)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 324211ac9c55..b042ee5c4070 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -301,7 +301,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
         struct intel_digital_port *intel_dig_port =
                 enc_to_dig_port(encoder);

-        intel_dp->DP = intel_dig_port->port_reversal |
+        intel_dp->DP = intel_dig_port->saved_port_bits |
                 DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
         intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);

@@ -1109,7 +1109,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
                  * enabling the port.
                  */
                 I915_WRITE(DDI_BUF_CTL(port),
-                           intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE);
+                           intel_dig_port->saved_port_bits |
+                           DDI_BUF_CTL_ENABLE);
         } else if (type == INTEL_OUTPUT_EDP) {
                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

@@ -1347,8 +1348,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
         intel_encoder->get_config = intel_ddi_get_config;

         intel_dig_port->port = port;
-        intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
-                                        DDI_BUF_PORT_REVERSAL;
+        intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+                                          (DDI_BUF_PORT_REVERSAL |
+                                           DDI_A_4_LANES);
         intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);

         intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 85f3eb74d2b7..be79f477a38f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4913,22 +4913,19 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
         uint32_t tmp;

         tmp = I915_READ(PFIT_CONTROL);
+        if (!(tmp & PFIT_ENABLE))
+                return;

+        /* Check whether the pfit is attached to our pipe. */
         if (INTEL_INFO(dev)->gen < 4) {
                 if (crtc->pipe != PIPE_B)
                         return;
-
-                /* gen2/3 store dither state in pfit control, needs to match */
-                pipe_config->gmch_pfit.control = tmp & PANEL_8TO6_DITHER_ENABLE;
         } else {
                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
                         return;
         }

-        if (!(tmp & PFIT_ENABLE))
-                return;
-
-        pipe_config->gmch_pfit.control = I915_READ(PFIT_CONTROL);
+        pipe_config->gmch_pfit.control = tmp;
         pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
         if (INTEL_INFO(dev)->gen < 5)
                 pipe_config->gmch_pfit.lvds_border_bits =
@@ -8272,9 +8269,11 @@ check_crtc_state(struct drm_device *dev)

                 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                                     base.head) {
+                        enum pipe pipe;
                         if (encoder->base.crtc != &crtc->base)
                                 continue;
-                        if (encoder->get_config)
+                        if (encoder->get_config &&
+                            encoder->get_hw_state(encoder, &pipe))
                                 encoder->get_config(encoder, &pipe_config);
                 }

@@ -8317,6 +8316,8 @@ check_shared_dpll_state(struct drm_device *dev)
                      pll->active, pll->refcount);
                 WARN(pll->active && !pll->on,
                      "pll in active use but not on in sw tracking\n");
+                WARN(pll->on && !pll->active,
+                     "pll in on but not on in use in sw tracking\n");
                 WARN(pll->on != active,
                      "pll on state mismatch (expected %i, found %i)\n",
                      pll->on, active);
@@ -8541,15 +8542,20 @@ static void intel_set_config_restore_state(struct drm_device *dev,
 }

 static bool
-is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors,
-                      int num_connectors)
+is_crtc_connector_off(struct drm_mode_set *set)
 {
         int i;

-        for (i = 0; i < num_connectors; i++)
-                if (connectors[i].encoder &&
-                    connectors[i].encoder->crtc == crtc &&
-                    connectors[i].dpms != DRM_MODE_DPMS_ON)
+        if (set->num_connectors == 0)
+                return false;
+
+        if (WARN_ON(set->connectors == NULL))
+                return false;
+
+        for (i = 0; i < set->num_connectors; i++)
+                if (set->connectors[i]->encoder &&
+                    set->connectors[i]->encoder->crtc == set->crtc &&
+                    set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
                         return true;

         return false;
@@ -8562,10 +8568,8 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,

         /* We should be able to check here if the fb has the same properties
          * and then just flip_or_move it */
-        if (set->connectors != NULL &&
-            is_crtc_connector_off(set->crtc, *set->connectors,
-                                  set->num_connectors)) {
-                        config->mode_changed = true;
+        if (is_crtc_connector_off(set)) {
+                config->mode_changed = true;
         } else if (set->crtc->fb != set->fb) {
                 /* If we have no fb then treat it as a full mode set */
                 if (set->crtc->fb == NULL) {
@@ -9398,6 +9402,17 @@ static void quirk_invert_brightness(struct drm_device *dev)
         DRM_INFO("applying inverted panel brightness quirk\n");
 }

+/*
+ * Some machines (Dell XPS13) suffer broken backlight controls if
+ * BLM_PCH_PWM_ENABLE is set.
+ */
+static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
+{
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
+        DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
+}
+
 struct intel_quirk {
         int device;
         int subsystem_vendor;
@@ -9467,6 +9482,11 @@

         /* Acer Aspire 4736Z */
         { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+        /* Dell XPS13 HD Sandy Bridge */
+        { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
+        /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
+        { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
 };

 static void intel_init_quirks(struct drm_device *dev)
@@ -9817,8 +9837,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                 }
                 pll->refcount = pll->active;

-                DRM_DEBUG_KMS("%s hw state readout: refcount %i\n",
-                              pll->name, pll->refcount);
+                DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
+                              pll->name, pll->refcount, pll->on);
         }

         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
@@ -9869,6 +9889,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
         struct drm_plane *plane;
         struct intel_crtc *crtc;
         struct intel_encoder *encoder;
+        int i;

         intel_modeset_readout_hw_state(dev);

@@ -9884,6 +9905,18 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
                 intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
         }

+        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+                if (!pll->on || pll->active)
+                        continue;
+
+                DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
+
+                pll->disable(dev_priv, pll);
+                pll->on = false;
+        }
+
         if (force_restore) {
                 /*
                  * We need to use raw interfaces for restoring state to avoid
@@ -10009,6 +10042,8 @@ struct intel_display_error_state {

         u32 power_well_driver;

+        int num_transcoders;
+
         struct intel_cursor_error_state {
                 u32 control;
                 u32 position;
@@ -10017,16 +10052,7 @@ struct intel_display_error_state {
         } cursor[I915_MAX_PIPES];

         struct intel_pipe_error_state {
-                enum transcoder cpu_transcoder;
-                u32 conf;
                 u32 source;
-
-                u32 htotal;
-                u32 hblank;
-                u32 hsync;
-                u32 vtotal;
-                u32 vblank;
-                u32 vsync;
         } pipe[I915_MAX_PIPES];

         struct intel_plane_error_state {
@@ -10038,6 +10064,19 @@ struct intel_display_error_state {
                 u32 surface;
                 u32 tile_offset;
         } plane[I915_MAX_PIPES];
+
+        struct intel_transcoder_error_state {
+                enum transcoder cpu_transcoder;
+
+                u32 conf;
+
+                u32 htotal;
+                u32 hblank;
+                u32 hsync;
+                u32 vtotal;
+                u32 vblank;
+                u32 vsync;
+        } transcoder[4];
 };

 struct intel_display_error_state *
@@ -10045,9 +10084,17 @@ intel_display_capture_error_state(struct drm_device *dev)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
         struct intel_display_error_state *error;
-        enum transcoder cpu_transcoder;
+        int transcoders[] = {
+                TRANSCODER_A,
+                TRANSCODER_B,
+                TRANSCODER_C,
+                TRANSCODER_EDP,
+        };
         int i;

+        if (INTEL_INFO(dev)->num_pipes == 0)
+                return NULL;
+
         error = kmalloc(sizeof(*error), GFP_ATOMIC);
         if (error == NULL)
                 return NULL;
@@ -10056,9 +10103,6 @@ intel_display_capture_error_state(struct drm_device *dev)
                 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

         for_each_pipe(i) {
-                cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
-                error->pipe[i].cpu_transcoder = cpu_transcoder;
-
                 if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
                         error->cursor[i].control = I915_READ(CURCNTR(i));
                         error->cursor[i].position = I915_READ(CURPOS(i));
@@ -10082,14 +10126,25 @@ intel_display_capture_error_state(struct drm_device *dev)
                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
                 }

-                error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
                 error->pipe[i].source = I915_READ(PIPESRC(i));
-                error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
-                error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
-                error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
-                error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
-                error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
-                error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+        }
+
+        error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+        if (HAS_DDI(dev_priv->dev))
+                error->num_transcoders++; /* Account for eDP. */
+
+        for (i = 0; i < error->num_transcoders; i++) {
+                enum transcoder cpu_transcoder = transcoders[i];
+
+                error->transcoder[i].cpu_transcoder = cpu_transcoder;
+
+                error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
+                error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+                error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+                error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+                error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+                error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+                error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
         }

         /* In the code above we read the registers without checking if the power
@@ -10111,22 +10166,16 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
 {
         int i;

+        if (!error)
+                return;
+
         err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
         if (HAS_POWER_WELL(dev))
                 err_printf(m, "PWR_WELL_CTL2: %08x\n",
                            error->power_well_driver);
         for_each_pipe(i) {
                 err_printf(m, "Pipe [%d]:\n", i);
-                err_printf(m, "  CPU transcoder: %c\n",
-                           transcoder_name(error->pipe[i].cpu_transcoder));
-                err_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
                 err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
-                err_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
-                err_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
-                err_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
-                err_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
-                err_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
-                err_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);

                 err_printf(m, "Plane [%d]:\n", i);
                 err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
@@ -10147,5 +10196,17 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
                 err_printf(m, "  POS: %08x\n", error->cursor[i].position);
                 err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
         }
+
+        for (i = 0; i < error->num_transcoders; i++) {
+                err_printf(m, "  CPU transcoder: %c\n",
+                           transcoder_name(error->transcoder[i].cpu_transcoder));
+                err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
+                err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
+                err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
+                err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
+                err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
+                err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
+                err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
+        }
 }
 #endif
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index c8c9b6f48230..b7d6e09456ce 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -504,7 +504,7 @@ struct intel_dp {
 struct intel_digital_port {
         struct intel_encoder base;
         enum port port;
-        u32 port_reversal;
+        u32 saved_port_bits;
         struct intel_dp dp;
         struct intel_hdmi hdmi;
 };
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 98df2a0c85bd..2fd3fd5b943e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -785,10 +785,22 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
         }
 }

+static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+{
+        struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+
+        if (IS_G4X(dev))
+                return 165000;
+        else if (IS_HASWELL(dev))
+                return 300000;
+        else
+                return 225000;
+}
+
 static int intel_hdmi_mode_valid(struct drm_connector *connector,
                                  struct drm_display_mode *mode)
 {
-        if (mode->clock > 165000)
+        if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
                 return MODE_CLOCK_HIGH;
         if (mode->clock < 20000)
                 return MODE_CLOCK_LOW;
@@ -806,6 +818,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
         struct drm_device *dev = encoder->base.dev;
         struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
         int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
+        int portclock_limit = hdmi_portclock_limit(intel_hdmi);
         int desired_bpp;

         if (intel_hdmi->color_range_auto) {
@@ -829,7 +842,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
          * outputs. We also need to check that the higher clock still fits
          * within limits.
          */
-        if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= 225000
+        if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
             && HAS_PCH_SPLIT(dev)) {
                 DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
                 desired_bpp = 12*3;
@@ -846,7 +859,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
                 pipe_config->pipe_bpp = desired_bpp;
         }

-        if (adjusted_mode->clock > 225000) {
+        if (adjusted_mode->clock > portclock_limit) {
                 DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
                 return false;
         }
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 021e8daa022d..61348eae2f04 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -109,6 +109,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
                 flags |= DRM_MODE_FLAG_PVSYNC;

         pipe_config->adjusted_mode.flags |= flags;
+
+        /* gen2/3 store dither state in pfit control, needs to match */
+        if (INTEL_INFO(dev)->gen < 4) {
+                tmp = I915_READ(PFIT_CONTROL);
+
+                pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+        }
 }

 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -290,14 +297,11 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,

                 intel_pch_panel_fitting(intel_crtc, pipe_config,
                                         intel_connector->panel.fitting_mode);
-                return true;
         } else {
                 intel_gmch_panel_fitting(intel_crtc, pipe_config,
                                          intel_connector->panel.fitting_mode);
-        }

-        drm_mode_set_crtcinfo(adjusted_mode, 0);
-        pipe_config->timings_set = true;
+        }

         /*
          * XXX: It would be nice to support lower refresh rates on the
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 80bea1d3209f..5950888ae1d0 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -194,6 +194,9 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc, | |||
194 | adjusted_mode->vdisplay == mode->vdisplay) | 194 | adjusted_mode->vdisplay == mode->vdisplay) |
195 | goto out; | 195 | goto out; |
196 | 196 | ||
197 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
198 | pipe_config->timings_set = true; | ||
199 | |||
197 | switch (fitting_mode) { | 200 | switch (fitting_mode) { |
198 | case DRM_MODE_SCALE_CENTER: | 201 | case DRM_MODE_SCALE_CENTER: |
199 | /* | 202 | /* |
@@ -494,8 +497,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max) | |||
494 | goto out; | 497 | goto out; |
495 | } | 498 | } |
496 | 499 | ||
497 | /* scale to hardware */ | 500 | /* scale to hardware, but be careful to not overflow */ |
498 | level = level * freq / max; | 501 | if (freq < max) |
502 | level = level * freq / max; | ||
503 | else | ||
504 | level = freq / max * level; | ||
499 | 505 | ||
500 | dev_priv->backlight.level = level; | 506 | dev_priv->backlight.level = level; |
501 | if (dev_priv->backlight.device) | 507 | if (dev_priv->backlight.device) |
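[Editor's note] The scaling fix above guards against a plain 32-bit overflow: level, freq and max are u32 values, and multiplying before dividing wraps once level * freq exceeds 2^32, which can happen when the hardware maximum is large. A standalone illustration of why the operand order matters (the numbers are made up purely to trigger the wrap):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t level = 60000, freq = 200000, max = 100000;

            /* 60000 * 200000 = 12e9, which wraps in 32-bit arithmetic */
            uint32_t naive = level * freq / max;
            /* the patched ordering divides first when freq >= max */
            uint32_t safe = (freq < max) ? level * freq / max
                                         : freq / max * level;

            printf("naive=%u safe=%u\n", naive, safe);  /* 34100 vs 120000 */
            return 0;
    }

The divide-first branch loses a little precision, which is why it is only taken when the multiply would actually be at risk of overflowing.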
@@ -512,6 +518,17 @@ void intel_panel_disable_backlight(struct drm_device *dev) | |||
512 | struct drm_i915_private *dev_priv = dev->dev_private; | 518 | struct drm_i915_private *dev_priv = dev->dev_private; |
513 | unsigned long flags; | 519 | unsigned long flags; |
514 | 520 | ||
521 | /* | ||
522 | * Do not disable backlight on the vgaswitcheroo path. When switching | ||
523 | * away from i915, the other client may depend on i915 to handle the | ||
524 | * backlight. This will leave the backlight on unnecessarily when | ||
525 | * another client is not activated. | ||
526 | */ | ||
527 | if (dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) { | ||
528 | DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n"); | ||
529 | return; | ||
530 | } | ||
531 | |||
515 | spin_lock_irqsave(&dev_priv->backlight.lock, flags); | 532 | spin_lock_irqsave(&dev_priv->backlight.lock, flags); |
516 | 533 | ||
517 | dev_priv->backlight.enabled = false; | 534 | dev_priv->backlight.enabled = false; |
@@ -580,7 +597,8 @@ void intel_panel_enable_backlight(struct drm_device *dev, | |||
580 | POSTING_READ(reg); | 597 | POSTING_READ(reg); |
581 | I915_WRITE(reg, tmp | BLM_PWM_ENABLE); | 598 | I915_WRITE(reg, tmp | BLM_PWM_ENABLE); |
582 | 599 | ||
583 | if (HAS_PCH_SPLIT(dev)) { | 600 | if (HAS_PCH_SPLIT(dev) && |
601 | !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) { | ||
584 | tmp = I915_READ(BLC_PWM_PCH_CTL1); | 602 | tmp = I915_READ(BLC_PWM_PCH_CTL1); |
585 | tmp |= BLM_PCH_PWM_ENABLE; | 603 | tmp |= BLM_PCH_PWM_ENABLE; |
586 | tmp &= ~BLM_PCH_OVERRIDE_ENABLE; | 604 | tmp &= ~BLM_PCH_OVERRIDE_ENABLE; |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d10e6735771f..b0e4a0bd1313 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -5063,8 +5063,26 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) | |||
5063 | } | 5063 | } |
5064 | } else { | 5064 | } else { |
5065 | if (enable_requested) { | 5065 | if (enable_requested) { |
5066 | unsigned long irqflags; | ||
5067 | enum pipe p; | ||
5068 | |||
5066 | I915_WRITE(HSW_PWR_WELL_DRIVER, 0); | 5069 | I915_WRITE(HSW_PWR_WELL_DRIVER, 0); |
5070 | POSTING_READ(HSW_PWR_WELL_DRIVER); | ||
5067 | DRM_DEBUG_KMS("Requesting to disable the power well\n"); | 5071 | DRM_DEBUG_KMS("Requesting to disable the power well\n"); |
5072 | |||
5073 | /* | ||
5074 | * After this, the registers on the pipes that are part | ||
5075 | * of the power well will become zero, so we have to | ||
5076 | * adjust our counters according to that. | ||
5077 | * | ||
5078 | * FIXME: Should we do this in general in | ||
5079 | * drm_vblank_post_modeset? | ||
5080 | */ | ||
5081 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | ||
5082 | for_each_pipe(p) | ||
5083 | if (p != PIPE_A) | ||
5084 | dev->last_vblank[p] = 0; | ||
5085 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
5068 | } | 5086 | } |
5069 | } | 5087 | } |
5070 | } | 5088 | } |
@@ -5476,7 +5494,7 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv) | |||
5476 | gen6_gt_check_fifodbg(dev_priv); | 5494 | gen6_gt_check_fifodbg(dev_priv); |
5477 | } | 5495 | } |
5478 | 5496 | ||
5479 | void intel_gt_reset(struct drm_device *dev) | 5497 | void intel_gt_sanitize(struct drm_device *dev) |
5480 | { | 5498 | { |
5481 | struct drm_i915_private *dev_priv = dev->dev_private; | 5499 | struct drm_i915_private *dev_priv = dev->dev_private; |
5482 | 5500 | ||
@@ -5487,16 +5505,16 @@ void intel_gt_reset(struct drm_device *dev) | |||
5487 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) | 5505 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) |
5488 | __gen6_gt_force_wake_mt_reset(dev_priv); | 5506 | __gen6_gt_force_wake_mt_reset(dev_priv); |
5489 | } | 5507 | } |
5508 | |||
5509 | /* BIOS often leaves RC6 enabled, but disable it for hw init */ | ||
5510 | if (INTEL_INFO(dev)->gen >= 6) | ||
5511 | intel_disable_gt_powersave(dev); | ||
5490 | } | 5512 | } |
5491 | 5513 | ||
5492 | void intel_gt_init(struct drm_device *dev) | 5514 | void intel_gt_init(struct drm_device *dev) |
5493 | { | 5515 | { |
5494 | struct drm_i915_private *dev_priv = dev->dev_private; | 5516 | struct drm_i915_private *dev_priv = dev->dev_private; |
5495 | 5517 | ||
5496 | spin_lock_init(&dev_priv->gt_lock); | ||
5497 | |||
5498 | intel_gt_reset(dev); | ||
5499 | |||
5500 | if (IS_VALLEYVIEW(dev)) { | 5518 | if (IS_VALLEYVIEW(dev)) { |
5501 | dev_priv->gt.force_wake_get = vlv_force_wake_get; | 5519 | dev_priv->gt.force_wake_get = vlv_force_wake_get; |
5502 | dev_priv->gt.force_wake_put = vlv_force_wake_put; | 5520 | dev_priv->gt.force_wake_put = vlv_force_wake_put; |
@@ -5536,6 +5554,12 @@ void intel_gt_init(struct drm_device *dev) | |||
5536 | dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; | 5554 | dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; |
5537 | dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; | 5555 | dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; |
5538 | } | 5556 | } |
5557 | } | ||
5558 | |||
5559 | void intel_pm_init(struct drm_device *dev) | ||
5560 | { | ||
5561 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5562 | |||
5539 | INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, | 5563 | INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, |
5540 | intel_gen6_powersave_work); | 5564 | intel_gen6_powersave_work); |
5541 | } | 5565 | } |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 664118d8c1d6..079ef0129e74 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -968,6 +968,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring) | |||
968 | 968 | ||
969 | I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); | 969 | I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); |
970 | POSTING_READ(mmio); | 970 | POSTING_READ(mmio); |
971 | |||
972 | /* Flush the TLB for this page */ | ||
973 | if (INTEL_INFO(dev)->gen >= 6) { | ||
974 | u32 reg = RING_INSTPM(ring->mmio_base); | ||
975 | I915_WRITE(reg, | ||
976 | _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | | ||
977 | INSTPM_SYNC_FLUSH)); | ||
978 | if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, | ||
979 | 1000)) | ||
980 | DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", | ||
981 | ring->name); | ||
982 | } | ||
971 | } | 983 | } |
972 | 984 | ||
973 | static int | 985 | static int |
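[Editor's note] The status-page hunk above follows a common MMIO pattern: write a masked enable to kick the TLB invalidation, then poll the same register until the SYNC_FLUSH bit clears, bounded by a timeout so a wedged engine produces an error message instead of hanging the driver. A toy, userspace-only sketch of that bounded-poll shape (the fake register model is purely illustrative, not an i915 interface):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static int reads_until_done = 3;

    /* stand-in for an MMIO read; the real code reads RING_INSTPM */
    static uint32_t fake_read(void)
    {
            return (reads_until_done-- > 0) ? 0x20 : 0;
    }

    /* poll a status bit a bounded number of times instead of forever */
    static bool wait_for_clear(uint32_t bit, int max_polls)
    {
            while (max_polls-- > 0) {
                    if (!(fake_read() & bit))
                            return true;
            }
            return false;
    }

    int main(void)
    {
            if (wait_for_clear(0x20, 1000))
                    printf("sync flush completed\n");
            else
                    fprintf(stderr, "wait for SyncFlush timed out\n");
            return 0;
    }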
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 251784aa2225..503a414cbdad 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c | |||
@@ -29,6 +29,7 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc) | |||
29 | struct mga_crtc *mga_crtc = to_mga_crtc(crtc); | 29 | struct mga_crtc *mga_crtc = to_mga_crtc(crtc); |
30 | struct drm_device *dev = crtc->dev; | 30 | struct drm_device *dev = crtc->dev; |
31 | struct mga_device *mdev = dev->dev_private; | 31 | struct mga_device *mdev = dev->dev_private; |
32 | struct drm_framebuffer *fb = crtc->fb; | ||
32 | int i; | 33 | int i; |
33 | 34 | ||
34 | if (!crtc->enabled) | 35 | if (!crtc->enabled) |
@@ -36,6 +37,28 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc) | |||
36 | 37 | ||
37 | WREG8(DAC_INDEX + MGA1064_INDEX, 0); | 38 | WREG8(DAC_INDEX + MGA1064_INDEX, 0); |
38 | 39 | ||
40 | if (fb && fb->bits_per_pixel == 16) { | ||
41 | int inc = (fb->depth == 15) ? 8 : 4; | ||
42 | u8 r, b; | ||
43 | for (i = 0; i < MGAG200_LUT_SIZE; i += inc) { | ||
44 | if (fb->depth == 16) { | ||
45 | if (i > (MGAG200_LUT_SIZE >> 1)) { | ||
46 | r = b = 0; | ||
47 | } else { | ||
48 | r = mga_crtc->lut_r[i << 1]; | ||
49 | b = mga_crtc->lut_b[i << 1]; | ||
50 | } | ||
51 | } else { | ||
52 | r = mga_crtc->lut_r[i]; | ||
53 | b = mga_crtc->lut_b[i]; | ||
54 | } | ||
55 | /* VGA registers */ | ||
56 | WREG8(DAC_INDEX + MGA1064_COL_PAL, r); | ||
57 | WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]); | ||
58 | WREG8(DAC_INDEX + MGA1064_COL_PAL, b); | ||
59 | } | ||
60 | return; | ||
61 | } | ||
39 | for (i = 0; i < MGAG200_LUT_SIZE; i++) { | 62 | for (i = 0; i < MGAG200_LUT_SIZE; i++) { |
40 | /* VGA registers */ | 63 | /* VGA registers */ |
41 | WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]); | 64 | WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]); |
@@ -877,7 +900,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, | |||
877 | 900 | ||
878 | pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8); | 901 | pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8); |
879 | if (crtc->fb->bits_per_pixel == 24) | 902 | if (crtc->fb->bits_per_pixel == 24) |
880 | pitch = pitch >> (4 - bppshift); | 903 | pitch = (pitch * 3) >> (4 - bppshift); |
881 | else | 904 | else |
882 | pitch = pitch >> (4 - bppshift); | 905 | pitch = pitch >> (4 - bppshift); |
883 | 906 | ||
@@ -1251,6 +1274,24 @@ static void mga_crtc_destroy(struct drm_crtc *crtc) | |||
1251 | kfree(mga_crtc); | 1274 | kfree(mga_crtc); |
1252 | } | 1275 | } |
1253 | 1276 | ||
1277 | static void mga_crtc_disable(struct drm_crtc *crtc) | ||
1278 | { | ||
1279 | int ret; | ||
1280 | DRM_DEBUG_KMS("\n"); | ||
1281 | mga_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); | ||
1282 | if (crtc->fb) { | ||
1283 | struct mga_framebuffer *mga_fb = to_mga_framebuffer(crtc->fb); | ||
1284 | struct drm_gem_object *obj = mga_fb->obj; | ||
1285 | struct mgag200_bo *bo = gem_to_mga_bo(obj); | ||
1286 | ret = mgag200_bo_reserve(bo, false); | ||
1287 | if (ret) | ||
1288 | return; | ||
1289 | mgag200_bo_push_sysram(bo); | ||
1290 | mgag200_bo_unreserve(bo); | ||
1291 | } | ||
1292 | crtc->fb = NULL; | ||
1293 | } | ||
1294 | |||
1254 | /* These provide the minimum set of functions required to handle a CRTC */ | 1295 | /* These provide the minimum set of functions required to handle a CRTC */ |
1255 | static const struct drm_crtc_funcs mga_crtc_funcs = { | 1296 | static const struct drm_crtc_funcs mga_crtc_funcs = { |
1256 | .cursor_set = mga_crtc_cursor_set, | 1297 | .cursor_set = mga_crtc_cursor_set, |
@@ -1261,6 +1302,7 @@ static const struct drm_crtc_funcs mga_crtc_funcs = { | |||
1261 | }; | 1302 | }; |
1262 | 1303 | ||
1263 | static const struct drm_crtc_helper_funcs mga_helper_funcs = { | 1304 | static const struct drm_crtc_helper_funcs mga_helper_funcs = { |
1305 | .disable = mga_crtc_disable, | ||
1264 | .dpms = mga_crtc_dpms, | 1306 | .dpms = mga_crtc_dpms, |
1265 | .mode_fixup = mga_crtc_mode_fixup, | 1307 | .mode_fixup = mga_crtc_mode_fixup, |
1266 | .mode_set = mga_crtc_mode_set, | 1308 | .mode_set = mga_crtc_mode_set, |
@@ -1581,6 +1623,8 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev) | |||
1581 | 1623 | ||
1582 | drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs); | 1624 | drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs); |
1583 | 1625 | ||
1626 | drm_sysfs_connector_add(connector); | ||
1627 | |||
1584 | mga_connector->i2c = mgag200_i2c_create(dev); | 1628 | mga_connector->i2c = mgag200_i2c_create(dev); |
1585 | if (!mga_connector->i2c) | 1629 | if (!mga_connector->i2c) |
1586 | DRM_ERROR("failed to add ddc bus\n"); | 1630 | DRM_ERROR("failed to add ddc bus\n"); |
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 3acb2b044c7b..d70e4a92773b 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c | |||
@@ -323,6 +323,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align, | |||
323 | 323 | ||
324 | mgabo->gem.driver_private = NULL; | 324 | mgabo->gem.driver_private = NULL; |
325 | mgabo->bo.bdev = &mdev->ttm.bdev; | 325 | mgabo->bo.bdev = &mdev->ttm.bdev; |
326 | mgabo->bo.bdev->dev_mapping = dev->dev_mapping; | ||
326 | 327 | ||
327 | mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); | 328 | mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); |
328 | 329 | ||
@@ -353,6 +354,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr) | |||
353 | bo->pin_count++; | 354 | bo->pin_count++; |
354 | if (gpu_addr) | 355 | if (gpu_addr) |
355 | *gpu_addr = mgag200_bo_gpu_offset(bo); | 356 | *gpu_addr = mgag200_bo_gpu_offset(bo); |
357 | return 0; | ||
356 | } | 358 | } |
357 | 359 | ||
358 | mgag200_ttm_placement(bo, pl_flag); | 360 | mgag200_ttm_placement(bo, pl_flag); |
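[Editor's note] The second mgag200_ttm.c hunk is a missing early return: when the buffer is already pinned, the function should only bump pin_count and report the GPU address, not fall through into placement and validation again. Reduced to its control flow (generic names, not the mgag200 structures):

    struct buf {
            int pin_count;
            unsigned long gpu_offset;
    };

    /* pinning is reference counted: only the first pin places the buffer */
    static int buf_pin(struct buf *bo, unsigned long *gpu_addr)
    {
            if (bo->pin_count) {
                    bo->pin_count++;
                    if (gpu_addr)
                            *gpu_addr = bo->gpu_offset;
                    return 0;               /* the return the patch adds */
            }

            /* ... choose placement, validate, record bo->gpu_offset ... */
            bo->pin_count = 1;
            if (gpu_addr)
                    *gpu_addr = bo->gpu_offset;
            return 0;
    }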
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c index d8291724dbd4..7a4e0891c5f8 100644 --- a/drivers/gpu/drm/nouveau/core/core/mm.c +++ b/drivers/gpu/drm/nouveau/core/core/mm.c | |||
@@ -98,6 +98,8 @@ nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min, | |||
98 | u32 splitoff; | 98 | u32 splitoff; |
99 | u32 s, e; | 99 | u32 s, e; |
100 | 100 | ||
101 | BUG_ON(!type); | ||
102 | |||
101 | list_for_each_entry(this, &mm->free, fl_entry) { | 103 | list_for_each_entry(this, &mm->free, fl_entry) { |
102 | e = this->offset + this->length; | 104 | e = this->offset + this->length; |
103 | s = this->offset; | 105 | s = this->offset; |
@@ -162,6 +164,8 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min, | |||
162 | struct nouveau_mm_node *prev, *this, *next; | 164 | struct nouveau_mm_node *prev, *this, *next; |
163 | u32 mask = align - 1; | 165 | u32 mask = align - 1; |
164 | 166 | ||
167 | BUG_ON(!type); | ||
168 | |||
165 | list_for_each_entry_reverse(this, &mm->free, fl_entry) { | 169 | list_for_each_entry_reverse(this, &mm->free, fl_entry) { |
166 | u32 e = this->offset + this->length; | 170 | u32 e = this->offset + this->length; |
167 | u32 s = this->offset; | 171 | u32 s = this->offset; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c index 262c9f5f5f60..ce860de43e61 100644 --- a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c | |||
@@ -90,6 +90,7 @@ nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
90 | return ret; | 90 | return ret; |
91 | 91 | ||
92 | nv_subdev(priv)->unit = 0x00008000; | 92 | nv_subdev(priv)->unit = 0x00008000; |
93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
93 | nv_engine(priv)->cclass = &nvc0_bsp_cclass; | 94 | nv_engine(priv)->cclass = &nvc0_bsp_cclass; |
94 | nv_engine(priv)->sclass = nvc0_bsp_sclass; | 95 | nv_engine(priv)->sclass = nvc0_bsp_sclass; |
95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c index c46882c83982..ba6aeca0285e 100644 --- a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c | |||
@@ -90,6 +90,7 @@ nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
90 | return ret; | 90 | return ret; |
91 | 91 | ||
92 | nv_subdev(priv)->unit = 0x00008000; | 92 | nv_subdev(priv)->unit = 0x00008000; |
93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
93 | nv_engine(priv)->cclass = &nve0_bsp_cclass; | 94 | nv_engine(priv)->cclass = &nve0_bsp_cclass; |
94 | nv_engine(priv)->sclass = nve0_bsp_sclass; | 95 | nv_engine(priv)->sclass = nve0_bsp_sclass; |
95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c index 373dbcc523b2..a19e7d79b847 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c | |||
@@ -36,6 +36,8 @@ nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size) | |||
36 | if (data && data[0]) { | 36 | if (data && data[0]) { |
37 | for (i = 0; i < size; i++) | 37 | for (i = 0; i < size; i++) |
38 | nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]); | 38 | nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]); |
39 | for (; i < 0x60; i++) | ||
40 | nv_wr32(priv, 0x61c440 + soff, (i << 8)); | ||
39 | nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003); | 41 | nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003); |
40 | } else | 42 | } else |
41 | if (data) { | 43 | if (data) { |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c index dc57e24fc1df..717639386ced 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c | |||
@@ -41,6 +41,8 @@ nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size) | |||
41 | if (data && data[0]) { | 41 | if (data && data[0]) { |
42 | for (i = 0; i < size; i++) | 42 | for (i = 0; i < size; i++) |
43 | nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]); | 43 | nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]); |
44 | for (; i < 0x60; i++) | ||
45 | nv_wr32(priv, 0x10ec00 + soff, (i << 8)); | ||
44 | nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003); | 46 | nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003); |
45 | } else | 47 | } else |
46 | if (data) { | 48 | if (data) { |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c index ab1e918469a8..526b75242899 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c | |||
@@ -47,14 +47,8 @@ int | |||
47 | nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) | 47 | nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) |
48 | { | 48 | { |
49 | struct nv50_disp_priv *priv = (void *)object->engine; | 49 | struct nv50_disp_priv *priv = (void *)object->engine; |
50 | struct nouveau_bios *bios = nouveau_bios(priv); | ||
51 | const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12; | ||
52 | const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3; | 50 | const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3; |
53 | const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2; | ||
54 | const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR); | 51 | const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR); |
55 | const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or); | ||
56 | struct dcb_output outp; | ||
57 | u8 ver, hdr; | ||
58 | u32 data; | 52 | u32 data; |
59 | int ret = -EINVAL; | 53 | int ret = -EINVAL; |
60 | 54 | ||
@@ -62,8 +56,6 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) | |||
62 | return -EINVAL; | 56 | return -EINVAL; |
63 | data = *(u32 *)args; | 57 | data = *(u32 *)args; |
64 | 58 | ||
65 | if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp)) | ||
66 | return -ENODEV; | ||
67 | 59 | ||
68 | switch (mthd & ~0x3f) { | 60 | switch (mthd & ~0x3f) { |
69 | case NV50_DISP_SOR_PWR: | 61 | case NV50_DISP_SOR_PWR: |
diff --git a/drivers/gpu/drm/nouveau/core/engine/falcon.c b/drivers/gpu/drm/nouveau/core/engine/falcon.c index 3c7a31f7590e..e03fc8e4dc1d 100644 --- a/drivers/gpu/drm/nouveau/core/engine/falcon.c +++ b/drivers/gpu/drm/nouveau/core/engine/falcon.c | |||
@@ -23,6 +23,25 @@ | |||
23 | #include <engine/falcon.h> | 23 | #include <engine/falcon.h> |
24 | #include <subdev/timer.h> | 24 | #include <subdev/timer.h> |
25 | 25 | ||
26 | void | ||
27 | nouveau_falcon_intr(struct nouveau_subdev *subdev) | ||
28 | { | ||
29 | struct nouveau_falcon *falcon = (void *)subdev; | ||
30 | u32 dispatch = nv_ro32(falcon, 0x01c); | ||
31 | u32 intr = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16); | ||
32 | |||
33 | if (intr & 0x00000010) { | ||
34 | nv_debug(falcon, "ucode halted\n"); | ||
35 | nv_wo32(falcon, 0x004, 0x00000010); | ||
36 | intr &= ~0x00000010; | ||
37 | } | ||
38 | |||
39 | if (intr) { | ||
40 | nv_error(falcon, "unhandled intr 0x%08x\n", intr); | ||
41 | nv_wo32(falcon, 0x004, intr); | ||
42 | } | ||
43 | } | ||
44 | |||
26 | u32 | 45 | u32 |
27 | _nouveau_falcon_rd32(struct nouveau_object *object, u64 addr) | 46 | _nouveau_falcon_rd32(struct nouveau_object *object, u64 addr) |
28 | { | 47 | { |
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c index 49ecbb859b25..c19004301309 100644 --- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c +++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c | |||
@@ -265,8 +265,8 @@ nv31_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
265 | int | 265 | int |
266 | nv31_mpeg_init(struct nouveau_object *object) | 266 | nv31_mpeg_init(struct nouveau_object *object) |
267 | { | 267 | { |
268 | struct nouveau_engine *engine = nv_engine(object->engine); | 268 | struct nouveau_engine *engine = nv_engine(object); |
269 | struct nv31_mpeg_priv *priv = (void *)engine; | 269 | struct nv31_mpeg_priv *priv = (void *)object; |
270 | struct nouveau_fb *pfb = nouveau_fb(object); | 270 | struct nouveau_fb *pfb = nouveau_fb(object); |
271 | int ret, i; | 271 | int ret, i; |
272 | 272 | ||
@@ -284,7 +284,10 @@ nv31_mpeg_init(struct nouveau_object *object) | |||
284 | /* PMPEG init */ | 284 | /* PMPEG init */ |
285 | nv_wr32(priv, 0x00b32c, 0x00000000); | 285 | nv_wr32(priv, 0x00b32c, 0x00000000); |
286 | nv_wr32(priv, 0x00b314, 0x00000100); | 286 | nv_wr32(priv, 0x00b314, 0x00000100); |
287 | nv_wr32(priv, 0x00b220, nv44_graph_class(priv) ? 0x00000044 : 0x00000031); | 287 | if (nv_device(priv)->chipset >= 0x40 && nv44_graph_class(priv)) |
288 | nv_wr32(priv, 0x00b220, 0x00000044); | ||
289 | else | ||
290 | nv_wr32(priv, 0x00b220, 0x00000031); | ||
288 | nv_wr32(priv, 0x00b300, 0x02001ec1); | 291 | nv_wr32(priv, 0x00b300, 0x02001ec1); |
289 | nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001); | 292 | nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001); |
290 | 293 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c index f7c581ad1991..dd6196072e9c 100644 --- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c | |||
@@ -61,6 +61,7 @@ nv40_mpeg_context_ctor(struct nouveau_object *parent, | |||
61 | if (ret) | 61 | if (ret) |
62 | return ret; | 62 | return ret; |
63 | 63 | ||
64 | nv_wo32(&chan->base.base, 0x78, 0x02001ec1); | ||
64 | return 0; | 65 | return 0; |
65 | } | 66 | } |
66 | 67 | ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c index 98072c1ff360..73719aaa62d6 100644 --- a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c | |||
@@ -90,6 +90,7 @@ nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
90 | return ret; | 90 | return ret; |
91 | 91 | ||
92 | nv_subdev(priv)->unit = 0x00000002; | 92 | nv_subdev(priv)->unit = 0x00000002; |
93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
93 | nv_engine(priv)->cclass = &nvc0_ppp_cclass; | 94 | nv_engine(priv)->cclass = &nvc0_ppp_cclass; |
94 | nv_engine(priv)->sclass = nvc0_ppp_sclass; | 95 | nv_engine(priv)->sclass = nvc0_ppp_sclass; |
95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c index 1879229b60eb..ac1f62aace72 100644 --- a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c | |||
@@ -90,6 +90,7 @@ nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
90 | return ret; | 90 | return ret; |
91 | 91 | ||
92 | nv_subdev(priv)->unit = 0x00020000; | 92 | nv_subdev(priv)->unit = 0x00020000; |
93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
93 | nv_engine(priv)->cclass = &nvc0_vp_cclass; | 94 | nv_engine(priv)->cclass = &nvc0_vp_cclass; |
94 | nv_engine(priv)->sclass = nvc0_vp_sclass; | 95 | nv_engine(priv)->sclass = nvc0_vp_sclass; |
95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c index d28ecbf7bc49..d4c3108479c9 100644 --- a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c | |||
@@ -90,6 +90,7 @@ nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
90 | return ret; | 90 | return ret; |
91 | 91 | ||
92 | nv_subdev(priv)->unit = 0x00020000; | 92 | nv_subdev(priv)->unit = 0x00020000; |
93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
93 | nv_engine(priv)->cclass = &nve0_vp_cclass; | 94 | nv_engine(priv)->cclass = &nve0_vp_cclass; |
94 | nv_engine(priv)->sclass = nve0_vp_sclass; | 95 | nv_engine(priv)->sclass = nve0_vp_sclass; |
95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/xtensa.c b/drivers/gpu/drm/nouveau/core/engine/xtensa.c index 0639bc59d0a5..5f6ede7c4892 100644 --- a/drivers/gpu/drm/nouveau/core/engine/xtensa.c +++ b/drivers/gpu/drm/nouveau/core/engine/xtensa.c | |||
@@ -118,7 +118,13 @@ _nouveau_xtensa_init(struct nouveau_object *object) | |||
118 | return ret; | 118 | return ret; |
119 | } | 119 | } |
120 | 120 | ||
121 | ret = nouveau_gpuobj_new(object, NULL, fw->size, 0x1000, 0, | 121 | if (fw->size > 0x40000) { |
122 | nv_warn(xtensa, "firmware %s too large\n", name); | ||
123 | release_firmware(fw); | ||
124 | return -EINVAL; | ||
125 | } | ||
126 | |||
127 | ret = nouveau_gpuobj_new(object, NULL, 0x40000, 0x1000, 0, | ||
122 | &xtensa->gpu_fw); | 128 | &xtensa->gpu_fw); |
123 | if (ret) { | 129 | if (ret) { |
124 | release_firmware(fw); | 130 | release_firmware(fw); |
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h b/drivers/gpu/drm/nouveau/core/include/engine/falcon.h index 1edec386ab36..181aa7da524d 100644 --- a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h +++ b/drivers/gpu/drm/nouveau/core/include/engine/falcon.h | |||
@@ -72,6 +72,8 @@ int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *, | |||
72 | struct nouveau_oclass *, u32, bool, const char *, | 72 | struct nouveau_oclass *, u32, bool, const char *, |
73 | const char *, int, void **); | 73 | const char *, int, void **); |
74 | 74 | ||
75 | void nouveau_falcon_intr(struct nouveau_subdev *subdev); | ||
76 | |||
75 | #define _nouveau_falcon_dtor _nouveau_engine_dtor | 77 | #define _nouveau_falcon_dtor _nouveau_engine_dtor |
76 | int _nouveau_falcon_init(struct nouveau_object *); | 78 | int _nouveau_falcon_init(struct nouveau_object *); |
77 | int _nouveau_falcon_fini(struct nouveau_object *, bool); | 79 | int _nouveau_falcon_fini(struct nouveau_object *, bool); |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h index d5502267c30f..9d2cd2006250 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h | |||
@@ -20,8 +20,8 @@ nouveau_mc(void *obj) | |||
20 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC]; | 20 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC]; |
21 | } | 21 | } |
22 | 22 | ||
23 | #define nouveau_mc_create(p,e,o,d) \ | 23 | #define nouveau_mc_create(p,e,o,m,d) \ |
24 | nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d) | 24 | nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d) |
25 | #define nouveau_mc_destroy(p) ({ \ | 25 | #define nouveau_mc_destroy(p) ({ \ |
26 | struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \ | 26 | struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \ |
27 | }) | 27 | }) |
@@ -33,7 +33,8 @@ nouveau_mc(void *obj) | |||
33 | }) | 33 | }) |
34 | 34 | ||
35 | int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *, | 35 | int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *, |
36 | struct nouveau_oclass *, int, void **); | 36 | struct nouveau_oclass *, const struct nouveau_mc_intr *, |
37 | int, void **); | ||
37 | void _nouveau_mc_dtor(struct nouveau_object *); | 38 | void _nouveau_mc_dtor(struct nouveau_object *); |
38 | int _nouveau_mc_init(struct nouveau_object *); | 39 | int _nouveau_mc_init(struct nouveau_object *); |
39 | int _nouveau_mc_fini(struct nouveau_object *, bool); | 40 | int _nouveau_mc_fini(struct nouveau_object *, bool); |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h index f2e87b105666..fcf57fa309bf 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h | |||
@@ -55,7 +55,7 @@ struct nouveau_vma { | |||
55 | struct nouveau_vm { | 55 | struct nouveau_vm { |
56 | struct nouveau_vmmgr *vmm; | 56 | struct nouveau_vmmgr *vmm; |
57 | struct nouveau_mm mm; | 57 | struct nouveau_mm mm; |
58 | int refcount; | 58 | struct kref refcount; |
59 | 59 | ||
60 | struct list_head pgd_list; | 60 | struct list_head pgd_list; |
61 | atomic_t engref[NVDEV_SUBDEV_NR]; | 61 | atomic_t engref[NVDEV_SUBDEV_NR]; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h index 6c974dd83e8b..db9d6ddde52c 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h | |||
@@ -81,7 +81,7 @@ void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *); | |||
81 | void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size, | 81 | void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size, |
82 | u32 pitch, u32 flags, struct nouveau_fb_tile *); | 82 | u32 pitch, u32 flags, struct nouveau_fb_tile *); |
83 | 83 | ||
84 | void nv50_ram_put(struct nouveau_fb *, struct nouveau_mem **); | 84 | void __nv50_ram_put(struct nouveau_fb *, struct nouveau_mem *); |
85 | extern int nv50_fb_memtype[0x80]; | 85 | extern int nv50_fb_memtype[0x80]; |
86 | 86 | ||
87 | #endif | 87 | #endif |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c index 19e3a9a63a02..ab7ef0ac9e34 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c | |||
@@ -40,15 +40,15 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine, | |||
40 | return ret; | 40 | return ret; |
41 | 41 | ||
42 | switch (pfb914 & 0x00000003) { | 42 | switch (pfb914 & 0x00000003) { |
43 | case 0x00000000: pfb->ram->type = NV_MEM_TYPE_DDR1; break; | 43 | case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break; |
44 | case 0x00000001: pfb->ram->type = NV_MEM_TYPE_DDR2; break; | 44 | case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break; |
45 | case 0x00000002: pfb->ram->type = NV_MEM_TYPE_GDDR3; break; | 45 | case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break; |
46 | case 0x00000003: break; | 46 | case 0x00000003: break; |
47 | } | 47 | } |
48 | 48 | ||
49 | pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; | 49 | ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; |
50 | pfb->ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; | 50 | ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; |
51 | pfb->ram->tags = nv_rd32(pfb, 0x100320); | 51 | ram->tags = nv_rd32(pfb, 0x100320); |
52 | return 0; | 52 | return 0; |
53 | } | 53 | } |
54 | 54 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c index 7192aa6e5577..63a6aab86028 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c | |||
@@ -38,8 +38,8 @@ nv4e_ram_create(struct nouveau_object *parent, struct nouveau_object *engine, | |||
38 | if (ret) | 38 | if (ret) |
39 | return ret; | 39 | return ret; |
40 | 40 | ||
41 | pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; | 41 | ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; |
42 | pfb->ram->type = NV_MEM_TYPE_STOLEN; | 42 | ram->type = NV_MEM_TYPE_STOLEN; |
43 | return 0; | 43 | return 0; |
44 | } | 44 | } |
45 | 45 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c index af5aa7ee8ad9..903baff77fdd 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c | |||
@@ -27,17 +27,10 @@ | |||
27 | #include "priv.h" | 27 | #include "priv.h" |
28 | 28 | ||
29 | void | 29 | void |
30 | nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) | 30 | __nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem *mem) |
31 | { | 31 | { |
32 | struct nouveau_mm_node *this; | 32 | struct nouveau_mm_node *this; |
33 | struct nouveau_mem *mem; | ||
34 | 33 | ||
35 | mem = *pmem; | ||
36 | *pmem = NULL; | ||
37 | if (unlikely(mem == NULL)) | ||
38 | return; | ||
39 | |||
40 | mutex_lock(&pfb->base.mutex); | ||
41 | while (!list_empty(&mem->regions)) { | 34 | while (!list_empty(&mem->regions)) { |
42 | this = list_first_entry(&mem->regions, typeof(*this), rl_entry); | 35 | this = list_first_entry(&mem->regions, typeof(*this), rl_entry); |
43 | 36 | ||
@@ -46,6 +39,19 @@ nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) | |||
46 | } | 39 | } |
47 | 40 | ||
48 | nouveau_mm_free(&pfb->tags, &mem->tag); | 41 | nouveau_mm_free(&pfb->tags, &mem->tag); |
42 | } | ||
43 | |||
44 | void | ||
45 | nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) | ||
46 | { | ||
47 | struct nouveau_mem *mem = *pmem; | ||
48 | |||
49 | *pmem = NULL; | ||
50 | if (unlikely(mem == NULL)) | ||
51 | return; | ||
52 | |||
53 | mutex_lock(&pfb->base.mutex); | ||
54 | __nv50_ram_put(pfb, mem); | ||
49 | mutex_unlock(&pfb->base.mutex); | 55 | mutex_unlock(&pfb->base.mutex); |
50 | 56 | ||
51 | kfree(mem); | 57 | kfree(mem); |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c index 9c3634acbb9d..cf97c4de4a6b 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c | |||
@@ -33,11 +33,19 @@ void | |||
33 | nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) | 33 | nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) |
34 | { | 34 | { |
35 | struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb); | 35 | struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb); |
36 | struct nouveau_mem *mem = *pmem; | ||
36 | 37 | ||
37 | if ((*pmem)->tag) | 38 | *pmem = NULL; |
38 | ltcg->tags_free(ltcg, &(*pmem)->tag); | 39 | if (unlikely(mem == NULL)) |
40 | return; | ||
39 | 41 | ||
40 | nv50_ram_put(pfb, pmem); | 42 | mutex_lock(&pfb->base.mutex); |
43 | if (mem->tag) | ||
44 | ltcg->tags_free(ltcg, &mem->tag); | ||
45 | __nv50_ram_put(pfb, mem); | ||
46 | mutex_unlock(&pfb->base.mutex); | ||
47 | |||
48 | kfree(mem); | ||
41 | } | 49 | } |
42 | 50 | ||
43 | int | 51 | int |
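[Editor's note] The ramnv50.c/ramnvc0.c pair above is the usual locked-helper split: the double-underscore function does the actual teardown and assumes the caller already holds the fb mutex, while each public *_ram_put() wrapper handles the NULL check, takes the lock once, and can slot extra work (here, releasing the LTCG tags on nvc0) inside the same critical section. In generic form (names are illustrative, not the nouveau ones):

    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct pool {
            struct mutex mutex;
    };

    struct obj {
            /* ... regions, tags, ... */
    };

    /* caller must hold pool->mutex */
    static void __obj_put(struct pool *pool, struct obj *obj)
    {
            lockdep_assert_held(&pool->mutex);
            /* ... free the resources obj owns ... */
    }

    static void obj_put(struct pool *pool, struct obj **pobj)
    {
            struct obj *obj = *pobj;

            *pobj = NULL;
            if (unlikely(obj == NULL))
                    return;

            mutex_lock(&pool->mutex);
            __obj_put(pool, obj);
            mutex_unlock(&pool->mutex);

            kfree(obj);
    }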
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c index bf489dcf46e2..c4c1d415e7fe 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c | |||
@@ -103,7 +103,7 @@ nv50_gpio_intr(struct nouveau_subdev *subdev) | |||
103 | int i; | 103 | int i; |
104 | 104 | ||
105 | intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050); | 105 | intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050); |
106 | if (nv_device(priv)->chipset >= 0x90) | 106 | if (nv_device(priv)->chipset > 0x92) |
107 | intr1 = nv_rd32(priv, 0xe074) & nv_rd32(priv, 0xe070); | 107 | intr1 = nv_rd32(priv, 0xe074) & nv_rd32(priv, 0xe070); |
108 | 108 | ||
109 | hi = (intr0 & 0x0000ffff) | (intr1 << 16); | 109 | hi = (intr0 & 0x0000ffff) | (intr1 << 16); |
@@ -115,7 +115,7 @@ nv50_gpio_intr(struct nouveau_subdev *subdev) | |||
115 | } | 115 | } |
116 | 116 | ||
117 | nv_wr32(priv, 0xe054, intr0); | 117 | nv_wr32(priv, 0xe054, intr0); |
118 | if (nv_device(priv)->chipset >= 0x90) | 118 | if (nv_device(priv)->chipset > 0x92) |
119 | nv_wr32(priv, 0xe074, intr1); | 119 | nv_wr32(priv, 0xe074, intr1); |
120 | } | 120 | } |
121 | 121 | ||
@@ -146,7 +146,7 @@ nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
146 | int ret; | 146 | int ret; |
147 | 147 | ||
148 | ret = nouveau_gpio_create(parent, engine, oclass, | 148 | ret = nouveau_gpio_create(parent, engine, oclass, |
149 | nv_device(parent)->chipset >= 0x90 ? 32 : 16, | 149 | nv_device(parent)->chipset > 0x92 ? 32 : 16, |
150 | &priv); | 150 | &priv); |
151 | *pobject = nv_object(priv); | 151 | *pobject = nv_object(priv); |
152 | if (ret) | 152 | if (ret) |
@@ -182,7 +182,7 @@ nv50_gpio_init(struct nouveau_object *object) | |||
182 | /* disable, and ack any pending gpio interrupts */ | 182 | /* disable, and ack any pending gpio interrupts */ |
183 | nv_wr32(priv, 0xe050, 0x00000000); | 183 | nv_wr32(priv, 0xe050, 0x00000000); |
184 | nv_wr32(priv, 0xe054, 0xffffffff); | 184 | nv_wr32(priv, 0xe054, 0xffffffff); |
185 | if (nv_device(priv)->chipset >= 0x90) { | 185 | if (nv_device(priv)->chipset > 0x92) { |
186 | nv_wr32(priv, 0xe070, 0x00000000); | 186 | nv_wr32(priv, 0xe070, 0x00000000); |
187 | nv_wr32(priv, 0xe074, 0xffffffff); | 187 | nv_wr32(priv, 0xe074, 0xffffffff); |
188 | } | 188 | } |
@@ -195,7 +195,7 @@ nv50_gpio_fini(struct nouveau_object *object, bool suspend) | |||
195 | { | 195 | { |
196 | struct nv50_gpio_priv *priv = (void *)object; | 196 | struct nv50_gpio_priv *priv = (void *)object; |
197 | nv_wr32(priv, 0xe050, 0x00000000); | 197 | nv_wr32(priv, 0xe050, 0x00000000); |
198 | if (nv_device(priv)->chipset >= 0x90) | 198 | if (nv_device(priv)->chipset > 0x92) |
199 | nv_wr32(priv, 0xe070, 0x00000000); | 199 | nv_wr32(priv, 0xe070, 0x00000000); |
200 | return nouveau_gpio_fini(&priv->base, suspend); | 200 | return nouveau_gpio_fini(&priv->base, suspend); |
201 | } | 201 | } |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c index bcca883018f4..cce65cc56514 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c | |||
@@ -30,8 +30,9 @@ struct nvc0_ltcg_priv { | |||
30 | struct nouveau_ltcg base; | 30 | struct nouveau_ltcg base; |
31 | u32 part_nr; | 31 | u32 part_nr; |
32 | u32 subp_nr; | 32 | u32 subp_nr; |
33 | struct nouveau_mm tags; | ||
34 | u32 num_tags; | 33 | u32 num_tags; |
34 | u32 tag_base; | ||
35 | struct nouveau_mm tags; | ||
35 | struct nouveau_mm_node *tag_ram; | 36 | struct nouveau_mm_node *tag_ram; |
36 | }; | 37 | }; |
37 | 38 | ||
@@ -117,10 +118,6 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv) | |||
117 | u32 tag_size, tag_margin, tag_align; | 118 | u32 tag_size, tag_margin, tag_align; |
118 | int ret; | 119 | int ret; |
119 | 120 | ||
120 | nv_wr32(priv, 0x17e8d8, priv->part_nr); | ||
121 | if (nv_device(pfb)->card_type >= NV_E0) | ||
122 | nv_wr32(priv, 0x17e000, priv->part_nr); | ||
123 | |||
124 | /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */ | 121 | /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */ |
125 | priv->num_tags = (pfb->ram->size >> 17) / 4; | 122 | priv->num_tags = (pfb->ram->size >> 17) / 4; |
126 | if (priv->num_tags > (1 << 17)) | 123 | if (priv->num_tags > (1 << 17)) |
@@ -142,7 +139,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv) | |||
142 | tag_size += tag_align; | 139 | tag_size += tag_align; |
143 | tag_size = (tag_size + 0xfff) >> 12; /* round up */ | 140 | tag_size = (tag_size + 0xfff) >> 12; /* round up */ |
144 | 141 | ||
145 | ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1, | 142 | ret = nouveau_mm_tail(&pfb->vram, 1, tag_size, tag_size, 1, |
146 | &priv->tag_ram); | 143 | &priv->tag_ram); |
147 | if (ret) { | 144 | if (ret) { |
148 | priv->num_tags = 0; | 145 | priv->num_tags = 0; |
@@ -152,7 +149,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv) | |||
152 | tag_base += tag_align - 1; | 149 | tag_base += tag_align - 1; |
153 | ret = do_div(tag_base, tag_align); | 150 | ret = do_div(tag_base, tag_align); |
154 | 151 | ||
155 | nv_wr32(priv, 0x17e8d4, tag_base); | 152 | priv->tag_base = tag_base; |
156 | } | 153 | } |
157 | ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1); | 154 | ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1); |
158 | 155 | ||
@@ -182,8 +179,6 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
182 | } | 179 | } |
183 | priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28; | 180 | priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28; |
184 | 181 | ||
185 | nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ | ||
186 | |||
187 | ret = nvc0_ltcg_init_tag_ram(pfb, priv); | 182 | ret = nvc0_ltcg_init_tag_ram(pfb, priv); |
188 | if (ret) | 183 | if (ret) |
189 | return ret; | 184 | return ret; |
@@ -209,13 +204,32 @@ nvc0_ltcg_dtor(struct nouveau_object *object) | |||
209 | nouveau_ltcg_destroy(ltcg); | 204 | nouveau_ltcg_destroy(ltcg); |
210 | } | 205 | } |
211 | 206 | ||
207 | static int | ||
208 | nvc0_ltcg_init(struct nouveau_object *object) | ||
209 | { | ||
210 | struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object; | ||
211 | struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg; | ||
212 | int ret; | ||
213 | |||
214 | ret = nouveau_ltcg_init(ltcg); | ||
215 | if (ret) | ||
216 | return ret; | ||
217 | |||
218 | nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ | ||
219 | nv_wr32(priv, 0x17e8d8, priv->part_nr); | ||
220 | if (nv_device(ltcg)->card_type >= NV_E0) | ||
221 | nv_wr32(priv, 0x17e000, priv->part_nr); | ||
222 | nv_wr32(priv, 0x17e8d4, priv->tag_base); | ||
223 | return 0; | ||
224 | } | ||
225 | |||
212 | struct nouveau_oclass | 226 | struct nouveau_oclass |
213 | nvc0_ltcg_oclass = { | 227 | nvc0_ltcg_oclass = { |
214 | .handle = NV_SUBDEV(LTCG, 0xc0), | 228 | .handle = NV_SUBDEV(LTCG, 0xc0), |
215 | .ofuncs = &(struct nouveau_ofuncs) { | 229 | .ofuncs = &(struct nouveau_ofuncs) { |
216 | .ctor = nvc0_ltcg_ctor, | 230 | .ctor = nvc0_ltcg_ctor, |
217 | .dtor = nvc0_ltcg_dtor, | 231 | .dtor = nvc0_ltcg_dtor, |
218 | .init = _nouveau_ltcg_init, | 232 | .init = nvc0_ltcg_init, |
219 | .fini = _nouveau_ltcg_fini, | 233 | .fini = _nouveau_ltcg_fini, |
220 | }, | 234 | }, |
221 | }; | 235 | }; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c index 1c0330b8c9a4..ec9cd6f10f91 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c | |||
@@ -80,7 +80,9 @@ _nouveau_mc_dtor(struct nouveau_object *object) | |||
80 | 80 | ||
81 | int | 81 | int |
82 | nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, | 82 | nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, |
83 | struct nouveau_oclass *oclass, int length, void **pobject) | 83 | struct nouveau_oclass *oclass, |
84 | const struct nouveau_mc_intr *intr_map, | ||
85 | int length, void **pobject) | ||
84 | { | 86 | { |
85 | struct nouveau_device *device = nv_device(parent); | 87 | struct nouveau_device *device = nv_device(parent); |
86 | struct nouveau_mc *pmc; | 88 | struct nouveau_mc *pmc; |
@@ -92,6 +94,8 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, | |||
92 | if (ret) | 94 | if (ret) |
93 | return ret; | 95 | return ret; |
94 | 96 | ||
97 | pmc->intr_map = intr_map; | ||
98 | |||
95 | ret = request_irq(device->pdev->irq, nouveau_mc_intr, | 99 | ret = request_irq(device->pdev->irq, nouveau_mc_intr, |
96 | IRQF_SHARED, "nouveau", pmc); | 100 | IRQF_SHARED, "nouveau", pmc); |
97 | if (ret < 0) | 101 | if (ret < 0) |
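[Editor's note] The constructor change above has an ordering reason: request_irq() is called with IRQF_SHARED from this same function, so the handler can fire before the chip-specific ctors (nv04/nv44/nv50/nv98/nvc0 below) ever got around to assigning pmc->intr_map, and would then dereference a NULL table. Passing the table into nouveau_mc_create_() guarantees it is set before the IRQ is requested. The generic shape of that rule (hypothetical dev_* names; only the request_irq() signature is the real kernel API):

    #include <linux/interrupt.h>

    struct dev_ctx {
            const u32 *intr_map;
    };

    static const u32 dev_intr_map[] = { 0 };        /* bit -> engine routing */

    static irqreturn_t dev_intr(int irq, void *data)
    {
            struct dev_ctx *ctx = data;

            if (!ctx->intr_map)                     /* the window being closed */
                    return IRQ_NONE;
            /* ... dispatch according to ctx->intr_map ... */
            return IRQ_HANDLED;
    }

    static int dev_probe(struct dev_ctx *ctx, unsigned int irq)
    {
            /* everything the handler touches must be valid before this call */
            ctx->intr_map = dev_intr_map;
            return request_irq(irq, dev_intr, IRQF_SHARED, "dev", ctx);
    }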
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c index 8c769715227b..64aa4edb0d9d 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c | |||
@@ -50,12 +50,11 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
50 | struct nv04_mc_priv *priv; | 50 | struct nv04_mc_priv *priv; |
51 | int ret; | 51 | int ret; |
52 | 52 | ||
53 | ret = nouveau_mc_create(parent, engine, oclass, &priv); | 53 | ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv); |
54 | *pobject = nv_object(priv); | 54 | *pobject = nv_object(priv); |
55 | if (ret) | 55 | if (ret) |
56 | return ret; | 56 | return ret; |
57 | 57 | ||
58 | priv->base.intr_map = nv04_mc_intr; | ||
59 | return 0; | 58 | return 0; |
60 | } | 59 | } |
61 | 60 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c index 51919371810f..d9891782bf28 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c | |||
@@ -36,12 +36,11 @@ nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
36 | struct nv44_mc_priv *priv; | 36 | struct nv44_mc_priv *priv; |
37 | int ret; | 37 | int ret; |
38 | 38 | ||
39 | ret = nouveau_mc_create(parent, engine, oclass, &priv); | 39 | ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv); |
40 | *pobject = nv_object(priv); | 40 | *pobject = nv_object(priv); |
41 | if (ret) | 41 | if (ret) |
42 | return ret; | 42 | return ret; |
43 | 43 | ||
44 | priv->base.intr_map = nv04_mc_intr; | ||
45 | return 0; | 44 | return 0; |
46 | } | 45 | } |
47 | 46 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c index 0cb322a5e72c..2b1afe225db8 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c | |||
@@ -41,7 +41,7 @@ nv50_mc_intr[] = { | |||
41 | { 0x04000000, NVDEV_ENGINE_DISP }, | 41 | { 0x04000000, NVDEV_ENGINE_DISP }, |
42 | { 0x10000000, NVDEV_SUBDEV_BUS }, | 42 | { 0x10000000, NVDEV_SUBDEV_BUS }, |
43 | { 0x80000000, NVDEV_ENGINE_SW }, | 43 | { 0x80000000, NVDEV_ENGINE_SW }, |
44 | { 0x0000d101, NVDEV_SUBDEV_FB }, | 44 | { 0x0002d101, NVDEV_SUBDEV_FB }, |
45 | {}, | 45 | {}, |
46 | }; | 46 | }; |
47 | 47 | ||
@@ -53,12 +53,11 @@ nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
53 | struct nv50_mc_priv *priv; | 53 | struct nv50_mc_priv *priv; |
54 | int ret; | 54 | int ret; |
55 | 55 | ||
56 | ret = nouveau_mc_create(parent, engine, oclass, &priv); | 56 | ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv); |
57 | *pobject = nv_object(priv); | 57 | *pobject = nv_object(priv); |
58 | if (ret) | 58 | if (ret) |
59 | return ret; | 59 | return ret; |
60 | 60 | ||
61 | priv->base.intr_map = nv50_mc_intr; | ||
62 | return 0; | 61 | return 0; |
63 | } | 62 | } |
64 | 63 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c index e82fd21b5041..0d57b4d3e001 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c | |||
@@ -54,12 +54,11 @@ nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
54 | struct nv98_mc_priv *priv; | 54 | struct nv98_mc_priv *priv; |
55 | int ret; | 55 | int ret; |
56 | 56 | ||
57 | ret = nouveau_mc_create(parent, engine, oclass, &priv); | 57 | ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv); |
58 | *pobject = nv_object(priv); | 58 | *pobject = nv_object(priv); |
59 | if (ret) | 59 | if (ret) |
60 | return ret; | 60 | return ret; |
61 | 61 | ||
62 | priv->base.intr_map = nv98_mc_intr; | ||
63 | return 0; | 62 | return 0; |
64 | } | 63 | } |
65 | 64 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c index c5da3babbc62..104175c5a2dd 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c | |||
@@ -57,12 +57,11 @@ nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
57 | struct nvc0_mc_priv *priv; | 57 | struct nvc0_mc_priv *priv; |
58 | int ret; | 58 | int ret; |
59 | 59 | ||
60 | ret = nouveau_mc_create(parent, engine, oclass, &priv); | 60 | ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv); |
61 | *pobject = nv_object(priv); | 61 | *pobject = nv_object(priv); |
62 | if (ret) | 62 | if (ret) |
63 | return ret; | 63 | return ret; |
64 | 64 | ||
65 | priv->base.intr_map = nvc0_mc_intr; | ||
66 | return 0; | 65 | return 0; |
67 | } | 66 | } |
68 | 67 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c index 67fcb6c852ac..ef3133e7575c 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c | |||
@@ -361,7 +361,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, | |||
361 | 361 | ||
362 | INIT_LIST_HEAD(&vm->pgd_list); | 362 | INIT_LIST_HEAD(&vm->pgd_list); |
363 | vm->vmm = vmm; | 363 | vm->vmm = vmm; |
364 | vm->refcount = 1; | 364 | kref_init(&vm->refcount); |
365 | vm->fpde = offset >> (vmm->pgt_bits + 12); | 365 | vm->fpde = offset >> (vmm->pgt_bits + 12); |
366 | vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12); | 366 | vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12); |
367 | 367 | ||
@@ -441,8 +441,9 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd) | |||
441 | } | 441 | } |
442 | 442 | ||
443 | static void | 443 | static void |
444 | nouveau_vm_del(struct nouveau_vm *vm) | 444 | nouveau_vm_del(struct kref *kref) |
445 | { | 445 | { |
446 | struct nouveau_vm *vm = container_of(kref, typeof(*vm), refcount); | ||
446 | struct nouveau_vm_pgd *vpgd, *tmp; | 447 | struct nouveau_vm_pgd *vpgd, *tmp; |
447 | 448 | ||
448 | list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { | 449 | list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { |
@@ -458,27 +459,19 @@ int | |||
458 | nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr, | 459 | nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr, |
459 | struct nouveau_gpuobj *pgd) | 460 | struct nouveau_gpuobj *pgd) |
460 | { | 461 | { |
461 | struct nouveau_vm *vm; | 462 | if (ref) { |
462 | int ret; | 463 | int ret = nouveau_vm_link(ref, pgd); |
463 | |||
464 | vm = ref; | ||
465 | if (vm) { | ||
466 | ret = nouveau_vm_link(vm, pgd); | ||
467 | if (ret) | 464 | if (ret) |
468 | return ret; | 465 | return ret; |
469 | 466 | ||
470 | vm->refcount++; | 467 | kref_get(&ref->refcount); |
471 | } | 468 | } |
472 | 469 | ||
473 | vm = *ptr; | 470 | if (*ptr) { |
474 | *ptr = ref; | 471 | nouveau_vm_unlink(*ptr, pgd); |
475 | 472 | kref_put(&(*ptr)->refcount, nouveau_vm_del); | |
476 | if (vm) { | ||
477 | nouveau_vm_unlink(vm, pgd); | ||
478 | |||
479 | if (--vm->refcount == 0) | ||
480 | nouveau_vm_del(vm); | ||
481 | } | 473 | } |
482 | 474 | ||
475 | *ptr = ref; | ||
483 | return 0; | 476 | return 0; |
484 | } | 477 | } |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c index 0782bd2f1e04..6a13ffb53bdb 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c +++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c | |||
@@ -606,6 +606,24 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) | |||
606 | regp->ramdac_a34 = 0x1; | 606 | regp->ramdac_a34 = 0x1; |
607 | } | 607 | } |
608 | 608 | ||
609 | static int | ||
610 | nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) | ||
611 | { | ||
612 | struct nv04_display *disp = nv04_display(crtc->dev); | ||
613 | struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); | ||
614 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
615 | int ret; | ||
616 | |||
617 | ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); | ||
618 | if (ret == 0) { | ||
619 | if (disp->image[nv_crtc->index]) | ||
620 | nouveau_bo_unpin(disp->image[nv_crtc->index]); | ||
621 | nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]); | ||
622 | } | ||
623 | |||
624 | return ret; | ||
625 | } | ||
626 | |||
609 | /** | 627 | /** |
610 | * Sets up registers for the given mode/adjusted_mode pair. | 628 | * Sets up registers for the given mode/adjusted_mode pair. |
611 | * | 629 | * |
@@ -622,10 +640,15 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
622 | struct drm_device *dev = crtc->dev; | 640 | struct drm_device *dev = crtc->dev; |
623 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 641 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
624 | struct nouveau_drm *drm = nouveau_drm(dev); | 642 | struct nouveau_drm *drm = nouveau_drm(dev); |
643 | int ret; | ||
625 | 644 | ||
626 | NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index); | 645 | NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index); |
627 | drm_mode_debug_printmodeline(adjusted_mode); | 646 | drm_mode_debug_printmodeline(adjusted_mode); |
628 | 647 | ||
648 | ret = nv_crtc_swap_fbs(crtc, old_fb); | ||
649 | if (ret) | ||
650 | return ret; | ||
651 | |||
629 | /* unlock must come after turning off FP_TG_CONTROL in output_prepare */ | 652 | /* unlock must come after turning off FP_TG_CONTROL in output_prepare */ |
630 | nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1); | 653 | nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1); |
631 | 654 | ||
@@ -722,6 +745,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc) | |||
722 | 745 | ||
723 | static void nv_crtc_destroy(struct drm_crtc *crtc) | 746 | static void nv_crtc_destroy(struct drm_crtc *crtc) |
724 | { | 747 | { |
748 | struct nv04_display *disp = nv04_display(crtc->dev); | ||
725 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 749 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
726 | 750 | ||
727 | if (!nv_crtc) | 751 | if (!nv_crtc) |
@@ -729,6 +753,10 @@ static void nv_crtc_destroy(struct drm_crtc *crtc) | |||
729 | 753 | ||
730 | drm_crtc_cleanup(crtc); | 754 | drm_crtc_cleanup(crtc); |
731 | 755 | ||
756 | if (disp->image[nv_crtc->index]) | ||
757 | nouveau_bo_unpin(disp->image[nv_crtc->index]); | ||
758 | nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]); | ||
759 | |||
732 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); | 760 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); |
733 | nouveau_bo_unpin(nv_crtc->cursor.nvbo); | 761 | nouveau_bo_unpin(nv_crtc->cursor.nvbo); |
734 | nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); | 762 | nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); |
@@ -754,6 +782,16 @@ nv_crtc_gamma_load(struct drm_crtc *crtc) | |||
754 | } | 782 | } |
755 | 783 | ||
756 | static void | 784 | static void |
785 | nv_crtc_disable(struct drm_crtc *crtc) | ||
786 | { | ||
787 | struct nv04_display *disp = nv04_display(crtc->dev); | ||
788 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
789 | if (disp->image[nv_crtc->index]) | ||
790 | nouveau_bo_unpin(disp->image[nv_crtc->index]); | ||
791 | nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]); | ||
792 | } | ||
793 | |||
794 | static void | ||
757 | nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, | 795 | nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, |
758 | uint32_t size) | 796 | uint32_t size) |
759 | { | 797 | { |
@@ -791,7 +829,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
791 | struct drm_framebuffer *drm_fb; | 829 | struct drm_framebuffer *drm_fb; |
792 | struct nouveau_framebuffer *fb; | 830 | struct nouveau_framebuffer *fb; |
793 | int arb_burst, arb_lwm; | 831 | int arb_burst, arb_lwm; |
794 | int ret; | ||
795 | 832 | ||
796 | NV_DEBUG(drm, "index %d\n", nv_crtc->index); | 833 | NV_DEBUG(drm, "index %d\n", nv_crtc->index); |
797 | 834 | ||
@@ -801,10 +838,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
801 | return 0; | 838 | return 0; |
802 | } | 839 | } |
803 | 840 | ||
804 | |||
805 | /* If atomic, we want to switch to the fb we were passed, so | 841 | /* If atomic, we want to switch to the fb we were passed, so |
806 | * now we update pointers to do that. (We don't pin; just | 842 | * now we update pointers to do that. |
807 | * assume we're already pinned and update the base address.) | ||
808 | */ | 843 | */ |
809 | if (atomic) { | 844 | if (atomic) { |
810 | drm_fb = passed_fb; | 845 | drm_fb = passed_fb; |
@@ -812,17 +847,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
812 | } else { | 847 | } else { |
813 | drm_fb = crtc->fb; | 848 | drm_fb = crtc->fb; |
814 | fb = nouveau_framebuffer(crtc->fb); | 849 | fb = nouveau_framebuffer(crtc->fb); |
815 | /* If not atomic, we can go ahead and pin, and unpin the | ||
816 | * old fb we were passed. | ||
817 | */ | ||
818 | ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM); | ||
819 | if (ret) | ||
820 | return ret; | ||
821 | |||
822 | if (passed_fb) { | ||
823 | struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb); | ||
824 | nouveau_bo_unpin(ofb->nvbo); | ||
825 | } | ||
826 | } | 850 | } |
827 | 851 | ||
828 | nv_crtc->fb.offset = fb->nvbo->bo.offset; | 852 | nv_crtc->fb.offset = fb->nvbo->bo.offset; |
@@ -877,6 +901,9 @@ static int | |||
877 | nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, | 901 | nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, |
878 | struct drm_framebuffer *old_fb) | 902 | struct drm_framebuffer *old_fb) |
879 | { | 903 | { |
904 | int ret = nv_crtc_swap_fbs(crtc, old_fb); | ||
905 | if (ret) | ||
906 | return ret; | ||
880 | return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false); | 907 | return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false); |
881 | } | 908 | } |
882 | 909 | ||
@@ -1027,6 +1054,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = { | |||
1027 | .mode_set_base = nv04_crtc_mode_set_base, | 1054 | .mode_set_base = nv04_crtc_mode_set_base, |
1028 | .mode_set_base_atomic = nv04_crtc_mode_set_base_atomic, | 1055 | .mode_set_base_atomic = nv04_crtc_mode_set_base_atomic, |
1029 | .load_lut = nv_crtc_gamma_load, | 1056 | .load_lut = nv_crtc_gamma_load, |
1057 | .disable = nv_crtc_disable, | ||
1030 | }; | 1058 | }; |
1031 | 1059 | ||
1032 | int | 1060 | int |
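Note on the dispnv04/crtc.c changes above: the legacy CRTC code now remembers which buffer object is being scanned out (disp->image[index]), pins it when it takes ownership in nv_crtc_swap_fbs(), and drops the pin from the new .disable hook or in nv_crtc_destroy(), instead of pinning and unpinning inside nv04_crtc_do_mode_set_base(). A minimal sketch of that ownership pattern follows; "buf" and its helpers are stand-ins, not the nouveau_bo API, and the nouveau_bo_ref() reference counting is omitted for brevity.

/* Sketch of the per-CRTC scanout ownership pattern (stand-in types). */
#include <stddef.h>

struct buf { int pin_refs; };

static int  buf_pin(struct buf *b)   { b->pin_refs++; return 0; }
static void buf_unpin(struct buf *b) { b->pin_refs--; }

/* disp->image[index] analogue: the buffer currently owned by the display */
static int swap_scanout(struct buf **owner, struct buf *new_fb)
{
        int ret = buf_pin(new_fb);        /* pin before taking ownership    */
        if (ret == 0) {
                if (*owner)
                        buf_unpin(*owner);/* previous scanout loses its pin */
                *owner = new_fb;          /* display now owns new_fb        */
        }
        return ret;
}

/* .disable / .destroy analogue: drop whatever the CRTC still owns */
static void release_scanout(struct buf **owner)
{
        if (*owner)
                buf_unpin(*owner);
        *owner = NULL;
}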
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h index a0a031dad13f..9928187f0a7d 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.h +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h | |||
@@ -81,6 +81,7 @@ struct nv04_display { | |||
81 | uint32_t saved_vga_font[4][16384]; | 81 | uint32_t saved_vga_font[4][16384]; |
82 | uint32_t dac_users[4]; | 82 | uint32_t dac_users[4]; |
83 | struct nouveau_object *core; | 83 | struct nouveau_object *core; |
84 | struct nouveau_bo *image[2]; | ||
84 | }; | 85 | }; |
85 | 86 | ||
86 | static inline struct nv04_display * | 87 | static inline struct nv04_display * |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 4b1afb131380..af20fba3a1a4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -148,6 +148,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) | |||
148 | 148 | ||
149 | if (unlikely(nvbo->gem)) | 149 | if (unlikely(nvbo->gem)) |
150 | DRM_ERROR("bo %p still attached to GEM object\n", bo); | 150 | DRM_ERROR("bo %p still attached to GEM object\n", bo); |
151 | WARN_ON(nvbo->pin_refcnt > 0); | ||
151 | nv10_bo_put_tile_region(dev, nvbo->tile, NULL); | 152 | nv10_bo_put_tile_region(dev, nvbo->tile, NULL); |
152 | kfree(nvbo); | 153 | kfree(nvbo); |
153 | } | 154 | } |
@@ -197,6 +198,17 @@ nouveau_bo_new(struct drm_device *dev, int size, int align, | |||
197 | size_t acc_size; | 198 | size_t acc_size; |
198 | int ret; | 199 | int ret; |
199 | int type = ttm_bo_type_device; | 200 | int type = ttm_bo_type_device; |
201 | int lpg_shift = 12; | ||
202 | int max_size; | ||
203 | |||
204 | if (drm->client.base.vm) | ||
205 | lpg_shift = drm->client.base.vm->vmm->lpg_shift; | ||
206 | max_size = INT_MAX & ~((1 << lpg_shift) - 1); | ||
207 | |||
208 | if (size <= 0 || size > max_size) { | ||
209 | nv_warn(drm, "skipped size %x\n", (u32)size); | ||
210 | return -EINVAL; | ||
211 | } | ||
200 | 212 | ||
201 | if (sg) | 213 | if (sg) |
202 | type = ttm_bo_type_sg; | 214 | type = ttm_bo_type_sg; |
@@ -340,13 +352,15 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) | |||
340 | { | 352 | { |
341 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); | 353 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
342 | struct ttm_buffer_object *bo = &nvbo->bo; | 354 | struct ttm_buffer_object *bo = &nvbo->bo; |
343 | int ret; | 355 | int ret, ref; |
344 | 356 | ||
345 | ret = ttm_bo_reserve(bo, false, false, false, 0); | 357 | ret = ttm_bo_reserve(bo, false, false, false, 0); |
346 | if (ret) | 358 | if (ret) |
347 | return ret; | 359 | return ret; |
348 | 360 | ||
349 | if (--nvbo->pin_refcnt) | 361 | ref = --nvbo->pin_refcnt; |
362 | WARN_ON_ONCE(ref < 0); | ||
363 | if (ref) | ||
350 | goto out; | 364 | goto out; |
351 | 365 | ||
352 | nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); | 366 | nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); |
@@ -578,7 +592,7 @@ nve0_bo_move_init(struct nouveau_channel *chan, u32 handle) | |||
578 | int ret = RING_SPACE(chan, 2); | 592 | int ret = RING_SPACE(chan, 2); |
579 | if (ret == 0) { | 593 | if (ret == 0) { |
580 | BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); | 594 | BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); |
581 | OUT_RING (chan, handle); | 595 | OUT_RING (chan, handle & 0x0000ffff); |
582 | FIRE_RING (chan); | 596 | FIRE_RING (chan); |
583 | } | 597 | } |
584 | return ret; | 598 | return ret; |
@@ -973,7 +987,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, | |||
973 | struct ttm_mem_reg *old_mem = &bo->mem; | 987 | struct ttm_mem_reg *old_mem = &bo->mem; |
974 | int ret; | 988 | int ret; |
975 | 989 | ||
976 | mutex_lock(&chan->cli->mutex); | 990 | mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING); |
977 | 991 | ||
978 | /* create temporary vmas for the transfer and attach them to the | 992 | /* create temporary vmas for the transfer and attach them to the |
979 | * old nouveau_mem node, these will get cleaned up after ttm has | 993 | * old nouveau_mem node, these will get cleaned up after ttm has |
@@ -1014,7 +1028,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) | |||
1014 | struct ttm_mem_reg *, struct ttm_mem_reg *); | 1028 | struct ttm_mem_reg *, struct ttm_mem_reg *); |
1015 | int (*init)(struct nouveau_channel *, u32 handle); | 1029 | int (*init)(struct nouveau_channel *, u32 handle); |
1016 | } _methods[] = { | 1030 | } _methods[] = { |
1017 | { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init }, | 1031 | { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init }, |
1018 | { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init }, | 1032 | { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init }, |
1019 | { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init }, | 1033 | { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init }, |
1020 | { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init }, | 1034 | { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init }, |
@@ -1034,7 +1048,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) | |||
1034 | struct nouveau_channel *chan; | 1048 | struct nouveau_channel *chan; |
1035 | u32 handle = (mthd->engine << 16) | mthd->oclass; | 1049 | u32 handle = (mthd->engine << 16) | mthd->oclass; |
1036 | 1050 | ||
1037 | if (mthd->init == nve0_bo_move_init) | 1051 | if (mthd->engine) |
1038 | chan = drm->cechan; | 1052 | chan = drm->cechan; |
1039 | else | 1053 | else |
1040 | chan = drm->channel; | 1054 | chan = drm->channel; |
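Note on the nouveau_bo.c changes above: nouveau_bo_unpin() now warns once if the pin count would go negative, and nouveau_bo_new() rejects sizes that are non-positive or larger than INT_MAX rounded down to the VM's large-page granularity, so the later round-up cannot overflow. A rough illustration of the size clamp; the shift of 12 (4 KiB pages) is only the fallback used when no VM is bound, a bound VM may report a larger lpg_shift.

#include <limits.h>
#include <stdio.h>

int main(void)
{
        int lpg_shift = 12;                            /* fallback: 4 KiB pages */
        int max_size  = INT_MAX & ~((1 << lpg_shift) - 1);

        printf("largest accepted bo size: 0x%x\n", max_size);
        /* requests with size <= 0 or size > max_size now fail with -EINVAL */
        return 0;
}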
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 708b2d1c0037..a03e75deacaf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
@@ -138,7 +138,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev, | |||
138 | { | 138 | { |
139 | struct nouveau_framebuffer *nouveau_fb; | 139 | struct nouveau_framebuffer *nouveau_fb; |
140 | struct drm_gem_object *gem; | 140 | struct drm_gem_object *gem; |
141 | int ret; | 141 | int ret = -ENOMEM; |
142 | 142 | ||
143 | gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); | 143 | gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); |
144 | if (!gem) | 144 | if (!gem) |
@@ -146,15 +146,19 @@ nouveau_user_framebuffer_create(struct drm_device *dev, | |||
146 | 146 | ||
147 | nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL); | 147 | nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL); |
148 | if (!nouveau_fb) | 148 | if (!nouveau_fb) |
149 | return ERR_PTR(-ENOMEM); | 149 | goto err_unref; |
150 | 150 | ||
151 | ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem)); | 151 | ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem)); |
152 | if (ret) { | 152 | if (ret) |
153 | drm_gem_object_unreference(gem); | 153 | goto err; |
154 | return ERR_PTR(ret); | ||
155 | } | ||
156 | 154 | ||
157 | return &nouveau_fb->base; | 155 | return &nouveau_fb->base; |
156 | |||
157 | err: | ||
158 | kfree(nouveau_fb); | ||
159 | err_unref: | ||
160 | drm_gem_object_unreference(gem); | ||
161 | return ERR_PTR(ret); | ||
158 | } | 162 | } |
159 | 163 | ||
160 | static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { | 164 | static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { |
@@ -524,9 +528,12 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
524 | struct nouveau_page_flip_state *s; | 528 | struct nouveau_page_flip_state *s; |
525 | struct nouveau_channel *chan = NULL; | 529 | struct nouveau_channel *chan = NULL; |
526 | struct nouveau_fence *fence; | 530 | struct nouveau_fence *fence; |
527 | struct list_head res; | 531 | struct ttm_validate_buffer resv[2] = { |
528 | struct ttm_validate_buffer res_val[2]; | 532 | { .bo = &old_bo->bo }, |
533 | { .bo = &new_bo->bo }, | ||
534 | }; | ||
529 | struct ww_acquire_ctx ticket; | 535 | struct ww_acquire_ctx ticket; |
536 | LIST_HEAD(res); | ||
530 | int ret; | 537 | int ret; |
531 | 538 | ||
532 | if (!drm->channel) | 539 | if (!drm->channel) |
@@ -545,27 +552,19 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
545 | chan = drm->channel; | 552 | chan = drm->channel; |
546 | spin_unlock(&old_bo->bo.bdev->fence_lock); | 553 | spin_unlock(&old_bo->bo.bdev->fence_lock); |
547 | 554 | ||
548 | mutex_lock(&chan->cli->mutex); | ||
549 | |||
550 | if (new_bo != old_bo) { | 555 | if (new_bo != old_bo) { |
551 | ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); | 556 | ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); |
552 | if (likely(!ret)) { | 557 | if (ret) |
553 | res_val[0].bo = &old_bo->bo; | 558 | goto fail_free; |
554 | res_val[1].bo = &new_bo->bo; | ||
555 | INIT_LIST_HEAD(&res); | ||
556 | list_add_tail(&res_val[0].head, &res); | ||
557 | list_add_tail(&res_val[1].head, &res); | ||
558 | ret = ttm_eu_reserve_buffers(&ticket, &res); | ||
559 | if (ret) | ||
560 | nouveau_bo_unpin(new_bo); | ||
561 | } | ||
562 | } else | ||
563 | ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0); | ||
564 | 559 | ||
565 | if (ret) { | 560 | list_add(&resv[1].head, &res); |
566 | mutex_unlock(&chan->cli->mutex); | ||
567 | goto fail_free; | ||
568 | } | 561 | } |
562 | list_add(&resv[0].head, &res); | ||
563 | |||
564 | mutex_lock(&chan->cli->mutex); | ||
565 | ret = ttm_eu_reserve_buffers(&ticket, &res); | ||
566 | if (ret) | ||
567 | goto fail_unpin; | ||
569 | 568 | ||
570 | /* Initialize a page flip struct */ | 569 | /* Initialize a page flip struct */ |
571 | *s = (struct nouveau_page_flip_state) | 570 | *s = (struct nouveau_page_flip_state) |
@@ -576,10 +575,11 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
576 | /* Emit a page flip */ | 575 | /* Emit a page flip */ |
577 | if (nv_device(drm->device)->card_type >= NV_50) { | 576 | if (nv_device(drm->device)->card_type >= NV_50) { |
578 | ret = nv50_display_flip_next(crtc, fb, chan, 0); | 577 | ret = nv50_display_flip_next(crtc, fb, chan, 0); |
579 | if (ret) { | 578 | if (ret) |
580 | mutex_unlock(&chan->cli->mutex); | ||
581 | goto fail_unreserve; | 579 | goto fail_unreserve; |
582 | } | 580 | } else { |
581 | struct nv04_display *dispnv04 = nv04_display(dev); | ||
582 | nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]); | ||
583 | } | 583 | } |
584 | 584 | ||
585 | ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); | 585 | ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); |
@@ -590,22 +590,18 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
590 | /* Update the crtc struct and cleanup */ | 590 | /* Update the crtc struct and cleanup */ |
591 | crtc->fb = fb; | 591 | crtc->fb = fb; |
592 | 592 | ||
593 | if (old_bo != new_bo) { | 593 | ttm_eu_fence_buffer_objects(&ticket, &res, fence); |
594 | ttm_eu_fence_buffer_objects(&ticket, &res, fence); | 594 | if (old_bo != new_bo) |
595 | nouveau_bo_unpin(old_bo); | 595 | nouveau_bo_unpin(old_bo); |
596 | } else { | ||
597 | nouveau_bo_fence(new_bo, fence); | ||
598 | ttm_bo_unreserve(&new_bo->bo); | ||
599 | } | ||
600 | nouveau_fence_unref(&fence); | 596 | nouveau_fence_unref(&fence); |
601 | return 0; | 597 | return 0; |
602 | 598 | ||
603 | fail_unreserve: | 599 | fail_unreserve: |
604 | if (old_bo != new_bo) { | 600 | ttm_eu_backoff_reservation(&ticket, &res); |
605 | ttm_eu_backoff_reservation(&ticket, &res); | 601 | fail_unpin: |
602 | mutex_unlock(&chan->cli->mutex); | ||
603 | if (old_bo != new_bo) | ||
606 | nouveau_bo_unpin(new_bo); | 604 | nouveau_bo_unpin(new_bo); |
607 | } else | ||
608 | ttm_bo_unreserve(&new_bo->bo); | ||
609 | fail_free: | 605 | fail_free: |
610 | kfree(s); | 606 | kfree(s); |
611 | return ret; | 607 | return ret; |
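Note on the nouveau_display.c page-flip rework above: the old "reserve only if the buffers differ" special case is replaced by a single path that pins the new buffer, puts both buffers on one reservation list, takes the channel mutex, reserves with ttm_eu_reserve_buffers(), and unwinds through fail_unreserve / fail_unpin / fail_free in reverse order on error. The stubbed-out sketch below only illustrates that unwind shape; the helpers are not the driver's functions and the exact lock lifetime on the success path is simplified.

static int  pin_new(void)      { return 0; }
static void unpin_new(void)    { }
static void lock_chan(void)    { }
static void unlock_chan(void)  { }
static int  reserve_list(void) { return 0; }
static void backoff_list(void) { }
static int  emit_flip(void)    { return 0; }
static void free_state(void)   { }

static int do_flip(void)
{
        int ret;

        ret = pin_new();              /* 1. pin the incoming scanout       */
        if (ret)
                goto fail_free;

        lock_chan();                  /* 2. channel mutex                  */
        ret = reserve_list();         /* 3. reserve old + new bo together  */
        if (ret)
                goto fail_unpin;

        ret = emit_flip();            /* 4. queue the flip, fence buffers  */
        if (ret)
                goto fail_unreserve;

        unlock_chan();
        return 0;

fail_unreserve:
        backoff_list();
fail_unpin:
        unlock_chan();
        unpin_new();
fail_free:
        free_state();
        return ret;
}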
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 218a4b522fe5..61972668fd05 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
@@ -192,6 +192,18 @@ nouveau_accel_init(struct nouveau_drm *drm) | |||
192 | 192 | ||
193 | arg0 = NVE0_CHANNEL_IND_ENGINE_GR; | 193 | arg0 = NVE0_CHANNEL_IND_ENGINE_GR; |
194 | arg1 = 1; | 194 | arg1 = 1; |
195 | } else | ||
196 | if (device->chipset >= 0xa3 && | ||
197 | device->chipset != 0xaa && | ||
198 | device->chipset != 0xac) { | ||
199 | ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, | ||
200 | NVDRM_CHAN + 1, NvDmaFB, NvDmaTT, | ||
201 | &drm->cechan); | ||
202 | if (ret) | ||
203 | NV_ERROR(drm, "failed to create ce channel, %d\n", ret); | ||
204 | |||
205 | arg0 = NvDmaFB; | ||
206 | arg1 = NvDmaTT; | ||
195 | } else { | 207 | } else { |
196 | arg0 = NvDmaFB; | 208 | arg0 = NvDmaFB; |
197 | arg1 = NvDmaTT; | 209 | arg1 = NvDmaTT; |
@@ -284,8 +296,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev, | |||
284 | return 0; | 296 | return 0; |
285 | } | 297 | } |
286 | 298 | ||
287 | static struct lock_class_key drm_client_lock_class_key; | ||
288 | |||
289 | static int | 299 | static int |
290 | nouveau_drm_load(struct drm_device *dev, unsigned long flags) | 300 | nouveau_drm_load(struct drm_device *dev, unsigned long flags) |
291 | { | 301 | { |
@@ -297,7 +307,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | |||
297 | ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm); | 307 | ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm); |
298 | if (ret) | 308 | if (ret) |
299 | return ret; | 309 | return ret; |
300 | lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key); | ||
301 | 310 | ||
302 | dev->dev_private = drm; | 311 | dev->dev_private = drm; |
303 | drm->dev = dev; | 312 | drm->dev = dev; |
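Note on the nouveau_drm.c hunk above: a separate channel (drm->cechan) is now created for the copy engine on nv50-family chipsets from 0xa3 up, excluding 0xaa and 0xac which lack the engine, and the lockdep class-key hack is dropped now that the buffer-move path uses mutex_lock_nested(). Combined with the nouveau_bo.c change that selects the channel from mthd->engine, move methods pick their channel roughly as sketched below, with stand-in types rather than the driver's structures.

struct chan;     /* opaque stand-in for struct nouveau_channel */

struct move_method {
        const char *name;
        int         engine;   /* non-zero: runs on a dedicated copy engine */
};

static struct chan *
pick_move_channel(const struct move_method *m,
                  struct chan *cechan, struct chan *channel)
{
        /* copy-engine methods use the dedicated cechan, everything else
         * shares the general-purpose channel */
        return m->engine ? cechan : channel;
}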
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 9352010030e9..8f6d63d7edd3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
@@ -385,6 +385,7 @@ out_unlock: | |||
385 | mutex_unlock(&dev->struct_mutex); | 385 | mutex_unlock(&dev->struct_mutex); |
386 | if (chan) | 386 | if (chan) |
387 | nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma); | 387 | nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma); |
388 | nouveau_bo_unmap(nvbo); | ||
388 | out_unpin: | 389 | out_unpin: |
389 | nouveau_bo_unpin(nvbo); | 390 | nouveau_bo_unpin(nvbo); |
390 | out_unref: | 391 | out_unref: |
@@ -397,7 +398,8 @@ void | |||
397 | nouveau_fbcon_output_poll_changed(struct drm_device *dev) | 398 | nouveau_fbcon_output_poll_changed(struct drm_device *dev) |
398 | { | 399 | { |
399 | struct nouveau_drm *drm = nouveau_drm(dev); | 400 | struct nouveau_drm *drm = nouveau_drm(dev); |
400 | drm_fb_helper_hotplug_event(&drm->fbcon->helper); | 401 | if (drm->fbcon) |
402 | drm_fb_helper_hotplug_event(&drm->fbcon->helper); | ||
401 | } | 403 | } |
402 | 404 | ||
403 | static int | 405 | static int |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 1680d9187bab..be3149932c2d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c | |||
@@ -143,7 +143,7 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan) | |||
143 | int ret; | 143 | int ret; |
144 | 144 | ||
145 | fence->channel = chan; | 145 | fence->channel = chan; |
146 | fence->timeout = jiffies + (3 * DRM_HZ); | 146 | fence->timeout = jiffies + (15 * DRM_HZ); |
147 | fence->sequence = ++fctx->sequence; | 147 | fence->sequence = ++fctx->sequence; |
148 | 148 | ||
149 | ret = fctx->emit(fence); | 149 | ret = fctx->emit(fence); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index e72d09c068a8..830cb7bad922 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
@@ -50,12 +50,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem) | |||
50 | return; | 50 | return; |
51 | nvbo->gem = NULL; | 51 | nvbo->gem = NULL; |
52 | 52 | ||
53 | /* Lockdep hates you for doing reserve with gem object lock held */ | ||
54 | if (WARN_ON_ONCE(nvbo->pin_refcnt)) { | ||
55 | nvbo->pin_refcnt = 1; | ||
56 | nouveau_bo_unpin(nvbo); | ||
57 | } | ||
58 | |||
59 | if (gem->import_attach) | 53 | if (gem->import_attach) |
60 | drm_prime_gem_destroy(gem, nvbo->bo.sg); | 54 | drm_prime_gem_destroy(gem, nvbo->bo.sg); |
61 | 55 | ||
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c index 8e47a9bae8c3..22aa9963ea6f 100644 --- a/drivers/gpu/drm/nouveau/nv17_fence.c +++ b/drivers/gpu/drm/nouveau/nv17_fence.c | |||
@@ -76,7 +76,7 @@ nv17_fence_context_new(struct nouveau_channel *chan) | |||
76 | struct ttm_mem_reg *mem = &priv->bo->bo.mem; | 76 | struct ttm_mem_reg *mem = &priv->bo->bo.mem; |
77 | struct nouveau_object *object; | 77 | struct nouveau_object *object; |
78 | u32 start = mem->start * PAGE_SIZE; | 78 | u32 start = mem->start * PAGE_SIZE; |
79 | u32 limit = mem->start + mem->size - 1; | 79 | u32 limit = start + mem->size - 1; |
80 | int ret = 0; | 80 | int ret = 0; |
81 | 81 | ||
82 | fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); | 82 | fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); |
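Note on the nv17_fence.c one-liner above: the DMA window limit changes from mem->start + mem->size - 1 to start + mem->size - 1, where start = mem->start * PAGE_SIZE, so the expression no longer adds a page index to a byte count. A worked example with illustrative values, assuming 4 KiB pages:

        mem->start = 0x20 pages,  mem->size = 0x1000 bytes

        start      = 0x20 * 0x1000        = 0x20000
        old limit  = 0x20 + 0x1000 - 1    =  0x101f   (below start)
        new limit  = 0x20000 + 0x1000 - 1 = 0x20fff   (covers the buffer)

The nv50_fence.c hunk further below applies the same start/limit treatment to its semaphore DMA objects.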
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c index 3af5bcd0b203..625f80d53dc2 100644 --- a/drivers/gpu/drm/nouveau/nv40_pm.c +++ b/drivers/gpu/drm/nouveau/nv40_pm.c | |||
@@ -131,7 +131,7 @@ nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll, | |||
131 | if (clk < pll->vco1.max_freq) | 131 | if (clk < pll->vco1.max_freq) |
132 | pll->vco2.max_freq = 0; | 132 | pll->vco2.max_freq = 0; |
133 | 133 | ||
134 | pclk->pll_calc(pclk, pll, clk, &coef); | 134 | ret = pclk->pll_calc(pclk, pll, clk, &coef); |
135 | if (ret == 0) | 135 | if (ret == 0) |
136 | return -ERANGE; | 136 | return -ERANGE; |
137 | 137 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 54dc6355b0c2..8b40a36c1b57 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -355,6 +355,7 @@ struct nv50_oimm { | |||
355 | 355 | ||
356 | struct nv50_head { | 356 | struct nv50_head { |
357 | struct nouveau_crtc base; | 357 | struct nouveau_crtc base; |
358 | struct nouveau_bo *image; | ||
358 | struct nv50_curs curs; | 359 | struct nv50_curs curs; |
359 | struct nv50_sync sync; | 360 | struct nv50_sync sync; |
360 | struct nv50_ovly ovly; | 361 | struct nv50_ovly ovly; |
@@ -517,9 +518,10 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
517 | { | 518 | { |
518 | struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); | 519 | struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); |
519 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 520 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
521 | struct nv50_head *head = nv50_head(crtc); | ||
520 | struct nv50_sync *sync = nv50_sync(crtc); | 522 | struct nv50_sync *sync = nv50_sync(crtc); |
521 | int head = nv_crtc->index, ret; | ||
522 | u32 *push; | 523 | u32 *push; |
524 | int ret; | ||
523 | 525 | ||
524 | swap_interval <<= 4; | 526 | swap_interval <<= 4; |
525 | if (swap_interval == 0) | 527 | if (swap_interval == 0) |
@@ -537,7 +539,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
537 | return ret; | 539 | return ret; |
538 | 540 | ||
539 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2); | 541 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2); |
540 | OUT_RING (chan, NvEvoSema0 + head); | 542 | OUT_RING (chan, NvEvoSema0 + nv_crtc->index); |
541 | OUT_RING (chan, sync->addr ^ 0x10); | 543 | OUT_RING (chan, sync->addr ^ 0x10); |
542 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1); | 544 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1); |
543 | OUT_RING (chan, sync->data + 1); | 545 | OUT_RING (chan, sync->data + 1); |
@@ -546,7 +548,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
546 | OUT_RING (chan, sync->data); | 548 | OUT_RING (chan, sync->data); |
547 | } else | 549 | } else |
548 | if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { | 550 | if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { |
549 | u64 addr = nv84_fence_crtc(chan, head) + sync->addr; | 551 | u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr; |
550 | ret = RING_SPACE(chan, 12); | 552 | ret = RING_SPACE(chan, 12); |
551 | if (ret) | 553 | if (ret) |
552 | return ret; | 554 | return ret; |
@@ -565,7 +567,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
565 | OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL); | 567 | OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL); |
566 | } else | 568 | } else |
567 | if (chan) { | 569 | if (chan) { |
568 | u64 addr = nv84_fence_crtc(chan, head) + sync->addr; | 570 | u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr; |
569 | ret = RING_SPACE(chan, 10); | 571 | ret = RING_SPACE(chan, 10); |
570 | if (ret) | 572 | if (ret) |
571 | return ret; | 573 | return ret; |
@@ -630,6 +632,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
630 | evo_mthd(push, 0x0080, 1); | 632 | evo_mthd(push, 0x0080, 1); |
631 | evo_data(push, 0x00000000); | 633 | evo_data(push, 0x00000000); |
632 | evo_kick(push, sync); | 634 | evo_kick(push, sync); |
635 | |||
636 | nouveau_bo_ref(nv_fb->nvbo, &head->image); | ||
633 | return 0; | 637 | return 0; |
634 | } | 638 | } |
635 | 639 | ||
@@ -1038,18 +1042,17 @@ static int | |||
1038 | nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) | 1042 | nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) |
1039 | { | 1043 | { |
1040 | struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); | 1044 | struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); |
1045 | struct nv50_head *head = nv50_head(crtc); | ||
1041 | int ret; | 1046 | int ret; |
1042 | 1047 | ||
1043 | ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); | 1048 | ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); |
1044 | if (ret) | 1049 | if (ret == 0) { |
1045 | return ret; | 1050 | if (head->image) |
1046 | 1051 | nouveau_bo_unpin(head->image); | |
1047 | if (old_fb) { | 1052 | nouveau_bo_ref(nvfb->nvbo, &head->image); |
1048 | nvfb = nouveau_framebuffer(old_fb); | ||
1049 | nouveau_bo_unpin(nvfb->nvbo); | ||
1050 | } | 1053 | } |
1051 | 1054 | ||
1052 | return 0; | 1055 | return ret; |
1053 | } | 1056 | } |
1054 | 1057 | ||
1055 | static int | 1058 | static int |
@@ -1198,6 +1201,15 @@ nv50_crtc_lut_load(struct drm_crtc *crtc) | |||
1198 | } | 1201 | } |
1199 | } | 1202 | } |
1200 | 1203 | ||
1204 | static void | ||
1205 | nv50_crtc_disable(struct drm_crtc *crtc) | ||
1206 | { | ||
1207 | struct nv50_head *head = nv50_head(crtc); | ||
1208 | if (head->image) | ||
1209 | nouveau_bo_unpin(head->image); | ||
1210 | nouveau_bo_ref(NULL, &head->image); | ||
1211 | } | ||
1212 | |||
1201 | static int | 1213 | static int |
1202 | nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | 1214 | nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, |
1203 | uint32_t handle, uint32_t width, uint32_t height) | 1215 | uint32_t handle, uint32_t width, uint32_t height) |
@@ -1271,18 +1283,29 @@ nv50_crtc_destroy(struct drm_crtc *crtc) | |||
1271 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 1283 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
1272 | struct nv50_disp *disp = nv50_disp(crtc->dev); | 1284 | struct nv50_disp *disp = nv50_disp(crtc->dev); |
1273 | struct nv50_head *head = nv50_head(crtc); | 1285 | struct nv50_head *head = nv50_head(crtc); |
1286 | |||
1274 | nv50_dmac_destroy(disp->core, &head->ovly.base); | 1287 | nv50_dmac_destroy(disp->core, &head->ovly.base); |
1275 | nv50_pioc_destroy(disp->core, &head->oimm.base); | 1288 | nv50_pioc_destroy(disp->core, &head->oimm.base); |
1276 | nv50_dmac_destroy(disp->core, &head->sync.base); | 1289 | nv50_dmac_destroy(disp->core, &head->sync.base); |
1277 | nv50_pioc_destroy(disp->core, &head->curs.base); | 1290 | nv50_pioc_destroy(disp->core, &head->curs.base); |
1291 | |||
1292 | /*XXX: this shouldn't be necessary, but the core doesn't call | ||
1293 | * disconnect() during the cleanup paths | ||
1294 | */ | ||
1295 | if (head->image) | ||
1296 | nouveau_bo_unpin(head->image); | ||
1297 | nouveau_bo_ref(NULL, &head->image); | ||
1298 | |||
1278 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); | 1299 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); |
1279 | if (nv_crtc->cursor.nvbo) | 1300 | if (nv_crtc->cursor.nvbo) |
1280 | nouveau_bo_unpin(nv_crtc->cursor.nvbo); | 1301 | nouveau_bo_unpin(nv_crtc->cursor.nvbo); |
1281 | nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); | 1302 | nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); |
1303 | |||
1282 | nouveau_bo_unmap(nv_crtc->lut.nvbo); | 1304 | nouveau_bo_unmap(nv_crtc->lut.nvbo); |
1283 | if (nv_crtc->lut.nvbo) | 1305 | if (nv_crtc->lut.nvbo) |
1284 | nouveau_bo_unpin(nv_crtc->lut.nvbo); | 1306 | nouveau_bo_unpin(nv_crtc->lut.nvbo); |
1285 | nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); | 1307 | nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); |
1308 | |||
1286 | drm_crtc_cleanup(crtc); | 1309 | drm_crtc_cleanup(crtc); |
1287 | kfree(crtc); | 1310 | kfree(crtc); |
1288 | } | 1311 | } |
@@ -1296,6 +1319,7 @@ static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = { | |||
1296 | .mode_set_base = nv50_crtc_mode_set_base, | 1319 | .mode_set_base = nv50_crtc_mode_set_base, |
1297 | .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic, | 1320 | .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic, |
1298 | .load_lut = nv50_crtc_lut_load, | 1321 | .load_lut = nv50_crtc_lut_load, |
1322 | .disable = nv50_crtc_disable, | ||
1299 | }; | 1323 | }; |
1300 | 1324 | ||
1301 | static const struct drm_crtc_funcs nv50_crtc_func = { | 1325 | static const struct drm_crtc_funcs nv50_crtc_func = { |
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c index f9701e567db8..0ee363840035 100644 --- a/drivers/gpu/drm/nouveau/nv50_fence.c +++ b/drivers/gpu/drm/nouveau/nv50_fence.c | |||
@@ -39,6 +39,8 @@ nv50_fence_context_new(struct nouveau_channel *chan) | |||
39 | struct nv10_fence_chan *fctx; | 39 | struct nv10_fence_chan *fctx; |
40 | struct ttm_mem_reg *mem = &priv->bo->bo.mem; | 40 | struct ttm_mem_reg *mem = &priv->bo->bo.mem; |
41 | struct nouveau_object *object; | 41 | struct nouveau_object *object; |
42 | u32 start = mem->start * PAGE_SIZE; | ||
43 | u32 limit = start + mem->size - 1; | ||
42 | int ret, i; | 44 | int ret, i; |
43 | 45 | ||
44 | fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); | 46 | fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); |
@@ -51,26 +53,28 @@ nv50_fence_context_new(struct nouveau_channel *chan) | |||
51 | fctx->base.sync = nv17_fence_sync; | 53 | fctx->base.sync = nv17_fence_sync; |
52 | 54 | ||
53 | ret = nouveau_object_new(nv_object(chan->cli), chan->handle, | 55 | ret = nouveau_object_new(nv_object(chan->cli), chan->handle, |
54 | NvSema, 0x0002, | 56 | NvSema, 0x003d, |
55 | &(struct nv_dma_class) { | 57 | &(struct nv_dma_class) { |
56 | .flags = NV_DMA_TARGET_VRAM | | 58 | .flags = NV_DMA_TARGET_VRAM | |
57 | NV_DMA_ACCESS_RDWR, | 59 | NV_DMA_ACCESS_RDWR, |
58 | .start = mem->start * PAGE_SIZE, | 60 | .start = start, |
59 | .limit = mem->size - 1, | 61 | .limit = limit, |
60 | }, sizeof(struct nv_dma_class), | 62 | }, sizeof(struct nv_dma_class), |
61 | &object); | 63 | &object); |
62 | 64 | ||
63 | /* dma objects for display sync channel semaphore blocks */ | 65 | /* dma objects for display sync channel semaphore blocks */ |
64 | for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) { | 66 | for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) { |
65 | struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i); | 67 | struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i); |
68 | u32 start = bo->bo.mem.start * PAGE_SIZE; | ||
69 | u32 limit = start + bo->bo.mem.size - 1; | ||
66 | 70 | ||
67 | ret = nouveau_object_new(nv_object(chan->cli), chan->handle, | 71 | ret = nouveau_object_new(nv_object(chan->cli), chan->handle, |
68 | NvEvoSema0 + i, 0x003d, | 72 | NvEvoSema0 + i, 0x003d, |
69 | &(struct nv_dma_class) { | 73 | &(struct nv_dma_class) { |
70 | .flags = NV_DMA_TARGET_VRAM | | 74 | .flags = NV_DMA_TARGET_VRAM | |
71 | NV_DMA_ACCESS_RDWR, | 75 | NV_DMA_ACCESS_RDWR, |
72 | .start = bo->bo.offset, | 76 | .start = start, |
73 | .limit = bo->bo.offset + 0xfff, | 77 | .limit = limit, |
74 | }, sizeof(struct nv_dma_class), | 78 | }, sizeof(struct nv_dma_class), |
75 | &object); | 79 | &object); |
76 | } | 80 | } |
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index 93c2f2cceb51..eb89653a7a17 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c | |||
@@ -179,9 +179,10 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *relea | |||
179 | uint32_t type, bool interruptible) | 179 | uint32_t type, bool interruptible) |
180 | { | 180 | { |
181 | struct qxl_command cmd; | 181 | struct qxl_command cmd; |
182 | struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); | ||
182 | 183 | ||
183 | cmd.type = type; | 184 | cmd.type = type; |
184 | cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset); | 185 | cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); |
185 | 186 | ||
186 | return qxl_ring_push(qdev->command_ring, &cmd, interruptible); | 187 | return qxl_ring_push(qdev->command_ring, &cmd, interruptible); |
187 | } | 188 | } |
@@ -191,9 +192,10 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas | |||
191 | uint32_t type, bool interruptible) | 192 | uint32_t type, bool interruptible) |
192 | { | 193 | { |
193 | struct qxl_command cmd; | 194 | struct qxl_command cmd; |
195 | struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); | ||
194 | 196 | ||
195 | cmd.type = type; | 197 | cmd.type = type; |
196 | cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset); | 198 | cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); |
197 | 199 | ||
198 | return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); | 200 | return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); |
199 | } | 201 | } |
@@ -214,7 +216,6 @@ int qxl_garbage_collect(struct qxl_device *qdev) | |||
214 | struct qxl_release *release; | 216 | struct qxl_release *release; |
215 | uint64_t id, next_id; | 217 | uint64_t id, next_id; |
216 | int i = 0; | 218 | int i = 0; |
217 | int ret; | ||
218 | union qxl_release_info *info; | 219 | union qxl_release_info *info; |
219 | 220 | ||
220 | while (qxl_ring_pop(qdev->release_ring, &id)) { | 221 | while (qxl_ring_pop(qdev->release_ring, &id)) { |
@@ -224,17 +225,10 @@ int qxl_garbage_collect(struct qxl_device *qdev) | |||
224 | if (release == NULL) | 225 | if (release == NULL) |
225 | break; | 226 | break; |
226 | 227 | ||
227 | ret = qxl_release_reserve(qdev, release, false); | ||
228 | if (ret) { | ||
229 | qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id); | ||
230 | DRM_ERROR("failed to reserve release %lld\n", id); | ||
231 | } | ||
232 | |||
233 | info = qxl_release_map(qdev, release); | 228 | info = qxl_release_map(qdev, release); |
234 | next_id = info->next; | 229 | next_id = info->next; |
235 | qxl_release_unmap(qdev, release, info); | 230 | qxl_release_unmap(qdev, release, info); |
236 | 231 | ||
237 | qxl_release_unreserve(qdev, release); | ||
238 | QXL_INFO(qdev, "popped %lld, next %lld\n", id, | 232 | QXL_INFO(qdev, "popped %lld, next %lld\n", id, |
239 | next_id); | 233 | next_id); |
240 | 234 | ||
@@ -259,27 +253,29 @@ int qxl_garbage_collect(struct qxl_device *qdev) | |||
259 | return i; | 253 | return i; |
260 | } | 254 | } |
261 | 255 | ||
262 | int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size, | 256 | int qxl_alloc_bo_reserved(struct qxl_device *qdev, |
257 | struct qxl_release *release, | ||
258 | unsigned long size, | ||
263 | struct qxl_bo **_bo) | 259 | struct qxl_bo **_bo) |
264 | { | 260 | { |
265 | struct qxl_bo *bo; | 261 | struct qxl_bo *bo; |
266 | int ret; | 262 | int ret; |
267 | 263 | ||
268 | ret = qxl_bo_create(qdev, size, false /* not kernel - device */, | 264 | ret = qxl_bo_create(qdev, size, false /* not kernel - device */, |
269 | QXL_GEM_DOMAIN_VRAM, NULL, &bo); | 265 | false, QXL_GEM_DOMAIN_VRAM, NULL, &bo); |
270 | if (ret) { | 266 | if (ret) { |
271 | DRM_ERROR("failed to allocate VRAM BO\n"); | 267 | DRM_ERROR("failed to allocate VRAM BO\n"); |
272 | return ret; | 268 | return ret; |
273 | } | 269 | } |
274 | ret = qxl_bo_reserve(bo, false); | 270 | ret = qxl_release_list_add(release, bo); |
275 | if (unlikely(ret != 0)) | 271 | if (ret) |
276 | goto out_unref; | 272 | goto out_unref; |
277 | 273 | ||
278 | *_bo = bo; | 274 | *_bo = bo; |
279 | return 0; | 275 | return 0; |
280 | out_unref: | 276 | out_unref: |
281 | qxl_bo_unref(&bo); | 277 | qxl_bo_unref(&bo); |
282 | return 0; | 278 | return ret; |
283 | } | 279 | } |
284 | 280 | ||
285 | static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr) | 281 | static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr) |
@@ -503,6 +499,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev, | |||
503 | if (ret) | 499 | if (ret) |
504 | return ret; | 500 | return ret; |
505 | 501 | ||
502 | ret = qxl_release_reserve_list(release, true); | ||
503 | if (ret) | ||
504 | return ret; | ||
505 | |||
506 | cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); | 506 | cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); |
507 | cmd->type = QXL_SURFACE_CMD_CREATE; | 507 | cmd->type = QXL_SURFACE_CMD_CREATE; |
508 | cmd->u.surface_create.format = surf->surf.format; | 508 | cmd->u.surface_create.format = surf->surf.format; |
@@ -524,14 +524,11 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev, | |||
524 | 524 | ||
525 | surf->surf_create = release; | 525 | surf->surf_create = release; |
526 | 526 | ||
527 | /* no need to add a release to the fence for this bo, | 527 | /* no need to add a release to the fence for this surface bo, |
528 | since it is only released when we ask to destroy the surface | 528 | since it is only released when we ask to destroy the surface |
529 | and it would never signal otherwise */ | 529 | and it would never signal otherwise */ |
530 | qxl_fence_releaseable(qdev, release); | ||
531 | |||
532 | qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); | 530 | qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); |
533 | 531 | qxl_release_fence_buffer_objects(release); | |
534 | qxl_release_unreserve(qdev, release); | ||
535 | 532 | ||
536 | surf->hw_surf_alloc = true; | 533 | surf->hw_surf_alloc = true; |
537 | spin_lock(&qdev->surf_id_idr_lock); | 534 | spin_lock(&qdev->surf_id_idr_lock); |
@@ -573,12 +570,9 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev, | |||
573 | cmd->surface_id = id; | 570 | cmd->surface_id = id; |
574 | qxl_release_unmap(qdev, release, &cmd->release_info); | 571 | qxl_release_unmap(qdev, release, &cmd->release_info); |
575 | 572 | ||
576 | qxl_fence_releaseable(qdev, release); | ||
577 | |||
578 | qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); | 573 | qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); |
579 | 574 | ||
580 | qxl_release_unreserve(qdev, release); | 575 | qxl_release_fence_buffer_objects(release); |
581 | |||
582 | 576 | ||
583 | return 0; | 577 | return 0; |
584 | } | 578 | } |
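Note on the qxl_cmd.c changes above: command submission moves from per-bo reserve/unreserve calls to a release-centric scheme. Buffers used by a command are attached to the release with qxl_release_list_add(), the whole list is reserved once with qxl_release_reserve_list(), and after the command is pushed the buffers are fenced and unreserved together by qxl_release_fence_buffer_objects(); the first list entry replaces the old release->bos[0] array access. The sketch below reduces that bookkeeping to plain C with a minimal hand-rolled list instead of the kernel's list_head; it is an illustration, not the qxl API.

struct bo        { int reserved; int fenced; };
struct bo_entry  { struct bo *bo; struct bo_entry *next; };
struct release   { struct bo_entry *bos; };

/* qxl_release_list_add() analogue: remember every bo the command touches */
static int release_list_add(struct release *r, struct bo_entry *e, struct bo *bo)
{
        e->bo   = bo;
        e->next = r->bos;
        r->bos  = e;
        return 0;
}

/* qxl_release_reserve_list() / qxl_release_fence_buffer_objects() analogues:
 * one pass over the list instead of reserving and unreserving each bo by hand */
static void release_reserve_list(struct release *r)
{
        for (struct bo_entry *e = r->bos; e; e = e->next)
                e->bo->reserved = 1;
}

static void release_fence_buffer_objects(struct release *r)
{
        for (struct bo_entry *e = r->bos; e; e = e->next) {
                e->bo->fenced   = 1;   /* attach the command's fence */
                e->bo->reserved = 0;   /* and drop the reservation   */
        }
}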
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index f76f5dd7bfc4..835caba026d3 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c | |||
@@ -179,7 +179,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc) | |||
179 | kfree(qxl_crtc); | 179 | kfree(qxl_crtc); |
180 | } | 180 | } |
181 | 181 | ||
182 | static void | 182 | static int |
183 | qxl_hide_cursor(struct qxl_device *qdev) | 183 | qxl_hide_cursor(struct qxl_device *qdev) |
184 | { | 184 | { |
185 | struct qxl_release *release; | 185 | struct qxl_release *release; |
@@ -188,14 +188,22 @@ qxl_hide_cursor(struct qxl_device *qdev) | |||
188 | 188 | ||
189 | ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, | 189 | ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, |
190 | &release, NULL); | 190 | &release, NULL); |
191 | if (ret) | ||
192 | return ret; | ||
193 | |||
194 | ret = qxl_release_reserve_list(release, true); | ||
195 | if (ret) { | ||
196 | qxl_release_free(qdev, release); | ||
197 | return ret; | ||
198 | } | ||
191 | 199 | ||
192 | cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); | 200 | cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); |
193 | cmd->type = QXL_CURSOR_HIDE; | 201 | cmd->type = QXL_CURSOR_HIDE; |
194 | qxl_release_unmap(qdev, release, &cmd->release_info); | 202 | qxl_release_unmap(qdev, release, &cmd->release_info); |
195 | 203 | ||
196 | qxl_fence_releaseable(qdev, release); | ||
197 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); | 204 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); |
198 | qxl_release_unreserve(qdev, release); | 205 | qxl_release_fence_buffer_objects(release); |
206 | return 0; | ||
199 | } | 207 | } |
200 | 208 | ||
201 | static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | 209 | static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, |
@@ -216,10 +224,8 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | |||
216 | 224 | ||
217 | int size = 64*64*4; | 225 | int size = 64*64*4; |
218 | int ret = 0; | 226 | int ret = 0; |
219 | if (!handle) { | 227 | if (!handle) |
220 | qxl_hide_cursor(qdev); | 228 | return qxl_hide_cursor(qdev); |
221 | return 0; | ||
222 | } | ||
223 | 229 | ||
224 | obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); | 230 | obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); |
225 | if (!obj) { | 231 | if (!obj) { |
@@ -234,8 +240,9 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | |||
234 | goto out_unref; | 240 | goto out_unref; |
235 | 241 | ||
236 | ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL); | 242 | ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL); |
243 | qxl_bo_unreserve(user_bo); | ||
237 | if (ret) | 244 | if (ret) |
238 | goto out_unreserve; | 245 | goto out_unref; |
239 | 246 | ||
240 | ret = qxl_bo_kmap(user_bo, &user_ptr); | 247 | ret = qxl_bo_kmap(user_bo, &user_ptr); |
241 | if (ret) | 248 | if (ret) |
@@ -246,14 +253,20 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | |||
246 | &release, NULL); | 253 | &release, NULL); |
247 | if (ret) | 254 | if (ret) |
248 | goto out_kunmap; | 255 | goto out_kunmap; |
249 | ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size, | 256 | |
250 | &cursor_bo); | 257 | ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_cursor) + size, |
258 | &cursor_bo); | ||
251 | if (ret) | 259 | if (ret) |
252 | goto out_free_release; | 260 | goto out_free_release; |
253 | ret = qxl_bo_kmap(cursor_bo, (void **)&cursor); | 261 | |
262 | ret = qxl_release_reserve_list(release, false); | ||
254 | if (ret) | 263 | if (ret) |
255 | goto out_free_bo; | 264 | goto out_free_bo; |
256 | 265 | ||
266 | ret = qxl_bo_kmap(cursor_bo, (void **)&cursor); | ||
267 | if (ret) | ||
268 | goto out_backoff; | ||
269 | |||
257 | cursor->header.unique = 0; | 270 | cursor->header.unique = 0; |
258 | cursor->header.type = SPICE_CURSOR_TYPE_ALPHA; | 271 | cursor->header.type = SPICE_CURSOR_TYPE_ALPHA; |
259 | cursor->header.width = 64; | 272 | cursor->header.width = 64; |
@@ -269,11 +282,7 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | |||
269 | 282 | ||
270 | qxl_bo_kunmap(cursor_bo); | 283 | qxl_bo_kunmap(cursor_bo); |
271 | 284 | ||
272 | /* finish with the userspace bo */ | ||
273 | qxl_bo_kunmap(user_bo); | 285 | qxl_bo_kunmap(user_bo); |
274 | qxl_bo_unpin(user_bo); | ||
275 | qxl_bo_unreserve(user_bo); | ||
276 | drm_gem_object_unreference_unlocked(obj); | ||
277 | 286 | ||
278 | cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); | 287 | cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); |
279 | cmd->type = QXL_CURSOR_SET; | 288 | cmd->type = QXL_CURSOR_SET; |
@@ -281,30 +290,35 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, | |||
281 | cmd->u.set.position.y = qcrtc->cur_y; | 290 | cmd->u.set.position.y = qcrtc->cur_y; |
282 | 291 | ||
283 | cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); | 292 | cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); |
284 | qxl_release_add_res(qdev, release, cursor_bo); | ||
285 | 293 | ||
286 | cmd->u.set.visible = 1; | 294 | cmd->u.set.visible = 1; |
287 | qxl_release_unmap(qdev, release, &cmd->release_info); | 295 | qxl_release_unmap(qdev, release, &cmd->release_info); |
288 | 296 | ||
289 | qxl_fence_releaseable(qdev, release); | ||
290 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); | 297 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); |
291 | qxl_release_unreserve(qdev, release); | 298 | qxl_release_fence_buffer_objects(release); |
299 | |||
300 | /* finish with the userspace bo */ | ||
301 | ret = qxl_bo_reserve(user_bo, false); | ||
302 | if (!ret) { | ||
303 | qxl_bo_unpin(user_bo); | ||
304 | qxl_bo_unreserve(user_bo); | ||
305 | } | ||
306 | drm_gem_object_unreference_unlocked(obj); | ||
292 | 307 | ||
293 | qxl_bo_unreserve(cursor_bo); | ||
294 | qxl_bo_unref(&cursor_bo); | 308 | qxl_bo_unref(&cursor_bo); |
295 | 309 | ||
296 | return ret; | 310 | return ret; |
311 | |||
312 | out_backoff: | ||
313 | qxl_release_backoff_reserve_list(release); | ||
297 | out_free_bo: | 314 | out_free_bo: |
298 | qxl_bo_unref(&cursor_bo); | 315 | qxl_bo_unref(&cursor_bo); |
299 | out_free_release: | 316 | out_free_release: |
300 | qxl_release_unreserve(qdev, release); | ||
301 | qxl_release_free(qdev, release); | 317 | qxl_release_free(qdev, release); |
302 | out_kunmap: | 318 | out_kunmap: |
303 | qxl_bo_kunmap(user_bo); | 319 | qxl_bo_kunmap(user_bo); |
304 | out_unpin: | 320 | out_unpin: |
305 | qxl_bo_unpin(user_bo); | 321 | qxl_bo_unpin(user_bo); |
306 | out_unreserve: | ||
307 | qxl_bo_unreserve(user_bo); | ||
308 | out_unref: | 322 | out_unref: |
309 | drm_gem_object_unreference_unlocked(obj); | 323 | drm_gem_object_unreference_unlocked(obj); |
310 | return ret; | 324 | return ret; |
@@ -322,6 +336,14 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc, | |||
322 | 336 | ||
323 | ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, | 337 | ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, |
324 | &release, NULL); | 338 | &release, NULL); |
339 | if (ret) | ||
340 | return ret; | ||
341 | |||
342 | ret = qxl_release_reserve_list(release, true); | ||
343 | if (ret) { | ||
344 | qxl_release_free(qdev, release); | ||
345 | return ret; | ||
346 | } | ||
325 | 347 | ||
326 | qcrtc->cur_x = x; | 348 | qcrtc->cur_x = x; |
327 | qcrtc->cur_y = y; | 349 | qcrtc->cur_y = y; |
@@ -332,9 +354,9 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc, | |||
332 | cmd->u.position.y = qcrtc->cur_y; | 354 | cmd->u.position.y = qcrtc->cur_y; |
333 | qxl_release_unmap(qdev, release, &cmd->release_info); | 355 | qxl_release_unmap(qdev, release, &cmd->release_info); |
334 | 356 | ||
335 | qxl_fence_releaseable(qdev, release); | ||
336 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); | 357 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); |
337 | qxl_release_unreserve(qdev, release); | 358 | qxl_release_fence_buffer_objects(release); |
359 | |||
338 | return 0; | 360 | return 0; |
339 | } | 361 | } |
340 | 362 | ||
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c index 3c8c3dbf9378..56e1d633875e 100644 --- a/drivers/gpu/drm/qxl/qxl_draw.c +++ b/drivers/gpu/drm/qxl/qxl_draw.c | |||
@@ -23,25 +23,29 @@ | |||
23 | #include "qxl_drv.h" | 23 | #include "qxl_drv.h" |
24 | #include "qxl_object.h" | 24 | #include "qxl_object.h" |
25 | 25 | ||
26 | static int alloc_clips(struct qxl_device *qdev, | ||
27 | struct qxl_release *release, | ||
28 | unsigned num_clips, | ||
29 | struct qxl_bo **clips_bo) | ||
30 | { | ||
31 | int size = sizeof(struct qxl_clip_rects) + sizeof(struct qxl_rect) * num_clips; | ||
32 | |||
33 | return qxl_alloc_bo_reserved(qdev, release, size, clips_bo); | ||
34 | } | ||
35 | |||
26 | /* returns a pointer to the already allocated qxl_rect array inside | 36 | /* returns a pointer to the already allocated qxl_rect array inside |
27 | * the qxl_clip_rects. This is *not* the same as the memory allocated | 37 | * the qxl_clip_rects. This is *not* the same as the memory allocated |
28 | * on the device, it is offset to qxl_clip_rects.chunk.data */ | 38 | * on the device, it is offset to qxl_clip_rects.chunk.data */ |
29 | static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, | 39 | static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, |
30 | struct qxl_drawable *drawable, | 40 | struct qxl_drawable *drawable, |
31 | unsigned num_clips, | 41 | unsigned num_clips, |
32 | struct qxl_bo **clips_bo, | 42 | struct qxl_bo *clips_bo) |
33 | struct qxl_release *release) | ||
34 | { | 43 | { |
35 | struct qxl_clip_rects *dev_clips; | 44 | struct qxl_clip_rects *dev_clips; |
36 | int ret; | 45 | int ret; |
37 | int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips; | ||
38 | ret = qxl_alloc_bo_reserved(qdev, size, clips_bo); | ||
39 | if (ret) | ||
40 | return NULL; | ||
41 | 46 | ||
42 | ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips); | 47 | ret = qxl_bo_kmap(clips_bo, (void **)&dev_clips); |
43 | if (ret) { | 48 | if (ret) { |
44 | qxl_bo_unref(clips_bo); | ||
45 | return NULL; | 49 | return NULL; |
46 | } | 50 | } |
47 | dev_clips->num_rects = num_clips; | 51 | dev_clips->num_rects = num_clips; |
@@ -52,20 +56,34 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, | |||
52 | } | 56 | } |
53 | 57 | ||
54 | static int | 58 | static int |
59 | alloc_drawable(struct qxl_device *qdev, struct qxl_release **release) | ||
60 | { | ||
61 | int ret; | ||
62 | ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable), | ||
63 | QXL_RELEASE_DRAWABLE, release, | ||
64 | NULL); | ||
65 | return ret; | ||
66 | } | ||
67 | |||
68 | static void | ||
69 | free_drawable(struct qxl_device *qdev, struct qxl_release *release) | ||
70 | { | ||
71 | qxl_release_free(qdev, release); | ||
72 | } | ||
73 | |||
74 | /* release needs to be reserved at this point */ | ||
75 | static int | ||
55 | make_drawable(struct qxl_device *qdev, int surface, uint8_t type, | 76 | make_drawable(struct qxl_device *qdev, int surface, uint8_t type, |
56 | const struct qxl_rect *rect, | 77 | const struct qxl_rect *rect, |
57 | struct qxl_release **release) | 78 | struct qxl_release *release) |
58 | { | 79 | { |
59 | struct qxl_drawable *drawable; | 80 | struct qxl_drawable *drawable; |
60 | int i, ret; | 81 | int i; |
61 | 82 | ||
62 | ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable), | 83 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); |
63 | QXL_RELEASE_DRAWABLE, release, | 84 | if (!drawable) |
64 | NULL); | 85 | return -ENOMEM; |
65 | if (ret) | ||
66 | return ret; | ||
67 | 86 | ||
68 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release); | ||
69 | drawable->type = type; | 87 | drawable->type = type; |
70 | 88 | ||
71 | drawable->surface_id = surface; /* Only primary for now */ | 89 | drawable->surface_id = surface; /* Only primary for now */ |
@@ -91,14 +109,23 @@ make_drawable(struct qxl_device *qdev, int surface, uint8_t type, | |||
91 | drawable->bbox = *rect; | 109 | drawable->bbox = *rect; |
92 | 110 | ||
93 | drawable->mm_time = qdev->rom->mm_clock; | 111 | drawable->mm_time = qdev->rom->mm_clock; |
94 | qxl_release_unmap(qdev, *release, &drawable->release_info); | 112 | qxl_release_unmap(qdev, release, &drawable->release_info); |
95 | return 0; | 113 | return 0; |
96 | } | 114 | } |
97 | 115 | ||
98 | static int qxl_palette_create_1bit(struct qxl_bo **palette_bo, | 116 | static int alloc_palette_object(struct qxl_device *qdev, |
117 | struct qxl_release *release, | ||
118 | struct qxl_bo **palette_bo) | ||
119 | { | ||
120 | return qxl_alloc_bo_reserved(qdev, release, | ||
121 | sizeof(struct qxl_palette) + sizeof(uint32_t) * 2, | ||
122 | palette_bo); | ||
123 | } | ||
124 | |||
125 | static int qxl_palette_create_1bit(struct qxl_bo *palette_bo, | ||
126 | struct qxl_release *release, | ||
99 | const struct qxl_fb_image *qxl_fb_image) | 127 | const struct qxl_fb_image *qxl_fb_image) |
100 | { | 128 | { |
101 | struct qxl_device *qdev = qxl_fb_image->qdev; | ||
102 | const struct fb_image *fb_image = &qxl_fb_image->fb_image; | 129 | const struct fb_image *fb_image = &qxl_fb_image->fb_image; |
103 | uint32_t visual = qxl_fb_image->visual; | 130 | uint32_t visual = qxl_fb_image->visual; |
104 | const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette; | 131 | const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette; |
@@ -108,12 +135,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo, | |||
108 | static uint64_t unique; /* we make no attempt to actually set this | 135 | static uint64_t unique; /* we make no attempt to actually set this |
109 | * correctly globaly, since that would require | 136 | * correctly globaly, since that would require |
110 | * tracking all of our palettes. */ | 137 | * tracking all of our palettes. */ |
111 | 138 | ret = qxl_bo_kmap(palette_bo, (void **)&pal); | |
112 | ret = qxl_alloc_bo_reserved(qdev, | ||
113 | sizeof(struct qxl_palette) + sizeof(uint32_t) * 2, | ||
114 | palette_bo); | ||
115 | |||
116 | ret = qxl_bo_kmap(*palette_bo, (void **)&pal); | ||
117 | pal->num_ents = 2; | 139 | pal->num_ents = 2; |
118 | pal->unique = unique++; | 140 | pal->unique = unique++; |
119 | if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) { | 141 | if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) { |
@@ -126,7 +148,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo, | |||
126 | } | 148 | } |
127 | pal->ents[0] = bgcolor; | 149 | pal->ents[0] = bgcolor; |
128 | pal->ents[1] = fgcolor; | 150 | pal->ents[1] = fgcolor; |
129 | qxl_bo_kunmap(*palette_bo); | 151 | qxl_bo_kunmap(palette_bo); |
130 | return 0; | 152 | return 0; |
131 | } | 153 | } |
132 | 154 | ||
@@ -144,44 +166,63 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image, | |||
144 | const char *src = fb_image->data; | 166 | const char *src = fb_image->data; |
145 | int depth = fb_image->depth; | 167 | int depth = fb_image->depth; |
146 | struct qxl_release *release; | 168 | struct qxl_release *release; |
147 | struct qxl_bo *image_bo; | ||
148 | struct qxl_image *image; | 169 | struct qxl_image *image; |
149 | int ret; | 170 | int ret; |
150 | 171 | struct qxl_drm_image *dimage; | |
172 | struct qxl_bo *palette_bo = NULL; | ||
151 | if (stride == 0) | 173 | if (stride == 0) |
152 | stride = depth * width / 8; | 174 | stride = depth * width / 8; |
153 | 175 | ||
176 | ret = alloc_drawable(qdev, &release); | ||
177 | if (ret) | ||
178 | return; | ||
179 | |||
180 | ret = qxl_image_alloc_objects(qdev, release, | ||
181 | &dimage, | ||
182 | height, stride); | ||
183 | if (ret) | ||
184 | goto out_free_drawable; | ||
185 | |||
186 | if (depth == 1) { | ||
187 | ret = alloc_palette_object(qdev, release, &palette_bo); | ||
188 | if (ret) | ||
189 | goto out_free_image; | ||
190 | } | ||
191 | |||
192 | /* do a reservation run over all the objects we just allocated */ | ||
193 | ret = qxl_release_reserve_list(release, true); | ||
194 | if (ret) | ||
195 | goto out_free_palette; | ||
196 | |||
154 | rect.left = x; | 197 | rect.left = x; |
155 | rect.right = x + width; | 198 | rect.right = x + width; |
156 | rect.top = y; | 199 | rect.top = y; |
157 | rect.bottom = y + height; | 200 | rect.bottom = y + height; |
158 | 201 | ||
159 | ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release); | 202 | ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, release); |
160 | if (ret) | 203 | if (ret) { |
161 | return; | 204 | qxl_release_backoff_reserve_list(release); |
205 | goto out_free_palette; | ||
206 | } | ||
162 | 207 | ||
163 | ret = qxl_image_create(qdev, release, &image_bo, | 208 | ret = qxl_image_init(qdev, release, dimage, |
164 | (const uint8_t *)src, 0, 0, | 209 | (const uint8_t *)src, 0, 0, |
165 | width, height, depth, stride); | 210 | width, height, depth, stride); |
166 | if (ret) { | 211 | if (ret) { |
167 | qxl_release_unreserve(qdev, release); | 212 | qxl_release_backoff_reserve_list(release); |
168 | qxl_release_free(qdev, release); | 213 | qxl_release_free(qdev, release); |
169 | return; | 214 | return; |
170 | } | 215 | } |
171 | 216 | ||
172 | if (depth == 1) { | 217 | if (depth == 1) { |
173 | struct qxl_bo *palette_bo; | ||
174 | void *ptr; | 218 | void *ptr; |
175 | ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image); | 219 | ret = qxl_palette_create_1bit(palette_bo, release, qxl_fb_image); |
176 | qxl_release_add_res(qdev, release, palette_bo); | ||
177 | 220 | ||
178 | ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0); | 221 | ptr = qxl_bo_kmap_atomic_page(qdev, dimage->bo, 0); |
179 | image = ptr; | 222 | image = ptr; |
180 | image->u.bitmap.palette = | 223 | image->u.bitmap.palette = |
181 | qxl_bo_physical_address(qdev, palette_bo, 0); | 224 | qxl_bo_physical_address(qdev, palette_bo, 0); |
182 | qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); | 225 | qxl_bo_kunmap_atomic_page(qdev, dimage->bo, ptr); |
183 | qxl_bo_unreserve(palette_bo); | ||
184 | qxl_bo_unref(&palette_bo); | ||
185 | } | 226 | } |
186 | 227 | ||
187 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); | 228 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); |
@@ -199,16 +240,20 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image, | |||
199 | drawable->u.copy.mask.bitmap = 0; | 240 | drawable->u.copy.mask.bitmap = 0; |
200 | 241 | ||
201 | drawable->u.copy.src_bitmap = | 242 | drawable->u.copy.src_bitmap = |
202 | qxl_bo_physical_address(qdev, image_bo, 0); | 243 | qxl_bo_physical_address(qdev, dimage->bo, 0); |
203 | qxl_release_unmap(qdev, release, &drawable->release_info); | 244 | qxl_release_unmap(qdev, release, &drawable->release_info); |
204 | 245 | ||
205 | qxl_release_add_res(qdev, release, image_bo); | ||
206 | qxl_bo_unreserve(image_bo); | ||
207 | qxl_bo_unref(&image_bo); | ||
208 | |||
209 | qxl_fence_releaseable(qdev, release); | ||
210 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); | 246 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); |
211 | qxl_release_unreserve(qdev, release); | 247 | qxl_release_fence_buffer_objects(release); |
248 | |||
249 | out_free_palette: | ||
250 | if (palette_bo) | ||
251 | qxl_bo_unref(&palette_bo); | ||
252 | out_free_image: | ||
253 | qxl_image_free_objects(qdev, dimage); | ||
254 | out_free_drawable: | ||
255 | if (ret) | ||
256 | free_drawable(qdev, release); | ||
212 | } | 257 | } |
213 | 258 | ||
214 | /* push a draw command using the given clipping rectangles as | 259 | /* push a draw command using the given clipping rectangles as |
@@ -243,10 +288,14 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, | |||
243 | int depth = qxl_fb->base.bits_per_pixel; | 288 | int depth = qxl_fb->base.bits_per_pixel; |
244 | uint8_t *surface_base; | 289 | uint8_t *surface_base; |
245 | struct qxl_release *release; | 290 | struct qxl_release *release; |
246 | struct qxl_bo *image_bo; | ||
247 | struct qxl_bo *clips_bo; | 291 | struct qxl_bo *clips_bo; |
292 | struct qxl_drm_image *dimage; | ||
248 | int ret; | 293 | int ret; |
249 | 294 | ||
295 | ret = alloc_drawable(qdev, &release); | ||
296 | if (ret) | ||
297 | return; | ||
298 | |||
250 | left = clips->x1; | 299 | left = clips->x1; |
251 | right = clips->x2; | 300 | right = clips->x2; |
252 | top = clips->y1; | 301 | top = clips->y1; |
@@ -263,36 +312,52 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, | |||
263 | 312 | ||
264 | width = right - left; | 313 | width = right - left; |
265 | height = bottom - top; | 314 | height = bottom - top; |
315 | |||
316 | ret = alloc_clips(qdev, release, num_clips, &clips_bo); | ||
317 | if (ret) | ||
318 | goto out_free_drawable; | ||
319 | |||
320 | ret = qxl_image_alloc_objects(qdev, release, | ||
321 | &dimage, | ||
322 | height, stride); | ||
323 | if (ret) | ||
324 | goto out_free_clips; | ||
325 | |||
326 | /* do a reservation run over all the objects we just allocated */ | ||
327 | ret = qxl_release_reserve_list(release, true); | ||
328 | if (ret) | ||
329 | goto out_free_image; | ||
330 | |||
266 | drawable_rect.left = left; | 331 | drawable_rect.left = left; |
267 | drawable_rect.right = right; | 332 | drawable_rect.right = right; |
268 | drawable_rect.top = top; | 333 | drawable_rect.top = top; |
269 | drawable_rect.bottom = bottom; | 334 | drawable_rect.bottom = bottom; |
335 | |||
270 | ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect, | 336 | ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect, |
271 | &release); | 337 | release); |
272 | if (ret) | 338 | if (ret) |
273 | return; | 339 | goto out_release_backoff; |
274 | 340 | ||
275 | ret = qxl_bo_kmap(bo, (void **)&surface_base); | 341 | ret = qxl_bo_kmap(bo, (void **)&surface_base); |
276 | if (ret) | 342 | if (ret) |
277 | goto out_unref; | 343 | goto out_release_backoff; |
278 | 344 | ||
279 | ret = qxl_image_create(qdev, release, &image_bo, surface_base, | 345 | |
280 | left, top, width, height, depth, stride); | 346 | ret = qxl_image_init(qdev, release, dimage, surface_base, |
347 | left, top, width, height, depth, stride); | ||
281 | qxl_bo_kunmap(bo); | 348 | qxl_bo_kunmap(bo); |
282 | if (ret) | 349 | if (ret) |
283 | goto out_unref; | 350 | goto out_release_backoff; |
351 | |||
352 | rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo); | ||
353 | if (!rects) | ||
354 | goto out_release_backoff; | ||
284 | 355 | ||
285 | rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release); | ||
286 | if (!rects) { | ||
287 | qxl_bo_unref(&image_bo); | ||
288 | goto out_unref; | ||
289 | } | ||
290 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); | 356 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); |
291 | 357 | ||
292 | drawable->clip.type = SPICE_CLIP_TYPE_RECTS; | 358 | drawable->clip.type = SPICE_CLIP_TYPE_RECTS; |
293 | drawable->clip.data = qxl_bo_physical_address(qdev, | 359 | drawable->clip.data = qxl_bo_physical_address(qdev, |
294 | clips_bo, 0); | 360 | clips_bo, 0); |
295 | qxl_release_add_res(qdev, release, clips_bo); | ||
296 | 361 | ||
297 | drawable->u.copy.src_area.top = 0; | 362 | drawable->u.copy.src_area.top = 0; |
298 | drawable->u.copy.src_area.bottom = height; | 363 | drawable->u.copy.src_area.bottom = height; |
@@ -306,11 +371,9 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, | |||
306 | drawable->u.copy.mask.pos.y = 0; | 371 | drawable->u.copy.mask.pos.y = 0; |
307 | drawable->u.copy.mask.bitmap = 0; | 372 | drawable->u.copy.mask.bitmap = 0; |
308 | 373 | ||
309 | drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0); | 374 | drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, dimage->bo, 0); |
310 | qxl_release_unmap(qdev, release, &drawable->release_info); | 375 | qxl_release_unmap(qdev, release, &drawable->release_info); |
311 | qxl_release_add_res(qdev, release, image_bo); | 376 | |
312 | qxl_bo_unreserve(image_bo); | ||
313 | qxl_bo_unref(&image_bo); | ||
314 | clips_ptr = clips; | 377 | clips_ptr = clips; |
315 | for (i = 0; i < num_clips; i++, clips_ptr += inc) { | 378 | for (i = 0; i < num_clips; i++, clips_ptr += inc) { |
316 | rects[i].left = clips_ptr->x1; | 379 | rects[i].left = clips_ptr->x1; |
@@ -319,17 +382,22 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, | |||
319 | rects[i].bottom = clips_ptr->y2; | 382 | rects[i].bottom = clips_ptr->y2; |
320 | } | 383 | } |
321 | qxl_bo_kunmap(clips_bo); | 384 | qxl_bo_kunmap(clips_bo); |
322 | qxl_bo_unreserve(clips_bo); | ||
323 | qxl_bo_unref(&clips_bo); | ||
324 | 385 | ||
325 | qxl_fence_releaseable(qdev, release); | ||
326 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); | 386 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); |
327 | qxl_release_unreserve(qdev, release); | 387 | qxl_release_fence_buffer_objects(release); |
328 | return; | 388 | |
389 | out_release_backoff: | ||
390 | if (ret) | ||
391 | qxl_release_backoff_reserve_list(release); | ||
392 | out_free_image: | ||
393 | qxl_image_free_objects(qdev, dimage); | ||
394 | out_free_clips: | ||
395 | qxl_bo_unref(&clips_bo); | ||
396 | out_free_drawable: | ||
397 | /* only free drawable on error */ | ||
398 | if (ret) | ||
399 | free_drawable(qdev, release); | ||
329 | 400 | ||
330 | out_unref: | ||
331 | qxl_release_unreserve(qdev, release); | ||
332 | qxl_release_free(qdev, release); | ||
333 | } | 401 | } |
334 | 402 | ||
335 | void qxl_draw_copyarea(struct qxl_device *qdev, | 403 | void qxl_draw_copyarea(struct qxl_device *qdev, |
@@ -342,22 +410,36 @@ void qxl_draw_copyarea(struct qxl_device *qdev, | |||
342 | struct qxl_release *release; | 410 | struct qxl_release *release; |
343 | int ret; | 411 | int ret; |
344 | 412 | ||
413 | ret = alloc_drawable(qdev, &release); | ||
414 | if (ret) | ||
415 | return; | ||
416 | |||
417 | /* do a reservation run over all the objects we just allocated */ | ||
418 | ret = qxl_release_reserve_list(release, true); | ||
419 | if (ret) | ||
420 | goto out_free_release; | ||
421 | |||
345 | rect.left = dx; | 422 | rect.left = dx; |
346 | rect.top = dy; | 423 | rect.top = dy; |
347 | rect.right = dx + width; | 424 | rect.right = dx + width; |
348 | rect.bottom = dy + height; | 425 | rect.bottom = dy + height; |
349 | ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release); | 426 | ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, release); |
350 | if (ret) | 427 | if (ret) { |
351 | return; | 428 | qxl_release_backoff_reserve_list(release); |
429 | goto out_free_release; | ||
430 | } | ||
352 | 431 | ||
353 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); | 432 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); |
354 | drawable->u.copy_bits.src_pos.x = sx; | 433 | drawable->u.copy_bits.src_pos.x = sx; |
355 | drawable->u.copy_bits.src_pos.y = sy; | 434 | drawable->u.copy_bits.src_pos.y = sy; |
356 | |||
357 | qxl_release_unmap(qdev, release, &drawable->release_info); | 435 | qxl_release_unmap(qdev, release, &drawable->release_info); |
358 | qxl_fence_releaseable(qdev, release); | 436 | |
359 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); | 437 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); |
360 | qxl_release_unreserve(qdev, release); | 438 | qxl_release_fence_buffer_objects(release); |
439 | |||
440 | out_free_release: | ||
441 | if (ret) | ||
442 | free_drawable(qdev, release); | ||
361 | } | 443 | } |
362 | 444 | ||
363 | void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) | 445 | void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) |
@@ -370,10 +452,21 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) | |||
370 | struct qxl_release *release; | 452 | struct qxl_release *release; |
371 | int ret; | 453 | int ret; |
372 | 454 | ||
373 | ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release); | 455 | ret = alloc_drawable(qdev, &release); |
374 | if (ret) | 456 | if (ret) |
375 | return; | 457 | return; |
376 | 458 | ||
459 | /* do a reservation run over all the objects we just allocated */ | ||
460 | ret = qxl_release_reserve_list(release, true); | ||
461 | if (ret) | ||
462 | goto out_free_release; | ||
463 | |||
464 | ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, release); | ||
465 | if (ret) { | ||
466 | qxl_release_backoff_reserve_list(release); | ||
467 | goto out_free_release; | ||
468 | } | ||
469 | |||
377 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); | 470 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); |
378 | drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID; | 471 | drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID; |
379 | drawable->u.fill.brush.u.color = color; | 472 | drawable->u.fill.brush.u.color = color; |
@@ -384,7 +477,11 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) | |||
384 | drawable->u.fill.mask.bitmap = 0; | 477 | drawable->u.fill.mask.bitmap = 0; |
385 | 478 | ||
386 | qxl_release_unmap(qdev, release, &drawable->release_info); | 479 | qxl_release_unmap(qdev, release, &drawable->release_info); |
387 | qxl_fence_releaseable(qdev, release); | 480 | |
388 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); | 481 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); |
389 | qxl_release_unreserve(qdev, release); | 482 | qxl_release_fence_buffer_objects(release); |
483 | |||
484 | out_free_release: | ||
485 | if (ret) | ||
486 | free_drawable(qdev, release); | ||
390 | } | 487 | } |
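
All of the qxl_draw.c paths above converge on the same submission pattern: allocate the release and any helper BOs first, reserve the whole list in one pass, build the drawable, push it to the ring, then fence the reserved objects, backing the reservation off and freeing the release on error. The sketch below condenses that flow; the helper names are the driver's own, their signatures are inferred from their use in the hunks above, and the per-object allocations are elided, so treat it as illustrative rather than a drop-in function.

/*
 * Condensed sketch of the flow shared by qxl_draw_opaque_fb(),
 * qxl_draw_dirty_fb(), qxl_draw_copyarea() and qxl_draw_fill() after
 * this patch.  Signatures are taken from their use above.
 */
static void qxl_draw_sketch(struct qxl_device *qdev, struct qxl_rect *rect)
{
	struct qxl_release *release;
	struct qxl_drawable *drawable;
	int ret;

	ret = alloc_drawable(qdev, &release);		/* release + drawable BO */
	if (ret)
		return;

	/* ... allocate image/palette/clip BOs against the release here ... */

	/* one reservation pass over every BO attached to the release */
	ret = qxl_release_reserve_list(release, true);
	if (ret)
		goto out_free;

	ret = make_drawable(qdev, 0, QXL_DRAW_COPY, rect, release);
	if (ret) {
		qxl_release_backoff_reserve_list(release);
		goto out_free;
	}

	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
	/* ... fill in the drawable fields ... */
	qxl_release_unmap(qdev, release, &drawable->release_info);

	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
	qxl_release_fence_buffer_objects(release);	/* fences and drops the reservation */
	return;

out_free:
	free_drawable(qdev, release);			/* error path only */
}
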
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index aacb791464a3..7e96f4f11738 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h | |||
@@ -42,6 +42,9 @@ | |||
42 | #include <ttm/ttm_placement.h> | 42 | #include <ttm/ttm_placement.h> |
43 | #include <ttm/ttm_module.h> | 43 | #include <ttm/ttm_module.h> |
44 | 44 | ||
45 | /* just for ttm_validate_buffer */ | ||
46 | #include <ttm/ttm_execbuf_util.h> | ||
47 | |||
45 | #include <drm/qxl_drm.h> | 48 | #include <drm/qxl_drm.h> |
46 | #include "qxl_dev.h" | 49 | #include "qxl_dev.h" |
47 | 50 | ||
@@ -118,9 +121,9 @@ struct qxl_bo { | |||
118 | uint32_t surface_id; | 121 | uint32_t surface_id; |
119 | struct qxl_fence fence; /* per bo fence - list of releases */ | 122 | struct qxl_fence fence; /* per bo fence - list of releases */ |
120 | struct qxl_release *surf_create; | 123 | struct qxl_release *surf_create; |
121 | atomic_t reserve_count; | ||
122 | }; | 124 | }; |
123 | #define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base) | 125 | #define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base) |
126 | #define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo) | ||
124 | 127 | ||
125 | struct qxl_gem { | 128 | struct qxl_gem { |
126 | struct mutex mutex; | 129 | struct mutex mutex; |
@@ -128,12 +131,7 @@ struct qxl_gem { | |||
128 | }; | 131 | }; |
129 | 132 | ||
130 | struct qxl_bo_list { | 133 | struct qxl_bo_list { |
131 | struct list_head lhead; | 134 | struct ttm_validate_buffer tv; |
132 | struct qxl_bo *bo; | ||
133 | }; | ||
134 | |||
135 | struct qxl_reloc_list { | ||
136 | struct list_head bos; | ||
137 | }; | 135 | }; |
138 | 136 | ||
139 | struct qxl_crtc { | 137 | struct qxl_crtc { |
@@ -195,10 +193,20 @@ enum { | |||
195 | struct qxl_release { | 193 | struct qxl_release { |
196 | int id; | 194 | int id; |
197 | int type; | 195 | int type; |
198 | int bo_count; | ||
199 | uint32_t release_offset; | 196 | uint32_t release_offset; |
200 | uint32_t surface_release_id; | 197 | uint32_t surface_release_id; |
201 | struct qxl_bo *bos[QXL_MAX_RES]; | 198 | struct ww_acquire_ctx ticket; |
199 | struct list_head bos; | ||
200 | }; | ||
201 | |||
202 | struct qxl_drm_chunk { | ||
203 | struct list_head head; | ||
204 | struct qxl_bo *bo; | ||
205 | }; | ||
206 | |||
207 | struct qxl_drm_image { | ||
208 | struct qxl_bo *bo; | ||
209 | struct list_head chunk_list; | ||
202 | }; | 210 | }; |
203 | 211 | ||
204 | struct qxl_fb_image { | 212 | struct qxl_fb_image { |
@@ -314,6 +322,7 @@ struct qxl_device { | |||
314 | struct workqueue_struct *gc_queue; | 322 | struct workqueue_struct *gc_queue; |
315 | struct work_struct gc_work; | 323 | struct work_struct gc_work; |
316 | 324 | ||
325 | struct work_struct fb_work; | ||
317 | }; | 326 | }; |
318 | 327 | ||
319 | /* forward declaration for QXL_INFO_IO */ | 328 | /* forward declaration for QXL_INFO_IO */ |
@@ -433,12 +442,19 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma); | |||
433 | 442 | ||
434 | /* qxl image */ | 443 | /* qxl image */ |
435 | 444 | ||
436 | int qxl_image_create(struct qxl_device *qdev, | 445 | int qxl_image_init(struct qxl_device *qdev, |
437 | struct qxl_release *release, | 446 | struct qxl_release *release, |
438 | struct qxl_bo **image_bo, | 447 | struct qxl_drm_image *dimage, |
439 | const uint8_t *data, | 448 | const uint8_t *data, |
440 | int x, int y, int width, int height, | 449 | int x, int y, int width, int height, |
441 | int depth, int stride); | 450 | int depth, int stride); |
451 | int | ||
452 | qxl_image_alloc_objects(struct qxl_device *qdev, | ||
453 | struct qxl_release *release, | ||
454 | struct qxl_drm_image **image_ptr, | ||
455 | int height, int stride); | ||
456 | void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage); | ||
457 | |||
442 | void qxl_update_screen(struct qxl_device *qxl); | 458 | void qxl_update_screen(struct qxl_device *qxl); |
443 | 459 | ||
444 | /* qxl io operations (qxl_cmd.c) */ | 460 | /* qxl io operations (qxl_cmd.c) */ |
@@ -459,20 +475,15 @@ int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible | |||
459 | void qxl_io_flush_release(struct qxl_device *qdev); | 475 | void qxl_io_flush_release(struct qxl_device *qdev); |
460 | void qxl_io_flush_surfaces(struct qxl_device *qdev); | 476 | void qxl_io_flush_surfaces(struct qxl_device *qdev); |
461 | 477 | ||
462 | int qxl_release_reserve(struct qxl_device *qdev, | ||
463 | struct qxl_release *release, bool no_wait); | ||
464 | void qxl_release_unreserve(struct qxl_device *qdev, | ||
465 | struct qxl_release *release); | ||
466 | union qxl_release_info *qxl_release_map(struct qxl_device *qdev, | 478 | union qxl_release_info *qxl_release_map(struct qxl_device *qdev, |
467 | struct qxl_release *release); | 479 | struct qxl_release *release); |
468 | void qxl_release_unmap(struct qxl_device *qdev, | 480 | void qxl_release_unmap(struct qxl_device *qdev, |
469 | struct qxl_release *release, | 481 | struct qxl_release *release, |
470 | union qxl_release_info *info); | 482 | union qxl_release_info *info); |
471 | /* | 483 | int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo); |
472 | * qxl_bo_add_resource. | 484 | int qxl_release_reserve_list(struct qxl_release *release, bool no_intr); |
473 | * | 485 | void qxl_release_backoff_reserve_list(struct qxl_release *release); |
474 | */ | 486 | void qxl_release_fence_buffer_objects(struct qxl_release *release); |
475 | void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource); | ||
476 | 487 | ||
477 | int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, | 488 | int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, |
478 | enum qxl_surface_cmd_type surface_cmd_type, | 489 | enum qxl_surface_cmd_type surface_cmd_type, |
@@ -481,15 +492,16 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, | |||
481 | int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, | 492 | int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, |
482 | int type, struct qxl_release **release, | 493 | int type, struct qxl_release **release, |
483 | struct qxl_bo **rbo); | 494 | struct qxl_bo **rbo); |
484 | int qxl_fence_releaseable(struct qxl_device *qdev, | 495 | |
485 | struct qxl_release *release); | ||
486 | int | 496 | int |
487 | qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release, | 497 | qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release, |
488 | uint32_t type, bool interruptible); | 498 | uint32_t type, bool interruptible); |
489 | int | 499 | int |
490 | qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release, | 500 | qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release, |
491 | uint32_t type, bool interruptible); | 501 | uint32_t type, bool interruptible); |
492 | int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size, | 502 | int qxl_alloc_bo_reserved(struct qxl_device *qdev, |
503 | struct qxl_release *release, | ||
504 | unsigned long size, | ||
493 | struct qxl_bo **_bo); | 505 | struct qxl_bo **_bo); |
494 | /* qxl drawing commands */ | 506 | /* qxl drawing commands */ |
495 | 507 | ||
@@ -510,15 +522,9 @@ void qxl_draw_copyarea(struct qxl_device *qdev, | |||
510 | u32 sx, u32 sy, | 522 | u32 sx, u32 sy, |
511 | u32 dx, u32 dy); | 523 | u32 dx, u32 dy); |
512 | 524 | ||
513 | uint64_t | ||
514 | qxl_release_alloc(struct qxl_device *qdev, int type, | ||
515 | struct qxl_release **ret); | ||
516 | |||
517 | void qxl_release_free(struct qxl_device *qdev, | 525 | void qxl_release_free(struct qxl_device *qdev, |
518 | struct qxl_release *release); | 526 | struct qxl_release *release); |
519 | void qxl_release_add_res(struct qxl_device *qdev, | 527 | |
520 | struct qxl_release *release, | ||
521 | struct qxl_bo *bo); | ||
522 | /* used by qxl_debugfs_release */ | 528 | /* used by qxl_debugfs_release */ |
523 | struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, | 529 | struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, |
524 | uint64_t id); | 530 | uint64_t id); |
@@ -561,7 +567,7 @@ void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freein | |||
561 | int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf); | 567 | int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf); |
562 | 568 | ||
563 | /* qxl_fence.c */ | 569 | /* qxl_fence.c */ |
564 | int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id); | 570 | void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id); |
565 | int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id); | 571 | int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id); |
566 | int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence); | 572 | int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence); |
567 | void qxl_fence_fini(struct qxl_fence *qfence); | 573 | void qxl_fence_fini(struct qxl_fence *qfence); |
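
The header changes route all per-command BO tracking through the release: qxl_alloc_bo_reserved() now takes the release, and qxl_release_list_add() / qxl_release_reserve_list() / qxl_release_backoff_reserve_list() / qxl_release_fence_buffer_objects() replace the old per-BO qxl_release_reserve()/qxl_release_unreserve() and qxl_release_add_res(). Judging from the drawing paths above, a caller attaches a BO roughly as sketched below; the assumption that qxl_alloc_bo_reserved() also puts the BO on the release's validation list is inferred from the "reservation run over all the objects we just allocated" comments in qxl_draw.c.

/* Minimal sketch of attaching an extra BO to a release under the new API. */
static int qxl_attach_scratch_bo(struct qxl_device *qdev,
				 struct qxl_release *release,
				 struct qxl_bo **bo)
{
	int ret;

	/* allocates the BO and (assumed) adds it to release->bos */
	ret = qxl_alloc_bo_reserved(qdev, release, PAGE_SIZE, bo);
	if (ret)
		return ret;

	/* before touching any of them, reserve the whole list at once */
	return qxl_release_reserve_list(release, true);
}
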
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index 76f39d88d684..88722f233430 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c | |||
@@ -37,12 +37,29 @@ | |||
37 | 37 | ||
38 | #define QXL_DIRTY_DELAY (HZ / 30) | 38 | #define QXL_DIRTY_DELAY (HZ / 30) |
39 | 39 | ||
40 | #define QXL_FB_OP_FILLRECT 1 | ||
41 | #define QXL_FB_OP_COPYAREA 2 | ||
42 | #define QXL_FB_OP_IMAGEBLIT 3 | ||
43 | |||
44 | struct qxl_fb_op { | ||
45 | struct list_head head; | ||
46 | int op_type; | ||
47 | union { | ||
48 | struct fb_fillrect fr; | ||
49 | struct fb_copyarea ca; | ||
50 | struct fb_image ib; | ||
51 | } op; | ||
52 | void *img_data; | ||
53 | }; | ||
54 | |||
40 | struct qxl_fbdev { | 55 | struct qxl_fbdev { |
41 | struct drm_fb_helper helper; | 56 | struct drm_fb_helper helper; |
42 | struct qxl_framebuffer qfb; | 57 | struct qxl_framebuffer qfb; |
43 | struct list_head fbdev_list; | 58 | struct list_head fbdev_list; |
44 | struct qxl_device *qdev; | 59 | struct qxl_device *qdev; |
45 | 60 | ||
61 | spinlock_t delayed_ops_lock; | ||
62 | struct list_head delayed_ops; | ||
46 | void *shadow; | 63 | void *shadow; |
47 | int size; | 64 | int size; |
48 | 65 | ||
@@ -164,8 +181,69 @@ static struct fb_deferred_io qxl_defio = { | |||
164 | .deferred_io = qxl_deferred_io, | 181 | .deferred_io = qxl_deferred_io, |
165 | }; | 182 | }; |
166 | 183 | ||
167 | static void qxl_fb_fillrect(struct fb_info *info, | 184 | static void qxl_fb_delayed_fillrect(struct qxl_fbdev *qfbdev, |
168 | const struct fb_fillrect *fb_rect) | 185 | const struct fb_fillrect *fb_rect) |
186 | { | ||
187 | struct qxl_fb_op *op; | ||
188 | unsigned long flags; | ||
189 | |||
190 | op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN); | ||
191 | if (!op) | ||
192 | return; | ||
193 | |||
194 | op->op.fr = *fb_rect; | ||
195 | op->img_data = NULL; | ||
196 | op->op_type = QXL_FB_OP_FILLRECT; | ||
197 | |||
198 | spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); | ||
199 | list_add_tail(&op->head, &qfbdev->delayed_ops); | ||
200 | spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); | ||
201 | } | ||
202 | |||
203 | static void qxl_fb_delayed_copyarea(struct qxl_fbdev *qfbdev, | ||
204 | const struct fb_copyarea *fb_copy) | ||
205 | { | ||
206 | struct qxl_fb_op *op; | ||
207 | unsigned long flags; | ||
208 | |||
209 | op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN); | ||
210 | if (!op) | ||
211 | return; | ||
212 | |||
213 | op->op.ca = *fb_copy; | ||
214 | op->img_data = NULL; | ||
215 | op->op_type = QXL_FB_OP_COPYAREA; | ||
216 | |||
217 | spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); | ||
218 | list_add_tail(&op->head, &qfbdev->delayed_ops); | ||
219 | spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); | ||
220 | } | ||
221 | |||
222 | static void qxl_fb_delayed_imageblit(struct qxl_fbdev *qfbdev, | ||
223 | const struct fb_image *fb_image) | ||
224 | { | ||
225 | struct qxl_fb_op *op; | ||
226 | unsigned long flags; | ||
227 | uint32_t size = fb_image->width * fb_image->height * (fb_image->depth >= 8 ? fb_image->depth / 8 : 1); | ||
228 | |||
229 | op = kmalloc(sizeof(struct qxl_fb_op) + size, GFP_ATOMIC | __GFP_NOWARN); | ||
230 | if (!op) | ||
231 | return; | ||
232 | |||
233 | op->op.ib = *fb_image; | ||
234 | op->img_data = (void *)(op + 1); | ||
235 | op->op_type = QXL_FB_OP_IMAGEBLIT; | ||
236 | |||
237 | memcpy(op->img_data, fb_image->data, size); | ||
238 | |||
239 | op->op.ib.data = op->img_data; | ||
240 | spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); | ||
241 | list_add_tail(&op->head, &qfbdev->delayed_ops); | ||
242 | spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); | ||
243 | } | ||
244 | |||
245 | static void qxl_fb_fillrect_internal(struct fb_info *info, | ||
246 | const struct fb_fillrect *fb_rect) | ||
169 | { | 247 | { |
170 | struct qxl_fbdev *qfbdev = info->par; | 248 | struct qxl_fbdev *qfbdev = info->par; |
171 | struct qxl_device *qdev = qfbdev->qdev; | 249 | struct qxl_device *qdev = qfbdev->qdev; |
@@ -203,17 +281,28 @@ static void qxl_fb_fillrect(struct fb_info *info, | |||
203 | qxl_draw_fill_rec.rect = rect; | 281 | qxl_draw_fill_rec.rect = rect; |
204 | qxl_draw_fill_rec.color = color; | 282 | qxl_draw_fill_rec.color = color; |
205 | qxl_draw_fill_rec.rop = rop; | 283 | qxl_draw_fill_rec.rop = rop; |
284 | |||
285 | qxl_draw_fill(&qxl_draw_fill_rec); | ||
286 | } | ||
287 | |||
288 | static void qxl_fb_fillrect(struct fb_info *info, | ||
289 | const struct fb_fillrect *fb_rect) | ||
290 | { | ||
291 | struct qxl_fbdev *qfbdev = info->par; | ||
292 | struct qxl_device *qdev = qfbdev->qdev; | ||
293 | |||
206 | if (!drm_can_sleep()) { | 294 | if (!drm_can_sleep()) { |
207 | qxl_io_log(qdev, | 295 | qxl_fb_delayed_fillrect(qfbdev, fb_rect); |
208 | "%s: TODO use RCU, mysterious locks with spin_lock\n", | 296 | schedule_work(&qdev->fb_work); |
209 | __func__); | ||
210 | return; | 297 | return; |
211 | } | 298 | } |
212 | qxl_draw_fill(&qxl_draw_fill_rec); | 299 | /* make sure any previous work is done */ |
300 | flush_work(&qdev->fb_work); | ||
301 | qxl_fb_fillrect_internal(info, fb_rect); | ||
213 | } | 302 | } |
214 | 303 | ||
215 | static void qxl_fb_copyarea(struct fb_info *info, | 304 | static void qxl_fb_copyarea_internal(struct fb_info *info, |
216 | const struct fb_copyarea *region) | 305 | const struct fb_copyarea *region) |
217 | { | 306 | { |
218 | struct qxl_fbdev *qfbdev = info->par; | 307 | struct qxl_fbdev *qfbdev = info->par; |
219 | 308 | ||
@@ -223,37 +312,89 @@ static void qxl_fb_copyarea(struct fb_info *info, | |||
223 | region->dx, region->dy); | 312 | region->dx, region->dy); |
224 | } | 313 | } |
225 | 314 | ||
315 | static void qxl_fb_copyarea(struct fb_info *info, | ||
316 | const struct fb_copyarea *region) | ||
317 | { | ||
318 | struct qxl_fbdev *qfbdev = info->par; | ||
319 | struct qxl_device *qdev = qfbdev->qdev; | ||
320 | |||
321 | if (!drm_can_sleep()) { | ||
322 | qxl_fb_delayed_copyarea(qfbdev, region); | ||
323 | schedule_work(&qdev->fb_work); | ||
324 | return; | ||
325 | } | ||
326 | /* make sure any previous work is done */ | ||
327 | flush_work(&qdev->fb_work); | ||
328 | qxl_fb_copyarea_internal(info, region); | ||
329 | } | ||
330 | |||
226 | static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image) | 331 | static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image) |
227 | { | 332 | { |
228 | qxl_draw_opaque_fb(qxl_fb_image, 0); | 333 | qxl_draw_opaque_fb(qxl_fb_image, 0); |
229 | } | 334 | } |
230 | 335 | ||
336 | static void qxl_fb_imageblit_internal(struct fb_info *info, | ||
337 | const struct fb_image *image) | ||
338 | { | ||
339 | struct qxl_fbdev *qfbdev = info->par; | ||
340 | struct qxl_fb_image qxl_fb_image; | ||
341 | |||
342 | /* ensure proper order of rendering operations - TODO: must do this | ||
343 | * for everything. */ | ||
344 | qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image); | ||
345 | qxl_fb_imageblit_safe(&qxl_fb_image); | ||
346 | } | ||
347 | |||
231 | static void qxl_fb_imageblit(struct fb_info *info, | 348 | static void qxl_fb_imageblit(struct fb_info *info, |
232 | const struct fb_image *image) | 349 | const struct fb_image *image) |
233 | { | 350 | { |
234 | struct qxl_fbdev *qfbdev = info->par; | 351 | struct qxl_fbdev *qfbdev = info->par; |
235 | struct qxl_device *qdev = qfbdev->qdev; | 352 | struct qxl_device *qdev = qfbdev->qdev; |
236 | struct qxl_fb_image qxl_fb_image; | ||
237 | 353 | ||
238 | if (!drm_can_sleep()) { | 354 | if (!drm_can_sleep()) { |
239 | /* we cannot do any ttm_bo allocation since that will fail on | 355 | qxl_fb_delayed_imageblit(qfbdev, image); |
240 | * ioremap_wc..__get_vm_area_node, so queue the work item | 356 | schedule_work(&qdev->fb_work); |
241 | * instead This can happen from printk inside an interrupt | ||
242 | * context, i.e.: smp_apic_timer_interrupt..check_cpu_stall */ | ||
243 | qxl_io_log(qdev, | ||
244 | "%s: TODO use RCU, mysterious locks with spin_lock\n", | ||
245 | __func__); | ||
246 | return; | 357 | return; |
247 | } | 358 | } |
359 | /* make sure any previous work is done */ | ||
360 | flush_work(&qdev->fb_work); | ||
361 | qxl_fb_imageblit_internal(info, image); | ||
362 | } | ||
248 | 363 | ||
249 | /* ensure proper order of rendering operations - TODO: must do this | 364 | static void qxl_fb_work(struct work_struct *work) |
250 | * for everything. */ | 365 | { |
251 | qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image); | 366 | struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work); |
252 | qxl_fb_imageblit_safe(&qxl_fb_image); | 367 | unsigned long flags; |
368 | struct qxl_fb_op *entry, *tmp; | ||
369 | struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev; | ||
370 | |||
371 | /* since the irq context just adds entries to the end of the | ||
372 | list dropping the lock should be fine, as entry isn't modified | ||
373 | in the operation code */ | ||
374 | spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); | ||
375 | list_for_each_entry_safe(entry, tmp, &qfbdev->delayed_ops, head) { | ||
376 | spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); | ||
377 | switch (entry->op_type) { | ||
378 | case QXL_FB_OP_FILLRECT: | ||
379 | qxl_fb_fillrect_internal(qfbdev->helper.fbdev, &entry->op.fr); | ||
380 | break; | ||
381 | case QXL_FB_OP_COPYAREA: | ||
382 | qxl_fb_copyarea_internal(qfbdev->helper.fbdev, &entry->op.ca); | ||
383 | break; | ||
384 | case QXL_FB_OP_IMAGEBLIT: | ||
385 | qxl_fb_imageblit_internal(qfbdev->helper.fbdev, &entry->op.ib); | ||
386 | break; | ||
387 | } | ||
388 | spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); | ||
389 | list_del(&entry->head); | ||
390 | kfree(entry); | ||
391 | } | ||
392 | spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); | ||
253 | } | 393 | } |
254 | 394 | ||
255 | int qxl_fb_init(struct qxl_device *qdev) | 395 | int qxl_fb_init(struct qxl_device *qdev) |
256 | { | 396 | { |
397 | INIT_WORK(&qdev->fb_work, qxl_fb_work); | ||
257 | return 0; | 398 | return 0; |
258 | } | 399 | } |
259 | 400 | ||
@@ -536,7 +677,8 @@ int qxl_fbdev_init(struct qxl_device *qdev) | |||
536 | qfbdev->qdev = qdev; | 677 | qfbdev->qdev = qdev; |
537 | qdev->mode_info.qfbdev = qfbdev; | 678 | qdev->mode_info.qfbdev = qfbdev; |
538 | qfbdev->helper.funcs = &qxl_fb_helper_funcs; | 679 | qfbdev->helper.funcs = &qxl_fb_helper_funcs; |
539 | 680 | spin_lock_init(&qfbdev->delayed_ops_lock); | |
681 | INIT_LIST_HEAD(&qfbdev->delayed_ops); | ||
540 | ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper, | 682 | ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper, |
541 | qxl_num_crtc /* num_crtc - QXL supports just 1 */, | 683 | qxl_num_crtc /* num_crtc - QXL supports just 1 */, |
542 | QXLFB_CONN_LIMIT); | 684 | QXLFB_CONN_LIMIT); |
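
The new fbops path defers work when the caller cannot sleep: the request is copied into a qxl_fb_op under delayed_ops_lock and fb_work drains the list later, while the sleepable path flushes any queued work first so ordering is preserved. One detail worth calling out is the imageblit case: the draw only happens later from the workqueue, so the pixel data has to be copied rather than referenced, and the delayed op allocates the payload together with the op and points the copied fb_image at it. A minimal restatement of that deep copy, following the hunk above:

/* Shape of the deep copy done by qxl_fb_delayed_imageblit() above;
 * the size formula mirrors the one in the hunk (sub-byte depths are
 * rounded up to one byte per pixel). */
uint32_t size = fb_image->width * fb_image->height *
		(fb_image->depth >= 8 ? fb_image->depth / 8 : 1);
struct qxl_fb_op *op = kmalloc(sizeof(*op) + size, GFP_ATOMIC | __GFP_NOWARN);

if (op) {
	op->op.ib = *fb_image;			/* struct copy, data pointer still the caller's */
	op->img_data = (void *)(op + 1);	/* payload lives right after the op */
	memcpy(op->img_data, fb_image->data, size);
	op->op.ib.data = op->img_data;		/* repoint at the private copy */
}
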
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c index 63c6715ad385..ae59e91cfb9a 100644 --- a/drivers/gpu/drm/qxl/qxl_fence.c +++ b/drivers/gpu/drm/qxl/qxl_fence.c | |||
@@ -49,17 +49,11 @@ | |||
49 | 49 | ||
50 | For some reason every so often qxl hw fails to release, things go wrong. | 50 | For some reason every so often qxl hw fails to release, things go wrong. |
51 | */ | 51 | */ |
52 | 52 | /* must be called with the fence lock held */ | |
53 | 53 | void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id) | |
54 | int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id) | ||
55 | { | 54 | { |
56 | struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence); | ||
57 | |||
58 | spin_lock(&bo->tbo.bdev->fence_lock); | ||
59 | radix_tree_insert(&qfence->tree, rel_id, qfence); | 55 | radix_tree_insert(&qfence->tree, rel_id, qfence); |
60 | qfence->num_active_releases++; | 56 | qfence->num_active_releases++; |
61 | spin_unlock(&bo->tbo.bdev->fence_lock); | ||
62 | return 0; | ||
63 | } | 57 | } |
64 | 58 | ||
65 | int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id) | 59 | int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id) |
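
qxl_fence_add_release() used to take bo->tbo.bdev->fence_lock around the radix-tree insert itself; the renamed qxl_fence_add_release_locked() moves that responsibility to the caller, presumably so qxl_release_fence_buffer_objects() can tag many BOs under a single lock acquisition. A hypothetical wrapper showing the new contract (the real caller lives in qxl_release.c, outside this hunk):

/* Hypothetical helper illustrating the locking contract of
 * qxl_fence_add_release_locked(). */
static void qxl_bo_tag_release(struct qxl_bo *bo, uint32_t rel_id)
{
	spin_lock(&bo->tbo.bdev->fence_lock);
	qxl_fence_add_release_locked(&bo->fence, rel_id);
	spin_unlock(&bo->tbo.bdev->fence_lock);
}
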
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c index a235693aabba..25e1777fb0a2 100644 --- a/drivers/gpu/drm/qxl/qxl_gem.c +++ b/drivers/gpu/drm/qxl/qxl_gem.c | |||
@@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size, | |||
55 | /* At least align on page size */ | 55 | /* At least align on page size */ |
56 | if (alignment < PAGE_SIZE) | 56 | if (alignment < PAGE_SIZE) |
57 | alignment = PAGE_SIZE; | 57 | alignment = PAGE_SIZE; |
58 | r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo); | 58 | r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo); |
59 | if (r) { | 59 | if (r) { |
60 | if (r != -ERESTARTSYS) | 60 | if (r != -ERESTARTSYS) |
61 | DRM_ERROR( | 61 | DRM_ERROR( |
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c index cf856206996b..7fbcc35e8ad3 100644 --- a/drivers/gpu/drm/qxl/qxl_image.c +++ b/drivers/gpu/drm/qxl/qxl_image.c | |||
@@ -30,31 +30,100 @@ | |||
30 | #include "qxl_object.h" | 30 | #include "qxl_object.h" |
31 | 31 | ||
32 | static int | 32 | static int |
33 | qxl_image_create_helper(struct qxl_device *qdev, | 33 | qxl_allocate_chunk(struct qxl_device *qdev, |
34 | struct qxl_release *release, | ||
35 | struct qxl_drm_image *image, | ||
36 | unsigned int chunk_size) | ||
37 | { | ||
38 | struct qxl_drm_chunk *chunk; | ||
39 | int ret; | ||
40 | |||
41 | chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL); | ||
42 | if (!chunk) | ||
43 | return -ENOMEM; | ||
44 | |||
45 | ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo); | ||
46 | if (ret) { | ||
47 | kfree(chunk); | ||
48 | return ret; | ||
49 | } | ||
50 | |||
51 | list_add_tail(&chunk->head, &image->chunk_list); | ||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | int | ||
56 | qxl_image_alloc_objects(struct qxl_device *qdev, | ||
34 | struct qxl_release *release, | 57 | struct qxl_release *release, |
35 | struct qxl_bo **image_bo, | 58 | struct qxl_drm_image **image_ptr, |
36 | const uint8_t *data, | 59 | int height, int stride) |
37 | int width, int height, | 60 | { |
38 | int depth, unsigned int hash, | 61 | struct qxl_drm_image *image; |
39 | int stride) | 62 | int ret; |
63 | |||
64 | image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL); | ||
65 | if (!image) | ||
66 | return -ENOMEM; | ||
67 | |||
68 | INIT_LIST_HEAD(&image->chunk_list); | ||
69 | |||
70 | ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo); | ||
71 | if (ret) { | ||
72 | kfree(image); | ||
73 | return ret; | ||
74 | } | ||
75 | |||
76 | ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height); | ||
77 | if (ret) { | ||
78 | qxl_bo_unref(&image->bo); | ||
79 | kfree(image); | ||
80 | return ret; | ||
81 | } | ||
82 | *image_ptr = image; | ||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage) | ||
40 | { | 87 | { |
88 | struct qxl_drm_chunk *chunk, *tmp; | ||
89 | |||
90 | list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) { | ||
91 | qxl_bo_unref(&chunk->bo); | ||
92 | kfree(chunk); | ||
93 | } | ||
94 | |||
95 | qxl_bo_unref(&dimage->bo); | ||
96 | kfree(dimage); | ||
97 | } | ||
98 | |||
99 | static int | ||
100 | qxl_image_init_helper(struct qxl_device *qdev, | ||
101 | struct qxl_release *release, | ||
102 | struct qxl_drm_image *dimage, | ||
103 | const uint8_t *data, | ||
104 | int width, int height, | ||
105 | int depth, unsigned int hash, | ||
106 | int stride) | ||
107 | { | ||
108 | struct qxl_drm_chunk *drv_chunk; | ||
41 | struct qxl_image *image; | 109 | struct qxl_image *image; |
42 | struct qxl_data_chunk *chunk; | 110 | struct qxl_data_chunk *chunk; |
43 | int i; | 111 | int i; |
44 | int chunk_stride; | 112 | int chunk_stride; |
45 | int linesize = width * depth / 8; | 113 | int linesize = width * depth / 8; |
46 | struct qxl_bo *chunk_bo; | 114 | struct qxl_bo *chunk_bo, *image_bo; |
47 | int ret; | ||
48 | void *ptr; | 115 | void *ptr; |
49 | /* Chunk */ | 116 | /* Chunk */ |
50 | /* FIXME: Check integer overflow */ | 117 | /* FIXME: Check integer overflow */ |
51 | /* TODO: variable number of chunks */ | 118 | /* TODO: variable number of chunks */ |
119 | |||
120 | drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head); | ||
121 | |||
122 | chunk_bo = drv_chunk->bo; | ||
52 | chunk_stride = stride; /* TODO: should use linesize, but it renders | 123 | chunk_stride = stride; /* TODO: should use linesize, but it renders |
53 | wrong (check the bitmaps are sent correctly | 124 | wrong (check the bitmaps are sent correctly |
54 | first) */ | 125 | first) */ |
55 | ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride, | 126 | |
56 | &chunk_bo); | ||
57 | |||
58 | ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0); | 127 | ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0); |
59 | chunk = ptr; | 128 | chunk = ptr; |
60 | chunk->data_size = height * chunk_stride; | 129 | chunk->data_size = height * chunk_stride; |
@@ -102,7 +171,6 @@ qxl_image_create_helper(struct qxl_device *qdev, | |||
102 | while (remain > 0) { | 171 | while (remain > 0) { |
103 | page_base = out_offset & PAGE_MASK; | 172 | page_base = out_offset & PAGE_MASK; |
104 | page_offset = offset_in_page(out_offset); | 173 | page_offset = offset_in_page(out_offset); |
105 | |||
106 | size = min((int)(PAGE_SIZE - page_offset), remain); | 174 | size = min((int)(PAGE_SIZE - page_offset), remain); |
107 | 175 | ||
108 | ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base); | 176 | ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base); |
@@ -116,14 +184,10 @@ qxl_image_create_helper(struct qxl_device *qdev, | |||
116 | } | 184 | } |
117 | } | 185 | } |
118 | } | 186 | } |
119 | |||
120 | |||
121 | qxl_bo_kunmap(chunk_bo); | 187 | qxl_bo_kunmap(chunk_bo); |
122 | 188 | ||
123 | /* Image */ | 189 | image_bo = dimage->bo; |
124 | ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo); | 190 | ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0); |
125 | |||
126 | ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0); | ||
127 | image = ptr; | 191 | image = ptr; |
128 | 192 | ||
129 | image->descriptor.id = 0; | 193 | image->descriptor.id = 0; |
@@ -154,23 +218,20 @@ qxl_image_create_helper(struct qxl_device *qdev, | |||
154 | image->u.bitmap.stride = chunk_stride; | 218 | image->u.bitmap.stride = chunk_stride; |
155 | image->u.bitmap.palette = 0; | 219 | image->u.bitmap.palette = 0; |
156 | image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0); | 220 | image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0); |
157 | qxl_release_add_res(qdev, release, chunk_bo); | ||
158 | qxl_bo_unreserve(chunk_bo); | ||
159 | qxl_bo_unref(&chunk_bo); | ||
160 | 221 | ||
161 | qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr); | 222 | qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); |
162 | 223 | ||
163 | return 0; | 224 | return 0; |
164 | } | 225 | } |
165 | 226 | ||
166 | int qxl_image_create(struct qxl_device *qdev, | 227 | int qxl_image_init(struct qxl_device *qdev, |
167 | struct qxl_release *release, | 228 | struct qxl_release *release, |
168 | struct qxl_bo **image_bo, | 229 | struct qxl_drm_image *dimage, |
169 | const uint8_t *data, | 230 | const uint8_t *data, |
170 | int x, int y, int width, int height, | 231 | int x, int y, int width, int height, |
171 | int depth, int stride) | 232 | int depth, int stride) |
172 | { | 233 | { |
173 | data += y * stride + x * (depth / 8); | 234 | data += y * stride + x * (depth / 8); |
174 | return qxl_image_create_helper(qdev, release, image_bo, data, | 235 | return qxl_image_init_helper(qdev, release, dimage, data, |
175 | width, height, depth, 0, stride); | 236 | width, height, depth, 0, stride); |
176 | } | 237 | } |
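
qxl_image_create() is split into an allocation step and a fill step: qxl_image_alloc_objects() creates the image BO plus a single data chunk BO (sized sizeof(struct qxl_data_chunk) + stride * height) on dimage->chunk_list, and qxl_image_init() only writes into objects the caller has already reserved. A caller-side sketch mirroring qxl_draw_opaque_fb() above; error unwinding is condensed, and note that the draw paths free the dimage wrapper with qxl_image_free_objects() on success as well, as the out_free_image labels above show.

/* Sketch of the new two-step image path, assuming the release has been
 * set up as in the drawing functions above. */
static int qxl_upload_image_sketch(struct qxl_device *qdev,
				   struct qxl_release *release,
				   const uint8_t *data, int width, int height,
				   int depth, int stride)
{
	struct qxl_drm_image *dimage;
	int ret;

	/* allocate image + chunk BOs; nothing is mapped or copied yet */
	ret = qxl_image_alloc_objects(qdev, release, &dimage, height, stride);
	if (ret)
		return ret;

	ret = qxl_release_reserve_list(release, true);
	if (ret)
		goto out_free;

	/* safe to kmap the BOs and upload the pixels now */
	ret = qxl_image_init(qdev, release, dimage, data, 0, 0,
			     width, height, depth, stride);
	if (ret)
		qxl_release_backoff_reserve_list(release);

	/* ... on success: build the drawable, push, fence ... */

out_free:
	qxl_image_free_objects(qdev, dimage);	/* wrapper freed on every path */
	return ret;
}
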
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 27f45e49250d..6de33563d6f1 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c | |||
@@ -68,55 +68,60 @@ static int qxl_map_ioctl(struct drm_device *dev, void *data, | |||
68 | &qxl_map->offset); | 68 | &qxl_map->offset); |
69 | } | 69 | } |
70 | 70 | ||
71 | struct qxl_reloc_info { | ||
72 | int type; | ||
73 | struct qxl_bo *dst_bo; | ||
74 | uint32_t dst_offset; | ||
75 | struct qxl_bo *src_bo; | ||
76 | int src_offset; | ||
77 | }; | ||
78 | |||
71 | /* | 79 | /* |
72 | * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's | 80 | * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's |
73 | * are on vram). | 81 | * are on vram). |
74 | * *(dst + dst_off) = qxl_bo_physical_address(src, src_off) | 82 | * *(dst + dst_off) = qxl_bo_physical_address(src, src_off) |
75 | */ | 83 | */ |
76 | static void | 84 | static void |
77 | apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, | 85 | apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) |
78 | struct qxl_bo *src, uint64_t src_off) | ||
79 | { | 86 | { |
80 | void *reloc_page; | 87 | void *reloc_page; |
81 | 88 | reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); | |
82 | reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); | 89 | *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, |
83 | *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, | 90 | info->src_bo, |
84 | src, src_off); | 91 | info->src_offset); |
85 | qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); | 92 | qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page); |
86 | } | 93 | } |
87 | 94 | ||
88 | static void | 95 | static void |
89 | apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, | 96 | apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) |
90 | struct qxl_bo *src) | ||
91 | { | 97 | { |
92 | uint32_t id = 0; | 98 | uint32_t id = 0; |
93 | void *reloc_page; | 99 | void *reloc_page; |
94 | 100 | ||
95 | if (src && !src->is_primary) | 101 | if (info->src_bo && !info->src_bo->is_primary) |
96 | id = src->surface_id; | 102 | id = info->src_bo->surface_id; |
97 | 103 | ||
98 | reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); | 104 | reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); |
99 | *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id; | 105 | *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id; |
100 | qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); | 106 | qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page); |
101 | } | 107 | } |
102 | 108 | ||
103 | /* return holding the reference to this object */ | 109 | /* return holding the reference to this object */ |
104 | static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, | 110 | static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, |
105 | struct drm_file *file_priv, uint64_t handle, | 111 | struct drm_file *file_priv, uint64_t handle, |
106 | struct qxl_reloc_list *reloc_list) | 112 | struct qxl_release *release) |
107 | { | 113 | { |
108 | struct drm_gem_object *gobj; | 114 | struct drm_gem_object *gobj; |
109 | struct qxl_bo *qobj; | 115 | struct qxl_bo *qobj; |
110 | int ret; | 116 | int ret; |
111 | 117 | ||
112 | gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle); | 118 | gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle); |
113 | if (!gobj) { | 119 | if (!gobj) |
114 | DRM_ERROR("bad bo handle %lld\n", handle); | ||
115 | return NULL; | 120 | return NULL; |
116 | } | 121 | |
117 | qobj = gem_to_qxl_bo(gobj); | 122 | qobj = gem_to_qxl_bo(gobj); |
118 | 123 | ||
119 | ret = qxl_bo_list_add(reloc_list, qobj); | 124 | ret = qxl_release_list_add(release, qobj); |
120 | if (ret) | 125 | if (ret) |
121 | return NULL; | 126 | return NULL; |
122 | 127 | ||
@@ -129,151 +134,177 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, | |||
129 | * However, the command as passed from user space must *not* contain the initial | 134 | * However, the command as passed from user space must *not* contain the initial |
130 | * QXLReleaseInfo struct (first XXX bytes) | 135 | * QXLReleaseInfo struct (first XXX bytes) |
131 | */ | 136 | */ |
132 | static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, | 137 | static int qxl_process_single_command(struct qxl_device *qdev, |
133 | struct drm_file *file_priv) | 138 | struct drm_qxl_command *cmd, |
139 | struct drm_file *file_priv) | ||
134 | { | 140 | { |
135 | struct qxl_device *qdev = dev->dev_private; | 141 | struct qxl_reloc_info *reloc_info; |
136 | struct drm_qxl_execbuffer *execbuffer = data; | 142 | int release_type; |
137 | struct drm_qxl_command user_cmd; | 143 | struct qxl_release *release; |
138 | int cmd_num; | 144 | struct qxl_bo *cmd_bo; |
139 | struct qxl_bo *reloc_src_bo; | ||
140 | struct qxl_bo *reloc_dst_bo; | ||
141 | struct drm_qxl_reloc reloc; | ||
142 | void *fb_cmd; | 145 | void *fb_cmd; |
143 | int i, ret; | 146 | int i, j, ret, num_relocs; |
144 | struct qxl_reloc_list reloc_list; | ||
145 | int unwritten; | 147 | int unwritten; |
146 | uint32_t reloc_dst_offset; | ||
147 | INIT_LIST_HEAD(&reloc_list.bos); | ||
148 | 148 | ||
149 | for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) { | 149 | switch (cmd->type) { |
150 | struct qxl_release *release; | 150 | case QXL_CMD_DRAW: |
151 | struct qxl_bo *cmd_bo; | 151 | release_type = QXL_RELEASE_DRAWABLE; |
152 | int release_type; | 152 | break; |
153 | struct drm_qxl_command *commands = | 153 | case QXL_CMD_SURFACE: |
154 | (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; | 154 | case QXL_CMD_CURSOR: |
155 | default: | ||
156 | DRM_DEBUG("Only draw commands in execbuffers\n"); | ||
157 | return -EINVAL; | ||
158 | break; | ||
159 | } | ||
155 | 160 | ||
156 | if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num], | 161 | if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info)) |
157 | sizeof(user_cmd))) | 162 | return -EINVAL; |
158 | return -EFAULT; | ||
159 | switch (user_cmd.type) { | ||
160 | case QXL_CMD_DRAW: | ||
161 | release_type = QXL_RELEASE_DRAWABLE; | ||
162 | break; | ||
163 | case QXL_CMD_SURFACE: | ||
164 | case QXL_CMD_CURSOR: | ||
165 | default: | ||
166 | DRM_DEBUG("Only draw commands in execbuffers\n"); | ||
167 | return -EINVAL; | ||
168 | break; | ||
169 | } | ||
170 | 163 | ||
171 | if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info)) | 164 | if (!access_ok(VERIFY_READ, |
172 | return -EINVAL; | 165 | (void *)(unsigned long)cmd->command, |
166 | cmd->command_size)) | ||
167 | return -EFAULT; | ||
173 | 168 | ||
174 | if (!access_ok(VERIFY_READ, | 169 | reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL); |
175 | (void *)(unsigned long)user_cmd.command, | 170 | if (!reloc_info) |
176 | user_cmd.command_size)) | 171 | return -ENOMEM; |
177 | return -EFAULT; | ||
178 | 172 | ||
179 | ret = qxl_alloc_release_reserved(qdev, | 173 | ret = qxl_alloc_release_reserved(qdev, |
180 | sizeof(union qxl_release_info) + | 174 | sizeof(union qxl_release_info) + |
181 | user_cmd.command_size, | 175 | cmd->command_size, |
182 | release_type, | 176 | release_type, |
183 | &release, | 177 | &release, |
184 | &cmd_bo); | 178 | &cmd_bo); |
185 | if (ret) | 179 | if (ret) |
186 | return ret; | 180 | goto out_free_reloc; |
187 | 181 | ||
188 | /* TODO copy slow path code from i915 */ | 182 | /* TODO copy slow path code from i915 */ |
189 | fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); | 183 | fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); |
190 | unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size); | 184 | unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size); |
191 | 185 | ||
192 | { | 186 | { |
193 | struct qxl_drawable *draw = fb_cmd; | 187 | struct qxl_drawable *draw = fb_cmd; |
188 | draw->mm_time = qdev->rom->mm_clock; | ||
189 | } | ||
194 | 190 | ||
195 | draw->mm_time = qdev->rom->mm_clock; | 191 | qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd); |
196 | } | 192 | if (unwritten) { |
197 | qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd); | 193 | DRM_ERROR("got unwritten %d\n", unwritten); |
198 | if (unwritten) { | 194 | ret = -EFAULT; |
199 | DRM_ERROR("got unwritten %d\n", unwritten); | 195 | goto out_free_release; |
200 | qxl_release_unreserve(qdev, release); | 196 | } |
201 | qxl_release_free(qdev, release); | 197 | |
202 | return -EFAULT; | 198 | /* fill out reloc info structs */ |
199 | num_relocs = 0; | ||
200 | for (i = 0; i < cmd->relocs_num; ++i) { | ||
201 | struct drm_qxl_reloc reloc; | ||
202 | |||
203 | if (DRM_COPY_FROM_USER(&reloc, | ||
204 | &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i], | ||
205 | sizeof(reloc))) { | ||
206 | ret = -EFAULT; | ||
207 | goto out_free_bos; | ||
203 | } | 208 | } |
204 | 209 | ||
205 | for (i = 0 ; i < user_cmd.relocs_num; ++i) { | 210 | /* add the bos to the list of bos to validate - |
206 | if (DRM_COPY_FROM_USER(&reloc, | 211 | need to validate first then process relocs? */ |
207 | &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i], | 212 | if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) { |
208 | sizeof(reloc))) { | 213 | DRM_DEBUG("unknown reloc type %d\n", reloc_info[i].type); |
209 | qxl_bo_list_unreserve(&reloc_list, true); | ||
210 | qxl_release_unreserve(qdev, release); | ||
211 | qxl_release_free(qdev, release); | ||
212 | return -EFAULT; | ||
213 | } | ||
214 | 214 | ||
215 | /* add the bos to the list of bos to validate - | 215 | ret = -EINVAL; |
216 | need to validate first then process relocs? */ | 216 | goto out_free_bos; |
217 | if (reloc.dst_handle) { | 217 | } |
218 | reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv, | 218 | reloc_info[i].type = reloc.reloc_type; |
219 | reloc.dst_handle, &reloc_list); | 219 | |
220 | if (!reloc_dst_bo) { | 220 | if (reloc.dst_handle) { |
221 | qxl_bo_list_unreserve(&reloc_list, true); | 221 | reloc_info[i].dst_bo = qxlhw_handle_to_bo(qdev, file_priv, |
222 | qxl_release_unreserve(qdev, release); | 222 | reloc.dst_handle, release); |
223 | qxl_release_free(qdev, release); | 223 | if (!reloc_info[i].dst_bo) { |
224 | return -EINVAL; | 224 | ret = -EINVAL; |
225 | } | 225 | reloc_info[i].src_bo = NULL; |
226 | reloc_dst_offset = 0; | 226 | goto out_free_bos; |
227 | } else { | ||
228 | reloc_dst_bo = cmd_bo; | ||
229 | reloc_dst_offset = release->release_offset; | ||
230 | } | 227 | } |
231 | 228 | reloc_info[i].dst_offset = reloc.dst_offset; | |
232 | /* reserve and validate the reloc dst bo */ | 229 | } else { |
233 | if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) { | 230 | reloc_info[i].dst_bo = cmd_bo; |
234 | reloc_src_bo = | 231 | reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset; |
235 | qxlhw_handle_to_bo(qdev, file_priv, | 232 | } |
236 | reloc.src_handle, &reloc_list); | 233 | num_relocs++; |
237 | if (!reloc_src_bo) { | 234 | |
238 | if (reloc_dst_bo != cmd_bo) | 235 | /* reserve and validate the reloc dst bo */ |
239 | drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); | 236 | if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) { |
240 | qxl_bo_list_unreserve(&reloc_list, true); | 237 | reloc_info[i].src_bo = |
241 | qxl_release_unreserve(qdev, release); | 238 | qxlhw_handle_to_bo(qdev, file_priv, |
242 | qxl_release_free(qdev, release); | 239 | reloc.src_handle, release); |
243 | return -EINVAL; | 240 | if (!reloc_info[i].src_bo) { |
244 | } | 241 | if (reloc_info[i].dst_bo != cmd_bo) |
245 | } else | 242 | drm_gem_object_unreference_unlocked(&reloc_info[i].dst_bo->gem_base); |
246 | reloc_src_bo = NULL; | 243 | ret = -EINVAL; |
247 | if (reloc.reloc_type == QXL_RELOC_TYPE_BO) { | 244 | goto out_free_bos; |
248 | apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, | ||
249 | reloc_src_bo, reloc.src_offset); | ||
250 | } else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) { | ||
251 | apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo); | ||
252 | } else { | ||
253 | DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type); | ||
254 | return -EINVAL; | ||
255 | } | 245 | } |
246 | reloc_info[i].src_offset = reloc.src_offset; | ||
247 | } else { | ||
248 | reloc_info[i].src_bo = NULL; | ||
249 | reloc_info[i].src_offset = 0; | ||
250 | } | ||
251 | } | ||
256 | 252 | ||
257 | if (reloc_src_bo && reloc_src_bo != cmd_bo) { | 253 | /* validate all buffers */ |
258 | qxl_release_add_res(qdev, release, reloc_src_bo); | 254 | ret = qxl_release_reserve_list(release, false); |
259 | drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base); | 255 | if (ret) |
260 | } | 256 | goto out_free_bos; |
261 | 257 | ||
262 | if (reloc_dst_bo != cmd_bo) | 258 | for (i = 0; i < cmd->relocs_num; ++i) { |
263 | drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); | 259 | if (reloc_info[i].type == QXL_RELOC_TYPE_BO) |
264 | } | 260 | apply_reloc(qdev, &reloc_info[i]); |
265 | qxl_fence_releaseable(qdev, release); | 261 | else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF) |
262 | apply_surf_reloc(qdev, &reloc_info[i]); | ||
263 | } | ||
266 | 264 | ||
267 | ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true); | 265 | ret = qxl_push_command_ring_release(qdev, release, cmd->type, true); |
268 | if (ret == -ERESTARTSYS) { | 266 | if (ret) |
269 | qxl_release_unreserve(qdev, release); | 267 | qxl_release_backoff_reserve_list(release); |
270 | qxl_release_free(qdev, release); | 268 | else |
271 | qxl_bo_list_unreserve(&reloc_list, true); | 269 | qxl_release_fence_buffer_objects(release); |
270 | |||
271 | out_free_bos: | ||
272 | for (j = 0; j < num_relocs; j++) { | ||
273 | if (reloc_info[j].dst_bo != cmd_bo) | ||
274 | drm_gem_object_unreference_unlocked(&reloc_info[j].dst_bo->gem_base); | ||
275 | if (reloc_info[j].src_bo && reloc_info[j].src_bo != cmd_bo) | ||
276 | drm_gem_object_unreference_unlocked(&reloc_info[j].src_bo->gem_base); | ||
277 | } | ||
278 | out_free_release: | ||
279 | if (ret) | ||
280 | qxl_release_free(qdev, release); | ||
281 | out_free_reloc: | ||
282 | kfree(reloc_info); | ||
283 | return ret; | ||
284 | } | ||
285 | |||
286 | static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, | ||
287 | struct drm_file *file_priv) | ||
288 | { | ||
289 | struct qxl_device *qdev = dev->dev_private; | ||
290 | struct drm_qxl_execbuffer *execbuffer = data; | ||
291 | struct drm_qxl_command user_cmd; | ||
292 | int cmd_num; | ||
293 | int ret; | ||
294 | |||
295 | for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) { | ||
296 | |||
297 | struct drm_qxl_command *commands = | ||
298 | (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; | ||
299 | |||
300 | if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num], | ||
301 | sizeof(user_cmd))) | ||
302 | return -EFAULT; | ||
303 | |||
304 | ret = qxl_process_single_command(qdev, &user_cmd, file_priv); | ||
305 | if (ret) | ||
272 | return ret; | 306 | return ret; |
273 | } | ||
274 | qxl_release_unreserve(qdev, release); | ||
275 | } | 307 | } |
276 | qxl_bo_list_unreserve(&reloc_list, 0); | ||
277 | return 0; | 308 | return 0; |
278 | } | 309 | } |
279 | 310 | ||
@@ -305,7 +336,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data, | |||
305 | goto out; | 336 | goto out; |
306 | 337 | ||
307 | if (!qobj->pin_count) { | 338 | if (!qobj->pin_count) { |
308 | qxl_ttm_placement_from_domain(qobj, qobj->type); | 339 | qxl_ttm_placement_from_domain(qobj, qobj->type, false); |
309 | ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, | 340 | ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, |
310 | true, false); | 341 | true, false); |
311 | if (unlikely(ret)) | 342 | if (unlikely(ret)) |
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index 1191fe7788c9..aa161cddd87e 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c | |||
@@ -51,20 +51,21 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo) | |||
51 | return false; | 51 | return false; |
52 | } | 52 | } |
53 | 53 | ||
54 | void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain) | 54 | void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned) |
55 | { | 55 | { |
56 | u32 c = 0; | 56 | u32 c = 0; |
57 | u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0; | ||
57 | 58 | ||
58 | qbo->placement.fpfn = 0; | 59 | qbo->placement.fpfn = 0; |
59 | qbo->placement.lpfn = 0; | 60 | qbo->placement.lpfn = 0; |
60 | qbo->placement.placement = qbo->placements; | 61 | qbo->placement.placement = qbo->placements; |
61 | qbo->placement.busy_placement = qbo->placements; | 62 | qbo->placement.busy_placement = qbo->placements; |
62 | if (domain == QXL_GEM_DOMAIN_VRAM) | 63 | if (domain == QXL_GEM_DOMAIN_VRAM) |
63 | qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM; | 64 | qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag; |
64 | if (domain == QXL_GEM_DOMAIN_SURFACE) | 65 | if (domain == QXL_GEM_DOMAIN_SURFACE) |
65 | qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0; | 66 | qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag; |
66 | if (domain == QXL_GEM_DOMAIN_CPU) | 67 | if (domain == QXL_GEM_DOMAIN_CPU) |
67 | qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; | 68 | qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag; |
68 | if (!c) | 69 | if (!c) |
69 | qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; | 70 | qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; |
70 | qbo->placement.num_placement = c; | 71 | qbo->placement.num_placement = c; |
@@ -73,7 +74,7 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain) | |||
73 | 74 | ||
74 | 75 | ||
75 | int qxl_bo_create(struct qxl_device *qdev, | 76 | int qxl_bo_create(struct qxl_device *qdev, |
76 | unsigned long size, bool kernel, u32 domain, | 77 | unsigned long size, bool kernel, bool pinned, u32 domain, |
77 | struct qxl_surface *surf, | 78 | struct qxl_surface *surf, |
78 | struct qxl_bo **bo_ptr) | 79 | struct qxl_bo **bo_ptr) |
79 | { | 80 | { |
@@ -99,15 +100,15 @@ int qxl_bo_create(struct qxl_device *qdev, | |||
99 | } | 100 | } |
100 | bo->gem_base.driver_private = NULL; | 101 | bo->gem_base.driver_private = NULL; |
101 | bo->type = domain; | 102 | bo->type = domain; |
102 | bo->pin_count = 0; | 103 | bo->pin_count = pinned ? 1 : 0; |
103 | bo->surface_id = 0; | 104 | bo->surface_id = 0; |
104 | qxl_fence_init(qdev, &bo->fence); | 105 | qxl_fence_init(qdev, &bo->fence); |
105 | INIT_LIST_HEAD(&bo->list); | 106 | INIT_LIST_HEAD(&bo->list); |
106 | atomic_set(&bo->reserve_count, 0); | 107 | |
107 | if (surf) | 108 | if (surf) |
108 | bo->surf = *surf; | 109 | bo->surf = *surf; |
109 | 110 | ||
110 | qxl_ttm_placement_from_domain(bo, domain); | 111 | qxl_ttm_placement_from_domain(bo, domain, pinned); |
111 | 112 | ||
112 | r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, | 113 | r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, |
113 | &bo->placement, 0, !kernel, NULL, size, | 114 | &bo->placement, 0, !kernel, NULL, size, |
@@ -228,7 +229,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo) | |||
228 | int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) | 229 | int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) |
229 | { | 230 | { |
230 | struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; | 231 | struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; |
231 | int r, i; | 232 | int r; |
232 | 233 | ||
233 | if (bo->pin_count) { | 234 | if (bo->pin_count) { |
234 | bo->pin_count++; | 235 | bo->pin_count++; |
@@ -236,9 +237,7 @@ int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) | |||
236 | *gpu_addr = qxl_bo_gpu_offset(bo); | 237 | *gpu_addr = qxl_bo_gpu_offset(bo); |
237 | return 0; | 238 | return 0; |
238 | } | 239 | } |
239 | qxl_ttm_placement_from_domain(bo, domain); | 240 | qxl_ttm_placement_from_domain(bo, domain, true); |
240 | for (i = 0; i < bo->placement.num_placement; i++) | ||
241 | bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; | ||
242 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | 241 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); |
243 | if (likely(r == 0)) { | 242 | if (likely(r == 0)) { |
244 | bo->pin_count = 1; | 243 | bo->pin_count = 1; |
@@ -317,53 +316,6 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo) | |||
317 | return 0; | 316 | return 0; |
318 | } | 317 | } |
319 | 318 | ||
320 | void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed) | ||
321 | { | ||
322 | struct qxl_bo_list *entry, *sf; | ||
323 | |||
324 | list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) { | ||
325 | qxl_bo_unreserve(entry->bo); | ||
326 | list_del(&entry->lhead); | ||
327 | kfree(entry); | ||
328 | } | ||
329 | } | ||
330 | |||
331 | int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo) | ||
332 | { | ||
333 | struct qxl_bo_list *entry; | ||
334 | int ret; | ||
335 | |||
336 | list_for_each_entry(entry, &reloc_list->bos, lhead) { | ||
337 | if (entry->bo == bo) | ||
338 | return 0; | ||
339 | } | ||
340 | |||
341 | entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL); | ||
342 | if (!entry) | ||
343 | return -ENOMEM; | ||
344 | |||
345 | entry->bo = bo; | ||
346 | list_add(&entry->lhead, &reloc_list->bos); | ||
347 | |||
348 | ret = qxl_bo_reserve(bo, false); | ||
349 | if (ret) | ||
350 | return ret; | ||
351 | |||
352 | if (!bo->pin_count) { | ||
353 | qxl_ttm_placement_from_domain(bo, bo->type); | ||
354 | ret = ttm_bo_validate(&bo->tbo, &bo->placement, | ||
355 | true, false); | ||
356 | if (ret) | ||
357 | return ret; | ||
358 | } | ||
359 | |||
360 | /* allocate a surface for reserved + validated buffers */ | ||
361 | ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo); | ||
362 | if (ret) | ||
363 | return ret; | ||
364 | return 0; | ||
365 | } | ||
366 | |||
367 | int qxl_surf_evict(struct qxl_device *qdev) | 319 | int qxl_surf_evict(struct qxl_device *qdev) |
368 | { | 320 | { |
369 | return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0); | 321 | return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0); |
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h index ee7ad79ce781..8cb6167038e5 100644 --- a/drivers/gpu/drm/qxl/qxl_object.h +++ b/drivers/gpu/drm/qxl/qxl_object.h | |||
@@ -88,7 +88,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type, | |||
88 | 88 | ||
89 | extern int qxl_bo_create(struct qxl_device *qdev, | 89 | extern int qxl_bo_create(struct qxl_device *qdev, |
90 | unsigned long size, | 90 | unsigned long size, |
91 | bool kernel, u32 domain, | 91 | bool kernel, bool pinned, u32 domain, |
92 | struct qxl_surface *surf, | 92 | struct qxl_surface *surf, |
93 | struct qxl_bo **bo_ptr); | 93 | struct qxl_bo **bo_ptr); |
94 | extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); | 94 | extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); |
@@ -99,9 +99,7 @@ extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo); | |||
99 | extern void qxl_bo_unref(struct qxl_bo **bo); | 99 | extern void qxl_bo_unref(struct qxl_bo **bo); |
100 | extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr); | 100 | extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr); |
101 | extern int qxl_bo_unpin(struct qxl_bo *bo); | 101 | extern int qxl_bo_unpin(struct qxl_bo *bo); |
102 | extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain); | 102 | extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned); |
103 | extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo); | 103 | extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo); |
104 | 104 | ||
105 | extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo); | ||
106 | extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed); | ||
107 | #endif | 105 | #endif |
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index b443d6751d5f..b61449e52cd5 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c | |||
@@ -38,7 +38,8 @@ | |||
38 | 38 | ||
39 | static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; | 39 | static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; |
40 | static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; | 40 | static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; |
41 | uint64_t | 41 | |
42 | static uint64_t | ||
42 | qxl_release_alloc(struct qxl_device *qdev, int type, | 43 | qxl_release_alloc(struct qxl_device *qdev, int type, |
43 | struct qxl_release **ret) | 44 | struct qxl_release **ret) |
44 | { | 45 | { |
@@ -53,9 +54,9 @@ qxl_release_alloc(struct qxl_device *qdev, int type, | |||
53 | return 0; | 54 | return 0; |
54 | } | 55 | } |
55 | release->type = type; | 56 | release->type = type; |
56 | release->bo_count = 0; | ||
57 | release->release_offset = 0; | 57 | release->release_offset = 0; |
58 | release->surface_release_id = 0; | 58 | release->surface_release_id = 0; |
59 | INIT_LIST_HEAD(&release->bos); | ||
59 | 60 | ||
60 | idr_preload(GFP_KERNEL); | 61 | idr_preload(GFP_KERNEL); |
61 | spin_lock(&qdev->release_idr_lock); | 62 | spin_lock(&qdev->release_idr_lock); |
@@ -77,20 +78,20 @@ void | |||
77 | qxl_release_free(struct qxl_device *qdev, | 78 | qxl_release_free(struct qxl_device *qdev, |
78 | struct qxl_release *release) | 79 | struct qxl_release *release) |
79 | { | 80 | { |
80 | int i; | 81 | struct qxl_bo_list *entry, *tmp; |
81 | 82 | QXL_INFO(qdev, "release %d, type %d\n", release->id, | |
82 | QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id, | 83 | release->type); |
83 | release->type, release->bo_count); | ||
84 | 84 | ||
85 | if (release->surface_release_id) | 85 | if (release->surface_release_id) |
86 | qxl_surface_id_dealloc(qdev, release->surface_release_id); | 86 | qxl_surface_id_dealloc(qdev, release->surface_release_id); |
87 | 87 | ||
88 | for (i = 0 ; i < release->bo_count; ++i) { | 88 | list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) { |
89 | struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); | ||
89 | QXL_INFO(qdev, "release %llx\n", | 90 | QXL_INFO(qdev, "release %llx\n", |
90 | release->bos[i]->tbo.addr_space_offset | 91 | entry->tv.bo->addr_space_offset |
91 | - DRM_FILE_OFFSET); | 92 | - DRM_FILE_OFFSET); |
92 | qxl_fence_remove_release(&release->bos[i]->fence, release->id); | 93 | qxl_fence_remove_release(&bo->fence, release->id); |
93 | qxl_bo_unref(&release->bos[i]); | 94 | qxl_bo_unref(&bo); |
94 | } | 95 | } |
95 | spin_lock(&qdev->release_idr_lock); | 96 | spin_lock(&qdev->release_idr_lock); |
96 | idr_remove(&qdev->release_idr, release->id); | 97 | idr_remove(&qdev->release_idr, release->id); |
@@ -98,83 +99,117 @@ qxl_release_free(struct qxl_device *qdev, | |||
98 | kfree(release); | 99 | kfree(release); |
99 | } | 100 | } |
100 | 101 | ||
101 | void | ||
102 | qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release, | ||
103 | struct qxl_bo *bo) | ||
104 | { | ||
105 | int i; | ||
106 | for (i = 0; i < release->bo_count; i++) | ||
107 | if (release->bos[i] == bo) | ||
108 | return; | ||
109 | |||
110 | if (release->bo_count >= QXL_MAX_RES) { | ||
111 | DRM_ERROR("exceeded max resource on a qxl_release item\n"); | ||
112 | return; | ||
113 | } | ||
114 | release->bos[release->bo_count++] = qxl_bo_ref(bo); | ||
115 | } | ||
116 | |||
117 | static int qxl_release_bo_alloc(struct qxl_device *qdev, | 102 | static int qxl_release_bo_alloc(struct qxl_device *qdev, |
118 | struct qxl_bo **bo) | 103 | struct qxl_bo **bo) |
119 | { | 104 | { |
120 | int ret; | 105 | int ret; |
121 | ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL, | 106 | /* pin releases bo's they are too messy to evict */ |
107 | ret = qxl_bo_create(qdev, PAGE_SIZE, false, true, | ||
108 | QXL_GEM_DOMAIN_VRAM, NULL, | ||
122 | bo); | 109 | bo); |
123 | return ret; | 110 | return ret; |
124 | } | 111 | } |
125 | 112 | ||
126 | int qxl_release_reserve(struct qxl_device *qdev, | 113 | int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo) |
127 | struct qxl_release *release, bool no_wait) | 114 | { |
115 | struct qxl_bo_list *entry; | ||
116 | |||
117 | list_for_each_entry(entry, &release->bos, tv.head) { | ||
118 | if (entry->tv.bo == &bo->tbo) | ||
119 | return 0; | ||
120 | } | ||
121 | |||
122 | entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL); | ||
123 | if (!entry) | ||
124 | return -ENOMEM; | ||
125 | |||
126 | qxl_bo_ref(bo); | ||
127 | entry->tv.bo = &bo->tbo; | ||
128 | list_add_tail(&entry->tv.head, &release->bos); | ||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | static int qxl_release_validate_bo(struct qxl_bo *bo) | ||
128 | { | 133 | { |
129 | int ret; | 134 | int ret; |
130 | if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) { | 135 | |
131 | ret = qxl_bo_reserve(release->bos[0], no_wait); | 136 | if (!bo->pin_count) { |
137 | qxl_ttm_placement_from_domain(bo, bo->type, false); | ||
138 | ret = ttm_bo_validate(&bo->tbo, &bo->placement, | ||
139 | true, false); | ||
132 | if (ret) | 140 | if (ret) |
133 | return ret; | 141 | return ret; |
134 | } | 142 | } |
143 | |||
144 | /* allocate a surface for reserved + validated buffers */ | ||
145 | ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo); | ||
146 | if (ret) | ||
147 | return ret; | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | int qxl_release_reserve_list(struct qxl_release *release, bool no_intr) | ||
152 | { | ||
153 | int ret; | ||
154 | struct qxl_bo_list *entry; | ||
155 | |||
156 | /* if only one object is on the release, it's the release itself; | ||
157 | since these objects are pinned, no need to reserve */ | ||
158 | if (list_is_singular(&release->bos)) | ||
159 | return 0; | ||
160 | |||
161 | ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos); | ||
162 | if (ret) | ||
163 | return ret; | ||
164 | |||
165 | list_for_each_entry(entry, &release->bos, tv.head) { | ||
166 | struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); | ||
167 | |||
168 | ret = qxl_release_validate_bo(bo); | ||
169 | if (ret) { | ||
170 | ttm_eu_backoff_reservation(&release->ticket, &release->bos); | ||
171 | return ret; | ||
172 | } | ||
173 | } | ||
135 | return 0; | 174 | return 0; |
136 | } | 175 | } |
137 | 176 | ||
138 | void qxl_release_unreserve(struct qxl_device *qdev, | 177 | void qxl_release_backoff_reserve_list(struct qxl_release *release) |
139 | struct qxl_release *release) | ||
140 | { | 178 | { |
141 | if (atomic_dec_and_test(&release->bos[0]->reserve_count)) | 179 | /* if only one object is on the release, it's the release itself; |
142 | qxl_bo_unreserve(release->bos[0]); | 180 | since these objects are pinned, no need to reserve */ |
181 | if (list_is_singular(&release->bos)) | ||
182 | return; | ||
183 | |||
184 | ttm_eu_backoff_reservation(&release->ticket, &release->bos); | ||
143 | } | 185 | } |
144 | 186 | ||
187 | |||
145 | int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, | 188 | int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, |
146 | enum qxl_surface_cmd_type surface_cmd_type, | 189 | enum qxl_surface_cmd_type surface_cmd_type, |
147 | struct qxl_release *create_rel, | 190 | struct qxl_release *create_rel, |
148 | struct qxl_release **release) | 191 | struct qxl_release **release) |
149 | { | 192 | { |
150 | int ret; | ||
151 | |||
152 | if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { | 193 | if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { |
153 | int idr_ret; | 194 | int idr_ret; |
195 | struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head); | ||
154 | struct qxl_bo *bo; | 196 | struct qxl_bo *bo; |
155 | union qxl_release_info *info; | 197 | union qxl_release_info *info; |
156 | 198 | ||
157 | /* stash the release after the create command */ | 199 | /* stash the release after the create command */ |
158 | idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); | 200 | idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); |
159 | bo = qxl_bo_ref(create_rel->bos[0]); | 201 | bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo)); |
160 | 202 | ||
161 | (*release)->release_offset = create_rel->release_offset + 64; | 203 | (*release)->release_offset = create_rel->release_offset + 64; |
162 | 204 | ||
163 | qxl_release_add_res(qdev, *release, bo); | 205 | qxl_release_list_add(*release, bo); |
164 | 206 | ||
165 | ret = qxl_release_reserve(qdev, *release, false); | ||
166 | if (ret) { | ||
167 | DRM_ERROR("release reserve failed\n"); | ||
168 | goto out_unref; | ||
169 | } | ||
170 | info = qxl_release_map(qdev, *release); | 207 | info = qxl_release_map(qdev, *release); |
171 | info->id = idr_ret; | 208 | info->id = idr_ret; |
172 | qxl_release_unmap(qdev, *release, info); | 209 | qxl_release_unmap(qdev, *release, info); |
173 | 210 | ||
174 | |||
175 | out_unref: | ||
176 | qxl_bo_unref(&bo); | 211 | qxl_bo_unref(&bo); |
177 | return ret; | 212 | return 0; |
178 | } | 213 | } |
179 | 214 | ||
180 | return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd), | 215 | return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd), |
@@ -187,7 +222,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, | |||
187 | { | 222 | { |
188 | struct qxl_bo *bo; | 223 | struct qxl_bo *bo; |
189 | int idr_ret; | 224 | int idr_ret; |
190 | int ret; | 225 | int ret = 0; |
191 | union qxl_release_info *info; | 226 | union qxl_release_info *info; |
192 | int cur_idx; | 227 | int cur_idx; |
193 | 228 | ||
@@ -216,11 +251,6 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, | |||
216 | mutex_unlock(&qdev->release_mutex); | 251 | mutex_unlock(&qdev->release_mutex); |
217 | return ret; | 252 | return ret; |
218 | } | 253 | } |
219 | |||
220 | /* pin releases bo's they are too messy to evict */ | ||
221 | ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false); | ||
222 | qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL); | ||
223 | qxl_bo_unreserve(qdev->current_release_bo[cur_idx]); | ||
224 | } | 254 | } |
225 | 255 | ||
226 | bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); | 256 | bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); |
@@ -231,36 +261,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, | |||
231 | if (rbo) | 261 | if (rbo) |
232 | *rbo = bo; | 262 | *rbo = bo; |
233 | 263 | ||
234 | qxl_release_add_res(qdev, *release, bo); | ||
235 | |||
236 | ret = qxl_release_reserve(qdev, *release, false); | ||
237 | mutex_unlock(&qdev->release_mutex); | 264 | mutex_unlock(&qdev->release_mutex); |
238 | if (ret) | 265 | |
239 | goto out_unref; | 266 | qxl_release_list_add(*release, bo); |
240 | 267 | ||
241 | info = qxl_release_map(qdev, *release); | 268 | info = qxl_release_map(qdev, *release); |
242 | info->id = idr_ret; | 269 | info->id = idr_ret; |
243 | qxl_release_unmap(qdev, *release, info); | 270 | qxl_release_unmap(qdev, *release, info); |
244 | 271 | ||
245 | out_unref: | ||
246 | qxl_bo_unref(&bo); | 272 | qxl_bo_unref(&bo); |
247 | return ret; | 273 | return ret; |
248 | } | 274 | } |
249 | 275 | ||
250 | int qxl_fence_releaseable(struct qxl_device *qdev, | ||
251 | struct qxl_release *release) | ||
252 | { | ||
253 | int i, ret; | ||
254 | for (i = 0; i < release->bo_count; i++) { | ||
255 | if (!release->bos[i]->tbo.sync_obj) | ||
256 | release->bos[i]->tbo.sync_obj = &release->bos[i]->fence; | ||
257 | ret = qxl_fence_add_release(&release->bos[i]->fence, release->id); | ||
258 | if (ret) | ||
259 | return ret; | ||
260 | } | ||
261 | return 0; | ||
262 | } | ||
263 | |||
264 | struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, | 276 | struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, |
265 | uint64_t id) | 277 | uint64_t id) |
266 | { | 278 | { |
@@ -273,10 +285,7 @@ struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, | |||
273 | DRM_ERROR("failed to find id in release_idr\n"); | 285 | DRM_ERROR("failed to find id in release_idr\n"); |
274 | return NULL; | 286 | return NULL; |
275 | } | 287 | } |
276 | if (release->bo_count < 1) { | 288 | |
277 | DRM_ERROR("read a released resource with 0 bos\n"); | ||
278 | return NULL; | ||
279 | } | ||
280 | return release; | 289 | return release; |
281 | } | 290 | } |
282 | 291 | ||
@@ -285,9 +294,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev, | |||
285 | { | 294 | { |
286 | void *ptr; | 295 | void *ptr; |
287 | union qxl_release_info *info; | 296 | union qxl_release_info *info; |
288 | struct qxl_bo *bo = release->bos[0]; | 297 | struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); |
298 | struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); | ||
289 | 299 | ||
290 | ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); | 300 | ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); |
301 | if (!ptr) | ||
302 | return NULL; | ||
291 | info = ptr + (release->release_offset & ~PAGE_SIZE); | 303 | info = ptr + (release->release_offset & ~PAGE_SIZE); |
292 | return info; | 304 | return info; |
293 | } | 305 | } |
@@ -296,9 +308,51 @@ void qxl_release_unmap(struct qxl_device *qdev, | |||
296 | struct qxl_release *release, | 308 | struct qxl_release *release, |
297 | union qxl_release_info *info) | 309 | union qxl_release_info *info) |
298 | { | 310 | { |
299 | struct qxl_bo *bo = release->bos[0]; | 311 | struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); |
312 | struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); | ||
300 | void *ptr; | 313 | void *ptr; |
301 | 314 | ||
302 | ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE); | 315 | ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE); |
303 | qxl_bo_kunmap_atomic_page(qdev, bo, ptr); | 316 | qxl_bo_kunmap_atomic_page(qdev, bo, ptr); |
304 | } | 317 | } |
318 | |||
319 | void qxl_release_fence_buffer_objects(struct qxl_release *release) | ||
320 | { | ||
321 | struct ttm_validate_buffer *entry; | ||
322 | struct ttm_buffer_object *bo; | ||
323 | struct ttm_bo_global *glob; | ||
324 | struct ttm_bo_device *bdev; | ||
325 | struct ttm_bo_driver *driver; | ||
326 | struct qxl_bo *qbo; | ||
327 | |||
328 | /* if only one object is on the release, it's the release itself; | ||
329 | since these objects are pinned, no need to reserve */ | ||
330 | if (list_is_singular(&release->bos)) | ||
331 | return; | ||
332 | |||
333 | bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo; | ||
334 | bdev = bo->bdev; | ||
335 | driver = bdev->driver; | ||
336 | glob = bo->glob; | ||
337 | |||
338 | spin_lock(&glob->lru_lock); | ||
339 | spin_lock(&bdev->fence_lock); | ||
340 | |||
341 | list_for_each_entry(entry, &release->bos, head) { | ||
342 | bo = entry->bo; | ||
343 | qbo = to_qxl_bo(bo); | ||
344 | |||
345 | if (!entry->bo->sync_obj) | ||
346 | entry->bo->sync_obj = &qbo->fence; | ||
347 | |||
348 | qxl_fence_add_release_locked(&qbo->fence, release->id); | ||
349 | |||
350 | ttm_bo_add_to_lru(bo); | ||
351 | ww_mutex_unlock(&bo->resv->lock); | ||
352 | entry->reserved = false; | ||
353 | } | ||
354 | spin_unlock(&bdev->fence_lock); | ||
355 | spin_unlock(&glob->lru_lock); | ||
356 | ww_acquire_fini(&release->ticket); | ||
357 | } | ||
358 | |||
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 489cb8cece4d..1dfd84cda2a1 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c | |||
@@ -206,7 +206,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo, | |||
206 | return; | 206 | return; |
207 | } | 207 | } |
208 | qbo = container_of(bo, struct qxl_bo, tbo); | 208 | qbo = container_of(bo, struct qxl_bo, tbo); |
209 | qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU); | 209 | qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false); |
210 | *placement = qbo->placement; | 210 | *placement = qbo->placement; |
211 | } | 211 | } |
212 | 212 | ||
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index fb441a790f3d..15da7ef344a4 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
@@ -1222,12 +1222,17 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) | |||
1222 | int r; | 1222 | int r; |
1223 | 1223 | ||
1224 | mutex_lock(&ctx->mutex); | 1224 | mutex_lock(&ctx->mutex); |
1225 | /* reset data block */ | ||
1226 | ctx->data_block = 0; | ||
1225 | /* reset reg block */ | 1227 | /* reset reg block */ |
1226 | ctx->reg_block = 0; | 1228 | ctx->reg_block = 0; |
1227 | /* reset fb window */ | 1229 | /* reset fb window */ |
1228 | ctx->fb_base = 0; | 1230 | ctx->fb_base = 0; |
1229 | /* reset io mode */ | 1231 | /* reset io mode */ |
1230 | ctx->io_mode = ATOM_IO_MM; | 1232 | ctx->io_mode = ATOM_IO_MM; |
1233 | /* reset divmul */ | ||
1234 | ctx->divmul[0] = 0; | ||
1235 | ctx->divmul[1] = 0; | ||
1231 | r = atom_execute_table_locked(ctx, index, params); | 1236 | r = atom_execute_table_locked(ctx, index, params); |
1232 | mutex_unlock(&ctx->mutex); | 1237 | mutex_unlock(&ctx->mutex); |
1233 | return r; | 1238 | return r; |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 064023bed480..32501f6ec991 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -44,6 +44,41 @@ static char *pre_emph_names[] = { | |||
44 | }; | 44 | }; |
45 | 45 | ||
46 | /***** radeon AUX functions *****/ | 46 | /***** radeon AUX functions *****/ |
47 | |||
48 | /* Atom needs data in little endian format | ||
49 | * so swap as appropriate when copying data to | ||
50 | * or from atom. Note that atom operates on | ||
51 | * dw units. | ||
52 | */ | ||
53 | static void radeon_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) | ||
54 | { | ||
55 | #ifdef __BIG_ENDIAN | ||
56 | u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */ | ||
57 | u32 *dst32, *src32; | ||
58 | int i; | ||
59 | |||
60 | memcpy(src_tmp, src, num_bytes); | ||
61 | src32 = (u32 *)src_tmp; | ||
62 | dst32 = (u32 *)dst_tmp; | ||
63 | if (to_le) { | ||
64 | for (i = 0; i < ((num_bytes + 3) / 4); i++) | ||
65 | dst32[i] = cpu_to_le32(src32[i]); | ||
66 | memcpy(dst, dst_tmp, num_bytes); | ||
67 | } else { | ||
68 | u8 dws = num_bytes & ~3; | ||
69 | for (i = 0; i < ((num_bytes + 3) / 4); i++) | ||
70 | dst32[i] = le32_to_cpu(src32[i]); | ||
71 | memcpy(dst, dst_tmp, dws); | ||
72 | if (num_bytes % 4) { | ||
73 | for (i = 0; i < (num_bytes % 4); i++) | ||
74 | dst[dws+i] = dst_tmp[dws+i]; | ||
75 | } | ||
76 | } | ||
77 | #else | ||
78 | memcpy(dst, src, num_bytes); | ||
79 | #endif | ||
80 | } | ||
81 | |||
47 | union aux_channel_transaction { | 82 | union aux_channel_transaction { |
48 | PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; | 83 | PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; |
49 | PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; | 84 | PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; |
@@ -65,10 +100,10 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, | |||
65 | 100 | ||
66 | base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); | 101 | base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); |
67 | 102 | ||
68 | memcpy(base, send, send_bytes); | 103 | radeon_copy_swap(base, send, send_bytes, true); |
69 | 104 | ||
70 | args.v1.lpAuxRequest = 0 + 4; | 105 | args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4)); |
71 | args.v1.lpDataOut = 16 + 4; | 106 | args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4)); |
72 | args.v1.ucDataOutLen = 0; | 107 | args.v1.ucDataOutLen = 0; |
73 | args.v1.ucChannelID = chan->rec.i2c_id; | 108 | args.v1.ucChannelID = chan->rec.i2c_id; |
74 | args.v1.ucDelay = delay / 10; | 109 | args.v1.ucDelay = delay / 10; |
@@ -102,7 +137,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, | |||
102 | recv_bytes = recv_size; | 137 | recv_bytes = recv_size; |
103 | 138 | ||
104 | if (recv && recv_size) | 139 | if (recv && recv_size) |
105 | memcpy(recv, base + 16, recv_bytes); | 140 | radeon_copy_swap(recv, base + 16, recv_bytes, false); |
106 | 141 | ||
107 | return recv_bytes; | 142 | return recv_bytes; |
108 | } | 143 | } |
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index 0bfd55e08820..9953e1fbc46d 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c | |||
@@ -2548,9 +2548,6 @@ int btc_dpm_init(struct radeon_device *rdev) | |||
2548 | { | 2548 | { |
2549 | struct rv7xx_power_info *pi; | 2549 | struct rv7xx_power_info *pi; |
2550 | struct evergreen_power_info *eg_pi; | 2550 | struct evergreen_power_info *eg_pi; |
2551 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
2552 | u16 data_offset, size; | ||
2553 | u8 frev, crev; | ||
2554 | struct atom_clock_dividers dividers; | 2551 | struct atom_clock_dividers dividers; |
2555 | int ret; | 2552 | int ret; |
2556 | 2553 | ||
@@ -2633,16 +2630,7 @@ int btc_dpm_init(struct radeon_device *rdev) | |||
2633 | eg_pi->vddci_control = | 2630 | eg_pi->vddci_control = |
2634 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); | 2631 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); |
2635 | 2632 | ||
2636 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 2633 | rv770_get_engine_memory_ss(rdev); |
2637 | &frev, &crev, &data_offset)) { | ||
2638 | pi->sclk_ss = true; | ||
2639 | pi->mclk_ss = true; | ||
2640 | pi->dynamic_ss = true; | ||
2641 | } else { | ||
2642 | pi->sclk_ss = false; | ||
2643 | pi->mclk_ss = false; | ||
2644 | pi->dynamic_ss = true; | ||
2645 | } | ||
2646 | 2634 | ||
2647 | pi->asi = RV770_ASI_DFLT; | 2635 | pi->asi = RV770_ASI_DFLT; |
2648 | pi->pasi = CYPRESS_HASI_DFLT; | 2636 | pi->pasi = CYPRESS_HASI_DFLT; |
@@ -2659,8 +2647,7 @@ int btc_dpm_init(struct radeon_device *rdev) | |||
2659 | 2647 | ||
2660 | pi->dynamic_pcie_gen2 = true; | 2648 | pi->dynamic_pcie_gen2 = true; |
2661 | 2649 | ||
2662 | if (pi->gfx_clock_gating && | 2650 | if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) |
2663 | (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) | ||
2664 | pi->thermal_protection = true; | 2651 | pi->thermal_protection = true; |
2665 | else | 2652 | else |
2666 | pi->thermal_protection = false; | 2653 | pi->thermal_protection = false; |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 6dacec4e2090..8928bd109c16 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
@@ -2587,9 +2587,11 @@ u32 cik_compute_ring_get_rptr(struct radeon_device *rdev, | |||
2587 | if (rdev->wb.enabled) { | 2587 | if (rdev->wb.enabled) { |
2588 | rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); | 2588 | rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); |
2589 | } else { | 2589 | } else { |
2590 | mutex_lock(&rdev->srbm_mutex); | ||
2590 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); | 2591 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); |
2591 | rptr = RREG32(CP_HQD_PQ_RPTR); | 2592 | rptr = RREG32(CP_HQD_PQ_RPTR); |
2592 | cik_srbm_select(rdev, 0, 0, 0, 0); | 2593 | cik_srbm_select(rdev, 0, 0, 0, 0); |
2594 | mutex_unlock(&rdev->srbm_mutex); | ||
2593 | } | 2595 | } |
2594 | rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; | 2596 | rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; |
2595 | 2597 | ||
@@ -2604,9 +2606,11 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev, | |||
2604 | if (rdev->wb.enabled) { | 2606 | if (rdev->wb.enabled) { |
2605 | wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]); | 2607 | wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]); |
2606 | } else { | 2608 | } else { |
2609 | mutex_lock(&rdev->srbm_mutex); | ||
2607 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); | 2610 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); |
2608 | wptr = RREG32(CP_HQD_PQ_WPTR); | 2611 | wptr = RREG32(CP_HQD_PQ_WPTR); |
2609 | cik_srbm_select(rdev, 0, 0, 0, 0); | 2612 | cik_srbm_select(rdev, 0, 0, 0, 0); |
2613 | mutex_unlock(&rdev->srbm_mutex); | ||
2610 | } | 2614 | } |
2611 | wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; | 2615 | wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; |
2612 | 2616 | ||
@@ -2897,6 +2901,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) | |||
2897 | WREG32(CP_CPF_DEBUG, tmp); | 2901 | WREG32(CP_CPF_DEBUG, tmp); |
2898 | 2902 | ||
2899 | /* init the pipes */ | 2903 | /* init the pipes */ |
2904 | mutex_lock(&rdev->srbm_mutex); | ||
2900 | for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) { | 2905 | for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) { |
2901 | int me = (i < 4) ? 1 : 2; | 2906 | int me = (i < 4) ? 1 : 2; |
2902 | int pipe = (i < 4) ? i : (i - 4); | 2907 | int pipe = (i < 4) ? i : (i - 4); |
@@ -2919,6 +2924,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) | |||
2919 | WREG32(CP_HPD_EOP_CONTROL, tmp); | 2924 | WREG32(CP_HPD_EOP_CONTROL, tmp); |
2920 | } | 2925 | } |
2921 | cik_srbm_select(rdev, 0, 0, 0, 0); | 2926 | cik_srbm_select(rdev, 0, 0, 0, 0); |
2927 | mutex_unlock(&rdev->srbm_mutex); | ||
2922 | 2928 | ||
2923 | /* init the queues. Just two for now. */ | 2929 | /* init the queues. Just two for now. */ |
2924 | for (i = 0; i < 2; i++) { | 2930 | for (i = 0; i < 2; i++) { |
@@ -2972,6 +2978,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) | |||
2972 | mqd->static_thread_mgmt23[0] = 0xffffffff; | 2978 | mqd->static_thread_mgmt23[0] = 0xffffffff; |
2973 | mqd->static_thread_mgmt23[1] = 0xffffffff; | 2979 | mqd->static_thread_mgmt23[1] = 0xffffffff; |
2974 | 2980 | ||
2981 | mutex_lock(&rdev->srbm_mutex); | ||
2975 | cik_srbm_select(rdev, rdev->ring[idx].me, | 2982 | cik_srbm_select(rdev, rdev->ring[idx].me, |
2976 | rdev->ring[idx].pipe, | 2983 | rdev->ring[idx].pipe, |
2977 | rdev->ring[idx].queue, 0); | 2984 | rdev->ring[idx].queue, 0); |
@@ -3099,6 +3106,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) | |||
3099 | WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active); | 3106 | WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active); |
3100 | 3107 | ||
3101 | cik_srbm_select(rdev, 0, 0, 0, 0); | 3108 | cik_srbm_select(rdev, 0, 0, 0, 0); |
3109 | mutex_unlock(&rdev->srbm_mutex); | ||
3102 | 3110 | ||
3103 | radeon_bo_kunmap(rdev->ring[idx].mqd_obj); | 3111 | radeon_bo_kunmap(rdev->ring[idx].mqd_obj); |
3104 | radeon_bo_unreserve(rdev->ring[idx].mqd_obj); | 3112 | radeon_bo_unreserve(rdev->ring[idx].mqd_obj); |
@@ -4320,6 +4328,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) | |||
4320 | 4328 | ||
4321 | /* XXX SH_MEM regs */ | 4329 | /* XXX SH_MEM regs */ |
4322 | /* where to put LDS, scratch, GPUVM in FSA64 space */ | 4330 | /* where to put LDS, scratch, GPUVM in FSA64 space */ |
4331 | mutex_lock(&rdev->srbm_mutex); | ||
4323 | for (i = 0; i < 16; i++) { | 4332 | for (i = 0; i < 16; i++) { |
4324 | cik_srbm_select(rdev, 0, 0, 0, i); | 4333 | cik_srbm_select(rdev, 0, 0, 0, i); |
4325 | /* CP and shaders */ | 4334 | /* CP and shaders */ |
@@ -4335,6 +4344,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) | |||
4335 | /* XXX SDMA RLC - todo */ | 4344 | /* XXX SDMA RLC - todo */ |
4336 | } | 4345 | } |
4337 | cik_srbm_select(rdev, 0, 0, 0, 0); | 4346 | cik_srbm_select(rdev, 0, 0, 0, 0); |
4347 | mutex_unlock(&rdev->srbm_mutex); | ||
4338 | 4348 | ||
4339 | cik_pcie_gart_tlb_flush(rdev); | 4349 | cik_pcie_gart_tlb_flush(rdev); |
4340 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", | 4350 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", |
@@ -5954,6 +5964,8 @@ static int cik_startup(struct radeon_device *rdev) | |||
5954 | struct radeon_ring *ring; | 5964 | struct radeon_ring *ring; |
5955 | int r; | 5965 | int r; |
5956 | 5966 | ||
5967 | cik_mc_program(rdev); | ||
5968 | |||
5957 | if (rdev->flags & RADEON_IS_IGP) { | 5969 | if (rdev->flags & RADEON_IS_IGP) { |
5958 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || | 5970 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || |
5959 | !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) { | 5971 | !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) { |
@@ -5985,7 +5997,6 @@ static int cik_startup(struct radeon_device *rdev) | |||
5985 | if (r) | 5997 | if (r) |
5986 | return r; | 5998 | return r; |
5987 | 5999 | ||
5988 | cik_mc_program(rdev); | ||
5989 | r = cik_pcie_gart_enable(rdev); | 6000 | r = cik_pcie_gart_enable(rdev); |
5990 | if (r) | 6001 | if (r) |
5991 | return r; | 6002 | return r; |
@@ -6194,7 +6205,7 @@ int cik_suspend(struct radeon_device *rdev) | |||
6194 | radeon_vm_manager_fini(rdev); | 6205 | radeon_vm_manager_fini(rdev); |
6195 | cik_cp_enable(rdev, false); | 6206 | cik_cp_enable(rdev, false); |
6196 | cik_sdma_enable(rdev, false); | 6207 | cik_sdma_enable(rdev, false); |
6197 | r600_uvd_rbc_stop(rdev); | 6208 | r600_uvd_stop(rdev); |
6198 | radeon_uvd_suspend(rdev); | 6209 | radeon_uvd_suspend(rdev); |
6199 | cik_irq_suspend(rdev); | 6210 | cik_irq_suspend(rdev); |
6200 | radeon_wb_disable(rdev); | 6211 | radeon_wb_disable(rdev); |
@@ -6358,6 +6369,7 @@ void cik_fini(struct radeon_device *rdev) | |||
6358 | radeon_vm_manager_fini(rdev); | 6369 | radeon_vm_manager_fini(rdev); |
6359 | radeon_ib_pool_fini(rdev); | 6370 | radeon_ib_pool_fini(rdev); |
6360 | radeon_irq_kms_fini(rdev); | 6371 | radeon_irq_kms_fini(rdev); |
6372 | r600_uvd_stop(rdev); | ||
6361 | radeon_uvd_fini(rdev); | 6373 | radeon_uvd_fini(rdev); |
6362 | cik_pcie_gart_fini(rdev); | 6374 | cik_pcie_gart_fini(rdev); |
6363 | r600_vram_scratch_fini(rdev); | 6375 | r600_vram_scratch_fini(rdev); |
@@ -6978,7 +6990,7 @@ int cik_uvd_resume(struct radeon_device *rdev) | |||
6978 | 6990 | ||
6979 | /* program the VCPU memory controller bits 0-27 */ | 6991 | /* program the VCPU memory controller bits 0-27 */
6980 | addr = rdev->uvd.gpu_addr >> 3; | 6992 | addr = rdev->uvd.gpu_addr >> 3; |
6981 | size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3; | 6993 | size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3; |
6982 | WREG32(UVD_VCPU_CACHE_OFFSET0, addr); | 6994 | WREG32(UVD_VCPU_CACHE_OFFSET0, addr); |
6983 | WREG32(UVD_VCPU_CACHE_SIZE0, size); | 6995 | WREG32(UVD_VCPU_CACHE_SIZE0, size); |
6984 | 6996 | ||
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c index 9bcdd174780f..7e5d0b570a30 100644 --- a/drivers/gpu/drm/radeon/cypress_dpm.c +++ b/drivers/gpu/drm/radeon/cypress_dpm.c | |||
@@ -2038,9 +2038,6 @@ int cypress_dpm_init(struct radeon_device *rdev) | |||
2038 | { | 2038 | { |
2039 | struct rv7xx_power_info *pi; | 2039 | struct rv7xx_power_info *pi; |
2040 | struct evergreen_power_info *eg_pi; | 2040 | struct evergreen_power_info *eg_pi; |
2041 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
2042 | uint16_t data_offset, size; | ||
2043 | uint8_t frev, crev; | ||
2044 | struct atom_clock_dividers dividers; | 2041 | struct atom_clock_dividers dividers; |
2045 | int ret; | 2042 | int ret; |
2046 | 2043 | ||
@@ -2092,16 +2089,7 @@ int cypress_dpm_init(struct radeon_device *rdev) | |||
2092 | eg_pi->vddci_control = | 2089 | eg_pi->vddci_control = |
2093 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); | 2090 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); |
2094 | 2091 | ||
2095 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 2092 | rv770_get_engine_memory_ss(rdev); |
2096 | &frev, &crev, &data_offset)) { | ||
2097 | pi->sclk_ss = true; | ||
2098 | pi->mclk_ss = true; | ||
2099 | pi->dynamic_ss = true; | ||
2100 | } else { | ||
2101 | pi->sclk_ss = false; | ||
2102 | pi->mclk_ss = false; | ||
2103 | pi->dynamic_ss = true; | ||
2104 | } | ||
2105 | 2093 | ||
2106 | pi->asi = RV770_ASI_DFLT; | 2094 | pi->asi = RV770_ASI_DFLT; |
2107 | pi->pasi = CYPRESS_HASI_DFLT; | 2095 | pi->pasi = CYPRESS_HASI_DFLT; |
@@ -2122,8 +2110,7 @@ int cypress_dpm_init(struct radeon_device *rdev) | |||
2122 | 2110 | ||
2123 | pi->dynamic_pcie_gen2 = true; | 2111 | pi->dynamic_pcie_gen2 = true; |
2124 | 2112 | ||
2125 | if (pi->gfx_clock_gating && | 2113 | if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) |
2126 | (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) | ||
2127 | pi->thermal_protection = true; | 2114 | pi->thermal_protection = true; |
2128 | else | 2115 | else |
2129 | pi->thermal_protection = false; | 2116 | pi->thermal_protection = false; |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 038dcac7670c..d5b49e33315e 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -5106,6 +5106,8 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
5106 | /* enable aspm */ | 5106 | /* enable aspm */ |
5107 | evergreen_program_aspm(rdev); | 5107 | evergreen_program_aspm(rdev); |
5108 | 5108 | ||
5109 | evergreen_mc_program(rdev); | ||
5110 | |||
5109 | if (ASIC_IS_DCE5(rdev)) { | 5111 | if (ASIC_IS_DCE5(rdev)) { |
5110 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { | 5112 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { |
5111 | r = ni_init_microcode(rdev); | 5113 | r = ni_init_microcode(rdev); |
@@ -5133,7 +5135,6 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
5133 | if (r) | 5135 | if (r) |
5134 | return r; | 5136 | return r; |
5135 | 5137 | ||
5136 | evergreen_mc_program(rdev); | ||
5137 | if (rdev->flags & RADEON_IS_AGP) { | 5138 | if (rdev->flags & RADEON_IS_AGP) { |
5138 | evergreen_agp_enable(rdev); | 5139 | evergreen_agp_enable(rdev); |
5139 | } else { | 5140 | } else { |
@@ -5291,10 +5292,10 @@ int evergreen_resume(struct radeon_device *rdev) | |||
5291 | int evergreen_suspend(struct radeon_device *rdev) | 5292 | int evergreen_suspend(struct radeon_device *rdev) |
5292 | { | 5293 | { |
5293 | r600_audio_fini(rdev); | 5294 | r600_audio_fini(rdev); |
5295 | r600_uvd_stop(rdev); | ||
5294 | radeon_uvd_suspend(rdev); | 5296 | radeon_uvd_suspend(rdev); |
5295 | r700_cp_stop(rdev); | 5297 | r700_cp_stop(rdev); |
5296 | r600_dma_stop(rdev); | 5298 | r600_dma_stop(rdev); |
5297 | r600_uvd_rbc_stop(rdev); | ||
5298 | evergreen_irq_suspend(rdev); | 5299 | evergreen_irq_suspend(rdev); |
5299 | radeon_wb_disable(rdev); | 5300 | radeon_wb_disable(rdev); |
5300 | evergreen_pcie_gart_disable(rdev); | 5301 | evergreen_pcie_gart_disable(rdev); |
@@ -5429,6 +5430,7 @@ void evergreen_fini(struct radeon_device *rdev) | |||
5429 | radeon_ib_pool_fini(rdev); | 5430 | radeon_ib_pool_fini(rdev); |
5430 | radeon_irq_kms_fini(rdev); | 5431 | radeon_irq_kms_fini(rdev); |
5431 | evergreen_pcie_gart_fini(rdev); | 5432 | evergreen_pcie_gart_fini(rdev); |
5433 | r600_uvd_stop(rdev); | ||
5432 | radeon_uvd_fini(rdev); | 5434 | radeon_uvd_fini(rdev); |
5433 | r600_vram_scratch_fini(rdev); | 5435 | r600_vram_scratch_fini(rdev); |
5434 | radeon_gem_fini(rdev); | 5436 | radeon_gem_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index b0d3fb341417..b0e280058b9b 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c | |||
@@ -148,18 +148,40 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock) | |||
148 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 148 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
149 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | 149 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
150 | u32 base_rate = 24000; | 150 | u32 base_rate = 24000; |
151 | u32 max_ratio = clock / base_rate; | ||
152 | u32 dto_phase; | ||
153 | u32 dto_modulo = clock; | ||
154 | u32 wallclock_ratio; | ||
155 | u32 dto_cntl; | ||
151 | 156 | ||
152 | if (!dig || !dig->afmt) | 157 | if (!dig || !dig->afmt) |
153 | return; | 158 | return; |
154 | 159 | ||
160 | if (max_ratio >= 8) { | ||
161 | dto_phase = 192 * 1000; | ||
162 | wallclock_ratio = 3; | ||
163 | } else if (max_ratio >= 4) { | ||
164 | dto_phase = 96 * 1000; | ||
165 | wallclock_ratio = 2; | ||
166 | } else if (max_ratio >= 2) { | ||
167 | dto_phase = 48 * 1000; | ||
168 | wallclock_ratio = 1; | ||
169 | } else { | ||
170 | dto_phase = 24 * 1000; | ||
171 | wallclock_ratio = 0; | ||
172 | } | ||
173 | dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; | ||
174 | dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); | ||
175 | WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl); | ||
176 | |||
155 | /* XXX two dtos; generally use dto0 for hdmi */ | 177 | /* XXX two dtos; generally use dto0 for hdmi */ |
156 | /* Express [24MHz / target pixel clock] as an exact rational | 178 | /* Express [24MHz / target pixel clock] as an exact rational |
157 | * number (coefficient of two integer numbers). DCCG_AUDIO_DTOx_PHASE | 179 |
158 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | 180 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
159 | */ | 181 | */ |
160 | WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); | ||
161 | WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); | ||
162 | WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id)); | 182 | WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id)); |
183 | WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase); | ||
184 | WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo); | ||
163 | } | 185 | } |
164 | 186 | ||
165 | 187 | ||
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index a7baf67aef6c..0d582ac1dc31 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
@@ -497,6 +497,9 @@ | |||
497 | #define DCCG_AUDIO_DTO0_MODULE 0x05b4 | 497 | #define DCCG_AUDIO_DTO0_MODULE 0x05b4 |
498 | #define DCCG_AUDIO_DTO0_LOAD 0x05b8 | 498 | #define DCCG_AUDIO_DTO0_LOAD 0x05b8 |
499 | #define DCCG_AUDIO_DTO0_CNTL 0x05bc | 499 | #define DCCG_AUDIO_DTO0_CNTL 0x05bc |
500 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0) | ||
501 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7 | ||
502 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0 | ||
500 | 503 | ||
501 | #define DCCG_AUDIO_DTO1_PHASE 0x05c0 | 504 | #define DCCG_AUDIO_DTO1_PHASE 0x05c0 |
502 | #define DCCG_AUDIO_DTO1_MODULE 0x05c4 | 505 | #define DCCG_AUDIO_DTO1_MODULE 0x05c4 |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 56bd4f3be4fe..ccb4f8b54852 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -794,9 +794,13 @@ int ni_init_microcode(struct radeon_device *rdev) | |||
794 | if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) { | 794 | if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) { |
795 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); | 795 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); |
796 | err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); | 796 | err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); |
797 | if (err) | 797 | if (err) { |
798 | goto out; | 798 | printk(KERN_ERR |
799 | if (rdev->smc_fw->size != smc_req_size) { | 799 | "smc: error loading firmware \"%s\"\n", |
800 | fw_name); | ||
801 | release_firmware(rdev->smc_fw); | ||
802 | rdev->smc_fw = NULL; | ||
803 | } else if (rdev->smc_fw->size != smc_req_size) { | ||
800 | printk(KERN_ERR | 804 | printk(KERN_ERR |
801 | "ni_mc: Bogus length %zu in firmware \"%s\"\n", | 805 | "ni_mc: Bogus length %zu in firmware \"%s\"\n", |
802 | rdev->mc_fw->size, fw_name); | 806 | rdev->mc_fw->size, fw_name); |
@@ -2079,6 +2083,8 @@ static int cayman_startup(struct radeon_device *rdev) | |||
2079 | /* enable aspm */ | 2083 | /* enable aspm */ |
2080 | evergreen_program_aspm(rdev); | 2084 | evergreen_program_aspm(rdev); |
2081 | 2085 | ||
2086 | evergreen_mc_program(rdev); | ||
2087 | |||
2082 | if (rdev->flags & RADEON_IS_IGP) { | 2088 | if (rdev->flags & RADEON_IS_IGP) { |
2083 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | 2089 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
2084 | r = ni_init_microcode(rdev); | 2090 | r = ni_init_microcode(rdev); |
@@ -2107,7 +2113,6 @@ static int cayman_startup(struct radeon_device *rdev) | |||
2107 | if (r) | 2113 | if (r) |
2108 | return r; | 2114 | return r; |
2109 | 2115 | ||
2110 | evergreen_mc_program(rdev); | ||
2111 | r = cayman_pcie_gart_enable(rdev); | 2116 | r = cayman_pcie_gart_enable(rdev); |
2112 | if (r) | 2117 | if (r) |
2113 | return r; | 2118 | return r; |
@@ -2286,7 +2291,7 @@ int cayman_suspend(struct radeon_device *rdev) | |||
2286 | radeon_vm_manager_fini(rdev); | 2291 | radeon_vm_manager_fini(rdev); |
2287 | cayman_cp_enable(rdev, false); | 2292 | cayman_cp_enable(rdev, false); |
2288 | cayman_dma_stop(rdev); | 2293 | cayman_dma_stop(rdev); |
2289 | r600_uvd_rbc_stop(rdev); | 2294 | r600_uvd_stop(rdev); |
2290 | radeon_uvd_suspend(rdev); | 2295 | radeon_uvd_suspend(rdev); |
2291 | evergreen_irq_suspend(rdev); | 2296 | evergreen_irq_suspend(rdev); |
2292 | radeon_wb_disable(rdev); | 2297 | radeon_wb_disable(rdev); |
@@ -2418,6 +2423,7 @@ void cayman_fini(struct radeon_device *rdev) | |||
2418 | radeon_vm_manager_fini(rdev); | 2423 | radeon_vm_manager_fini(rdev); |
2419 | radeon_ib_pool_fini(rdev); | 2424 | radeon_ib_pool_fini(rdev); |
2420 | radeon_irq_kms_fini(rdev); | 2425 | radeon_irq_kms_fini(rdev); |
2426 | r600_uvd_stop(rdev); | ||
2421 | radeon_uvd_fini(rdev); | 2427 | radeon_uvd_fini(rdev); |
2422 | cayman_pcie_gart_fini(rdev); | 2428 | cayman_pcie_gart_fini(rdev); |
2423 | r600_vram_scratch_fini(rdev); | 2429 | r600_vram_scratch_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 559cf24d51af..f0f5f748938a 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c | |||
@@ -1054,10 +1054,6 @@ static int ni_restrict_performance_levels_before_switch(struct radeon_device *rd | |||
1054 | int ni_dpm_force_performance_level(struct radeon_device *rdev, | 1054 | int ni_dpm_force_performance_level(struct radeon_device *rdev, |
1055 | enum radeon_dpm_forced_level level) | 1055 | enum radeon_dpm_forced_level level) |
1056 | { | 1056 | { |
1057 | struct radeon_ps *rps = rdev->pm.dpm.current_ps; | ||
1058 | struct ni_ps *ps = ni_get_ps(rps); | ||
1059 | u32 levels; | ||
1060 | |||
1061 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { | 1057 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { |
1062 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK) | 1058 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK) |
1063 | return -EINVAL; | 1059 | return -EINVAL; |
@@ -1068,8 +1064,7 @@ int ni_dpm_force_performance_level(struct radeon_device *rdev, | |||
1068 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) | 1064 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) |
1069 | return -EINVAL; | 1065 | return -EINVAL; |
1070 | 1066 | ||
1071 | levels = ps->performance_level_count - 1; | 1067 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK) |
1072 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) | ||
1073 | return -EINVAL; | 1068 | return -EINVAL; |
1074 | } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { | 1069 | } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { |
1075 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) | 1070 | if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) |
@@ -4072,9 +4067,6 @@ int ni_dpm_init(struct radeon_device *rdev) | |||
4072 | struct rv7xx_power_info *pi; | 4067 | struct rv7xx_power_info *pi; |
4073 | struct evergreen_power_info *eg_pi; | 4068 | struct evergreen_power_info *eg_pi; |
4074 | struct ni_power_info *ni_pi; | 4069 | struct ni_power_info *ni_pi; |
4075 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
4076 | u16 data_offset, size; | ||
4077 | u8 frev, crev; | ||
4078 | struct atom_clock_dividers dividers; | 4070 | struct atom_clock_dividers dividers; |
4079 | int ret; | 4071 | int ret; |
4080 | 4072 | ||
@@ -4167,16 +4159,7 @@ int ni_dpm_init(struct radeon_device *rdev) | |||
4167 | eg_pi->vddci_control = | 4159 | eg_pi->vddci_control = |
4168 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); | 4160 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); |
4169 | 4161 | ||
4170 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 4162 | rv770_get_engine_memory_ss(rdev); |
4171 | &frev, &crev, &data_offset)) { | ||
4172 | pi->sclk_ss = true; | ||
4173 | pi->mclk_ss = true; | ||
4174 | pi->dynamic_ss = true; | ||
4175 | } else { | ||
4176 | pi->sclk_ss = false; | ||
4177 | pi->mclk_ss = false; | ||
4178 | pi->dynamic_ss = true; | ||
4179 | } | ||
4180 | 4163 | ||
4181 | pi->asi = RV770_ASI_DFLT; | 4164 | pi->asi = RV770_ASI_DFLT; |
4182 | pi->pasi = CYPRESS_HASI_DFLT; | 4165 | pi->pasi = CYPRESS_HASI_DFLT; |
@@ -4193,8 +4176,7 @@ int ni_dpm_init(struct radeon_device *rdev) | |||
4193 | 4176 | ||
4194 | pi->dynamic_pcie_gen2 = true; | 4177 | pi->dynamic_pcie_gen2 = true; |
4195 | 4178 | ||
4196 | if (pi->gfx_clock_gating && | 4179 | if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) |
4197 | (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) | ||
4198 | pi->thermal_protection = true; | 4180 | pi->thermal_protection = true; |
4199 | else | 4181 | else |
4200 | pi->thermal_protection = false; | 4182 | pi->thermal_protection = false; |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 393880a09412..e66e72077350 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -2299,9 +2299,13 @@ int r600_init_microcode(struct radeon_device *rdev) | |||
2299 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) { | 2299 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) { |
2300 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name); | 2300 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name); |
2301 | err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); | 2301 | err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); |
2302 | if (err) | 2302 | if (err) { |
2303 | goto out; | 2303 | printk(KERN_ERR |
2304 | if (rdev->smc_fw->size != smc_req_size) { | 2304 | "smc: error loading firmware \"%s\"\n", |
2305 | fw_name); | ||
2306 | release_firmware(rdev->smc_fw); | ||
2307 | rdev->smc_fw = NULL; | ||
2308 | } else if (rdev->smc_fw->size != smc_req_size) { | ||
2305 | printk(KERN_ERR | 2309 | printk(KERN_ERR |
2306 | "smc: Bogus length %zu in firmware \"%s\"\n", | 2310 | "smc: Bogus length %zu in firmware \"%s\"\n", |
2307 | rdev->smc_fw->size, fw_name); | 2311 | rdev->smc_fw->size, fw_name); |
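A failed SMC firmware load is now non-fatal here: the driver logs it, releases the half-loaded blob and leaves rdev->smc_fw NULL so later code can fall back (see the radeon_pm_init() change further down). A hedged userspace sketch of the same "optional resource" pattern, using fopen() purely as an analogue for request_firmware():

#include <stdio.h>
#include <stdlib.h>

/* Load an optional blob: on failure, warn and return NULL so callers can
 * degrade gracefully instead of aborting initialization. */
static char *load_optional_blob(const char *path, long *size)
{
	FILE *f = fopen(path, "rb");
	char *buf;

	if (!f) {
		fprintf(stderr, "optional blob \"%s\" missing, continuing without it\n", path);
		return NULL;
	}
	fseek(f, 0, SEEK_END);
	*size = ftell(f);
	rewind(f);
	buf = malloc(*size);
	if (buf && fread(buf, 1, *size, f) != (size_t)*size) {
		free(buf);
		buf = NULL;
	}
	fclose(f);
	return buf;
}

int main(void)
{
	long size = 0;
	char *blob = load_optional_blob("rv770_smc.bin", &size);   /* illustrative path */

	printf("blob %s (%ld bytes)\n", blob ? "loaded" : "absent", size);
	free(blob);
	return 0;
}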
@@ -2697,12 +2701,29 @@ int r600_uvd_rbc_start(struct radeon_device *rdev) | |||
2697 | return 0; | 2701 | return 0; |
2698 | } | 2702 | } |
2699 | 2703 | ||
2700 | void r600_uvd_rbc_stop(struct radeon_device *rdev) | 2704 | void r600_uvd_stop(struct radeon_device *rdev) |
2701 | { | 2705 | { |
2702 | struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; | 2706 | struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; |
2703 | 2707 | ||
2704 | /* force RBC into idle state */ | 2708 | /* force RBC into idle state */ |
2705 | WREG32(UVD_RBC_RB_CNTL, 0x11010101); | 2709 | WREG32(UVD_RBC_RB_CNTL, 0x11010101); |
2710 | |||
2711 | /* Stall UMC and register bus before resetting VCPU */ | ||
2712 | WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); | ||
2713 | WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); | ||
2714 | mdelay(1); | ||
2715 | |||
2716 | /* put VCPU into reset */ | ||
2717 | WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); | ||
2718 | mdelay(5); | ||
2719 | |||
2720 | /* disable VCPU clock */ | ||
2721 | WREG32(UVD_VCPU_CNTL, 0x0); | ||
2722 | |||
2723 | /* Unstall UMC and register bus */ | ||
2724 | WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8)); | ||
2725 | WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3)); | ||
2726 | |||
2706 | ring->ready = false; | 2727 | ring->ready = false; |
2707 | } | 2728 | } |
2708 | 2729 | ||
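The stall/unstall writes in this new stop sequence are all WREG32_P(reg, val, mask) read-modify-write accesses: keep the bits selected by mask, then OR in val, so "1 << 8, ~(1 << 8)" sets bit 8 of UVD_LMI_CTRL2 and "0, ~(1 << 8)" clears it while leaving the rest of the register untouched. A tiny standalone model of that helper (reg_p() is an invented name and the register is just a variable here):

#include <stdint.h>
#include <stdio.h>

/* Model of the masked write: clear everything outside 'mask', then set 'val'. */
static uint32_t reg_p(uint32_t reg, uint32_t val, uint32_t mask)
{
	return (reg & mask) | val;
}

int main(void)
{
	uint32_t lmi_ctrl2 = 0x00000021;                     /* arbitrary prior contents */

	lmi_ctrl2 = reg_p(lmi_ctrl2, 1u << 8, ~(1u << 8));   /* stall the UMC: set bit 8 */
	printf("stalled:   0x%08x\n", (unsigned)lmi_ctrl2);
	lmi_ctrl2 = reg_p(lmi_ctrl2, 0, ~(1u << 8));         /* unstall: clear bit 8 */
	printf("unstalled: 0x%08x\n", (unsigned)lmi_ctrl2);
	return 0;
}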
@@ -2722,6 +2743,11 @@ int r600_uvd_init(struct radeon_device *rdev) | |||
2722 | /* disable interrupt */ | 2743 | /* disable interrupt */ |
2723 | WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1)); | 2744 | WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1)); |
2724 | 2745 | ||
2746 | /* Stall UMC and register bus before resetting VCPU */ | ||
2747 | WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); | ||
2748 | WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); | ||
2749 | mdelay(1); | ||
2750 | |||
2725 | /* put LMI, VCPU, RBC etc... into reset */ | 2751 | /* put LMI, VCPU, RBC etc... into reset */ |
2726 | WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET | | 2752 | WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET | |
2727 | LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET | | 2753 | LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET | |
@@ -2751,10 +2777,6 @@ int r600_uvd_init(struct radeon_device *rdev) | |||
2751 | WREG32(UVD_MPC_SET_ALU, 0); | 2777 | WREG32(UVD_MPC_SET_ALU, 0); |
2752 | WREG32(UVD_MPC_SET_MUX, 0x88); | 2778 | WREG32(UVD_MPC_SET_MUX, 0x88); |
2753 | 2779 | ||
2754 | /* Stall UMC */ | ||
2755 | WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); | ||
2756 | WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); | ||
2757 | |||
2758 | /* take all subblocks out of reset, except VCPU */ | 2780 | /* take all subblocks out of reset, except VCPU */ |
2759 | WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); | 2781 | WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); |
2760 | mdelay(5); | 2782 | mdelay(5); |
@@ -3166,7 +3188,7 @@ int r600_copy_cpdma(struct radeon_device *rdev, | |||
3166 | 3188 | ||
3167 | size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); | 3189 | size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); |
3168 | num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); | 3190 | num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); |
3169 | r = radeon_ring_lock(rdev, ring, num_loops * 6 + 21); | 3191 | r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24); |
3170 | if (r) { | 3192 | if (r) { |
3171 | DRM_ERROR("radeon: moving bo (%d).\n", r); | 3193 | DRM_ERROR("radeon: moving bo (%d).\n", r); |
3172 | radeon_semaphore_free(rdev, &sem, NULL); | 3194 | radeon_semaphore_free(rdev, &sem, NULL); |
@@ -3181,6 +3203,9 @@ int r600_copy_cpdma(struct radeon_device *rdev, | |||
3181 | radeon_semaphore_free(rdev, &sem, NULL); | 3203 | radeon_semaphore_free(rdev, &sem, NULL); |
3182 | } | 3204 | } |
3183 | 3205 | ||
3206 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | ||
3207 | radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); | ||
3208 | radeon_ring_write(ring, WAIT_3D_IDLE_bit); | ||
3184 | for (i = 0; i < num_loops; i++) { | 3209 | for (i = 0; i < num_loops; i++) { |
3185 | cur_size_in_bytes = size_in_bytes; | 3210 | cur_size_in_bytes = size_in_bytes; |
3186 | if (cur_size_in_bytes > 0x1fffff) | 3211 | if (cur_size_in_bytes > 0x1fffff) |
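The ring reservation grows from "num_loops * 6 + 21" to "+ 24" because the new WAIT_UNTIL programming costs exactly three dwords: the PACKET3_SET_CONFIG_REG header, the register offset, and the WAIT_3D_IDLE value. A quick arithmetic check with an arbitrary loop count:

#include <stdio.h>

int main(void)
{
	unsigned num_loops = 4;          /* example: four chunks of up to 0x1fffff bytes */
	unsigned per_loop = 6;           /* dwords emitted per copy packet */
	unsigned old_overhead = 21;      /* previous fixed reservation */
	unsigned wait_until = 3;         /* header + offset + WAIT_3D_IDLE data */

	printf("old: %u dwords, new: %u dwords\n",
	       num_loops * per_loop + old_overhead,
	       num_loops * per_loop + old_overhead + wait_until);
	return 0;
}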
@@ -3309,6 +3334,8 @@ static int r600_startup(struct radeon_device *rdev) | |||
3309 | /* enable pcie gen2 link */ | 3334 | /* enable pcie gen2 link */ |
3310 | r600_pcie_gen2_enable(rdev); | 3335 | r600_pcie_gen2_enable(rdev); |
3311 | 3336 | ||
3337 | r600_mc_program(rdev); | ||
3338 | |||
3312 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | 3339 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
3313 | r = r600_init_microcode(rdev); | 3340 | r = r600_init_microcode(rdev); |
3314 | if (r) { | 3341 | if (r) { |
@@ -3321,7 +3348,6 @@ static int r600_startup(struct radeon_device *rdev) | |||
3321 | if (r) | 3348 | if (r) |
3322 | return r; | 3349 | return r; |
3323 | 3350 | ||
3324 | r600_mc_program(rdev); | ||
3325 | if (rdev->flags & RADEON_IS_AGP) { | 3351 | if (rdev->flags & RADEON_IS_AGP) { |
3326 | r600_agp_enable(rdev); | 3352 | r600_agp_enable(rdev); |
3327 | } else { | 3353 | } else { |
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index b88f54b134ab..e5c860f4ccbe 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c | |||
@@ -278,9 +278,9 @@ bool r600_dynamicpm_enabled(struct radeon_device *rdev) | |||
278 | void r600_enable_sclk_control(struct radeon_device *rdev, bool enable) | 278 | void r600_enable_sclk_control(struct radeon_device *rdev, bool enable) |
279 | { | 279 | { |
280 | if (enable) | 280 | if (enable) |
281 | WREG32_P(GENERAL_PWRMGT, 0, ~SCLK_PWRMGT_OFF); | 281 | WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF); |
282 | else | 282 | else |
283 | WREG32_P(GENERAL_PWRMGT, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); | 283 | WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); |
284 | } | 284 | } |
285 | 285 | ||
286 | void r600_enable_mclk_control(struct radeon_device *rdev, bool enable) | 286 | void r600_enable_mclk_control(struct radeon_device *rdev, bool enable) |
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index f48240bb8c56..f264df5470f7 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
@@ -226,10 +226,29 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) | |||
226 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 226 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
227 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 227 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
228 | u32 base_rate = 24000; | 228 | u32 base_rate = 24000; |
229 | u32 max_ratio = clock / base_rate; | ||
230 | u32 dto_phase; | ||
231 | u32 dto_modulo = clock; | ||
232 | u32 wallclock_ratio; | ||
233 | u32 dto_cntl; | ||
229 | 234 | ||
230 | if (!dig || !dig->afmt) | 235 | if (!dig || !dig->afmt) |
231 | return; | 236 | return; |
232 | 237 | ||
238 | if (max_ratio >= 8) { | ||
239 | dto_phase = 192 * 1000; | ||
240 | wallclock_ratio = 3; | ||
241 | } else if (max_ratio >= 4) { | ||
242 | dto_phase = 96 * 1000; | ||
243 | wallclock_ratio = 2; | ||
244 | } else if (max_ratio >= 2) { | ||
245 | dto_phase = 48 * 1000; | ||
246 | wallclock_ratio = 1; | ||
247 | } else { | ||
248 | dto_phase = 24 * 1000; | ||
249 | wallclock_ratio = 0; | ||
250 | } | ||
251 | |||
233 | /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT. | 252 | /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT. |
234 | * doesn't matter which one you use. Just use the first one. | 253 | * doesn't matter which one you use. Just use the first one. |
235 | */ | 254 | */ |
@@ -242,9 +261,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) | |||
242 | /* according to the reg specs, this should DCE3.2 only, but in | 261 | /* according to the reg specs, this should DCE3.2 only, but in |
243 | * practice it seems to cover DCE3.0 as well. | 262 | * practice it seems to cover DCE3.0 as well. |
244 | */ | 263 | */ |
245 | WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); | 264 | if (dig->dig_encoder == 0) { |
246 | WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); | 265 | dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; |
247 | WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ | 266 | dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); |
267 | WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl); | ||
268 | WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase); | ||
269 | WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo); | ||
270 | WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ | ||
271 | } else { | ||
272 | dto_cntl = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; | ||
273 | dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); | ||
274 | WREG32(DCCG_AUDIO_DTO1_CNTL, dto_cntl); | ||
275 | WREG32(DCCG_AUDIO_DTO1_PHASE, dto_phase); | ||
276 | WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo); | ||
277 | WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ | ||
278 | } | ||
248 | } else { | 279 | } else { |
249 | /* according to the reg specs, this should be DCE2.0 and DCE3.0 */ | 280 | /* according to the reg specs, this should be DCE2.0 and DCE3.0 */ |
250 | WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | | 281 | WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | |
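Instead of a fixed 24 kHz phase, the DTO phase and wallclock ratio are now stepped with the clock/base_rate ratio, and the values are written to whichever DTO (0 or 1) belongs to the DIG encoder in use. A hedged sketch of just the ratio selection; the thresholds mirror the hunk above, while the clock values fed to it are illustrative and the register encoding remains the driver's concern:

#include <stdio.h>

struct dto { unsigned phase; unsigned wallclock_ratio; };

static struct dto pick_dto(unsigned clock, unsigned base_rate)
{
	unsigned max_ratio = clock / base_rate;
	struct dto d;

	if (max_ratio >= 8)      { d.phase = 192 * 1000; d.wallclock_ratio = 3; }
	else if (max_ratio >= 4) { d.phase =  96 * 1000; d.wallclock_ratio = 2; }
	else if (max_ratio >= 2) { d.phase =  48 * 1000; d.wallclock_ratio = 1; }
	else                     { d.phase =  24 * 1000; d.wallclock_ratio = 0; }
	return d;
}

int main(void)
{
	unsigned clocks[] = { 25200, 74250, 148500, 297000 };   /* illustrative mode clocks */

	for (unsigned i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++) {
		struct dto d = pick_dto(clocks[i], 24000);
		printf("clock %6u -> phase %6u, ratio %u\n", clocks[i], d.phase, d.wallclock_ratio);
	}
	return 0;
}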
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 8e3fe815edab..7c780839a7f4 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -933,6 +933,9 @@ | |||
933 | #define DCCG_AUDIO_DTO0_LOAD 0x051c | 933 | #define DCCG_AUDIO_DTO0_LOAD 0x051c |
934 | # define DTO_LOAD (1 << 31) | 934 | # define DTO_LOAD (1 << 31) |
935 | #define DCCG_AUDIO_DTO0_CNTL 0x0520 | 935 | #define DCCG_AUDIO_DTO0_CNTL 0x0520 |
936 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0) | ||
937 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7 | ||
938 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0 | ||
936 | 939 | ||
937 | #define DCCG_AUDIO_DTO1_PHASE 0x0524 | 940 | #define DCCG_AUDIO_DTO1_PHASE 0x0524 |
938 | #define DCCG_AUDIO_DTO1_MODULE 0x0528 | 941 | #define DCCG_AUDIO_DTO1_MODULE 0x0528 |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 2f08219c39b6..9f19259667df 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -1468,7 +1468,6 @@ struct radeon_uvd { | |||
1468 | void *cpu_addr; | 1468 | void *cpu_addr; |
1469 | uint64_t gpu_addr; | 1469 | uint64_t gpu_addr; |
1470 | void *saved_bo; | 1470 | void *saved_bo; |
1471 | unsigned fw_size; | ||
1472 | atomic_t handles[RADEON_MAX_UVD_HANDLES]; | 1471 | atomic_t handles[RADEON_MAX_UVD_HANDLES]; |
1473 | struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; | 1472 | struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; |
1474 | struct delayed_work idle_work; | 1473 | struct delayed_work idle_work; |
@@ -2066,6 +2065,7 @@ struct radeon_device { | |||
2066 | const struct firmware *mec_fw; /* CIK MEC firmware */ | 2065 | const struct firmware *mec_fw; /* CIK MEC firmware */ |
2067 | const struct firmware *sdma_fw; /* CIK SDMA firmware */ | 2066 | const struct firmware *sdma_fw; /* CIK SDMA firmware */ |
2068 | const struct firmware *smc_fw; /* SMC firmware */ | 2067 | const struct firmware *smc_fw; /* SMC firmware */ |
2068 | const struct firmware *uvd_fw; /* UVD firmware */ | ||
2069 | struct r600_blit r600_blit; | 2069 | struct r600_blit r600_blit; |
2070 | struct r600_vram_scratch vram_scratch; | 2070 | struct r600_vram_scratch vram_scratch; |
2071 | int msi_enabled; /* msi enabled */ | 2071 | int msi_enabled; /* msi enabled */ |
@@ -2095,6 +2095,8 @@ struct radeon_device { | |||
2095 | /* ACPI interface */ | 2095 | /* ACPI interface */ |
2096 | struct radeon_atif atif; | 2096 | struct radeon_atif atif; |
2097 | struct radeon_atcs atcs; | 2097 | struct radeon_atcs atcs; |
2098 | /* srbm instance registers */ | ||
2099 | struct mutex srbm_mutex; | ||
2098 | }; | 2100 | }; |
2099 | 2101 | ||
2100 | int radeon_device_init(struct radeon_device *rdev, | 2102 | int radeon_device_init(struct radeon_device *rdev, |
@@ -2161,7 +2163,7 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v); | |||
2161 | WREG32(reg, tmp_); \ | 2163 | WREG32(reg, tmp_); \ |
2162 | } while (0) | 2164 | } while (0) |
2163 | #define WREG32_AND(reg, and) WREG32_P(reg, 0, and) | 2165 | #define WREG32_AND(reg, and) WREG32_P(reg, 0, and) |
2164 | #define WREG32_OR(reg, or) WREG32_P(reg, or, ~or) | 2166 | #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or)) |
2165 | #define WREG32_PLL_P(reg, val, mask) \ | 2167 | #define WREG32_PLL_P(reg, val, mask) \ |
2166 | do { \ | 2168 | do { \ |
2167 | uint32_t tmp_ = RREG32_PLL(reg); \ | 2169 | uint32_t tmp_ = RREG32_PLL(reg); \ |
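The one-character WREG32_OR change above is an operator-precedence fix: with an unparenthesized argument such as "a | b", "~or" expands to "~a | b" instead of "~(a | b)", so the keep-mask handed to WREG32_P is not the complement of the bits being set. A standalone demonstration of the expansion difference:

#include <stdint.h>
#include <stdio.h>

#define KEEP_MASK_OLD(or)  (~or)      /* buggy: ~ binds to the first operand only */
#define KEEP_MASK_NEW(or)  (~(or))    /* fixed: complement of the whole expression */

int main(void)
{
	uint32_t a = 1u << 0, b = 1u << 4;

	printf("old expansion: 0x%08x\n", (unsigned)KEEP_MASK_OLD(a | b));   /* ~a | b   */
	printf("new expansion: 0x%08x\n", (unsigned)KEEP_MASK_NEW(a | b));   /* ~(a | b) */
	return 0;
}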
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 78bec1a58ed1..f8f8b3113ddd 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -1161,6 +1161,7 @@ static struct radeon_asic rv6xx_asic = { | |||
1161 | .get_mclk = &rv6xx_dpm_get_mclk, | 1161 | .get_mclk = &rv6xx_dpm_get_mclk, |
1162 | .print_power_state = &rv6xx_dpm_print_power_state, | 1162 | .print_power_state = &rv6xx_dpm_print_power_state, |
1163 | .debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level, | 1163 | .debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level, |
1164 | .force_performance_level = &rv6xx_dpm_force_performance_level, | ||
1164 | }, | 1165 | }, |
1165 | .pflip = { | 1166 | .pflip = { |
1166 | .pre_page_flip = &rs600_pre_page_flip, | 1167 | .pre_page_flip = &rs600_pre_page_flip, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index ca1895709908..3d61d5aac18f 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -421,6 +421,8 @@ void rv6xx_dpm_print_power_state(struct radeon_device *rdev, | |||
421 | struct radeon_ps *ps); | 421 | struct radeon_ps *ps); |
422 | void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, | 422 | void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, |
423 | struct seq_file *m); | 423 | struct seq_file *m); |
424 | int rv6xx_dpm_force_performance_level(struct radeon_device *rdev, | ||
425 | enum radeon_dpm_forced_level level); | ||
424 | /* rs780 dpm */ | 426 | /* rs780 dpm */ |
425 | int rs780_dpm_init(struct radeon_device *rdev); | 427 | int rs780_dpm_init(struct radeon_device *rdev); |
426 | int rs780_dpm_enable(struct radeon_device *rdev); | 428 | int rs780_dpm_enable(struct radeon_device *rdev); |
@@ -439,7 +441,7 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde | |||
439 | /* uvd */ | 441 | /* uvd */ |
440 | int r600_uvd_init(struct radeon_device *rdev); | 442 | int r600_uvd_init(struct radeon_device *rdev); |
441 | int r600_uvd_rbc_start(struct radeon_device *rdev); | 443 | int r600_uvd_rbc_start(struct radeon_device *rdev); |
442 | void r600_uvd_rbc_stop(struct radeon_device *rdev); | 444 | void r600_uvd_stop(struct radeon_device *rdev); |
443 | int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); | 445 | int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); |
444 | void r600_uvd_fence_emit(struct radeon_device *rdev, | 446 | void r600_uvd_fence_emit(struct radeon_device *rdev, |
445 | struct radeon_fence *fence); | 447 | struct radeon_fence *fence); |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index e3f3e8841789..4ccd61f60eb6 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -2782,7 +2782,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev, | |||
2782 | ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false; | 2782 | ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false; |
2783 | dividers->enable_dithen = (args.v3.ucCntlFlag & | 2783 | dividers->enable_dithen = (args.v3.ucCntlFlag & |
2784 | ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true; | 2784 | ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true; |
2785 | dividers->fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv); | 2785 | dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv); |
2786 | dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac); | 2786 | dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac); |
2787 | dividers->ref_div = args.v3.ucRefDiv; | 2787 | dividers->ref_div = args.v3.ucRefDiv; |
2788 | dividers->vco_mode = (args.v3.ucCntlFlag & | 2788 | dividers->vco_mode = (args.v3.ucCntlFlag & |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 78edadc9e86b..68ce36056019 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -147,7 +147,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, | |||
147 | enum radeon_combios_table_offset table) | 147 | enum radeon_combios_table_offset table) |
148 | { | 148 | { |
149 | struct radeon_device *rdev = dev->dev_private; | 149 | struct radeon_device *rdev = dev->dev_private; |
150 | int rev; | 150 | int rev, size; |
151 | uint16_t offset = 0, check_offset; | 151 | uint16_t offset = 0, check_offset; |
152 | 152 | ||
153 | if (!rdev->bios) | 153 | if (!rdev->bios) |
@@ -156,174 +156,106 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, | |||
156 | switch (table) { | 156 | switch (table) { |
157 | /* absolute offset tables */ | 157 | /* absolute offset tables */ |
158 | case COMBIOS_ASIC_INIT_1_TABLE: | 158 | case COMBIOS_ASIC_INIT_1_TABLE: |
159 | check_offset = RBIOS16(rdev->bios_header_start + 0xc); | 159 | check_offset = 0xc; |
160 | if (check_offset) | ||
161 | offset = check_offset; | ||
162 | break; | 160 | break; |
163 | case COMBIOS_BIOS_SUPPORT_TABLE: | 161 | case COMBIOS_BIOS_SUPPORT_TABLE: |
164 | check_offset = RBIOS16(rdev->bios_header_start + 0x14); | 162 | check_offset = 0x14; |
165 | if (check_offset) | ||
166 | offset = check_offset; | ||
167 | break; | 163 | break; |
168 | case COMBIOS_DAC_PROGRAMMING_TABLE: | 164 | case COMBIOS_DAC_PROGRAMMING_TABLE: |
169 | check_offset = RBIOS16(rdev->bios_header_start + 0x2a); | 165 | check_offset = 0x2a; |
170 | if (check_offset) | ||
171 | offset = check_offset; | ||
172 | break; | 166 | break; |
173 | case COMBIOS_MAX_COLOR_DEPTH_TABLE: | 167 | case COMBIOS_MAX_COLOR_DEPTH_TABLE: |
174 | check_offset = RBIOS16(rdev->bios_header_start + 0x2c); | 168 | check_offset = 0x2c; |
175 | if (check_offset) | ||
176 | offset = check_offset; | ||
177 | break; | 169 | break; |
178 | case COMBIOS_CRTC_INFO_TABLE: | 170 | case COMBIOS_CRTC_INFO_TABLE: |
179 | check_offset = RBIOS16(rdev->bios_header_start + 0x2e); | 171 | check_offset = 0x2e; |
180 | if (check_offset) | ||
181 | offset = check_offset; | ||
182 | break; | 172 | break; |
183 | case COMBIOS_PLL_INFO_TABLE: | 173 | case COMBIOS_PLL_INFO_TABLE: |
184 | check_offset = RBIOS16(rdev->bios_header_start + 0x30); | 174 | check_offset = 0x30; |
185 | if (check_offset) | ||
186 | offset = check_offset; | ||
187 | break; | 175 | break; |
188 | case COMBIOS_TV_INFO_TABLE: | 176 | case COMBIOS_TV_INFO_TABLE: |
189 | check_offset = RBIOS16(rdev->bios_header_start + 0x32); | 177 | check_offset = 0x32; |
190 | if (check_offset) | ||
191 | offset = check_offset; | ||
192 | break; | 178 | break; |
193 | case COMBIOS_DFP_INFO_TABLE: | 179 | case COMBIOS_DFP_INFO_TABLE: |
194 | check_offset = RBIOS16(rdev->bios_header_start + 0x34); | 180 | check_offset = 0x34; |
195 | if (check_offset) | ||
196 | offset = check_offset; | ||
197 | break; | 181 | break; |
198 | case COMBIOS_HW_CONFIG_INFO_TABLE: | 182 | case COMBIOS_HW_CONFIG_INFO_TABLE: |
199 | check_offset = RBIOS16(rdev->bios_header_start + 0x36); | 183 | check_offset = 0x36; |
200 | if (check_offset) | ||
201 | offset = check_offset; | ||
202 | break; | 184 | break; |
203 | case COMBIOS_MULTIMEDIA_INFO_TABLE: | 185 | case COMBIOS_MULTIMEDIA_INFO_TABLE: |
204 | check_offset = RBIOS16(rdev->bios_header_start + 0x38); | 186 | check_offset = 0x38; |
205 | if (check_offset) | ||
206 | offset = check_offset; | ||
207 | break; | 187 | break; |
208 | case COMBIOS_TV_STD_PATCH_TABLE: | 188 | case COMBIOS_TV_STD_PATCH_TABLE: |
209 | check_offset = RBIOS16(rdev->bios_header_start + 0x3e); | 189 | check_offset = 0x3e; |
210 | if (check_offset) | ||
211 | offset = check_offset; | ||
212 | break; | 190 | break; |
213 | case COMBIOS_LCD_INFO_TABLE: | 191 | case COMBIOS_LCD_INFO_TABLE: |
214 | check_offset = RBIOS16(rdev->bios_header_start + 0x40); | 192 | check_offset = 0x40; |
215 | if (check_offset) | ||
216 | offset = check_offset; | ||
217 | break; | 193 | break; |
218 | case COMBIOS_MOBILE_INFO_TABLE: | 194 | case COMBIOS_MOBILE_INFO_TABLE: |
219 | check_offset = RBIOS16(rdev->bios_header_start + 0x42); | 195 | check_offset = 0x42; |
220 | if (check_offset) | ||
221 | offset = check_offset; | ||
222 | break; | 196 | break; |
223 | case COMBIOS_PLL_INIT_TABLE: | 197 | case COMBIOS_PLL_INIT_TABLE: |
224 | check_offset = RBIOS16(rdev->bios_header_start + 0x46); | 198 | check_offset = 0x46; |
225 | if (check_offset) | ||
226 | offset = check_offset; | ||
227 | break; | 199 | break; |
228 | case COMBIOS_MEM_CONFIG_TABLE: | 200 | case COMBIOS_MEM_CONFIG_TABLE: |
229 | check_offset = RBIOS16(rdev->bios_header_start + 0x48); | 201 | check_offset = 0x48; |
230 | if (check_offset) | ||
231 | offset = check_offset; | ||
232 | break; | 202 | break; |
233 | case COMBIOS_SAVE_MASK_TABLE: | 203 | case COMBIOS_SAVE_MASK_TABLE: |
234 | check_offset = RBIOS16(rdev->bios_header_start + 0x4a); | 204 | check_offset = 0x4a; |
235 | if (check_offset) | ||
236 | offset = check_offset; | ||
237 | break; | 205 | break; |
238 | case COMBIOS_HARDCODED_EDID_TABLE: | 206 | case COMBIOS_HARDCODED_EDID_TABLE: |
239 | check_offset = RBIOS16(rdev->bios_header_start + 0x4c); | 207 | check_offset = 0x4c; |
240 | if (check_offset) | ||
241 | offset = check_offset; | ||
242 | break; | 208 | break; |
243 | case COMBIOS_ASIC_INIT_2_TABLE: | 209 | case COMBIOS_ASIC_INIT_2_TABLE: |
244 | check_offset = RBIOS16(rdev->bios_header_start + 0x4e); | 210 | check_offset = 0x4e; |
245 | if (check_offset) | ||
246 | offset = check_offset; | ||
247 | break; | 211 | break; |
248 | case COMBIOS_CONNECTOR_INFO_TABLE: | 212 | case COMBIOS_CONNECTOR_INFO_TABLE: |
249 | check_offset = RBIOS16(rdev->bios_header_start + 0x50); | 213 | check_offset = 0x50; |
250 | if (check_offset) | ||
251 | offset = check_offset; | ||
252 | break; | 214 | break; |
253 | case COMBIOS_DYN_CLK_1_TABLE: | 215 | case COMBIOS_DYN_CLK_1_TABLE: |
254 | check_offset = RBIOS16(rdev->bios_header_start + 0x52); | 216 | check_offset = 0x52; |
255 | if (check_offset) | ||
256 | offset = check_offset; | ||
257 | break; | 217 | break; |
258 | case COMBIOS_RESERVED_MEM_TABLE: | 218 | case COMBIOS_RESERVED_MEM_TABLE: |
259 | check_offset = RBIOS16(rdev->bios_header_start + 0x54); | 219 | check_offset = 0x54; |
260 | if (check_offset) | ||
261 | offset = check_offset; | ||
262 | break; | 220 | break; |
263 | case COMBIOS_EXT_TMDS_INFO_TABLE: | 221 | case COMBIOS_EXT_TMDS_INFO_TABLE: |
264 | check_offset = RBIOS16(rdev->bios_header_start + 0x58); | 222 | check_offset = 0x58; |
265 | if (check_offset) | ||
266 | offset = check_offset; | ||
267 | break; | 223 | break; |
268 | case COMBIOS_MEM_CLK_INFO_TABLE: | 224 | case COMBIOS_MEM_CLK_INFO_TABLE: |
269 | check_offset = RBIOS16(rdev->bios_header_start + 0x5a); | 225 | check_offset = 0x5a; |
270 | if (check_offset) | ||
271 | offset = check_offset; | ||
272 | break; | 226 | break; |
273 | case COMBIOS_EXT_DAC_INFO_TABLE: | 227 | case COMBIOS_EXT_DAC_INFO_TABLE: |
274 | check_offset = RBIOS16(rdev->bios_header_start + 0x5c); | 228 | check_offset = 0x5c; |
275 | if (check_offset) | ||
276 | offset = check_offset; | ||
277 | break; | 229 | break; |
278 | case COMBIOS_MISC_INFO_TABLE: | 230 | case COMBIOS_MISC_INFO_TABLE: |
279 | check_offset = RBIOS16(rdev->bios_header_start + 0x5e); | 231 | check_offset = 0x5e; |
280 | if (check_offset) | ||
281 | offset = check_offset; | ||
282 | break; | 232 | break; |
283 | case COMBIOS_CRT_INFO_TABLE: | 233 | case COMBIOS_CRT_INFO_TABLE: |
284 | check_offset = RBIOS16(rdev->bios_header_start + 0x60); | 234 | check_offset = 0x60; |
285 | if (check_offset) | ||
286 | offset = check_offset; | ||
287 | break; | 235 | break; |
288 | case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE: | 236 | case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE: |
289 | check_offset = RBIOS16(rdev->bios_header_start + 0x62); | 237 | check_offset = 0x62; |
290 | if (check_offset) | ||
291 | offset = check_offset; | ||
292 | break; | 238 | break; |
293 | case COMBIOS_COMPONENT_VIDEO_INFO_TABLE: | 239 | case COMBIOS_COMPONENT_VIDEO_INFO_TABLE: |
294 | check_offset = RBIOS16(rdev->bios_header_start + 0x64); | 240 | check_offset = 0x64; |
295 | if (check_offset) | ||
296 | offset = check_offset; | ||
297 | break; | 241 | break; |
298 | case COMBIOS_FAN_SPEED_INFO_TABLE: | 242 | case COMBIOS_FAN_SPEED_INFO_TABLE: |
299 | check_offset = RBIOS16(rdev->bios_header_start + 0x66); | 243 | check_offset = 0x66; |
300 | if (check_offset) | ||
301 | offset = check_offset; | ||
302 | break; | 244 | break; |
303 | case COMBIOS_OVERDRIVE_INFO_TABLE: | 245 | case COMBIOS_OVERDRIVE_INFO_TABLE: |
304 | check_offset = RBIOS16(rdev->bios_header_start + 0x68); | 246 | check_offset = 0x68; |
305 | if (check_offset) | ||
306 | offset = check_offset; | ||
307 | break; | 247 | break; |
308 | case COMBIOS_OEM_INFO_TABLE: | 248 | case COMBIOS_OEM_INFO_TABLE: |
309 | check_offset = RBIOS16(rdev->bios_header_start + 0x6a); | 249 | check_offset = 0x6a; |
310 | if (check_offset) | ||
311 | offset = check_offset; | ||
312 | break; | 250 | break; |
313 | case COMBIOS_DYN_CLK_2_TABLE: | 251 | case COMBIOS_DYN_CLK_2_TABLE: |
314 | check_offset = RBIOS16(rdev->bios_header_start + 0x6c); | 252 | check_offset = 0x6c; |
315 | if (check_offset) | ||
316 | offset = check_offset; | ||
317 | break; | 253 | break; |
318 | case COMBIOS_POWER_CONNECTOR_INFO_TABLE: | 254 | case COMBIOS_POWER_CONNECTOR_INFO_TABLE: |
319 | check_offset = RBIOS16(rdev->bios_header_start + 0x6e); | 255 | check_offset = 0x6e; |
320 | if (check_offset) | ||
321 | offset = check_offset; | ||
322 | break; | 256 | break; |
323 | case COMBIOS_I2C_INFO_TABLE: | 257 | case COMBIOS_I2C_INFO_TABLE: |
324 | check_offset = RBIOS16(rdev->bios_header_start + 0x70); | 258 | check_offset = 0x70; |
325 | if (check_offset) | ||
326 | offset = check_offset; | ||
327 | break; | 259 | break; |
328 | /* relative offset tables */ | 260 | /* relative offset tables */ |
329 | case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */ | 261 | case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */ |
@@ -439,11 +371,16 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, | |||
439 | } | 371 | } |
440 | break; | 372 | break; |
441 | default: | 373 | default: |
374 | check_offset = 0; | ||
442 | break; | 375 | break; |
443 | } | 376 | } |
444 | 377 | ||
445 | return offset; | 378 | size = RBIOS8(rdev->bios_header_start + 0x6); |
379 | /* check absolute offset tables */ | ||
380 | if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size) | ||
381 | offset = RBIOS16(rdev->bios_header_start + check_offset); | ||
446 | 382 | ||
383 | return offset; | ||
447 | } | 384 | } |
448 | 385 | ||
449 | bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) | 386 | bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) |
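The long per-case blocks above collapse into "record a header-relative slot, then read it once at the end", and that single read is now guarded by the header size byte at offset 0x6, so a short COMBIOS header can no longer send the driver to a bogus table pointer. A rough standalone model of the consolidated lookup; RBIOS8/RBIOS16 are modeled here as little-endian reads from a byte array, which is an assumption of this sketch:

#include <stdint.h>
#include <stdio.h>

static uint8_t rbios8(const uint8_t *bios, unsigned off)
{
	return bios[off];
}

static uint16_t rbios16(const uint8_t *bios, unsigned off)
{
	return bios[off] | (bios[off + 1] << 8);
}

/* Resolve an absolute-offset table: 'slot' is the fixed position of the
 * table pointer inside the COMBIOS header (0xc, 0x14, ... in the driver). */
static uint16_t table_offset(const uint8_t *bios, unsigned hdr, unsigned slot)
{
	uint8_t size = rbios8(bios, hdr + 0x6);   /* header length in bytes */

	if (!slot || slot >= size)
		return 0;                         /* unknown table or header too short */
	return rbios16(bios, hdr + slot);
}

int main(void)
{
	uint8_t bios[256] = { 0 };
	unsigned hdr = 0x40;

	bios[hdr + 0x6] = 0x50;                   /* header claims to be 0x50 bytes long */
	bios[hdr + 0x30] = 0x34;                  /* PLL info table at 0x1234 (example) */
	bios[hdr + 0x31] = 0x12;

	printf("pll info table: 0x%04x\n", table_offset(bios, hdr, 0x30));
	printf("oob slot:       0x%04x\n", table_offset(bios, hdr, 0x70));
	return 0;
}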
@@ -965,16 +902,22 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct | |||
965 | dac = RBIOS8(dac_info + 0x3) & 0xf; | 902 | dac = RBIOS8(dac_info + 0x3) & 0xf; |
966 | p_dac->ps2_pdac_adj = (bg << 8) | (dac); | 903 | p_dac->ps2_pdac_adj = (bg << 8) | (dac); |
967 | } | 904 | } |
968 | /* if the values are all zeros, use the table */ | 905 | /* if the values are zeros, use the table */ |
969 | if (p_dac->ps2_pdac_adj) | 906 | if ((dac == 0) || (bg == 0)) |
907 | found = 0; | ||
908 | else | ||
970 | found = 1; | 909 | found = 1; |
971 | } | 910 | } |
972 | 911 | ||
973 | /* quirks */ | 912 | /* quirks */ |
913 | /* Radeon 7000 (RV100) */ | ||
914 | if (((dev->pdev->device == 0x5159) && | ||
915 | (dev->pdev->subsystem_vendor == 0x174B) && | ||
916 | (dev->pdev->subsystem_device == 0x7c28)) || | ||
974 | /* Radeon 9100 (R200) */ | 917 | /* Radeon 9100 (R200) */ |
975 | if ((dev->pdev->device == 0x514D) && | 918 | ((dev->pdev->device == 0x514D) && |
976 | (dev->pdev->subsystem_vendor == 0x174B) && | 919 | (dev->pdev->subsystem_vendor == 0x174B) && |
977 | (dev->pdev->subsystem_device == 0x7149)) { | 920 | (dev->pdev->subsystem_device == 0x7149))) { |
978 | /* vbios value is bad, use the default */ | 921 | /* vbios value is bad, use the default */ |
979 | found = 0; | 922 | found = 0; |
980 | } | 923 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 82335e38ec4f..63398ae1dbf5 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -1163,6 +1163,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
1163 | mutex_init(&rdev->gem.mutex); | 1163 | mutex_init(&rdev->gem.mutex); |
1164 | mutex_init(&rdev->pm.mutex); | 1164 | mutex_init(&rdev->pm.mutex); |
1165 | mutex_init(&rdev->gpu_clock_mutex); | 1165 | mutex_init(&rdev->gpu_clock_mutex); |
1166 | mutex_init(&rdev->srbm_mutex); | ||
1166 | init_rwsem(&rdev->pm.mclk_lock); | 1167 | init_rwsem(&rdev->pm.mclk_lock); |
1167 | init_rwsem(&rdev->exclusive_lock); | 1168 | init_rwsem(&rdev->exclusive_lock); |
1168 | init_waitqueue_head(&rdev->irq.vblank_queue); | 1169 | init_waitqueue_head(&rdev->irq.vblank_queue); |
@@ -1519,6 +1520,7 @@ int radeon_gpu_reset(struct radeon_device *rdev) | |||
1519 | radeon_save_bios_scratch_regs(rdev); | 1520 | radeon_save_bios_scratch_regs(rdev); |
1520 | /* block TTM */ | 1521 | /* block TTM */ |
1521 | resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); | 1522 | resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); |
1523 | radeon_pm_suspend(rdev); | ||
1522 | radeon_suspend(rdev); | 1524 | radeon_suspend(rdev); |
1523 | 1525 | ||
1524 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { | 1526 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
@@ -1564,6 +1566,7 @@ retry: | |||
1564 | } | 1566 | } |
1565 | } | 1567 | } |
1566 | 1568 | ||
1569 | radeon_pm_resume(rdev); | ||
1567 | drm_helper_resume_force_mode(rdev->ddev); | 1570 | drm_helper_resume_force_mode(rdev->ddev); |
1568 | 1571 | ||
1569 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); | 1572 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); |
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 7ddb0efe2408..ddb8f8e04eb5 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -782,7 +782,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) | |||
782 | 782 | ||
783 | } else { | 783 | } else { |
784 | /* put fence directly behind firmware */ | 784 | /* put fence directly behind firmware */ |
785 | index = ALIGN(rdev->uvd.fw_size, 8); | 785 | index = ALIGN(rdev->uvd_fw->size, 8); |
786 | rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index; | 786 | rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index; |
787 | rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index; | 787 | rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index; |
788 | } | 788 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index d9d31a383276..b990b1a2bd50 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
@@ -207,7 +207,6 @@ void radeon_gart_table_vram_free(struct radeon_device *rdev) | |||
207 | if (rdev->gart.robj == NULL) { | 207 | if (rdev->gart.robj == NULL) { |
208 | return; | 208 | return; |
209 | } | 209 | } |
210 | radeon_gart_table_vram_unpin(rdev); | ||
211 | radeon_bo_unref(&rdev->gart.robj); | 210 | radeon_bo_unref(&rdev->gart.robj); |
212 | } | 211 | } |
213 | 212 | ||
@@ -466,7 +465,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev) | |||
466 | size += rdev->vm_manager.max_pfn * 8; | 465 | size += rdev->vm_manager.max_pfn * 8; |
467 | size *= 2; | 466 | size *= 2; |
468 | r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, | 467 | r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, |
469 | RADEON_VM_PTB_ALIGN(size), | 468 | RADEON_GPU_PAGE_ALIGN(size), |
470 | RADEON_VM_PTB_ALIGN_SIZE, | 469 | RADEON_VM_PTB_ALIGN_SIZE, |
471 | RADEON_GEM_DOMAIN_VRAM); | 470 | RADEON_GEM_DOMAIN_VRAM); |
472 | if (r) { | 471 | if (r) { |
@@ -621,7 +620,7 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm) | |||
621 | } | 620 | } |
622 | 621 | ||
623 | retry: | 622 | retry: |
624 | pd_size = RADEON_VM_PTB_ALIGN(radeon_vm_directory_size(rdev)); | 623 | pd_size = radeon_vm_directory_size(rdev); |
625 | r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, | 624 | r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, |
626 | &vm->page_directory, pd_size, | 625 | &vm->page_directory, pd_size, |
627 | RADEON_VM_PTB_ALIGN_SIZE, false); | 626 | RADEON_VM_PTB_ALIGN_SIZE, false); |
@@ -953,8 +952,8 @@ static int radeon_vm_update_pdes(struct radeon_device *rdev, | |||
953 | retry: | 952 | retry: |
954 | r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, | 953 | r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, |
955 | &vm->page_tables[pt_idx], | 954 | &vm->page_tables[pt_idx], |
956 | RADEON_VM_PTB_ALIGN(RADEON_VM_PTE_COUNT * 8), | 955 | RADEON_VM_PTE_COUNT * 8, |
957 | RADEON_VM_PTB_ALIGN_SIZE, false); | 956 | RADEON_GPU_PAGE_SIZE, false); |
958 | 957 | ||
959 | if (r == -ENOMEM) { | 958 | if (r == -ENOMEM) { |
960 | r = radeon_vm_evict(rdev, vm); | 959 | r = radeon_vm_evict(rdev, vm); |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index f374c467aaca..c557850cd345 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -1176,7 +1176,14 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
1176 | case CHIP_VERDE: | 1176 | case CHIP_VERDE: |
1177 | case CHIP_OLAND: | 1177 | case CHIP_OLAND: |
1178 | case CHIP_HAINAN: | 1178 | case CHIP_HAINAN: |
1179 | if (radeon_dpm == 1) | 1179 | /* DPM requires the RLC, RV770+ dGPU requires SMC */ |
1180 | if (!rdev->rlc_fw) | ||
1181 | rdev->pm.pm_method = PM_METHOD_PROFILE; | ||
1182 | else if ((rdev->family >= CHIP_RV770) && | ||
1183 | (!(rdev->flags & RADEON_IS_IGP)) && | ||
1184 | (!rdev->smc_fw)) | ||
1185 | rdev->pm.pm_method = PM_METHOD_PROFILE; | ||
1186 | else if (radeon_dpm == 1) | ||
1180 | rdev->pm.pm_method = PM_METHOD_DPM; | 1187 | rdev->pm.pm_method = PM_METHOD_DPM; |
1181 | else | 1188 | else |
1182 | rdev->pm.pm_method = PM_METHOD_PROFILE; | 1189 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
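DPM now falls back to the old profile method when the microcode it depends on never loaded: the RLC on every part, plus the SMC on RV770-and-newer discrete GPUs, which pairs with the non-fatal SMC load in r600_init_microcode() earlier in this diff. A compact model of the decision, with the firmware pointers reduced to booleans:

#include <stdbool.h>
#include <stdio.h>

enum pm_method { PM_METHOD_PROFILE, PM_METHOD_DPM };

static enum pm_method pick_pm_method(bool have_rlc, bool have_smc,
				     bool rv770_or_newer, bool is_igp, int radeon_dpm)
{
	if (!have_rlc)
		return PM_METHOD_PROFILE;                 /* DPM requires the RLC */
	if (rv770_or_newer && !is_igp && !have_smc)
		return PM_METHOD_PROFILE;                 /* dGPU DPM also requires the SMC */
	return (radeon_dpm == 1) ? PM_METHOD_DPM : PM_METHOD_PROFILE;
}

int main(void)
{
	printf("%d\n", pick_pm_method(true, false, true, false, 1));   /* dGPU, no SMC -> profile */
	printf("%d\n", pick_pm_method(true, true, true, false, 1));    /* everything there -> DPM */
	return 0;
}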
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 414fd145d20e..b79f4f5cdd62 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
@@ -56,7 +56,6 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work); | |||
56 | 56 | ||
57 | int radeon_uvd_init(struct radeon_device *rdev) | 57 | int radeon_uvd_init(struct radeon_device *rdev) |
58 | { | 58 | { |
59 | const struct firmware *fw; | ||
60 | unsigned long bo_size; | 59 | unsigned long bo_size; |
61 | const char *fw_name; | 60 | const char *fw_name; |
62 | int i, r; | 61 | int i, r; |
@@ -105,14 +104,14 @@ int radeon_uvd_init(struct radeon_device *rdev) | |||
105 | return -EINVAL; | 104 | return -EINVAL; |
106 | } | 105 | } |
107 | 106 | ||
108 | r = request_firmware(&fw, fw_name, rdev->dev); | 107 | r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev); |
109 | if (r) { | 108 | if (r) { |
110 | dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n", | 109 | dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n", |
111 | fw_name); | 110 | fw_name); |
112 | return r; | 111 | return r; |
113 | } | 112 | } |
114 | 113 | ||
115 | bo_size = RADEON_GPU_PAGE_ALIGN(fw->size + 8) + | 114 | bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) + |
116 | RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE; | 115 | RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE; |
117 | r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true, | 116 | r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true, |
118 | RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo); | 117 | RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo); |
@@ -145,12 +144,6 @@ int radeon_uvd_init(struct radeon_device *rdev) | |||
145 | 144 | ||
146 | radeon_bo_unreserve(rdev->uvd.vcpu_bo); | 145 | radeon_bo_unreserve(rdev->uvd.vcpu_bo); |
147 | 146 | ||
148 | rdev->uvd.fw_size = fw->size; | ||
149 | memset(rdev->uvd.cpu_addr, 0, bo_size); | ||
150 | memcpy(rdev->uvd.cpu_addr, fw->data, fw->size); | ||
151 | |||
152 | release_firmware(fw); | ||
153 | |||
154 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { | 147 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { |
155 | atomic_set(&rdev->uvd.handles[i], 0); | 148 | atomic_set(&rdev->uvd.handles[i], 0); |
156 | rdev->uvd.filp[i] = NULL; | 149 | rdev->uvd.filp[i] = NULL; |
@@ -174,33 +167,60 @@ void radeon_uvd_fini(struct radeon_device *rdev) | |||
174 | } | 167 | } |
175 | 168 | ||
176 | radeon_bo_unref(&rdev->uvd.vcpu_bo); | 169 | radeon_bo_unref(&rdev->uvd.vcpu_bo); |
170 | |||
171 | release_firmware(rdev->uvd_fw); | ||
177 | } | 172 | } |
178 | 173 | ||
179 | int radeon_uvd_suspend(struct radeon_device *rdev) | 174 | int radeon_uvd_suspend(struct radeon_device *rdev) |
180 | { | 175 | { |
181 | unsigned size; | 176 | unsigned size; |
177 | void *ptr; | ||
178 | int i; | ||
182 | 179 | ||
183 | if (rdev->uvd.vcpu_bo == NULL) | 180 | if (rdev->uvd.vcpu_bo == NULL) |
184 | return 0; | 181 | return 0; |
185 | 182 | ||
183 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) | ||
184 | if (atomic_read(&rdev->uvd.handles[i])) | ||
185 | break; | ||
186 | |||
187 | if (i == RADEON_MAX_UVD_HANDLES) | ||
188 | return 0; | ||
189 | |||
186 | size = radeon_bo_size(rdev->uvd.vcpu_bo); | 190 | size = radeon_bo_size(rdev->uvd.vcpu_bo); |
191 | size -= rdev->uvd_fw->size; | ||
192 | |||
193 | ptr = rdev->uvd.cpu_addr; | ||
194 | ptr += rdev->uvd_fw->size; | ||
195 | |||
187 | rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); | 196 | rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); |
188 | memcpy(rdev->uvd.saved_bo, rdev->uvd.cpu_addr, size); | 197 | memcpy(rdev->uvd.saved_bo, ptr, size); |
189 | 198 | ||
190 | return 0; | 199 | return 0; |
191 | } | 200 | } |
192 | 201 | ||
193 | int radeon_uvd_resume(struct radeon_device *rdev) | 202 | int radeon_uvd_resume(struct radeon_device *rdev) |
194 | { | 203 | { |
204 | unsigned size; | ||
205 | void *ptr; | ||
206 | |||
195 | if (rdev->uvd.vcpu_bo == NULL) | 207 | if (rdev->uvd.vcpu_bo == NULL) |
196 | return -EINVAL; | 208 | return -EINVAL; |
197 | 209 | ||
210 | memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size); | ||
211 | |||
212 | size = radeon_bo_size(rdev->uvd.vcpu_bo); | ||
213 | size -= rdev->uvd_fw->size; | ||
214 | |||
215 | ptr = rdev->uvd.cpu_addr; | ||
216 | ptr += rdev->uvd_fw->size; | ||
217 | |||
198 | if (rdev->uvd.saved_bo != NULL) { | 218 | if (rdev->uvd.saved_bo != NULL) { |
199 | unsigned size = radeon_bo_size(rdev->uvd.vcpu_bo); | 219 | memcpy(ptr, rdev->uvd.saved_bo, size); |
200 | memcpy(rdev->uvd.cpu_addr, rdev->uvd.saved_bo, size); | ||
201 | kfree(rdev->uvd.saved_bo); | 220 | kfree(rdev->uvd.saved_bo); |
202 | rdev->uvd.saved_bo = NULL; | 221 | rdev->uvd.saved_bo = NULL; |
203 | } | 222 | } else |
223 | memset(ptr, 0, size); | ||
204 | 224 | ||
205 | return 0; | 225 | return 0; |
206 | } | 226 | } |
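Suspend no longer snapshots the whole VCPU buffer: the firmware image at the start of the BO can always be re-copied from rdev->uvd_fw on resume, so only the stack/heap area behind it is saved, and that area is simply zeroed when there was nothing to restore. The offset arithmetic, with made-up sizes:

#include <stdio.h>

int main(void)
{
	unsigned long bo_size = 4096 * 70;      /* example VCPU BO size */
	unsigned long fw_size = 4096 * 42;      /* example firmware image size */
	unsigned long save_off = fw_size;       /* runtime area starts right after the fw */
	unsigned long save_len = bo_size - fw_size;

	printf("save %lu bytes starting at offset %lu\n", save_len, save_off);
	return 0;
}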
@@ -215,8 +235,8 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp) | |||
215 | { | 235 | { |
216 | int i, r; | 236 | int i, r; |
217 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { | 237 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { |
218 | if (rdev->uvd.filp[i] == filp) { | 238 | uint32_t handle = atomic_read(&rdev->uvd.handles[i]); |
219 | uint32_t handle = atomic_read(&rdev->uvd.handles[i]); | 239 | if (handle != 0 && rdev->uvd.filp[i] == filp) { |
220 | struct radeon_fence *fence; | 240 | struct radeon_fence *fence; |
221 | 241 | ||
222 | r = radeon_uvd_get_destroy_msg(rdev, | 242 | r = radeon_uvd_get_destroy_msg(rdev, |
@@ -336,9 +356,19 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, | |||
336 | return -EINVAL; | 356 | return -EINVAL; |
337 | } | 357 | } |
338 | 358 | ||
359 | if (bo->tbo.sync_obj) { | ||
360 | r = radeon_fence_wait(bo->tbo.sync_obj, false); | ||
361 | if (r) { | ||
362 | DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); | ||
363 | return r; | ||
364 | } | ||
365 | } | ||
366 | |||
339 | r = radeon_bo_kmap(bo, &ptr); | 367 | r = radeon_bo_kmap(bo, &ptr); |
340 | if (r) | 368 | if (r) { |
369 | DRM_ERROR("Failed mapping the UVD message (%d)!\n", r); | ||
341 | return r; | 370 | return r; |
371 | } | ||
342 | 372 | ||
343 | msg = ptr + offset; | 373 | msg = ptr + offset; |
344 | 374 | ||
@@ -364,8 +394,14 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, | |||
364 | radeon_bo_kunmap(bo); | 394 | radeon_bo_kunmap(bo); |
365 | return 0; | 395 | return 0; |
366 | } else { | 396 | } else { |
367 | /* it's a create msg, no special handling needed */ | ||
368 | radeon_bo_kunmap(bo); | 397 | radeon_bo_kunmap(bo); |
398 | |||
399 | if (msg_type != 0) { | ||
400 | DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); | ||
401 | return -EINVAL; | ||
402 | } | ||
403 | |||
404 | /* it's a create msg, no special handling needed */ | ||
369 | } | 405 | } |
370 | 406 | ||
371 | /* create or decode, validate the handle */ | 407 | /* create or decode, validate the handle */ |
@@ -388,7 +424,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, | |||
388 | 424 | ||
389 | static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | 425 | static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, |
390 | int data0, int data1, | 426 | int data0, int data1, |
391 | unsigned buf_sizes[]) | 427 | unsigned buf_sizes[], bool *has_msg_cmd) |
392 | { | 428 | { |
393 | struct radeon_cs_chunk *relocs_chunk; | 429 | struct radeon_cs_chunk *relocs_chunk; |
394 | struct radeon_cs_reloc *reloc; | 430 | struct radeon_cs_reloc *reloc; |
@@ -417,7 +453,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | |||
417 | 453 | ||
418 | if (cmd < 0x4) { | 454 | if (cmd < 0x4) { |
419 | if ((end - start) < buf_sizes[cmd]) { | 455 | if ((end - start) < buf_sizes[cmd]) { |
420 | DRM_ERROR("buffer to small (%d / %d)!\n", | 456 | DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd, |
421 | (unsigned)(end - start), buf_sizes[cmd]); | 457 | (unsigned)(end - start), buf_sizes[cmd]); |
422 | return -EINVAL; | 458 | return -EINVAL; |
423 | } | 459 | } |
@@ -442,9 +478,17 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | |||
442 | } | 478 | } |
443 | 479 | ||
444 | if (cmd == 0) { | 480 | if (cmd == 0) { |
481 | if (*has_msg_cmd) { | ||
482 | DRM_ERROR("More than one message in a UVD-IB!\n"); | ||
483 | return -EINVAL; | ||
484 | } | ||
485 | *has_msg_cmd = true; | ||
445 | r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes); | 486 | r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes); |
446 | if (r) | 487 | if (r) |
447 | return r; | 488 | return r; |
489 | } else if (!*has_msg_cmd) { | ||
490 | DRM_ERROR("Message needed before other commands are sent!\n"); | ||
491 | return -EINVAL; | ||
448 | } | 492 | } |
449 | 493 | ||
450 | return 0; | 494 | return 0; |
@@ -453,7 +497,8 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | |||
453 | static int radeon_uvd_cs_reg(struct radeon_cs_parser *p, | 497 | static int radeon_uvd_cs_reg(struct radeon_cs_parser *p, |
454 | struct radeon_cs_packet *pkt, | 498 | struct radeon_cs_packet *pkt, |
455 | int *data0, int *data1, | 499 | int *data0, int *data1, |
456 | unsigned buf_sizes[]) | 500 | unsigned buf_sizes[], |
501 | bool *has_msg_cmd) | ||
457 | { | 502 | { |
458 | int i, r; | 503 | int i, r; |
459 | 504 | ||
@@ -467,7 +512,8 @@ static int radeon_uvd_cs_reg(struct radeon_cs_parser *p, | |||
467 | *data1 = p->idx; | 512 | *data1 = p->idx; |
468 | break; | 513 | break; |
469 | case UVD_GPCOM_VCPU_CMD: | 514 | case UVD_GPCOM_VCPU_CMD: |
470 | r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes); | 515 | r = radeon_uvd_cs_reloc(p, *data0, *data1, |
516 | buf_sizes, has_msg_cmd); | ||
471 | if (r) | 517 | if (r) |
472 | return r; | 518 | return r; |
473 | break; | 519 | break; |
@@ -488,6 +534,9 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p) | |||
488 | struct radeon_cs_packet pkt; | 534 | struct radeon_cs_packet pkt; |
489 | int r, data0 = 0, data1 = 0; | 535 | int r, data0 = 0, data1 = 0; |
490 | 536 | ||
537 | /* does the IB have a msg command */ | ||
538 | bool has_msg_cmd = false; | ||
539 | |||
491 | /* minimum buffer sizes */ | 540 | /* minimum buffer sizes */ |
492 | unsigned buf_sizes[] = { | 541 | unsigned buf_sizes[] = { |
493 | [0x00000000] = 2048, | 542 | [0x00000000] = 2048, |
@@ -514,8 +563,8 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p) | |||
514 | return r; | 563 | return r; |
515 | switch (pkt.type) { | 564 | switch (pkt.type) { |
516 | case RADEON_PACKET_TYPE0: | 565 | case RADEON_PACKET_TYPE0: |
517 | r = radeon_uvd_cs_reg(p, &pkt, &data0, | 566 | r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1, |
518 | &data1, buf_sizes); | 567 | buf_sizes, &has_msg_cmd); |
519 | if (r) | 568 | if (r) |
520 | return r; | 569 | return r; |
521 | break; | 570 | break; |
@@ -527,6 +576,12 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p) | |||
527 | return -EINVAL; | 576 | return -EINVAL; |
528 | } | 577 | } |
529 | } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); | 578 | } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); |
579 | |||
580 | if (!has_msg_cmd) { | ||
581 | DRM_ERROR("UVD-IBs need a msg command!\n"); | ||
582 | return -EINVAL; | ||
583 | } | ||
584 | |||
530 | return 0; | 585 | return 0; |
531 | } | 586 | } |
532 | 587 | ||
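The parser now tracks whether the IB carried a GPCOM command 0 (the UVD message): there must be exactly one, it must precede every other command, and an IB without one is rejected. A small standalone model of that bookkeeping, reduced to command numbers with no relocation handling:

#include <stdbool.h>
#include <stdio.h>

/* Returns 0 when the command stream satisfies the new UVD-IB rules. */
static int check_uvd_ib(const unsigned *cmds, unsigned count)
{
	bool has_msg_cmd = false;

	for (unsigned i = 0; i < count; i++) {
		if (cmds[i] == 0) {
			if (has_msg_cmd)
				return -1;        /* more than one message in the IB */
			has_msg_cmd = true;
		} else if (!has_msg_cmd) {
			return -1;                /* data command before the message */
		}
	}
	return has_msg_cmd ? 0 : -1;              /* IBs need a message command */
}

int main(void)
{
	unsigned ok[]  = { 0, 1, 2 };
	unsigned bad[] = { 1, 0 };

	printf("ok=%d bad=%d\n", check_uvd_ib(ok, 3), check_uvd_ib(bad, 2));
	return 0;
}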
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c index 65e33f387341..bdd888b4db2b 100644 --- a/drivers/gpu/drm/radeon/rv6xx_dpm.c +++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c | |||
@@ -819,7 +819,7 @@ static void rv6xx_program_memory_timing_parameters(struct radeon_device *rdev) | |||
819 | POWERMODE1(calculate_memory_refresh_rate(rdev, | 819 | POWERMODE1(calculate_memory_refresh_rate(rdev, |
820 | pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) | | 820 | pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) | |
821 | POWERMODE2(calculate_memory_refresh_rate(rdev, | 821 | POWERMODE2(calculate_memory_refresh_rate(rdev, |
822 | pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) | | 822 | pi->hw.sclks[R600_POWER_LEVEL_HIGH])) | |
823 | POWERMODE3(calculate_memory_refresh_rate(rdev, | 823 | POWERMODE3(calculate_memory_refresh_rate(rdev, |
824 | pi->hw.sclks[R600_POWER_LEVEL_HIGH]))); | 824 | pi->hw.sclks[R600_POWER_LEVEL_HIGH]))); |
825 | WREG32(ARB_RFSH_RATE, arb_refresh_rate); | 825 | WREG32(ARB_RFSH_RATE, arb_refresh_rate); |
@@ -1182,10 +1182,10 @@ static void rv6xx_program_display_gap(struct radeon_device *rdev) | |||
1182 | u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); | 1182 | u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); |
1183 | 1183 | ||
1184 | tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); | 1184 | tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); |
1185 | if (RREG32(AVIVO_D1CRTC_CONTROL) & AVIVO_CRTC_EN) { | 1185 | if (rdev->pm.dpm.new_active_crtcs & 1) { |
1186 | tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); | 1186 | tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); |
1187 | tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); | 1187 | tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); |
1188 | } else if (RREG32(AVIVO_D2CRTC_CONTROL) & AVIVO_CRTC_EN) { | 1188 | } else if (rdev->pm.dpm.new_active_crtcs & 2) { |
1189 | tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); | 1189 | tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE); |
1190 | tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); | 1190 | tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK); |
1191 | } else { | 1191 | } else { |
@@ -1670,6 +1670,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev) | |||
1670 | struct radeon_ps *old_ps = rdev->pm.dpm.current_ps; | 1670 | struct radeon_ps *old_ps = rdev->pm.dpm.current_ps; |
1671 | int ret; | 1671 | int ret; |
1672 | 1672 | ||
1673 | pi->restricted_levels = 0; | ||
1674 | |||
1673 | rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); | 1675 | rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); |
1674 | 1676 | ||
1675 | rv6xx_clear_vc(rdev); | 1677 | rv6xx_clear_vc(rdev); |
@@ -1756,6 +1758,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev) | |||
1756 | 1758 | ||
1757 | rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); | 1759 | rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); |
1758 | 1760 | ||
1761 | rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; | ||
1762 | |||
1759 | return 0; | 1763 | return 0; |
1760 | } | 1764 | } |
1761 | 1765 | ||
@@ -1940,9 +1944,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev) | |||
1940 | 1944 | ||
1941 | int rv6xx_dpm_init(struct radeon_device *rdev) | 1945 | int rv6xx_dpm_init(struct radeon_device *rdev) |
1942 | { | 1946 | { |
1943 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | 1947 | struct radeon_atom_ss ss; |
1944 | uint16_t data_offset, size; | ||
1945 | uint8_t frev, crev; | ||
1946 | struct atom_clock_dividers dividers; | 1948 | struct atom_clock_dividers dividers; |
1947 | struct rv6xx_power_info *pi; | 1949 | struct rv6xx_power_info *pi; |
1948 | int ret; | 1950 | int ret; |
@@ -1985,16 +1987,18 @@ int rv6xx_dpm_init(struct radeon_device *rdev) | |||
1985 | 1987 | ||
1986 | pi->gfx_clock_gating = true; | 1988 | pi->gfx_clock_gating = true; |
1987 | 1989 | ||
1988 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 1990 | pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, |
1989 | &frev, &crev, &data_offset)) { | 1991 | ASIC_INTERNAL_ENGINE_SS, 0); |
1990 | pi->sclk_ss = true; | 1992 | pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, |
1991 | pi->mclk_ss = true; | 1993 | ASIC_INTERNAL_MEMORY_SS, 0); |
1994 | |||
1995 | /* Disable sclk ss, causes hangs on a lot of systems */ | ||
1996 | pi->sclk_ss = false; | ||
1997 | |||
1998 | if (pi->sclk_ss || pi->mclk_ss) | ||
1992 | pi->dynamic_ss = true; | 1999 | pi->dynamic_ss = true; |
1993 | } else { | 2000 | else |
1994 | pi->sclk_ss = false; | ||
1995 | pi->mclk_ss = false; | ||
1996 | pi->dynamic_ss = false; | 2001 | pi->dynamic_ss = false; |
1997 | } | ||
1998 | 2002 | ||
1999 | pi->dynamic_pcie_gen2 = true; | 2003 | pi->dynamic_pcie_gen2 = true; |
2000 | 2004 | ||
@@ -2085,3 +2089,34 @@ u32 rv6xx_dpm_get_mclk(struct radeon_device *rdev, bool low) | |||
2085 | else | 2089 | else |
2086 | return requested_state->high.mclk; | 2090 | return requested_state->high.mclk; |
2087 | } | 2091 | } |
2092 | |||
2093 | int rv6xx_dpm_force_performance_level(struct radeon_device *rdev, | ||
2094 | enum radeon_dpm_forced_level level) | ||
2095 | { | ||
2096 | struct rv6xx_power_info *pi = rv6xx_get_pi(rdev); | ||
2097 | |||
2098 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { | ||
2099 | pi->restricted_levels = 3; | ||
2100 | } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) { | ||
2101 | pi->restricted_levels = 2; | ||
2102 | } else { | ||
2103 | pi->restricted_levels = 0; | ||
2104 | } | ||
2105 | |||
2106 | rv6xx_clear_vc(rdev); | ||
2107 | r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true); | ||
2108 | r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF); | ||
2109 | r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW); | ||
2110 | r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false); | ||
2111 | r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false); | ||
2112 | rv6xx_enable_medium(rdev); | ||
2113 | rv6xx_enable_high(rdev); | ||
2114 | if (pi->restricted_levels == 3) | ||
2115 | r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, false); | ||
2116 | rv6xx_program_vc(rdev); | ||
2117 | rv6xx_program_at(rdev); | ||
2118 | |||
2119 | rdev->pm.dpm.forced_level = level; | ||
2120 | |||
2121 | return 0; | ||
2122 | } | ||
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 30ea14e8854c..f5e92cfcc140 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -744,10 +744,10 @@ static void rv770_init_golden_registers(struct radeon_device *rdev) | |||
744 | (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); | 744 | (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); |
745 | radeon_program_register_sequence(rdev, | 745 | radeon_program_register_sequence(rdev, |
746 | rv730_golden_registers, | 746 | rv730_golden_registers, |
747 | (const u32)ARRAY_SIZE(rv770_golden_registers)); | 747 | (const u32)ARRAY_SIZE(rv730_golden_registers)); |
748 | radeon_program_register_sequence(rdev, | 748 | radeon_program_register_sequence(rdev, |
749 | rv730_mgcg_init, | 749 | rv730_mgcg_init, |
750 | (const u32)ARRAY_SIZE(rv770_mgcg_init)); | 750 | (const u32)ARRAY_SIZE(rv730_mgcg_init)); |
751 | break; | 751 | break; |
752 | case CHIP_RV710: | 752 | case CHIP_RV710: |
753 | radeon_program_register_sequence(rdev, | 753 | radeon_program_register_sequence(rdev, |
@@ -758,18 +758,18 @@ static void rv770_init_golden_registers(struct radeon_device *rdev) | |||
758 | (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); | 758 | (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); |
759 | radeon_program_register_sequence(rdev, | 759 | radeon_program_register_sequence(rdev, |
760 | rv710_golden_registers, | 760 | rv710_golden_registers, |
761 | (const u32)ARRAY_SIZE(rv770_golden_registers)); | 761 | (const u32)ARRAY_SIZE(rv710_golden_registers)); |
762 | radeon_program_register_sequence(rdev, | 762 | radeon_program_register_sequence(rdev, |
763 | rv710_mgcg_init, | 763 | rv710_mgcg_init, |
764 | (const u32)ARRAY_SIZE(rv770_mgcg_init)); | 764 | (const u32)ARRAY_SIZE(rv710_mgcg_init)); |
765 | break; | 765 | break; |
766 | case CHIP_RV740: | 766 | case CHIP_RV740: |
767 | radeon_program_register_sequence(rdev, | 767 | radeon_program_register_sequence(rdev, |
768 | rv740_golden_registers, | 768 | rv740_golden_registers, |
769 | (const u32)ARRAY_SIZE(rv770_golden_registers)); | 769 | (const u32)ARRAY_SIZE(rv740_golden_registers)); |
770 | radeon_program_register_sequence(rdev, | 770 | radeon_program_register_sequence(rdev, |
771 | rv740_mgcg_init, | 771 | rv740_mgcg_init, |
772 | (const u32)ARRAY_SIZE(rv770_mgcg_init)); | 772 | (const u32)ARRAY_SIZE(rv740_mgcg_init)); |
773 | break; | 773 | break; |
774 | default: | 774 | default: |
775 | break; | 775 | break; |
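The rv730/rv710/rv740 cases were passing their own register tables but sizing them with ARRAY_SIZE of the rv770 tables, so each sequence was programmed with the wrong element count, too few entries if the other table is shorter, or a walk past the end if it is longer. A small demonstration of why the macro must name the same array as the pointer; the table lengths below are invented for illustration:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static void program_sequence(const unsigned *regs, unsigned count)
{
	printf("programming %u entries starting with 0x%x\n", count, regs[0]);
}

int main(void)
{
	unsigned rv730_golden[9] = { 0x730 };    /* lengths are illustrative only */
	unsigned rv770_golden[6] = { 0x770 };

	program_sequence(rv730_golden, ARRAY_SIZE(rv730_golden));  /* correct: 9 entries */
	program_sequence(rv730_golden, ARRAY_SIZE(rv770_golden));  /* old bug: sized by the wrong table */
	return 0;
}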
@@ -813,7 +813,7 @@ int rv770_uvd_resume(struct radeon_device *rdev) | |||
813 | 813 | ||
814 | /* program the VCPU memory controller bits 0-27 */ | 814 | /* program the VCPU memory controller bits 0-27 */ |
815 | addr = rdev->uvd.gpu_addr >> 3; | 815 | addr = rdev->uvd.gpu_addr >> 3; |
816 | size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3; | 816 | size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3; |
817 | WREG32(UVD_VCPU_CACHE_OFFSET0, addr); | 817 | WREG32(UVD_VCPU_CACHE_OFFSET0, addr); |
818 | WREG32(UVD_VCPU_CACHE_SIZE0, size); | 818 | WREG32(UVD_VCPU_CACHE_SIZE0, size); |
819 | 819 | ||
@@ -1829,6 +1829,8 @@ static int rv770_startup(struct radeon_device *rdev) | |||
1829 | /* enable pcie gen2 link */ | 1829 | /* enable pcie gen2 link */ |
1830 | rv770_pcie_gen2_enable(rdev); | 1830 | rv770_pcie_gen2_enable(rdev); |
1831 | 1831 | ||
1832 | rv770_mc_program(rdev); | ||
1833 | |||
1832 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | 1834 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
1833 | r = r600_init_microcode(rdev); | 1835 | r = r600_init_microcode(rdev); |
1834 | if (r) { | 1836 | if (r) { |
@@ -1841,7 +1843,6 @@ static int rv770_startup(struct radeon_device *rdev) | |||
1841 | if (r) | 1843 | if (r) |
1842 | return r; | 1844 | return r; |
1843 | 1845 | ||
1844 | rv770_mc_program(rdev); | ||
1845 | if (rdev->flags & RADEON_IS_AGP) { | 1846 | if (rdev->flags & RADEON_IS_AGP) { |
1846 | rv770_agp_enable(rdev); | 1847 | rv770_agp_enable(rdev); |
1847 | } else { | 1848 | } else { |
@@ -1983,6 +1984,7 @@ int rv770_resume(struct radeon_device *rdev) | |||
1983 | int rv770_suspend(struct radeon_device *rdev) | 1984 | int rv770_suspend(struct radeon_device *rdev) |
1984 | { | 1985 | { |
1985 | r600_audio_fini(rdev); | 1986 | r600_audio_fini(rdev); |
1987 | r600_uvd_stop(rdev); | ||
1986 | radeon_uvd_suspend(rdev); | 1988 | radeon_uvd_suspend(rdev); |
1987 | r700_cp_stop(rdev); | 1989 | r700_cp_stop(rdev); |
1988 | r600_dma_stop(rdev); | 1990 | r600_dma_stop(rdev); |
@@ -2098,6 +2100,7 @@ void rv770_fini(struct radeon_device *rdev) | |||
2098 | radeon_ib_pool_fini(rdev); | 2100 | radeon_ib_pool_fini(rdev); |
2099 | radeon_irq_kms_fini(rdev); | 2101 | radeon_irq_kms_fini(rdev); |
2100 | rv770_pcie_gart_fini(rdev); | 2102 | rv770_pcie_gart_fini(rdev); |
2103 | r600_uvd_stop(rdev); | ||
2101 | radeon_uvd_fini(rdev); | 2104 | radeon_uvd_fini(rdev); |
2102 | r600_vram_scratch_fini(rdev); | 2105 | r600_vram_scratch_fini(rdev); |
2103 | radeon_gem_fini(rdev); | 2106 | radeon_gem_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 2d347925f77d..094c67a29d0d 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c | |||
@@ -2319,12 +2319,25 @@ int rv7xx_parse_power_table(struct radeon_device *rdev) | |||
2319 | return 0; | 2319 | return 0; |
2320 | } | 2320 | } |
2321 | 2321 | ||
2322 | void rv770_get_engine_memory_ss(struct radeon_device *rdev) | ||
2323 | { | ||
2324 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); | ||
2325 | struct radeon_atom_ss ss; | ||
2326 | |||
2327 | pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, | ||
2328 | ASIC_INTERNAL_ENGINE_SS, 0); | ||
2329 | pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, | ||
2330 | ASIC_INTERNAL_MEMORY_SS, 0); | ||
2331 | |||
2332 | if (pi->sclk_ss || pi->mclk_ss) | ||
2333 | pi->dynamic_ss = true; | ||
2334 | else | ||
2335 | pi->dynamic_ss = false; | ||
2336 | } | ||
2337 | |||
2322 | int rv770_dpm_init(struct radeon_device *rdev) | 2338 | int rv770_dpm_init(struct radeon_device *rdev) |
2323 | { | 2339 | { |
2324 | struct rv7xx_power_info *pi; | 2340 | struct rv7xx_power_info *pi; |
2325 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
2326 | uint16_t data_offset, size; | ||
2327 | uint8_t frev, crev; | ||
2328 | struct atom_clock_dividers dividers; | 2341 | struct atom_clock_dividers dividers; |
2329 | int ret; | 2342 | int ret; |
2330 | 2343 | ||
@@ -2369,16 +2382,7 @@ int rv770_dpm_init(struct radeon_device *rdev) | |||
2369 | pi->mvdd_control = | 2382 | pi->mvdd_control = |
2370 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0); | 2383 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0); |
2371 | 2384 | ||
2372 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 2385 | rv770_get_engine_memory_ss(rdev); |
2373 | &frev, &crev, &data_offset)) { | ||
2374 | pi->sclk_ss = true; | ||
2375 | pi->mclk_ss = true; | ||
2376 | pi->dynamic_ss = true; | ||
2377 | } else { | ||
2378 | pi->sclk_ss = false; | ||
2379 | pi->mclk_ss = false; | ||
2380 | pi->dynamic_ss = false; | ||
2381 | } | ||
2382 | 2386 | ||
2383 | pi->asi = RV770_ASI_DFLT; | 2387 | pi->asi = RV770_ASI_DFLT; |
2384 | pi->pasi = RV770_HASI_DFLT; | 2388 | pi->pasi = RV770_HASI_DFLT; |
@@ -2393,8 +2397,7 @@ int rv770_dpm_init(struct radeon_device *rdev) | |||
2393 | 2397 | ||
2394 | pi->dynamic_pcie_gen2 = true; | 2398 | pi->dynamic_pcie_gen2 = true; |
2395 | 2399 | ||
2396 | if (pi->gfx_clock_gating && | 2400 | if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) |
2397 | (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) | ||
2398 | pi->thermal_protection = true; | 2401 | pi->thermal_protection = true; |
2399 | else | 2402 | else |
2400 | pi->thermal_protection = false; | 2403 | pi->thermal_protection = false; |
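rv770_get_engine_memory_ss() replaces the old check that only tested whether an ASIC_InternalSS_Info table could be parsed: it queries radeon_atombios_get_asic_ss_info() for the engine and memory spread-spectrum entries individually and derives dynamic_ss from the two results; si_dpm_init() below is converted to the same helper. The last hunk also enables thermal protection whenever an internal thermal sensor is present, no longer requiring gfx_clock_gating. Semantics of the flags after the helper runs, as implemented above:

	/* pi->sclk_ss    - an engine-clock SS entry was found
	 * pi->mclk_ss    - a memory-clock SS entry was found
	 * pi->dynamic_ss - either of the two was found
	 */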
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h index 96b1b2a62a8a..9244effc6b59 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.h +++ b/drivers/gpu/drm/radeon/rv770_dpm.h | |||
@@ -275,6 +275,7 @@ void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev, | |||
275 | void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev, | 275 | void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev, |
276 | struct radeon_ps *new_ps, | 276 | struct radeon_ps *new_ps, |
277 | struct radeon_ps *old_ps); | 277 | struct radeon_ps *old_ps); |
278 | void rv770_get_engine_memory_ss(struct radeon_device *rdev); | ||
278 | 279 | ||
279 | /* smc */ | 280 | /* smc */ |
280 | int rv770_read_smc_soft_register(struct radeon_device *rdev, | 281 | int rv770_read_smc_soft_register(struct radeon_device *rdev, |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index d325280e2f9f..daa8d2df8ec5 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -1663,9 +1663,13 @@ static int si_init_microcode(struct radeon_device *rdev) | |||
1663 | 1663 | ||
1664 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); | 1664 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); |
1665 | err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); | 1665 | err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); |
1666 | if (err) | 1666 | if (err) { |
1667 | goto out; | 1667 | printk(KERN_ERR |
1668 | if (rdev->smc_fw->size != smc_req_size) { | 1668 | "smc: error loading firmware \"%s\"\n", |
1669 | fw_name); | ||
1670 | release_firmware(rdev->smc_fw); | ||
1671 | rdev->smc_fw = NULL; | ||
1672 | } else if (rdev->smc_fw->size != smc_req_size) { | ||
1669 | printk(KERN_ERR | 1673 | printk(KERN_ERR |
1670 | "si_smc: Bogus length %zu in firmware \"%s\"\n", | 1674 | "si_smc: Bogus length %zu in firmware \"%s\"\n", |
1671 | rdev->smc_fw->size, fw_name); | 1675 | rdev->smc_fw->size, fw_name); |
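The intent of this hunk is that a missing SMC image is treated as non-fatal: the error is logged, the reference is released and rdev->smc_fw is left NULL, so SMC-dependent code can check for it and skip itself. A sketch of the consumer side under that assumption (the message text is illustrative, not taken from the driver):

	if (!rdev->smc_fw) {
		DRM_INFO("SMC firmware not available, skipping SMC-dependent setup\n");
		return 0;	/* carry on without the feature */
	}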
@@ -5215,14 +5219,12 @@ static void si_enable_mc_ls(struct radeon_device *rdev, | |||
5215 | 5219 | ||
5216 | static void si_init_cg(struct radeon_device *rdev) | 5220 | static void si_init_cg(struct radeon_device *rdev) |
5217 | { | 5221 | { |
5218 | bool has_uvd = true; | ||
5219 | |||
5220 | si_enable_mgcg(rdev, true); | 5222 | si_enable_mgcg(rdev, true); |
5221 | si_enable_cgcg(rdev, true); | 5223 | si_enable_cgcg(rdev, false); |
5222 | /* disable MC LS on Tahiti */ | 5224 | /* disable MC LS on Tahiti */ |
5223 | if (rdev->family == CHIP_TAHITI) | 5225 | if (rdev->family == CHIP_TAHITI) |
5224 | si_enable_mc_ls(rdev, false); | 5226 | si_enable_mc_ls(rdev, false); |
5225 | if (has_uvd) { | 5227 | if (rdev->has_uvd) { |
5226 | si_enable_uvd_mgcg(rdev, true); | 5228 | si_enable_uvd_mgcg(rdev, true); |
5227 | si_init_uvd_internal_cg(rdev); | 5229 | si_init_uvd_internal_cg(rdev); |
5228 | } | 5230 | } |
@@ -5230,9 +5232,7 @@ static void si_init_cg(struct radeon_device *rdev) | |||
5230 | 5232 | ||
5231 | static void si_fini_cg(struct radeon_device *rdev) | 5233 | static void si_fini_cg(struct radeon_device *rdev) |
5232 | { | 5234 | { |
5233 | bool has_uvd = true; | 5235 | if (rdev->has_uvd) |
5234 | |||
5235 | if (has_uvd) | ||
5236 | si_enable_uvd_mgcg(rdev, false); | 5236 | si_enable_uvd_mgcg(rdev, false); |
5237 | si_enable_cgcg(rdev, false); | 5237 | si_enable_cgcg(rdev, false); |
5238 | si_enable_mgcg(rdev, false); | 5238 | si_enable_mgcg(rdev, false); |
@@ -5241,11 +5241,11 @@ static void si_fini_cg(struct radeon_device *rdev) | |||
5241 | static void si_init_pg(struct radeon_device *rdev) | 5241 | static void si_init_pg(struct radeon_device *rdev) |
5242 | { | 5242 | { |
5243 | bool has_pg = false; | 5243 | bool has_pg = false; |
5244 | 5244 | #if 0 | |
5245 | /* only cape verde supports PG */ | 5245 | /* only cape verde supports PG */ |
5246 | if (rdev->family == CHIP_VERDE) | 5246 | if (rdev->family == CHIP_VERDE) |
5247 | has_pg = true; | 5247 | has_pg = true; |
5248 | 5248 | #endif | |
5249 | if (has_pg) { | 5249 | if (has_pg) { |
5250 | si_init_ao_cu_mask(rdev); | 5250 | si_init_ao_cu_mask(rdev); |
5251 | si_init_dma_pg(rdev); | 5251 | si_init_dma_pg(rdev); |
@@ -6422,6 +6422,8 @@ static int si_startup(struct radeon_device *rdev) | |||
6422 | /* enable aspm */ | 6422 | /* enable aspm */ |
6423 | si_program_aspm(rdev); | 6423 | si_program_aspm(rdev); |
6424 | 6424 | ||
6425 | si_mc_program(rdev); | ||
6426 | |||
6425 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || | 6427 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || |
6426 | !rdev->rlc_fw || !rdev->mc_fw) { | 6428 | !rdev->rlc_fw || !rdev->mc_fw) { |
6427 | r = si_init_microcode(rdev); | 6429 | r = si_init_microcode(rdev); |
@@ -6441,7 +6443,6 @@ static int si_startup(struct radeon_device *rdev) | |||
6441 | if (r) | 6443 | if (r) |
6442 | return r; | 6444 | return r; |
6443 | 6445 | ||
6444 | si_mc_program(rdev); | ||
6445 | r = si_pcie_gart_enable(rdev); | 6446 | r = si_pcie_gart_enable(rdev); |
6446 | if (r) | 6447 | if (r) |
6447 | return r; | 6448 | return r; |
@@ -6625,7 +6626,7 @@ int si_suspend(struct radeon_device *rdev) | |||
6625 | si_cp_enable(rdev, false); | 6626 | si_cp_enable(rdev, false); |
6626 | cayman_dma_stop(rdev); | 6627 | cayman_dma_stop(rdev); |
6627 | if (rdev->has_uvd) { | 6628 | if (rdev->has_uvd) { |
6628 | r600_uvd_rbc_stop(rdev); | 6629 | r600_uvd_stop(rdev); |
6629 | radeon_uvd_suspend(rdev); | 6630 | radeon_uvd_suspend(rdev); |
6630 | } | 6631 | } |
6631 | si_irq_suspend(rdev); | 6632 | si_irq_suspend(rdev); |
@@ -6767,8 +6768,10 @@ void si_fini(struct radeon_device *rdev) | |||
6767 | radeon_vm_manager_fini(rdev); | 6768 | radeon_vm_manager_fini(rdev); |
6768 | radeon_ib_pool_fini(rdev); | 6769 | radeon_ib_pool_fini(rdev); |
6769 | radeon_irq_kms_fini(rdev); | 6770 | radeon_irq_kms_fini(rdev); |
6770 | if (rdev->has_uvd) | 6771 | if (rdev->has_uvd) { |
6772 | r600_uvd_stop(rdev); | ||
6771 | radeon_uvd_fini(rdev); | 6773 | radeon_uvd_fini(rdev); |
6774 | } | ||
6772 | si_pcie_gart_fini(rdev); | 6775 | si_pcie_gart_fini(rdev); |
6773 | r600_vram_scratch_fini(rdev); | 6776 | r600_vram_scratch_fini(rdev); |
6774 | radeon_gem_fini(rdev); | 6777 | radeon_gem_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 73aaa2e4c312..88699e3cd868 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
@@ -37,8 +37,6 @@ | |||
37 | 37 | ||
38 | #define SMC_RAM_END 0x20000 | 38 | #define SMC_RAM_END 0x20000 |
39 | 39 | ||
40 | #define DDR3_DRAM_ROWS 0x2000 | ||
41 | |||
42 | #define SCLK_MIN_DEEPSLEEP_FREQ 1350 | 40 | #define SCLK_MIN_DEEPSLEEP_FREQ 1350 |
43 | 41 | ||
44 | static const struct si_cac_config_reg cac_weights_tahiti[] = | 42 | static const struct si_cac_config_reg cac_weights_tahiti[] = |
@@ -1767,8 +1765,9 @@ static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coe | |||
1767 | { | 1765 | { |
1768 | s64 kt, kv, leakage_w, i_leakage, vddc; | 1766 | s64 kt, kv, leakage_w, i_leakage, vddc; |
1769 | s64 temperature, t_slope, t_intercept, av, bv, t_ref; | 1767 | s64 temperature, t_slope, t_intercept, av, bv, t_ref; |
1768 | s64 tmp; | ||
1770 | 1769 | ||
1771 | i_leakage = drm_int2fixp(ileakage / 100); | 1770 | i_leakage = div64_s64(drm_int2fixp(ileakage), 100); |
1772 | vddc = div64_s64(drm_int2fixp(v), 1000); | 1771 | vddc = div64_s64(drm_int2fixp(v), 1000); |
1773 | temperature = div64_s64(drm_int2fixp(t), 1000); | 1772 | temperature = div64_s64(drm_int2fixp(t), 1000); |
1774 | 1773 | ||
@@ -1778,8 +1777,9 @@ static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coe | |||
1778 | bv = div64_s64(drm_int2fixp(coeff->bv), 100000000); | 1777 | bv = div64_s64(drm_int2fixp(coeff->bv), 100000000); |
1779 | t_ref = drm_int2fixp(coeff->t_ref); | 1778 | t_ref = drm_int2fixp(coeff->t_ref); |
1780 | 1779 | ||
1781 | kt = drm_fixp_div(drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, temperature)), | 1780 | tmp = drm_fixp_mul(t_slope, vddc) + t_intercept; |
1782 | drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, t_ref))); | 1781 | kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature)); |
1782 | kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref))); | ||
1783 | kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc))); | 1783 | kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc))); |
1784 | 1784 | ||
1785 | leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc); | 1785 | leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc); |
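Two things change in si_calculate_leakage_for_v_and_t_formula(): ileakage is converted to fixed point before dividing by 100 (div64_s64(drm_int2fixp(ileakage), 100)), so the fractional part is no longer lost to integer division, and the shared exponent argument is computed once in tmp. The computation the fixed-point code implements is:

	m  = t_slope * vddc + t_intercept
	kt = exp(m * T) / exp(m * T_ref)
	kv = av * exp(bv * vddc)
	leakage_w = i_leakage * kt * kv * vddc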
@@ -1931,6 +1931,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev) | |||
1931 | si_pi->cac_override = cac_override_pitcairn; | 1931 | si_pi->cac_override = cac_override_pitcairn; |
1932 | si_pi->powertune_data = &powertune_data_pitcairn; | 1932 | si_pi->powertune_data = &powertune_data_pitcairn; |
1933 | si_pi->dte_data = dte_data_pitcairn; | 1933 | si_pi->dte_data = dte_data_pitcairn; |
1934 | break; | ||
1934 | } | 1935 | } |
1935 | } else if (rdev->family == CHIP_VERDE) { | 1936 | } else if (rdev->family == CHIP_VERDE) { |
1936 | si_pi->lcac_config = lcac_cape_verde; | 1937 | si_pi->lcac_config = lcac_cape_verde; |
@@ -1941,6 +1942,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev) | |||
1941 | case 0x683B: | 1942 | case 0x683B: |
1942 | case 0x683F: | 1943 | case 0x683F: |
1943 | case 0x6829: | 1944 | case 0x6829: |
1945 | case 0x6835: | ||
1944 | si_pi->cac_weights = cac_weights_cape_verde_pro; | 1946 | si_pi->cac_weights = cac_weights_cape_verde_pro; |
1945 | si_pi->dte_data = dte_data_cape_verde; | 1947 | si_pi->dte_data = dte_data_cape_verde; |
1946 | break; | 1948 | break; |
@@ -2901,7 +2903,8 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
2901 | { | 2903 | { |
2902 | struct ni_ps *ps = ni_get_ps(rps); | 2904 | struct ni_ps *ps = ni_get_ps(rps); |
2903 | struct radeon_clock_and_voltage_limits *max_limits; | 2905 | struct radeon_clock_and_voltage_limits *max_limits; |
2904 | bool disable_mclk_switching; | 2906 | bool disable_mclk_switching = false; |
2907 | bool disable_sclk_switching = false; | ||
2905 | u32 mclk, sclk; | 2908 | u32 mclk, sclk; |
2906 | u16 vddc, vddci; | 2909 | u16 vddc, vddci; |
2907 | int i; | 2910 | int i; |
@@ -2909,8 +2912,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
2909 | if ((rdev->pm.dpm.new_active_crtc_count > 1) || | 2912 | if ((rdev->pm.dpm.new_active_crtc_count > 1) || |
2910 | ni_dpm_vblank_too_short(rdev)) | 2913 | ni_dpm_vblank_too_short(rdev)) |
2911 | disable_mclk_switching = true; | 2914 | disable_mclk_switching = true; |
2912 | else | 2915 | |
2913 | disable_mclk_switching = false; | 2916 | if (rps->vclk || rps->dclk) { |
2917 | disable_mclk_switching = true; | ||
2918 | disable_sclk_switching = true; | ||
2919 | } | ||
2914 | 2920 | ||
2915 | if (rdev->pm.dpm.ac_power) | 2921 | if (rdev->pm.dpm.ac_power) |
2916 | max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; | 2922 | max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; |
@@ -2938,27 +2944,43 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
2938 | 2944 | ||
2939 | if (disable_mclk_switching) { | 2945 | if (disable_mclk_switching) { |
2940 | mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; | 2946 | mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; |
2941 | sclk = ps->performance_levels[0].sclk; | ||
2942 | vddc = ps->performance_levels[0].vddc; | ||
2943 | vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; | 2947 | vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; |
2944 | } else { | 2948 | } else { |
2945 | sclk = ps->performance_levels[0].sclk; | ||
2946 | mclk = ps->performance_levels[0].mclk; | 2949 | mclk = ps->performance_levels[0].mclk; |
2947 | vddc = ps->performance_levels[0].vddc; | ||
2948 | vddci = ps->performance_levels[0].vddci; | 2950 | vddci = ps->performance_levels[0].vddci; |
2949 | } | 2951 | } |
2950 | 2952 | ||
2953 | if (disable_sclk_switching) { | ||
2954 | sclk = ps->performance_levels[ps->performance_level_count - 1].sclk; | ||
2955 | vddc = ps->performance_levels[ps->performance_level_count - 1].vddc; | ||
2956 | } else { | ||
2957 | sclk = ps->performance_levels[0].sclk; | ||
2958 | vddc = ps->performance_levels[0].vddc; | ||
2959 | } | ||
2960 | |||
2951 | /* adjusted low state */ | 2961 | /* adjusted low state */ |
2952 | ps->performance_levels[0].sclk = sclk; | 2962 | ps->performance_levels[0].sclk = sclk; |
2953 | ps->performance_levels[0].mclk = mclk; | 2963 | ps->performance_levels[0].mclk = mclk; |
2954 | ps->performance_levels[0].vddc = vddc; | 2964 | ps->performance_levels[0].vddc = vddc; |
2955 | ps->performance_levels[0].vddci = vddci; | 2965 | ps->performance_levels[0].vddci = vddci; |
2956 | 2966 | ||
2957 | for (i = 1; i < ps->performance_level_count; i++) { | 2967 | if (disable_sclk_switching) { |
2958 | if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk) | 2968 | sclk = ps->performance_levels[0].sclk; |
2959 | ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk; | 2969 | for (i = 1; i < ps->performance_level_count; i++) { |
2960 | if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc) | 2970 | if (sclk < ps->performance_levels[i].sclk) |
2961 | ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; | 2971 | sclk = ps->performance_levels[i].sclk; |
2972 | } | ||
2973 | for (i = 0; i < ps->performance_level_count; i++) { | ||
2974 | ps->performance_levels[i].sclk = sclk; | ||
2975 | ps->performance_levels[i].vddc = vddc; | ||
2976 | } | ||
2977 | } else { | ||
2978 | for (i = 1; i < ps->performance_level_count; i++) { | ||
2979 | if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk) | ||
2980 | ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk; | ||
2981 | if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc) | ||
2982 | ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; | ||
2983 | } | ||
2962 | } | 2984 | } |
2963 | 2985 | ||
2964 | if (disable_mclk_switching) { | 2986 | if (disable_mclk_switching) { |
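si_apply_state_adjust_rules() now also disables sclk switching when the requested state carries UVD clocks (rps->vclk || rps->dclk set), in addition to disabling mclk switching. Condensed effect of the new disable_sclk_switching path (a sketch; the real code reuses the sclk/vddc locals shown above):

	u32 max_sclk = 0;
	for (i = 0; i < ps->performance_level_count; i++)
		max_sclk = max(max_sclk, ps->performance_levels[i].sclk);
	for (i = 0; i < ps->performance_level_count; i++) {
		ps->performance_levels[i].sclk = max_sclk;
		ps->performance_levels[i].vddc =
			ps->performance_levels[ps->performance_level_count - 1].vddc;
	}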
@@ -3237,10 +3259,10 @@ int si_dpm_force_performance_level(struct radeon_device *rdev, | |||
3237 | { | 3259 | { |
3238 | struct radeon_ps *rps = rdev->pm.dpm.current_ps; | 3260 | struct radeon_ps *rps = rdev->pm.dpm.current_ps; |
3239 | struct ni_ps *ps = ni_get_ps(rps); | 3261 | struct ni_ps *ps = ni_get_ps(rps); |
3240 | u32 levels; | 3262 | u32 levels = ps->performance_level_count; |
3241 | 3263 | ||
3242 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { | 3264 | if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { |
3243 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK) | 3265 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) |
3244 | return -EINVAL; | 3266 | return -EINVAL; |
3245 | 3267 | ||
3246 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK) | 3268 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK) |
@@ -3249,14 +3271,13 @@ int si_dpm_force_performance_level(struct radeon_device *rdev, | |||
3249 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) | 3271 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) |
3250 | return -EINVAL; | 3272 | return -EINVAL; |
3251 | 3273 | ||
3252 | levels = ps->performance_level_count - 1; | 3274 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK) |
3253 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) | ||
3254 | return -EINVAL; | 3275 | return -EINVAL; |
3255 | } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { | 3276 | } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { |
3256 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) | 3277 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) |
3257 | return -EINVAL; | 3278 | return -EINVAL; |
3258 | 3279 | ||
3259 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK) | 3280 | if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) |
3260 | return -EINVAL; | 3281 | return -EINVAL; |
3261 | } | 3282 | } |
3262 | 3283 | ||
@@ -3620,8 +3641,12 @@ static void si_enable_display_gap(struct radeon_device *rdev) | |||
3620 | { | 3641 | { |
3621 | u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); | 3642 | u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); |
3622 | 3643 | ||
3644 | tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK); | ||
3645 | tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) | | ||
3646 | DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE)); | ||
3647 | |||
3623 | tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); | 3648 | tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); |
3624 | tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) | | 3649 | tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) | |
3625 | DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE)); | 3650 | DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE)); |
3626 | WREG32(CG_DISPLAY_GAP_CNTL, tmp); | 3651 | WREG32(CG_DISPLAY_GAP_CNTL, tmp); |
3627 | } | 3652 | } |
@@ -4036,16 +4061,15 @@ static int si_force_switch_to_arb_f0(struct radeon_device *rdev) | |||
4036 | static u32 si_calculate_memory_refresh_rate(struct radeon_device *rdev, | 4061 | static u32 si_calculate_memory_refresh_rate(struct radeon_device *rdev, |
4037 | u32 engine_clock) | 4062 | u32 engine_clock) |
4038 | { | 4063 | { |
4039 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); | ||
4040 | u32 dram_rows; | 4064 | u32 dram_rows; |
4041 | u32 dram_refresh_rate; | 4065 | u32 dram_refresh_rate; |
4042 | u32 mc_arb_rfsh_rate; | 4066 | u32 mc_arb_rfsh_rate; |
4043 | u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT; | 4067 | u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT; |
4044 | 4068 | ||
4045 | if (pi->mem_gddr5) | 4069 | if (tmp >= 4) |
4046 | dram_rows = 1 << (tmp + 10); | 4070 | dram_rows = 16384; |
4047 | else | 4071 | else |
4048 | dram_rows = DDR3_DRAM_ROWS; | 4072 | dram_rows = 1 << (tmp + 10); |
4049 | 4073 | ||
4050 | dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3); | 4074 | dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3); |
4051 | mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64; | 4075 | mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64; |
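si_calculate_memory_refresh_rate() now derives the row count from the NOOFROWS field of MC_ARB_RAMCFG for every memory type (capped at 16384 rows for field values of 4 and above) instead of assuming DDR3_DRAM_ROWS whenever the board is not GDDR5. A worked example with illustrative numbers, assuming the driver's usual 10 kHz clock units (800 MHz engine clock, NOOFROWS = 4, refresh field = 3):

	dram_rows         = 16384
	dram_refresh_rate = 1 << (3 + 3) = 64
	mc_arb_rfsh_rate  = ((80000 * 10) * 64 / 16384 - 32) / 64
	                  = (3125 - 32) / 64 = 48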
@@ -6013,16 +6037,11 @@ int si_dpm_set_power_state(struct radeon_device *rdev) | |||
6013 | return ret; | 6037 | return ret; |
6014 | } | 6038 | } |
6015 | 6039 | ||
6016 | #if 0 | ||
6017 | /* XXX */ | ||
6018 | ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); | 6040 | ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); |
6019 | if (ret) { | 6041 | if (ret) { |
6020 | DRM_ERROR("si_dpm_force_performance_level failed\n"); | 6042 | DRM_ERROR("si_dpm_force_performance_level failed\n"); |
6021 | return ret; | 6043 | return ret; |
6022 | } | 6044 | } |
6023 | #else | ||
6024 | rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; | ||
6025 | #endif | ||
6026 | 6045 | ||
6027 | return 0; | 6046 | return 0; |
6028 | } | 6047 | } |
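si_dpm_set_power_state() now really forces the level back to AUTO through si_dpm_force_performance_level() instead of only updating the cached forced_level under #if 0. Combined with the changes to si_dpm_force_performance_level() above, the SMC message pairs become (restating the code; the firmware-side semantics are not documented here):

	/* HIGH: SetEnabledLevels(n), SetForcedLevels(1)
	 * LOW : SetForcedLevels(0),  SetEnabledLevels(1)
	 * AUTO: SetForcedLevels(0),  SetEnabledLevels(n)
	 * where n = ps->performance_level_count
	 */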
@@ -6254,9 +6273,6 @@ int si_dpm_init(struct radeon_device *rdev) | |||
6254 | struct evergreen_power_info *eg_pi; | 6273 | struct evergreen_power_info *eg_pi; |
6255 | struct ni_power_info *ni_pi; | 6274 | struct ni_power_info *ni_pi; |
6256 | struct si_power_info *si_pi; | 6275 | struct si_power_info *si_pi; |
6257 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
6258 | u16 data_offset, size; | ||
6259 | u8 frev, crev; | ||
6260 | struct atom_clock_dividers dividers; | 6276 | struct atom_clock_dividers dividers; |
6261 | int ret; | 6277 | int ret; |
6262 | u32 mask; | 6278 | u32 mask; |
@@ -6347,16 +6363,7 @@ int si_dpm_init(struct radeon_device *rdev) | |||
6347 | si_pi->vddc_phase_shed_control = | 6363 | si_pi->vddc_phase_shed_control = |
6348 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT); | 6364 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT); |
6349 | 6365 | ||
6350 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 6366 | rv770_get_engine_memory_ss(rdev); |
6351 | &frev, &crev, &data_offset)) { | ||
6352 | pi->sclk_ss = true; | ||
6353 | pi->mclk_ss = true; | ||
6354 | pi->dynamic_ss = true; | ||
6355 | } else { | ||
6356 | pi->sclk_ss = false; | ||
6357 | pi->mclk_ss = false; | ||
6358 | pi->dynamic_ss = true; | ||
6359 | } | ||
6360 | 6367 | ||
6361 | pi->asi = RV770_ASI_DFLT; | 6368 | pi->asi = RV770_ASI_DFLT; |
6362 | pi->pasi = CYPRESS_HASI_DFLT; | 6369 | pi->pasi = CYPRESS_HASI_DFLT; |
@@ -6367,8 +6374,7 @@ int si_dpm_init(struct radeon_device *rdev) | |||
6367 | eg_pi->sclk_deep_sleep = true; | 6374 | eg_pi->sclk_deep_sleep = true; |
6368 | si_pi->sclk_deep_sleep_above_low = false; | 6375 | si_pi->sclk_deep_sleep_above_low = false; |
6369 | 6376 | ||
6370 | if (pi->gfx_clock_gating && | 6377 | if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) |
6371 | (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) | ||
6372 | pi->thermal_protection = true; | 6378 | pi->thermal_protection = true; |
6373 | else | 6379 | else |
6374 | pi->thermal_protection = false; | 6380 | pi->thermal_protection = false; |