Diffstat (limited to 'drivers/gpu/drm')
114 files changed, 1858 insertions, 577 deletions
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index acf3a36c9ebc..32982da82694 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -68,15 +68,7 @@ void __armada_drm_queue_unref_work(struct drm_device *dev,
 {
 	struct armada_private *priv = dev->dev_private;
 
-	/*
-	 * Yes, we really must jump through these hoops just to store a
-	 * _pointer_ to something into the kfifo.  This is utterly insane
-	 * and idiotic, because it kfifo requires the _data_ pointed to by
-	 * the pointer const, not the pointer itself.  Not only that, but
-	 * you have to pass a pointer _to_ the pointer you want stored.
-	 */
-	const struct drm_framebuffer *silly_api_alert = fb;
-	WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert));
+	WARN_ON(!kfifo_put(&priv->fb_unref, fb));
 	schedule_work(&priv->fb_unref_work);
 }
 
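
The hoops lamented in the deleted comment went away because kfifo_put() was reworked to take the element to store by value, so a fifo declared to hold pointers stores the pointer itself. A minimal sketch of the new convention; the fifo name and depth are illustrative, not taken from the driver:

    #include <linux/kfifo.h>

    /* A fifo of up to 8 framebuffer pointers; the pointer is the element. */
    static DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);

    static void queue_unref(struct drm_framebuffer *fb)
    {
            /* kfifo_put() now takes the value to store, not a pointer to it. */
            WARN_ON(!kfifo_put(&fb_unref, fb));
    }
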
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 3f65dd6676b2..a28640f47c27 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -65,7 +65,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
 	 * then the BO is being moved and we should
 	 * store up the damage until later.
 	 */
-	if (!drm_can_sleep())
+	if (drm_can_sleep())
 		ret = ast_bo_reserve(bo, true);
 	if (ret) {
 		if (ret != -EBUSY)
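
The one-character fix above is easy to misread: drm_can_sleep() returns true when the caller is allowed to sleep, so the old !drm_can_sleep() reserved the BO only from atomic contexts, the exact opposite of the comment's intent. The same inversion is fixed in cirrus and mgag200 below. The shape the fix restores, as a sketch (assuming ret starts out as a busy status, which the hunk does not show):

    int ret = -EBUSY;  /* assumed initialization, per the surrounding function */

    /* Reserve only when sleeping is allowed; in atomic context fall
     * through with -EBUSY so the damage is stored up for later. */
    if (drm_can_sleep())
            ret = ast_bo_reserve(bo, true);
    if (ret) {
            if (ret != -EBUSY)
                    DRM_ERROR("failed to reserve fb bo\n");  /* sketch */
            return;
    }
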
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig
index c8fcf12019f0..5f8b0c2b9a44 100644
--- a/drivers/gpu/drm/bochs/Kconfig
+++ b/drivers/gpu/drm/bochs/Kconfig
@@ -2,6 +2,7 @@ config DRM_BOCHS
 	tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
 	depends on DRM && PCI
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 2fd4a92162cb..32bbba0a787b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -39,7 +39,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
 	 * then the BO is being moved and we should
 	 * store up the damage until later.
 	 */
-	if (!drm_can_sleep())
+	if (drm_can_sleep())
 		ret = cirrus_bo_reserve(bo, true);
 	if (ret) {
 		if (ret != -EBUSY)
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index dffc836144cc..f4dc9b7a3831 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -296,6 +296,18 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	case DRM_CAP_ASYNC_PAGE_FLIP:
 		req->value = dev->mode_config.async_page_flip;
 		break;
+	case DRM_CAP_CURSOR_WIDTH:
+		if (dev->mode_config.cursor_width)
+			req->value = dev->mode_config.cursor_width;
+		else
+			req->value = 64;
+		break;
+	case DRM_CAP_CURSOR_HEIGHT:
+		if (dev->mode_config.cursor_height)
+			req->value = dev->mode_config.cursor_height;
+		else
+			req->value = 64;
+		break;
 	default:
 		return -EINVAL;
 	}
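
These two capabilities let userspace discover the hardware's preferred cursor size instead of hardcoding the historical 64x64. A sketch of the query using libdrm's drmGetCap(); fd is assumed to be an open DRM device file descriptor:

    #include <stdint.h>
    #include <xf86drm.h>

    uint64_t cursor_w, cursor_h;

    /* Older kernels lack these caps and return an error: keep 64x64 then. */
    if (drmGetCap(fd, DRM_CAP_CURSOR_WIDTH, &cursor_w) < 0)
            cursor_w = 64;
    if (drmGetCap(fd, DRM_CAP_CURSOR_HEIGHT, &cursor_h) < 0)
            cursor_h = 64;
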
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 5736aaa7e86c..f7af69bcf3f4 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -468,8 +468,8 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
 	} else {
 		list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
 					 legacy_dev_list) {
-			drm_put_dev(dev);
 			list_del(&dev->legacy_dev_list);
+			drm_put_dev(dev);
 		}
 	}
 	DRM_INFO("Module unloaded\n");
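
The swap matters because drm_put_dev() drops the last reference and can free dev outright; unlinking afterwards, as the old order did, writes into freed memory. list_for_each_entry_safe() only guards against removing the current entry, not against using it after it has been freed, so the unlink has to come first:

    /* Unlink while the structure is still valid... */
    list_del(&dev->legacy_dev_list);
    /* ...then release the reference, which may free dev. */
    drm_put_dev(dev);
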
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index f227f544aa36..6e1a1a20cf6b 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -51,7 +51,7 @@ config DRM_EXYNOS_G2D
 
 config DRM_EXYNOS_IPP
 	bool "Exynos DRM IPP"
-	depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM
+	depends on DRM_EXYNOS
 	help
 	  Choose this option if you want to use IPP feature for DRM.
 
@@ -69,6 +69,6 @@ config DRM_EXYNOS_ROTATOR
 
 config DRM_EXYNOS_GSC
 	bool "Exynos DRM GSC"
-	depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+	depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM
 	help
 	  Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 9d096a0c5f8d..c204b4e3356e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -171,22 +171,28 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
 	file->driver_priv = file_priv;
 
 	ret = exynos_drm_subdrv_open(dev, file);
-	if (ret) {
-		kfree(file_priv);
-		file->driver_priv = NULL;
-	}
+	if (ret)
+		goto err_file_priv_free;
 
 	anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
 					NULL, 0);
 	if (IS_ERR(anon_filp)) {
-		kfree(file_priv);
-		return PTR_ERR(anon_filp);
+		ret = PTR_ERR(anon_filp);
+		goto err_subdrv_close;
 	}
 
 	anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
 	file_priv->anon_filp = anon_filp;
 
 	return ret;
+
+err_subdrv_close:
+	exynos_drm_subdrv_close(dev, file);
+
+err_file_priv_free:
+	kfree(file_priv);
+	file->driver_priv = NULL;
+	return ret;
 }
 
 static void exynos_drm_preclose(struct drm_device *dev,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 380aec28840b..6c1885eedfdf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -607,7 +607,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
 		reg_type = REG_TYPE_NONE;
 		DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
 		break;
-	};
+	}
 
 	return reg_type;
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index d519a4e5fe40..09312b877470 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -16,7 +16,6 @@
 #include <linux/types.h>
 #include <linux/clk.h>
 #include <linux/pm_runtime.h>
-#include <plat/map-base.h>
 
 #include <drm/drmP.h>
 #include <drm/exynos_drm.h>
@@ -826,7 +825,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
 		DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
 
 		/*
-		 * quf == NULL condition means all event deletion.
+		 * qbuf == NULL condition means all event deletion.
 		 * stop operations want to delete all event list.
 		 * another case delete only same buf id.
 		 */
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index a0e10aeb0e67..c021ddc1ffb4 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -34,6 +34,7 @@
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
+#include <linux/hdmi.h>
 
 #include <drm/exynos_drm.h>
 
@@ -59,19 +60,6 @@
 #define HDMI_AUI_VERSION	0x01
 #define HDMI_AUI_LENGTH	0x0A
 
-/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
-enum HDMI_PACKET_TYPE {
-	/* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
-	/* InfoFrame packet type */
-	HDMI_PACKET_TYPE_INFOFRAME = 0x80,
-	/* Vendor-Specific InfoFrame */
-	HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
-	/* Auxiliary Video information InfoFrame */
-	HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
-	/* Audio information InfoFrame */
-	HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
-};
-
 enum hdmi_type {
 	HDMI_TYPE13,
 	HDMI_TYPE14,
@@ -379,12 +367,6 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
 	},
 };
 
-struct hdmi_infoframe {
-	enum HDMI_PACKET_TYPE type;
-	u8 ver;
-	u8 len;
-};
-
 static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
 {
 	return readl(hdata->regs + reg_id);
@@ -682,7 +664,7 @@ static u8 hdmi_chksum(struct hdmi_context *hdata,
 }
 
 static void hdmi_reg_infoframe(struct hdmi_context *hdata,
-			struct hdmi_infoframe *infoframe)
+			union hdmi_infoframe *infoframe)
 {
 	u32 hdr_sum;
 	u8 chksum;
@@ -700,13 +682,15 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
 		return;
 	}
 
-	switch (infoframe->type) {
-	case HDMI_PACKET_TYPE_AVI:
+	switch (infoframe->any.type) {
+	case HDMI_INFOFRAME_TYPE_AVI:
 		hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
-		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
-		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
-		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
-		hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->any.type);
+		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1,
+				infoframe->any.version);
+		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->any.length);
+		hdr_sum = infoframe->any.type + infoframe->any.version +
+			  infoframe->any.length;
 
 		/* Output format zero hardcoded ,RGB YBCR selection */
 		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
@@ -722,18 +706,20 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
 		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
 
 		chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
-					infoframe->len, hdr_sum);
+					infoframe->any.length, hdr_sum);
 		DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
 		hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
 		break;
-	case HDMI_PACKET_TYPE_AUI:
+	case HDMI_INFOFRAME_TYPE_AUDIO:
 		hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
-		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
-		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
-		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
-		hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->any.type);
+		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1,
+				infoframe->any.version);
+		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->any.length);
+		hdr_sum = infoframe->any.type + infoframe->any.version +
+			  infoframe->any.length;
 		chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
-					infoframe->len, hdr_sum);
+					infoframe->any.length, hdr_sum);
 		DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
 		hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
 		break;
@@ -985,7 +971,7 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
 
 static void hdmi_conf_init(struct hdmi_context *hdata)
 {
-	struct hdmi_infoframe infoframe;
+	union hdmi_infoframe infoframe;
 
 	/* disable HPD interrupts from HDMI IP block, use GPIO instead */
 	hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
@@ -1021,14 +1007,14 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
 		hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
 		hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
 	} else {
-		infoframe.type = HDMI_PACKET_TYPE_AVI;
-		infoframe.ver = HDMI_AVI_VERSION;
-		infoframe.len = HDMI_AVI_LENGTH;
+		infoframe.any.type = HDMI_INFOFRAME_TYPE_AVI;
+		infoframe.any.version = HDMI_AVI_VERSION;
+		infoframe.any.length = HDMI_AVI_LENGTH;
 		hdmi_reg_infoframe(hdata, &infoframe);
 
-		infoframe.type = HDMI_PACKET_TYPE_AUI;
-		infoframe.ver = HDMI_AUI_VERSION;
-		infoframe.len = HDMI_AUI_LENGTH;
+		infoframe.any.type = HDMI_INFOFRAME_TYPE_AUDIO;
+		infoframe.any.version = HDMI_AUI_VERSION;
+		infoframe.any.length = HDMI_AUI_LENGTH;
 		hdmi_reg_infoframe(hdata, &infoframe);
 
 		/* enable AVI packet every vsync, fixes purple line problem */
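
The driver-private enum HDMI_PACKET_TYPE and struct hdmi_infoframe are replaced by the generic types from <linux/hdmi.h>. The part the new code relies on is the common header shared by every variant of the union (abridged here; the full definition lives in include/linux/hdmi.h):

    /* Abridged from include/linux/hdmi.h */
    struct hdmi_any_infoframe {
            enum hdmi_infoframe_type type;  /* HDMI_INFOFRAME_TYPE_AVI, _AUDIO, ... */
            unsigned char version;
            unsigned char length;
    };

    union hdmi_infoframe {
            struct hdmi_any_infoframe any;   /* common header, used above */
            struct hdmi_avi_infoframe avi;
            struct hdmi_spd_infoframe spd;
            struct hdmi_audio_infoframe audio;
            /* vendor-specific variants omitted */
    };
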
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 400b0c4a10fb..faa77f543a07 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -208,7 +208,7 @@ struct tda998x_priv {
 # define PLL_SERIAL_1_SRL_IZ(x)   (((x) & 3) << 1)
 # define PLL_SERIAL_1_SRL_MAN_IZ  (1 << 6)
 #define REG_PLL_SERIAL_2          REG(0x02, 0x01)     /* read/write */
-# define PLL_SERIAL_2_SRL_NOSC(x) (((x) & 3) << 0)
+# define PLL_SERIAL_2_SRL_NOSC(x) ((x) << 0)
 # define PLL_SERIAL_2_SRL_PR(x)   (((x) & 0xf) << 4)
 #define REG_PLL_SERIAL_3          REG(0x02, 0x02)     /* read/write */
 # define PLL_SERIAL_3_SRL_CCIR    (1 << 0)
@@ -528,10 +528,10 @@ tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p)
 {
 	uint8_t buf[PB(5) + 1];
 
+	memset(buf, 0, sizeof(buf));
 	buf[HB(0)] = 0x84;
 	buf[HB(1)] = 0x01;
 	buf[HB(2)] = 10;
-	buf[PB(0)] = 0;
 	buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
 	buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
 	buf[PB(4)] = p->audio_frame[4];
@@ -824,6 +824,11 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
 	}
 
 	div = 148500 / mode->clock;
+	if (div != 0) {
+		div--;
+		if (div > 3)
+			div = 3;
+	}
 
 	/* mute the audio FIFO: */
 	reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
@@ -913,7 +918,7 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
 
 	if (priv->rev == TDA19988) {
 		/* let incoming pixels fill the active space (if any) */
-		reg_write(encoder, REG_ENABLE_SPACE, 0x01);
+		reg_write(encoder, REG_ENABLE_SPACE, 0x00);
 	}
 
 	/* must be last register set: */
@@ -1094,6 +1099,8 @@ tda998x_encoder_destroy(struct drm_encoder *encoder)
 {
 	struct tda998x_priv *priv = to_tda998x_priv(encoder);
 	drm_i2c_encoder_destroy(encoder);
+	if (priv->cec)
+		i2c_unregister_device(priv->cec);
 	kfree(priv);
 }
 
@@ -1142,8 +1149,12 @@ tda998x_encoder_init(struct i2c_client *client,
 	priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
 	priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
 
-	priv->current_page = 0;
+	priv->current_page = 0xff;
 	priv->cec = i2c_new_dummy(client->adapter, 0x34);
+	if (!priv->cec) {
+		kfree(priv);
+		return -ENODEV;
+	}
 	priv->dpms = DRM_MODE_DPMS_OFF;
 
 	encoder_slave->slave_priv = priv;
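
Two of the tda998x changes interact: PLL_SERIAL_2_SRL_NOSC() no longer masks its argument with & 3, so the new clamp on div is what keeps the value inside the 2-bit register field. Worked through for two common pixel clocks (mode->clock is in kHz):

    unsigned div = 148500 / mode->clock;
    if (div != 0) {
            div--;
            if (div > 3)
                    div = 3;        /* largest value a 2-bit field can hold */
    }
    /* 1080p   (148500 kHz): 148500 / 148500 = 1 -> 0 */
    /* 640x480  (25175 kHz): 148500 /  25175 = 5 -> 4 -> clamped to 3 */

With the mask gone, an unclamped value would spill into neighbouring bits; previously the & 3 silently wrapped out-of-range dividers instead of clamping them to the nearest representable one.
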
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 04f1f02c4019..ec7bb0fc71bc 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -403,7 +403,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
 void intel_detect_pch(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct pci_dev *pch;
+	struct pci_dev *pch = NULL;
 
 	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
 	 * (which really amounts to a PCH but no South Display).
@@ -424,12 +424,9 @@ void intel_detect_pch(struct drm_device *dev)
 	 * all the ISA bridge devices and check for the first match, instead
 	 * of only checking the first one.
 	 */
-	pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
-	while (pch) {
-		struct pci_dev *curr = pch;
+	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
 		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
-			unsigned short id;
-			id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
 			dev_priv->pch_id = id;
 
 			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
@@ -461,18 +458,16 @@ void intel_detect_pch(struct drm_device *dev)
 				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
 				WARN_ON(!IS_HASWELL(dev));
 				WARN_ON(!IS_ULT(dev));
-			} else {
-				goto check_next;
-			}
-			pci_dev_put(pch);
+			} else
+				continue;
+
 			break;
 		}
-check_next:
-		pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
-		pci_dev_put(curr);
 	}
 	if (!pch)
-		DRM_DEBUG_KMS("No PCH found?\n");
+		DRM_DEBUG_KMS("No PCH found.\n");
+
+	pci_dev_put(pch);
 }
 
 bool i915_semaphore_is_enabled(struct drm_device *dev)
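
The rewritten loop leans on the reference-counting contract of pci_get_class(): it drops the reference on the device passed in and returns the next match with a fresh reference held. Feeding the previous result back in therefore walks the list with no manual puts, and a single pci_dev_put() afterwards (a no-op on NULL) releases whichever device the loop kept. The idiom in isolation, with interesting() standing in for the vendor/ID checks above:

    struct pci_dev *pdev = NULL;

    while ((pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pdev))) {
            if (interesting(pdev))          /* stand-in predicate */
                    break;                  /* keep the reference on the match */
    }
    pci_dev_put(pdev);                      /* NULL-safe; drops the kept ref */
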
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4a2bf8e3f739..df77e20e3c3d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1831,6 +1831,14 @@ struct drm_i915_file_private {
 
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
 #define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))
+/*
+ * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
+ * even when in MSI mode. This results in spurious interrupt warnings if the
+ * legacy irq no. is shared with another device. The kernel then disables that
+ * interrupt source and so prevents the other device from working properly.
+ */
+#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
 
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 1a24e84f2315..28d24caa49f3 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -82,9 +82,22 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 	r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
 				    "Graphics Stolen Memory");
 	if (r == NULL) {
-		DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
-			  base, base + (uint32_t)dev_priv->gtt.stolen_size);
-		base = 0;
+		/*
+		 * One more attempt but this time requesting region from
+		 * base + 1, as we have seen that this resolves the region
+		 * conflict with the PCI Bus.
+		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
+		 * PCI bus, but have an off-by-one error. Hence retry the
+		 * reservation starting from 1 instead of 0.
+		 */
+		r = devm_request_mem_region(dev->dev, base + 1,
+					    dev_priv->gtt.stolen_size - 1,
+					    "Graphics Stolen Memory");
+		if (r == NULL) {
+			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+				  base, base + (uint32_t)dev_priv->gtt.stolen_size);
+			base = 0;
+		}
 	}
 
 	return base;
@@ -201,6 +214,13 @@ int i915_gem_init_stolen(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int bios_reserved = 0;
 
+#ifdef CONFIG_INTEL_IOMMU
+	if (intel_iommu_gfx_mapped) {
+		DRM_INFO("DMAR active, disabling use of stolen memory\n");
+		return 0;
+	}
+#endif
+
 	if (dev_priv->gtt.stolen_size == 0)
 		return 0;
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d7fd2fd2f0a5..990cf8f43efd 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -146,7 +146,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
 		va_list tmp;
 
 		va_copy(tmp, args);
-		if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
+		len = vsnprintf(NULL, 0, f, tmp);
+		va_end(tmp);
+
+		if (!__i915_error_seek(e, len))
 			return;
 	}
 
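
Two defects are fixed here at once: vsnprintf() consumes the va_list it is given, so the length probe must run on a copy, and C99 requires each va_copy() to be matched by a va_end() in the same function, which the old code skipped. The pairing in isolation, as a sketch (measure_fmt_len() is a hypothetical helper):

    static int measure_fmt_len(const char *fmt, va_list args)
    {
            va_list tmp;
            int len;

            va_copy(tmp, args);                     /* probe on a copy...   */
            len = vsnprintf(NULL, 0, fmt, tmp);     /* size 0: count only   */
            va_end(tmp);                            /* ...and end that copy */

            return len;
    }
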
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 17d8fcb1b6f7..d554169ac592 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -567,8 +567,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 
 		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
 	} else {
-		enum transcoder cpu_transcoder =
-			intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+		enum transcoder cpu_transcoder = (enum transcoder) pipe;
 		u32 htotal;
 
 		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
@@ -619,33 +618,25 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
 
 /* raw reads, only for fast reads of display block, no need for forcewake etc. */
 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
-#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
 
 static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t status;
-
-	if (INTEL_INFO(dev)->gen < 7) {
-		status = pipe == PIPE_A ?
-			DE_PIPEA_VBLANK :
-			DE_PIPEB_VBLANK;
+	int reg;
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		status = GEN8_PIPE_VBLANK;
+		reg = GEN8_DE_PIPE_ISR(pipe);
+	} else if (INTEL_INFO(dev)->gen >= 7) {
+		status = DE_PIPE_VBLANK_IVB(pipe);
+		reg = DEISR;
 	} else {
-		switch (pipe) {
-		default:
-		case PIPE_A:
-			status = DE_PIPEA_VBLANK_IVB;
-			break;
-		case PIPE_B:
-			status = DE_PIPEB_VBLANK_IVB;
-			break;
-		case PIPE_C:
-			status = DE_PIPEC_VBLANK_IVB;
-			break;
-		}
+		status = DE_PIPE_VBLANK(pipe);
+		reg = DEISR;
 	}
 
-	return __raw_i915_read32(dev_priv, DEISR) & status;
+	return __raw_i915_read32(dev_priv, reg) & status;
 }
 
 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
@@ -703,7 +694,28 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 	else
 		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (HAS_DDI(dev)) {
+		/*
+		 * On HSW HDMI outputs there seems to be a 2 line
+		 * difference, whereas eDP has the normal 1 line
+		 * difference that earlier platforms have. External
+		 * DP is unknown. For now just check for the 2 line
+		 * difference case on all output types on HSW+.
+		 *
+		 * This might misinterpret the scanline counter being
+		 * one line too far along on eDP, but that's less
+		 * dangerous than the alternative since that would lead
+		 * the vblank timestamp code astray when it sees a
+		 * scanline count before vblank_start during a vblank
+		 * interrupt.
+		 */
+		in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
+		if ((in_vbl && (position == vbl_start - 2 ||
+				position == vbl_start - 1)) ||
+		    (!in_vbl && (position == vbl_end - 2 ||
+				 position == vbl_end - 1)))
+			position = (position + 2) % vtotal;
+	} else if (HAS_PCH_SPLIT(dev)) {
 		/*
 		 * The scanline counter increments at the leading edge
 		 * of hsync, ie. it completely misses the active portion
@@ -2770,10 +2782,9 @@ static void ibx_irq_postinstall(struct drm_device *dev)
 		return;
 
 	if (HAS_PCH_IBX(dev)) {
-		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
-		       SDE_TRANSA_FIFO_UNDER | SDE_POISON;
+		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
 	} else {
-		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
+		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
 
 		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
 	}
@@ -2833,20 +2844,19 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
 				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
 				DE_PLANEB_FLIP_DONE_IVB |
-				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
-				DE_ERR_INT_IVB);
+				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
-			      DE_PIPEA_VBLANK_IVB);
+			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
 
 		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
 	} else {
 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
 				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
 				DE_AUX_CHANNEL_A |
-				DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
 				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
 				DE_POISON);
-		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
+		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
+			     DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
 	}
 
 	dev_priv->irq_mask = ~display_mask;
@@ -2962,9 +2972,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 	struct drm_device *dev = dev_priv->dev;
 	uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
 		GEN8_PIPE_CDCLK_CRC_DONE |
-		GEN8_PIPE_FIFO_UNDERRUN |
 		GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
-	uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
+	uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
+		GEN8_PIPE_FIFO_UNDERRUN;
 	int pipe;
 	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
 	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index e06b9e017d6b..234ac5f7bc5a 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1244,6 +1244,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
 	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+		ironlake_edp_panel_vdd_on(intel_dp);
 		ironlake_edp_panel_off(intel_dp);
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9fa24347963a..9b8a7c7ea7fc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1092,12 +1092,12 @@ static void assert_cursor(struct drm_i915_private *dev_priv,
 	struct drm_device *dev = dev_priv->dev;
 	bool cur_state;
 
-	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
-		cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
-	else if (IS_845G(dev) || IS_I865G(dev))
+	if (IS_845G(dev) || IS_I865G(dev))
 		cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
-	else
+	else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
 		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+	else
+		cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
 
 	WARN(cur_state != state,
 	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
@@ -8586,6 +8586,20 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	if (ring->id == RCS)
 		len += 6;
 
+	/*
+	 * BSpec MI_DISPLAY_FLIP for IVB:
+	 * "The full packet must be contained within the same cache line."
+	 *
+	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
+	 * cacheline, if we ever start emitting more commands before
+	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
+	 * then do the cacheline alignment, and finally emit the
+	 * MI_DISPLAY_FLIP.
+	 */
+	ret = intel_ring_cacheline_align(ring);
+	if (ret)
+		goto err_unpin;
+
 	ret = intel_ring_begin(ring, len);
 	if (ret)
 		goto err_unpin;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 5ede4e8e290d..2688f6d64bb9 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -404,7 +404,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 	int i, ret, recv_bytes;
 	uint32_t status;
 	int try, precharge, clock = 0;
-	bool has_aux_irq = true;
+	bool has_aux_irq = HAS_AUX_IRQ(dev);
 	uint32_t timeout;
 
 	/* dp aux is extremely sensitive to irq latency, hence request the
@@ -537,6 +537,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
 	uint8_t	msg[20];
 	int msg_bytes;
 	uint8_t	ack;
+	int retry;
 
 	if (WARN_ON(send_bytes > 16))
 		return -E2BIG;
@@ -548,19 +549,21 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
 	msg[3] = send_bytes - 1;
 	memcpy(&msg[4], send, send_bytes);
 	msg_bytes = send_bytes + 4;
-	for (;;) {
+	for (retry = 0; retry < 7; retry++) {
 		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
 		if (ret < 0)
 			return ret;
 		ack >>= 4;
 		if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
-			break;
+			return send_bytes;
 		else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
-			udelay(100);
+			usleep_range(400, 500);
 		else
 			return -EIO;
 	}
-	return send_bytes;
+
+	DRM_ERROR("too many retries, giving up\n");
+	return -EIO;
 }
 
 /* Write a single byte to the aux channel in native mode */
@@ -582,6 +585,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
 	int reply_bytes;
 	uint8_t ack;
 	int ret;
+	int retry;
 
 	if (WARN_ON(recv_bytes > 19))
 		return -E2BIG;
@@ -595,7 +599,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
 	msg_bytes = 4;
 	reply_bytes = recv_bytes + 1;
 
-	for (;;) {
+	for (retry = 0; retry < 7; retry++) {
 		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
 				      reply, reply_bytes);
 		if (ret == 0)
@@ -608,10 +612,13 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
 			return ret - 1;
 		}
 		else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
-			udelay(100);
+			usleep_range(400, 500);
 		else
 			return -EIO;
 	}
+
+	DRM_ERROR("too many retries, giving up\n");
+	return -EIO;
 }
 
 static int
@@ -1242,17 +1249,24 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 
 	DRM_DEBUG_KMS("Turn eDP power off\n");
 
+	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
+
 	pp = ironlake_get_pp_control(intel_dp);
 	/* We need to switch off panel power _and_ force vdd, for otherwise some
 	 * panels get very unhappy and cease to work. */
-	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
 
 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
 
+	intel_dp->want_panel_vdd = false;
+
 	ironlake_wait_panel_off(intel_dp);
+
+	/* We got a reference when we enabled the VDD. */
+	intel_runtime_pm_put(dev_priv);
 }
 
 void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
@@ -1632,7 +1646,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
 		val |= EDP_PSR_LINK_DISABLE;
 
 	I915_WRITE(EDP_PSR_CTL(dev), val |
-		   IS_BROADWELL(dev) ? 0 : link_entry_time |
+		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
 		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
 		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
 		   EDP_PSR_ENABLE);
@@ -1777,6 +1791,7 @@ static void intel_disable_dp(struct intel_encoder *encoder)
 
 	/* Make sure the panel is off before trying to change the mode. But also
 	 * ensure that we have vdd while we switch off the panel. */
+	ironlake_edp_panel_vdd_on(intel_dp);
 	ironlake_edp_backlight_off(intel_dp);
 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 	ironlake_edp_panel_off(intel_dp);
@@ -1869,10 +1884,12 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
 
 	mutex_unlock(&dev_priv->dpio_lock);
 
-	/* init power sequencer on this pipe and port */
-	intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
-	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
-						      &power_seq);
+	if (is_edp(intel_dp)) {
+		/* init power sequencer on this pipe and port */
+		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+							      &power_seq);
+	}
 
 	intel_enable_dp(encoder);
 
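
The two for (;;) loops in the native read/write paths could spin indefinitely against a sink that keeps answering DP_AUX_NATIVE_REPLY_DEFER; both are now bounded at seven attempts, and the busy-waiting udelay(100) becomes usleep_range(400, 500) so the CPU can do something else while the sink is busy. The skeleton both loops now share, reduced to its shape (aux_transfer(), reply_is_ack() and reply_is_defer() are stand-ins, not driver functions):

    int retry;

    for (retry = 0; retry < 7; retry++) {
            ret = aux_transfer();                   /* stand-in */
            if (reply_is_ack(ret))
                    return ret;                     /* success: stop retrying */
            else if (reply_is_defer(ret))
                    usleep_range(400, 500);         /* sink busy: sleep, retry */
            else
                    return -EIO;                    /* hard error: stop */
    }

    DRM_ERROR("too many retries, giving up\n");
    return -EIO;
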
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 6db0d9d17f47..ee3181ebcc92 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -845,7 +845,7 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
 {
 	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 
-	if (IS_G4X(dev))
+	if (!hdmi->has_hdmi_sink || IS_G4X(dev))
 		return 165000;
 	else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
 		return 300000;
@@ -899,8 +899,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 	 * outputs. We also need to check that the higher clock still fits
 	 * within limits.
 	 */
-	if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
-	    && HAS_PCH_SPLIT(dev)) {
+	if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink &&
+	    clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) {
 		DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
 		desired_bpp = 12*3;
 
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index b1dc33f47899..d33b61d0dd33 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -258,13 +258,6 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
 	algo->data = bus;
 }
 
-/*
- * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI
- * mode. This results in spurious interrupt warnings if the legacy irq no. is
- * shared with another device. The kernel then disables that interrupt source
- * and so prevents the other device from working properly.
- */
-#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
 static int
 gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
 		     u32 gmbus2_status,
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 4e960ec7419f..acde2945eb8a 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -226,6 +226,8 @@ struct opregion_asle {
 #define ACPI_DIGITAL_OUTPUT		(3<<8)
 #define ACPI_LVDS_OUTPUT		(4<<8)
 
+#define MAX_DSLP	1500
+
 #ifdef CONFIG_ACPI
 static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
 {
@@ -260,10 +262,11 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
 		/* The spec says 2ms should be the default, but it's too small
 		 * for some machines. */
 		dslp = 50;
-	} else if (dslp > 500) {
+	} else if (dslp > MAX_DSLP) {
 		/* Hey bios, trust must be earned. */
-		WARN_ONCE(1, "excessive driver sleep timeout (DSPL) %u\n", dslp);
-		dslp = 500;
+		DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, "
+			      "using %u ms instead\n", dslp, MAX_DSLP);
+		dslp = MAX_DSLP;
 	}
 
 	/* The spec tells us to do this, but we are the only user... */
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 350de359123a..079ea38f14d9 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -698,7 +698,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
 		freq /= 0xff;
 
 	ctl = freq << 17;
-	if (IS_GEN2(dev) && panel->backlight.combination_mode)
+	if (panel->backlight.combination_mode)
 		ctl |= BLM_LEGACY_MODE;
 	if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm)
 		ctl |= BLM_POLARITY_PNV;
@@ -979,7 +979,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector)
 
 	ctl = I915_READ(BLC_PWM_CTL);
 
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev))
 		panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
 
 	if (IS_PINEVIEW(dev))
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d77cc81900f9..e1fc35a72656 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3493,6 +3493,8 @@ static void valleyview_setup_pctx(struct drm_device *dev)
 	u32 pcbr;
 	int pctx_size = 24*1024;
 
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
 	pcbr = I915_READ(VLV_PCBR);
 	if (pcbr) {
 		/* BIOS set it up already, grab the pre-alloc'd space */
@@ -3542,8 +3544,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
 		I915_WRITE(GTFIFODBG, gtfifodbg);
 	}
 
-	valleyview_setup_pctx(dev);
-
 	/* If VLV, Forcewake all wells, else re-direct to regular path */
 	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
@@ -4395,6 +4395,8 @@ void intel_enable_gt_powersave(struct drm_device *dev)
 		ironlake_enable_rc6(dev);
 		intel_init_emon(dev);
 	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+		if (IS_VALLEYVIEW(dev))
+			valleyview_setup_pctx(dev);
 		/*
 		 * PCU communication is slow and this doesn't need to be
 		 * done at any specific time, so do this out of our fast path
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index b7f1742caf87..31b36c5ac894 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
| @@ -1653,6 +1653,27 @@ int intel_ring_begin(struct intel_ring_buffer *ring, | |||
| 1653 | return 0; | 1653 | return 0; |
| 1654 | } | 1654 | } |
| 1655 | 1655 | ||
| 1656 | /* Align the ring tail to a cacheline boundary */ | ||
| 1657 | int intel_ring_cacheline_align(struct intel_ring_buffer *ring) | ||
| 1658 | { | ||
| 1659 | int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t); | ||
| 1660 | int ret; | ||
| 1661 | |||
| 1662 | if (num_dwords == 0) | ||
| 1663 | return 0; | ||
| 1664 | |||
| 1665 | ret = intel_ring_begin(ring, num_dwords); | ||
| 1666 | if (ret) | ||
| 1667 | return ret; | ||
| 1668 | |||
| 1669 | while (num_dwords--) | ||
| 1670 | intel_ring_emit(ring, MI_NOOP); | ||
| 1671 | |||
| 1672 | intel_ring_advance(ring); | ||
| 1673 | |||
| 1674 | return 0; | ||
| 1675 | } | ||
| 1676 | |||
| 1656 | void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) | 1677 | void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) |
| 1657 | { | 1678 | { |
| 1658 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 1679 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
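The new intel_ring_cacheline_align() pads the ring with MI_NOOPs up to the next 64-byte cacheline, so callers can guarantee that the commands that follow start cacheline-aligned. One subtlety in the version above: (64 - (ring->tail & 63)) / 4 evaluates to 16, not 0, when the tail is already aligned, so the num_dwords == 0 early-out never fires and an aligned tail still receives a full cacheline of padding. A stand-alone sketch of the arithmetic (a hypothetical helper, not part of the patch) that also skips the aligned case:

#include <assert.h>
#include <stdint.h>

/* Dwords of MI_NOOP padding needed to bring 'tail' to the next 64-byte
 * cacheline; masking with 63 before dividing makes an already-aligned
 * tail cost zero dwords instead of sixteen. */
static int pad_dwords_to_cacheline(uint32_t tail)
{
	return ((64 - (tail & 63)) & 63) / 4;
}

int main(void)
{
	assert(pad_dwords_to_cacheline(0) == 0);   /* already aligned */
	assert(pad_dwords_to_cacheline(4) == 15);  /* 60 bytes short  */
	assert(pad_dwords_to_cacheline(60) == 1);  /* one dword left  */
	return 0;
}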
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 71a73f4fe252..0b243ce33714 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
| @@ -233,6 +233,7 @@ intel_write_status_page(struct intel_ring_buffer *ring, | |||
| 233 | void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); | 233 | void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); |
| 234 | 234 | ||
| 235 | int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); | 235 | int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); |
| 236 | int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring); | ||
| 236 | static inline void intel_ring_emit(struct intel_ring_buffer *ring, | 237 | static inline void intel_ring_emit(struct intel_ring_buffer *ring, |
| 237 | u32 data) | 238 | u32 data) |
| 238 | { | 239 | { |
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c index f9adc27ef32a..13b7dd83faa9 100644 --- a/drivers/gpu/drm/mgag200/mgag200_fb.c +++ b/drivers/gpu/drm/mgag200/mgag200_fb.c | |||
| @@ -41,7 +41,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev, | |||
| 41 | * then the BO is being moved and we should | 41 | * then the BO is being moved and we should |
| 42 | * store up the damage until later. | 42 | * store up the damage until later. |
| 43 | */ | 43 | */ |
| 44 | if (!drm_can_sleep()) | 44 | if (drm_can_sleep()) |
| 45 | ret = mgag200_bo_reserve(bo, true); | 45 | ret = mgag200_bo_reserve(bo, true); |
| 46 | if (ret) { | 46 | if (ret) { |
| 47 | if (ret != -EBUSY) | 47 | if (ret != -EBUSY) |
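The mgag200_fb.c hunk fixes an inverted test: the blocking BO reserve should be attempted when sleeping is allowed, but the old code tried it only in atomic context, which is exactly when it must not block. With drm_can_sleep() gating the reserve, the blocking path is only taken from contexts that may sleep, and atomic callers defer the damage as the comment above intends.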
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index b8583f275e80..968374776db9 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c | |||
| @@ -1519,11 +1519,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector, | |||
| 1519 | (mga_vga_calculate_mode_bandwidth(mode, bpp) | 1519 | (mga_vga_calculate_mode_bandwidth(mode, bpp) |
| 1520 | > (32700 * 1024))) { | 1520 | > (32700 * 1024))) { |
| 1521 | return MODE_BANDWIDTH; | 1521 | return MODE_BANDWIDTH; |
| 1522 | } else if (mode->type == G200_EH && | 1522 | } else if (mdev->type == G200_EH && |
| 1523 | (mga_vga_calculate_mode_bandwidth(mode, bpp) | 1523 | (mga_vga_calculate_mode_bandwidth(mode, bpp) |
| 1524 | > (37500 * 1024))) { | 1524 | > (37500 * 1024))) { |
| 1525 | return MODE_BANDWIDTH; | 1525 | return MODE_BANDWIDTH; |
| 1526 | } else if (mode->type == G200_ER && | 1526 | } else if (mdev->type == G200_ER && |
| 1527 | (mga_vga_calculate_mode_bandwidth(mode, | 1527 | (mga_vga_calculate_mode_bandwidth(mode, |
| 1528 | bpp) > (55000 * 1024))) { | 1528 | bpp) > (55000 * 1024))) { |
| 1529 | return MODE_BANDWIDTH; | 1529 | return MODE_BANDWIDTH; |
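In the mgag200_mode.c bandwidth check, mode->type is a bitmask of DRM_MODE_TYPE_* flags describing the mode itself, while the per-board bandwidth caps are keyed to the G200 variant kept in mdev->type. Comparing the mode flags against the G200_EH and G200_ER enum values was a copy-and-paste slip that left those two limits effectively unenforced; the fix tests the board type instead.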
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 1964f4f0d452..84c5b13b33c9 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | |||
| @@ -39,6 +39,7 @@ struct mdp4_crtc { | |||
| 39 | spinlock_t lock; | 39 | spinlock_t lock; |
| 40 | bool stale; | 40 | bool stale; |
| 41 | uint32_t width, height; | 41 | uint32_t width, height; |
| 42 | uint32_t x, y; | ||
| 42 | 43 | ||
| 43 | /* next cursor to scan-out: */ | 44 | /* next cursor to scan-out: */ |
| 44 | uint32_t next_iova; | 45 | uint32_t next_iova; |
| @@ -57,9 +58,16 @@ struct mdp4_crtc { | |||
| 57 | #define PENDING_FLIP 0x2 | 58 | #define PENDING_FLIP 0x2 |
| 58 | atomic_t pending; | 59 | atomic_t pending; |
| 59 | 60 | ||
| 60 | /* the fb that we currently hold a scanout ref to: */ | 61 | /* the fb that we logically (from the PoV of the KMS API) hold
| 62 | * a ref to, which we may not yet be scanning out (we may still | ||
| 63 | * be scanning out the previous fb in case of a page_flip while | ||
| 64 | * waiting for gpu rendering to complete). | ||
| 65 | */ | ||
| 61 | struct drm_framebuffer *fb; | 66 | struct drm_framebuffer *fb; |
| 62 | 67 | ||
| 68 | /* the fb that we currently hold a scanout ref to: */ | ||
| 69 | struct drm_framebuffer *scanout_fb; | ||
| 70 | |||
| 63 | /* for unref'ing framebuffers after scanout completes: */ | 71 | /* for unref'ing framebuffers after scanout completes: */ |
| 64 | struct drm_flip_work unref_fb_work; | 72 | struct drm_flip_work unref_fb_work; |
| 65 | 73 | ||
| @@ -77,24 +85,73 @@ static struct mdp4_kms *get_kms(struct drm_crtc *crtc) | |||
| 77 | return to_mdp4_kms(to_mdp_kms(priv->kms)); | 85 | return to_mdp4_kms(to_mdp_kms(priv->kms)); |
| 78 | } | 86 | } |
| 79 | 87 | ||
| 80 | static void update_fb(struct drm_crtc *crtc, bool async, | 88 | static void request_pending(struct drm_crtc *crtc, uint32_t pending) |
| 81 | struct drm_framebuffer *new_fb) | ||
| 82 | { | 89 | { |
| 83 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | 90 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); |
| 84 | struct drm_framebuffer *old_fb = mdp4_crtc->fb; | ||
| 85 | 91 | ||
| 86 | if (old_fb) | 92 | atomic_or(pending, &mdp4_crtc->pending); |
| 87 | drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb); | 93 | mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); |
| 94 | } | ||
| 95 | |||
| 96 | static void crtc_flush(struct drm_crtc *crtc) | ||
| 97 | { | ||
| 98 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | ||
| 99 | struct mdp4_kms *mdp4_kms = get_kms(crtc); | ||
| 100 | uint32_t i, flush = 0; | ||
| 101 | |||
| 102 | for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) { | ||
| 103 | struct drm_plane *plane = mdp4_crtc->planes[i]; | ||
| 104 | if (plane) { | ||
| 105 | enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); | ||
| 106 | flush |= pipe2flush(pipe_id); | ||
| 107 | } | ||
| 108 | } | ||
| 109 | flush |= ovlp2flush(mdp4_crtc->ovlp); | ||
| 110 | |||
| 111 | DBG("%s: flush=%08x", mdp4_crtc->name, flush); | ||
| 112 | |||
| 113 | mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush); | ||
| 114 | } | ||
| 115 | |||
| 116 | static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb) | ||
| 117 | { | ||
| 118 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | ||
| 119 | struct drm_framebuffer *old_fb = mdp4_crtc->fb; | ||
| 88 | 120 | ||
| 89 | /* grab reference to incoming scanout fb: */ | 121 | /* grab reference to incoming scanout fb: */ |
| 90 | drm_framebuffer_reference(new_fb); | 122 | drm_framebuffer_reference(new_fb); |
| 91 | mdp4_crtc->base.fb = new_fb; | 123 | mdp4_crtc->base.fb = new_fb; |
| 92 | mdp4_crtc->fb = new_fb; | 124 | mdp4_crtc->fb = new_fb; |
| 93 | 125 | ||
| 94 | if (!async) { | 126 | if (old_fb) |
| 95 | /* enable vblank to pick up the old_fb */ | 127 | drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb); |
| 96 | mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); | 128 | } |
| 97 | } | 129 | |
| 130 | /* unlike update_fb(), take a ref to the new scanout fb *before* updating | ||
| 131 | * the plane, then call this. This is needed to ensure we don't unref | ||
| 132 | * the buffer that is actually still being scanned out. | ||
| 133 | * | ||
| 134 | * Note that this whole thing goes away with atomic, since we can defer | ||
| 135 | * calling into the driver until rendering is done. | ||
| 136 | */ | ||
| 137 | static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb) | ||
| 138 | { | ||
| 139 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | ||
| 140 | |||
| 141 | /* flush updates, to make sure hw is updated to new scanout fb, | ||
| 142 | * so that we can safely queue unref to current fb (ie. next | ||
| 143 | * vblank we know hw is done w/ previous scanout_fb). | ||
| 144 | */ | ||
| 145 | crtc_flush(crtc); | ||
| 146 | |||
| 147 | if (mdp4_crtc->scanout_fb) | ||
| 148 | drm_flip_work_queue(&mdp4_crtc->unref_fb_work, | ||
| 149 | mdp4_crtc->scanout_fb); | ||
| 150 | |||
| 151 | mdp4_crtc->scanout_fb = fb; | ||
| 152 | |||
| 153 | /* enable vblank to complete flip: */ | ||
| 154 | request_pending(crtc, PENDING_FLIP); | ||
| 98 | } | 155 | } |
| 99 | 156 | ||
| 100 | /* if file!=NULL, this is preclose potential cancel-flip path */ | 157 | /* if file!=NULL, this is preclose potential cancel-flip path */ |
| @@ -120,34 +177,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) | |||
| 120 | spin_unlock_irqrestore(&dev->event_lock, flags); | 177 | spin_unlock_irqrestore(&dev->event_lock, flags); |
| 121 | } | 178 | } |
| 122 | 179 | ||
| 123 | static void crtc_flush(struct drm_crtc *crtc) | ||
| 124 | { | ||
| 125 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | ||
| 126 | struct mdp4_kms *mdp4_kms = get_kms(crtc); | ||
| 127 | uint32_t i, flush = 0; | ||
| 128 | |||
| 129 | for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) { | ||
| 130 | struct drm_plane *plane = mdp4_crtc->planes[i]; | ||
| 131 | if (plane) { | ||
| 132 | enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); | ||
| 133 | flush |= pipe2flush(pipe_id); | ||
| 134 | } | ||
| 135 | } | ||
| 136 | flush |= ovlp2flush(mdp4_crtc->ovlp); | ||
| 137 | |||
| 138 | DBG("%s: flush=%08x", mdp4_crtc->name, flush); | ||
| 139 | |||
| 140 | mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush); | ||
| 141 | } | ||
| 142 | |||
| 143 | static void request_pending(struct drm_crtc *crtc, uint32_t pending) | ||
| 144 | { | ||
| 145 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | ||
| 146 | |||
| 147 | atomic_or(pending, &mdp4_crtc->pending); | ||
| 148 | mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); | ||
| 149 | } | ||
| 150 | |||
| 151 | static void pageflip_cb(struct msm_fence_cb *cb) | 180 | static void pageflip_cb(struct msm_fence_cb *cb) |
| 152 | { | 181 | { |
| 153 | struct mdp4_crtc *mdp4_crtc = | 182 | struct mdp4_crtc *mdp4_crtc = |
| @@ -158,11 +187,9 @@ static void pageflip_cb(struct msm_fence_cb *cb) | |||
| 158 | if (!fb) | 187 | if (!fb) |
| 159 | return; | 188 | return; |
| 160 | 189 | ||
| 190 | drm_framebuffer_reference(fb); | ||
| 161 | mdp4_plane_set_scanout(mdp4_crtc->plane, fb); | 191 | mdp4_plane_set_scanout(mdp4_crtc->plane, fb); |
| 162 | crtc_flush(crtc); | 192 | update_scanout(crtc, fb); |
| 163 | |||
| 164 | /* enable vblank to complete flip: */ | ||
| 165 | request_pending(crtc, PENDING_FLIP); | ||
| 166 | } | 193 | } |
| 167 | 194 | ||
| 168 | static void unref_fb_worker(struct drm_flip_work *work, void *val) | 195 | static void unref_fb_worker(struct drm_flip_work *work, void *val) |
| @@ -320,6 +347,20 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc, | |||
| 320 | mode->vsync_end, mode->vtotal, | 347 | mode->vsync_end, mode->vtotal, |
| 321 | mode->type, mode->flags); | 348 | mode->type, mode->flags); |
| 322 | 349 | ||
| 350 | /* grab extra ref for update_scanout() */ | ||
| 351 | drm_framebuffer_reference(crtc->fb); | ||
| 352 | |||
| 353 | ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb, | ||
| 354 | 0, 0, mode->hdisplay, mode->vdisplay, | ||
| 355 | x << 16, y << 16, | ||
| 356 | mode->hdisplay << 16, mode->vdisplay << 16); | ||
| 357 | if (ret) { | ||
| 358 | drm_framebuffer_unreference(crtc->fb); | ||
| 359 | dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", | ||
| 360 | mdp4_crtc->name, ret); | ||
| 361 | return ret; | ||
| 362 | } | ||
| 363 | |||
| 323 | mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma), | 364 | mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma), |
| 324 | MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) | | 365 | MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) | |
| 325 | MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay)); | 366 | MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay)); |
| @@ -341,24 +382,15 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc, | |||
| 341 | 382 | ||
| 342 | mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1); | 383 | mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1); |
| 343 | 384 | ||
| 344 | update_fb(crtc, false, crtc->fb); | ||
| 345 | |||
| 346 | ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb, | ||
| 347 | 0, 0, mode->hdisplay, mode->vdisplay, | ||
| 348 | x << 16, y << 16, | ||
| 349 | mode->hdisplay << 16, mode->vdisplay << 16); | ||
| 350 | if (ret) { | ||
| 351 | dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", | ||
| 352 | mdp4_crtc->name, ret); | ||
| 353 | return ret; | ||
| 354 | } | ||
| 355 | |||
| 356 | if (dma == DMA_E) { | 385 | if (dma == DMA_E) { |
| 357 | mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000); | 386 | mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000); |
| 358 | mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000); | 387 | mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000); |
| 359 | mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000); | 388 | mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000); |
| 360 | } | 389 | } |
| 361 | 390 | ||
| 391 | update_fb(crtc, crtc->fb); | ||
| 392 | update_scanout(crtc, crtc->fb); | ||
| 393 | |||
| 362 | return 0; | 394 | return 0; |
| 363 | } | 395 | } |
| 364 | 396 | ||
| @@ -385,13 +417,24 @@ static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, | |||
| 385 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | 417 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); |
| 386 | struct drm_plane *plane = mdp4_crtc->plane; | 418 | struct drm_plane *plane = mdp4_crtc->plane; |
| 387 | struct drm_display_mode *mode = &crtc->mode; | 419 | struct drm_display_mode *mode = &crtc->mode; |
| 420 | int ret; | ||
| 388 | 421 | ||
| 389 | update_fb(crtc, false, crtc->fb); | 422 | /* grab extra ref for update_scanout() */ |
| 423 | drm_framebuffer_reference(crtc->fb); | ||
| 390 | 424 | ||
| 391 | return mdp4_plane_mode_set(plane, crtc, crtc->fb, | 425 | ret = mdp4_plane_mode_set(plane, crtc, crtc->fb, |
| 392 | 0, 0, mode->hdisplay, mode->vdisplay, | 426 | 0, 0, mode->hdisplay, mode->vdisplay, |
| 393 | x << 16, y << 16, | 427 | x << 16, y << 16, |
| 394 | mode->hdisplay << 16, mode->vdisplay << 16); | 428 | mode->hdisplay << 16, mode->vdisplay << 16); |
| 429 | if (ret) { | ||
| 430 | drm_framebuffer_unreference(crtc->fb); | ||
| 431 | return ret; | ||
| 432 | } | ||
| 433 | |||
| 434 | update_fb(crtc, crtc->fb); | ||
| 435 | update_scanout(crtc, crtc->fb); | ||
| 436 | |||
| 437 | return 0; | ||
| 395 | } | 438 | } |
| 396 | 439 | ||
| 397 | static void mdp4_crtc_load_lut(struct drm_crtc *crtc) | 440 | static void mdp4_crtc_load_lut(struct drm_crtc *crtc) |
| @@ -419,7 +462,7 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc, | |||
| 419 | mdp4_crtc->event = event; | 462 | mdp4_crtc->event = event; |
| 420 | spin_unlock_irqrestore(&dev->event_lock, flags); | 463 | spin_unlock_irqrestore(&dev->event_lock, flags); |
| 421 | 464 | ||
| 422 | update_fb(crtc, true, new_fb); | 465 | update_fb(crtc, new_fb); |
| 423 | 466 | ||
| 424 | return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb); | 467 | return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb); |
| 425 | } | 468 | } |
| @@ -442,12 +485,12 @@ static int mdp4_crtc_set_property(struct drm_crtc *crtc, | |||
| 442 | static void update_cursor(struct drm_crtc *crtc) | 485 | static void update_cursor(struct drm_crtc *crtc) |
| 443 | { | 486 | { |
| 444 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | 487 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); |
| 488 | struct mdp4_kms *mdp4_kms = get_kms(crtc); | ||
| 445 | enum mdp4_dma dma = mdp4_crtc->dma; | 489 | enum mdp4_dma dma = mdp4_crtc->dma; |
| 446 | unsigned long flags; | 490 | unsigned long flags; |
| 447 | 491 | ||
| 448 | spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); | 492 | spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); |
| 449 | if (mdp4_crtc->cursor.stale) { | 493 | if (mdp4_crtc->cursor.stale) { |
| 450 | struct mdp4_kms *mdp4_kms = get_kms(crtc); | ||
| 451 | struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo; | 494 | struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo; |
| 452 | struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo; | 495 | struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo; |
| 453 | uint32_t iova = mdp4_crtc->cursor.next_iova; | 496 | uint32_t iova = mdp4_crtc->cursor.next_iova; |
| @@ -479,6 +522,11 @@ static void update_cursor(struct drm_crtc *crtc) | |||
| 479 | mdp4_crtc->cursor.scanout_bo = next_bo; | 522 | mdp4_crtc->cursor.scanout_bo = next_bo; |
| 480 | mdp4_crtc->cursor.stale = false; | 523 | mdp4_crtc->cursor.stale = false; |
| 481 | } | 524 | } |
| 525 | |||
| 526 | mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma), | ||
| 527 | MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) | | ||
| 528 | MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y)); | ||
| 529 | |||
| 482 | spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); | 530 | spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); |
| 483 | } | 531 | } |
| 484 | 532 | ||
| @@ -530,6 +578,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 530 | drm_gem_object_unreference_unlocked(old_bo); | 578 | drm_gem_object_unreference_unlocked(old_bo); |
| 531 | } | 579 | } |
| 532 | 580 | ||
| 581 | crtc_flush(crtc); | ||
| 533 | request_pending(crtc, PENDING_CURSOR); | 582 | request_pending(crtc, PENDING_CURSOR); |
| 534 | 583 | ||
| 535 | return 0; | 584 | return 0; |
| @@ -542,12 +591,15 @@ fail: | |||
| 542 | static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | 591 | static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) |
| 543 | { | 592 | { |
| 544 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | 593 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); |
| 545 | struct mdp4_kms *mdp4_kms = get_kms(crtc); | 594 | unsigned long flags; |
| 546 | enum mdp4_dma dma = mdp4_crtc->dma; | ||
| 547 | 595 | ||
| 548 | mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma), | 596 | spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); |
| 549 | MDP4_DMA_CURSOR_POS_X(x) | | 597 | mdp4_crtc->cursor.x = x; |
| 550 | MDP4_DMA_CURSOR_POS_Y(y)); | 598 | mdp4_crtc->cursor.y = y; |
| 599 | spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); | ||
| 600 | |||
| 601 | crtc_flush(crtc); | ||
| 602 | request_pending(crtc, PENDING_CURSOR); | ||
| 551 | 603 | ||
| 552 | return 0; | 604 | return 0; |
| 553 | } | 605 | } |
| @@ -713,6 +765,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, | |||
| 713 | crtc = &mdp4_crtc->base; | 765 | crtc = &mdp4_crtc->base; |
| 714 | 766 | ||
| 715 | mdp4_crtc->plane = plane; | 767 | mdp4_crtc->plane = plane; |
| 768 | mdp4_crtc->id = id; | ||
| 716 | 769 | ||
| 717 | mdp4_crtc->ovlp = ovlp_id; | 770 | mdp4_crtc->ovlp = ovlp_id; |
| 718 | mdp4_crtc->dma = dma_id; | 771 | mdp4_crtc->dma = dma_id; |
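The mdp4_crtc.c rework splits the framebuffer bookkeeping in two: update_fb() now tracks only the fb the CRTC logically owns from the KMS API's point of view, while the new update_scanout() tracks what the hardware is actually scanning out. The ordering is the point of the patch: callers take an extra reference on the incoming fb, program the plane, and only then call update_scanout(), which flushes the overlay registers before queueing the unref of the previous scanout fb, so a buffer still on screen cannot be released until the vblank that completes the flip. The error paths in mode_set and mode_set_base drop the extra reference when mdp4_plane_mode_set() fails. Cursor handling gets the same deferred treatment: mdp4_crtc_cursor_move() just records x/y under the cursor lock, and update_cursor() programs REG_MDP4_DMA_CURSOR_POS, so image and position updates share one flush path.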
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c index 2406027200ec..1e893dd13859 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | |||
| @@ -170,8 +170,8 @@ int mdp4_plane_mode_set(struct drm_plane *plane, | |||
| 170 | MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h)); | 170 | MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h)); |
| 171 | 171 | ||
| 172 | mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe), | 172 | mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe), |
| 173 | MDP4_PIPE_SRC_XY_X(crtc_x) | | 173 | MDP4_PIPE_DST_XY_X(crtc_x) | |
| 174 | MDP4_PIPE_SRC_XY_Y(crtc_y)); | 174 | MDP4_PIPE_DST_XY_Y(crtc_y)); |
| 175 | 175 | ||
| 176 | mdp4_plane_set_scanout(plane, fb); | 176 | mdp4_plane_set_scanout(plane, fb); |
| 177 | 177 | ||
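The mdp4_plane.c hunk is a field-macro copy-and-paste fix: REG_MDP4_PIPE_DST_XY was being packed with the MDP4_PIPE_SRC_XY_X/Y macros. The two registers presumably share their field layout, which would explain why this worked at all, but using the matching DST macros keeps the register programming self-describing.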
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 71a3b2345eb3..f2794021f086 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | |||
| @@ -296,6 +296,7 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc, | |||
| 296 | x << 16, y << 16, | 296 | x << 16, y << 16, |
| 297 | mode->hdisplay << 16, mode->vdisplay << 16); | 297 | mode->hdisplay << 16, mode->vdisplay << 16); |
| 298 | if (ret) { | 298 | if (ret) { |
| 299 | drm_framebuffer_unreference(crtc->fb); | ||
| 299 | dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", | 300 | dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", |
| 300 | mdp5_crtc->name, ret); | 301 | mdp5_crtc->name, ret); |
| 301 | return ret; | 302 | return ret; |
| @@ -343,11 +344,15 @@ static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, | |||
| 343 | 0, 0, mode->hdisplay, mode->vdisplay, | 344 | 0, 0, mode->hdisplay, mode->vdisplay, |
| 344 | x << 16, y << 16, | 345 | x << 16, y << 16, |
| 345 | mode->hdisplay << 16, mode->vdisplay << 16); | 346 | mode->hdisplay << 16, mode->vdisplay << 16); |
| 347 | if (ret) { | ||
| 348 | drm_framebuffer_unreference(crtc->fb); | ||
| 349 | return ret; | ||
| 350 | } | ||
| 346 | 351 | ||
| 347 | update_fb(crtc, crtc->fb); | 352 | update_fb(crtc, crtc->fb); |
| 348 | update_scanout(crtc, crtc->fb); | 353 | update_scanout(crtc, crtc->fb); |
| 349 | 354 | ||
| 350 | return ret; | 355 | return 0; |
| 351 | } | 356 | } |
| 352 | 357 | ||
| 353 | static void mdp5_crtc_load_lut(struct drm_crtc *crtc) | 358 | static void mdp5_crtc_load_lut(struct drm_crtc *crtc) |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index d8d60c969ac7..3da8264d3039 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
| @@ -644,7 +644,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, | |||
| 644 | 644 | ||
| 645 | fail: | 645 | fail: |
| 646 | if (obj) | 646 | if (obj) |
| 647 | drm_gem_object_unreference_unlocked(obj); | 647 | drm_gem_object_unreference(obj); |
| 648 | 648 | ||
| 649 | return ERR_PTR(ret); | 649 | return ERR_PTR(ret); |
| 650 | } | 650 | } |
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 5281d4bc37f7..5423e914e491 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
| @@ -163,7 +163,7 @@ retry: | |||
| 163 | 163 | ||
| 164 | 164 | ||
| 165 | /* if locking succeeded, pin bo: */ | 165 | /* if locking succeeded, pin bo: */ |
| 166 | ret = msm_gem_get_iova(&msm_obj->base, | 166 | ret = msm_gem_get_iova_locked(&msm_obj->base, |
| 167 | submit->gpu->id, &iova); | 167 | submit->gpu->id, &iova); |
| 168 | 168 | ||
| 169 | /* this would break the logic in the fail path.. there is no | 169 | /* this would break the logic in the fail path.. there is no |
| @@ -247,7 +247,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob | |||
| 247 | /* For now, just map the entire thing. Eventually we probably | 247 | /* For now, just map the entire thing. Eventually we probably |
| 248 | * want to do it page-by-page, w/ kmap() if not vmap()d. | 248 | * want to do it page-by-page, w/ kmap() if not vmap()d. |
| 249 | */ | 249 | */ |
| 250 | ptr = msm_gem_vaddr(&obj->base); | 250 | ptr = msm_gem_vaddr_locked(&obj->base); |
| 251 | 251 | ||
| 252 | if (IS_ERR(ptr)) { | 252 | if (IS_ERR(ptr)) { |
| 253 | ret = PTR_ERR(ptr); | 253 | ret = PTR_ERR(ptr); |
| @@ -307,14 +307,12 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool fail) | |||
| 307 | { | 307 | { |
| 308 | unsigned i; | 308 | unsigned i; |
| 309 | 309 | ||
| 310 | mutex_lock(&submit->dev->struct_mutex); | ||
| 311 | for (i = 0; i < submit->nr_bos; i++) { | 310 | for (i = 0; i < submit->nr_bos; i++) { |
| 312 | struct msm_gem_object *msm_obj = submit->bos[i].obj; | 311 | struct msm_gem_object *msm_obj = submit->bos[i].obj; |
| 313 | submit_unlock_unpin_bo(submit, i); | 312 | submit_unlock_unpin_bo(submit, i); |
| 314 | list_del_init(&msm_obj->submit_entry); | 313 | list_del_init(&msm_obj->submit_entry); |
| 315 | drm_gem_object_unreference(&msm_obj->base); | 314 | drm_gem_object_unreference(&msm_obj->base); |
| 316 | } | 315 | } |
| 317 | mutex_unlock(&submit->dev->struct_mutex); | ||
| 318 | 316 | ||
| 319 | ww_acquire_fini(&submit->ticket); | 317 | ww_acquire_fini(&submit->ticket); |
| 320 | kfree(submit); | 318 | kfree(submit); |
| @@ -342,6 +340,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
| 342 | if (args->nr_cmds > MAX_CMDS) | 340 | if (args->nr_cmds > MAX_CMDS) |
| 343 | return -EINVAL; | 341 | return -EINVAL; |
| 344 | 342 | ||
| 343 | mutex_lock(&dev->struct_mutex); | ||
| 344 | |||
| 345 | submit = submit_create(dev, gpu, args->nr_bos); | 345 | submit = submit_create(dev, gpu, args->nr_bos); |
| 346 | if (!submit) { | 346 | if (!submit) { |
| 347 | ret = -ENOMEM; | 347 | ret = -ENOMEM; |
| @@ -410,5 +410,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
| 410 | out: | 410 | out: |
| 411 | if (submit) | 411 | if (submit) |
| 412 | submit_cleanup(submit, !!ret); | 412 | submit_cleanup(submit, !!ret); |
| 413 | mutex_unlock(&dev->struct_mutex); | ||
| 413 | return ret; | 414 | return ret; |
| 414 | } | 415 | } |
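The msm_gem_submit.c changes hoist struct_mutex out to msm_ioctl_gem_submit(), which now holds it across submit creation, object lookup and pinning, relocation patching, and cleanup. The inner helpers switch to the _locked variants (msm_gem_get_iova_locked(), msm_gem_vaddr_locked()) to match, and submit_cleanup() drops its own lock/unlock pair since the caller already owns the mutex. One lock scope for the whole ioctl removes the lock/unlock churn between pinning and cleanup, and with it the window for another thread to slip in between.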
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 4ebce8be489d..0cfe3f426ee4 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c | |||
| @@ -298,8 +298,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, | |||
| 298 | struct msm_drm_private *priv = dev->dev_private; | 298 | struct msm_drm_private *priv = dev->dev_private; |
| 299 | int i, ret; | 299 | int i, ret; |
| 300 | 300 | ||
| 301 | mutex_lock(&dev->struct_mutex); | ||
| 302 | |||
| 303 | submit->fence = ++priv->next_fence; | 301 | submit->fence = ++priv->next_fence; |
| 304 | 302 | ||
| 305 | gpu->submitted_fence = submit->fence; | 303 | gpu->submitted_fence = submit->fence; |
| @@ -331,7 +329,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, | |||
| 331 | msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence); | 329 | msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence); |
| 332 | } | 330 | } |
| 333 | hangcheck_timer_reset(gpu); | 331 | hangcheck_timer_reset(gpu); |
| 334 | mutex_unlock(&dev->struct_mutex); | ||
| 335 | 332 | ||
| 336 | return ret; | 333 | return ret; |
| 337 | } | 334 | } |
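Consistent with the ioctl-wide locking above, msm_gpu_submit() no longer takes struct_mutex itself and simply relies on the caller holding it. The earlier msm_gem.c hunk is the same story: with msm_gem_new() now reached with struct_mutex held, its failure path must use plain drm_gem_object_unreference() rather than the _unlocked variant, which may itself acquire struct_mutex and deadlock.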
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index e88145ba1bf5..d310c195bdfe 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile | |||
| @@ -141,6 +141,7 @@ nouveau-y += core/subdev/mc/base.o | |||
| 141 | nouveau-y += core/subdev/mc/nv04.o | 141 | nouveau-y += core/subdev/mc/nv04.o |
| 142 | nouveau-y += core/subdev/mc/nv40.o | 142 | nouveau-y += core/subdev/mc/nv40.o |
| 143 | nouveau-y += core/subdev/mc/nv44.o | 143 | nouveau-y += core/subdev/mc/nv44.o |
| 144 | nouveau-y += core/subdev/mc/nv4c.o | ||
| 144 | nouveau-y += core/subdev/mc/nv50.o | 145 | nouveau-y += core/subdev/mc/nv50.o |
| 145 | nouveau-y += core/subdev/mc/nv94.o | 146 | nouveau-y += core/subdev/mc/nv94.o |
| 146 | nouveau-y += core/subdev/mc/nv98.o | 147 | nouveau-y += core/subdev/mc/nv98.o |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c index 1b653dd74a70..08b88591ed60 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c | |||
| @@ -311,7 +311,7 @@ nv40_identify(struct nouveau_device *device) | |||
| 311 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 311 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
| 312 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 312 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
| 313 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; | 313 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 314 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; | 314 | device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; |
| 315 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 315 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 316 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 316 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 317 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; | 317 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; |
| @@ -334,7 +334,7 @@ nv40_identify(struct nouveau_device *device) | |||
| 334 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 334 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
| 335 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 335 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
| 336 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; | 336 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 337 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; | 337 | device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; |
| 338 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 338 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 339 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 339 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 340 | device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass; | 340 | device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass; |
| @@ -357,7 +357,7 @@ nv40_identify(struct nouveau_device *device) | |||
| 357 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 357 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
| 358 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 358 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
| 359 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; | 359 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 360 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; | 360 | device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; |
| 361 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 361 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 362 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 362 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 363 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; | 363 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; |
| @@ -380,7 +380,7 @@ nv40_identify(struct nouveau_device *device) | |||
| 380 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 380 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
| 381 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 381 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
| 382 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; | 382 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 383 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; | 383 | device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; |
| 384 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 384 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 385 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 385 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 386 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; | 386 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; |
| @@ -403,7 +403,7 @@ nv40_identify(struct nouveau_device *device) | |||
| 403 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 403 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
| 404 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 404 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
| 405 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; | 405 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
| 406 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; | 406 | device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; |
| 407 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 407 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
| 408 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 408 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
| 409 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; | 409 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c index 940eaa5d8b9a..9ad722e4e087 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | |||
| @@ -1142,7 +1142,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head) | |||
| 1142 | if (conf != ~0) { | 1142 | if (conf != ~0) { |
| 1143 | if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) { | 1143 | if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) { |
| 1144 | u32 soff = (ffs(outp.or) - 1) * 0x08; | 1144 | u32 soff = (ffs(outp.or) - 1) * 0x08; |
| 1145 | u32 ctrl = nv_rd32(priv, 0x610798 + soff); | 1145 | u32 ctrl = nv_rd32(priv, 0x610794 + soff); |
| 1146 | u32 datarate; | 1146 | u32 datarate; |
| 1147 | 1147 | ||
| 1148 | switch ((ctrl & 0x000f0000) >> 16) { | 1148 | switch ((ctrl & 0x000f0000) >> 16) { |
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c index 9a850fe19515..54c1b5b471cd 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | |||
| @@ -112,7 +112,7 @@ nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine) | |||
| 112 | 112 | ||
| 113 | nv_wr32(priv, 0x002270, cur->addr >> 12); | 113 | nv_wr32(priv, 0x002270, cur->addr >> 12); |
| 114 | nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3)); | 114 | nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3)); |
| 115 | if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000)) | 115 | if (!nv_wait(priv, 0x002284 + (engine * 8), 0x00100000, 0x00000000)) |
| 116 | nv_error(priv, "runlist %d update timeout\n", engine); | 116 | nv_error(priv, "runlist %d update timeout\n", engine); |
| 117 | mutex_unlock(&nv_subdev(priv)->mutex); | 117 | mutex_unlock(&nv_subdev(priv)->mutex); |
| 118 | } | 118 | } |
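The nve0 fifo fix corrects the stride used when polling for runlist-update completion: the per-engine status registers starting at 0x002284 are spaced 8 bytes apart, not 4, so the old code watched the wrong register for every engine other than engine 0.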
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c index 30ed19c52e05..7a367c402978 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c | |||
| @@ -539,7 +539,7 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old, | |||
| 539 | ustatus &= ~0x04030000; | 539 | ustatus &= ~0x04030000; |
| 540 | } | 540 | } |
| 541 | if (ustatus && display) { | 541 | if (ustatus && display) { |
| 542 | nv_error("%s - TP%d:", name, i); | 542 | nv_error(priv, "%s - TP%d:", name, i); |
| 543 | nouveau_bitfield_print(nv50_mpc_traps, ustatus); | 543 | nouveau_bitfield_print(nv50_mpc_traps, ustatus); |
| 544 | pr_cont("\n"); | 544 | pr_cont("\n"); |
| 545 | ustatus = 0; | 545 | ustatus = 0; |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h index adc88b73d911..3c6738edd127 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h | |||
| @@ -47,6 +47,7 @@ struct nouveau_mc_oclass { | |||
| 47 | extern struct nouveau_oclass *nv04_mc_oclass; | 47 | extern struct nouveau_oclass *nv04_mc_oclass; |
| 48 | extern struct nouveau_oclass *nv40_mc_oclass; | 48 | extern struct nouveau_oclass *nv40_mc_oclass; |
| 49 | extern struct nouveau_oclass *nv44_mc_oclass; | 49 | extern struct nouveau_oclass *nv44_mc_oclass; |
| 50 | extern struct nouveau_oclass *nv4c_mc_oclass; | ||
| 50 | extern struct nouveau_oclass *nv50_mc_oclass; | 51 | extern struct nouveau_oclass *nv50_mc_oclass; |
| 51 | extern struct nouveau_oclass *nv94_mc_oclass; | 52 | extern struct nouveau_oclass *nv94_mc_oclass; |
| 52 | extern struct nouveau_oclass *nv98_mc_oclass; | 53 | extern struct nouveau_oclass *nv98_mc_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c index aa0fbbec7f08..ef0c9c4a8cc3 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c | |||
| @@ -130,6 +130,10 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios) | |||
| 130 | u16 pcir; | 130 | u16 pcir; |
| 131 | int i; | 131 | int i; |
| 132 | 132 | ||
| 133 | /* there is no PROM on nv4x IGPs */ | ||
| 134 | if (device->card_type == NV_40 && device->chipset >= 0x4c) | ||
| 135 | return; | ||
| 136 | |||
| 133 | /* enable access to rom */ | 137 | /* enable access to rom */ |
| 134 | if (device->card_type >= NV_50) | 138 | if (device->card_type >= NV_50) |
| 135 | pcireg = 0x088050; | 139 | pcireg = 0x088050; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c index 9159a5ccee93..265d1253624a 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c | |||
| @@ -36,7 +36,7 @@ nv1a_fb_oclass = &(struct nv04_fb_impl) { | |||
| 36 | .fini = _nouveau_fb_fini, | 36 | .fini = _nouveau_fb_fini, |
| 37 | }, | 37 | }, |
| 38 | .base.memtype = nv04_fb_memtype_valid, | 38 | .base.memtype = nv04_fb_memtype_valid, |
| 39 | .base.ram = &nv10_ram_oclass, | 39 | .base.ram = &nv1a_ram_oclass, |
| 40 | .tile.regions = 8, | 40 | .tile.regions = 8, |
| 41 | .tile.init = nv10_fb_tile_init, | 41 | .tile.init = nv10_fb_tile_init, |
| 42 | .tile.fini = nv10_fb_tile_fini, | 42 | .tile.fini = nv10_fb_tile_fini, |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h index b0d5c31606c1..81a408e7d034 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h | |||
| @@ -14,6 +14,7 @@ int nv04_mc_ctor(struct nouveau_object *, struct nouveau_object *, | |||
| 14 | extern const struct nouveau_mc_intr nv04_mc_intr[]; | 14 | extern const struct nouveau_mc_intr nv04_mc_intr[]; |
| 15 | int nv04_mc_init(struct nouveau_object *); | 15 | int nv04_mc_init(struct nouveau_object *); |
| 16 | void nv40_mc_msi_rearm(struct nouveau_mc *); | 16 | void nv40_mc_msi_rearm(struct nouveau_mc *); |
| 17 | int nv44_mc_init(struct nouveau_object *object); | ||
| 17 | int nv50_mc_init(struct nouveau_object *); | 18 | int nv50_mc_init(struct nouveau_object *); |
| 18 | extern const struct nouveau_mc_intr nv50_mc_intr[]; | 19 | extern const struct nouveau_mc_intr nv50_mc_intr[]; |
| 19 | extern const struct nouveau_mc_intr nvc0_mc_intr[]; | 20 | extern const struct nouveau_mc_intr nvc0_mc_intr[]; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c index 3bfee5c6c4f2..cc4d0d2d886e 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c | |||
| @@ -24,7 +24,7 @@ | |||
| 24 | 24 | ||
| 25 | #include "nv04.h" | 25 | #include "nv04.h" |
| 26 | 26 | ||
| 27 | static int | 27 | int |
| 28 | nv44_mc_init(struct nouveau_object *object) | 28 | nv44_mc_init(struct nouveau_object *object) |
| 29 | { | 29 | { |
| 30 | struct nv04_mc_priv *priv = (void *)object; | 30 | struct nv04_mc_priv *priv = (void *)object; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c new file mode 100644 index 000000000000..a75c35ccf25c --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c | |||
| @@ -0,0 +1,45 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Ilia Mirkin | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Ilia Mirkin | ||
| 23 | */ | ||
| 24 | |||
| 25 | #include "nv04.h" | ||
| 26 | |||
| 27 | static void | ||
| 28 | nv4c_mc_msi_rearm(struct nouveau_mc *pmc) | ||
| 29 | { | ||
| 30 | struct nv04_mc_priv *priv = (void *)pmc; | ||
| 31 | nv_wr08(priv, 0x088050, 0xff); | ||
| 32 | } | ||
| 33 | |||
| 34 | struct nouveau_oclass * | ||
| 35 | nv4c_mc_oclass = &(struct nouveau_mc_oclass) { | ||
| 36 | .base.handle = NV_SUBDEV(MC, 0x4c), | ||
| 37 | .base.ofuncs = &(struct nouveau_ofuncs) { | ||
| 38 | .ctor = nv04_mc_ctor, | ||
| 39 | .dtor = _nouveau_mc_dtor, | ||
| 40 | .init = nv44_mc_init, | ||
| 41 | .fini = _nouveau_mc_fini, | ||
| 42 | }, | ||
| 43 | .intr = nv04_mc_intr, | ||
| 44 | .msi_rearm = nv4c_mc_msi_rearm, | ||
| 45 | }.base; | ||
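The new nv4c MC subdev exists because nv4x IGPs rearm MSI differently from the discrete nv44 parts: nv4c_mc_msi_rearm() issues an 8-bit write of 0xff to 0x088050 instead of reusing the nv40 rearm. Everything else is shared, which is why nv44_mc_init() loses its static qualifier above so it can be reused here, and why the nv40_identify() entries for the IGP chipsets are switched from nv44_mc_oclass to nv4c_mc_oclass.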
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 4ef83df2b246..83face3f608f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c | |||
| @@ -106,6 +106,29 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t * | |||
| 106 | return 0; | 106 | return 0; |
| 107 | } | 107 | } |
| 108 | 108 | ||
| 109 | /* | ||
| 110 | * On some platforms, _DSM(nouveau_op_dsm_muid, func0) has special | ||
| 111 | * requirements on the fourth parameter, so a private implementation | ||
| 112 | * is used here instead of acpi_check_dsm(). | ||
| 113 | */ | ||
| 114 | static int nouveau_check_optimus_dsm(acpi_handle handle) | ||
| 115 | { | ||
| 116 | int result; | ||
| 117 | |||
| 118 | /* | ||
| 119 | * Function 0 returns a Buffer containing available functions. | ||
| 120 | * The args parameter is ignored for function 0, so just put 0 in it | ||
| 121 | */ | ||
| 122 | if (nouveau_optimus_dsm(handle, 0, 0, &result)) | ||
| 123 | return 0; | ||
| 124 | |||
| 125 | /* | ||
| 126 | * ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. | ||
| 127 | * If the n-th bit is enabled, function n is supported | ||
| 128 | */ | ||
| 129 | return result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS); | ||
| 130 | } | ||
| 131 | |||
| 109 | static int nouveau_dsm(acpi_handle handle, int func, int arg) | 132 | static int nouveau_dsm(acpi_handle handle, int func, int arg) |
| 110 | { | 133 | { |
| 111 | int ret = 0; | 134 | int ret = 0; |
| @@ -207,8 +230,7 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev) | |||
| 207 | 1 << NOUVEAU_DSM_POWER)) | 230 | 1 << NOUVEAU_DSM_POWER)) |
| 208 | retval |= NOUVEAU_DSM_HAS_MUX; | 231 | retval |= NOUVEAU_DSM_HAS_MUX; |
| 209 | 232 | ||
| 210 | if (acpi_check_dsm(dhandle, nouveau_op_dsm_muid, 0x00000100, | 233 | if (nouveau_check_optimus_dsm(dhandle)) |
| 211 | 1 << NOUVEAU_DSM_OPTIMUS_CAPS)) | ||
| 212 | retval |= NOUVEAU_DSM_HAS_OPT; | 234 | retval |= NOUVEAU_DSM_HAS_OPT; |
| 213 | 235 | ||
| 214 | if (retval & NOUVEAU_DSM_HAS_OPT) { | 236 | if (retval & NOUVEAU_DSM_HAS_OPT) { |
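nouveau_check_optimus_dsm() open-codes the capability query that acpi_check_dsm() would otherwise perform, because this _DSM is picky about its fourth argument. Function 0 returns a bitmask in which bit 0 set means anything at all is supported and bit n set means function n is implemented, so both bits must be tested. A stand-alone model of that test (the 0x1a value for NOUVEAU_DSM_OPTIMUS_CAPS is assumed for illustration):

#include <assert.h>
#include <stdint.h>

#define OPTIMUS_CAPS_FUNC 0x1a	/* assumed NOUVEAU_DSM_OPTIMUS_CAPS */

/* Mirrors 'result & 1 && result & (1 << func)': the DSM is usable only
 * when bit 0 and the bit for the requested function are both set. */
static int dsm_func_supported(uint32_t caps, int func)
{
	return (caps & 1) && (caps & (1u << func));
}

int main(void)
{
	assert(!dsm_func_supported(0, OPTIMUS_CAPS_FUNC));
	assert(!dsm_func_supported(1u << OPTIMUS_CAPS_FUNC, OPTIMUS_CAPS_FUNC));
	assert(dsm_func_supported(1u | (1u << OPTIMUS_CAPS_FUNC), OPTIMUS_CAPS_FUNC));
	return 0;
}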
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 488686d490c0..4aed1714b9ab 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
| @@ -1249,7 +1249,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | |||
| 1249 | mem->bus.is_iomem = !dev->agp->cant_use_aperture; | 1249 | mem->bus.is_iomem = !dev->agp->cant_use_aperture; |
| 1250 | } | 1250 | } |
| 1251 | #endif | 1251 | #endif |
| 1252 | if (!node->memtype) | 1252 | if (nv_device(drm->device)->card_type < NV_50 || !node->memtype) |
| 1253 | /* untiled */ | 1253 | /* untiled */ |
| 1254 | break; | 1254 | break; |
| 1255 | /* fallthrough, tiled memory */ | 1255 | /* fallthrough, tiled memory */ |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 78c8e7146d56..89c484d8ac26 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
| @@ -376,6 +376,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | |||
| 376 | if (ret) | 376 | if (ret) |
| 377 | goto fail_device; | 377 | goto fail_device; |
| 378 | 378 | ||
| 379 | dev->irq_enabled = true; | ||
| 380 | |||
| 379 | /* workaround an odd issue on nvc1 by disabling the device's | 381 | /* workaround an odd issue on nvc1 by disabling the device's |
| 380 | * nosnoop capability. hopefully won't cause issues until a | 382 | * nosnoop capability. hopefully won't cause issues until a |
| 381 | * better fix is found - assuming there is one... | 383 | * better fix is found - assuming there is one... |
| @@ -475,6 +477,7 @@ nouveau_drm_remove(struct pci_dev *pdev) | |||
| 475 | struct nouveau_drm *drm = nouveau_drm(dev); | 477 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 476 | struct nouveau_object *device; | 478 | struct nouveau_object *device; |
| 477 | 479 | ||
| 480 | dev->irq_enabled = false; | ||
| 478 | device = drm->client.base.device; | 481 | device = drm->client.base.device; |
| 479 | drm_put_dev(dev); | 482 | drm_put_dev(dev); |
| 480 | 483 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c index 81638d7f2eff..471347edc27e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c | |||
| @@ -14,7 +14,9 @@ nouveau_vga_set_decode(void *priv, bool state) | |||
| 14 | { | 14 | { |
| 15 | struct nouveau_device *device = nouveau_dev(priv); | 15 | struct nouveau_device *device = nouveau_dev(priv); |
| 16 | 16 | ||
| 17 | if (device->chipset >= 0x40) | 17 | if (device->card_type == NV_40 && device->chipset >= 0x4c) |
| 18 | nv_wr32(device, 0x088060, state); | ||
| 19 | else if (device->chipset >= 0x40) | ||
| 18 | nv_wr32(device, 0x088054, state); | 20 | nv_wr32(device, 0x088054, state); |
| 19 | else | 21 | else |
| 20 | nv_wr32(device, 0x001854, state); | 22 | nv_wr32(device, 0x001854, state); |
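The nouveau_vga.c tweak pairs with the IGP work above: on nv4x IGPs the VGA decode-enable register apparently lives at 0x088060, while other nv40-family chips keep using 0x088054 and older parts 0x001854. The card_type/chipset test matches the one added to skip PROM shadowing in bios/base.c.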
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index a9338c85630f..daa4dd375ab1 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
| @@ -559,7 +559,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
| 559 | u32 adjusted_clock = mode->clock; | 559 | u32 adjusted_clock = mode->clock; |
| 560 | int encoder_mode = atombios_get_encoder_mode(encoder); | 560 | int encoder_mode = atombios_get_encoder_mode(encoder); |
| 561 | u32 dp_clock = mode->clock; | 561 | u32 dp_clock = mode->clock; |
| 562 | int bpc = radeon_get_monitor_bpc(connector); | 562 | int bpc = radeon_crtc->bpc; |
| 563 | bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock); | 563 | bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock); |
| 564 | 564 | ||
| 565 | /* reset the pll flags */ | 565 | /* reset the pll flags */ |
| @@ -1176,7 +1176,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, | |||
| 1176 | evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); | 1176 | evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); |
| 1177 | 1177 | ||
| 1178 | /* Set NUM_BANKS. */ | 1178 | /* Set NUM_BANKS. */ |
| 1179 | if (rdev->family >= CHIP_BONAIRE) { | 1179 | if (rdev->family >= CHIP_TAHITI) { |
| 1180 | unsigned tileb, index, num_banks, tile_split_bytes; | 1180 | unsigned tileb, index, num_banks, tile_split_bytes; |
| 1181 | 1181 | ||
| 1182 | /* Calculate the macrotile mode index. */ | 1182 | /* Calculate the macrotile mode index. */ |
| @@ -1194,13 +1194,14 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, | |||
| 1194 | return -EINVAL; | 1194 | return -EINVAL; |
| 1195 | } | 1195 | } |
| 1196 | 1196 | ||
| 1197 | num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; | 1197 | if (rdev->family >= CHIP_BONAIRE) |
| 1198 | num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; | ||
| 1199 | else | ||
| 1200 | num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3; | ||
| 1198 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); | 1201 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); |
| 1199 | } else { | 1202 | } else { |
| 1200 | /* SI and older. */ | 1203 | /* NI and older. */ |
| 1201 | if (rdev->family >= CHIP_TAHITI) | 1204 | if (rdev->family >= CHIP_CAYMAN) |
| 1202 | tmp = rdev->config.si.tile_config; | ||
| 1203 | else if (rdev->family >= CHIP_CAYMAN) | ||
| 1204 | tmp = rdev->config.cayman.tile_config; | 1205 | tmp = rdev->config.cayman.tile_config; |
| 1205 | else | 1206 | else |
| 1206 | tmp = rdev->config.evergreen.tile_config; | 1207 | tmp = rdev->config.evergreen.tile_config; |
| @@ -1773,6 +1774,20 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) | |||
| 1773 | return ATOM_PPLL1; | 1774 | return ATOM_PPLL1; |
| 1774 | DRM_ERROR("unable to allocate a PPLL\n"); | 1775 | DRM_ERROR("unable to allocate a PPLL\n"); |
| 1775 | return ATOM_PPLL_INVALID; | 1776 | return ATOM_PPLL_INVALID; |
| 1777 | } else if (ASIC_IS_DCE41(rdev)) { | ||
| 1778 | /* Don't share PLLs on DCE4.1 chips */ | ||
| 1779 | if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) { | ||
| 1780 | if (rdev->clock.dp_extclk) | ||
| 1781 | /* skip PPLL programming if using ext clock */ | ||
| 1782 | return ATOM_PPLL_INVALID; | ||
| 1783 | } | ||
| 1784 | pll_in_use = radeon_get_pll_use_mask(crtc); | ||
| 1785 | if (!(pll_in_use & (1 << ATOM_PPLL1))) | ||
| 1786 | return ATOM_PPLL1; | ||
| 1787 | if (!(pll_in_use & (1 << ATOM_PPLL2))) | ||
| 1788 | return ATOM_PPLL2; | ||
| 1789 | DRM_ERROR("unable to allocate a PPLL\n"); | ||
| 1790 | return ATOM_PPLL_INVALID; | ||
| 1776 | } else if (ASIC_IS_DCE4(rdev)) { | 1791 | } else if (ASIC_IS_DCE4(rdev)) { |
| 1777 | /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock, | 1792 | /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock, |
| 1778 | * depending on the asic: | 1793 | * depending on the asic: |
| @@ -1800,7 +1815,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) | |||
| 1800 | if (pll != ATOM_PPLL_INVALID) | 1815 | if (pll != ATOM_PPLL_INVALID) |
| 1801 | return pll; | 1816 | return pll; |
| 1802 | } | 1817 | } |
| 1803 | } else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */ | 1818 | } else { |
| 1804 | /* use the same PPLL for all monitors with the same clock */ | 1819 | /* use the same PPLL for all monitors with the same clock */ |
| 1805 | pll = radeon_get_shared_nondp_ppll(crtc); | 1820 | pll = radeon_get_shared_nondp_ppll(crtc); |
| 1806 | if (pll != ATOM_PPLL_INVALID) | 1821 | if (pll != ATOM_PPLL_INVALID) |
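Three independent display fixes land in atombios_crtc.c. First, atombios_adjust_pll() takes bpc from the already-selected radeon_crtc->bpc rather than re-querying the monitor, keeping it consistent with the rest of the mode-set path. Second, the NUM_BANKS computation gains a SI (CHIP_TAHITI and newer) branch: SI keeps the bank count in tile_mode_array bits 21:20, while CIK uses macrotile_mode_array bits 7:6, and the old code lumped SI in with the pre-macrotile chips. Third, DCE4.1 gets its own PLL-allocation branch that never shares a PPLL between displays; the old code only excluded DCE4.1 from the shared-non-DP fallback, which was not quite the same thing.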
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index a42d61571f49..607dc14d195e 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c | |||
| @@ -464,11 +464,12 @@ atombios_tv_setup(struct drm_encoder *encoder, int action) | |||
| 464 | 464 | ||
| 465 | static u8 radeon_atom_get_bpc(struct drm_encoder *encoder) | 465 | static u8 radeon_atom_get_bpc(struct drm_encoder *encoder) |
| 466 | { | 466 | { |
| 467 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
| 468 | int bpc = 8; | 467 | int bpc = 8; |
| 469 | 468 | ||
| 470 | if (connector) | 469 | if (encoder->crtc) { |
| 471 | bpc = radeon_get_monitor_bpc(connector); | 470 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
| 471 | bpc = radeon_crtc->bpc; | ||
| 472 | } | ||
| 472 | 473 | ||
| 473 | switch (bpc) { | 474 | switch (bpc) { |
| 474 | case 0: | 475 | case 0: |
| @@ -1313,7 +1314,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
| 1313 | } | 1314 | } |
| 1314 | if (is_dp) | 1315 | if (is_dp) |
| 1315 | args.v5.ucLaneNum = dp_lane_count; | 1316 | args.v5.ucLaneNum = dp_lane_count; |
| 1316 | else if (radeon_encoder->pixel_clock > 165000) | 1317 | else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) |
| 1317 | args.v5.ucLaneNum = 8; | 1318 | args.v5.ucLaneNum = 8; |
| 1318 | else | 1319 | else |
| 1319 | args.v5.ucLaneNum = 4; | 1320 | args.v5.ucLaneNum = 4; |
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index 0fbd36f3d4e9..ea103ccdf4bd 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c | |||
| @@ -29,6 +29,7 @@ | |||
| 29 | #include "cypress_dpm.h" | 29 | #include "cypress_dpm.h" |
| 30 | #include "btc_dpm.h" | 30 | #include "btc_dpm.h" |
| 31 | #include "atom.h" | 31 | #include "atom.h" |
| 32 | #include <linux/seq_file.h> | ||
| 32 | 33 | ||
| 33 | #define MC_CG_ARB_FREQ_F0 0x0a | 34 | #define MC_CG_ARB_FREQ_F0 0x0a |
| 34 | #define MC_CG_ARB_FREQ_F1 0x0b | 35 | #define MC_CG_ARB_FREQ_F1 0x0b |
| @@ -2756,6 +2757,37 @@ void btc_dpm_fini(struct radeon_device *rdev) | |||
| 2756 | r600_free_extended_power_table(rdev); | 2757 | r600_free_extended_power_table(rdev); |
| 2757 | } | 2758 | } |
| 2758 | 2759 | ||
| 2760 | void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, | ||
| 2761 | struct seq_file *m) | ||
| 2762 | { | ||
| 2763 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); | ||
| 2764 | struct radeon_ps *rps = &eg_pi->current_rps; | ||
| 2765 | struct rv7xx_ps *ps = rv770_get_ps(rps); | ||
| 2766 | struct rv7xx_pl *pl; | ||
| 2767 | u32 current_index = | ||
| 2768 | (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >> | ||
| 2769 | CURRENT_PROFILE_INDEX_SHIFT; | ||
| 2770 | |||
| 2771 | if (current_index > 2) { | ||
| 2772 | seq_printf(m, "invalid dpm profile %d\n", current_index); | ||
| 2773 | } else { | ||
| 2774 | if (current_index == 0) | ||
| 2775 | pl = &ps->low; | ||
| 2776 | else if (current_index == 1) | ||
| 2777 | pl = &ps->medium; | ||
| 2778 | else /* current_index == 2 */ | ||
| 2779 | pl = &ps->high; | ||
| 2780 | seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); | ||
| 2781 | if (rdev->family >= CHIP_CEDAR) { | ||
| 2782 | seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n", | ||
| 2783 | current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci); | ||
| 2784 | } else { | ||
| 2785 | seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n", | ||
| 2786 | current_index, pl->sclk, pl->mclk, pl->vddc); | ||
| 2787 | } | ||
| 2788 | } | ||
| 2789 | } | ||
| 2790 | |||
| 2759 | u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low) | 2791 | u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low) |
| 2760 | { | 2792 | { |
| 2761 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); | 2793 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
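btc_dpm_debugfs_print_current_performance_level() brings BTC in line with the other dpm implementations' debugfs reporting: it reads the current profile index from TARGET_AND_CURRENT_PROFILE_INDEX (the register and field definitions follow in btcd.h below), maps indices 0, 1 and 2 onto the low, medium and high levels of the current power state, and prints sclk/mclk/vddc, adding vddci on CEDAR and newer.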
diff --git a/drivers/gpu/drm/radeon/btcd.h b/drivers/gpu/drm/radeon/btcd.h
index 29e32de7e025..9c65be2d55a9 100644
--- a/drivers/gpu/drm/radeon/btcd.h
+++ b/drivers/gpu/drm/radeon/btcd.h
@@ -44,6 +44,10 @@
 #       define DYN_SPREAD_SPECTRUM_EN   (1 << 23)
 #       define AC_DC_SW                 (1 << 24)
 
+#define TARGET_AND_CURRENT_PROFILE_INDEX        0x66c
+#       define CURRENT_PROFILE_INDEX_MASK       (0xf << 4)
+#       define CURRENT_PROFILE_INDEX_SHIFT      4
+
 #define CG_BIF_REQ_AND_RSP              0x7f4
 #define CG_CLIENT_REQ(x)                ((x) << 0)
 #define CG_CLIENT_REQ_MASK              (0xff << 0)
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index e6419ca7cd37..bbb17841a9e5 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3046,7 +3046,7 @@ static u32 cik_create_bitmask(u32 bit_width)
 }
 
 /**
- * cik_select_se_sh - select which SE, SH to address
+ * cik_get_rb_disabled - computes the mask of disabled RBs
  *
  * @rdev: radeon_device pointer
 * @max_rb_num: max RBs (render backends) for the asic
@@ -4134,8 +4134,11 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
 {
 	if (enable)
 		WREG32(CP_MEC_CNTL, 0);
-	else
+	else {
 		WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
+		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+	}
 	udelay(50);
 }
 
@@ -7902,7 +7905,8 @@ int cik_resume(struct radeon_device *rdev)
 	/* init golden registers */
 	cik_init_golden_registers(rdev);
 
-	radeon_pm_resume(rdev);
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume(rdev);
 
 	rdev->accel_working = true;
 	r = cik_startup(rdev);
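The cik.c hunks pair two fixes: halting the compute MEs now also marks the CP1/CP2 rings not ready, so nothing later submits to a dead micro-engine, and resume only calls radeon_pm_resume() when DPM is the active method. A small sketch of the ready-flag pattern, with simplified stand-in types:

    /* sketch: halting an engine clears the ready flags of its rings */
    #include <stdbool.h>
    #include <stdio.h>

    struct ring { bool ready; };

    static void cp_compute_enable(struct ring rings[2], bool enable)
    {
        if (enable) {
            /* WREG32(CP_MEC_CNTL, 0) would run the MEs here */
        } else {
            /* WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT) */
            rings[0].ready = false; /* nothing may submit to a halted ME */
            rings[1].ready = false;
        }
    }

    int main(void)
    {
        struct ring rings[2] = { { true }, { true } };
        cp_compute_enable(rings, false);
        printf("cp1 ready: %d, cp2 ready: %d\n", rings[0].ready, rings[1].ready);
        return 0;
    }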
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 1ecb3f1070e3..94626ea90fa5 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -264,6 +264,8 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
 		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
 		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
 	}
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
 }
 
 /**
@@ -291,6 +293,11 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable)
 	u32 me_cntl, reg_offset;
 	int i;
 
+	if (enable == false) {
+		cik_sdma_gfx_stop(rdev);
+		cik_sdma_rlc_stop(rdev);
+	}
+
 	for (i = 0; i < 2; i++) {
 		if (i == 0)
 			reg_offset = SDMA0_REGISTER_OFFSET;
@@ -420,10 +427,6 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev)
 	if (!rdev->sdma_fw)
 		return -EINVAL;
 
-	/* stop the gfx rings and rlc compute queues */
-	cik_sdma_gfx_stop(rdev);
-	cik_sdma_rlc_stop(rdev);
-
 	/* halt the MEs */
 	cik_sdma_enable(rdev, false);
 
@@ -492,9 +495,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
  */
 void cik_sdma_fini(struct radeon_device *rdev)
 {
-	/* stop the gfx rings and rlc compute queues */
-	cik_sdma_gfx_stop(rdev);
-	cik_sdma_rlc_stop(rdev);
 	/* halt the MEs */
 	cik_sdma_enable(rdev, false);
 	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
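Moving the gfx/RLC stop calls into cik_sdma_enable(false) turns "queues are stopped before the MEs halt" into an invariant of the enable function itself, instead of something each caller (load_microcode, fini, suspend) must remember. A sketch of the pattern with stub bodies:

    /* sketch: disabling the engine always drains its queues first */
    #include <stdbool.h>
    #include <stdio.h>

    static void sdma_gfx_stop(void) { puts("gfx ring stopped"); }
    static void sdma_rlc_stop(void) { puts("rlc queues stopped"); }

    static void sdma_enable(bool enable)
    {
        if (!enable) {
            /* stop consumers before halting the micro-engines */
            sdma_gfx_stop();
            sdma_rlc_stop();
        }
        printf("MEs %s\n", enable ? "running" : "halted");
    }

    int main(void)
    {
        sdma_enable(false); /* fini/suspend path: stop, then halt */
        return 0;
    }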
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 713a5d359901..94e858751994 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -278,13 +278,15 @@ static int dce6_audio_chipset_supported(struct radeon_device *rdev)
 	return !ASIC_IS_NODCE(rdev);
 }
 
-static void dce6_audio_enable(struct radeon_device *rdev,
-			      struct r600_audio_pin *pin,
-			      bool enable)
+void dce6_audio_enable(struct radeon_device *rdev,
+		       struct r600_audio_pin *pin,
+		       bool enable)
 {
+	if (!pin)
+		return;
+
 	WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL,
-			AUDIO_ENABLED);
-	DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
+			enable ? AUDIO_ENABLED : 0);
 }
 
 static const u32 pin_offsets[7] =
@@ -323,7 +325,8 @@ int dce6_audio_init(struct radeon_device *rdev)
 		rdev->audio.pin[i].connected = false;
 		rdev->audio.pin[i].offset = pin_offsets[i];
 		rdev->audio.pin[i].id = i;
-		dce6_audio_enable(rdev, &rdev->audio.pin[i], true);
+		/* disable audio. it will be set up later */
+		dce6_audio_enable(rdev, &rdev->audio.pin[i], false);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f2b9e21ce4da..27b0ff16082e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1680,7 +1680,7 @@ bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
 	case RADEON_HPD_6:
 		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
 			connected = true;
-			break;
+		break;
 	default:
 		break;
 	}
@@ -5299,7 +5299,8 @@ int evergreen_resume(struct radeon_device *rdev)
 	/* init golden registers */
 	evergreen_init_golden_registers(rdev);
 
-	radeon_pm_resume(rdev);
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume(rdev);
 
 	rdev->accel_working = true;
 	r = evergreen_startup(rdev);
@@ -5475,9 +5476,9 @@ void evergreen_fini(struct radeon_device *rdev)
 	radeon_wb_fini(rdev);
 	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
-	evergreen_pcie_gart_fini(rdev);
 	uvd_v1_0_fini(rdev);
 	radeon_uvd_fini(rdev);
+	evergreen_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 0c6d5cef4cf1..05b0c95813fd 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -306,6 +306,15 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
 		return;
 	offset = dig->afmt->offset;
 
+	/* disable audio prior to setting up hw */
+	if (ASIC_IS_DCE6(rdev)) {
+		dig->afmt->pin = dce6_audio_get_pin(rdev);
+		dce6_audio_enable(rdev, dig->afmt->pin, false);
+	} else {
+		dig->afmt->pin = r600_audio_get_pin(rdev);
+		r600_audio_enable(rdev, dig->afmt->pin, false);
+	}
+
 	evergreen_audio_set_dto(encoder, mode->clock);
 
 	WREG32(HDMI_VBI_PACKET_CONTROL + offset,
@@ -409,12 +418,16 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
 	WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
 	WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
 	WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
+
+	/* enable audio after setting up hw */
+	if (ASIC_IS_DCE6(rdev))
+		dce6_audio_enable(rdev, dig->afmt->pin, true);
+	else
+		r600_audio_enable(rdev, dig->afmt->pin, true);
 }
 
 void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
 {
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 
@@ -427,15 +440,6 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
 	if (!enable && !dig->afmt->enabled)
 		return;
 
-	if (enable) {
-		if (ASIC_IS_DCE6(rdev))
-			dig->afmt->pin = dce6_audio_get_pin(rdev);
-		else
-			dig->afmt->pin = r600_audio_get_pin(rdev);
-	} else {
-		dig->afmt->pin = NULL;
-	}
-
 	dig->afmt->enabled = enable;
 
 	DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
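The HDMI hunks here and in r600_hdmi.c below all enforce the same ordering: quiesce the audio pin, program the AFMT block, then enable, so the codec never sees a half-programmed endpoint. A sketch of that bracket, with audio_enable() standing in for the dce6/r600 helpers:

    /* sketch: disable -> program -> enable bracket around hw setup */
    #include <stdbool.h>
    #include <stdio.h>

    static void audio_enable(bool on) { printf("audio %s\n", on ? "on" : "off"); }
    static void program_afmt(void)    { puts("AFMT registers programmed"); }

    static void hdmi_setmode(void)
    {
        audio_enable(false); /* quiesce the pin before touching hw */
        program_afmt();
        audio_enable(true);  /* only expose a fully set up endpoint */
    }

    int main(void) { hdmi_setmode(); return 0; }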
diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h
index 76ada8cfe902..3a03ba37d043 100644
--- a/drivers/gpu/drm/radeon/evergreen_smc.h
+++ b/drivers/gpu/drm/radeon/evergreen_smc.h
@@ -57,7 +57,7 @@ typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
 
 #define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100
 
-#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters   0x0
+#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters   0x8
 #define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable      0xC
 #define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20
 
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index b6e01d5d2cce..351db361239d 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -1223,7 +1223,7 @@ int kv_dpm_enable(struct radeon_device *rdev)
 
 int kv_dpm_late_enable(struct radeon_device *rdev)
 {
-	int ret;
+	int ret = 0;
 
 	if (rdev->irq.installed &&
 	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index ea932ac66fc6..bf6300cfd62d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2105,7 +2105,8 @@ int cayman_resume(struct radeon_device *rdev)
 	/* init golden registers */
 	ni_init_golden_registers(rdev);
 
-	radeon_pm_resume(rdev);
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume(rdev);
 
 	rdev->accel_working = true;
 	r = cayman_startup(rdev);
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index c351226ecb31..ca814276b075 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -2588,7 +2588,7 @@ static int ni_populate_sq_ramping_values(struct radeon_device *rdev,
 	if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
 		enable_sq_ramping = false;
 
-	if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+	if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
 		enable_sq_ramping = false;
 
 	for (i = 0; i < state->performance_level_count; i++) {
@@ -3945,7 +3945,6 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
 	struct ni_ps *ps = ni_get_ps(rps);
-	u16 vddc;
 	struct rv7xx_pl *pl = &ps->performance_levels[index];
 
 	ps->performance_level_count = index + 1;
@@ -3961,8 +3960,8 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
 
 	/* patch up vddc if necessary */
 	if (pl->vddc == 0xff01) {
-		if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
-			pl->vddc = vddc;
+		if (pi->max_vddc)
+			pl->vddc = pi->max_vddc;
 	}
 
 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
@@ -4322,7 +4321,8 @@ void ni_dpm_print_power_state(struct radeon_device *rdev,
 void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
 						    struct seq_file *m)
 {
-	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *rps = &eg_pi->current_rps;
 	struct ni_ps *ps = ni_get_ps(rps);
 	struct rv7xx_pl *pl;
 	u32 current_index =
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ef024ce3f7cc..3cc78bb66042 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3942,8 +3942,6 @@ int r100_resume(struct radeon_device *rdev)
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
-	radeon_pm_resume(rdev);
-
 	rdev->accel_working = true;
 	r = r100_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 7c63ef840e86..0b658b34b33a 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1430,8 +1430,6 @@ int r300_resume(struct radeon_device *rdev)
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
-	radeon_pm_resume(rdev);
-
 	rdev->accel_working = true;
 	r = r300_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 3768aab2710b..802b19220a21 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -325,8 +325,6 @@ int r420_resume(struct radeon_device *rdev)
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
-	radeon_pm_resume(rdev);
-
 	rdev->accel_working = true;
 	r = r420_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index e209eb75024f..98d6053c36c6 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -240,8 +240,6 @@ int r520_resume(struct radeon_device *rdev)
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
-	radeon_pm_resume(rdev);
-
 	rdev->accel_working = true;
 	r = r520_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 56140b4e5bb2..647ef4079217 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2968,7 +2968,8 @@ int r600_resume(struct radeon_device *rdev)
 	/* post card */
 	atom_asic_init(rdev->mode_info.atom_context);
 
-	radeon_pm_resume(rdev);
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume(rdev);
 
 	rdev->accel_working = true;
 	r = r600_startup(rdev);
@@ -3991,6 +3992,10 @@ restart_ih:
 			break;
 		}
 		break;
+	case 124: /* UVD */
+		DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+		radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+		break;
 	case 176: /* CP_INT in ring buffer */
 	case 177: /* CP_INT in IB1 */
 	case 178: /* CP_INT in IB2 */
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 47fc2b886979..bffac10c4296 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -142,12 +142,15 @@ void r600_audio_update_hdmi(struct work_struct *work)
 }
 
 /* enable the audio stream */
-static void r600_audio_enable(struct radeon_device *rdev,
-			      struct r600_audio_pin *pin,
-			      bool enable)
+void r600_audio_enable(struct radeon_device *rdev,
+		       struct r600_audio_pin *pin,
+		       bool enable)
 {
 	u32 value = 0;
 
+	if (!pin)
+		return;
+
 	if (ASIC_IS_DCE4(rdev)) {
 		if (enable) {
 			value |= 0x81000000; /* Required to enable audio */
@@ -158,7 +161,6 @@ static void r600_audio_enable(struct radeon_device *rdev,
 		WREG32_P(R600_AUDIO_ENABLE,
 			 enable ? 0x81000000 : 0x0, ~0x81000000);
 	}
-	DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
 }
 
 /*
@@ -178,8 +180,8 @@ int r600_audio_init(struct radeon_device *rdev)
 	rdev->audio.pin[0].status_bits = 0;
 	rdev->audio.pin[0].category_code = 0;
 	rdev->audio.pin[0].id = 0;
-
-	r600_audio_enable(rdev, &rdev->audio.pin[0], true);
+	/* disable audio. it will be set up later */
+	r600_audio_enable(rdev, &rdev->audio.pin[0], false);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 7b399dc5fd54..2812c7d1ae6f 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1007,8 +1007,22 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 	case R_008C64_SQ_VSTMP_RING_SIZE:
 	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
 		/* get value to populate the IB don't remove */
-		tmp =radeon_get_ib_value(p, idx);
-		ib[idx] = 0;
+		/*tmp =radeon_get_ib_value(p, idx);
+		ib[idx] = 0;*/
+		break;
+	case SQ_ESGS_RING_BASE:
+	case SQ_GSVS_RING_BASE:
+	case SQ_ESTMP_RING_BASE:
+	case SQ_GSTMP_RING_BASE:
+	case SQ_PSTMP_RING_BASE:
+	case SQ_VSTMP_RING_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+				 "0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
 		break;
 	case SQ_CONFIG:
 		track->sq_config = radeon_get_ib_value(p, idx);
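The new SQ_*_RING_BASE cases are what the 2.37.0 version bump further down advertises: userspace may now emit GS ring setup, and the checker rebases each such dword with the buffer object's GPU offset. The `>> 8` matches the 256-byte-aligned encoding these registers appear to use. A userspace sketch of the relocation patch, with invented offsets:

    /* sketch: apply a base-address relocation to a command dword */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t ib[1] = { 0x10 };            /* offset within the BO, pre-shifted */
        uint64_t bo_gpu_offset = 0x40000000;  /* hypothetical GPU address of the BO */

        ib[0] += (uint32_t)((bo_gpu_offset >> 8) & 0xffffffff);
        printf("patched dword: 0x%08x\n", ib[0]);
        return 0;
    }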
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 3016fc14f502..85a2bb28aed2 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -329,9 +329,6 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 	u8 *sadb;
 	int sad_count;
 
-	/* XXX: setting this register causes hangs on some asics */
-	return;
-
 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
 		if (connector->encoder == encoder) {
 			radeon_connector = to_radeon_connector(connector);
@@ -460,6 +457,10 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
 		return;
 	offset = dig->afmt->offset;
 
+	/* disable audio prior to setting up hw */
+	dig->afmt->pin = r600_audio_get_pin(rdev);
+	r600_audio_enable(rdev, dig->afmt->pin, false);
+
 	r600_audio_set_dto(encoder, mode->clock);
 
 	WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
@@ -531,6 +532,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
 	WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
 
 	r600_hdmi_audio_workaround(encoder);
+
+	/* enable audio after setting up hw */
+	r600_audio_enable(rdev, dig->afmt->pin, true);
 }
 
 /*
@@ -651,11 +655,6 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
 	if (!enable && !dig->afmt->enabled)
 		return;
 
-	if (enable)
-		dig->afmt->pin = r600_audio_get_pin(rdev);
-	else
-		dig->afmt->pin = NULL;
-
 	/* Older chipsets require setting HDMI and routing manually */
 	if (!ASIC_IS_DCE3(rdev)) {
 		if (enable)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 4a8ac1cd6b4c..e887d027b6d0 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -135,6 +135,9 @@ extern int radeon_hard_reset;
 /* R600+ */
 #define R600_RING_TYPE_UVD_INDEX	5
 
+/* number of hw syncs before falling back on blocking */
+#define RADEON_NUM_SYNCS		4
+
 /* hardcode those limit for now */
 #define RADEON_VA_IB_OFFSET		(1 << 20)
 #define RADEON_VA_RESERVED_SIZE		(8 << 20)
@@ -554,7 +557,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
 /*
  * Semaphores.
  */
-/* everything here is constant */
 struct radeon_semaphore {
 	struct radeon_sa_bo	*sa_bo;
 	signed			waiters;
@@ -2745,6 +2747,12 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
 void r600_audio_update_hdmi(struct work_struct *work);
 struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
 struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
+void r600_audio_enable(struct radeon_device *rdev,
+		       struct r600_audio_pin *pin,
+		       bool enable);
+void dce6_audio_enable(struct radeon_device *rdev,
+		       struct r600_audio_pin *pin,
+		       bool enable);
 
 /*
  * R600 vram scratch functions
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index f74db43346fd..dda02bfc10a4 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1555,7 +1555,7 @@ static struct radeon_asic btc_asic = {
 		.get_sclk = &btc_dpm_get_sclk,
 		.get_mclk = &btc_dpm_get_mclk,
 		.print_power_state = &rv770_dpm_print_power_state,
-		.debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
+		.debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level,
 		.force_performance_level = &rv770_dpm_force_performance_level,
 		.vblank_too_short = &btc_dpm_vblank_too_short,
 	},
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index b3bc433eed4c..ae637cfda783 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -551,6 +551,8 @@ void btc_dpm_fini(struct radeon_device *rdev);
 u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low);
 u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low);
 bool btc_dpm_vblank_too_short(struct radeon_device *rdev);
+void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						     struct seq_file *m);
 int sumo_dpm_init(struct radeon_device *rdev);
 int sumo_dpm_enable(struct radeon_device *rdev);
 int sumo_dpm_late_enable(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 485848f889f5..fa9a9c02751e 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -219,7 +219,8 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
 	memcpy(&output, info->buffer.pointer, size);
 
 	/* TODO: check version? */
-	printk("ATPX version %u\n", output.version);
+	printk("ATPX version %u, functions 0x%08x\n",
+	       output.version, output.function_bits);
 
 	radeon_atpx_parse_functions(&atpx->functions, output.function_bits);
 
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b012cbbc3ed5..044bc98fb459 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1521,13 +1521,16 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 	if (r)
 		DRM_ERROR("ib ring test failed (%d).\n", r);
 
-	if (rdev->pm.dpm_enabled) {
+	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
 		/* do dpm late init */
 		r = radeon_pm_late_init(rdev);
 		if (r) {
 			rdev->pm.dpm_enabled = false;
 			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
 		}
+	} else {
+		/* resume old pm late */
+		radeon_pm_resume(rdev);
 	}
 
 	radeon_restore_bios_scratch_regs(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index d680608f6f5b..fbd8b930f2be 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -571,6 +571,8 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
 		radeon_crtc->max_cursor_width = CURSOR_WIDTH;
 		radeon_crtc->max_cursor_height = CURSOR_HEIGHT;
 	}
+	dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
+	dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;
 
 #if 0
 	radeon_crtc->mode_set.crtc = &radeon_crtc->base;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index ec8c388eec17..84a1bbb75f91 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -78,9 +78,10 @@
  * 2.34.0 - Add CIK tiling mode array query
  * 2.35.0 - Add CIK macrotile mode array query
  * 2.36.0 - Fix CIK DCE tiling setup
+ * 2.37.0 - allow GS ring setup on r6xx/r7xx
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	36
+#define KMS_DRIVER_MINOR	37
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 114d1672d616..66ed3ea71440 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -33,6 +33,13 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_is_px(void);
+#else
+static inline bool radeon_is_px(void) { return false; }
+#endif
+
 /**
  * radeon_driver_unload_kms - Main unload function for KMS.
  *
@@ -130,7 +137,8 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 			"Error during ACPI methods call\n");
 	}
 
-	if (radeon_runtime_pm != 0) {
+	if ((radeon_runtime_pm == 1) ||
+	    ((radeon_runtime_pm == -1) && radeon_is_px())) {
 		pm_runtime_use_autosuspend(dev->dev);
 		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
 		pm_runtime_set_active(dev->dev);
@@ -537,6 +545,10 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 
 		radeon_vm_init(rdev, &fpriv->vm);
 
+		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+		if (r)
+			return r;
+
 		/* map the ib pool buffer read only into
 		 * virtual address space */
 		bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
@@ -544,6 +556,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 		r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
 					  RADEON_VM_PAGE_READABLE |
 					  RADEON_VM_PAGE_SNOOPED);
+
+		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
 		if (r) {
 			radeon_vm_fini(rdev, &fpriv->vm);
 			kfree(fpriv);
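The radeon_kms.c open path needs the IB pool buffer object reserved before radeon_vm_bo_set_addr() touches it, hence the new reserve/unreserve bracket; note the unreserve runs on both the success and the failure path. Sketch of the bracket, with plain functions standing in for BO reservation:

    /* sketch: reserve -> modify -> unreserve, releasing on every path */
    #include <stdio.h>

    static int  bo_reserve(void)     { puts("bo reserved");   return 0; }
    static void bo_unreserve(void)   { puts("bo unreserved"); }
    static int  vm_bo_set_addr(void) { puts("va mapped");     return 0; }

    int main(void)
    {
        int r = bo_reserve();
        if (r)
            return r;
        r = vm_bo_set_addr(); /* must run with the BO reserved */
        bo_unreserve();       /* drop the reservation, success or not */
        return r;
    }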
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 1b783f0e6d3a..15e44a7281ab 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -139,7 +139,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
 	}
 
 	/* 64 dwords should be enough for fence too */
-	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
+	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
 	if (r) {
 		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
 		return r;
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 2b42aa1914f2..9006b32d5eed 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -34,14 +34,15 @@
 int radeon_semaphore_create(struct radeon_device *rdev,
 			    struct radeon_semaphore **semaphore)
 {
+	uint32_t *cpu_addr;
 	int i, r;
 
 	*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
 	if (*semaphore == NULL) {
 		return -ENOMEM;
 	}
-	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
-			     &(*semaphore)->sa_bo, 8, 8, true);
+	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo,
+			     8 * RADEON_NUM_SYNCS, 8, true);
 	if (r) {
 		kfree(*semaphore);
 		*semaphore = NULL;
@@ -49,7 +50,10 @@ int radeon_semaphore_create(struct radeon_device *rdev,
 	}
 	(*semaphore)->waiters = 0;
 	(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
-	*((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
+
+	cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo);
+	for (i = 0; i < RADEON_NUM_SYNCS; ++i)
+		cpu_addr[i] = 0;
 
 	for (i = 0; i < RADEON_NUM_RINGS; ++i)
 		(*semaphore)->sync_to[i] = NULL;
@@ -125,6 +129,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 				struct radeon_semaphore *semaphore,
 				int ring)
 {
+	unsigned count = 0;
 	int i, r;
 
 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -140,6 +145,12 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 			return -EINVAL;
 		}
 
+		if (++count > RADEON_NUM_SYNCS) {
+			/* not enough room, wait manually */
+			radeon_fence_wait_locked(fence);
+			continue;
+		}
+
 		/* allocate enough space for sync command */
 		r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
 		if (r) {
@@ -164,6 +175,8 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 
 		radeon_ring_commit(rdev, &rdev->ring[i]);
 		radeon_fence_note_sync(fence, ring);
+
+		semaphore->gpu_addr += 8;
 	}
 
 	return 0;
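Tying the semaphore changes together: an allocation now carries RADEON_NUM_SYNCS eight-byte slots, each synced ring consumes one (gpu_addr += 8), and anything past the cap falls back to a blocking CPU wait, which is what the new radeon.h comment means by "falling back on blocking". Sketch of the slot hand-out, with a made-up base address:

    /* sketch: hand out fixed 8-byte sync slots, then fall back to waiting */
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_SYNCS 4

    int main(void)
    {
        uint64_t gpu_addr = 0x1000; /* hypothetical base of the semaphore BO */
        unsigned count = 0;

        for (int ring = 0; ring < 6; ++ring) {
            if (++count > NUM_SYNCS) {
                printf("ring %d: no slot left, CPU wait\n", ring);
                continue;
            }
            printf("ring %d: semaphore slot at 0x%llx\n",
                   ring, (unsigned long long)gpu_addr);
            gpu_addr += 8; /* next sync uses the next slot */
        }
        return 0;
    }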
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 77f5b0c3edb8..040a2a10ea17 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -714,6 +714,9 @@ int radeon_ttm_init(struct radeon_device *rdev)
 		DRM_ERROR("Failed initializing VRAM heap.\n");
 		return r;
 	}
+	/* Change the size here instead of the init above so only lpfn is affected */
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
 	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
 			     RADEON_GEM_DOMAIN_VRAM,
 			     NULL, &rdev->stollen_vga_memory);
@@ -935,7 +938,7 @@ static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
 	while (size) {
 		loff_t p = *pos / PAGE_SIZE;
 		unsigned off = *pos & ~PAGE_MASK;
-		ssize_t cur_size = min(size, PAGE_SIZE - off);
+		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
 		struct page *page;
 		void *ptr;
 
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 6781fee1eaad..3e6804b2b2ef 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -171,6 +171,8 @@ void radeon_uvd_fini(struct radeon_device *rdev)
 
 	radeon_bo_unref(&rdev->uvd.vcpu_bo);
 
+	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);
+
 	release_firmware(rdev->uvd_fw);
 }
 
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 20bfbda7b3f1..ec0c6829c1dc 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -18,6 +18,7 @@ r600 0x9400
 0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
 0x00028A40 VGT_GS_MODE
 0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028B38 VGT_GS_MAX_VERT_OUT
 0x000088C8 VGT_GS_PER_ES
 0x000088E8 VGT_GS_PER_VS
 0x000088D4 VGT_GS_VERTEX_REUSE
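These reg_srcs lists are, as I understand it, the input for the generated safe-register tables the CS checker consults; whitelisting VGT_GS_MAX_VERT_OUT is what lets the new GS state from userspace through. A toy allow-list check using the two registers above (the real table is a generated bitmap, not a linear scan):

    /* sketch: check a register offset against an allow-list */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    static const uint32_t safe_regs[] = {
        0x00028A40, /* VGT_GS_MODE */
        0x00028B38, /* VGT_GS_MAX_VERT_OUT */
    };

    static bool reg_is_safe(uint32_t reg)
    {
        for (unsigned i = 0; i < sizeof(safe_regs) / sizeof(safe_regs[0]); i++)
            if (safe_regs[i] == reg)
                return true;
        return false;
    }

    int main(void)
    {
        printf("0x28B38 allowed: %d\n", reg_is_safe(0x00028B38));
        return 0;
    }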
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index b5c2369cda2f..130d5cc50d43 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -474,8 +474,6 @@ int rs400_resume(struct radeon_device *rdev)
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
-	radeon_pm_resume(rdev);
-
 	rdev->accel_working = true;
 	r = rs400_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index fdcde7693032..72d3616de08e 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -1048,8 +1048,6 @@ int rs600_resume(struct radeon_device *rdev)
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
-	radeon_pm_resume(rdev);
-
 	rdev->accel_working = true;
 	r = rs600_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 35950738bd5e..3462b64369bf 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -756,8 +756,6 @@ int rs690_resume(struct radeon_device *rdev)
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
-	radeon_pm_resume(rdev);
-
 	rdev->accel_working = true;
 	r = rs690_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 98e8138ff779..237dd29d9f1c 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -586,8 +586,6 @@ int rv515_resume(struct radeon_device *rdev)
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 
-	radeon_pm_resume(rdev);
-
 	rdev->accel_working = true;
 	r = rv515_startup(rdev);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 6c772e58c784..fef310773aad 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1811,7 +1811,8 @@ int rv770_resume(struct radeon_device *rdev)
 	/* init golden registers */
 	rv770_init_golden_registers(rdev);
 
-	radeon_pm_resume(rdev);
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume(rdev);
 
 	rdev->accel_working = true;
 	r = rv770_startup(rdev);
@@ -1955,9 +1956,9 @@ void rv770_fini(struct radeon_device *rdev)
 	radeon_wb_fini(rdev);
 	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
-	rv770_pcie_gart_fini(rdev);
 	uvd_v1_0_fini(rdev);
 	radeon_uvd_fini(rdev);
+	rv770_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 80c595aba359..b5f63f5e22a3 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2174,7 +2174,6 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
 	struct rv7xx_ps *ps = rv770_get_ps(rps);
 	u32 sclk, mclk;
-	u16 vddc;
 	struct rv7xx_pl *pl;
 
 	switch (index) {
@@ -2214,8 +2213,8 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
 
 	/* patch up vddc if necessary */
 	if (pl->vddc == 0xff01) {
-		if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
-			pl->vddc = vddc;
+		if (pi->max_vddc)
+			pl->vddc = pi->max_vddc;
 	}
 
 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
@@ -2527,14 +2526,7 @@ u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low)
 bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
 {
 	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
-	u32 switch_limit = 300;
-
-	/* quirks */
-	/* ASUS K70AF */
-	if ((rdev->pdev->device == 0x9553) &&
-	    (rdev->pdev->subsystem_vendor == 0x1043) &&
-	    (rdev->pdev->subsystem_device == 0x1c42))
-		switch_limit = 200;
+	u32 switch_limit = 200; /* 300 */
 
 	/* RV770 */
 	/* mclk switching doesn't seem to work reliably on desktop RV770s */
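With the ASUS K70AF quirk folded into the default, every rv7xx ASIC now gets the stricter 200 us limit: memory reclocks have to complete inside vertical blank, so the check compares blank duration against that latency. A sketch of the vblank-time arithmetic with ordinary 1080p60 timings; the formula is my reconstruction of what r600_dpm_get_vblank_time() computes, not a quote of it:

    /* sketch: is the vertical blank long enough for an mclk switch? */
    #include <stdio.h>
    #include <stdbool.h>

    int main(void)
    {
        unsigned crtc_h_total = 2200, vdisplay = 1080, vtotal = 1125;
        unsigned pixel_clock_khz = 148500;
        unsigned switch_limit_us = 200;

        /* time for the (vtotal - vdisplay) blank lines, in microseconds */
        unsigned vblank_us = (unsigned long long)(vtotal - vdisplay) *
                             crtc_h_total * 1000ULL / pixel_clock_khz;

        bool too_short = vblank_us < switch_limit_us;
        printf("vblank: %u us -> mclk switch %s\n",
               vblank_us, too_short ? "unsafe" : "ok");
        return 0;
    }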
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 09ec4f6c53bb..9a124d0608b3 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6338,6 +6338,10 @@ restart_ih:
 			break;
 		}
 		break;
+	case 124: /* UVD */
+		DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+		radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+		break;
 	case 146:
 	case 147:
 		addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
@@ -6614,7 +6618,8 @@ int si_resume(struct radeon_device *rdev)
 	/* init golden registers */
 	si_init_golden_registers(rdev);
 
-	radeon_pm_resume(rdev);
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume(rdev);
 
 	rdev->accel_working = true;
 	r = si_startup(rdev);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 0471501338fb..0a2f5b4bca43 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2395,7 +2395,7 @@ static int si_populate_sq_ramping_values(struct radeon_device *rdev,
 	if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
 		enable_sq_ramping = false;
 
-	if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+	if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
 		enable_sq_ramping = false;
 
 	for (i = 0; i < state->performance_level_count; i++) {
@@ -6472,7 +6472,8 @@ void si_dpm_fini(struct radeon_device *rdev)
 void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
 						    struct seq_file *m)
 {
-	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *rps = &eg_pi->current_rps;
 	struct ni_ps *ps = ni_get_ps(rps);
 	struct rv7xx_pl *pl;
 	u32 current_index =
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index f121efe12dc5..8b47b3cd0357 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1807,7 +1807,7 @@ void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev
 						      struct seq_file *m)
 {
 	struct sumo_power_info *pi = sumo_get_pi(rdev);
-	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct radeon_ps *rps = &pi->current_rps;
 	struct sumo_ps *ps = sumo_get_ps(rps);
 	struct sumo_pl *pl;
 	u32 current_index =
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 2d447192d6f7..2da0e17eb960 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1926,7 +1926,8 @@ void trinity_dpm_print_power_state(struct radeon_device *rdev,
 void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
 							  struct seq_file *m)
 {
-	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	struct radeon_ps *rps = &pi->current_rps;
 	struct trinity_ps *ps = trinity_get_ps(rps);
 	struct trinity_pl *pl;
 	u32 current_index =
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index 824550db3fed..d1771004cb52 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -57,7 +57,6 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, 0);
 	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
 	radeon_ring_write(ring, 2);
-	return;
 }
 
 /**
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 88a529008ce0..c71594754f46 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c | |||
| @@ -104,7 +104,7 @@ static void tegra_drm_context_free(struct tegra_drm_context *context) | |||
| 104 | 104 | ||
| 105 | static void tegra_drm_lastclose(struct drm_device *drm) | 105 | static void tegra_drm_lastclose(struct drm_device *drm) |
| 106 | { | 106 | { |
| 107 | #ifdef CONFIG_TEGRA_DRM_FBDEV | 107 | #ifdef CONFIG_DRM_TEGRA_FBDEV |
| 108 | struct tegra_drm *tegra = drm->dev_private; | 108 | struct tegra_drm *tegra = drm->dev_private; |
| 109 | 109 | ||
| 110 | tegra_fbdev_restore_mode(tegra->fbdev); | 110 | tegra_fbdev_restore_mode(tegra->fbdev); |
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c index 338f7f6561d7..0266fb40479e 100644 --- a/drivers/gpu/drm/tegra/rgb.c +++ b/drivers/gpu/drm/tegra/rgb.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | struct tegra_rgb { | 15 | struct tegra_rgb { |
| 16 | struct tegra_output output; | 16 | struct tegra_output output; |
| 17 | struct tegra_dc *dc; | 17 | struct tegra_dc *dc; |
| 18 | bool enabled; | ||
| 18 | 19 | ||
| 19 | struct clk *clk_parent; | 20 | struct clk *clk_parent; |
| 20 | struct clk *clk; | 21 | struct clk *clk; |
| @@ -89,6 +90,9 @@ static int tegra_output_rgb_enable(struct tegra_output *output) | |||
| 89 | struct tegra_rgb *rgb = to_rgb(output); | 90 | struct tegra_rgb *rgb = to_rgb(output); |
| 90 | unsigned long value; | 91 | unsigned long value; |
| 91 | 92 | ||
| 93 | if (rgb->enabled) | ||
| 94 | return 0; | ||
| 95 | |||
| 92 | tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable)); | 96 | tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable)); |
| 93 | 97 | ||
| 94 | value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL; | 98 | value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL; |
| @@ -122,6 +126,8 @@ static int tegra_output_rgb_enable(struct tegra_output *output) | |||
| 122 | tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); | 126 | tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); |
| 123 | tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); | 127 | tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); |
| 124 | 128 | ||
| 129 | rgb->enabled = true; | ||
| 130 | |||
| 125 | return 0; | 131 | return 0; |
| 126 | } | 132 | } |
| 127 | 133 | ||
| @@ -130,6 +136,9 @@ static int tegra_output_rgb_disable(struct tegra_output *output) | |||
| 130 | struct tegra_rgb *rgb = to_rgb(output); | 136 | struct tegra_rgb *rgb = to_rgb(output); |
| 131 | unsigned long value; | 137 | unsigned long value; |
| 132 | 138 | ||
| 139 | if (!rgb->enabled) | ||
| 140 | return 0; | ||
| 141 | |||
| 133 | value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL); | 142 | value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL); |
| 134 | value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | | 143 | value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | |
| 135 | PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); | 144 | PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); |
| @@ -144,6 +153,8 @@ static int tegra_output_rgb_disable(struct tegra_output *output) | |||
| 144 | 153 | ||
| 145 | tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable)); | 154 | tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable)); |
| 146 | 155 | ||
| 156 | rgb->enabled = false; | ||
| 157 | |||
| 147 | return 0; | 158 | return 0; |
| 148 | } | 159 | } |
| 149 | 160 | ||
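The rgb.c hunks add an enabled flag so enable and disable become idempotent: enabling an output that is already on (say, once for the boot console and again on the first modeset) or disabling one that is already off returns early instead of reprogramming the display controller. A sketch of the pattern under that assumption:

#include <stdbool.h>
#include <stdio.h>

struct output {
        bool enabled;
};

static int output_enable(struct output *out)
{
        if (out->enabled)
                return 0;       /* already on: nothing to redo */

        printf("programming enable registers\n");
        out->enabled = true;
        return 0;
}

static int output_disable(struct output *out)
{
        if (!out->enabled)
                return 0;       /* already off */

        printf("gating power and clocks\n");
        out->enabled = false;
        return 0;
}

int main(void)
{
        struct output out = { .enabled = false };

        output_enable(&out);
        output_enable(&out);    /* harmless no-op */
        output_disable(&out);
        return 0;
}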
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index 3302f99e7497..764be36397fd 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c | |||
| @@ -126,6 +126,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev, | |||
| 126 | agp_be->ttm.func = &ttm_agp_func; | 126 | agp_be->ttm.func = &ttm_agp_func; |
| 127 | 127 | ||
| 128 | if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) { | 128 | if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) { |
| 129 | kfree(agp_be); | ||
| 129 | return NULL; | 130 | return NULL; |
| 130 | } | 131 | } |
| 131 | 132 | ||
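The one-line ttm_agp_backend.c change plugs a leak: ttm_agp_tt_create() owns the agp_be allocation until ttm_tt_init() succeeds, so the failure path must free it before returning NULL. The shape of the fix in a standalone sketch (the backend_* names are stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct backend {
        int dummy;
};

static int backend_init(struct backend *be)
{
        (void)be;
        return -1;              /* simulate ttm_tt_init() failing */
}

static struct backend *backend_create(void)
{
        struct backend *be = malloc(sizeof(*be));

        if (!be)
                return NULL;

        if (backend_init(be)) {
                free(be);       /* the fix: release on the error path */
                return NULL;
        }
        return be;
}

int main(void)
{
        if (!backend_create())
                printf("create failed, allocation released\n");
        return 0;
}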
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index a06651309388..214b7992a3aa 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
| @@ -351,9 +351,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, | |||
| 351 | 351 | ||
| 352 | moved: | 352 | moved: |
| 353 | if (bo->evicted) { | 353 | if (bo->evicted) { |
| 354 | ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); | 354 | if (bdev->driver->invalidate_caches) { |
| 355 | if (ret) | 355 | ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); |
| 356 | pr_err("Can not flush read caches\n"); | 356 | if (ret) |
| 357 | pr_err("Can not flush read caches\n"); | ||
| 358 | } | ||
| 357 | bo->evicted = false; | 359 | bo->evicted = false; |
| 358 | } | 360 | } |
| 359 | 361 | ||
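ttm_bo_handle_move_mem() now treats the driver's invalidate_caches hook as optional and calls it only when the driver fills it in, rather than assuming every driver provides one. The same optional-callback pattern in plain C:

#include <stdio.h>

struct driver_ops {
        /* Optional: drivers with coherent caches leave this NULL. */
        int (*invalidate_caches)(int placement);
};

static void handle_move(const struct driver_ops *ops, int placement)
{
        if (ops->invalidate_caches) {
                if (ops->invalidate_caches(placement))
                        fprintf(stderr, "Can not flush read caches\n");
        }
}

int main(void)
{
        struct driver_ops ops = { .invalidate_caches = NULL };

        handle_move(&ops, 0);   /* safe even without the hook */
        return 0;
}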
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 801231c9ae48..0ce48e5a9cb4 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c | |||
| @@ -339,11 +339,13 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, | |||
| 339 | vma->vm_private_data = bo; | 339 | vma->vm_private_data = bo; |
| 340 | 340 | ||
| 341 | /* | 341 | /* |
| 342 | * PFNMAP is faster than MIXEDMAP due to reduced page | 342 | * We'd like to use VM_PFNMAP on shared mappings, where |
| 343 | * administration. So use MIXEDMAP only if private VMA, where | 343 | * (vma->vm_flags & VM_SHARED) != 0, for performance reasons, |
| 344 | * we need to support COW. | 344 | * but for some reason VM_PFNMAP + x86 PAT + write-combine is very |
| 345 | * bad for performance. Until that has been sorted out, use | ||
| 346 | * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719 | ||
| 345 | */ | 347 | */ |
| 346 | vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP; | 348 | vma->vm_flags |= VM_MIXEDMAP; |
| 347 | vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; | 349 | vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; |
| 348 | return 0; | 350 | return 0; |
| 349 | out_unref: | 351 | out_unref: |
| @@ -359,7 +361,7 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo) | |||
| 359 | 361 | ||
| 360 | vma->vm_ops = &ttm_bo_vm_ops; | 362 | vma->vm_ops = &ttm_bo_vm_ops; |
| 361 | vma->vm_private_data = ttm_bo_reference(bo); | 363 | vma->vm_private_data = ttm_bo_reference(bo); |
| 362 | vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP; | 364 | vma->vm_flags |= VM_MIXEDMAP; |
| 363 | vma->vm_flags |= VM_IO | VM_DONTEXPAND; | 365 | vma->vm_flags |= VM_IO | VM_DONTEXPAND; |
| 364 | return 0; | 366 | return 0; |
| 365 | } | 367 | } |
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c index 37079859afc8..53b51c4e671a 100644 --- a/drivers/gpu/drm/ttm/ttm_object.c +++ b/drivers/gpu/drm/ttm/ttm_object.c | |||
| @@ -292,7 +292,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, | |||
| 292 | 292 | ||
| 293 | if (ret == 0) { | 293 | if (ret == 0) { |
| 294 | ref = drm_hash_entry(hash, struct ttm_ref_object, hash); | 294 | ref = drm_hash_entry(hash, struct ttm_ref_object, hash); |
| 295 | if (!kref_get_unless_zero(&ref->kref)) { | 295 | if (kref_get_unless_zero(&ref->kref)) { |
| 296 | rcu_read_unlock(); | 296 | rcu_read_unlock(); |
| 297 | break; | 297 | break; |
| 298 | } | 298 | } |
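The ttm_object fix inverts a refcount test: kref_get_unless_zero() returning true means an existing reference was taken and the lookup may stop; false means the object's count already hit zero, it is being torn down, and the caller must retry rather than resurrect it. A standalone model of get-unless-zero using C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
        atomic_int refcount;
};

/* Take a reference only while the count is nonzero; never move
 * an object from 0 back to 1. */
static bool get_unless_zero(struct obj *o)
{
        int c = atomic_load(&o->refcount);

        while (c != 0) {
                if (atomic_compare_exchange_weak(&o->refcount, &c, c + 1))
                        return true;
        }
        return false;
}

int main(void)
{
        struct obj live  = { .refcount = 1 };
        struct obj dying = { .refcount = 0 };

        /* The corrected sense: success -> reuse the entry,
         * failure -> retry the lookup. */
        printf("live:  %s\n", get_unless_zero(&live)  ? "reuse" : "retry");
        printf("dying: %s\n", get_unless_zero(&dying) ? "reuse" : "retry");
        return 0;
}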
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 9af99084b344..75f319090043 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
| @@ -380,6 +380,9 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm) | |||
| 380 | pgoff_t i; | 380 | pgoff_t i; |
| 381 | struct page **page = ttm->pages; | 381 | struct page **page = ttm->pages; |
| 382 | 382 | ||
| 383 | if (ttm->page_flags & TTM_PAGE_FLAG_SG) | ||
| 384 | return; | ||
| 385 | |||
| 383 | for (i = 0; i < ttm->num_pages; ++i) { | 386 | for (i = 0; i < ttm->num_pages; ++i) { |
| 384 | (*page)->mapping = NULL; | 387 | (*page)->mapping = NULL; |
| 385 | (*page++)->index = 0; | 388 | (*page++)->index = 0; |
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h index d95335cb90bd..f58dc7dd15c5 100644 --- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h +++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h | |||
| @@ -261,12 +261,7 @@ typedef enum SVGA3dSurfaceFormat { | |||
| 261 | /* Planar video formats. */ | 261 | /* Planar video formats. */ |
| 262 | SVGA3D_YV12 = 121, | 262 | SVGA3D_YV12 = 121, |
| 263 | 263 | ||
| 264 | /* Shader constant formats. */ | 264 | SVGA3D_FORMAT_MAX = 122, |
| 265 | SVGA3D_SURFACE_SHADERCONST_FLOAT = 122, | ||
| 266 | SVGA3D_SURFACE_SHADERCONST_INT = 123, | ||
| 267 | SVGA3D_SURFACE_SHADERCONST_BOOL = 124, | ||
| 268 | |||
| 269 | SVGA3D_FORMAT_MAX = 125, | ||
| 270 | } SVGA3dSurfaceFormat; | 265 | } SVGA3dSurfaceFormat; |
| 271 | 266 | ||
| 272 | typedef uint32 SVGA3dColor; /* a, r, g, b */ | 267 | typedef uint32 SVGA3dColor; /* a, r, g, b */ |
| @@ -1223,9 +1218,19 @@ typedef enum { | |||
| 1223 | #define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129 | 1218 | #define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129 |
| 1224 | 1219 | ||
| 1225 | #define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130 | 1220 | #define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130 |
| 1226 | 1221 | #define SVGA_3D_CMD_GB_SCREEN_DMA 1131 | |
| 1222 | #define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH 1132 | ||
| 1223 | #define SVGA_3D_CMD_GB_MOB_FENCE 1133 | ||
| 1224 | #define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 1134 | ||
| 1227 | #define SVGA_3D_CMD_DEFINE_GB_MOB64 1135 | 1225 | #define SVGA_3D_CMD_DEFINE_GB_MOB64 1135 |
| 1228 | #define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136 | 1226 | #define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136 |
| 1227 | #define SVGA_3D_CMD_NOP_ERROR 1137 | ||
| 1228 | |||
| 1229 | #define SVGA_3D_CMD_RESERVED1 1138 | ||
| 1230 | #define SVGA_3D_CMD_RESERVED2 1139 | ||
| 1231 | #define SVGA_3D_CMD_RESERVED3 1140 | ||
| 1232 | #define SVGA_3D_CMD_RESERVED4 1141 | ||
| 1233 | #define SVGA_3D_CMD_RESERVED5 1142 | ||
| 1229 | 1234 | ||
| 1230 | #define SVGA_3D_CMD_MAX 1142 | 1235 | #define SVGA_3D_CMD_MAX 1142 |
| 1231 | #define SVGA_3D_CMD_FUTURE_MAX 3000 | 1236 | #define SVGA_3D_CMD_FUTURE_MAX 3000 |
| @@ -1973,8 +1978,7 @@ struct { | |||
| 1973 | uint32 sizeInBytes; | 1978 | uint32 sizeInBytes; |
| 1974 | uint32 validSizeInBytes; | 1979 | uint32 validSizeInBytes; |
| 1975 | SVGAMobFormat ptDepth; | 1980 | SVGAMobFormat ptDepth; |
| 1976 | } | 1981 | } __packed |
| 1977 | __attribute__((__packed__)) | ||
| 1978 | SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */ | 1982 | SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */ |
| 1979 | 1983 | ||
| 1980 | typedef | 1984 | typedef |
| @@ -1984,15 +1988,13 @@ struct { | |||
| 1984 | uint32 sizeInBytes; | 1988 | uint32 sizeInBytes; |
| 1985 | uint32 validSizeInBytes; | 1989 | uint32 validSizeInBytes; |
| 1986 | SVGAMobFormat ptDepth; | 1990 | SVGAMobFormat ptDepth; |
| 1987 | } | 1991 | } __packed |
| 1988 | __attribute__((__packed__)) | ||
| 1989 | SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */ | 1992 | SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */ |
| 1990 | 1993 | ||
| 1991 | typedef | 1994 | typedef |
| 1992 | struct { | 1995 | struct { |
| 1993 | SVGAOTableType type; | 1996 | SVGAOTableType type; |
| 1994 | } | 1997 | } __packed |
| 1995 | __attribute__((__packed__)) | ||
| 1996 | SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */ | 1998 | SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */ |
| 1997 | 1999 | ||
| 1998 | /* | 2000 | /* |
| @@ -2005,8 +2007,7 @@ struct SVGA3dCmdDefineGBMob { | |||
| 2005 | SVGAMobFormat ptDepth; | 2007 | SVGAMobFormat ptDepth; |
| 2006 | PPN base; | 2008 | PPN base; |
| 2007 | uint32 sizeInBytes; | 2009 | uint32 sizeInBytes; |
| 2008 | } | 2010 | } __packed |
| 2009 | __attribute__((__packed__)) | ||
| 2010 | SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ | 2011 | SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ |
| 2011 | 2012 | ||
| 2012 | 2013 | ||
| @@ -2017,8 +2018,7 @@ SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ | |||
| 2017 | typedef | 2018 | typedef |
| 2018 | struct SVGA3dCmdDestroyGBMob { | 2019 | struct SVGA3dCmdDestroyGBMob { |
| 2019 | SVGAMobId mobid; | 2020 | SVGAMobId mobid; |
| 2020 | } | 2021 | } __packed |
| 2021 | __attribute__((__packed__)) | ||
| 2022 | SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */ | 2022 | SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */ |
| 2023 | 2023 | ||
| 2024 | /* | 2024 | /* |
| @@ -2031,8 +2031,7 @@ struct SVGA3dCmdRedefineGBMob { | |||
| 2031 | SVGAMobFormat ptDepth; | 2031 | SVGAMobFormat ptDepth; |
| 2032 | PPN base; | 2032 | PPN base; |
| 2033 | uint32 sizeInBytes; | 2033 | uint32 sizeInBytes; |
| 2034 | } | 2034 | } __packed |
| 2035 | __attribute__((__packed__)) | ||
| 2036 | SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */ | 2035 | SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */ |
| 2037 | 2036 | ||
| 2038 | /* | 2037 | /* |
| @@ -2045,8 +2044,7 @@ struct SVGA3dCmdDefineGBMob64 { | |||
| 2045 | SVGAMobFormat ptDepth; | 2044 | SVGAMobFormat ptDepth; |
| 2046 | PPN64 base; | 2045 | PPN64 base; |
| 2047 | uint32 sizeInBytes; | 2046 | uint32 sizeInBytes; |
| 2048 | } | 2047 | } __packed |
| 2049 | __attribute__((__packed__)) | ||
| 2050 | SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */ | 2048 | SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */ |
| 2051 | 2049 | ||
| 2052 | /* | 2050 | /* |
| @@ -2059,8 +2057,7 @@ struct SVGA3dCmdRedefineGBMob64 { | |||
| 2059 | SVGAMobFormat ptDepth; | 2057 | SVGAMobFormat ptDepth; |
| 2060 | PPN64 base; | 2058 | PPN64 base; |
| 2061 | uint32 sizeInBytes; | 2059 | uint32 sizeInBytes; |
| 2062 | } | 2060 | } __packed |
| 2063 | __attribute__((__packed__)) | ||
| 2064 | SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ | 2061 | SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ |
| 2065 | 2062 | ||
| 2066 | /* | 2063 | /* |
| @@ -2070,8 +2067,7 @@ SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ | |||
| 2070 | typedef | 2067 | typedef |
| 2071 | struct SVGA3dCmdUpdateGBMobMapping { | 2068 | struct SVGA3dCmdUpdateGBMobMapping { |
| 2072 | SVGAMobId mobid; | 2069 | SVGAMobId mobid; |
| 2073 | } | 2070 | } __packed |
| 2074 | __attribute__((__packed__)) | ||
| 2075 | SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */ | 2071 | SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */ |
| 2076 | 2072 | ||
| 2077 | /* | 2073 | /* |
| @@ -2087,7 +2083,8 @@ struct SVGA3dCmdDefineGBSurface { | |||
| 2087 | uint32 multisampleCount; | 2083 | uint32 multisampleCount; |
| 2088 | SVGA3dTextureFilter autogenFilter; | 2084 | SVGA3dTextureFilter autogenFilter; |
| 2089 | SVGA3dSize size; | 2085 | SVGA3dSize size; |
| 2090 | } SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ | 2086 | } __packed |
| 2087 | SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ | ||
| 2091 | 2088 | ||
| 2092 | /* | 2089 | /* |
| 2093 | * Destroy a guest-backed surface. | 2090 | * Destroy a guest-backed surface. |
| @@ -2096,7 +2093,8 @@ struct SVGA3dCmdDefineGBSurface { | |||
| 2096 | typedef | 2093 | typedef |
| 2097 | struct SVGA3dCmdDestroyGBSurface { | 2094 | struct SVGA3dCmdDestroyGBSurface { |
| 2098 | uint32 sid; | 2095 | uint32 sid; |
| 2099 | } SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ | 2096 | } __packed |
| 2097 | SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ | ||
| 2100 | 2098 | ||
| 2101 | /* | 2099 | /* |
| 2102 | * Bind a guest-backed surface to an object. | 2100 | * Bind a guest-backed surface to an object. |
| @@ -2106,7 +2104,8 @@ typedef | |||
| 2106 | struct SVGA3dCmdBindGBSurface { | 2104 | struct SVGA3dCmdBindGBSurface { |
| 2107 | uint32 sid; | 2105 | uint32 sid; |
| 2108 | SVGAMobId mobid; | 2106 | SVGAMobId mobid; |
| 2109 | } SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ | 2107 | } __packed |
| 2108 | SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ | ||
| 2110 | 2109 | ||
| 2111 | /* | 2110 | /* |
| 2112 | * Conditionally bind a mob to a guest backed surface if testMobid | 2111 | * Conditionally bind a mob to a guest backed surface if testMobid |
| @@ -2123,7 +2122,7 @@ struct{ | |||
| 2123 | SVGAMobId testMobid; | 2122 | SVGAMobId testMobid; |
| 2124 | SVGAMobId mobid; | 2123 | SVGAMobId mobid; |
| 2125 | uint32 flags; | 2124 | uint32 flags; |
| 2126 | } | 2125 | } __packed |
| 2127 | SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */ | 2126 | SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */ |
| 2128 | 2127 | ||
| 2129 | /* | 2128 | /* |
| @@ -2135,7 +2134,8 @@ typedef | |||
| 2135 | struct SVGA3dCmdUpdateGBImage { | 2134 | struct SVGA3dCmdUpdateGBImage { |
| 2136 | SVGA3dSurfaceImageId image; | 2135 | SVGA3dSurfaceImageId image; |
| 2137 | SVGA3dBox box; | 2136 | SVGA3dBox box; |
| 2138 | } SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ | 2137 | } __packed |
| 2138 | SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ | ||
| 2139 | 2139 | ||
| 2140 | /* | 2140 | /* |
| 2141 | * Update an entire guest-backed surface. | 2141 | * Update an entire guest-backed surface. |
| @@ -2145,7 +2145,8 @@ struct SVGA3dCmdUpdateGBImage { | |||
| 2145 | typedef | 2145 | typedef |
| 2146 | struct SVGA3dCmdUpdateGBSurface { | 2146 | struct SVGA3dCmdUpdateGBSurface { |
| 2147 | uint32 sid; | 2147 | uint32 sid; |
| 2148 | } SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ | 2148 | } __packed |
| 2149 | SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ | ||
| 2149 | 2150 | ||
| 2150 | /* | 2151 | /* |
| 2151 | * Readback an image in a guest-backed surface. | 2152 | * Readback an image in a guest-backed surface. |
| @@ -2155,7 +2156,8 @@ struct SVGA3dCmdUpdateGBSurface { | |||
| 2155 | typedef | 2156 | typedef |
| 2156 | struct SVGA3dCmdReadbackGBImage { | 2157 | struct SVGA3dCmdReadbackGBImage { |
| 2157 | SVGA3dSurfaceImageId image; | 2158 | SVGA3dSurfaceImageId image; |
| 2158 | } SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/ | 2159 | } __packed |
| 2160 | SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/ | ||
| 2159 | 2161 | ||
| 2160 | /* | 2162 | /* |
| 2161 | * Readback an entire guest-backed surface. | 2163 | * Readback an entire guest-backed surface. |
| @@ -2165,7 +2167,8 @@ struct SVGA3dCmdReadbackGBImage { | |||
| 2165 | typedef | 2167 | typedef |
| 2166 | struct SVGA3dCmdReadbackGBSurface { | 2168 | struct SVGA3dCmdReadbackGBSurface { |
| 2167 | uint32 sid; | 2169 | uint32 sid; |
| 2168 | } SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ | 2170 | } __packed |
| 2171 | SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ | ||
| 2169 | 2172 | ||
| 2170 | /* | 2173 | /* |
| 2171 | * Readback a sub rect of an image in a guest-backed surface. After | 2174 | * Readback a sub rect of an image in a guest-backed surface. After |
| @@ -2179,7 +2182,7 @@ struct SVGA3dCmdReadbackGBImagePartial { | |||
| 2179 | SVGA3dSurfaceImageId image; | 2182 | SVGA3dSurfaceImageId image; |
| 2180 | SVGA3dBox box; | 2183 | SVGA3dBox box; |
| 2181 | uint32 invertBox; | 2184 | uint32 invertBox; |
| 2182 | } | 2185 | } __packed |
| 2183 | SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ | 2186 | SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ |
| 2184 | 2187 | ||
| 2185 | /* | 2188 | /* |
| @@ -2190,7 +2193,8 @@ SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ | |||
| 2190 | typedef | 2193 | typedef |
| 2191 | struct SVGA3dCmdInvalidateGBImage { | 2194 | struct SVGA3dCmdInvalidateGBImage { |
| 2192 | SVGA3dSurfaceImageId image; | 2195 | SVGA3dSurfaceImageId image; |
| 2193 | } SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ | 2196 | } __packed |
| 2197 | SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ | ||
| 2194 | 2198 | ||
| 2195 | /* | 2199 | /* |
| 2196 | * Invalidate an entire guest-backed surface. | 2200 | * Invalidate an entire guest-backed surface. |
| @@ -2200,7 +2204,8 @@ struct SVGA3dCmdInvalidateGBImage { | |||
| 2200 | typedef | 2204 | typedef |
| 2201 | struct SVGA3dCmdInvalidateGBSurface { | 2205 | struct SVGA3dCmdInvalidateGBSurface { |
| 2202 | uint32 sid; | 2206 | uint32 sid; |
| 2203 | } SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ | 2207 | } __packed |
| 2208 | SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ | ||
| 2204 | 2209 | ||
| 2205 | /* | 2210 | /* |
| 2206 | * Invalidate a sub rect of an image in a guest-backed surface. After | 2211 | * Invalidate a sub rect of an image in a guest-backed surface. After |
| @@ -2214,7 +2219,7 @@ struct SVGA3dCmdInvalidateGBImagePartial { | |||
| 2214 | SVGA3dSurfaceImageId image; | 2219 | SVGA3dSurfaceImageId image; |
| 2215 | SVGA3dBox box; | 2220 | SVGA3dBox box; |
| 2216 | uint32 invertBox; | 2221 | uint32 invertBox; |
| 2217 | } | 2222 | } __packed |
| 2218 | SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ | 2223 | SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ |
| 2219 | 2224 | ||
| 2220 | /* | 2225 | /* |
| @@ -2224,7 +2229,8 @@ SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ | |||
| 2224 | typedef | 2229 | typedef |
| 2225 | struct SVGA3dCmdDefineGBContext { | 2230 | struct SVGA3dCmdDefineGBContext { |
| 2226 | uint32 cid; | 2231 | uint32 cid; |
| 2227 | } SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ | 2232 | } __packed |
| 2233 | SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ | ||
| 2228 | 2234 | ||
| 2229 | /* | 2235 | /* |
| 2230 | * Destroy a guest-backed context. | 2236 | * Destroy a guest-backed context. |
| @@ -2233,7 +2239,8 @@ struct SVGA3dCmdDefineGBContext { | |||
| 2233 | typedef | 2239 | typedef |
| 2234 | struct SVGA3dCmdDestroyGBContext { | 2240 | struct SVGA3dCmdDestroyGBContext { |
| 2235 | uint32 cid; | 2241 | uint32 cid; |
| 2236 | } SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ | 2242 | } __packed |
| 2243 | SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ | ||
| 2237 | 2244 | ||
| 2238 | /* | 2245 | /* |
| 2239 | * Bind a guest-backed context. | 2246 | * Bind a guest-backed context. |
| @@ -2252,7 +2259,8 @@ struct SVGA3dCmdBindGBContext { | |||
| 2252 | uint32 cid; | 2259 | uint32 cid; |
| 2253 | SVGAMobId mobid; | 2260 | SVGAMobId mobid; |
| 2254 | uint32 validContents; | 2261 | uint32 validContents; |
| 2255 | } SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ | 2262 | } __packed |
| 2263 | SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ | ||
| 2256 | 2264 | ||
| 2257 | /* | 2265 | /* |
| 2258 | * Readback a guest-backed context. | 2266 | * Readback a guest-backed context. |
| @@ -2262,7 +2270,8 @@ struct SVGA3dCmdBindGBContext { | |||
| 2262 | typedef | 2270 | typedef |
| 2263 | struct SVGA3dCmdReadbackGBContext { | 2271 | struct SVGA3dCmdReadbackGBContext { |
| 2264 | uint32 cid; | 2272 | uint32 cid; |
| 2265 | } SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ | 2273 | } __packed |
| 2274 | SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ | ||
| 2266 | 2275 | ||
| 2267 | /* | 2276 | /* |
| 2268 | * Invalidate a guest-backed context. | 2277 | * Invalidate a guest-backed context. |
| @@ -2270,7 +2279,8 @@ struct SVGA3dCmdReadbackGBContext { | |||
| 2270 | typedef | 2279 | typedef |
| 2271 | struct SVGA3dCmdInvalidateGBContext { | 2280 | struct SVGA3dCmdInvalidateGBContext { |
| 2272 | uint32 cid; | 2281 | uint32 cid; |
| 2273 | } SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ | 2282 | } __packed |
| 2283 | SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ | ||
| 2274 | 2284 | ||
| 2275 | /* | 2285 | /* |
| 2276 | * Define a guest-backed shader. | 2286 | * Define a guest-backed shader. |
| @@ -2281,7 +2291,8 @@ struct SVGA3dCmdDefineGBShader { | |||
| 2281 | uint32 shid; | 2291 | uint32 shid; |
| 2282 | SVGA3dShaderType type; | 2292 | SVGA3dShaderType type; |
| 2283 | uint32 sizeInBytes; | 2293 | uint32 sizeInBytes; |
| 2284 | } SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ | 2294 | } __packed |
| 2295 | SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ | ||
| 2285 | 2296 | ||
| 2286 | /* | 2297 | /* |
| 2287 | * Bind a guest-backed shader. | 2298 | * Bind a guest-backed shader. |
| @@ -2291,7 +2302,8 @@ typedef struct SVGA3dCmdBindGBShader { | |||
| 2291 | uint32 shid; | 2302 | uint32 shid; |
| 2292 | SVGAMobId mobid; | 2303 | SVGAMobId mobid; |
| 2293 | uint32 offsetInBytes; | 2304 | uint32 offsetInBytes; |
| 2294 | } SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ | 2305 | } __packed |
| 2306 | SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ | ||
| 2295 | 2307 | ||
| 2296 | /* | 2308 | /* |
| 2297 | * Destroy a guest-backed shader. | 2309 | * Destroy a guest-backed shader. |
| @@ -2299,7 +2311,8 @@ typedef struct SVGA3dCmdBindGBShader { | |||
| 2299 | 2311 | ||
| 2300 | typedef struct SVGA3dCmdDestroyGBShader { | 2312 | typedef struct SVGA3dCmdDestroyGBShader { |
| 2301 | uint32 shid; | 2313 | uint32 shid; |
| 2302 | } SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ | 2314 | } __packed |
| 2315 | SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ | ||
| 2303 | 2316 | ||
| 2304 | typedef | 2317 | typedef |
| 2305 | struct { | 2318 | struct { |
| @@ -2314,14 +2327,16 @@ struct { | |||
| 2314 | * Note that FLOAT and INT constants are 4-dwords in length, while | 2327 | * Note that FLOAT and INT constants are 4-dwords in length, while |
| 2315 | * BOOL constants are 1-dword in length. | 2328 | * BOOL constants are 1-dword in length. |
| 2316 | */ | 2329 | */ |
| 2317 | } SVGA3dCmdSetGBShaderConstInline; | 2330 | } __packed |
| 2331 | SVGA3dCmdSetGBShaderConstInline; | ||
| 2318 | /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */ | 2332 | /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */ |
| 2319 | 2333 | ||
| 2320 | typedef | 2334 | typedef |
| 2321 | struct { | 2335 | struct { |
| 2322 | uint32 cid; | 2336 | uint32 cid; |
| 2323 | SVGA3dQueryType type; | 2337 | SVGA3dQueryType type; |
| 2324 | } SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ | 2338 | } __packed |
| 2339 | SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ | ||
| 2325 | 2340 | ||
| 2326 | typedef | 2341 | typedef |
| 2327 | struct { | 2342 | struct { |
| @@ -2329,7 +2344,8 @@ struct { | |||
| 2329 | SVGA3dQueryType type; | 2344 | SVGA3dQueryType type; |
| 2330 | SVGAMobId mobid; | 2345 | SVGAMobId mobid; |
| 2331 | uint32 offset; | 2346 | uint32 offset; |
| 2332 | } SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ | 2347 | } __packed |
| 2348 | SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ | ||
| 2333 | 2349 | ||
| 2334 | 2350 | ||
| 2335 | /* | 2351 | /* |
| @@ -2346,21 +2362,22 @@ struct { | |||
| 2346 | SVGA3dQueryType type; | 2362 | SVGA3dQueryType type; |
| 2347 | SVGAMobId mobid; | 2363 | SVGAMobId mobid; |
| 2348 | uint32 offset; | 2364 | uint32 offset; |
| 2349 | } SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ | 2365 | } __packed |
| 2366 | SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ | ||
| 2350 | 2367 | ||
| 2351 | typedef | 2368 | typedef |
| 2352 | struct { | 2369 | struct { |
| 2353 | SVGAMobId mobid; | 2370 | SVGAMobId mobid; |
| 2354 | uint32 fbOffset; | 2371 | uint32 fbOffset; |
| 2355 | uint32 initalized; | 2372 | uint32 initalized; |
| 2356 | } | 2373 | } __packed |
| 2357 | SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */ | 2374 | SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */ |
| 2358 | 2375 | ||
| 2359 | typedef | 2376 | typedef |
| 2360 | struct { | 2377 | struct { |
| 2361 | SVGAMobId mobid; | 2378 | SVGAMobId mobid; |
| 2362 | uint32 gartOffset; | 2379 | uint32 gartOffset; |
| 2363 | } | 2380 | } __packed |
| 2364 | SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */ | 2381 | SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */ |
| 2365 | 2382 | ||
| 2366 | 2383 | ||
| @@ -2368,7 +2385,7 @@ typedef | |||
| 2368 | struct { | 2385 | struct { |
| 2369 | uint32 gartOffset; | 2386 | uint32 gartOffset; |
| 2370 | uint32 numPages; | 2387 | uint32 numPages; |
| 2371 | } | 2388 | } __packed |
| 2372 | SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */ | 2389 | SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */ |
| 2373 | 2390 | ||
| 2374 | 2391 | ||
| @@ -2385,27 +2402,27 @@ struct { | |||
| 2385 | int32 xRoot; | 2402 | int32 xRoot; |
| 2386 | int32 yRoot; | 2403 | int32 yRoot; |
| 2387 | uint32 flags; | 2404 | uint32 flags; |
| 2388 | } | 2405 | } __packed |
| 2389 | SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */ | 2406 | SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */ |
| 2390 | 2407 | ||
| 2391 | typedef | 2408 | typedef |
| 2392 | struct { | 2409 | struct { |
| 2393 | uint32 stid; | 2410 | uint32 stid; |
| 2394 | } | 2411 | } __packed |
| 2395 | SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */ | 2412 | SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */ |
| 2396 | 2413 | ||
| 2397 | typedef | 2414 | typedef |
| 2398 | struct { | 2415 | struct { |
| 2399 | uint32 stid; | 2416 | uint32 stid; |
| 2400 | SVGA3dSurfaceImageId image; | 2417 | SVGA3dSurfaceImageId image; |
| 2401 | } | 2418 | } __packed |
| 2402 | SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */ | 2419 | SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */ |
| 2403 | 2420 | ||
| 2404 | typedef | 2421 | typedef |
| 2405 | struct { | 2422 | struct { |
| 2406 | uint32 stid; | 2423 | uint32 stid; |
| 2407 | SVGA3dBox box; | 2424 | SVGA3dBox box; |
| 2408 | } | 2425 | } __packed |
| 2409 | SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */ | 2426 | SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */ |
| 2410 | 2427 | ||
| 2411 | /* | 2428 | /* |
| @@ -2583,4 +2600,28 @@ typedef union { | |||
| 2583 | float f; | 2600 | float f; |
| 2584 | } SVGA3dDevCapResult; | 2601 | } SVGA3dDevCapResult; |
| 2585 | 2602 | ||
| 2603 | typedef enum { | ||
| 2604 | SVGA3DCAPS_RECORD_UNKNOWN = 0, | ||
| 2605 | SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100, | ||
| 2606 | SVGA3DCAPS_RECORD_DEVCAPS = 0x100, | ||
| 2607 | SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff, | ||
| 2608 | } SVGA3dCapsRecordType; | ||
| 2609 | |||
| 2610 | typedef | ||
| 2611 | struct SVGA3dCapsRecordHeader { | ||
| 2612 | uint32 length; | ||
| 2613 | SVGA3dCapsRecordType type; | ||
| 2614 | } | ||
| 2615 | SVGA3dCapsRecordHeader; | ||
| 2616 | |||
| 2617 | typedef | ||
| 2618 | struct SVGA3dCapsRecord { | ||
| 2619 | SVGA3dCapsRecordHeader header; | ||
| 2620 | uint32 data[1]; | ||
| 2621 | } | ||
| 2622 | SVGA3dCapsRecord; | ||
| 2623 | |||
| 2624 | |||
| 2625 | typedef uint32 SVGA3dCapPair[2]; | ||
| 2626 | |||
| 2586 | #endif /* _SVGA3D_REG_H_ */ | 2627 | #endif /* _SVGA3D_REG_H_ */ |
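Most of the svga3d_reg.h churn swaps the open-coded __attribute__((__packed__)) for the kernel's __packed shorthand and marks the remaining guest-backed command structs packed, since these layouts are consumed verbatim by the virtual device and must not pick up compiler padding. A sketch of what packing changes, with illustrative fields (sizes assume a typical ABI; the real SVGA structs differ):

#include <stdint.h>
#include <stdio.h>

/* Standalone expansion of the kernel's __packed macro. */
#define __packed __attribute__((__packed__))

struct cmd_default {
        uint32_t sid;
        uint8_t  flag;
        uint32_t mobid;
};                              /* 12 bytes: 3 bytes of padding */

struct __packed cmd_packed {
        uint32_t sid;
        uint8_t  flag;
        uint32_t mobid;
};                              /* 9 bytes: exactly the wire layout */

int main(void)
{
        printf("default: %zu bytes\n", sizeof(struct cmd_default));
        printf("packed:  %zu bytes\n", sizeof(struct cmd_packed));
        return 0;
}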
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h index 8369c3ba10fe..ef3385096145 100644 --- a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h +++ b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h | |||
| @@ -38,8 +38,11 @@ | |||
| 38 | 38 | ||
| 39 | #define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y)) | 39 | #define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y)) |
| 40 | #define max_t(type, x, y) ((x) > (y) ? (x) : (y)) | 40 | #define max_t(type, x, y) ((x) > (y) ? (x) : (y)) |
| 41 | #define min_t(type, x, y) ((x) < (y) ? (x) : (y)) | ||
| 41 | #define surf_size_struct SVGA3dSize | 42 | #define surf_size_struct SVGA3dSize |
| 42 | #define u32 uint32 | 43 | #define u32 uint32 |
| 44 | #define u64 uint64_t | ||
| 45 | #define U32_MAX ((u32)~0U) | ||
| 43 | 46 | ||
| 44 | #endif /* __KERNEL__ */ | 47 | #endif /* __KERNEL__ */ |
| 45 | 48 | ||
| @@ -704,8 +707,8 @@ static const struct svga3d_surface_desc svga3d_surface_descs[] = { | |||
| 704 | 707 | ||
| 705 | static inline u32 clamped_umul32(u32 a, u32 b) | 708 | static inline u32 clamped_umul32(u32 a, u32 b) |
| 706 | { | 709 | { |
| 707 | uint64_t tmp = (uint64_t) a*b; | 710 | u64 tmp = (u64) a*b; |
| 708 | return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp; | 711 | return (tmp > (u64) U32_MAX) ? U32_MAX : tmp; |
| 709 | } | 712 | } |
| 710 | 713 | ||
| 711 | static inline const struct svga3d_surface_desc * | 714 | static inline const struct svga3d_surface_desc * |
| @@ -834,7 +837,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format, | |||
| 834 | bool cubemap) | 837 | bool cubemap) |
| 835 | { | 838 | { |
| 836 | const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format); | 839 | const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format); |
| 837 | u32 total_size = 0; | 840 | u64 total_size = 0; |
| 838 | u32 mip; | 841 | u32 mip; |
| 839 | 842 | ||
| 840 | for (mip = 0; mip < num_mip_levels; mip++) { | 843 | for (mip = 0; mip < num_mip_levels; mip++) { |
| @@ -847,7 +850,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format, | |||
| 847 | if (cubemap) | 850 | if (cubemap) |
| 848 | total_size *= SVGA3D_MAX_SURFACE_FACES; | 851 | total_size *= SVGA3D_MAX_SURFACE_FACES; |
| 849 | 852 | ||
| 850 | return total_size; | 853 | return (u32) min_t(u64, total_size, (u64) U32_MAX); |
| 851 | } | 854 | } |
| 852 | 855 | ||
| 853 | 856 | ||
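The surfacedefs changes harden svga3dsurface_get_serialized_size() against integer overflow: per-mip sizes are accumulated in a u64 and the result is clamped to U32_MAX on return, matching the saturating clamped_umul32() multiply. Both pieces in a standalone sketch:

#include <stdint.h>
#include <stdio.h>

#define U32_MAX ((uint32_t)~0U)

/* Multiply in 64 bits and saturate instead of silently wrapping. */
static uint32_t clamped_umul32(uint32_t a, uint32_t b)
{
        uint64_t tmp = (uint64_t)a * b;

        return (tmp > (uint64_t)U32_MAX) ? U32_MAX : (uint32_t)tmp;
}

int main(void)
{
        /* A hostile guest could ask for mip chains whose summed
         * size wraps a u32; a 64-bit accumulator plus a final
         * clamp keeps the result well-defined. */
        uint32_t mip_sizes[] = { 0x80000000u, 0x80000000u, 4096 };
        uint64_t total = 0;

        for (unsigned int i = 0; i < 3; i++)
                total += mip_sizes[i];

        printf("mul: %u\n", clamped_umul32(0x10000u, 0x10000u));
        printf("sum: %u\n",
               (total > U32_MAX) ? U32_MAX : (uint32_t)total);
        return 0;
}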
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h index 71defa4d2d75..11323dd5196f 100644 --- a/drivers/gpu/drm/vmwgfx/svga_reg.h +++ b/drivers/gpu/drm/vmwgfx/svga_reg.h | |||
| @@ -169,10 +169,17 @@ enum { | |||
| 169 | SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */ | 169 | SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */ |
| 170 | SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */ | 170 | SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */ |
| 171 | SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */ | 171 | SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */ |
| 172 | SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */ | ||
| 173 | SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */ | ||
| 172 | SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */ | 174 | SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */ |
| 173 | SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */ | 175 | SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */ |
| 174 | SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */ | 176 | SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */ |
| 175 | SVGA_REG_TOP = 53, /* Must be 1 more than the last register */ | 177 | SVGA_REG_CMD_PREPEND_LOW = 53, |
| 178 | SVGA_REG_CMD_PREPEND_HIGH = 54, | ||
| 179 | SVGA_REG_SCREENTARGET_MAX_WIDTH = 55, | ||
| 180 | SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56, | ||
| 181 | SVGA_REG_MOB_MAX_SIZE = 57, | ||
| 182 | SVGA_REG_TOP = 58, /* Must be 1 more than the last register */ | ||
| 176 | 183 | ||
| 177 | SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ | 184 | SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ |
| 178 | /* Next 768 (== 256*3) registers exist for colormap */ | 185 | /* Next 768 (== 256*3) registers exist for colormap */ |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index 82c41daebc0e..1e80152674b5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c | |||
| @@ -37,7 +37,7 @@ struct vmw_user_context { | |||
| 37 | 37 | ||
| 38 | 38 | ||
| 39 | 39 | ||
| 40 | typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *); | 40 | typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool); |
| 41 | 41 | ||
| 42 | static void vmw_user_context_free(struct vmw_resource *res); | 42 | static void vmw_user_context_free(struct vmw_resource *res); |
| 43 | static struct vmw_resource * | 43 | static struct vmw_resource * |
| @@ -50,9 +50,11 @@ static int vmw_gb_context_unbind(struct vmw_resource *res, | |||
| 50 | bool readback, | 50 | bool readback, |
| 51 | struct ttm_validate_buffer *val_buf); | 51 | struct ttm_validate_buffer *val_buf); |
| 52 | static int vmw_gb_context_destroy(struct vmw_resource *res); | 52 | static int vmw_gb_context_destroy(struct vmw_resource *res); |
| 53 | static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi); | 53 | static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind); |
| 54 | static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi); | 54 | static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi, |
| 55 | static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi); | 55 | bool rebind); |
| 56 | static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind); | ||
| 57 | static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs); | ||
| 56 | static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs); | 58 | static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs); |
| 57 | static uint64_t vmw_user_context_size; | 59 | static uint64_t vmw_user_context_size; |
| 58 | 60 | ||
| @@ -111,10 +113,14 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) | |||
| 111 | 113 | ||
| 112 | if (res->func->destroy == vmw_gb_context_destroy) { | 114 | if (res->func->destroy == vmw_gb_context_destroy) { |
| 113 | mutex_lock(&dev_priv->cmdbuf_mutex); | 115 | mutex_lock(&dev_priv->cmdbuf_mutex); |
| 116 | mutex_lock(&dev_priv->binding_mutex); | ||
| 117 | (void) vmw_context_binding_state_kill | ||
| 118 | (&container_of(res, struct vmw_user_context, res)->cbs); | ||
| 114 | (void) vmw_gb_context_destroy(res); | 119 | (void) vmw_gb_context_destroy(res); |
| 115 | if (dev_priv->pinned_bo != NULL && | 120 | if (dev_priv->pinned_bo != NULL && |
| 116 | !dev_priv->query_cid_valid) | 121 | !dev_priv->query_cid_valid) |
| 117 | __vmw_execbuf_release_pinned_bo(dev_priv, NULL); | 122 | __vmw_execbuf_release_pinned_bo(dev_priv, NULL); |
| 123 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 118 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 124 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
| 119 | return; | 125 | return; |
| 120 | } | 126 | } |
| @@ -328,7 +334,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res, | |||
| 328 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | 334 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); |
| 329 | 335 | ||
| 330 | mutex_lock(&dev_priv->binding_mutex); | 336 | mutex_lock(&dev_priv->binding_mutex); |
| 331 | vmw_context_binding_state_kill(&uctx->cbs); | 337 | vmw_context_binding_state_scrub(&uctx->cbs); |
| 332 | 338 | ||
| 333 | submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); | 339 | submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); |
| 334 | 340 | ||
| @@ -378,10 +384,6 @@ static int vmw_gb_context_destroy(struct vmw_resource *res) | |||
| 378 | SVGA3dCmdHeader header; | 384 | SVGA3dCmdHeader header; |
| 379 | SVGA3dCmdDestroyGBContext body; | 385 | SVGA3dCmdDestroyGBContext body; |
| 380 | } *cmd; | 386 | } *cmd; |
| 381 | struct vmw_user_context *uctx = | ||
| 382 | container_of(res, struct vmw_user_context, res); | ||
| 383 | |||
| 384 | BUG_ON(!list_empty(&uctx->cbs.list)); | ||
| 385 | 387 | ||
| 386 | if (likely(res->id == -1)) | 388 | if (likely(res->id == -1)) |
| 387 | return 0; | 389 | return 0; |
| @@ -528,8 +530,9 @@ out_unlock: | |||
| 528 | * vmw_context_scrub_shader - scrub a shader binding from a context. | 530 | * vmw_context_scrub_shader - scrub a shader binding from a context. |
| 529 | * | 531 | * |
| 530 | * @bi: single binding information. | 532 | * @bi: single binding information. |
| 533 | * @rebind: Whether to issue a bind instead of scrub command. | ||
| 531 | */ | 534 | */ |
| 532 | static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi) | 535 | static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind) |
| 533 | { | 536 | { |
| 534 | struct vmw_private *dev_priv = bi->ctx->dev_priv; | 537 | struct vmw_private *dev_priv = bi->ctx->dev_priv; |
| 535 | struct { | 538 | struct { |
| @@ -548,7 +551,7 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi) | |||
| 548 | cmd->header.size = sizeof(cmd->body); | 551 | cmd->header.size = sizeof(cmd->body); |
| 549 | cmd->body.cid = bi->ctx->id; | 552 | cmd->body.cid = bi->ctx->id; |
| 550 | cmd->body.type = bi->i1.shader_type; | 553 | cmd->body.type = bi->i1.shader_type; |
| 551 | cmd->body.shid = SVGA3D_INVALID_ID; | 554 | cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); |
| 552 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 555 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
| 553 | 556 | ||
| 554 | return 0; | 557 | return 0; |
| @@ -559,8 +562,10 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi) | |||
| 559 | * from a context. | 562 | * from a context. |
| 560 | * | 563 | * |
| 561 | * @bi: single binding information. | 564 | * @bi: single binding information. |
| 565 | * @rebind: Whether to issue a bind instead of scrub command. | ||
| 562 | */ | 566 | */ |
| 563 | static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi) | 567 | static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi, |
| 568 | bool rebind) | ||
| 564 | { | 569 | { |
| 565 | struct vmw_private *dev_priv = bi->ctx->dev_priv; | 570 | struct vmw_private *dev_priv = bi->ctx->dev_priv; |
| 566 | struct { | 571 | struct { |
| @@ -579,7 +584,7 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi) | |||
| 579 | cmd->header.size = sizeof(cmd->body); | 584 | cmd->header.size = sizeof(cmd->body); |
| 580 | cmd->body.cid = bi->ctx->id; | 585 | cmd->body.cid = bi->ctx->id; |
| 581 | cmd->body.type = bi->i1.rt_type; | 586 | cmd->body.type = bi->i1.rt_type; |
| 582 | cmd->body.target.sid = SVGA3D_INVALID_ID; | 587 | cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); |
| 583 | cmd->body.target.face = 0; | 588 | cmd->body.target.face = 0; |
| 584 | cmd->body.target.mipmap = 0; | 589 | cmd->body.target.mipmap = 0; |
| 585 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 590 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
| @@ -591,11 +596,13 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi) | |||
| 591 | * vmw_context_scrub_texture - scrub a texture binding from a context. | 596 | * vmw_context_scrub_texture - scrub a texture binding from a context. |
| 592 | * | 597 | * |
| 593 | * @bi: single binding information. | 598 | * @bi: single binding information. |
| 599 | * @rebind: Whether to issue a bind instead of scrub command. | ||
| 594 | * | 600 | * |
| 595 | * TODO: Possibly complement this function with a function that takes | 601 | * TODO: Possibly complement this function with a function that takes |
| 596 | * a list of texture bindings and combines them to a single command. | 602 | * a list of texture bindings and combines them to a single command. |
| 597 | */ | 603 | */ |
| 598 | static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi) | 604 | static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, |
| 605 | bool rebind) | ||
| 599 | { | 606 | { |
| 600 | struct vmw_private *dev_priv = bi->ctx->dev_priv; | 607 | struct vmw_private *dev_priv = bi->ctx->dev_priv; |
| 601 | struct { | 608 | struct { |
| @@ -619,7 +626,7 @@ static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi) | |||
| 619 | cmd->body.c.cid = bi->ctx->id; | 626 | cmd->body.c.cid = bi->ctx->id; |
| 620 | cmd->body.s1.stage = bi->i1.texture_stage; | 627 | cmd->body.s1.stage = bi->i1.texture_stage; |
| 621 | cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; | 628 | cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; |
| 622 | cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID; | 629 | cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); |
| 623 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 630 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
| 624 | 631 | ||
| 625 | return 0; | 632 | return 0; |
| @@ -692,6 +699,7 @@ int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, | |||
| 692 | vmw_context_binding_drop(loc); | 699 | vmw_context_binding_drop(loc); |
| 693 | 700 | ||
| 694 | loc->bi = *bi; | 701 | loc->bi = *bi; |
| 702 | loc->bi.scrubbed = false; | ||
| 695 | list_add_tail(&loc->ctx_list, &cbs->list); | 703 | list_add_tail(&loc->ctx_list, &cbs->list); |
| 696 | INIT_LIST_HEAD(&loc->res_list); | 704 | INIT_LIST_HEAD(&loc->res_list); |
| 697 | 705 | ||
| @@ -727,12 +735,11 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs, | |||
| 727 | if (loc->bi.ctx != NULL) | 735 | if (loc->bi.ctx != NULL) |
| 728 | vmw_context_binding_drop(loc); | 736 | vmw_context_binding_drop(loc); |
| 729 | 737 | ||
| 730 | loc->bi = *bi; | 738 | if (bi->res != NULL) { |
| 731 | list_add_tail(&loc->ctx_list, &cbs->list); | 739 | loc->bi = *bi; |
| 732 | if (bi->res != NULL) | 740 | list_add_tail(&loc->ctx_list, &cbs->list); |
| 733 | list_add_tail(&loc->res_list, &bi->res->binding_head); | 741 | list_add_tail(&loc->res_list, &bi->res->binding_head); |
| 734 | else | 742 | } |
| 735 | INIT_LIST_HEAD(&loc->res_list); | ||
| 736 | } | 743 | } |
| 737 | 744 | ||
| 738 | /** | 745 | /** |
| @@ -746,7 +753,10 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs, | |||
| 746 | */ | 753 | */ |
| 747 | static void vmw_context_binding_kill(struct vmw_ctx_binding *cb) | 754 | static void vmw_context_binding_kill(struct vmw_ctx_binding *cb) |
| 748 | { | 755 | { |
| 749 | (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi); | 756 | if (!cb->bi.scrubbed) { |
| 757 | (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false); | ||
| 758 | cb->bi.scrubbed = true; | ||
| 759 | } | ||
| 750 | vmw_context_binding_drop(cb); | 760 | vmw_context_binding_drop(cb); |
| 751 | } | 761 | } |
| 752 | 762 | ||
| @@ -768,6 +778,27 @@ static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs) | |||
| 768 | } | 778 | } |
| 769 | 779 | ||
| 770 | /** | 780 | /** |
| 781 | * vmw_context_binding_state_scrub - Scrub all bindings associated with a | ||
| 782 | * struct vmw_ctx_binding state structure. | ||
| 783 | * | ||
| 784 | * @cbs: Pointer to the context binding state tracker. | ||
| 785 | * | ||
| 786 | * Emits commands to scrub all bindings associated with the | ||
| 787 | * context binding state tracker. | ||
| 788 | */ | ||
| 789 | static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs) | ||
| 790 | { | ||
| 791 | struct vmw_ctx_binding *entry; | ||
| 792 | |||
| 793 | list_for_each_entry(entry, &cbs->list, ctx_list) { | ||
| 794 | if (!entry->bi.scrubbed) { | ||
| 795 | (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false); | ||
| 796 | entry->bi.scrubbed = true; | ||
| 797 | } | ||
| 798 | } | ||
| 799 | } | ||
| 800 | |||
| 801 | /** | ||
| 771 | * vmw_context_binding_res_list_kill - Kill all bindings on a | 802 | * vmw_context_binding_res_list_kill - Kill all bindings on a |
| 772 | * resource binding list | 803 | * resource binding list |
| 773 | * | 804 | * |
| @@ -785,6 +816,27 @@ void vmw_context_binding_res_list_kill(struct list_head *head) | |||
| 785 | } | 816 | } |
| 786 | 817 | ||
| 787 | /** | 818 | /** |
| 819 | * vmw_context_binding_res_list_scrub - Scrub all bindings on a | ||
| 820 | * resource binding list | ||
| 821 | * | ||
| 822 | * @head: list head of resource binding list | ||
| 823 | * | ||
| 824 | * Scrub all bindings associated with a specific resource. Typically | ||
| 825 | * called before the resource is evicted. | ||
| 826 | */ | ||
| 827 | void vmw_context_binding_res_list_scrub(struct list_head *head) | ||
| 828 | { | ||
| 829 | struct vmw_ctx_binding *entry; | ||
| 830 | |||
| 831 | list_for_each_entry(entry, head, res_list) { | ||
| 832 | if (!entry->bi.scrubbed) { | ||
| 833 | (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false); | ||
| 834 | entry->bi.scrubbed = true; | ||
| 835 | } | ||
| 836 | } | ||
| 837 | } | ||
| 838 | |||
| 839 | /** | ||
| 788 | * vmw_context_binding_state_transfer - Commit staged binding info | 840 | * vmw_context_binding_state_transfer - Commit staged binding info |
| 789 | * | 841 | * |
| 790 | * @ctx: Pointer to context to commit the staged binding info to. | 842 | * @ctx: Pointer to context to commit the staged binding info to. |
| @@ -803,3 +855,50 @@ void vmw_context_binding_state_transfer(struct vmw_resource *ctx, | |||
| 803 | list_for_each_entry_safe(entry, next, &from->list, ctx_list) | 855 | list_for_each_entry_safe(entry, next, &from->list, ctx_list) |
| 804 | vmw_context_binding_transfer(&uctx->cbs, &entry->bi); | 856 | vmw_context_binding_transfer(&uctx->cbs, &entry->bi); |
| 805 | } | 857 | } |
| 858 | |||
| 859 | /** | ||
| 860 | * vmw_context_rebind_all - Rebind all scrubbed bindings of a context | ||
| 861 | * | ||
| 862 | * @ctx: The context resource | ||
| 863 | * | ||
| 864 | * Walks through the context binding list and rebinds all scrubbed | ||
| 865 | * resources. | ||
| 866 | */ | ||
| 867 | int vmw_context_rebind_all(struct vmw_resource *ctx) | ||
| 868 | { | ||
| 869 | struct vmw_ctx_binding *entry; | ||
| 870 | struct vmw_user_context *uctx = | ||
| 871 | container_of(ctx, struct vmw_user_context, res); | ||
| 872 | struct vmw_ctx_binding_state *cbs = &uctx->cbs; | ||
| 873 | int ret; | ||
| 874 | |||
| 875 | list_for_each_entry(entry, &cbs->list, ctx_list) { | ||
| 876 | if (likely(!entry->bi.scrubbed)) | ||
| 877 | continue; | ||
| 878 | |||
| 879 | if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id == | ||
| 880 | SVGA3D_INVALID_ID)) | ||
| 881 | continue; | ||
| 882 | |||
| 883 | ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true); | ||
| 884 | if (unlikely(ret != 0)) | ||
| 885 | return ret; | ||
| 886 | |||
| 887 | entry->bi.scrubbed = false; | ||
| 888 | } | ||
| 889 | |||
| 890 | return 0; | ||
| 891 | } | ||
| 892 | |||
| 893 | /** | ||
| 894 | * vmw_context_binding_list - Return a list of context bindings | ||
| 895 | * | ||
| 896 | * @ctx: The context resource | ||
| 897 | * | ||
| 898 | * Returns the current list of bindings of the given context. Note that | ||
| 899 | * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked. | ||
| 900 | */ | ||
| 901 | struct list_head *vmw_context_binding_list(struct vmw_resource *ctx) | ||
| 902 | { | ||
| 903 | return &(container_of(ctx, struct vmw_user_context, res)->cbs.list); | ||
| 904 | } | ||
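The vmwgfx_context.c rework separates killing a binding (emit the unbind and drop the tracking entry) from scrubbing it (emit the unbind but keep the entry, marked scrubbed), which is what lets vmw_context_rebind_all() re-emit bind commands when a previously unbound context is validated again. A minimal model of that scrub/rebind state machine:

#include <stdbool.h>
#include <stdio.h>

struct binding {
        int  res_id;
        bool scrubbed;
};

/* Scrub: tell the device to unbind, but remember the binding. */
static void binding_scrub(struct binding *b)
{
        if (!b->scrubbed) {
                printf("emit unbind of resource %d\n", b->res_id);
                b->scrubbed = true;
        }
}

/* Rebind: re-emit the bind for entries scrubbed earlier. */
static int binding_rebind(struct binding *b)
{
        if (!b->scrubbed)
                return 0;       /* still live on the device */

        printf("emit rebind of resource %d\n", b->res_id);
        b->scrubbed = false;
        return 0;
}

int main(void)
{
        struct binding b = { .res_id = 7, .scrubbed = false };

        binding_scrub(&b);      /* context unbound: scrub, keep entry */
        binding_rebind(&b);     /* context validated: bind again */
        return 0;
}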
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 9893328f8fdc..0083cbf99edf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
| @@ -667,6 +667,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 667 | dev_priv->memory_size = 512*1024*1024; | 667 | dev_priv->memory_size = 512*1024*1024; |
| 668 | } | 668 | } |
| 669 | dev_priv->max_mob_pages = 0; | 669 | dev_priv->max_mob_pages = 0; |
| 670 | dev_priv->max_mob_size = 0; | ||
| 670 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { | 671 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { |
| 671 | uint64_t mem_size = | 672 | uint64_t mem_size = |
| 672 | vmw_read(dev_priv, | 673 | vmw_read(dev_priv, |
| @@ -676,6 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 676 | dev_priv->prim_bb_mem = | 677 | dev_priv->prim_bb_mem = |
| 677 | vmw_read(dev_priv, | 678 | vmw_read(dev_priv, |
| 678 | SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); | 679 | SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); |
| 680 | dev_priv->max_mob_size = | ||
| 681 | vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); | ||
| 679 | } else | 682 | } else |
| 680 | dev_priv->prim_bb_mem = dev_priv->vram_size; | 683 | dev_priv->prim_bb_mem = dev_priv->vram_size; |
| 681 | 684 | ||
| @@ -941,6 +944,7 @@ static void vmw_postclose(struct drm_device *dev, | |||
| 941 | drm_master_put(&vmw_fp->locked_master); | 944 | drm_master_put(&vmw_fp->locked_master); |
| 942 | } | 945 | } |
| 943 | 946 | ||
| 947 | vmw_compat_shader_man_destroy(vmw_fp->shman); | ||
| 944 | ttm_object_file_release(&vmw_fp->tfile); | 948 | ttm_object_file_release(&vmw_fp->tfile); |
| 945 | kfree(vmw_fp); | 949 | kfree(vmw_fp); |
| 946 | } | 950 | } |
| @@ -960,11 +964,17 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) | |||
| 960 | if (unlikely(vmw_fp->tfile == NULL)) | 964 | if (unlikely(vmw_fp->tfile == NULL)) |
| 961 | goto out_no_tfile; | 965 | goto out_no_tfile; |
| 962 | 966 | ||
| 967 | vmw_fp->shman = vmw_compat_shader_man_create(dev_priv); | ||
| 968 | if (IS_ERR(vmw_fp->shman)) | ||
| 969 | goto out_no_shman; | ||
| 970 | |||
| 963 | file_priv->driver_priv = vmw_fp; | 971 | file_priv->driver_priv = vmw_fp; |
| 964 | dev_priv->bdev.dev_mapping = dev->dev_mapping; | 972 | dev_priv->bdev.dev_mapping = dev->dev_mapping; |
| 965 | 973 | ||
| 966 | return 0; | 974 | return 0; |
| 967 | 975 | ||
| 976 | out_no_shman: | ||
| 977 | ttm_object_file_release(&vmw_fp->tfile); | ||
| 968 | out_no_tfile: | 978 | out_no_tfile: |
| 969 | kfree(vmw_fp); | 979 | kfree(vmw_fp); |
| 970 | return ret; | 980 | return ret; |
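vmw_driver_open() now also creates a per-file compat shader manager, and the error unwinding grows a matching label: each acquisition that can fail gets a label that releases everything acquired before it, in reverse order. The same goto-cleanup shape in a standalone sketch (tfile_create and shman_create are stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct file_priv {
        void *tfile;
        void *shman;
};

static void *tfile_create(void) { return malloc(1); }
static void *shman_create(void) { return NULL; /* simulate failure */ }

static struct file_priv *driver_open(void)
{
        struct file_priv *fp = calloc(1, sizeof(*fp));

        if (!fp)
                return NULL;

        fp->tfile = tfile_create();
        if (!fp->tfile)
                goto out_no_tfile;

        fp->shman = shman_create();
        if (!fp->shman)
                goto out_no_shman;

        return fp;

out_no_shman:
        free(fp->tfile);        /* undo everything taken so far */
out_no_tfile:
        free(fp);
        return NULL;
}

int main(void)
{
        if (!driver_open())
                printf("open failed, partial state released\n");
        return 0;
}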
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 554e7fa33082..07831554dad7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
| @@ -40,7 +40,7 @@ | |||
| 40 | #include <drm/ttm/ttm_module.h> | 40 | #include <drm/ttm/ttm_module.h> |
| 41 | #include "vmwgfx_fence.h" | 41 | #include "vmwgfx_fence.h" |
| 42 | 42 | ||
| 43 | #define VMWGFX_DRIVER_DATE "20121114" | 43 | #define VMWGFX_DRIVER_DATE "20140228" |
| 44 | #define VMWGFX_DRIVER_MAJOR 2 | 44 | #define VMWGFX_DRIVER_MAJOR 2 |
| 45 | #define VMWGFX_DRIVER_MINOR 5 | 45 | #define VMWGFX_DRIVER_MINOR 5 |
| 46 | #define VMWGFX_DRIVER_PATCHLEVEL 0 | 46 | #define VMWGFX_DRIVER_PATCHLEVEL 0 |
| @@ -75,10 +75,14 @@ | |||
| 75 | #define VMW_RES_FENCE ttm_driver_type3 | 75 | #define VMW_RES_FENCE ttm_driver_type3 |
| 76 | #define VMW_RES_SHADER ttm_driver_type4 | 76 | #define VMW_RES_SHADER ttm_driver_type4 |
| 77 | 77 | ||
| 78 | struct vmw_compat_shader_manager; | ||
| 79 | |||
| 78 | struct vmw_fpriv { | 80 | struct vmw_fpriv { |
| 79 | struct drm_master *locked_master; | 81 | struct drm_master *locked_master; |
| 80 | struct ttm_object_file *tfile; | 82 | struct ttm_object_file *tfile; |
| 81 | struct list_head fence_events; | 83 | struct list_head fence_events; |
| 84 | bool gb_aware; | ||
| 85 | struct vmw_compat_shader_manager *shman; | ||
| 82 | }; | 86 | }; |
| 83 | 87 | ||
| 84 | struct vmw_dma_buffer { | 88 | struct vmw_dma_buffer { |
| @@ -272,6 +276,7 @@ struct vmw_ctx_bindinfo { | |||
| 272 | struct vmw_resource *ctx; | 276 | struct vmw_resource *ctx; |
| 273 | struct vmw_resource *res; | 277 | struct vmw_resource *res; |
| 274 | enum vmw_ctx_binding_type bt; | 278 | enum vmw_ctx_binding_type bt; |
| 279 | bool scrubbed; | ||
| 275 | union { | 280 | union { |
| 276 | SVGA3dShaderType shader_type; | 281 | SVGA3dShaderType shader_type; |
| 277 | SVGA3dRenderTargetType rt_type; | 282 | SVGA3dRenderTargetType rt_type; |
| @@ -318,7 +323,7 @@ struct vmw_sw_context{ | |||
| 318 | struct drm_open_hash res_ht; | 323 | struct drm_open_hash res_ht; |
| 319 | bool res_ht_initialized; | 324 | bool res_ht_initialized; |
| 320 | bool kernel; /**< is the call made from the kernel */ | 325 | bool kernel; /**< is the call made from the kernel */ |
| 321 | struct ttm_object_file *tfile; | 326 | struct vmw_fpriv *fp; |
| 322 | struct list_head validate_nodes; | 327 | struct list_head validate_nodes; |
| 323 | struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS]; | 328 | struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS]; |
| 324 | uint32_t cur_reloc; | 329 | uint32_t cur_reloc; |
| @@ -336,6 +341,7 @@ struct vmw_sw_context{ | |||
| 336 | bool needs_post_query_barrier; | 341 | bool needs_post_query_barrier; |
| 337 | struct vmw_resource *error_resource; | 342 | struct vmw_resource *error_resource; |
| 338 | struct vmw_ctx_binding_state staged_bindings; | 343 | struct vmw_ctx_binding_state staged_bindings; |
| 344 | struct list_head staged_shaders; | ||
| 339 | }; | 345 | }; |
| 340 | 346 | ||
| 341 | struct vmw_legacy_display; | 347 | struct vmw_legacy_display; |
| @@ -380,6 +386,7 @@ struct vmw_private { | |||
| 380 | uint32_t max_gmr_ids; | 386 | uint32_t max_gmr_ids; |
| 381 | uint32_t max_gmr_pages; | 387 | uint32_t max_gmr_pages; |
| 382 | uint32_t max_mob_pages; | 388 | uint32_t max_mob_pages; |
| 389 | uint32_t max_mob_size; | ||
| 383 | uint32_t memory_size; | 390 | uint32_t memory_size; |
| 384 | bool has_gmr; | 391 | bool has_gmr; |
| 385 | bool has_mob; | 392 | bool has_mob; |
| @@ -569,6 +576,8 @@ struct vmw_user_resource_conv; | |||
| 569 | 576 | ||
| 570 | extern void vmw_resource_unreference(struct vmw_resource **p_res); | 577 | extern void vmw_resource_unreference(struct vmw_resource **p_res); |
| 571 | extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); | 578 | extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); |
| 579 | extern struct vmw_resource * | ||
| 580 | vmw_resource_reference_unless_doomed(struct vmw_resource *res); | ||
| 572 | extern int vmw_resource_validate(struct vmw_resource *res); | 581 | extern int vmw_resource_validate(struct vmw_resource *res); |
| 573 | extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup); | 582 | extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup); |
| 574 | extern bool vmw_resource_needs_backup(const struct vmw_resource *res); | 583 | extern bool vmw_resource_needs_backup(const struct vmw_resource *res); |
| @@ -957,6 +966,9 @@ extern void | |||
| 957 | vmw_context_binding_state_transfer(struct vmw_resource *res, | 966 | vmw_context_binding_state_transfer(struct vmw_resource *res, |
| 958 | struct vmw_ctx_binding_state *cbs); | 967 | struct vmw_ctx_binding_state *cbs); |
| 959 | extern void vmw_context_binding_res_list_kill(struct list_head *head); | 968 | extern void vmw_context_binding_res_list_kill(struct list_head *head); |
| 969 | extern void vmw_context_binding_res_list_scrub(struct list_head *head); | ||
| 970 | extern int vmw_context_rebind_all(struct vmw_resource *ctx); | ||
| 971 | extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx); | ||
| 960 | 972 | ||
| 961 | /* | 973 | /* |
| 962 | * Surface management - vmwgfx_surface.c | 974 | * Surface management - vmwgfx_surface.c |
| @@ -991,6 +1003,28 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data, | |||
| 991 | struct drm_file *file_priv); | 1003 | struct drm_file *file_priv); |
| 992 | extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, | 1004 | extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, |
| 993 | struct drm_file *file_priv); | 1005 | struct drm_file *file_priv); |
| 1006 | extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man, | ||
| 1007 | SVGA3dShaderType shader_type, | ||
| 1008 | u32 *user_key); | ||
| 1009 | extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man, | ||
| 1010 | struct list_head *list); | ||
| 1011 | extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man, | ||
| 1012 | struct list_head *list); | ||
| 1013 | extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man, | ||
| 1014 | u32 user_key, | ||
| 1015 | SVGA3dShaderType shader_type, | ||
| 1016 | struct list_head *list); | ||
| 1017 | extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man, | ||
| 1018 | u32 user_key, const void *bytecode, | ||
| 1019 | SVGA3dShaderType shader_type, | ||
| 1020 | size_t size, | ||
| 1021 | struct ttm_object_file *tfile, | ||
| 1022 | struct list_head *list); | ||
| 1023 | extern struct vmw_compat_shader_manager * | ||
| 1024 | vmw_compat_shader_man_create(struct vmw_private *dev_priv); | ||
| 1025 | extern void | ||
| 1026 | vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man); | ||
| 1027 | |||
| 994 | 1028 | ||
| 995 | /** | 1029 | /** |
| 996 | * Inline helper functions | 1030 | * Inline helper functions |
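Taken together, the declarations above define a per-file-handle manager with transactional semantics: vmw_compat_shader_add()/_remove() only stage work on a caller-provided list, and the result becomes visible through vmw_compat_shaders_commit() or is rolled back by vmw_compat_shaders_revert(). A standalone model of that staging discipline, assuming a singly linked list instead of the kernel's list_head (a sketch, not the driver code):

#include <stdlib.h>

enum state { COMMITTED, STAGED_ADD, STAGED_DEL };

struct entry {
	int key;
	enum state state;
	struct entry *next;
};

/* On successful submission: staged additions become visible, staged
 * removals become final. */
static struct entry *commit_list(struct entry *staged, struct entry *committed)
{
	while (staged) {
		struct entry *e = staged;

		staged = e->next;
		if (e->state == STAGED_DEL) {
			free(e);
		} else {
			e->state = COMMITTED;
			e->next = committed;
			committed = e;
		}
	}
	return committed;
}

/* On failure: staged additions never happened, staged removals are undone. */
static struct entry *revert_list(struct entry *staged, struct entry *committed)
{
	while (staged) {
		struct entry *e = staged;

		staged = e->next;
		if (e->state == STAGED_ADD) {
			free(e);
		} else {
			e->state = COMMITTED;
			e->next = committed;
			committed = e;
		}
	}
	return committed;
}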
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 7a5f1eb55c5a..efb575a7996c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
| @@ -114,8 +114,10 @@ static void vmw_resource_list_unreserve(struct list_head *list, | |||
| 114 | * persistent context binding tracker. | 114 | * persistent context binding tracker. |
| 115 | */ | 115 | */ |
| 116 | if (unlikely(val->staged_bindings)) { | 116 | if (unlikely(val->staged_bindings)) { |
| 117 | vmw_context_binding_state_transfer | 117 | if (!backoff) { |
| 118 | (val->res, val->staged_bindings); | 118 | vmw_context_binding_state_transfer |
| 119 | (val->res, val->staged_bindings); | ||
| 120 | } | ||
| 119 | kfree(val->staged_bindings); | 121 | kfree(val->staged_bindings); |
| 120 | val->staged_bindings = NULL; | 122 | val->staged_bindings = NULL; |
| 121 | } | 123 | } |
| @@ -178,6 +180,44 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context, | |||
| 178 | } | 180 | } |
| 179 | 181 | ||
| 180 | /** | 182 | /** |
| 183 | * vmw_resource_context_res_add - Put resources previously bound to a context on | ||
| 184 | * the validation list | ||
| 185 | * | ||
| 186 | * @dev_priv: Pointer to a device private structure | ||
| 187 | * @sw_context: Pointer to a software context used for this command submission | ||
| 188 | * @ctx: Pointer to the context resource | ||
| 189 | * | ||
| 190 | * This function puts all resources that were previously bound to @ctx on | ||
| 191 | * the resource validation list. This is part of the context state re-emission. | ||
| 192 | */ | ||
| 193 | static int vmw_resource_context_res_add(struct vmw_private *dev_priv, | ||
| 194 | struct vmw_sw_context *sw_context, | ||
| 195 | struct vmw_resource *ctx) | ||
| 196 | { | ||
| 197 | struct list_head *binding_list; | ||
| 198 | struct vmw_ctx_binding *entry; | ||
| 199 | int ret = 0; | ||
| 200 | struct vmw_resource *res; | ||
| 201 | |||
| 202 | mutex_lock(&dev_priv->binding_mutex); | ||
| 203 | binding_list = vmw_context_binding_list(ctx); | ||
| 204 | |||
| 205 | list_for_each_entry(entry, binding_list, ctx_list) { | ||
| 206 | res = vmw_resource_reference_unless_doomed(entry->bi.res); | ||
| 207 | if (unlikely(res == NULL)) | ||
| 208 | continue; | ||
| 209 | |||
| 210 | ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL); | ||
| 211 | vmw_resource_unreference(&res); | ||
| 212 | if (unlikely(ret != 0)) | ||
| 213 | break; | ||
| 214 | } | ||
| 215 | |||
| 216 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 217 | return ret; | ||
| 218 | } | ||
| 219 | |||
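vmw_resource_context_res_add() above only picks up resources whose refcount has not already dropped to zero; vmw_resource_reference_unless_doomed(), added in vmwgfx_resource.c further down, is a thin wrapper around kref_get_unless_zero(). A standalone model of that "skip dying objects" walk, with a C11 atomic counter standing in for struct kref:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct obj {
	atomic_int refcount;	/* stands in for struct kref */
	struct obj *next;
};

/* Take a reference only if the object is not already on its way out,
 * mirroring kref_get_unless_zero(). */
static bool get_unless_zero(struct obj *o)
{
	int old = atomic_load(&o->refcount);

	while (old != 0)
		if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
			return true;
	return false;
}

static void walk(struct obj *head,
		 void (*use)(struct obj *), void (*put)(struct obj *))
{
	for (struct obj *o = head; o != NULL; o = o->next) {
		if (!get_unless_zero(o))
			continue;	/* doomed: the release path owns it */
		use(o);
		put(o);
	}
}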
| 220 | /** | ||
| 181 | * vmw_resource_relocation_add - Add a relocation to the relocation list | 221 | * vmw_resource_relocation_add - Add a relocation to the relocation list |
| 182 | * | 222 | * |
| 183 | * @list: Pointer to head of relocation list. | 223 | * @list: Pointer to head of relocation list. |
| @@ -233,8 +273,12 @@ static void vmw_resource_relocations_apply(uint32_t *cb, | |||
| 233 | { | 273 | { |
| 234 | struct vmw_resource_relocation *rel; | 274 | struct vmw_resource_relocation *rel; |
| 235 | 275 | ||
| 236 | list_for_each_entry(rel, list, head) | 276 | list_for_each_entry(rel, list, head) { |
| 237 | cb[rel->offset] = rel->res->id; | 277 | if (likely(rel->res != NULL)) |
| 278 | cb[rel->offset] = rel->res->id; | ||
| 279 | else | ||
| 280 | cb[rel->offset] = SVGA_3D_CMD_NOP; | ||
| 281 | } | ||
| 238 | } | 282 | } |
| 239 | 283 | ||
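The change above lets the relocation pass double as a command eraser: a relocation whose resource pointer is NULL overwrites the command id slot with SVGA_3D_CMD_NOP, which is how the shader define/destroy handlers added below drop legacy commands from the stream once they have been handled host-side. A standalone sketch of patching a command buffer from such a list (arrays instead of list_head, and a made-up NOP value):

#include <stdint.h>
#include <stddef.h>

#define CMD_NOP 0x3fu	/* placeholder; the real value is SVGA_3D_CMD_NOP */

struct reloc {
	size_t offset;		/* index of the id slot in the command buffer */
	const uint32_t *id;	/* device id to patch in, or NULL to erase */
};

static void apply_relocs(uint32_t *cb, const struct reloc *r, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		if (r[i].id != NULL)
			cb[r[i].offset] = *r[i].id;	/* late-bound id */
		else
			cb[r[i].offset] = CMD_NOP;	/* command consumed */
	}
}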
| 240 | static int vmw_cmd_invalid(struct vmw_private *dev_priv, | 284 | static int vmw_cmd_invalid(struct vmw_private *dev_priv, |
| @@ -379,22 +423,27 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context) | |||
| 379 | } | 423 | } |
| 380 | 424 | ||
| 381 | /** | 425 | /** |
| 382 | * vmw_cmd_res_check - Check that a resource is present and if so, put it | 426 | * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it |
| 383 | * on the resource validate list unless it's already there. | 427 | * on the resource validate list unless it's already there. |
| 384 | * | 428 | * |
| 385 | * @dev_priv: Pointer to a device private structure. | 429 | * @dev_priv: Pointer to a device private structure. |
| 386 | * @sw_context: Pointer to the software context. | 430 | * @sw_context: Pointer to the software context. |
| 387 | * @res_type: Resource type. | 431 | * @res_type: Resource type. |
| 388 | * @converter: User-space visible type-specific information. | 432 | * @converter: User-space visible type-specific information. |
| 389 | * @id: Pointer to the location in the command buffer currently being | 433 | * @id: User-space resource id handle. |
| 434 | * @id_loc: Pointer to the location in the command buffer currently being | ||
| 390 | * parsed from where the user-space resource id handle is located. | 435 | * parsed from where the user-space resource id handle is located. |
| 436 | * @p_val: Pointer to pointer to resource validation node. Populated | ||
| 437 | * on exit. | ||
| 391 | */ | 438 | */ |
| 392 | static int vmw_cmd_res_check(struct vmw_private *dev_priv, | 439 | static int |
| 393 | struct vmw_sw_context *sw_context, | 440 | vmw_cmd_compat_res_check(struct vmw_private *dev_priv, |
| 394 | enum vmw_res_type res_type, | 441 | struct vmw_sw_context *sw_context, |
| 395 | const struct vmw_user_resource_conv *converter, | 442 | enum vmw_res_type res_type, |
| 396 | uint32_t *id, | 443 | const struct vmw_user_resource_conv *converter, |
| 397 | struct vmw_resource_val_node **p_val) | 444 | uint32_t id, |
| 445 | uint32_t *id_loc, | ||
| 446 | struct vmw_resource_val_node **p_val) | ||
| 398 | { | 447 | { |
| 399 | struct vmw_res_cache_entry *rcache = | 448 | struct vmw_res_cache_entry *rcache = |
| 400 | &sw_context->res_cache[res_type]; | 449 | &sw_context->res_cache[res_type]; |
| @@ -402,7 +451,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, | |||
| 402 | struct vmw_resource_val_node *node; | 451 | struct vmw_resource_val_node *node; |
| 403 | int ret; | 452 | int ret; |
| 404 | 453 | ||
| 405 | if (*id == SVGA3D_INVALID_ID) { | 454 | if (id == SVGA3D_INVALID_ID) { |
| 406 | if (p_val) | 455 | if (p_val) |
| 407 | *p_val = NULL; | 456 | *p_val = NULL; |
| 408 | if (res_type == vmw_res_context) { | 457 | if (res_type == vmw_res_context) { |
| @@ -417,7 +466,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, | |||
| 417 | * resource | 466 | * resource |
| 418 | */ | 467 | */ |
| 419 | 468 | ||
| 420 | if (likely(rcache->valid && *id == rcache->handle)) { | 469 | if (likely(rcache->valid && id == rcache->handle)) { |
| 421 | const struct vmw_resource *res = rcache->res; | 470 | const struct vmw_resource *res = rcache->res; |
| 422 | 471 | ||
| 423 | rcache->node->first_usage = false; | 472 | rcache->node->first_usage = false; |
| @@ -426,28 +475,28 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, | |||
| 426 | 475 | ||
| 427 | return vmw_resource_relocation_add | 476 | return vmw_resource_relocation_add |
| 428 | (&sw_context->res_relocations, res, | 477 | (&sw_context->res_relocations, res, |
| 429 | id - sw_context->buf_start); | 478 | id_loc - sw_context->buf_start); |
| 430 | } | 479 | } |
| 431 | 480 | ||
| 432 | ret = vmw_user_resource_lookup_handle(dev_priv, | 481 | ret = vmw_user_resource_lookup_handle(dev_priv, |
| 433 | sw_context->tfile, | 482 | sw_context->fp->tfile, |
| 434 | *id, | 483 | id, |
| 435 | converter, | 484 | converter, |
| 436 | &res); | 485 | &res); |
| 437 | if (unlikely(ret != 0)) { | 486 | if (unlikely(ret != 0)) { |
| 438 | DRM_ERROR("Could not find or use resource 0x%08x.\n", | 487 | DRM_ERROR("Could not find or use resource 0x%08x.\n", |
| 439 | (unsigned) *id); | 488 | (unsigned) id); |
| 440 | dump_stack(); | 489 | dump_stack(); |
| 441 | return ret; | 490 | return ret; |
| 442 | } | 491 | } |
| 443 | 492 | ||
| 444 | rcache->valid = true; | 493 | rcache->valid = true; |
| 445 | rcache->res = res; | 494 | rcache->res = res; |
| 446 | rcache->handle = *id; | 495 | rcache->handle = id; |
| 447 | 496 | ||
| 448 | ret = vmw_resource_relocation_add(&sw_context->res_relocations, | 497 | ret = vmw_resource_relocation_add(&sw_context->res_relocations, |
| 449 | res, | 498 | res, |
| 450 | id - sw_context->buf_start); | 499 | id_loc - sw_context->buf_start); |
| 451 | if (unlikely(ret != 0)) | 500 | if (unlikely(ret != 0)) |
| 452 | goto out_no_reloc; | 501 | goto out_no_reloc; |
| 453 | 502 | ||
| @@ -459,7 +508,11 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, | |||
| 459 | if (p_val) | 508 | if (p_val) |
| 460 | *p_val = node; | 509 | *p_val = node; |
| 461 | 510 | ||
| 462 | if (node->first_usage && res_type == vmw_res_context) { | 511 | if (dev_priv->has_mob && node->first_usage && |
| 512 | res_type == vmw_res_context) { | ||
| 513 | ret = vmw_resource_context_res_add(dev_priv, sw_context, res); | ||
| 514 | if (unlikely(ret != 0)) | ||
| 515 | goto out_no_reloc; | ||
| 463 | node->staged_bindings = | 516 | node->staged_bindings = |
| 464 | kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); | 517 | kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); |
| 465 | if (node->staged_bindings == NULL) { | 518 | if (node->staged_bindings == NULL) { |
| @@ -481,6 +534,59 @@ out_no_reloc: | |||
| 481 | } | 534 | } |
| 482 | 535 | ||
| 483 | /** | 536 | /** |
| 537 | * vmw_cmd_res_check - Check that a resource is present and if so, put it | ||
| 538 | * on the resource validate list unless it's already there. | ||
| 539 | * | ||
| 540 | * @dev_priv: Pointer to a device private structure. | ||
| 541 | * @sw_context: Pointer to the software context. | ||
| 542 | * @res_type: Resource type. | ||
| 543 | * @converter: User-space visible type-specific information. | ||
| 544 | * @id_loc: Pointer to the location in the command buffer currently being | ||
| 545 | * parsed from where the user-space resource id handle is located. | ||
| 546 | * @p_val: Pointer to pointer to resource validation node. Populated | ||
| 547 | * on exit. | ||
| 548 | */ | ||
| 549 | static int | ||
| 550 | vmw_cmd_res_check(struct vmw_private *dev_priv, | ||
| 551 | struct vmw_sw_context *sw_context, | ||
| 552 | enum vmw_res_type res_type, | ||
| 553 | const struct vmw_user_resource_conv *converter, | ||
| 554 | uint32_t *id_loc, | ||
| 555 | struct vmw_resource_val_node **p_val) | ||
| 556 | { | ||
| 557 | return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type, | ||
| 558 | converter, *id_loc, id_loc, p_val); | ||
| 559 | } | ||
| 560 | |||
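Splitting the check this way lets a caller validate one handle while recording a different buffer location for later patching, which is exactly what the compat-shader path in vmw_cmd_set_shader below needs; vmw_cmd_res_check() keeps the common case where the two coincide. A reduced sketch of the same split (hypothetical names and types, not the driver's):

#include <stdint.h>
#include <stddef.h>

struct parse_ctx {
	const uint32_t *buf_start;
	size_t last_patch;	/* offset to rewrite during relocation */
};

/* Validate `id`, but remember *where* in the buffer to patch later; the
 * two may disagree when the handle was translated before validation. */
static int check_handle(struct parse_ctx *c, uint32_t id,
			const uint32_t *id_loc)
{
	if (id == UINT32_MAX)		/* stands in for SVGA3D_INVALID_ID */
		return -1;
	c->last_patch = id_loc - c->buf_start;
	return 0;
}

/* Common case: the handle to validate is the one sitting in the buffer. */
static int check_handle_in_place(struct parse_ctx *c, const uint32_t *id_loc)
{
	return check_handle(c, *id_loc, id_loc);
}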
| 561 | /** | ||
| 562 | * vmw_rebind_contexts - Rebind all resources previously bound to | ||
| 563 | * referenced contexts. | ||
| 564 | * | ||
| 565 | * @sw_context: Pointer to the software context. | ||
| 566 | * | ||
| 567 | * Rebind context binding points that have been scrubbed because of eviction. | ||
| 568 | */ | ||
| 569 | static int vmw_rebind_contexts(struct vmw_sw_context *sw_context) | ||
| 570 | { | ||
| 571 | struct vmw_resource_val_node *val; | ||
| 572 | int ret; | ||
| 573 | |||
| 574 | list_for_each_entry(val, &sw_context->resource_list, head) { | ||
| 575 | if (likely(!val->staged_bindings)) | ||
| 576 | continue; | ||
| 577 | |||
| 578 | ret = vmw_context_rebind_all(val->res); | ||
| 579 | if (unlikely(ret != 0)) { | ||
| 580 | if (ret != -ERESTARTSYS) | ||
| 581 | DRM_ERROR("Failed to rebind context.\n"); | ||
| 582 | return ret; | ||
| 583 | } | ||
| 584 | } | ||
| 585 | |||
| 586 | return 0; | ||
| 587 | } | ||
| 588 | |||
| 589 | /** | ||
| 484 | * vmw_cmd_cid_check - Check a command header for valid context information. | 590 | * vmw_cmd_cid_check - Check a command header for valid context information. |
| 485 | * | 591 | * |
| 486 | * @dev_priv: Pointer to a device private structure. | 592 | * @dev_priv: Pointer to a device private structure. |
| @@ -496,7 +602,7 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv, | |||
| 496 | { | 602 | { |
| 497 | struct vmw_cid_cmd { | 603 | struct vmw_cid_cmd { |
| 498 | SVGA3dCmdHeader header; | 604 | SVGA3dCmdHeader header; |
| 499 | __le32 cid; | 605 | uint32_t cid; |
| 500 | } *cmd; | 606 | } *cmd; |
| 501 | 607 | ||
| 502 | cmd = container_of(header, struct vmw_cid_cmd, header); | 608 | cmd = container_of(header, struct vmw_cid_cmd, header); |
| @@ -767,7 +873,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, | |||
| 767 | struct vmw_relocation *reloc; | 873 | struct vmw_relocation *reloc; |
| 768 | int ret; | 874 | int ret; |
| 769 | 875 | ||
| 770 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); | 876 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); |
| 771 | if (unlikely(ret != 0)) { | 877 | if (unlikely(ret != 0)) { |
| 772 | DRM_ERROR("Could not find or use MOB buffer.\n"); | 878 | DRM_ERROR("Could not find or use MOB buffer.\n"); |
| 773 | return -EINVAL; | 879 | return -EINVAL; |
| @@ -828,7 +934,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | |||
| 828 | struct vmw_relocation *reloc; | 934 | struct vmw_relocation *reloc; |
| 829 | int ret; | 935 | int ret; |
| 830 | 936 | ||
| 831 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); | 937 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); |
| 832 | if (unlikely(ret != 0)) { | 938 | if (unlikely(ret != 0)) { |
| 833 | DRM_ERROR("Could not find or use GMR region.\n"); | 939 | DRM_ERROR("Could not find or use GMR region.\n"); |
| 834 | return -EINVAL; | 940 | return -EINVAL; |
| @@ -1127,7 +1233,8 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
| 1127 | 1233 | ||
| 1128 | srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); | 1234 | srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); |
| 1129 | 1235 | ||
| 1130 | vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header); | 1236 | vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, |
| 1237 | header); | ||
| 1131 | 1238 | ||
| 1132 | out_no_surface: | 1239 | out_no_surface: |
| 1133 | vmw_dmabuf_unreference(&vmw_bo); | 1240 | vmw_dmabuf_unreference(&vmw_bo); |
| @@ -1478,6 +1585,98 @@ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv, | |||
| 1478 | &cmd->body.sid, NULL); | 1585 | &cmd->body.sid, NULL); |
| 1479 | } | 1586 | } |
| 1480 | 1587 | ||
| 1588 | |||
| 1589 | /** | ||
| 1590 | * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE | ||
| 1591 | * command | ||
| 1592 | * | ||
| 1593 | * @dev_priv: Pointer to a device private struct. | ||
| 1594 | * @sw_context: The software context being used for this batch. | ||
| 1595 | * @header: Pointer to the command header in the command stream. | ||
| 1596 | */ | ||
| 1597 | static int vmw_cmd_shader_define(struct vmw_private *dev_priv, | ||
| 1598 | struct vmw_sw_context *sw_context, | ||
| 1599 | SVGA3dCmdHeader *header) | ||
| 1600 | { | ||
| 1601 | struct vmw_shader_define_cmd { | ||
| 1602 | SVGA3dCmdHeader header; | ||
| 1603 | SVGA3dCmdDefineShader body; | ||
| 1604 | } *cmd; | ||
| 1605 | int ret; | ||
| 1606 | size_t size; | ||
| 1607 | |||
| 1608 | cmd = container_of(header, struct vmw_shader_define_cmd, | ||
| 1609 | header); | ||
| 1610 | |||
| 1611 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
| 1612 | user_context_converter, &cmd->body.cid, | ||
| 1613 | NULL); | ||
| 1614 | if (unlikely(ret != 0)) | ||
| 1615 | return ret; | ||
| 1616 | |||
| 1617 | if (unlikely(!dev_priv->has_mob)) | ||
| 1618 | return 0; | ||
| 1619 | |||
| 1620 | size = cmd->header.size - sizeof(cmd->body); | ||
| 1621 | ret = vmw_compat_shader_add(sw_context->fp->shman, | ||
| 1622 | cmd->body.shid, cmd + 1, | ||
| 1623 | cmd->body.type, size, | ||
| 1624 | sw_context->fp->tfile, | ||
| 1625 | &sw_context->staged_shaders); | ||
| 1626 | if (unlikely(ret != 0)) | ||
| 1627 | return ret; | ||
| 1628 | |||
| 1629 | return vmw_resource_relocation_add(&sw_context->res_relocations, | ||
| 1630 | NULL, &cmd->header.id - | ||
| 1631 | sw_context->buf_start); | ||
| 1634 | } | ||
| 1635 | |||
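With guest-backed objects the legacy define is consumed in the kernel: the inline bytecode trailing the fixed command body (cmd + 1, sized from the header) is handed to the compat shader manager, and the NULL-resource relocation queues the command itself for NOP-ing. A standalone sketch of carving that trailing payload out of a variable-length command (layout and names are illustrative, not the SVGA structs):

#include <stdint.h>
#include <stddef.h>

struct cmd_header {
	uint32_t id;
	uint32_t size;		/* bytes following the header */
};

struct define_body {
	uint32_t shid;
	uint32_t type;
};

/* The bytecode is whatever trails the fixed body; its length mirrors
 * `cmd->header.size - sizeof(cmd->body)` in the handler above. */
static const void *shader_payload(const struct cmd_header *h, size_t *len)
{
	const struct define_body *body = (const void *)(h + 1);

	if (h->size < sizeof(*body))
		return NULL;		/* malformed command */
	*len = h->size - sizeof(*body);
	return body + 1;
}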
| 1636 | /** | ||
| 1637 | * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY | ||
| 1638 | * command | ||
| 1639 | * | ||
| 1640 | * @dev_priv: Pointer to a device private struct. | ||
| 1641 | * @sw_context: The software context being used for this batch. | ||
| 1642 | * @header: Pointer to the command header in the command stream. | ||
| 1643 | */ | ||
| 1644 | static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, | ||
| 1645 | struct vmw_sw_context *sw_context, | ||
| 1646 | SVGA3dCmdHeader *header) | ||
| 1647 | { | ||
| 1648 | struct vmw_shader_destroy_cmd { | ||
| 1649 | SVGA3dCmdHeader header; | ||
| 1650 | SVGA3dCmdDestroyShader body; | ||
| 1651 | } *cmd; | ||
| 1652 | int ret; | ||
| 1653 | |||
| 1654 | cmd = container_of(header, struct vmw_shader_destroy_cmd, | ||
| 1655 | header); | ||
| 1656 | |||
| 1657 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
| 1658 | user_context_converter, &cmd->body.cid, | ||
| 1659 | NULL); | ||
| 1660 | if (unlikely(ret != 0)) | ||
| 1661 | return ret; | ||
| 1662 | |||
| 1663 | if (unlikely(!dev_priv->has_mob)) | ||
| 1664 | return 0; | ||
| 1665 | |||
| 1666 | ret = vmw_compat_shader_remove(sw_context->fp->shman, | ||
| 1667 | cmd->body.shid, | ||
| 1668 | cmd->body.type, | ||
| 1669 | &sw_context->staged_shaders); | ||
| 1670 | if (unlikely(ret != 0)) | ||
| 1671 | return ret; | ||
| 1672 | |||
| 1673 | return vmw_resource_relocation_add(&sw_context->res_relocations, | ||
| 1674 | NULL, &cmd->header.id - | ||
| 1675 | sw_context->buf_start); | ||
| 1678 | } | ||
| 1679 | |||
| 1481 | /** | 1680 | /** |
| 1482 | * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER | 1681 | * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER |
| 1483 | * command | 1682 | * command |
| @@ -1509,10 +1708,18 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, | |||
| 1509 | if (dev_priv->has_mob) { | 1708 | if (dev_priv->has_mob) { |
| 1510 | struct vmw_ctx_bindinfo bi; | 1709 | struct vmw_ctx_bindinfo bi; |
| 1511 | struct vmw_resource_val_node *res_node; | 1710 | struct vmw_resource_val_node *res_node; |
| 1512 | 1711 | u32 shid = cmd->body.shid; | |
| 1513 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader, | 1712 | |
| 1514 | user_shader_converter, | 1713 | if (shid != SVGA3D_INVALID_ID) |
| 1515 | &cmd->body.shid, &res_node); | 1714 | (void) vmw_compat_shader_lookup(sw_context->fp->shman, |
| 1715 | cmd->body.type, | ||
| 1716 | &shid); | ||
| 1717 | |||
| 1718 | ret = vmw_cmd_compat_res_check(dev_priv, sw_context, | ||
| 1719 | vmw_res_shader, | ||
| 1720 | user_shader_converter, | ||
| 1721 | shid, | ||
| 1722 | &cmd->body.shid, &res_node); | ||
| 1516 | if (unlikely(ret != 0)) | 1723 | if (unlikely(ret != 0)) |
| 1517 | return ret; | 1724 | return ret; |
| 1518 | 1725 | ||
| @@ -1527,6 +1734,39 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, | |||
| 1527 | } | 1734 | } |
| 1528 | 1735 | ||
| 1529 | /** | 1736 | /** |
| 1737 | * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST | ||
| 1738 | * command | ||
| 1739 | * | ||
| 1740 | * @dev_priv: Pointer to a device private struct. | ||
| 1741 | * @sw_context: The software context being used for this batch. | ||
| 1742 | * @header: Pointer to the command header in the command stream. | ||
| 1743 | */ | ||
| 1744 | static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv, | ||
| 1745 | struct vmw_sw_context *sw_context, | ||
| 1746 | SVGA3dCmdHeader *header) | ||
| 1747 | { | ||
| 1748 | struct vmw_set_shader_const_cmd { | ||
| 1749 | SVGA3dCmdHeader header; | ||
| 1750 | SVGA3dCmdSetShaderConst body; | ||
| 1751 | } *cmd; | ||
| 1752 | int ret; | ||
| 1753 | |||
| 1754 | cmd = container_of(header, struct vmw_set_shader_const_cmd, | ||
| 1755 | header); | ||
| 1756 | |||
| 1757 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
| 1758 | user_context_converter, &cmd->body.cid, | ||
| 1759 | NULL); | ||
| 1760 | if (unlikely(ret != 0)) | ||
| 1761 | return ret; | ||
| 1762 | |||
| 1763 | if (dev_priv->has_mob) | ||
| 1764 | header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE; | ||
| 1765 | |||
| 1766 | return 0; | ||
| 1767 | } | ||
| 1768 | |||
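vmw_cmd_set_shader_const() shows a third translation strategy: rather than erasing or staging, it validates the context and then rewrites the command id in place so the device executes the guest-backed variant. A tiny sketch of that in-place retargeting (placeholder opcode; that the two body layouts are compatible is the driver's claim, assumed here):

#include <stdint.h>
#include <stdbool.h>

struct cmd_header {
	uint32_t id;
	uint32_t size;
};

#define CMD_SET_GB_CONSTS 0x41u	/* placeholder opcode */

/* Rewrite the opcode in place; the body is left untouched. */
static void retarget_shader_consts(struct cmd_header *h, bool has_mob)
{
	if (has_mob)
		h->id = CMD_SET_GB_CONSTS;
}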
| 1769 | /** | ||
| 1530 | * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER | 1770 | * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER |
| 1531 | * command | 1771 | * command |
| 1532 | * | 1772 | * |
| @@ -1595,7 +1835,7 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, | |||
| 1595 | return 0; | 1835 | return 0; |
| 1596 | } | 1836 | } |
| 1597 | 1837 | ||
| 1598 | static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = { | 1838 | static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { |
| 1599 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid, | 1839 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid, |
| 1600 | false, false, false), | 1840 | false, false, false), |
| 1601 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid, | 1841 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid, |
| @@ -1634,14 +1874,14 @@ static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = { | |||
| 1634 | true, false, false), | 1874 | true, false, false), |
| 1635 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check, | 1875 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check, |
| 1636 | false, false, false), | 1876 | false, false, false), |
| 1637 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check, | 1877 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define, |
| 1638 | true, true, false), | 1878 | true, false, false), |
| 1639 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check, | 1879 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy, |
| 1640 | true, true, false), | 1880 | true, false, false), |
| 1641 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader, | 1881 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader, |
| 1642 | true, false, false), | 1882 | true, false, false), |
| 1643 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check, | 1883 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const, |
| 1644 | true, true, false), | 1884 | true, false, false), |
| 1645 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw, | 1885 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw, |
| 1646 | true, false, false), | 1886 | true, false, false), |
| 1647 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check, | 1887 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check, |
| @@ -1792,6 +2032,9 @@ static int vmw_cmd_check(struct vmw_private *dev_priv, | |||
| 1792 | goto out_invalid; | 2032 | goto out_invalid; |
| 1793 | 2033 | ||
| 1794 | entry = &vmw_cmd_entries[cmd_id]; | 2034 | entry = &vmw_cmd_entries[cmd_id]; |
| 2035 | if (unlikely(!entry->func)) | ||
| 2036 | goto out_invalid; | ||
| 2037 | |||
| 1795 | if (unlikely(!entry->user_allow && !sw_context->kernel)) | 2038 | if (unlikely(!entry->user_allow && !sw_context->kernel)) |
| 1796 | goto out_privileged; | 2039 | goto out_privileged; |
| 1797 | 2040 | ||
| @@ -2171,7 +2414,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
| 2171 | } else | 2414 | } else |
| 2172 | sw_context->kernel = true; | 2415 | sw_context->kernel = true; |
| 2173 | 2416 | ||
| 2174 | sw_context->tfile = vmw_fpriv(file_priv)->tfile; | 2417 | sw_context->fp = vmw_fpriv(file_priv); |
| 2175 | sw_context->cur_reloc = 0; | 2418 | sw_context->cur_reloc = 0; |
| 2176 | sw_context->cur_val_buf = 0; | 2419 | sw_context->cur_val_buf = 0; |
| 2177 | sw_context->fence_flags = 0; | 2420 | sw_context->fence_flags = 0; |
| @@ -2188,16 +2431,17 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
| 2188 | goto out_unlock; | 2431 | goto out_unlock; |
| 2189 | sw_context->res_ht_initialized = true; | 2432 | sw_context->res_ht_initialized = true; |
| 2190 | } | 2433 | } |
| 2434 | INIT_LIST_HEAD(&sw_context->staged_shaders); | ||
| 2191 | 2435 | ||
| 2192 | INIT_LIST_HEAD(&resource_list); | 2436 | INIT_LIST_HEAD(&resource_list); |
| 2193 | ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, | 2437 | ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, |
| 2194 | command_size); | 2438 | command_size); |
| 2195 | if (unlikely(ret != 0)) | 2439 | if (unlikely(ret != 0)) |
| 2196 | goto out_err; | 2440 | goto out_err_nores; |
| 2197 | 2441 | ||
| 2198 | ret = vmw_resources_reserve(sw_context); | 2442 | ret = vmw_resources_reserve(sw_context); |
| 2199 | if (unlikely(ret != 0)) | 2443 | if (unlikely(ret != 0)) |
| 2200 | goto out_err; | 2444 | goto out_err_nores; |
| 2201 | 2445 | ||
| 2202 | ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes); | 2446 | ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes); |
| 2203 | if (unlikely(ret != 0)) | 2447 | if (unlikely(ret != 0)) |
| @@ -2225,6 +2469,12 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
| 2225 | goto out_err; | 2469 | goto out_err; |
| 2226 | } | 2470 | } |
| 2227 | 2471 | ||
| 2472 | if (dev_priv->has_mob) { | ||
| 2473 | ret = vmw_rebind_contexts(sw_context); | ||
| 2474 | if (unlikely(ret != 0)) | ||
| 2475 | goto out_unlock_binding; | ||
| 2476 | } | ||
| 2477 | |||
| 2228 | cmd = vmw_fifo_reserve(dev_priv, command_size); | 2478 | cmd = vmw_fifo_reserve(dev_priv, command_size); |
| 2229 | if (unlikely(cmd == NULL)) { | 2479 | if (unlikely(cmd == NULL)) { |
| 2230 | DRM_ERROR("Failed reserving fifo space for commands.\n"); | 2480 | DRM_ERROR("Failed reserving fifo space for commands.\n"); |
| @@ -2276,6 +2526,8 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
| 2276 | } | 2526 | } |
| 2277 | 2527 | ||
| 2278 | list_splice_init(&sw_context->resource_list, &resource_list); | 2528 | list_splice_init(&sw_context->resource_list, &resource_list); |
| 2529 | vmw_compat_shaders_commit(sw_context->fp->shman, | ||
| 2530 | &sw_context->staged_shaders); | ||
| 2279 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 2531 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
| 2280 | 2532 | ||
| 2281 | /* | 2533 | /* |
| @@ -2289,10 +2541,11 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
| 2289 | out_unlock_binding: | 2541 | out_unlock_binding: |
| 2290 | mutex_unlock(&dev_priv->binding_mutex); | 2542 | mutex_unlock(&dev_priv->binding_mutex); |
| 2291 | out_err: | 2543 | out_err: |
| 2292 | vmw_resource_relocations_free(&sw_context->res_relocations); | ||
| 2293 | vmw_free_relocations(sw_context); | ||
| 2294 | ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); | 2544 | ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); |
| 2545 | out_err_nores: | ||
| 2295 | vmw_resource_list_unreserve(&sw_context->resource_list, true); | 2546 | vmw_resource_list_unreserve(&sw_context->resource_list, true); |
| 2547 | vmw_resource_relocations_free(&sw_context->res_relocations); | ||
| 2548 | vmw_free_relocations(sw_context); | ||
| 2296 | vmw_clear_validations(sw_context); | 2549 | vmw_clear_validations(sw_context); |
| 2297 | if (unlikely(dev_priv->pinned_bo != NULL && | 2550 | if (unlikely(dev_priv->pinned_bo != NULL && |
| 2298 | !dev_priv->query_cid_valid)) | 2551 | !dev_priv->query_cid_valid)) |
| @@ -2301,6 +2554,8 @@ out_unlock: | |||
| 2301 | list_splice_init(&sw_context->resource_list, &resource_list); | 2554 | list_splice_init(&sw_context->resource_list, &resource_list); |
| 2302 | error_resource = sw_context->error_resource; | 2555 | error_resource = sw_context->error_resource; |
| 2303 | sw_context->error_resource = NULL; | 2556 | sw_context->error_resource = NULL; |
| 2557 | vmw_compat_shaders_revert(sw_context->fp->shman, | ||
| 2558 | &sw_context->staged_shaders); | ||
| 2304 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 2559 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
| 2305 | 2560 | ||
| 2306 | /* | 2561 | /* |
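The relabelled error paths above encode a simple rule: out_err is only for failures after the TTM buffer reservations are in play and therefore backs them off, while out_err_nores serves the earlier failures; relocation cleanup moves after the resource unreserve so both paths share it. A stub-based sketch of that two-label shape (function names are placeholders for the vmwgfx steps):

extern int parse_commands(void);
extern int reserve_resources(void);
extern int reserve_and_validate_buffers(void);
extern void backoff_buffers(void);
extern void unreserve_resources(void);
extern void free_relocations(void);

static int submit(void)
{
	int ret = parse_commands();

	if (ret)
		goto out_err_nores;	/* nothing reserved yet */

	ret = reserve_resources();
	if (ret)
		goto out_err_nores;

	ret = reserve_and_validate_buffers();
	if (ret)
		goto out_err;		/* buffers involved: back off first */

	return 0;			/* the real code commits the fifo here */

out_err:
	backoff_buffers();
out_err_nores:
	unreserve_resources();
	free_relocations();		/* after unreserve, as in the fix above */
	return ret;
}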
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 116c49736763..47b70949bf3a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | |||
| @@ -29,12 +29,18 @@ | |||
| 29 | #include <drm/vmwgfx_drm.h> | 29 | #include <drm/vmwgfx_drm.h> |
| 30 | #include "vmwgfx_kms.h" | 30 | #include "vmwgfx_kms.h" |
| 31 | 31 | ||
| 32 | struct svga_3d_compat_cap { | ||
| 33 | SVGA3dCapsRecordHeader header; | ||
| 34 | SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX]; | ||
| 35 | }; | ||
| 36 | |||
| 32 | int vmw_getparam_ioctl(struct drm_device *dev, void *data, | 37 | int vmw_getparam_ioctl(struct drm_device *dev, void *data, |
| 33 | struct drm_file *file_priv) | 38 | struct drm_file *file_priv) |
| 34 | { | 39 | { |
| 35 | struct vmw_private *dev_priv = vmw_priv(dev); | 40 | struct vmw_private *dev_priv = vmw_priv(dev); |
| 36 | struct drm_vmw_getparam_arg *param = | 41 | struct drm_vmw_getparam_arg *param = |
| 37 | (struct drm_vmw_getparam_arg *)data; | 42 | (struct drm_vmw_getparam_arg *)data; |
| 43 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); | ||
| 38 | 44 | ||
| 39 | switch (param->param) { | 45 | switch (param->param) { |
| 40 | case DRM_VMW_PARAM_NUM_STREAMS: | 46 | case DRM_VMW_PARAM_NUM_STREAMS: |
| @@ -60,6 +66,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
| 60 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | 66 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; |
| 61 | const struct vmw_fifo_state *fifo = &dev_priv->fifo; | 67 | const struct vmw_fifo_state *fifo = &dev_priv->fifo; |
| 62 | 68 | ||
| 69 | if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) { | ||
| 70 | param->value = SVGA3D_HWVERSION_WS8_B1; | ||
| 71 | break; | ||
| 72 | } | ||
| 73 | |||
| 63 | param->value = | 74 | param->value = |
| 64 | ioread32(fifo_mem + | 75 | ioread32(fifo_mem + |
| 65 | ((fifo->capabilities & | 76 | ((fifo->capabilities & |
| @@ -69,19 +80,31 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
| 69 | break; | 80 | break; |
| 70 | } | 81 | } |
| 71 | case DRM_VMW_PARAM_MAX_SURF_MEMORY: | 82 | case DRM_VMW_PARAM_MAX_SURF_MEMORY: |
| 72 | param->value = dev_priv->memory_size; | 83 | if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) && |
| 84 | !vmw_fp->gb_aware) | ||
| 85 | param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2; | ||
| 86 | else | ||
| 87 | param->value = dev_priv->memory_size; | ||
| 73 | break; | 88 | break; |
| 74 | case DRM_VMW_PARAM_3D_CAPS_SIZE: | 89 | case DRM_VMW_PARAM_3D_CAPS_SIZE: |
| 75 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) | 90 | if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) && |
| 76 | param->value = SVGA3D_DEVCAP_MAX; | 91 | vmw_fp->gb_aware) |
| 92 | param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t); | ||
| 93 | else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) | ||
| 94 | param->value = sizeof(struct svga_3d_compat_cap) + | ||
| 95 | sizeof(uint32_t); | ||
| 77 | else | 96 | else |
| 78 | param->value = (SVGA_FIFO_3D_CAPS_LAST - | 97 | param->value = (SVGA_FIFO_3D_CAPS_LAST - |
| 79 | SVGA_FIFO_3D_CAPS + 1); | 98 | SVGA_FIFO_3D_CAPS + 1) * |
| 80 | param->value *= sizeof(uint32_t); | 99 | sizeof(uint32_t); |
| 81 | break; | 100 | break; |
| 82 | case DRM_VMW_PARAM_MAX_MOB_MEMORY: | 101 | case DRM_VMW_PARAM_MAX_MOB_MEMORY: |
| 102 | vmw_fp->gb_aware = true; | ||
| 83 | param->value = dev_priv->max_mob_pages * PAGE_SIZE; | 103 | param->value = dev_priv->max_mob_pages * PAGE_SIZE; |
| 84 | break; | 104 | break; |
| 105 | case DRM_VMW_PARAM_MAX_MOB_SIZE: | ||
| 106 | param->value = dev_priv->max_mob_size; | ||
| 107 | break; | ||
| 85 | default: | 108 | default: |
| 86 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", | 109 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", |
| 87 | param->param); | 110 | param->param); |
| @@ -91,6 +114,38 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
| 91 | return 0; | 114 | return 0; |
| 92 | } | 115 | } |
| 93 | 116 | ||
| 117 | static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce, | ||
| 118 | size_t size) | ||
| 119 | { | ||
| 120 | struct svga_3d_compat_cap *compat_cap = | ||
| 121 | (struct svga_3d_compat_cap *) bounce; | ||
| 122 | unsigned int i; | ||
| 123 | size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs); | ||
| 124 | unsigned int max_size; | ||
| 125 | |||
| 126 | if (size < pair_offset) | ||
| 127 | return -EINVAL; | ||
| 128 | |||
| 129 | max_size = (size - pair_offset) / sizeof(SVGA3dCapPair); | ||
| 130 | |||
| 131 | if (max_size > SVGA3D_DEVCAP_MAX) | ||
| 132 | max_size = SVGA3D_DEVCAP_MAX; | ||
| 133 | |||
| 134 | compat_cap->header.length = | ||
| 135 | (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32); | ||
| 136 | compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; | ||
| 137 | |||
| 138 | mutex_lock(&dev_priv->hw_mutex); | ||
| 139 | for (i = 0; i < max_size; ++i) { | ||
| 140 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); | ||
| 141 | compat_cap->pairs[i][0] = i; | ||
| 142 | compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP); | ||
| 143 | } | ||
| 144 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 145 | |||
| 146 | return 0; | ||
| 147 | } | ||
| 148 | |||
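vmw_fill_compat_cap() manufactures the record layout pre-GB userspace expects: one caps record header whose length field counts 32-bit words including the header, followed by (index, value) pairs read back from the device register. A standalone sketch of the same layout arithmetic (struct names merely mirror the SVGA ones; the type value is a placeholder):

#include <stdint.h>
#include <stddef.h>

struct cap_header {		/* mirrors SVGA3dCapsRecordHeader */
	uint32_t length;	/* in 32-bit words, header included */
	uint32_t type;
};

typedef uint32_t cap_pair[2];	/* mirrors SVGA3dCapPair: (index, value) */

struct compat_cap {
	struct cap_header header;
	cap_pair pairs[256];	/* 256 is an arbitrary bound for the sketch */
};

static void fill_caps(struct compat_cap *c, size_t n,
		      uint32_t (*read_cap)(uint32_t index))
{
	if (n > 256)
		n = 256;

	c->header.length = (offsetof(struct compat_cap, pairs) +
			    n * sizeof(cap_pair)) / sizeof(uint32_t);
	c->header.type = 1;	/* placeholder for SVGA3DCAPS_RECORD_DEVCAPS */

	for (size_t i = 0; i < n; i++) {
		c->pairs[i][0] = (uint32_t)i;
		c->pairs[i][1] = read_cap((uint32_t)i);
	}
}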
| 94 | 149 | ||
| 95 | int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, | 150 | int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, |
| 96 | struct drm_file *file_priv) | 151 | struct drm_file *file_priv) |
| @@ -104,41 +159,49 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, | |||
| 104 | void *bounce; | 159 | void *bounce; |
| 105 | int ret; | 160 | int ret; |
| 106 | bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS); | 161 | bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS); |
| 162 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); | ||
| 107 | 163 | ||
| 108 | if (unlikely(arg->pad64 != 0)) { | 164 | if (unlikely(arg->pad64 != 0)) { |
| 109 | DRM_ERROR("Illegal GET_3D_CAP argument.\n"); | 165 | DRM_ERROR("Illegal GET_3D_CAP argument.\n"); |
| 110 | return -EINVAL; | 166 | return -EINVAL; |
| 111 | } | 167 | } |
| 112 | 168 | ||
| 113 | if (gb_objects) | 169 | if (gb_objects && vmw_fp->gb_aware) |
| 114 | size = SVGA3D_DEVCAP_MAX; | 170 | size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t); |
| 171 | else if (gb_objects) | ||
| 172 | size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t); | ||
| 115 | else | 173 | else |
| 116 | size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1); | 174 | size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) * |
| 117 | 175 | sizeof(uint32_t); | |
| 118 | size *= sizeof(uint32_t); | ||
| 119 | 176 | ||
| 120 | if (arg->max_size < size) | 177 | if (arg->max_size < size) |
| 121 | size = arg->max_size; | 178 | size = arg->max_size; |
| 122 | 179 | ||
| 123 | bounce = vmalloc(size); | 180 | bounce = vzalloc(size); |
| 124 | if (unlikely(bounce == NULL)) { | 181 | if (unlikely(bounce == NULL)) { |
| 125 | DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n"); | 182 | DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n"); |
| 126 | return -ENOMEM; | 183 | return -ENOMEM; |
| 127 | } | 184 | } |
| 128 | 185 | ||
| 129 | if (gb_objects) { | 186 | if (gb_objects && vmw_fp->gb_aware) { |
| 130 | int i; | 187 | int i, num; |
| 131 | uint32_t *bounce32 = (uint32_t *) bounce; | 188 | uint32_t *bounce32 = (uint32_t *) bounce; |
| 132 | 189 | ||
| 190 | num = size / sizeof(uint32_t); | ||
| 191 | if (num > SVGA3D_DEVCAP_MAX) | ||
| 192 | num = SVGA3D_DEVCAP_MAX; | ||
| 193 | |||
| 133 | mutex_lock(&dev_priv->hw_mutex); | 194 | mutex_lock(&dev_priv->hw_mutex); |
| 134 | for (i = 0; i < SVGA3D_DEVCAP_MAX; ++i) { | 195 | for (i = 0; i < num; ++i) { |
| 135 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); | 196 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); |
| 136 | *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); | 197 | *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); |
| 137 | } | 198 | } |
| 138 | mutex_unlock(&dev_priv->hw_mutex); | 199 | mutex_unlock(&dev_priv->hw_mutex); |
| 139 | 200 | } else if (gb_objects) { | |
| 201 | ret = vmw_fill_compat_cap(dev_priv, bounce, size); | ||
| 202 | if (unlikely(ret != 0)) | ||
| 203 | goto out_err; | ||
| 140 | } else { | 204 | } else { |
| 141 | |||
| 142 | fifo_mem = dev_priv->mmio_virt; | 205 | fifo_mem = dev_priv->mmio_virt; |
| 143 | memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); | 206 | memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); |
| 144 | } | 207 | } |
| @@ -146,6 +209,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, | |||
| 146 | ret = copy_to_user(buffer, bounce, size); | 209 | ret = copy_to_user(buffer, bounce, size); |
| 147 | if (ret) | 210 | if (ret) |
| 148 | ret = -EFAULT; | 211 | ret = -EFAULT; |
| 212 | out_err: | ||
| 149 | vfree(bounce); | 213 | vfree(bounce); |
| 150 | 214 | ||
| 151 | if (unlikely(ret != 0)) | 215 | if (unlikely(ret != 0)) |
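The GET_3D_CAP path above is a textbook bounce-buffer ioctl: size the reply according to what the caller can understand, clamp it to the caller's max_size, allocate zeroed memory so a short fill cannot leak stale kernel bytes (the vmalloc-to-vzalloc change), then perform a single copy_to_user. A user-space sketch of that shape with libc stand-ins for the kernel primitives:

#include <stdlib.h>
#include <string.h>
#include <errno.h>

/* libc stand-in for copy_to_user() */
static int to_user(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

static int get_caps(void *user_buf, size_t user_max, size_t reply_size,
		    int (*fill)(void *dst, size_t n))
{
	void *bounce;
	int ret;

	if (reply_size > user_max)
		reply_size = user_max;	/* never exceed the caller's buffer */

	bounce = calloc(1, reply_size);	/* zeroed, like vzalloc() */
	if (bounce == NULL)
		return -ENOMEM;

	ret = fill(bounce, reply_size);
	if (ret == 0)
		ret = to_user(user_buf, bounce, reply_size);

	free(bounce);
	return ret;
}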
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index 4910e7b81811..04a64b8cd3cd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | |||
| @@ -134,6 +134,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv, | |||
| 134 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 134 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
| 135 | if (unlikely(cmd == NULL)) { | 135 | if (unlikely(cmd == NULL)) { |
| 136 | DRM_ERROR("Failed reserving FIFO space for OTable setup.\n"); | 136 | DRM_ERROR("Failed reserving FIFO space for OTable setup.\n"); |
| 137 | ret = -ENOMEM; | ||
| 137 | goto out_no_fifo; | 138 | goto out_no_fifo; |
| 138 | } | 139 | } |
| 139 | 140 | ||
| @@ -187,18 +188,20 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv, | |||
| 187 | 188 | ||
| 188 | bo = otable->page_table->pt_bo; | 189 | bo = otable->page_table->pt_bo; |
| 189 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 190 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
| 190 | if (unlikely(cmd == NULL)) | 191 | if (unlikely(cmd == NULL)) { |
| 191 | DRM_ERROR("Failed reserving FIFO space for OTable setup.\n"); | 192 | DRM_ERROR("Failed reserving FIFO space for OTable " |
| 192 | 193 | "takedown.\n"); | |
| 193 | memset(cmd, 0, sizeof(*cmd)); | 194 | } else { |
| 194 | cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; | 195 | memset(cmd, 0, sizeof(*cmd)); |
| 195 | cmd->header.size = sizeof(cmd->body); | 196 | cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; |
| 196 | cmd->body.type = type; | 197 | cmd->header.size = sizeof(cmd->body); |
| 197 | cmd->body.baseAddress = 0; | 198 | cmd->body.type = type; |
| 198 | cmd->body.sizeInBytes = 0; | 199 | cmd->body.baseAddress = 0; |
| 199 | cmd->body.validSizeInBytes = 0; | 200 | cmd->body.sizeInBytes = 0; |
| 200 | cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID; | 201 | cmd->body.validSizeInBytes = 0; |
| 201 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 202 | cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID; |
| 203 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 204 | } | ||
| 202 | 205 | ||
| 203 | if (bo) { | 206 | if (bo) { |
| 204 | int ret; | 207 | int ret; |
| @@ -561,11 +564,12 @@ void vmw_mob_unbind(struct vmw_private *dev_priv, | |||
| 561 | if (unlikely(cmd == NULL)) { | 564 | if (unlikely(cmd == NULL)) { |
| 562 | DRM_ERROR("Failed reserving FIFO space for Memory " | 565 | DRM_ERROR("Failed reserving FIFO space for Memory " |
| 563 | "Object unbinding.\n"); | 566 | "Object unbinding.\n"); |
| 567 | } else { | ||
| 568 | cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB; | ||
| 569 | cmd->header.size = sizeof(cmd->body); | ||
| 570 | cmd->body.mobid = mob->id; | ||
| 571 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 564 | } | 572 | } |
| 565 | cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB; | ||
| 566 | cmd->header.size = sizeof(cmd->body); | ||
| 567 | cmd->body.mobid = mob->id; | ||
| 568 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 569 | if (bo) { | 573 | if (bo) { |
| 570 | vmw_fence_single_bo(bo, NULL); | 574 | vmw_fence_single_bo(bo, NULL); |
| 571 | ttm_bo_unreserve(bo); | 575 | ttm_bo_unreserve(bo); |
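Both vmwgfx_mob.c fixes enforce one rule: vmw_fifo_reserve() may return NULL, so the command write and commit must sit inside the success branch; the old unbind path logged the failure and then dereferenced the NULL reservation anyway. A minimal sketch of the corrected shape (opcode and layout are placeholders):

#include <stdio.h>
#include <stddef.h>

struct destroy_cmd {
	unsigned int id;
	unsigned int size;
	unsigned int mobid;
};

extern struct destroy_cmd *fifo_reserve(size_t bytes);	/* may return NULL */
extern void fifo_commit(size_t bytes);

static void send_destroy_mob(unsigned int mobid)
{
	struct destroy_cmd *cmd = fifo_reserve(sizeof(*cmd));

	if (cmd == NULL) {
		fprintf(stderr, "no fifo space for MOB destroy\n");
		return;		/* never touch a failed reservation */
	}

	cmd->id = 0;		/* placeholder opcode, not the real SVGA id */
	cmd->size = sizeof(cmd->mobid);
	cmd->mobid = mobid;
	fifo_commit(sizeof(*cmd));
}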
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 6fdd82d42f65..9757b57f8388 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
| @@ -88,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res) | |||
| 88 | return res; | 88 | return res; |
| 89 | } | 89 | } |
| 90 | 90 | ||
| 91 | struct vmw_resource * | ||
| 92 | vmw_resource_reference_unless_doomed(struct vmw_resource *res) | ||
| 93 | { | ||
| 94 | return kref_get_unless_zero(&res->kref) ? res : NULL; | ||
| 95 | } | ||
| 91 | 96 | ||
| 92 | /** | 97 | /** |
| 93 | * vmw_resource_release_id - release a resource id to the id manager. | 98 | * vmw_resource_release_id - release a resource id to the id manager. |
| @@ -136,8 +141,12 @@ static void vmw_resource_release(struct kref *kref) | |||
| 136 | vmw_dmabuf_unreference(&res->backup); | 141 | vmw_dmabuf_unreference(&res->backup); |
| 137 | } | 142 | } |
| 138 | 143 | ||
| 139 | if (likely(res->hw_destroy != NULL)) | 144 | if (likely(res->hw_destroy != NULL)) { |
| 140 | res->hw_destroy(res); | 145 | res->hw_destroy(res); |
| 146 | mutex_lock(&dev_priv->binding_mutex); | ||
| 147 | vmw_context_binding_res_list_kill(&res->binding_head); | ||
| 148 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 149 | } | ||
| 141 | 150 | ||
| 142 | id = res->id; | 151 | id = res->id; |
| 143 | if (res->res_free != NULL) | 152 | if (res->res_free != NULL) |
| @@ -418,8 +427,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv, | |||
| 418 | INIT_LIST_HEAD(&vmw_bo->res_list); | 427 | INIT_LIST_HEAD(&vmw_bo->res_list); |
| 419 | 428 | ||
| 420 | ret = ttm_bo_init(bdev, &vmw_bo->base, size, | 429 | ret = ttm_bo_init(bdev, &vmw_bo->base, size, |
| 421 | (user) ? ttm_bo_type_device : | 430 | ttm_bo_type_device, placement, |
| 422 | ttm_bo_type_kernel, placement, | ||
| 423 | 0, interruptible, | 431 | 0, interruptible, |
| 424 | NULL, acc_size, NULL, bo_free); | 432 | NULL, acc_size, NULL, bo_free); |
| 425 | return ret; | 433 | return ret; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index 1457ec4b7125..ee3856578a12 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | |||
| @@ -29,6 +29,8 @@ | |||
| 29 | #include "vmwgfx_resource_priv.h" | 29 | #include "vmwgfx_resource_priv.h" |
| 30 | #include "ttm/ttm_placement.h" | 30 | #include "ttm/ttm_placement.h" |
| 31 | 31 | ||
| 32 | #define VMW_COMPAT_SHADER_HT_ORDER 12 | ||
| 33 | |||
| 32 | struct vmw_shader { | 34 | struct vmw_shader { |
| 33 | struct vmw_resource res; | 35 | struct vmw_resource res; |
| 34 | SVGA3dShaderType type; | 36 | SVGA3dShaderType type; |
| @@ -40,6 +42,50 @@ struct vmw_user_shader { | |||
| 40 | struct vmw_shader shader; | 42 | struct vmw_shader shader; |
| 41 | }; | 43 | }; |
| 42 | 44 | ||
| 45 | /** | ||
| 46 | * enum vmw_compat_shader_state - Staging state for compat shaders | ||
| 47 | */ | ||
| 48 | enum vmw_compat_shader_state { | ||
| 49 | VMW_COMPAT_COMMITED, | ||
| 50 | VMW_COMPAT_ADD, | ||
| 51 | VMW_COMPAT_DEL | ||
| 52 | }; | ||
| 53 | |||
| 54 | /** | ||
| 55 | * struct vmw_compat_shader - Metadata for compat shaders. | ||
| 56 | * | ||
| 57 | * @handle: The TTM handle of the guest backed shader. | ||
| 58 | * @tfile: The struct ttm_object_file the guest backed shader is registered | ||
| 59 | * with. | ||
| 60 | * @hash: Hash item for lookup. | ||
| 61 | * @head: List head for staging lists or the compat shader manager list. | ||
| 62 | * @state: Staging state. | ||
| 63 | * | ||
| 64 | * The structure is protected by the cmdbuf lock. | ||
| 65 | */ | ||
| 66 | struct vmw_compat_shader { | ||
| 67 | u32 handle; | ||
| 68 | struct ttm_object_file *tfile; | ||
| 69 | struct drm_hash_item hash; | ||
| 70 | struct list_head head; | ||
| 71 | enum vmw_compat_shader_state state; | ||
| 72 | }; | ||
| 73 | |||
| 74 | /** | ||
| 75 | * struct vmw_compat_shader_manager - Compat shader manager. | ||
| 76 | * | ||
| 77 | * @shaders: Hash table containing staged and committed compat shaders. | ||
| 78 | * @list: List of committed shaders. | ||
| 79 | * @dev_priv: Pointer to a device private structure. | ||
| 80 | * | ||
| 81 | * @shaders and @list are protected by the cmdbuf mutex for now. | ||
| 82 | */ | ||
| 83 | struct vmw_compat_shader_manager { | ||
| 84 | struct drm_open_hash shaders; | ||
| 85 | struct list_head list; | ||
| 86 | struct vmw_private *dev_priv; | ||
| 87 | }; | ||
| 88 | |||
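One detail worth noting: the manager keys its hash table on more than the user-visible id. The lookup code later in this file folds the shader type into bits 24 and up of the key, so the same user key can name one vertex and one pixel shader; this quietly assumes user keys stay below 2^24. A small standalone illustration:

#include <stdint.h>
#include <stdio.h>

static unsigned long compat_shader_key(uint32_t user_key, uint32_t type)
{
	return user_key | ((unsigned long)type << 24);
}

int main(void)
{
	/* same user key, different shader type => distinct hash keys */
	printf("%lx\n", compat_shader_key(5, 0));
	printf("%lx\n", compat_shader_key(5, 1));
	return 0;
}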
| 43 | static void vmw_user_shader_free(struct vmw_resource *res); | 89 | static void vmw_user_shader_free(struct vmw_resource *res); |
| 44 | static struct vmw_resource * | 90 | static struct vmw_resource * |
| 45 | vmw_user_shader_base_to_res(struct ttm_base_object *base); | 91 | vmw_user_shader_base_to_res(struct ttm_base_object *base); |
| @@ -258,7 +304,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res) | |||
| 258 | return 0; | 304 | return 0; |
| 259 | 305 | ||
| 260 | mutex_lock(&dev_priv->binding_mutex); | 306 | mutex_lock(&dev_priv->binding_mutex); |
| 261 | vmw_context_binding_res_list_kill(&res->binding_head); | 307 | vmw_context_binding_res_list_scrub(&res->binding_head); |
| 262 | 308 | ||
| 263 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 309 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
| 264 | if (unlikely(cmd == NULL)) { | 310 | if (unlikely(cmd == NULL)) { |
| @@ -325,13 +371,81 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, | |||
| 325 | TTM_REF_USAGE); | 371 | TTM_REF_USAGE); |
| 326 | } | 372 | } |
| 327 | 373 | ||
| 374 | static int vmw_shader_alloc(struct vmw_private *dev_priv, | ||
| 375 | struct vmw_dma_buffer *buffer, | ||
| 376 | size_t shader_size, | ||
| 377 | size_t offset, | ||
| 378 | SVGA3dShaderType shader_type, | ||
| 379 | struct ttm_object_file *tfile, | ||
| 380 | u32 *handle) | ||
| 381 | { | ||
| 382 | struct vmw_user_shader *ushader; | ||
| 383 | struct vmw_resource *res, *tmp; | ||
| 384 | int ret; | ||
| 385 | |||
| 386 | /* | ||
| 387 | * Approximate idr memory usage with 128 bytes. It will be limited | ||
| 388 | * by the maximum number of shaders anyway. | ||
| 389 | */ | ||
| 390 | if (unlikely(vmw_user_shader_size == 0)) | ||
| 391 | vmw_user_shader_size = | ||
| 392 | ttm_round_pot(sizeof(struct vmw_user_shader)) + 128; | ||
| 393 | |||
| 394 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), | ||
| 395 | vmw_user_shader_size, | ||
| 396 | false, true); | ||
| 397 | if (unlikely(ret != 0)) { | ||
| 398 | if (ret != -ERESTARTSYS) | ||
| 399 | DRM_ERROR("Out of graphics memory for shader " | ||
| 400 | "creation.\n"); | ||
| 401 | goto out; | ||
| 402 | } | ||
| 403 | |||
| 404 | ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); | ||
| 405 | if (unlikely(ushader == NULL)) { | ||
| 406 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | ||
| 407 | vmw_user_shader_size); | ||
| 408 | ret = -ENOMEM; | ||
| 409 | goto out; | ||
| 410 | } | ||
| 411 | |||
| 412 | res = &ushader->shader.res; | ||
| 413 | ushader->base.shareable = false; | ||
| 414 | ushader->base.tfile = NULL; | ||
| 415 | |||
| 416 | /* | ||
| 417 | * From here on, the destructor takes over resource freeing. | ||
| 418 | */ | ||
| 419 | |||
| 420 | ret = vmw_gb_shader_init(dev_priv, res, shader_size, | ||
| 421 | offset, shader_type, buffer, | ||
| 422 | vmw_user_shader_free); | ||
| 423 | if (unlikely(ret != 0)) | ||
| 424 | goto out; | ||
| 425 | |||
| 426 | tmp = vmw_resource_reference(res); | ||
| 427 | ret = ttm_base_object_init(tfile, &ushader->base, false, | ||
| 428 | VMW_RES_SHADER, | ||
| 429 | &vmw_user_shader_base_release, NULL); | ||
| 430 | |||
| 431 | if (unlikely(ret != 0)) { | ||
| 432 | vmw_resource_unreference(&tmp); | ||
| 433 | goto out_err; | ||
| 434 | } | ||
| 435 | |||
| 436 | if (handle) | ||
| 437 | *handle = ushader->base.hash.key; | ||
| 438 | out_err: | ||
| 439 | vmw_resource_unreference(&res); | ||
| 440 | out: | ||
| 441 | return ret; | ||
| 442 | } | ||
| 443 | |||
| 444 | |||
| 328 | int vmw_shader_define_ioctl(struct drm_device *dev, void *data, | 445 | int vmw_shader_define_ioctl(struct drm_device *dev, void *data, |
| 329 | struct drm_file *file_priv) | 446 | struct drm_file *file_priv) |
| 330 | { | 447 | { |
| 331 | struct vmw_private *dev_priv = vmw_priv(dev); | 448 | struct vmw_private *dev_priv = vmw_priv(dev); |
| 332 | struct vmw_user_shader *ushader; | ||
| 333 | struct vmw_resource *res; | ||
| 334 | struct vmw_resource *tmp; | ||
| 335 | struct drm_vmw_shader_create_arg *arg = | 449 | struct drm_vmw_shader_create_arg *arg = |
| 336 | (struct drm_vmw_shader_create_arg *)data; | 450 | (struct drm_vmw_shader_create_arg *)data; |
| 337 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 451 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
| @@ -373,69 +487,326 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data, | |||
| 373 | goto out_bad_arg; | 487 | goto out_bad_arg; |
| 374 | } | 488 | } |
| 375 | 489 | ||
| 376 | /* | 490 | ret = ttm_read_lock(&vmaster->lock, true); |
| 377 | * Approximate idr memory usage with 128 bytes. It will be limited | 491 | if (unlikely(ret != 0)) |
| 378 | * by the maximum number of shaders anyway. | 492 | goto out_bad_arg; |
| 379 | */ | ||
| 380 | 493 | ||
| 381 | if (unlikely(vmw_user_shader_size == 0)) | 494 | ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset, |
| 382 | vmw_user_shader_size = ttm_round_pot(sizeof(*ushader)) | 495 | shader_type, tfile, &arg->shader_handle); |
| 383 | + 128; | ||
| 384 | 496 | ||
| 385 | ret = ttm_read_lock(&vmaster->lock, true); | 497 | ttm_read_unlock(&vmaster->lock); |
| 498 | out_bad_arg: | ||
| 499 | vmw_dmabuf_unreference(&buffer); | ||
| 500 | return ret; | ||
| 501 | } | ||
| 502 | |||
| 503 | /** | ||
| 504 | * vmw_compat_shader_lookup - Look up a compat shader | ||
| 505 | * | ||
| 506 | * @man: Pointer to the compat shader manager. | ||
| 507 | * @shader_type: The shader type that, combined with the user_key, identifies | ||
| 508 | * the shader. | ||
| 509 | * @user_key: On entry, this should point to the user key. | ||
| 510 | * On successful exit, it will contain the guest-backed shader's TTM handle. | ||
| 511 | * | ||
| 512 | * Returns 0 on success. Non-zero on failure, in which case the value pointed | ||
| 513 | * to by @user_key is unmodified. | ||
| 514 | */ | ||
| 515 | int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man, | ||
| 516 | SVGA3dShaderType shader_type, | ||
| 517 | u32 *user_key) | ||
| 518 | { | ||
| 519 | struct drm_hash_item *hash; | ||
| 520 | int ret; | ||
| 521 | unsigned long key = *user_key | (shader_type << 24); | ||
| 522 | |||
| 523 | ret = drm_ht_find_item(&man->shaders, key, &hash); | ||
| 386 | if (unlikely(ret != 0)) | 524 | if (unlikely(ret != 0)) |
| 387 | return ret; | 525 | return ret; |
| 388 | 526 | ||
| 389 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), | 527 | *user_key = drm_hash_entry(hash, struct vmw_compat_shader, |
| 390 | vmw_user_shader_size, | 528 | hash)->handle; |
| 391 | false, true); | 529 | |
| 392 | if (unlikely(ret != 0)) { | 530 | return 0; |
| 393 | if (ret != -ERESTARTSYS) | 531 | } |
| 394 | DRM_ERROR("Out of graphics memory for shader" | 532 | |
| 395 | " creation.\n"); | 533 | /** |
| 396 | goto out_unlock; | 534 | * vmw_compat_shader_free - Free a compat shader. |
| 535 | * | ||
| 536 | * @man: Pointer to the compat shader manager. | ||
| 537 | * @entry: Pointer to a struct vmw_compat_shader. | ||
| 538 | * | ||
| 539 | * Frees a struct vmw_compat_shader entry and drops its reference to the | ||
| 540 | * guest-backed shader. | ||
| 541 | */ | ||
| 542 | static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man, | ||
| 543 | struct vmw_compat_shader *entry) | ||
| 544 | { | ||
| 545 | list_del(&entry->head); | ||
| 546 | WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash)); | ||
| 547 | WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle, | ||
| 548 | TTM_REF_USAGE)); | ||
| 549 | kfree(entry); | ||
| 550 | } | ||
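
vmw_compat_shader_lookup() above resolves entries through a composite hash key: the low 24 bits carry the user key and the shader type is shifted in above them. The same packing is used on insertion in vmw_compat_shader_add() further down, which is why that function rejects user keys wider than 24 bits. A stand-alone demonstration of the layout (the key and type values are arbitrary examples):

    #include <stdint.h>
    #include <stdio.h>

    static unsigned long compat_shader_key(uint32_t user_key, unsigned type)
    {
            /* user_key must fit in 24 bits; type occupies the bits above */
            return user_key | ((unsigned long)type << 24);
    }

    int main(void)
    {
            uint32_t user_key = 0x00ABCDEF;  /* <= (1 << 24) - 1 */
            unsigned type = 1;               /* an SVGA3dShaderType value */
            unsigned long key = compat_shader_key(user_key, type);

            printf("key = 0x%lx (user_key = 0x%x, type = %u)\n",
                   key, user_key, type);
            return 0;
    }
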
| 551 | |||
| 552 | /** | ||
| 553 | * vmw_compat_shaders_commit - Commit a list of compat shader actions. | ||
| 554 | * | ||
| 555 | * @man: Pointer to the compat shader manager. | ||
| 556 | * @list: Caller's list of compat shader actions. | ||
| 557 | * | ||
| 558 | * This function commits a list of compat shader additions or removals. | ||
| 559 | * It is typically called when the execbuf ioctl call triggering these | ||
| 560 | * actions has committed the fifo contents to the device. | ||
| 561 | */ | ||
| 562 | void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man, | ||
| 563 | struct list_head *list) | ||
| 564 | { | ||
| 565 | struct vmw_compat_shader *entry, *next; | ||
| 566 | |||
| 567 | list_for_each_entry_safe(entry, next, list, head) { | ||
| 568 | list_del(&entry->head); | ||
| 569 | switch (entry->state) { | ||
| 570 | case VMW_COMPAT_ADD: | ||
| 571 | entry->state = VMW_COMPAT_COMMITED; | ||
| 572 | list_add_tail(&entry->head, &man->list); | ||
| 573 | break; | ||
| 574 | case VMW_COMPAT_DEL: | ||
| 575 | ttm_ref_object_base_unref(entry->tfile, entry->handle, | ||
| 576 | TTM_REF_USAGE); | ||
| 577 | kfree(entry); | ||
| 578 | break; | ||
| 579 | default: | ||
| 580 | BUG(); | ||
| 581 | break; | ||
| 582 | } | ||
| 397 | } | 583 | } |
| 584 | } | ||
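
vmw_compat_shaders_commit() above, like the revert and destroy paths further down, walks its list with list_for_each_entry_safe() because entries are unlinked, moved, or freed while the walk is in progress; the "safe" variant caches the next pointer before the loop body runs. A tiny userspace re-creation of the intrusive-list idiom, assuming nothing beyond standard C, shows why that cached pointer matters:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static void list_add_tail(struct list_head *item, struct list_head *head)
    {
            item->prev = head->prev;
            item->next = head;
            head->prev->next = item;
            head->prev = item;
    }

    struct entry { int id; struct list_head head; };

    int main(void)
    {
            struct list_head list = { &list, &list };

            for (int i = 0; i < 3; i++) {
                    struct entry *e = malloc(sizeof(*e));
                    if (e == NULL)
                            return 1;
                    e->id = i;
                    list_add_tail(&e->head, &list);
            }

            /* "Safe" traversal: capture next before freeing the current
             * entry, exactly what list_for_each_entry_safe() does. */
            for (struct list_head *pos = list.next, *n = pos->next;
                 pos != &list; pos = n, n = pos->next) {
                    struct entry *e = container_of(pos, struct entry, head);
                    printf("freeing entry %d\n", e->id);
                    free(e);
            }
            return 0;
    }
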
| 398 | 585 | ||
| 399 | ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); | 586 | /** |
| 400 | if (unlikely(ushader == NULL)) { | 587 | * vmw_compat_shaders_revert - Revert a list of compat shader actions |
| 401 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | 588 | * |
| 402 | vmw_user_shader_size); | 589 | * @man: Pointer to the compat shader manager. |
| 403 | ret = -ENOMEM; | 590 | * @list: Caller's list of compat shader actions. |
| 404 | goto out_unlock; | 591 | * |
| 592 | * This function reverts a list of compat shader additions or removals. | ||
| 593 | * It is typically called when the execbuf ioctl call triggering these | ||
| 594 | * actions failed for some reason, and the command stream was never | ||
| 595 | * submitted. | ||
| 596 | */ | ||
| 597 | void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man, | ||
| 598 | struct list_head *list) | ||
| 599 | { | ||
| 600 | struct vmw_compat_shader *entry, *next; | ||
| 601 | int ret; | ||
| 602 | |||
| 603 | list_for_each_entry_safe(entry, next, list, head) { | ||
| 604 | switch (entry->state) { | ||
| 605 | case VMW_COMPAT_ADD: | ||
| 606 | vmw_compat_shader_free(man, entry); | ||
| 607 | break; | ||
| 608 | case VMW_COMPAT_DEL: | ||
| 609 | ret = drm_ht_insert_item(&man->shaders, &entry->hash); | ||
| 610 | list_del(&entry->head); | ||
| 611 | list_add_tail(&entry->head, &man->list); | ||
| 612 | entry->state = VMW_COMPAT_COMMITED; | ||
| 613 | break; | ||
| 614 | default: | ||
| 615 | BUG(); | ||
| 616 | break; | ||
| 617 | } | ||
| 405 | } | 618 | } |
| 619 | } | ||
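
Commit and revert together form a small per-entry state machine. The transition table below is a stand-alone illustration inferred from the two functions above; the enum values are local stand-ins for the driver's VMW_COMPAT_* states:

    #include <stdio.h>

    enum state { ADD, DEL, COMMITTED };     /* stand-ins for VMW_COMPAT_* */

    static const char *name(enum state s)
    {
            return s == ADD ? "ADD" : s == DEL ? "DEL" : "COMMITTED";
    }

    /* Commit: additions become resident, deletions are carried out. */
    static void commit(enum state s)
    {
            switch (s) {
            case ADD:
                    printf("%s -> COMMITTED (moved onto the manager list)\n",
                           name(s));
                    break;
            case DEL:
                    printf("%s -> freed (handle reference dropped)\n", name(s));
                    break;
            default:        /* COMMITTED entries are never on a staging list */
                    printf("%s -> BUG()\n", name(s));
            }
    }

    /* Revert: additions are undone, deletions are reinstated. */
    static void revert(enum state s)
    {
            switch (s) {
            case ADD:
                    printf("%s -> freed (addition undone)\n", name(s));
                    break;
            case DEL:
                    printf("%s -> COMMITTED (hash entry re-inserted)\n",
                           name(s));
                    break;
            default:
                    printf("%s -> BUG()\n", name(s));
            }
    }

    int main(void)
    {
            commit(ADD); commit(DEL);
            revert(ADD); revert(DEL);
            return 0;
    }
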
| 406 | 620 | ||
| 407 | res = &ushader->shader.res; | 621 | /** |
| 408 | ushader->base.shareable = false; | 622 | * vmw_compat_shader_remove - Stage a compat shader for removal. |
| 409 | ushader->base.tfile = NULL; | 623 | * |
| 624 | * @man: Pointer to the compat shader manager | ||
| 625 | * @user_key: The key that is used to identify the shader. The key is | ||
| 626 | * unique to the shader type. | ||
| 627 | * @shader_type: Shader type. | ||
| 628 | * @list: Caller's list of staged shader actions. | ||
| 629 | * | ||
| 630 | * This function stages a compat shader for removal and removes the key from | ||
| 631 | * the shader manager's hash table. If the shader was previously only staged | ||
| 632 | * for addition, it is completely removed (but the execbuf code may keep a | ||
| 633 | * reference if it was bound to a context between addition and removal). If | ||
| 634 | * it was previously committed to the manager, it is staged for removal. | ||
| 635 | */ | ||
| 636 | int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man, | ||
| 637 | u32 user_key, SVGA3dShaderType shader_type, | ||
| 638 | struct list_head *list) | ||
| 639 | { | ||
| 640 | struct vmw_compat_shader *entry; | ||
| 641 | struct drm_hash_item *hash; | ||
| 642 | int ret; | ||
| 410 | 643 | ||
| 411 | /* | 644 | ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24), |
| 412 | * From here on, the destructor takes over resource freeing. | 645 | &hash); |
| 413 | */ | 646 | if (likely(ret != 0)) |
| 647 | return -EINVAL; | ||
| 414 | 648 | ||
| 415 | ret = vmw_gb_shader_init(dev_priv, res, arg->size, | 649 | entry = drm_hash_entry(hash, struct vmw_compat_shader, hash); |
| 416 | arg->offset, shader_type, buffer, | 650 | |
| 417 | vmw_user_shader_free); | 651 | switch (entry->state) { |
| 652 | case VMW_COMPAT_ADD: | ||
| 653 | vmw_compat_shader_free(man, entry); | ||
| 654 | break; | ||
| 655 | case VMW_COMPAT_COMMITED: | ||
| 656 | (void) drm_ht_remove_item(&man->shaders, &entry->hash); | ||
| 657 | list_del(&entry->head); | ||
| 658 | entry->state = VMW_COMPAT_DEL; | ||
| 659 | list_add_tail(&entry->head, list); | ||
| 660 | break; | ||
| 661 | default: | ||
| 662 | BUG(); | ||
| 663 | break; | ||
| 664 | } | ||
| 665 | |||
| 666 | return 0; | ||
| 667 | } | ||
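
The remove/commit/revert trio is built for two-phase command submission: stage first, then make the staging permanent only once the command stream has reached the device. Below is a hedged sketch of how an execbuf-style caller might drive it; it is not part of the patch, submit_command_stream() and the local list are hypothetical, and the vmw_compat_* calls use the signatures defined in this file:

    /* Hypothetical caller; assumes the vmwgfx private headers. */
    static int example_replace_shader(struct vmw_compat_shader_manager *man,
                                      u32 user_key, SVGA3dShaderType type)
    {
            struct list_head staged;
            int ret;

            INIT_LIST_HEAD(&staged);

            /* Stage the removal; nothing is freed or unreferenced yet. */
            ret = vmw_compat_shader_remove(man, user_key, type, &staged);
            if (ret != 0)
                    return ret;

            ret = submit_command_stream();  /* hypothetical fifo commit */
            if (ret != 0) {
                    /* Submission failed: put the hash entry back. */
                    vmw_compat_shaders_revert(man, &staged);
                    return ret;
            }

            /* The device has the commands: make the removal permanent. */
            vmw_compat_shaders_commit(man, &staged);
            return 0;
    }
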
| 668 | |||
| 669 | /** | ||
| 670 | * vmw_compat_shader_add - Create a compat shader and add the | ||
| 671 | * key to the manager | ||
| 672 | * | ||
| 673 | * @man: Pointer to the compat shader manager | ||
| 674 | * @user_key: The key that is used to identify the shader. The key is | ||
| 675 | * unique to the shader type. | ||
| 676 | * @bytecode: Pointer to the bytecode of the shader. | ||
| 677 | * @shader_type: Shader type. | ||
| 678 | * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is | ||
| 679 | * to be created with. | ||
| 680 | * @list: Caller's list of staged shader actions. | ||
| 681 | * | ||
| 682 | * Note that only the key is added to the shader manager's hash table. | ||
| 683 | * The shader is not yet added to the shader manager's list of shaders. | ||
| 684 | */ | ||
| 685 | int vmw_compat_shader_add(struct vmw_compat_shader_manager *man, | ||
| 686 | u32 user_key, const void *bytecode, | ||
| 687 | SVGA3dShaderType shader_type, | ||
| 688 | size_t size, | ||
| 689 | struct ttm_object_file *tfile, | ||
| 690 | struct list_head *list) | ||
| 691 | { | ||
| 692 | struct vmw_dma_buffer *buf; | ||
| 693 | struct ttm_bo_kmap_obj map; | ||
| 694 | bool is_iomem; | ||
| 695 | struct vmw_compat_shader *compat; | ||
| 696 | u32 handle; | ||
| 697 | int ret; | ||
| 698 | |||
| 699 | if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16) | ||
| 700 | return -EINVAL; | ||
| 701 | |||
| 702 | /* Allocate and pin a DMA buffer */ | ||
| 703 | buf = kzalloc(sizeof(*buf), GFP_KERNEL); | ||
| 704 | if (unlikely(buf == NULL)) | ||
| 705 | return -ENOMEM; | ||
| 706 | |||
| 707 | ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement, | ||
| 708 | true, vmw_dmabuf_bo_free); | ||
| 418 | if (unlikely(ret != 0)) | 709 | if (unlikely(ret != 0)) |
| 419 | goto out_unlock; | 710 | goto out; |
| 420 | 711 | ||
| 421 | tmp = vmw_resource_reference(res); | 712 | ret = ttm_bo_reserve(&buf->base, false, true, false, NULL); |
| 422 | ret = ttm_base_object_init(tfile, &ushader->base, false, | 713 | if (unlikely(ret != 0)) |
| 423 | VMW_RES_SHADER, | 714 | goto no_reserve; |
| 424 | &vmw_user_shader_base_release, NULL); | ||
| 425 | 715 | ||
| 716 | /* Map and copy shader bytecode. */ | ||
| 717 | ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT, | ||
| 718 | &map); | ||
| 426 | if (unlikely(ret != 0)) { | 719 | if (unlikely(ret != 0)) { |
| 427 | vmw_resource_unreference(&tmp); | 720 | ttm_bo_unreserve(&buf->base); |
| 428 | goto out_err; | 721 | goto no_reserve; |
| 429 | } | 722 | } |
| 430 | 723 | ||
| 431 | arg->shader_handle = ushader->base.hash.key; | 724 | memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size); |
| 432 | out_err: | 725 | WARN_ON(is_iomem); |
| 433 | vmw_resource_unreference(&res); | 726 | |
| 434 | out_unlock: | 727 | ttm_bo_kunmap(&map); |
| 435 | ttm_read_unlock(&vmaster->lock); | 728 | ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true); |
| 436 | out_bad_arg: | 729 | WARN_ON(ret != 0); |
| 437 | vmw_dmabuf_unreference(&buffer); | 730 | ttm_bo_unreserve(&buf->base); |
| 731 | |||
| 732 | /* Create a guest-backed shader container backed by the dma buffer */ | ||
| 733 | ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type, | ||
| 734 | tfile, &handle); | ||
| 735 | vmw_dmabuf_unreference(&buf); | ||
| 736 | if (unlikely(ret != 0)) | ||
| 737 | goto no_reserve; | ||
| 738 | /* | ||
| 739 | * Create a compat shader structure and stage it for insertion | ||
| 740 | * in the manager | ||
| 741 | */ | ||
| 742 | compat = kzalloc(sizeof(*compat), GFP_KERNEL); | ||
| 743 | if (compat == NULL) | ||
| 744 | goto no_compat; | ||
| 745 | |||
| 746 | compat->hash.key = user_key | (shader_type << 24); | ||
| 747 | ret = drm_ht_insert_item(&man->shaders, &compat->hash); | ||
| 748 | if (unlikely(ret != 0)) | ||
| 749 | goto out_invalid_key; | ||
| 750 | |||
| 751 | compat->state = VMW_COMPAT_ADD; | ||
| 752 | compat->handle = handle; | ||
| 753 | compat->tfile = tfile; | ||
| 754 | list_add_tail(&compat->head, list); | ||
| 438 | 755 | ||
| 756 | return 0; | ||
| 757 | |||
| 758 | out_invalid_key: | ||
| 759 | kfree(compat); | ||
| 760 | no_compat: | ||
| 761 | ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); | ||
| 762 | no_reserve: | ||
| 763 | out: | ||
| 439 | return ret; | 764 | return ret; |
| 765 | } | ||
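
The ttm_bo_kmap() call inside vmw_compat_shader_add() maps PAGE_ALIGN(size) >> PAGE_SHIFT pages, i.e. the byte size of the bytecode rounded up to whole pages. A stand-alone check of that arithmetic, assuming 4 KiB pages (the common configuration, though PAGE_SHIFT is architecture-dependent):

    #include <stdio.h>

    #define PAGE_SHIFT 12                   /* assumed: 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long sizes[] = { 1, 4096, 4097, 12288 };

            for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                    printf("bytecode size %5lu -> map %lu page(s)\n",
                           sizes[i], PAGE_ALIGN(sizes[i]) >> PAGE_SHIFT);
            return 0;
    }
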
| 766 | |||
| 767 | /** | ||
| 768 | * vmw_compat_shader_man_create - Create a compat shader manager | ||
| 769 | * | ||
| 770 | * @dev_priv: Pointer to a device private structure. | ||
| 771 | * | ||
| 772 | * Typically done at file open time. If successful, returns a pointer to a | ||
| 773 | * compat shader manager. Otherwise returns an error pointer. | ||
| 774 | */ | ||
| 775 | struct vmw_compat_shader_manager * | ||
| 776 | vmw_compat_shader_man_create(struct vmw_private *dev_priv) | ||
| 777 | { | ||
| 778 | struct vmw_compat_shader_manager *man; | ||
| 779 | int ret; | ||
| 780 | |||
| 781 | man = kzalloc(sizeof(*man), GFP_KERNEL); | ||
| 782 | if (man == NULL) | ||
| 783 | return ERR_PTR(-ENOMEM); | ||
| 784 | |||
| 785 | man->dev_priv = dev_priv; | ||
| 786 | INIT_LIST_HEAD(&man->list); | ||
| 787 | ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER); | ||
| 788 | if (ret == 0) | ||
| 789 | return man; | ||
| 790 | |||
| 791 | kfree(man); | ||
| 792 | return ERR_PTR(ret); | ||
| 793 | } | ||
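
Callers of vmw_compat_shader_man_create() must test the returned pointer with IS_ERR() rather than against NULL, since errors come back encoded in the pointer value itself. A minimal userspace re-creation of the kernel's ERR_PTR() convention, assuming a platform where long is pointer-sized as the kernel does:

    #include <stdio.h>

    #define MAX_ERRNO 4095   /* errors live in the top 4095 addresses */

    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
            void *man = ERR_PTR(-12);   /* as if kzalloc had failed: -ENOMEM */

            if (IS_ERR(man)) {
                    printf("create failed: %ld\n", PTR_ERR(man));
                    return 1;
            }
            return 0;
    }
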
| 794 | |||
| 795 | /** | ||
| 796 | * vmw_compat_shader_man_destroy - Destroy a compat shader manager | ||
| 797 | * | ||
| 798 | * @man: Pointer to the shader manager to destroy. | ||
| 799 | * | ||
| 800 | * Typically done at file close time. | ||
| 801 | */ | ||
| 802 | void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man) | ||
| 803 | { | ||
| 804 | struct vmw_compat_shader *entry, *next; | ||
| 805 | |||
| 806 | mutex_lock(&man->dev_priv->cmdbuf_mutex); | ||
| 807 | list_for_each_entry_safe(entry, next, &man->list, head) | ||
| 808 | vmw_compat_shader_free(man, entry); | ||
| 440 | 809 | ||
| 810 | mutex_unlock(&man->dev_priv->cmdbuf_mutex); | ||
| 811 | kfree(man); | ||
| 441 | } | 812 | } |
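
Create and destroy bracket the lifetime of a client file, per the comments above. A hedged sketch of the pairing (the compat_shader_man field and the hook names are hypothetical; the actual open/close wiring lives outside this hunk):

    static int example_open(struct vmw_private *dev_priv,
                            struct vmw_fpriv *vmw_fp)
    {
            vmw_fp->compat_shader_man = vmw_compat_shader_man_create(dev_priv);
            if (IS_ERR(vmw_fp->compat_shader_man))
                    return PTR_ERR(vmw_fp->compat_shader_man);
            return 0;
    }

    static void example_close(struct vmw_fpriv *vmw_fp)
    {
            vmw_compat_shader_man_destroy(vmw_fp->compat_shader_man);
    }
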
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 979da1c246a5..e7af580ab977 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | |||
| @@ -830,6 +830,24 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
| 830 | if (unlikely(ret != 0)) | 830 | if (unlikely(ret != 0)) |
| 831 | goto out_unlock; | 831 | goto out_unlock; |
| 832 | 832 | ||
| 833 | /* | ||
| 834 | * A gb-aware client referencing a shared surface will | ||
| 835 | * expect a backup buffer to be present. | ||
| 836 | */ | ||
| 837 | if (dev_priv->has_mob && req->shareable) { | ||
| 838 | uint32_t backup_handle; | ||
| 839 | |||
| 840 | ret = vmw_user_dmabuf_alloc(dev_priv, tfile, | ||
| 841 | res->backup_size, | ||
| 842 | true, | ||
| 843 | &backup_handle, | ||
| 844 | &res->backup); | ||
| 845 | if (unlikely(ret != 0)) { | ||
| 846 | vmw_resource_unreference(&res); | ||
| 847 | goto out_unlock; | ||
| 848 | } | ||
| 849 | } | ||
| 850 | |||
| 833 | tmp = vmw_resource_reference(&srf->res); | 851 | tmp = vmw_resource_reference(&srf->res); |
| 834 | ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, | 852 | ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, |
| 835 | req->shareable, VMW_RES_SURFACE, | 853 | req->shareable, VMW_RES_SURFACE, |
| @@ -908,8 +926,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | |||
| 908 | rep->size_addr; | 926 | rep->size_addr; |
| 909 | 927 | ||
| 910 | if (user_sizes) | 928 | if (user_sizes) |
| 911 | ret = copy_to_user(user_sizes, srf->sizes, | 929 | ret = copy_to_user(user_sizes, &srf->base_size, |
| 912 | srf->num_sizes * sizeof(*srf->sizes)); | 930 | sizeof(srf->base_size)); |
| 913 | if (unlikely(ret != 0)) { | 931 | if (unlikely(ret != 0)) { |
| 914 | DRM_ERROR("copy_to_user failed %p %u\n", | 932 | DRM_ERROR("copy_to_user failed %p %u\n", |
| 915 | user_sizes, srf->num_sizes); | 933 | user_sizes, srf->num_sizes); |
| @@ -1111,7 +1129,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res) | |||
| 1111 | return 0; | 1129 | return 0; |
| 1112 | 1130 | ||
| 1113 | mutex_lock(&dev_priv->binding_mutex); | 1131 | mutex_lock(&dev_priv->binding_mutex); |
| 1114 | vmw_context_binding_res_list_kill(&res->binding_head); | 1132 | vmw_context_binding_res_list_scrub(&res->binding_head); |
| 1115 | 1133 | ||
| 1116 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 1134 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
| 1117 | if (unlikely(cmd == NULL)) { | 1135 | if (unlikely(cmd == NULL)) { |
