Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c                |   2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c       |   2
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig              |   4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c     |  14
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c     |   2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c     |   3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c        |  66
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c           |  17
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h             |   8
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c       |   5
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c             |   3
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c             |  12
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c            |   7
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c       |   9
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_fb.c        |   2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c      |   4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c    | 179
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c   |   4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c    |   7
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c               |   2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c        |   9
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c               |   3
-rw-r--r--  drivers/gpu/drm/radeon/btc_dpm.c            |  32
-rw-r--r--  drivers/gpu/drm/radeon/btcd.h               |   4
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c             |   2
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c             |   8
-rw-r--r--  drivers/gpu/drm/radeon/r600.c               |   4
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c            |  18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c        |   2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h        |   2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c         |   3
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r600        |   1
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.c          |   5
-rw-r--r--  drivers/gpu/drm/radeon/si.c                 |   4
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c             |   3
-rw-r--r--  drivers/gpu/drm/radeon/sumo_dpm.c           |   2
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c        |   3
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v2_2.c           |   1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c            |   2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c                |   3
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga3d_reg.h         |  24
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c     | 144
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c         |   7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h         |  35
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c     | 330
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c       |  93
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_mob.c         |   1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c    |  11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c      | 467
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c     |   6
50 files changed, 1282 insertions, 299 deletions
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 3f65dd6676b2..a28640f47c27 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -65,7 +65,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
	 * then the BO is being moved and we should
	 * store up the damage until later.
	 */
-	if (!drm_can_sleep())
+	if (drm_can_sleep())
		ret = ast_bo_reserve(bo, true);
	if (ret) {
		if (ret != -EBUSY)
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 2fd4a92162cb..32bbba0a787b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -39,7 +39,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
	 * then the BO is being moved and we should
	 * store up the damage until later.
	 */
-	if (!drm_can_sleep())
+	if (drm_can_sleep())
		ret = cirrus_bo_reserve(bo, true);
	if (ret) {
		if (ret != -EBUSY)
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index f227f544aa36..6e1a1a20cf6b 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -51,7 +51,7 @@ config DRM_EXYNOS_G2D
 
 config DRM_EXYNOS_IPP
 	bool "Exynos DRM IPP"
-	depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM
+	depends on DRM_EXYNOS
 	help
 	  Choose this option if you want to use IPP feature for DRM.
 
@@ -69,6 +69,6 @@ config DRM_EXYNOS_ROTATOR
 
 config DRM_EXYNOS_GSC
 	bool "Exynos DRM GSC"
-	depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+	depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM
 	help
 	  Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 9d096a0c5f8d..215131ab1dd2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -171,22 +171,24 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
	file->driver_priv = file_priv;
 
	ret = exynos_drm_subdrv_open(dev, file);
-	if (ret) {
-		kfree(file_priv);
-		file->driver_priv = NULL;
-	}
+	if (ret)
+		goto out;
 
	anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
					NULL, 0);
	if (IS_ERR(anon_filp)) {
-		kfree(file_priv);
-		return PTR_ERR(anon_filp);
+		ret = PTR_ERR(anon_filp);
+		goto out;
	}
 
	anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
	file_priv->anon_filp = anon_filp;
 
	return ret;
+out:
+	kfree(file_priv);
+	file->driver_priv = NULL;
+	return ret;
 }
 
 static void exynos_drm_preclose(struct drm_device *dev,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 380aec28840b..6c1885eedfdf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -607,7 +607,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
		reg_type = REG_TYPE_NONE;
		DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
		break;
-	};
+	}
 
	return reg_type;
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index d519a4e5fe40..09312b877470 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -16,7 +16,6 @@
 #include <linux/types.h>
 #include <linux/clk.h>
 #include <linux/pm_runtime.h>
-#include <plat/map-base.h>
 
 #include <drm/drmP.h>
 #include <drm/exynos_drm.h>
@@ -826,7 +825,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
	DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
 
	/*
-	 * quf == NULL condition means all event deletion.
+	 * qbuf == NULL condition means all event deletion.
	 * stop operations want to delete all event list.
	 * another case delete only same buf id.
	 */
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index a0e10aeb0e67..c021ddc1ffb4 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -34,6 +34,7 @@
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
+#include <linux/hdmi.h>
 
 #include <drm/exynos_drm.h>
 
@@ -59,19 +60,6 @@
 #define HDMI_AUI_VERSION 0x01
 #define HDMI_AUI_LENGTH 0x0A
 
-/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
-enum HDMI_PACKET_TYPE {
-	/* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
-	/* InfoFrame packet type */
-	HDMI_PACKET_TYPE_INFOFRAME = 0x80,
-	/* Vendor-Specific InfoFrame */
-	HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
-	/* Auxiliary Video information InfoFrame */
-	HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
-	/* Audio information InfoFrame */
-	HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
-};
-
 enum hdmi_type {
	HDMI_TYPE13,
	HDMI_TYPE14,
@@ -379,12 +367,6 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
	},
 };
 
-struct hdmi_infoframe {
-	enum HDMI_PACKET_TYPE type;
-	u8 ver;
-	u8 len;
-};
-
 static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
 {
	return readl(hdata->regs + reg_id);
@@ -682,7 +664,7 @@ static u8 hdmi_chksum(struct hdmi_context *hdata,
 }
 
 static void hdmi_reg_infoframe(struct hdmi_context *hdata,
-		struct hdmi_infoframe *infoframe)
+		union hdmi_infoframe *infoframe)
 {
	u32 hdr_sum;
	u8 chksum;
@@ -700,13 +682,15 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
		return;
	}
 
-	switch (infoframe->type) {
-	case HDMI_PACKET_TYPE_AVI:
+	switch (infoframe->any.type) {
+	case HDMI_INFOFRAME_TYPE_AVI:
		hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
-		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
-		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
-		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
-		hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->any.type);
+		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1,
+				infoframe->any.version);
+		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->any.length);
+		hdr_sum = infoframe->any.type + infoframe->any.version +
+			  infoframe->any.length;
 
		/* Output format zero hardcoded ,RGB YBCR selection */
		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
@@ -722,18 +706,20 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
 
		chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
-					infoframe->len, hdr_sum);
+					infoframe->any.length, hdr_sum);
		DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
		hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
		break;
-	case HDMI_PACKET_TYPE_AUI:
+	case HDMI_INFOFRAME_TYPE_AUDIO:
		hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
-		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
-		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
-		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
-		hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->any.type);
+		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1,
+				infoframe->any.version);
+		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->any.length);
+		hdr_sum = infoframe->any.type + infoframe->any.version +
+			  infoframe->any.length;
		chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
-					infoframe->len, hdr_sum);
+					infoframe->any.length, hdr_sum);
		DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
		hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
		break;
@@ -985,7 +971,7 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
 
 static void hdmi_conf_init(struct hdmi_context *hdata)
 {
-	struct hdmi_infoframe infoframe;
+	union hdmi_infoframe infoframe;
 
	/* disable HPD interrupts from HDMI IP block, use GPIO instead */
	hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
@@ -1021,14 +1007,14 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
		hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
		hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
	} else {
-		infoframe.type = HDMI_PACKET_TYPE_AVI;
-		infoframe.ver = HDMI_AVI_VERSION;
-		infoframe.len = HDMI_AVI_LENGTH;
+		infoframe.any.type = HDMI_INFOFRAME_TYPE_AVI;
+		infoframe.any.version = HDMI_AVI_VERSION;
+		infoframe.any.length = HDMI_AVI_LENGTH;
		hdmi_reg_infoframe(hdata, &infoframe);
 
-		infoframe.type = HDMI_PACKET_TYPE_AUI;
-		infoframe.ver = HDMI_AUI_VERSION;
-		infoframe.len = HDMI_AUI_LENGTH;
+		infoframe.any.type = HDMI_INFOFRAME_TYPE_AUDIO;
+		infoframe.any.version = HDMI_AUI_VERSION;
+		infoframe.any.length = HDMI_AUI_LENGTH;
		hdmi_reg_infoframe(hdata, &infoframe);
 
		/* enable AVI packet every vsync, fixes purple line problem */
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 400b0c4a10fb..fa18cf374470 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -208,7 +208,7 @@ struct tda998x_priv {
 # define PLL_SERIAL_1_SRL_IZ(x) (((x) & 3) << 1)
 # define PLL_SERIAL_1_SRL_MAN_IZ (1 << 6)
 #define REG_PLL_SERIAL_2 REG(0x02, 0x01) /* read/write */
-# define PLL_SERIAL_2_SRL_NOSC(x) (((x) & 3) << 0)
+# define PLL_SERIAL_2_SRL_NOSC(x) ((x) << 0)
 # define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4)
 #define REG_PLL_SERIAL_3 REG(0x02, 0x02) /* read/write */
 # define PLL_SERIAL_3_SRL_CCIR (1 << 0)
@@ -528,10 +528,10 @@ tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p)
 {
	uint8_t buf[PB(5) + 1];
 
+	memset(buf, 0, sizeof(buf));
	buf[HB(0)] = 0x84;
	buf[HB(1)] = 0x01;
	buf[HB(2)] = 10;
-	buf[PB(0)] = 0;
	buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
	buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
	buf[PB(4)] = p->audio_frame[4];
@@ -824,6 +824,11 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
	}
 
	div = 148500 / mode->clock;
+	if (div != 0) {
+		div--;
+		if (div > 3)
+			div = 3;
+	}
 
	/* mute the audio FIFO: */
	reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
@@ -913,7 +918,7 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
 
	if (priv->rev == TDA19988) {
		/* let incoming pixels fill the active space (if any) */
-		reg_write(encoder, REG_ENABLE_SPACE, 0x01);
+		reg_write(encoder, REG_ENABLE_SPACE, 0x00);
	}
 
	/* must be last register set: */
@@ -1094,6 +1099,8 @@ tda998x_encoder_destroy(struct drm_encoder *encoder)
 {
	struct tda998x_priv *priv = to_tda998x_priv(encoder);
	drm_i2c_encoder_destroy(encoder);
+	if (priv->cec)
+		i2c_unregister_device(priv->cec);
	kfree(priv);
 }
 
@@ -1142,8 +1149,10 @@ tda998x_encoder_init(struct i2c_client *client,
	priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
	priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
 
-	priv->current_page = 0;
+	priv->current_page = 0xff;
	priv->cec = i2c_new_dummy(client->adapter, 0x34);
+	if (!priv->cec)
+		return -ENODEV;
	priv->dpms = DRM_MODE_DPMS_OFF;
 
	encoder_slave->slave_priv = priv;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4a2bf8e3f739..df77e20e3c3d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1831,6 +1831,14 @@ struct drm_i915_file_private {
 
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
 #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+/*
+ * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
+ * even when in MSI mode. This results in spurious interrupt warnings if the
+ * legacy irq no. is shared with another device. The kernel then disables that
+ * interrupt source and so prevents the other device from working properly.
+ */
+#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
 
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d7fd2fd2f0a5..990cf8f43efd 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -146,7 +146,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
		va_list tmp;
 
		va_copy(tmp, args);
-		if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
+		len = vsnprintf(NULL, 0, f, tmp);
+		va_end(tmp);
+
+		if (!__i915_error_seek(e, len))
			return;
	}
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 17d8fcb1b6f7..9fec71175571 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -567,8 +567,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 
		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
	} else {
-		enum transcoder cpu_transcoder =
-			intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+		enum transcoder cpu_transcoder = (enum transcoder) pipe;
		u32 htotal;
 
		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 5ede4e8e290d..2f517b85b3f4 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -404,7 +404,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
	int i, ret, recv_bytes;
	uint32_t status;
	int try, precharge, clock = 0;
-	bool has_aux_irq = true;
+	bool has_aux_irq = HAS_AUX_IRQ(dev);
	uint32_t timeout;
 
	/* dp aux is extremely sensitive to irq latency, hence request the
@@ -1869,10 +1869,12 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
 
	mutex_unlock(&dev_priv->dpio_lock);
 
-	/* init power sequencer on this pipe and port */
-	intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
-	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
-						       &power_seq);
+	if (is_edp(intel_dp)) {
+		/* init power sequencer on this pipe and port */
+		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+							       &power_seq);
+	}
 
	intel_enable_dp(encoder);
 
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index b1dc33f47899..d33b61d0dd33 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -258,13 +258,6 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
	algo->data = bus;
 }
 
-/*
- * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI
- * mode. This results in spurious interrupt warnings if the legacy irq no. is
- * shared with another device. The kernel then disables that interrupt source
- * and so prevents the other device from working properly.
- */
-#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
 static int
 gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
		     u32 gmbus2_status,
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 4e960ec7419f..acde2945eb8a 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -226,6 +226,8 @@ struct opregion_asle {
 #define ACPI_DIGITAL_OUTPUT (3<<8)
 #define ACPI_LVDS_OUTPUT (4<<8)
 
+#define MAX_DSLP 1500
+
 #ifdef CONFIG_ACPI
 static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
 {
@@ -260,10 +262,11 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
		/* The spec says 2ms should be the default, but it's too small
		 * for some machines. */
		dslp = 50;
-	} else if (dslp > 500) {
+	} else if (dslp > MAX_DSLP) {
		/* Hey bios, trust must be earned. */
-		WARN_ONCE(1, "excessive driver sleep timeout (DSPL) %u\n", dslp);
-		dslp = 500;
+		DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, "
+			      "using %u ms instead\n", dslp, MAX_DSLP);
+		dslp = MAX_DSLP;
	}
 
	/* The spec tells us to do this, but we are the only user... */
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index f9adc27ef32a..13b7dd83faa9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -41,7 +41,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
	 * then the BO is being moved and we should
	 * store up the damage until later.
	 */
-	if (!drm_can_sleep())
+	if (drm_can_sleep())
		ret = mgag200_bo_reserve(bo, true);
	if (ret) {
		if (ret != -EBUSY)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index b8583f275e80..968374776db9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1519,11 +1519,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
		   (mga_vga_calculate_mode_bandwidth(mode, bpp)
		    > (32700 * 1024))) {
		return MODE_BANDWIDTH;
-	} else if (mode->type == G200_EH &&
+	} else if (mdev->type == G200_EH &&
		   (mga_vga_calculate_mode_bandwidth(mode, bpp)
		    > (37500 * 1024))) {
		return MODE_BANDWIDTH;
-	} else if (mode->type == G200_ER &&
+	} else if (mdev->type == G200_ER &&
		   (mga_vga_calculate_mode_bandwidth(mode,
		    bpp) > (55000 * 1024))) {
		return MODE_BANDWIDTH;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 1964f4f0d452..84c5b13b33c9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -39,6 +39,7 @@ struct mdp4_crtc {
	spinlock_t lock;
	bool stale;
	uint32_t width, height;
+	uint32_t x, y;
 
	/* next cursor to scan-out: */
	uint32_t next_iova;
@@ -57,9 +58,16 @@ struct mdp4_crtc {
 #define PENDING_FLIP   0x2
	atomic_t pending;
 
-	/* the fb that we currently hold a scanout ref to: */
+	/* the fb that we logically (from PoV of KMS API) hold a ref
+	 * to.  Which we may not yet be scanning out (we may still
+	 * be scanning out previous in case of page_flip while waiting
+	 * for gpu rendering to complete:
+	 */
	struct drm_framebuffer *fb;
 
+	/* the fb that we currently hold a scanout ref to: */
+	struct drm_framebuffer *scanout_fb;
+
	/* for unref'ing framebuffers after scanout completes: */
	struct drm_flip_work unref_fb_work;
 
@@ -77,24 +85,73 @@ static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
	return to_mdp4_kms(to_mdp_kms(priv->kms));
 }
 
-static void update_fb(struct drm_crtc *crtc, bool async,
-		struct drm_framebuffer *new_fb)
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
 {
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-	struct drm_framebuffer *old_fb = mdp4_crtc->fb;
 
-	if (old_fb)
-		drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+	atomic_or(pending, &mdp4_crtc->pending);
+	mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
+}
+
+static void crtc_flush(struct drm_crtc *crtc)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	struct mdp4_kms *mdp4_kms = get_kms(crtc);
+	uint32_t i, flush = 0;
+
+	for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
+		struct drm_plane *plane = mdp4_crtc->planes[i];
+		if (plane) {
+			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+			flush |= pipe2flush(pipe_id);
+		}
+	}
+	flush |= ovlp2flush(mdp4_crtc->ovlp);
+
+	DBG("%s: flush=%08x", mdp4_crtc->name, flush);
+
+	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
+}
+
+static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	struct drm_framebuffer *old_fb = mdp4_crtc->fb;
 
	/* grab reference to incoming scanout fb: */
	drm_framebuffer_reference(new_fb);
	mdp4_crtc->base.fb = new_fb;
	mdp4_crtc->fb = new_fb;
 
-	if (!async) {
-		/* enable vblank to pick up the old_fb */
-		mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
-	}
+	if (old_fb)
+		drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+}
+
+/* unlike update_fb(), take a ref to the new scanout fb *before* updating
+ * plane, then call this.  Needed to ensure we don't unref the buffer that
+ * is actually still being scanned out.
+ *
+ * Note that this whole thing goes away with atomic.. since we can defer
+ * calling into driver until rendering is done.
+ */
+static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+	/* flush updates, to make sure hw is updated to new scanout fb,
+	 * so that we can safely queue unref to current fb (ie. next
+	 * vblank we know hw is done w/ previous scanout_fb).
+	 */
+	crtc_flush(crtc);
+
+	if (mdp4_crtc->scanout_fb)
+		drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
+				mdp4_crtc->scanout_fb);
+
+	mdp4_crtc->scanout_fb = fb;
+
+	/* enable vblank to complete flip: */
+	request_pending(crtc, PENDING_FLIP);
 }
 
 /* if file!=NULL, this is preclose potential cancel-flip path */
@@ -120,34 +177,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
-static void crtc_flush(struct drm_crtc *crtc)
-{
-	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-	struct mdp4_kms *mdp4_kms = get_kms(crtc);
-	uint32_t i, flush = 0;
-
-	for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
-		struct drm_plane *plane = mdp4_crtc->planes[i];
-		if (plane) {
-			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
-			flush |= pipe2flush(pipe_id);
-		}
-	}
-	flush |= ovlp2flush(mdp4_crtc->ovlp);
-
-	DBG("%s: flush=%08x", mdp4_crtc->name, flush);
-
-	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
-}
-
-static void request_pending(struct drm_crtc *crtc, uint32_t pending)
-{
-	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
-	atomic_or(pending, &mdp4_crtc->pending);
-	mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
-}
-
 static void pageflip_cb(struct msm_fence_cb *cb)
 {
	struct mdp4_crtc *mdp4_crtc =
@@ -158,11 +187,9 @@ static void pageflip_cb(struct msm_fence_cb *cb)
	if (!fb)
		return;
 
+	drm_framebuffer_reference(fb);
	mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
-	crtc_flush(crtc);
-
-	/* enable vblank to complete flip: */
-	request_pending(crtc, PENDING_FLIP);
+	update_scanout(crtc, fb);
 }
 
 static void unref_fb_worker(struct drm_flip_work *work, void *val)
@@ -320,6 +347,20 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);
 
+	/* grab extra ref for update_scanout() */
+	drm_framebuffer_reference(crtc->fb);
+
+	ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
+			0, 0, mode->hdisplay, mode->vdisplay,
+			x << 16, y << 16,
+			mode->hdisplay << 16, mode->vdisplay << 16);
+	if (ret) {
+		drm_framebuffer_unreference(crtc->fb);
+		dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
+				mdp4_crtc->name, ret);
+		return ret;
+	}
+
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
			MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
			MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
@@ -341,24 +382,15 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
 
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
 
-	update_fb(crtc, false, crtc->fb);
-
-	ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
-			0, 0, mode->hdisplay, mode->vdisplay,
-			x << 16, y << 16,
-			mode->hdisplay << 16, mode->vdisplay << 16);
-	if (ret) {
-		dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
-				mdp4_crtc->name, ret);
-		return ret;
-	}
-
	if (dma == DMA_E) {
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
	}
 
+	update_fb(crtc, crtc->fb);
+	update_scanout(crtc, crtc->fb);
+
	return 0;
 }
 
@@ -385,13 +417,24 @@ static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_plane *plane = mdp4_crtc->plane;
	struct drm_display_mode *mode = &crtc->mode;
+	int ret;
 
-	update_fb(crtc, false, crtc->fb);
+	/* grab extra ref for update_scanout() */
+	drm_framebuffer_reference(crtc->fb);
 
-	return mdp4_plane_mode_set(plane, crtc, crtc->fb,
+	ret = mdp4_plane_mode_set(plane, crtc, crtc->fb,
			0, 0, mode->hdisplay, mode->vdisplay,
			x << 16, y << 16,
			mode->hdisplay << 16, mode->vdisplay << 16);
+	if (ret) {
+		drm_framebuffer_unreference(crtc->fb);
+		return ret;
+	}
+
+	update_fb(crtc, crtc->fb);
+	update_scanout(crtc, crtc->fb);
+
+	return 0;
 }
 
 static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
@@ -419,7 +462,7 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
	mdp4_crtc->event = event;
	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	update_fb(crtc, true, new_fb);
+	update_fb(crtc, new_fb);
 
	return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
 }
@@ -442,12 +485,12 @@ static int mdp4_crtc_set_property(struct drm_crtc *crtc,
 static void update_cursor(struct drm_crtc *crtc)
 {
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	unsigned long flags;
 
	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	if (mdp4_crtc->cursor.stale) {
-		struct mdp4_kms *mdp4_kms = get_kms(crtc);
		struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
		struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
		uint32_t iova = mdp4_crtc->cursor.next_iova;
@@ -479,6 +522,11 @@ static void update_cursor(struct drm_crtc *crtc)
		mdp4_crtc->cursor.scanout_bo = next_bo;
		mdp4_crtc->cursor.stale = false;
	}
+
+	mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
+			MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
+			MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));
+
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
 }
 
@@ -530,6 +578,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
		drm_gem_object_unreference_unlocked(old_bo);
	}
 
+	crtc_flush(crtc);
	request_pending(crtc, PENDING_CURSOR);
 
	return 0;
@@ -542,12 +591,15 @@ fail:
 static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 {
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-	struct mdp4_kms *mdp4_kms = get_kms(crtc);
-	enum mdp4_dma dma = mdp4_crtc->dma;
+	unsigned long flags;
 
-	mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
-			MDP4_DMA_CURSOR_POS_X(x) |
-			MDP4_DMA_CURSOR_POS_Y(y));
+	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+	mdp4_crtc->cursor.x = x;
+	mdp4_crtc->cursor.y = y;
+	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+
+	crtc_flush(crtc);
+	request_pending(crtc, PENDING_CURSOR);
 
	return 0;
 }
@@ -713,6 +765,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
	crtc = &mdp4_crtc->base;
 
	mdp4_crtc->plane = plane;
+	mdp4_crtc->id = id;
 
	mdp4_crtc->ovlp = ovlp_id;
	mdp4_crtc->dma = dma_id;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 2406027200ec..1e893dd13859 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -170,8 +170,8 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
			MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));
 
	mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
-			MDP4_PIPE_SRC_XY_X(crtc_x) |
-			MDP4_PIPE_SRC_XY_Y(crtc_y));
+			MDP4_PIPE_DST_XY_X(crtc_x) |
+			MDP4_PIPE_DST_XY_Y(crtc_y));
 
	mdp4_plane_set_scanout(plane, fb);
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 71a3b2345eb3..f2794021f086 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -296,6 +296,7 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
			x << 16, y << 16,
			mode->hdisplay << 16, mode->vdisplay << 16);
	if (ret) {
+		drm_framebuffer_unreference(crtc->fb);
		dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
				mdp5_crtc->name, ret);
		return ret;
@@ -343,11 +344,15 @@ static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
			0, 0, mode->hdisplay, mode->vdisplay,
			x << 16, y << 16,
			mode->hdisplay << 16, mode->vdisplay << 16);
+	if (ret) {
+		drm_framebuffer_unreference(crtc->fb);
+		return ret;
+	}
 
	update_fb(crtc, crtc->fb);
	update_scanout(crtc, crtc->fb);
 
-	return ret;
+	return 0;
 }
 
 static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index d8d60c969ac7..3da8264d3039 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -644,7 +644,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 
 fail:
	if (obj)
-		drm_gem_object_unreference_unlocked(obj);
+		drm_gem_object_unreference(obj);
 
	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 5281d4bc37f7..5423e914e491 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -163,7 +163,7 @@ retry:
 
 
		/* if locking succeeded, pin bo: */
-		ret = msm_gem_get_iova(&msm_obj->base,
+		ret = msm_gem_get_iova_locked(&msm_obj->base,
				submit->gpu->id, &iova);
 
		/* this would break the logic in the fail path.. there is no
@@ -247,7 +247,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
	/* For now, just map the entire thing.  Eventually we probably
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
-	ptr = msm_gem_vaddr(&obj->base);
+	ptr = msm_gem_vaddr_locked(&obj->base);
 
	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
@@ -307,14 +307,12 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
 {
	unsigned i;
 
-	mutex_lock(&submit->dev->struct_mutex);
	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		submit_unlock_unpin_bo(submit, i);
		list_del_init(&msm_obj->submit_entry);
		drm_gem_object_unreference(&msm_obj->base);
	}
-	mutex_unlock(&submit->dev->struct_mutex);
 
	ww_acquire_fini(&submit->ticket);
	kfree(submit);
@@ -342,6 +340,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
	if (args->nr_cmds > MAX_CMDS)
		return -EINVAL;
 
+	mutex_lock(&dev->struct_mutex);
+
	submit = submit_create(dev, gpu, args->nr_bos);
	if (!submit) {
		ret = -ENOMEM;
@@ -410,5 +410,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 out:
	if (submit)
		submit_cleanup(submit, !!ret);
+	mutex_unlock(&dev->struct_mutex);
	return ret;
 }
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 4ebce8be489d..0cfe3f426ee4 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -298,8 +298,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
	struct msm_drm_private *priv = dev->dev_private;
	int i, ret;
 
-	mutex_lock(&dev->struct_mutex);
-
	submit->fence = ++priv->next_fence;
 
	gpu->submitted_fence = submit->fence;
@@ -331,7 +329,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
	}
	hangcheck_timer_reset(gpu);
-	mutex_unlock(&dev->struct_mutex);
 
	return ret;
 }
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 0fbd36f3d4e9..ea103ccdf4bd 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -29,6 +29,7 @@
 #include "cypress_dpm.h"
 #include "btc_dpm.h"
 #include "atom.h"
+#include <linux/seq_file.h>
 
 #define MC_CG_ARB_FREQ_F0 0x0a
 #define MC_CG_ARB_FREQ_F1 0x0b
@@ -2756,6 +2757,37 @@ void btc_dpm_fini(struct radeon_device *rdev)
	r600_free_extended_power_table(rdev);
 }
 
+void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						      struct seq_file *m)
+{
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *rps = &eg_pi->current_rps;
+	struct rv7xx_ps *ps = rv770_get_ps(rps);
+	struct rv7xx_pl *pl;
+	u32 current_index =
+		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+		CURRENT_PROFILE_INDEX_SHIFT;
+
+	if (current_index > 2) {
+		seq_printf(m, "invalid dpm profile %d\n", current_index);
+	} else {
+		if (current_index == 0)
+			pl = &ps->low;
+		else if (current_index == 1)
+			pl = &ps->medium;
+		else /* current_index == 2 */
+			pl = &ps->high;
+		seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+		if (rdev->family >= CHIP_CEDAR) {
+			seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
+				   current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+		} else {
+			seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n",
+				   current_index, pl->sclk, pl->mclk, pl->vddc);
+		}
+	}
+}
+
 u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low)
 {
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
diff --git a/drivers/gpu/drm/radeon/btcd.h b/drivers/gpu/drm/radeon/btcd.h
index 29e32de7e025..9c65be2d55a9 100644
--- a/drivers/gpu/drm/radeon/btcd.h
+++ b/drivers/gpu/drm/radeon/btcd.h
@@ -44,6 +44,10 @@
 # define DYN_SPREAD_SPECTRUM_EN (1 << 23)
 # define AC_DC_SW (1 << 24)
 
+#define TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
+# define CURRENT_PROFILE_INDEX_MASK (0xf << 4)
+# define CURRENT_PROFILE_INDEX_SHIFT 4
+
 #define CG_BIF_REQ_AND_RSP 0x7f4
 #define CG_CLIENT_REQ(x) ((x) << 0)
 #define CG_CLIENT_REQ_MASK (0xff << 0)
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index b6e01d5d2cce..351db361239d 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -1223,7 +1223,7 @@ int kv_dpm_enable(struct radeon_device *rdev)
 
 int kv_dpm_late_enable(struct radeon_device *rdev)
 {
-	int ret;
+	int ret = 0;
 
	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index c351226ecb31..1217fbcbdcca 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -3945,7 +3945,6 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_ps *ps = ni_get_ps(rps);
-	u16 vddc;
	struct rv7xx_pl *pl = &ps->performance_levels[index];
 
	ps->performance_level_count = index + 1;
@@ -3961,8 +3960,8 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
 
	/* patch up vddc if necessary */
	if (pl->vddc == 0xff01) {
-		if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
-			pl->vddc = vddc;
+		if (pi->max_vddc)
+			pl->vddc = pi->max_vddc;
	}
 
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
@@ -4322,7 +4321,8 @@ void ni_dpm_print_power_state(struct radeon_device *rdev,
 void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						      struct seq_file *m)
 {
-	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *rps = &eg_pi->current_rps;
	struct ni_ps *ps = ni_get_ps(rps);
	struct rv7xx_pl *pl;
	u32 current_index =
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 56140b4e5bb2..cdbc4171fe73 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3991,6 +3991,10 @@ restart_ih:
			break;
		}
		break;
+	case 124: /* UVD */
+		DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+		radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+		break;
	case 176: /* CP_INT in ring buffer */
	case 177: /* CP_INT in IB1 */
	case 178: /* CP_INT in IB2 */
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 7b399dc5fd54..2812c7d1ae6f 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1007,8 +1007,22 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
	case R_008C64_SQ_VSTMP_RING_SIZE:
	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
		/* get value to populate the IB don't remove */
-		tmp =radeon_get_ib_value(p, idx);
-		ib[idx] = 0;
+		/*tmp =radeon_get_ib_value(p, idx);
+		ib[idx] = 0;*/
+		break;
+	case SQ_ESGS_RING_BASE:
+	case SQ_GSVS_RING_BASE:
+	case SQ_ESTMP_RING_BASE:
+	case SQ_GSTMP_RING_BASE:
+	case SQ_PSTMP_RING_BASE:
+	case SQ_VSTMP_RING_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case SQ_CONFIG:
		track->sq_config = radeon_get_ib_value(p, idx);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index f74db43346fd..dda02bfc10a4 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1555,7 +1555,7 @@ static struct radeon_asic btc_asic = {
		.get_sclk = &btc_dpm_get_sclk,
		.get_mclk = &btc_dpm_get_mclk,
		.print_power_state = &rv770_dpm_print_power_state,
-		.debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
+		.debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level,
		.force_performance_level = &rv770_dpm_force_performance_level,
		.vblank_too_short = &btc_dpm_vblank_too_short,
	},
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index b3bc433eed4c..ae637cfda783 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -551,6 +551,8 @@ void btc_dpm_fini(struct radeon_device *rdev);
 u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low);
 u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low);
 bool btc_dpm_vblank_too_short(struct radeon_device *rdev);
+void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+						      struct seq_file *m);
 int sumo_dpm_init(struct radeon_device *rdev);
 int sumo_dpm_enable(struct radeon_device *rdev);
 int sumo_dpm_late_enable(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index ec8c388eec17..84a1bbb75f91 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -78,9 +78,10 @@
  * 2.34.0 - Add CIK tiling mode array query
  * 2.35.0 - Add CIK macrotile mode array query
  * 2.36.0 - Fix CIK DCE tiling setup
+ * 2.37.0 - allow GS ring setup on r6xx/r7xx
  */
 #define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 36
+#define KMS_DRIVER_MINOR 37
 #define KMS_DRIVER_PATCHLEVEL 0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 20bfbda7b3f1..ec0c6829c1dc 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -18,6 +18,7 @@ r600 0x9400
 0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
 0x00028A40 VGT_GS_MODE
 0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028B38 VGT_GS_MAX_VERT_OUT
 0x000088C8 VGT_GS_PER_ES
 0x000088E8 VGT_GS_PER_VS
 0x000088D4 VGT_GS_VERTEX_REUSE
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 80c595aba359..5b2ea8ac0731 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2174,7 +2174,6 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
 	struct rv7xx_ps *ps = rv770_get_ps(rps);
 	u32 sclk, mclk;
-	u16 vddc;
 	struct rv7xx_pl *pl;
 
 	switch (index) {
@@ -2214,8 +2213,8 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
 
 	/* patch up vddc if necessary */
 	if (pl->vddc == 0xff01) {
-		if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
-			pl->vddc = vddc;
+		if (pi->max_vddc)
+			pl->vddc = pi->max_vddc;
 	}
 
 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 09ec4f6c53bb..83578324e5d1 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6338,6 +6338,10 @@ restart_ih:
 			break;
 		}
 		break;
+	case 124: /* UVD */
+		DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+		radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+		break;
 	case 146:
 	case 147:
 		addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 0471501338fb..eafb0e6bc67e 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6472,7 +6472,8 @@ void si_dpm_fini(struct radeon_device *rdev)
 void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
 						    struct seq_file *m)
 {
-	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct radeon_ps *rps = &eg_pi->current_rps;
 	struct ni_ps *ps = ni_get_ps(rps);
 	struct rv7xx_pl *pl;
 	u32 current_index =
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index f121efe12dc5..8b47b3cd0357 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1807,7 +1807,7 @@ void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev
 						      struct seq_file *m)
 {
 	struct sumo_power_info *pi = sumo_get_pi(rdev);
-	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct radeon_ps *rps = &pi->current_rps;
 	struct sumo_ps *ps = sumo_get_ps(rps);
 	struct sumo_pl *pl;
 	u32 current_index =
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 2d447192d6f7..2da0e17eb960 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1926,7 +1926,8 @@ void trinity_dpm_print_power_state(struct radeon_device *rdev,
 void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
 							  struct seq_file *m)
 {
-	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+	struct trinity_power_info *pi = trinity_get_pi(rdev);
+	struct radeon_ps *rps = &pi->current_rps;
 	struct trinity_ps *ps = trinity_get_ps(rps);
 	struct trinity_pl *pl;
 	u32 current_index =
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index 824550db3fed..d1771004cb52 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -57,7 +57,6 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, 0);
 	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
 	radeon_ring_write(ring, 2);
-	return;
 }
 
 /**
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 37079859afc8..53b51c4e671a 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -292,7 +292,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 
 	if (ret == 0) {
 		ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
-		if (!kref_get_unless_zero(&ref->kref)) {
+		if (kref_get_unless_zero(&ref->kref)) {
 			rcu_read_unlock();
 			break;
 		}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 9af99084b344..75f319090043 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -380,6 +380,9 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
 	pgoff_t i;
 	struct page **page = ttm->pages;
 
+	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+		return;
+
 	for (i = 0; i < ttm->num_pages; ++i) {
 		(*page)->mapping = NULL;
 		(*page++)->index = 0;
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index d95335cb90bd..b645647b7776 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -2583,4 +2583,28 @@ typedef union {
 	float f;
 } SVGA3dDevCapResult;
 
+typedef enum {
+	SVGA3DCAPS_RECORD_UNKNOWN     = 0,
+	SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100,
+	SVGA3DCAPS_RECORD_DEVCAPS     = 0x100,
+	SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff,
+} SVGA3dCapsRecordType;
+
+typedef
+struct SVGA3dCapsRecordHeader {
+	uint32 length;
+	SVGA3dCapsRecordType type;
+}
+SVGA3dCapsRecordHeader;
+
+typedef
+struct SVGA3dCapsRecord {
+	SVGA3dCapsRecordHeader header;
+	uint32 data[1];
+}
+SVGA3dCapsRecord;
+
+
+typedef uint32 SVGA3dCapPair[2];
+
 #endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 82c41daebc0e..9426c53fb483 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -37,7 +37,7 @@ struct vmw_user_context {
 
 
 
-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
 
 static void vmw_user_context_free(struct vmw_resource *res);
 static struct vmw_resource *
@@ -50,9 +50,11 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 				 bool readback,
 				 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					    bool rebind);
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
 static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
 static uint64_t vmw_user_context_size;
 
@@ -111,10 +113,14 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 
 	if (res->func->destroy == vmw_gb_context_destroy) {
 		mutex_lock(&dev_priv->cmdbuf_mutex);
+		mutex_lock(&dev_priv->binding_mutex);
+		(void) vmw_context_binding_state_kill
+			(&container_of(res, struct vmw_user_context, res)->cbs);
 		(void) vmw_gb_context_destroy(res);
 		if (dev_priv->pinned_bo != NULL &&
 		    !dev_priv->query_cid_valid)
 			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+		mutex_unlock(&dev_priv->binding_mutex);
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
 		return;
 	}
@@ -328,7 +334,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_state_kill(&uctx->cbs);
+	vmw_context_binding_state_scrub(&uctx->cbs);
 
 	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
 
@@ -378,10 +384,6 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDestroyGBContext body;
 	} *cmd;
-	struct vmw_user_context *uctx =
-		container_of(res, struct vmw_user_context, res);
-
-	BUG_ON(!list_empty(&uctx->cbs.list));
 
 	if (likely(res->id == -1))
 		return 0;
@@ -528,8 +530,9 @@ out_unlock:
  * vmw_context_scrub_shader - scrub a shader binding from a context.
  *
  * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
  */
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
 {
 	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 	struct {
@@ -548,7 +551,8 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = bi->ctx->id;
 	cmd->body.type = bi->i1.shader_type;
-	cmd->body.shid = SVGA3D_INVALID_ID;
+	cmd->body.shid =
+		cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
@@ -559,8 +563,10 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
  * from a context.
  *
  * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
  */
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					    bool rebind)
 {
 	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 	struct {
@@ -579,7 +585,8 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = bi->ctx->id;
 	cmd->body.type = bi->i1.rt_type;
-	cmd->body.target.sid = SVGA3D_INVALID_ID;
+	cmd->body.target.sid =
+		cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 	cmd->body.target.face = 0;
 	cmd->body.target.mipmap = 0;
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -591,11 +598,13 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
  * vmw_context_scrub_texture - scrub a texture binding from a context.
  *
  * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
  *
  * TODO: Possibly complement this function with a function that takes
  * a list of texture bindings and combines them to a single command.
  */
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
+				     bool rebind)
 {
 	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 	struct {
@@ -619,7 +628,8 @@ static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
 	cmd->body.c.cid = bi->ctx->id;
 	cmd->body.s1.stage = bi->i1.texture_stage;
 	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
-	cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
+	cmd->body.s1.value =
+		cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
@@ -692,6 +702,7 @@ int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
 		vmw_context_binding_drop(loc);
 
 	loc->bi = *bi;
+	loc->bi.scrubbed = false;
 	list_add_tail(&loc->ctx_list, &cbs->list);
 	INIT_LIST_HEAD(&loc->res_list);
 
@@ -727,12 +738,11 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
 	if (loc->bi.ctx != NULL)
 		vmw_context_binding_drop(loc);
 
-	loc->bi = *bi;
-	list_add_tail(&loc->ctx_list, &cbs->list);
-	if (bi->res != NULL)
+	if (bi->res != NULL) {
+		loc->bi = *bi;
+		list_add_tail(&loc->ctx_list, &cbs->list);
 		list_add_tail(&loc->res_list, &bi->res->binding_head);
-	else
-		INIT_LIST_HEAD(&loc->res_list);
+	}
 }
 
738/** 748/**
@@ -746,7 +756,10 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
  */
 static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
 {
-	(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
+	if (!cb->bi.scrubbed) {
+		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
+		cb->bi.scrubbed = true;
+	}
 	vmw_context_binding_drop(cb);
 }
 
@@ -768,6 +781,27 @@ static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
 }
 
 /**
+ * vmw_context_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_binding *entry;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (!entry->bi.scrubbed) {
+			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+			entry->bi.scrubbed = true;
+		}
+	}
+}
+
+/**
  * vmw_context_binding_res_list_kill - Kill all bindings on a
  * resource binding list
  *
@@ -785,6 +819,27 @@ void vmw_context_binding_res_list_kill(struct list_head *head)
 }
 
 /**
+ * vmw_context_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_context_binding_res_list_scrub(struct list_head *head)
+{
+	struct vmw_ctx_binding *entry;
+
+	list_for_each_entry(entry, head, res_list) {
+		if (!entry->bi.scrubbed) {
+			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+			entry->bi.scrubbed = true;
+		}
+	}
+}
+
+/**
  * vmw_context_binding_state_transfer - Commit staged binding info
  *
  * @ctx: Pointer to context to commit the staged binding info to.
@@ -803,3 +858,50 @@ void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
 	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
 		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
 }
+
+/**
+ * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @ctx: The context resource
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_context_rebind_all(struct vmw_resource *ctx)
+{
+	struct vmw_ctx_binding *entry;
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+	struct vmw_ctx_binding_state *cbs = &uctx->cbs;
+	int ret;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (likely(!entry->bi.scrubbed))
+			continue;
+
+		if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
+			    SVGA3D_INVALID_ID))
+			continue;
+
+		ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
+		if (unlikely(ret != 0))
+			return ret;
+
+		entry->bi.scrubbed = false;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_context_binding_list - Return a list of context bindings
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+{
+	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9893328f8fdc..3bdc0adc656d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -941,6 +941,7 @@ static void vmw_postclose(struct drm_device *dev,
 		drm_master_put(&vmw_fp->locked_master);
 	}
 
+	vmw_compat_shader_man_destroy(vmw_fp->shman);
 	ttm_object_file_release(&vmw_fp->tfile);
 	kfree(vmw_fp);
 }
@@ -960,11 +961,17 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 	if (unlikely(vmw_fp->tfile == NULL))
 		goto out_no_tfile;
 
+	vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
+	if (IS_ERR(vmw_fp->shman))
+		goto out_no_shman;
+
 	file_priv->driver_priv = vmw_fp;
 	dev_priv->bdev.dev_mapping = dev->dev_mapping;
 
 	return 0;
 
+out_no_shman:
+	ttm_object_file_release(&vmw_fp->tfile);
 out_no_tfile:
 	kfree(vmw_fp);
 	return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 554e7fa33082..ecaa302a6154 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -75,10 +75,14 @@
 #define VMW_RES_FENCE ttm_driver_type3
 #define VMW_RES_SHADER ttm_driver_type4
 
+struct vmw_compat_shader_manager;
+
 struct vmw_fpriv {
 	struct drm_master *locked_master;
 	struct ttm_object_file *tfile;
 	struct list_head fence_events;
+	bool gb_aware;
+	struct vmw_compat_shader_manager *shman;
 };
 
 struct vmw_dma_buffer {
@@ -272,6 +276,7 @@ struct vmw_ctx_bindinfo {
 	struct vmw_resource *ctx;
 	struct vmw_resource *res;
 	enum vmw_ctx_binding_type bt;
+	bool scrubbed;
 	union {
 		SVGA3dShaderType shader_type;
 		SVGA3dRenderTargetType rt_type;
@@ -318,7 +323,7 @@ struct vmw_sw_context{
 	struct drm_open_hash res_ht;
 	bool res_ht_initialized;
 	bool kernel; /**< is the called made from the kernel */
-	struct ttm_object_file *tfile;
+	struct vmw_fpriv *fp;
 	struct list_head validate_nodes;
 	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
 	uint32_t cur_reloc;
@@ -336,6 +341,7 @@ struct vmw_sw_context{
 	bool needs_post_query_barrier;
 	struct vmw_resource *error_resource;
 	struct vmw_ctx_binding_state staged_bindings;
+	struct list_head staged_shaders;
 };
 
 struct vmw_legacy_display;
@@ -569,6 +575,8 @@ struct vmw_user_resource_conv;
 
 extern void vmw_resource_unreference(struct vmw_resource **p_res);
 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res);
 extern int vmw_resource_validate(struct vmw_resource *res);
 extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
@@ -957,6 +965,9 @@ extern void
 vmw_context_binding_state_transfer(struct vmw_resource *res,
 				   struct vmw_ctx_binding_state *cbs);
 extern void vmw_context_binding_res_list_kill(struct list_head *head);
+extern void vmw_context_binding_res_list_scrub(struct list_head *head);
+extern int vmw_context_rebind_all(struct vmw_resource *ctx);
+extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
 
 /*
  * Surface management - vmwgfx_surface.c
@@ -991,6 +1002,28 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file_priv);
 extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
+extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+				    SVGA3dShaderType shader_type,
+				    u32 *user_key);
+extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+				      struct list_head *list);
+extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+				      struct list_head *list);
+extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+				    u32 user_key,
+				    SVGA3dShaderType shader_type,
+				    struct list_head *list);
+extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+				 u32 user_key, const void *bytecode,
+				 SVGA3dShaderType shader_type,
+				 size_t size,
+				 struct ttm_object_file *tfile,
+				 struct list_head *list);
+extern struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv);
+extern void
+vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
+
 
 /**
  * Inline helper functions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 7a5f1eb55c5a..269b85cc875a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -114,8 +114,10 @@ static void vmw_resource_list_unreserve(struct list_head *list,
 	 * persistent context binding tracker.
 	 */
 	if (unlikely(val->staged_bindings)) {
-		vmw_context_binding_state_transfer
-			(val->res, val->staged_bindings);
+		if (!backoff) {
+			vmw_context_binding_state_transfer
+				(val->res, val->staged_bindings);
+		}
 		kfree(val->staged_bindings);
 		val->staged_bindings = NULL;
 	}
@@ -178,6 +180,44 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
 }
 
 /**
+ * vmw_resource_context_res_add - Put resources previously bound to a context on
+ * the validation list
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @sw_context: Pointer to a software context used for this command submission
+ * @ctx: Pointer to the context resource
+ *
+ * This function puts all resources that were previously bound to @ctx on
+ * the resource validation list. This is part of the context state reemission.
+ */
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+					struct vmw_sw_context *sw_context,
+					struct vmw_resource *ctx)
+{
+	struct list_head *binding_list;
+	struct vmw_ctx_binding *entry;
+	int ret = 0;
+	struct vmw_resource *res;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	binding_list = vmw_context_binding_list(ctx);
+
+	list_for_each_entry(entry, binding_list, ctx_list) {
+		res = vmw_resource_reference_unless_doomed(entry->bi.res);
+		if (unlikely(res == NULL))
+			continue;
+
+		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+		vmw_resource_unreference(&res);
+		if (unlikely(ret != 0))
+			break;
+	}
+
+	mutex_unlock(&dev_priv->binding_mutex);
+	return ret;
+}
+
+/**
  * vmw_resource_relocation_add - Add a relocation to the relocation list
  *
  * @list: Pointer to head of relocation list.
@@ -233,8 +273,12 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
 {
 	struct vmw_resource_relocation *rel;
 
-	list_for_each_entry(rel, list, head)
-		cb[rel->offset] = rel->res->id;
+	list_for_each_entry(rel, list, head) {
+		if (likely(rel->res != NULL))
+			cb[rel->offset] = rel->res->id;
+		else
+			cb[rel->offset] = SVGA_3D_CMD_NOP;
+	}
 }
 
 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
@@ -379,22 +423,27 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 }
 
 /**
- * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
  * on the resource validate list unless it's already there.
  *
  * @dev_priv: Pointer to a device private structure.
  * @sw_context: Pointer to the software context.
  * @res_type: Resource type.
  * @converter: User-space visisble type specific information.
- * @id: Pointer to the location in the command buffer currently being
+ * @id: user-space resource id handle.
+ * @id_loc: Pointer to the location in the command buffer currently being
  * parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validation node. Populated
+ * on exit.
  */
-static int vmw_cmd_res_check(struct vmw_private *dev_priv,
-			     struct vmw_sw_context *sw_context,
-			     enum vmw_res_type res_type,
-			     const struct vmw_user_resource_conv *converter,
-			     uint32_t *id,
-			     struct vmw_resource_val_node **p_val)
+static int
+vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
+			 struct vmw_sw_context *sw_context,
+			 enum vmw_res_type res_type,
+			 const struct vmw_user_resource_conv *converter,
+			 uint32_t id,
+			 uint32_t *id_loc,
+			 struct vmw_resource_val_node **p_val)
 {
 	struct vmw_res_cache_entry *rcache =
 		&sw_context->res_cache[res_type];
@@ -402,7 +451,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 	struct vmw_resource_val_node *node;
 	int ret;
 
-	if (*id == SVGA3D_INVALID_ID) {
+	if (id == SVGA3D_INVALID_ID) {
 		if (p_val)
 			*p_val = NULL;
 		if (res_type == vmw_res_context) {
@@ -417,7 +466,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 	 * resource
 	 */
 
-	if (likely(rcache->valid && *id == rcache->handle)) {
+	if (likely(rcache->valid && id == rcache->handle)) {
 		const struct vmw_resource *res = rcache->res;
 
 		rcache->node->first_usage = false;
@@ -426,28 +475,28 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 
 		return vmw_resource_relocation_add
 			(&sw_context->res_relocations, res,
-			 id - sw_context->buf_start);
+			 id_loc - sw_context->buf_start);
 	}
 
 	ret = vmw_user_resource_lookup_handle(dev_priv,
-					      sw_context->tfile,
-					      *id,
+					      sw_context->fp->tfile,
+					      id,
 					      converter,
 					      &res);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use resource 0x%08x.\n",
-			  (unsigned) *id);
+			  (unsigned) id);
 		dump_stack();
 		return ret;
 	}
 
 	rcache->valid = true;
 	rcache->res = res;
-	rcache->handle = *id;
+	rcache->handle = id;
 
 	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
 					  res,
-					  id - sw_context->buf_start);
+					  id_loc - sw_context->buf_start);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
 
@@ -459,7 +508,11 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 	if (p_val)
 		*p_val = node;
 
-	if (node->first_usage && res_type == vmw_res_context) {
+	if (dev_priv->has_mob && node->first_usage &&
+	    res_type == vmw_res_context) {
+		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
+		if (unlikely(ret != 0))
+			goto out_no_reloc;
 		node->staged_bindings =
 			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
 		if (node->staged_bindings == NULL) {
@@ -481,6 +534,59 @@ out_no_reloc:
 }
 
 /**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visible type specific information.
+ * @id_loc: Pointer to the location in the command buffer currently being
+ * parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validation node. Populated
+ * on exit.
+ */
+static int
+vmw_cmd_res_check(struct vmw_private *dev_priv,
+		  struct vmw_sw_context *sw_context,
+		  enum vmw_res_type res_type,
+		  const struct vmw_user_resource_conv *converter,
+		  uint32_t *id_loc,
+		  struct vmw_resource_val_node **p_val)
+{
+	return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
+					converter, *id_loc, id_loc, p_val);
+}
+
+/**
+ * vmw_rebind_contexts - Rebind all resources previously bound to
+ * referenced contexts.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Rebind context binding points that have been scrubbed because of eviction.
+ */
+static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
+{
+	struct vmw_resource_val_node *val;
+	int ret;
+
+	list_for_each_entry(val, &sw_context->resource_list, head) {
+		if (likely(!val->staged_bindings))
+			continue;
+
+		ret = vmw_context_rebind_all(val->res);
+		if (unlikely(ret != 0)) {
+			if (ret != -ERESTARTSYS)
+				DRM_ERROR("Failed to rebind context.\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/**
  * vmw_cmd_cid_check - Check a command header for valid context information.
  *
  * @dev_priv: Pointer to a device private structure.
@@ -767,7 +873,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use MOB buffer.\n");
 		return -EINVAL;
@@ -828,7 +934,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
 		return -EINVAL;
@@ -1127,7 +1233,8 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 
 	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
 
-	vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
+	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
+			     header);
 
 out_no_surface:
 	vmw_dmabuf_unreference(&vmw_bo);
@@ -1478,6 +1585,98 @@ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
 				 &cmd->body.sid, NULL);
 }
 
+
+/**
+ * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	struct vmw_shader_define_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineShader body;
+	} *cmd;
+	int ret;
+	size_t size;
+
+	cmd = container_of(header, struct vmw_shader_define_cmd,
+			   header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				NULL);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (unlikely(!dev_priv->has_mob))
+		return 0;
+
+	size = cmd->header.size - sizeof(cmd->body);
+	ret = vmw_compat_shader_add(sw_context->fp->shman,
+				    cmd->body.shid, cmd + 1,
+				    cmd->body.type, size,
+				    sw_context->fp->tfile,
+				    &sw_context->staged_shaders);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return vmw_resource_relocation_add(&sw_context->res_relocations,
+					   NULL, &cmd->header.id -
+					   sw_context->buf_start);
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_shader_destroy_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyShader body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_shader_destroy_cmd,
+			   header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				NULL);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (unlikely(!dev_priv->has_mob))
+		return 0;
+
+	ret = vmw_compat_shader_remove(sw_context->fp->shman,
+				       cmd->body.shid,
+				       cmd->body.type,
+				       &sw_context->staged_shaders);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return vmw_resource_relocation_add(&sw_context->res_relocations,
+					   NULL, &cmd->header.id -
+					   sw_context->buf_start);
+
+	return 0;
+}
+
 /**
  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
  * command
@@ -1509,10 +1708,18 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 	if (dev_priv->has_mob) {
 		struct vmw_ctx_bindinfo bi;
 		struct vmw_resource_val_node *res_node;
-
-		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
-					user_shader_converter,
-					&cmd->body.shid, &res_node);
+		u32 shid = cmd->body.shid;
+
+		if (shid != SVGA3D_INVALID_ID)
+			(void) vmw_compat_shader_lookup(sw_context->fp->shman,
+							cmd->body.type,
+							&shid);
+
+		ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
+					       vmw_res_shader,
+					       user_shader_converter,
+					       shid,
+					       &cmd->body.shid, &res_node);
 		if (unlikely(ret != 0))
 			return ret;
 
@@ -1527,6 +1734,39 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 }
 
 /**
+ * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
+				    struct vmw_sw_context *sw_context,
+				    SVGA3dCmdHeader *header)
+{
+	struct vmw_set_shader_const_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetShaderConst body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_set_shader_const_cmd,
+			   header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				NULL);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (dev_priv->has_mob)
+		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
+
+	return 0;
+}
+
+/**
  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
  * command
  *
@@ -1634,14 +1874,14 @@ static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
 		    false, false, false),
-	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check,
-		    true, true, false),
-	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check,
-		    true, true, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
+		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
 		    true, false, false),
-	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check,
-		    true, true, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
+		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
 		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
@@ -2171,7 +2411,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	} else
 		sw_context->kernel = true;
 
-	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
+	sw_context->fp = vmw_fpriv(file_priv);
 	sw_context->cur_reloc = 0;
 	sw_context->cur_val_buf = 0;
 	sw_context->fence_flags = 0;
@@ -2188,16 +2428,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 			goto out_unlock;
 		sw_context->res_ht_initialized = true;
 	}
+	INIT_LIST_HEAD(&sw_context->staged_shaders);
 
 	INIT_LIST_HEAD(&resource_list);
 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
 				command_size);
 	if (unlikely(ret != 0))
-		goto out_err;
+		goto out_err_nores;
 
 	ret = vmw_resources_reserve(sw_context);
 	if (unlikely(ret != 0))
-		goto out_err;
+		goto out_err_nores;
 
 	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
 	if (unlikely(ret != 0))
@@ -2225,6 +2466,12 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		goto out_err;
 	}
 
+	if (dev_priv->has_mob) {
+		ret = vmw_rebind_contexts(sw_context);
+		if (unlikely(ret != 0))
+			goto out_err;
+	}
+
 	cmd = vmw_fifo_reserve(dev_priv, command_size);
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving fifo space for commands.\n");
@@ -2276,6 +2523,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	}
 
 	list_splice_init(&sw_context->resource_list, &resource_list);
+	vmw_compat_shaders_commit(sw_context->fp->shman,
+				  &sw_context->staged_shaders);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 
 	/*
@@ -2289,10 +2538,11 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 out_unlock_binding:
 	mutex_unlock(&dev_priv->binding_mutex);
 out_err:
-	vmw_resource_relocations_free(&sw_context->res_relocations);
-	vmw_free_relocations(sw_context);
 	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
+out_err_nores:
 	vmw_resource_list_unreserve(&sw_context->resource_list, true);
+	vmw_resource_relocations_free(&sw_context->res_relocations);
+	vmw_free_relocations(sw_context);
 	vmw_clear_validations(sw_context);
 	if (unlikely(dev_priv->pinned_bo != NULL &&
 		     !dev_priv->query_cid_valid))
@@ -2301,6 +2551,8 @@ out_unlock:
 	list_splice_init(&sw_context->resource_list, &resource_list);
 	error_resource = sw_context->error_resource;
 	sw_context->error_resource = NULL;
+	vmw_compat_shaders_revert(sw_context->fp->shman,
+				  &sw_context->staged_shaders);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 
 	/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 116c49736763..f9881f9e62bd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -29,12 +29,18 @@
 #include <drm/vmwgfx_drm.h>
 #include "vmwgfx_kms.h"
 
+struct svga_3d_compat_cap {
+	SVGA3dCapsRecordHeader header;
+	SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
+};
+
 int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct drm_vmw_getparam_arg *param =
 		(struct drm_vmw_getparam_arg *)data;
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
 	switch (param->param) {
 	case DRM_VMW_PARAM_NUM_STREAMS:
@@ -60,6 +66,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 		const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
+		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
+			param->value = SVGA3D_HWVERSION_WS8_B1;
+			break;
+		}
+
 		param->value =
 			ioread32(fifo_mem +
 				 ((fifo->capabilities &
@@ -69,17 +80,26 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		break;
 	}
 	case DRM_VMW_PARAM_MAX_SURF_MEMORY:
-		param->value = dev_priv->memory_size;
+		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+		    !vmw_fp->gb_aware)
+			param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
+		else
+			param->value = dev_priv->memory_size;
 		break;
 	case DRM_VMW_PARAM_3D_CAPS_SIZE:
-		if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
-			param->value = SVGA3D_DEVCAP_MAX;
+		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+		    vmw_fp->gb_aware)
+			param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+		else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+			param->value = sizeof(struct svga_3d_compat_cap) +
+				sizeof(uint32_t);
 		else
 			param->value = (SVGA_FIFO_3D_CAPS_LAST -
-					SVGA_FIFO_3D_CAPS + 1);
-		param->value *= sizeof(uint32_t);
+					SVGA_FIFO_3D_CAPS + 1) *
+				sizeof(uint32_t);
 		break;
 	case DRM_VMW_PARAM_MAX_MOB_MEMORY:
+		vmw_fp->gb_aware = true;
 		param->value = dev_priv->max_mob_pages * PAGE_SIZE;
 		break;
 	default:
@@ -91,6 +111,38 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
+			       size_t size)
+{
+	struct svga_3d_compat_cap *compat_cap =
+		(struct svga_3d_compat_cap *) bounce;
+	unsigned int i;
+	size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
+	unsigned int max_size;
+
+	if (size < pair_offset)
+		return -EINVAL;
+
+	max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);
+
+	if (max_size > SVGA3D_DEVCAP_MAX)
+		max_size = SVGA3D_DEVCAP_MAX;
+
+	compat_cap->header.length =
+		(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
+	compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
+
+	mutex_lock(&dev_priv->hw_mutex);
+	for (i = 0; i < max_size; ++i) {
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
+		compat_cap->pairs[i][0] = i;
+		compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+	}
+	mutex_unlock(&dev_priv->hw_mutex);
+
+	return 0;
+}
+
 
 int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
@@ -104,41 +156,49 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 	void *bounce;
 	int ret;
 	bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
 	if (unlikely(arg->pad64 != 0)) {
 		DRM_ERROR("Illegal GET_3D_CAP argument.\n");
 		return -EINVAL;
 	}
 
-	if (gb_objects)
-		size = SVGA3D_DEVCAP_MAX;
+	if (gb_objects && vmw_fp->gb_aware)
+		size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+	else if (gb_objects)
+		size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
 	else
-		size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1);
-
-	size *= sizeof(uint32_t);
+		size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
+			sizeof(uint32_t);
 
 	if (arg->max_size < size)
 		size = arg->max_size;
 
-	bounce = vmalloc(size);
+	bounce = vzalloc(size);
 	if (unlikely(bounce == NULL)) {
 		DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
 		return -ENOMEM;
 	}
 
-	if (gb_objects) {
-		int i;
+	if (gb_objects && vmw_fp->gb_aware) {
+		int i, num;
 		uint32_t *bounce32 = (uint32_t *) bounce;
 
+		num = size / sizeof(uint32_t);
+		if (num > SVGA3D_DEVCAP_MAX)
+			num = SVGA3D_DEVCAP_MAX;
+
 		mutex_lock(&dev_priv->hw_mutex);
-		for (i = 0; i < SVGA3D_DEVCAP_MAX; ++i) {
+		for (i = 0; i < num; ++i) {
 			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
 			*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
 		}
 		mutex_unlock(&dev_priv->hw_mutex);
-
+	} else if (gb_objects) {
+		ret = vmw_fill_compat_cap(dev_priv, bounce, size);
+		if (unlikely(ret != 0))
+			goto out_err;
 	} else {
-
 		fifo_mem = dev_priv->mmio_virt;
 		memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
 	}
@@ -146,6 +206,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 	ret = copy_to_user(buffer, bounce, size);
 	if (ret)
 		ret = -EFAULT;
+out_err:
 	vfree(bounce);
 
 	if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 4910e7b81811..d4a5a19cb8c3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -134,6 +134,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
+		ret = -ENOMEM;
 		goto out_no_fifo;
 	}
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6fdd82d42f65..2aa4bc6a4d60 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -88,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
 	return res;
 }
 
+struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res)
+{
+	return kref_get_unless_zero(&res->kref) ? res : NULL;
+}
 
 /**
  * vmw_resource_release_id - release a resource id to the id manager.
@@ -136,8 +141,12 @@ static void vmw_resource_release(struct kref *kref)
136 vmw_dmabuf_unreference(&res->backup); 141 vmw_dmabuf_unreference(&res->backup);
137 } 142 }
138 143
139 if (likely(res->hw_destroy != NULL)) 144 if (likely(res->hw_destroy != NULL)) {
140 res->hw_destroy(res); 145 res->hw_destroy(res);
146 mutex_lock(&dev_priv->binding_mutex);
147 vmw_context_binding_res_list_kill(&res->binding_head);
148 mutex_unlock(&dev_priv->binding_mutex);
149 }
141 150
142 id = res->id; 151 id = res->id;
143 if (res->res_free != NULL) 152 if (res->res_free != NULL)
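vmw_resource_reference_unless_doomed() only returns the resource if its reference count has not already dropped to zero, i.e. if release is not in flight; kref_get_unless_zero() provides exactly that test-and-increment. A hedged userspace sketch of the same pattern with C11 atomics (an illustration of the idiom, not the kernel kref API):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct obj {
            atomic_int refcount;
    };

    /* Take a reference only if the count is still nonzero; otherwise the
     * object is already being torn down and must not be resurrected. */
    static bool obj_get_unless_zero(struct obj *o)
    {
            int cur = atomic_load(&o->refcount);

            while (cur != 0) {
                    if (atomic_compare_exchange_weak(&o->refcount, &cur, cur + 1))
                            return true;    /* reference taken */
                    /* cur was reloaded by the failed CAS; retry */
            }
            return false;
    }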
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 1457ec4b7125..217d941b8176 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -29,6 +29,8 @@
29#include "vmwgfx_resource_priv.h" 29#include "vmwgfx_resource_priv.h"
30#include "ttm/ttm_placement.h" 30#include "ttm/ttm_placement.h"
31 31
32#define VMW_COMPAT_SHADER_HT_ORDER 12
33
32struct vmw_shader { 34struct vmw_shader {
33 struct vmw_resource res; 35 struct vmw_resource res;
34 SVGA3dShaderType type; 36 SVGA3dShaderType type;
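VMW_COMPAT_SHADER_HT_ORDER is later passed to drm_ht_create() as the hash-table order, so, assuming the usual power-of-two sizing of DRM open hash tables, an order of 12 corresponds to 2^12 = 4096 buckets for staged and committed compat shaders.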
@@ -40,6 +42,50 @@ struct vmw_user_shader {
40 struct vmw_shader shader; 42 struct vmw_shader shader;
41}; 43};
42 44
45/**
46 * enum vmw_compat_shader_state - Staging state for compat shaders
47 */
48enum vmw_compat_shader_state {
49 VMW_COMPAT_COMMITED,
50 VMW_COMPAT_ADD,
51 VMW_COMPAT_DEL
52};
53
54/**
55 * struct vmw_compat_shader - Metadata for compat shaders.
56 *
57 * @handle: The TTM handle of the guest backed shader.
58 * @tfile: The struct ttm_object_file the guest backed shader is registered
59 * with.
60 * @hash: Hash item for lookup.
61 * @head: List head for staging lists or the compat shader manager list.
62 * @state: Staging state.
63 *
64 * The structure is protected by the cmdbuf lock.
65 */
66struct vmw_compat_shader {
67 u32 handle;
68 struct ttm_object_file *tfile;
69 struct drm_hash_item hash;
70 struct list_head head;
71 enum vmw_compat_shader_state state;
72};
73
74/**
75 * struct vmw_compat_shader_manager - Compat shader manager.
76 *
77 * @shaders: Hash table containing staged and committed compat shaders
78 * @list: List of committed shaders.
79 * @dev_priv: Pointer to a device private structure.
80 *
81 * @shaders and @list are protected by the cmdbuf mutex for now.
82 */
83struct vmw_compat_shader_manager {
84 struct drm_open_hash shaders;
85 struct list_head list;
86 struct vmw_private *dev_priv;
87};
88
43static void vmw_user_shader_free(struct vmw_resource *res); 89static void vmw_user_shader_free(struct vmw_resource *res);
44static struct vmw_resource * 90static struct vmw_resource *
45vmw_user_shader_base_to_res(struct ttm_base_object *base); 91vmw_user_shader_base_to_res(struct ttm_base_object *base);
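The states above implement a two-phase protocol: shader additions and removals performed while validating a command stream are only staged (VMW_COMPAT_ADD / VMW_COMPAT_DEL) on a per-submission list, and a later commit or revert step either makes them VMW_COMPAT_COMMITED entries on the manager's list or undoes them. A hedged, self-contained sketch of that state machine, using a plain singly linked list in place of the DRM hash table:

    #include <stdlib.h>

    enum state { COMMITTED, STAGED_ADD, STAGED_DEL };   /* mirrors the enum above */

    struct entry {
            enum state state;
            struct entry *next;
    };

    /* Submission reached the device: make the staged action final. */
    static void commit_entry(struct entry *e, struct entry **committed_list)
    {
            if (e->state == STAGED_ADD) {
                    e->state = COMMITTED;
                    e->next = *committed_list;      /* now visible to later lookups */
                    *committed_list = e;
            } else if (e->state == STAGED_DEL) {
                    free(e);                        /* the removal becomes final */
            }
    }

    /* Submission failed before reaching the device: undo the staged action. */
    static void revert_entry(struct entry *e, struct entry **committed_list)
    {
            if (e->state == STAGED_ADD) {
                    free(e);                        /* the addition never happened */
            } else if (e->state == STAGED_DEL) {
                    e->state = COMMITTED;           /* put it back on the manager */
                    e->next = *committed_list;
                    *committed_list = e;
            }
    }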
@@ -258,7 +304,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
258 return 0; 304 return 0;
259 305
260 mutex_lock(&dev_priv->binding_mutex); 306 mutex_lock(&dev_priv->binding_mutex);
261 vmw_context_binding_res_list_kill(&res->binding_head); 307 vmw_context_binding_res_list_scrub(&res->binding_head);
262 308
263 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 309 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
264 if (unlikely(cmd == NULL)) { 310 if (unlikely(cmd == NULL)) {
@@ -325,13 +371,81 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
325 TTM_REF_USAGE); 371 TTM_REF_USAGE);
326} 372}
327 373
374int vmw_shader_alloc(struct vmw_private *dev_priv,
375 struct vmw_dma_buffer *buffer,
376 size_t shader_size,
377 size_t offset,
378 SVGA3dShaderType shader_type,
379 struct ttm_object_file *tfile,
380 u32 *handle)
381{
382 struct vmw_user_shader *ushader;
383 struct vmw_resource *res, *tmp;
384 int ret;
385
386 /*
387 * Approximate idr memory usage with 128 bytes. It will be limited
388 * by maximum number of shaders anyway.
389 */
390 if (unlikely(vmw_user_shader_size == 0))
391 vmw_user_shader_size =
392 ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
393
394 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
395 vmw_user_shader_size,
396 false, true);
397 if (unlikely(ret != 0)) {
398 if (ret != -ERESTARTSYS)
399 DRM_ERROR("Out of graphics memory for shader "
400 "creation.\n");
401 goto out;
402 }
403
404 ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
405 if (unlikely(ushader == NULL)) {
406 ttm_mem_global_free(vmw_mem_glob(dev_priv),
407 vmw_user_shader_size);
408 ret = -ENOMEM;
409 goto out;
410 }
411
412 res = &ushader->shader.res;
413 ushader->base.shareable = false;
414 ushader->base.tfile = NULL;
415
416 /*
417 * From here on, the destructor takes over resource freeing.
418 */
419
420 ret = vmw_gb_shader_init(dev_priv, res, shader_size,
421 offset, shader_type, buffer,
422 vmw_user_shader_free);
423 if (unlikely(ret != 0))
424 goto out;
425
426 tmp = vmw_resource_reference(res);
427 ret = ttm_base_object_init(tfile, &ushader->base, false,
428 VMW_RES_SHADER,
429 &vmw_user_shader_base_release, NULL);
430
431 if (unlikely(ret != 0)) {
432 vmw_resource_unreference(&tmp);
433 goto out_err;
434 }
435
436 if (handle)
437 *handle = ushader->base.hash.key;
438out_err:
439 vmw_resource_unreference(&res);
440out:
441 return ret;
442}
443
444
328int vmw_shader_define_ioctl(struct drm_device *dev, void *data, 445int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
329 struct drm_file *file_priv) 446 struct drm_file *file_priv)
330{ 447{
331 struct vmw_private *dev_priv = vmw_priv(dev); 448 struct vmw_private *dev_priv = vmw_priv(dev);
332 struct vmw_user_shader *ushader;
333 struct vmw_resource *res;
334 struct vmw_resource *tmp;
335 struct drm_vmw_shader_create_arg *arg = 449 struct drm_vmw_shader_create_arg *arg =
336 (struct drm_vmw_shader_create_arg *)data; 450 (struct drm_vmw_shader_create_arg *)data;
337 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 451 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
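vmw_shader_alloc() above charges each user shader to the TTM global memory accounting with an approximated footprint of ttm_round_pot(sizeof(struct vmw_user_shader)) plus 128 bytes for idr bookkeeping. A hedged sketch of that accounting arithmetic; for small structs ttm_round_pot() behaves roughly like a round-up to the next power of two, which is what the helper below models.

    #include <stddef.h>

    /* Round up to the next power of two (approximation of ttm_round_pot()
     * for sub-page sizes; illustrative only). */
    static size_t round_up_pow2(size_t size)
    {
            size_t p = 1;

            while (p < size)
                    p <<= 1;
            return p;
    }

    static size_t shader_account_size(size_t struct_size)
    {
            return round_up_pow2(struct_size) + 128;    /* + idr overhead estimate */
    }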
@@ -373,69 +487,324 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
373 goto out_bad_arg; 487 goto out_bad_arg;
374 } 488 }
375 489
376 /* 490 ret = ttm_read_lock(&vmaster->lock, true);
377 * Approximate idr memory usage with 128 bytes. It will be limited 491 if (unlikely(ret != 0))
378 * by maximum number_of shaders anyway. 492 goto out_bad_arg;
379 */
380 493
381 if (unlikely(vmw_user_shader_size == 0)) 494 ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
382 vmw_user_shader_size = ttm_round_pot(sizeof(*ushader)) 495 shader_type, tfile, &arg->shader_handle);
383 + 128;
384 496
385 ret = ttm_read_lock(&vmaster->lock, true); 497 ttm_read_unlock(&vmaster->lock);
498out_bad_arg:
499 vmw_dmabuf_unreference(&buffer);
500 return ret;
501}
502
503/**
504 * vmw_compat_shader_lookup - Look up a compat shader
505 *
506 * @man: Pointer to the compat shader manager.
507 * @shader_type: The shader type that, combined with the user_key, identifies
508 * the shader.
509 * @user_key: On entry, this should be a pointer to the user_key.
510 * On successful exit, it will contain the guest-backed shader's TTM handle.
511 *
512 * Returns 0 on success. Non-zero on failure, in which case the value pointed
513 * to by @user_key is unmodified.
514 */
515int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
516 SVGA3dShaderType shader_type,
517 u32 *user_key)
518{
519 struct drm_hash_item *hash;
520 int ret;
521 unsigned long key = *user_key | (shader_type << 24);
522
523 ret = drm_ht_find_item(&man->shaders, key, &hash);
386 if (unlikely(ret != 0)) 524 if (unlikely(ret != 0))
387 return ret; 525 return ret;
388 526
389 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), 527 *user_key = drm_hash_entry(hash, struct vmw_compat_shader,
390 vmw_user_shader_size, 528 hash)->handle;
391 false, true); 529
392 if (unlikely(ret != 0)) { 530 return 0;
393 if (ret != -ERESTARTSYS) 531}
394 DRM_ERROR("Out of graphics memory for shader" 532
395 " creation.\n"); 533/**
396 goto out_unlock; 534 * vmw_compat_shader_free - Free a compat shader.
535 *
536 * @man: Pointer to the compat shader manager.
537 * @entry: Pointer to a struct vmw_compat_shader.
538 *
539 * Frees a struct vmw_compat_shader entry and drops its reference to the
540 * guest backed shader.
541 */
542static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
543 struct vmw_compat_shader *entry)
544{
545 list_del(&entry->head);
546 WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
547 WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
548 TTM_REF_USAGE));
549 kfree(entry);
550}
551
552/**
553 * vmw_compat_shaders_commit - Commit a list of compat shader actions.
554 *
555 * @man: Pointer to the compat shader manager.
556 * @list: Caller's list of compat shader actions.
557 *
558 * This function commits a list of compat shader additions or removals.
559 * It is typically called when the execbuf ioctl call triggering these
560 * actions has committed the fifo contents to the device.
561 */
562void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
563 struct list_head *list)
564{
565 struct vmw_compat_shader *entry, *next;
566
567 list_for_each_entry_safe(entry, next, list, head) {
568 list_del(&entry->head);
569 switch (entry->state) {
570 case VMW_COMPAT_ADD:
571 entry->state = VMW_COMPAT_COMMITED;
572 list_add_tail(&entry->head, &man->list);
573 break;
574 case VMW_COMPAT_DEL:
575 ttm_ref_object_base_unref(entry->tfile, entry->handle,
576 TTM_REF_USAGE);
577 kfree(entry);
578 break;
579 default:
580 BUG();
581 break;
582 }
397 } 583 }
584}
398 585
399 ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); 586/**
400 if (unlikely(ushader == NULL)) { 587 * vmw_compat_shaders_revert - Revert a list of compat shader actions
401 ttm_mem_global_free(vmw_mem_glob(dev_priv), 588 *
402 vmw_user_shader_size); 589 * @man: Pointer to the compat shader manager.
403 ret = -ENOMEM; 590 * @list: Caller's list of compat shader actions.
404 goto out_unlock; 591 *
592 * This function reverts a list of compat shader additions or removals.
593 * It is typically called when the execbuf ioctl call triggering these
594 * actions failed for some reason, and the command stream was never
595 * submitted.
596 */
597void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
598 struct list_head *list)
599{
600 struct vmw_compat_shader *entry, *next;
601 int ret;
602
603 list_for_each_entry_safe(entry, next, list, head) {
604 switch (entry->state) {
605 case VMW_COMPAT_ADD:
606 vmw_compat_shader_free(man, entry);
607 break;
608 case VMW_COMPAT_DEL:
609 ret = drm_ht_insert_item(&man->shaders, &entry->hash);
610 list_del(&entry->head);
611 list_add_tail(&entry->head, &man->list);
612 entry->state = VMW_COMPAT_COMMITED;
613 break;
614 default:
615 BUG();
616 break;
617 }
405 } 618 }
619}
406 620
407 res = &ushader->shader.res; 621/**
408 ushader->base.shareable = false; 622 * vmw_compat_shader_remove - Stage a compat shader for removal.
409 ushader->base.tfile = NULL; 623 *
624 * @man: Pointer to the compat shader manager
625 * @user_key: The key that is used to identify the shader. The key is
626 * unique to the shader type.
627 * @shader_type: Shader type.
628 * @list: Caller's list of staged shader actions.
629 *
630 * This function stages a compat shader for removal and removes the key from
631 * the shader manager's hash table. If the shader was previously only staged
632 * for addition, it is completely removed (but the execbuf code may keep a
633 * reference if it was bound to a context between addition and removal). If
634 * it was previously committed to the manager, it is staged for removal.
635 */
636int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
637 u32 user_key, SVGA3dShaderType shader_type,
638 struct list_head *list)
639{
640 struct vmw_compat_shader *entry;
641 struct drm_hash_item *hash;
642 int ret;
410 643
411 /* 644 ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
412 * From here on, the destructor takes over resource freeing. 645 &hash);
413 */ 646 if (likely(ret != 0))
647 return -EINVAL;
414 648
415 ret = vmw_gb_shader_init(dev_priv, res, arg->size, 649 entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
416 arg->offset, shader_type, buffer, 650
417 vmw_user_shader_free); 651 switch (entry->state) {
652 case VMW_COMPAT_ADD:
653 vmw_compat_shader_free(man, entry);
654 break;
655 case VMW_COMPAT_COMMITED:
656 (void) drm_ht_remove_item(&man->shaders, &entry->hash);
657 list_del(&entry->head);
658 entry->state = VMW_COMPAT_DEL;
659 list_add_tail(&entry->head, list);
660 break;
661 default:
662 BUG();
663 break;
664 }
665
666 return 0;
667}
668
669/**
670 * vmw_compat_shader_add - Create a compat shader and add the
671 * key to the manager
672 *
673 * @man: Pointer to the compat shader manager
674 * @user_key: The key that is used to identify the shader. The key is
675 * unique to the shader type.
676 * @bytecode: Pointer to the bytecode of the shader.
677 * @shader_type: Shader type.
678 * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
679 * to be created with.
680 * @list: Caller's list of staged shader actions.
681 *
682 * Note that only the key is added to the shader manager's hash table.
683 * The shader is not yet added to the shader manager's list of shaders.
684 */
685int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
686 u32 user_key, const void *bytecode,
687 SVGA3dShaderType shader_type,
688 size_t size,
689 struct ttm_object_file *tfile,
690 struct list_head *list)
691{
692 struct vmw_dma_buffer *buf;
693 struct ttm_bo_kmap_obj map;
694 bool is_iomem;
695 struct vmw_compat_shader *compat;
696 u32 handle;
697 int ret;
698
699 if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
700 return -EINVAL;
701
702 /* Allocate and pin a DMA buffer */
703 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
704 if (unlikely(buf == NULL))
705 return -ENOMEM;
706
707 ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
708 true, vmw_dmabuf_bo_free);
418 if (unlikely(ret != 0)) 709 if (unlikely(ret != 0))
419 goto out_unlock; 710 goto out;
420 711
421 tmp = vmw_resource_reference(res); 712 ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
422 ret = ttm_base_object_init(tfile, &ushader->base, false, 713 if (unlikely(ret != 0))
423 VMW_RES_SHADER, 714 goto no_reserve;
424 &vmw_user_shader_base_release, NULL);
425 715
716 /* Map and copy shader bytecode. */
717 ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
718 &map);
426 if (unlikely(ret != 0)) { 719 if (unlikely(ret != 0)) {
427 vmw_resource_unreference(&tmp); 720 ttm_bo_unreserve(&buf->base);
428 goto out_err; 721 goto no_reserve;
429 } 722 }
430 723
431 arg->shader_handle = ushader->base.hash.key; 724 memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
432out_err: 725 WARN_ON(is_iomem);
433 vmw_resource_unreference(&res); 726
434out_unlock: 727 ttm_bo_kunmap(&map);
435 ttm_read_unlock(&vmaster->lock); 728 ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
436out_bad_arg: 729 WARN_ON(ret != 0);
437 vmw_dmabuf_unreference(&buffer); 730 ttm_bo_unreserve(&buf->base);
731
732 /* Create a guest-backed shader container backed by the dma buffer */
733 ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
734 tfile, &handle);
735 vmw_dmabuf_unreference(&buf);
736 if (unlikely(ret != 0))
737 goto no_reserve;
738 /*
739 * Create a compat shader structure and stage it for insertion
740 * in the manager
741 */
742 compat = kzalloc(sizeof(*compat), GFP_KERNEL);
743 if (compat == NULL)
744 goto no_compat;
745
746 compat->hash.key = user_key | (shader_type << 24);
747 ret = drm_ht_insert_item(&man->shaders, &compat->hash);
748 if (unlikely(ret != 0))
749 goto out_invalid_key;
750
751 compat->state = VMW_COMPAT_ADD;
752 compat->handle = handle;
753 compat->tfile = tfile;
754 list_add_tail(&compat->head, list);
438 755
756 return 0;
757
758out_invalid_key:
759 kfree(compat);
760no_compat:
761 ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
762no_reserve:
763out:
439 return ret; 764 return ret;
765}
766
767/**
768 * vmw_compat_shader_man_create - Create a compat shader manager
769 *
770 * @dev_priv: Pointer to a device private structure.
771 *
772 * Typically done at file open time. If successful, returns a pointer to a
773 * compat shader manager. Otherwise returns an error pointer.
774 */
775struct vmw_compat_shader_manager *
776vmw_compat_shader_man_create(struct vmw_private *dev_priv)
777{
778 struct vmw_compat_shader_manager *man;
779 int ret;
780
781 man = kzalloc(sizeof(*man), GFP_KERNEL);
782
783 man->dev_priv = dev_priv;
784 INIT_LIST_HEAD(&man->list);
785 ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
786 if (ret == 0)
787 return man;
788
789 kfree(man);
790 return ERR_PTR(ret);
791}
792
793/**
794 * vmw_compat_shader_man_destroy - Destroy a compat shader manager
795 *
796 * @man: Pointer to the shader manager to destroy.
797 *
798 * Typically done at file close time.
799 */
800void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
801{
802 struct vmw_compat_shader *entry, *next;
803
804 mutex_lock(&man->dev_priv->cmdbuf_mutex);
805 list_for_each_entry_safe(entry, next, &man->list, head)
806 vmw_compat_shader_free(man, entry);
440 807
808 mutex_unlock(&man->dev_priv->cmdbuf_mutex);
809 kfree(man);
441} 810}
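Throughout the compat shader manager, a shader is identified by packing the 24-bit user key and the shader type into a single hash key, user_key | (shader_type << 24); that is also why vmw_compat_shader_add() rejects user keys above 2^24 - 1. A small self-contained sketch of the packing and a lookup keyed on it (a linear scan stands in for the drm_open_hash table):

    #include <stddef.h>
    #include <stdint.h>

    struct compat_entry {
            unsigned long key;      /* user_key | (shader_type << 24) */
            uint32_t handle;        /* TTM handle of the backing GB shader */
    };

    static unsigned long pack_key(uint32_t user_key, unsigned int shader_type)
    {
            return (unsigned long)user_key | ((unsigned long)shader_type << 24);
    }

    /* Returns 0 and writes the handle on a hit, -1 on a miss. */
    static int compat_lookup(const struct compat_entry *tbl, size_t n,
                             uint32_t user_key, unsigned int shader_type,
                             uint32_t *handle)
    {
            unsigned long key = pack_key(user_key, shader_type);
            size_t i;

            for (i = 0; i < n; ++i) {
                    if (tbl[i].key == key) {
                            *handle = tbl[i].handle;
                            return 0;
                    }
            }
            return -1;
    }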
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 979da1c246a5..82468d902915 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -908,8 +908,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
908 rep->size_addr; 908 rep->size_addr;
909 909
910 if (user_sizes) 910 if (user_sizes)
911 ret = copy_to_user(user_sizes, srf->sizes, 911 ret = copy_to_user(user_sizes, &srf->base_size,
912 srf->num_sizes * sizeof(*srf->sizes)); 912 sizeof(srf->base_size));
913 if (unlikely(ret != 0)) { 913 if (unlikely(ret != 0)) {
914 DRM_ERROR("copy_to_user failed %p %u\n", 914 DRM_ERROR("copy_to_user failed %p %u\n",
915 user_sizes, srf->num_sizes); 915 user_sizes, srf->num_sizes);
@@ -1111,7 +1111,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
1111 return 0; 1111 return 0;
1112 1112
1113 mutex_lock(&dev_priv->binding_mutex); 1113 mutex_lock(&dev_priv->binding_mutex);
1114 vmw_context_binding_res_list_kill(&res->binding_head); 1114 vmw_context_binding_res_list_scrub(&res->binding_head);
1115 1115
1116 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 1116 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
1117 if (unlikely(cmd == NULL)) { 1117 if (unlikely(cmd == NULL)) {