author		Dave Airlie <airlied@redhat.com>	2011-10-05 05:18:13 -0400
committer	Dave Airlie <airlied@redhat.com>	2011-10-05 05:18:13 -0400
commit		5383053627afab973ffe582a4db9646317bec726 (patch)
tree		92b4358b0e91c4bca5da8ebe42cf9d64967a6536 /drivers
parent		88ef4e3f4f616462b78a7838eb3ffc3818d30f67 (diff)
parent		6ea77d1384ed0c2d040a1934ecc3fd7187580931 (diff)
Merge branch 'drm-vmware-next' into drm-core-next
* drm-vmware-next: (26 commits)
vmwgfx: Minor cleanups
vmwgfx: Bump driver minor to advertise support for new ioctls.
vmwgfx: Be more strict with fb depths when using screen objects
vmwgfx: Handle device surface memory limit
vmwgfx: Make sure we always have a user-space handle to use for objects that are backing kms framebuffers.
vmwgfx: Optimize the command submission resource list
vmwgfx: Fix up query processing
vmwgfx: Allow reference and unreference of NULL fence objects.
vmwgfx: minor dmabuf utilities cleanup
vmwgfx: Disallow user space to send present and readback commands
vmwgfx: Add present and readback ioctls
vmwgfx: Place overlays in GMR area if we can
vmwgfx: Drop 3D Legacy Display Unit support
vmwgfx: Require HWV8 for 3d support
vmwgfx: Add screen object support
vmwgfx: Add dmabuf helper functions for pinning
vmwgfx: Refactor common display unit functions to shared file
vmwgfx: Expand the command checker to cover screen object commands
vmwgfx: Break out dirty submission code
vmwgfx: Break out execbuf command processing
...
Diffstat (limited to 'drivers')
21 files changed, 3910 insertions, 968 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index b824d9bdd87c..6e96c85b70da 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1295,6 +1295,7 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
 
 	return ret;
 }
+EXPORT_SYMBOL(ttm_bo_create);
 
 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 				   unsigned mem_type, bool allow_errors)
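With ttm_bo_create() now exported, a modular TTM client such as vmwgfx can create a buffer object with one call instead of open-coding ttm_bo_init() plus the surrounding bookkeeping. A minimal sketch, assuming the argument list used by this kernel series; the wrapper and its placement choice are illustrative, not driver code:

static int vmw_example_create_bo(struct vmw_private *dev_priv,
                                 unsigned long size,
                                 struct ttm_buffer_object **p_bo)
{
        /* One-call constructor: allocates, initializes and validates. */
        return ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
                             &vmw_vram_sys_placement,
                             0,     /* page_alignment */
                             0,     /* buffer_start */
                             false, /* interruptible */
                             NULL,  /* persistent_swap_storage */
                             p_bo);
}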
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 7d8e9d5d498c..586869c8c11f 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -5,6 +5,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
 	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
 	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
-	    vmwgfx_fence.o
+	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index 77cb45331000..d0e085ee8249 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -57,7 +57,8 @@ typedef enum {
    SVGA3D_HWVERSION_WS6_B1    = SVGA3D_MAKE_HWVERSION(1, 1),
    SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
    SVGA3D_HWVERSION_WS65_B1   = SVGA3D_MAKE_HWVERSION(2, 0),
-   SVGA3D_HWVERSION_CURRENT   = SVGA3D_HWVERSION_WS65_B1,
+   SVGA3D_HWVERSION_WS8_B1    = SVGA3D_MAKE_HWVERSION(2, 1),
+   SVGA3D_HWVERSION_CURRENT   = SVGA3D_HWVERSION_WS8_B1,
 } SVGA3dHardwareVersion;
 
 /*
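The WS8_B1 bump pairs with the "Require HWV8 for 3d support" patch in this series: 3D is only reported when the host advertises at least hardware version 8, read from the revised FIFO register when the host provides one. A simplified sketch modeled on vmw_fifo_have_3d(); the register and capability bit it reads are added to svga_reg.h later in this merge, and the FIFO-size sanity checks of the real function are omitted:

static bool vmw_example_have_3d(struct vmw_private *dev_priv)
{
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t hwversion;

        /* Prefer the revised register on hosts that enforce resource limits. */
        hwversion = ioread32(fifo_mem +
                             ((dev_priv->fifo.capabilities &
                               SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
                              SVGA_FIFO_3D_HWVERSION_REVISED :
                              SVGA_FIFO_3D_HWVERSION));

        return hwversion >= SVGA3D_HWVERSION_WS8_B1;
}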
@@ -67,7 +68,8 @@ typedef enum {
 typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
 #define SVGA3D_NUM_CLIPPLANES                   6
 #define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS  8
-
+#define SVGA3D_MAX_CONTEXT_IDS                  256
+#define SVGA3D_MAX_SURFACE_IDS                  (32 * 1024)
 
 /*
  * Surface formats.
@@ -79,76 +81,91 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
  */
 
 typedef enum SVGA3dSurfaceFormat {
    SVGA3D_FORMAT_INVALID               = 0,
 
    SVGA3D_X8R8G8B8                     = 1,
    SVGA3D_A8R8G8B8                     = 2,
 
    SVGA3D_R5G6B5                       = 3,
    SVGA3D_X1R5G5B5                     = 4,
    SVGA3D_A1R5G5B5                     = 5,
    SVGA3D_A4R4G4B4                     = 6,
 
    SVGA3D_Z_D32                        = 7,
    SVGA3D_Z_D16                        = 8,
    SVGA3D_Z_D24S8                      = 9,
    SVGA3D_Z_D15S1                      = 10,
 
    SVGA3D_LUMINANCE8                   = 11,
    SVGA3D_LUMINANCE4_ALPHA4            = 12,
    SVGA3D_LUMINANCE16                  = 13,
    SVGA3D_LUMINANCE8_ALPHA8            = 14,
 
    SVGA3D_DXT1                         = 15,
    SVGA3D_DXT2                         = 16,
    SVGA3D_DXT3                         = 17,
    SVGA3D_DXT4                         = 18,
    SVGA3D_DXT5                         = 19,
 
    SVGA3D_BUMPU8V8                     = 20,
    SVGA3D_BUMPL6V5U5                   = 21,
    SVGA3D_BUMPX8L8V8U8                 = 22,
    SVGA3D_BUMPL8V8U8                   = 23,
 
    SVGA3D_ARGB_S10E5                   = 24,   /* 16-bit floating-point ARGB */
    SVGA3D_ARGB_S23E8                   = 25,   /* 32-bit floating-point ARGB */
 
    SVGA3D_A2R10G10B10                  = 26,
 
    /* signed formats */
    SVGA3D_V8U8                         = 27,
    SVGA3D_Q8W8V8U8                     = 28,
    SVGA3D_CxV8U8                       = 29,
 
    /* mixed formats */
    SVGA3D_X8L8V8U8                     = 30,
    SVGA3D_A2W10V10U10                  = 31,
 
    SVGA3D_ALPHA8                       = 32,
 
    /* Single- and dual-component floating point formats */
    SVGA3D_R_S10E5                      = 33,
    SVGA3D_R_S23E8                      = 34,
    SVGA3D_RG_S10E5                     = 35,
    SVGA3D_RG_S23E8                     = 36,
 
    /*
     * Any surface can be used as a buffer object, but SVGA3D_BUFFER is
     * the most efficient format to use when creating new surfaces
     * expressly for index or vertex data.
     */
-   SVGA3D_BUFFER                       = 37,
 
-   SVGA3D_Z_D24X8                      = 38,
+   SVGA3D_BUFFER                       = 37,
+
+   SVGA3D_Z_D24X8                      = 38,
 
    SVGA3D_V16U16                       = 39,
 
    SVGA3D_G16R16                       = 40,
    SVGA3D_A16B16G16R16                 = 41,
 
    /* Packed Video formats */
    SVGA3D_UYVY                         = 42,
    SVGA3D_YUY2                         = 43,
+
+   /* Planar video formats */
+   SVGA3D_NV12                         = 44,
+
+   /* Video format with alpha */
+   SVGA3D_AYUV                         = 45,
+
+   SVGA3D_BC4_UNORM                    = 108,
+   SVGA3D_BC5_UNORM                    = 111,
+
+   /* Advanced D3D9 depth formats. */
+   SVGA3D_Z_DF16                       = 118,
+   SVGA3D_Z_DF24                       = 119,
+   SVGA3D_Z_D24S8_INT                  = 120,
 
    SVGA3D_FORMAT_MAX
 } SVGA3dSurfaceFormat;
@@ -414,10 +431,20 @@ typedef enum {
    SVGA3D_RS_SRCBLENDALPHA             = 94,    /* SVGA3dBlendOp */
    SVGA3D_RS_DSTBLENDALPHA             = 95,    /* SVGA3dBlendOp */
    SVGA3D_RS_BLENDEQUATIONALPHA        = 96,    /* SVGA3dBlendEquation */
+   SVGA3D_RS_TRANSPARENCYANTIALIAS     = 97,    /* SVGA3dTransparencyAntialiasType */
+   SVGA3D_RS_LINEAA                    = 98,    /* SVGA3dBool */
+   SVGA3D_RS_LINEWIDTH                 = 99,    /* float */
    SVGA3D_RS_MAX
 } SVGA3dRenderStateName;
 
 typedef enum {
+   SVGA3D_TRANSPARENCYANTIALIAS_NORMAL = 0,
+   SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE = 1,
+   SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE = 2,
+   SVGA3D_TRANSPARENCYANTIALIAS_MAX
+} SVGA3dTransparencyAntialiasType;
+
+typedef enum {
    SVGA3D_VERTEXMATERIAL_NONE     = 0,    /* Use the value in the current material */
    SVGA3D_VERTEXMATERIAL_DIFFUSE  = 1,    /* Use the value in the diffuse component */
    SVGA3D_VERTEXMATERIAL_SPECULAR = 2,    /* Use the value in the specular component */
@@ -728,10 +755,10 @@ typedef enum {
    SVGA3D_TEX_FILTER_NEAREST        = 1,
    SVGA3D_TEX_FILTER_LINEAR         = 2,
    SVGA3D_TEX_FILTER_ANISOTROPIC    = 3,
-   SVGA3D_TEX_FILTER_FLATCUBIC      = 4, // Deprecated, not implemented
-   SVGA3D_TEX_FILTER_GAUSSIANCUBIC  = 5, // Deprecated, not implemented
-   SVGA3D_TEX_FILTER_PYRAMIDALQUAD  = 6, // Not currently implemented
-   SVGA3D_TEX_FILTER_GAUSSIANQUAD   = 7, // Not currently implemented
+   SVGA3D_TEX_FILTER_FLATCUBIC      = 4, /* Deprecated, not implemented */
+   SVGA3D_TEX_FILTER_GAUSSIANCUBIC  = 5, /* Deprecated, not implemented */
+   SVGA3D_TEX_FILTER_PYRAMIDALQUAD  = 6, /* Not currently implemented */
+   SVGA3D_TEX_FILTER_GAUSSIANQUAD   = 7, /* Not currently implemented */
    SVGA3D_TEX_FILTER_MAX
 } SVGA3dTextureFilter;
 
@@ -799,19 +826,19 @@ typedef enum {
 
 typedef enum {
    SVGA3D_DECLUSAGE_POSITION = 0,
-   SVGA3D_DECLUSAGE_BLENDWEIGHT,       //  1
-   SVGA3D_DECLUSAGE_BLENDINDICES,      //  2
-   SVGA3D_DECLUSAGE_NORMAL,            //  3
-   SVGA3D_DECLUSAGE_PSIZE,             //  4
-   SVGA3D_DECLUSAGE_TEXCOORD,          //  5
-   SVGA3D_DECLUSAGE_TANGENT,           //  6
-   SVGA3D_DECLUSAGE_BINORMAL,          //  7
-   SVGA3D_DECLUSAGE_TESSFACTOR,        //  8
-   SVGA3D_DECLUSAGE_POSITIONT,         //  9
-   SVGA3D_DECLUSAGE_COLOR,             // 10
-   SVGA3D_DECLUSAGE_FOG,               // 11
-   SVGA3D_DECLUSAGE_DEPTH,             // 12
-   SVGA3D_DECLUSAGE_SAMPLE,            // 13
+   SVGA3D_DECLUSAGE_BLENDWEIGHT,       /*  1 */
+   SVGA3D_DECLUSAGE_BLENDINDICES,      /*  2 */
+   SVGA3D_DECLUSAGE_NORMAL,            /*  3 */
+   SVGA3D_DECLUSAGE_PSIZE,             /*  4 */
+   SVGA3D_DECLUSAGE_TEXCOORD,          /*  5 */
+   SVGA3D_DECLUSAGE_TANGENT,           /*  6 */
+   SVGA3D_DECLUSAGE_BINORMAL,          /*  7 */
+   SVGA3D_DECLUSAGE_TESSFACTOR,        /*  8 */
+   SVGA3D_DECLUSAGE_POSITIONT,         /*  9 */
+   SVGA3D_DECLUSAGE_COLOR,             /* 10 */
+   SVGA3D_DECLUSAGE_FOG,               /* 11 */
+   SVGA3D_DECLUSAGE_DEPTH,             /* 12 */
+   SVGA3D_DECLUSAGE_SAMPLE,            /* 13 */
    SVGA3D_DECLUSAGE_MAX
 } SVGA3dDeclUsage;
 
@@ -819,10 +846,10 @@ typedef enum {
    SVGA3D_DECLMETHOD_DEFAULT = 0,
    SVGA3D_DECLMETHOD_PARTIALU,
    SVGA3D_DECLMETHOD_PARTIALV,
-   SVGA3D_DECLMETHOD_CROSSUV,          // Normal
+   SVGA3D_DECLMETHOD_CROSSUV,          /* Normal */
    SVGA3D_DECLMETHOD_UV,
-   SVGA3D_DECLMETHOD_LOOKUP,           // Lookup a displacement map
-   SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, // Lookup a pre-sampled displacement map
+   SVGA3D_DECLMETHOD_LOOKUP,           /* Lookup a displacement map */
+   SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement map */
 } SVGA3dDeclMethod;
 
 typedef enum {
@@ -930,7 +957,6 @@ typedef enum {
 } SVGA3dCubeFace;
 
 typedef enum {
-   SVGA3D_SHADERTYPE_COMPILED_DX8 = 0,
    SVGA3D_SHADERTYPE_VS           = 1,
    SVGA3D_SHADERTYPE_PS           = 2,
    SVGA3D_SHADERTYPE_MAX
@@ -968,12 +994,18 @@ typedef enum {
 } SVGA3dTransferType;
 
 /*
- * The maximum number vertex arrays we're guaranteed to support in
+ * The maximum number of vertex arrays we're guaranteed to support in
  * SVGA_3D_CMD_DRAWPRIMITIVES.
  */
 #define SVGA3D_MAX_VERTEX_ARRAYS   32
 
 /*
+ * The maximum number of primitive ranges we're guaranteed to support
+ * in SVGA_3D_CMD_DRAWPRIMITIVES.
+ */
+#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
+
+/*
  * Identifiers for commands in the command FIFO.
  *
  * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
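Both maxima matter for command validation: a DRAWPRIMITIVES packet carries variable-length arrays sized by guest-supplied counts, so a checker must reject counts above the guaranteed limits before trusting the packet. A sketch in the spirit of the command checker in vmwgfx_execbuf.c; the function name is illustrative:

static int vmw_example_check_draw(const SVGA3dCmdDrawPrimitives *body)
{
        /* Refuse oversized variable-length payloads up front. */
        if (body->numVertexDecls > SVGA3D_MAX_VERTEX_ARRAYS ||
            body->numRanges > SVGA3D_MAX_DRAW_PRIMITIVE_RANGES)
                return -EINVAL;
        return 0;
}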
@@ -990,7 +1022,7 @@ typedef enum {
 #define SVGA_3D_CMD_LEGACY_BASE            1000
 #define SVGA_3D_CMD_BASE                   1040
 
-#define SVGA_3D_CMD_SURFACE_DEFINE         SVGA_3D_CMD_BASE + 0
+#define SVGA_3D_CMD_SURFACE_DEFINE         SVGA_3D_CMD_BASE + 0     /* Deprecated */
 #define SVGA_3D_CMD_SURFACE_DESTROY        SVGA_3D_CMD_BASE + 1
 #define SVGA_3D_CMD_SURFACE_COPY           SVGA_3D_CMD_BASE + 2
 #define SVGA_3D_CMD_SURFACE_STRETCHBLT     SVGA_3D_CMD_BASE + 3
@@ -1008,7 +1040,7 @@ typedef enum {
 #define SVGA_3D_CMD_SETVIEWPORT            SVGA_3D_CMD_BASE + 15
 #define SVGA_3D_CMD_SETCLIPPLANE           SVGA_3D_CMD_BASE + 16
 #define SVGA_3D_CMD_CLEAR                  SVGA_3D_CMD_BASE + 17
-#define SVGA_3D_CMD_PRESENT                SVGA_3D_CMD_BASE + 18    // Deprecated
+#define SVGA_3D_CMD_PRESENT                SVGA_3D_CMD_BASE + 18    /* Deprecated */
 #define SVGA_3D_CMD_SHADER_DEFINE          SVGA_3D_CMD_BASE + 19
 #define SVGA_3D_CMD_SHADER_DESTROY         SVGA_3D_CMD_BASE + 20
 #define SVGA_3D_CMD_SET_SHADER             SVGA_3D_CMD_BASE + 21
@@ -1018,9 +1050,13 @@ typedef enum {
 #define SVGA_3D_CMD_BEGIN_QUERY            SVGA_3D_CMD_BASE + 25
 #define SVGA_3D_CMD_END_QUERY              SVGA_3D_CMD_BASE + 26
 #define SVGA_3D_CMD_WAIT_FOR_QUERY         SVGA_3D_CMD_BASE + 27
-#define SVGA_3D_CMD_PRESENT_READBACK       SVGA_3D_CMD_BASE + 28    // Deprecated
+#define SVGA_3D_CMD_PRESENT_READBACK       SVGA_3D_CMD_BASE + 28    /* Deprecated */
 #define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29
-#define SVGA_3D_CMD_MAX                    SVGA_3D_CMD_BASE + 30
+#define SVGA_3D_CMD_SURFACE_DEFINE_V2      SVGA_3D_CMD_BASE + 30
+#define SVGA_3D_CMD_GENERATE_MIPMAPS       SVGA_3D_CMD_BASE + 31
+#define SVGA_3D_CMD_ACTIVATE_SURFACE       SVGA_3D_CMD_BASE + 40
+#define SVGA_3D_CMD_DEACTIVATE_SURFACE     SVGA_3D_CMD_BASE + 41
+#define SVGA_3D_CMD_MAX                    SVGA_3D_CMD_BASE + 42
 
 #define SVGA_3D_CMD_FUTURE_MAX             2000
 
@@ -1031,9 +1067,9 @@ typedef enum {
 typedef struct {
    union {
       struct {
-         uint16  function;       // SVGA3dFogFunction
-         uint8   type;           // SVGA3dFogType
-         uint8   base;           // SVGA3dFogBase
+         uint16  function;       /* SVGA3dFogFunction */
+         uint8   type;           /* SVGA3dFogType */
+         uint8   base;           /* SVGA3dFogBase */
       };
       uint32     uintValue;
    };
@@ -1109,6 +1145,8 @@ typedef enum {
    SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
    SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
    SVGA3D_SURFACE_HINT_WRITEONLY    = (1 << 8),
+   SVGA3D_SURFACE_MASKABLE_ANTIALIAS = (1 << 9),
+   SVGA3D_SURFACE_AUTOGENMIPMAPS    = (1 << 10),
 } SVGA3dSurfaceFlags;
 
 typedef
@@ -1121,6 +1159,12 @@ struct {
    uint32 sid;
    SVGA3dSurfaceFlags surfaceFlags;
    SVGA3dSurfaceFormat format;
+   /*
+    * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
+    * structures must have the same value of numMipLevels field.
+    * Otherwise, all but the first SVGA3dSurfaceFace structures must have the
+    * numMipLevels set to 0.
+    */
    SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
    /*
     * Followed by an SVGA3dSize structure for each mip level in each face.
@@ -1135,6 +1179,31 @@ struct {
 
 typedef
 struct {
+   uint32 sid;
+   SVGA3dSurfaceFlags surfaceFlags;
+   SVGA3dSurfaceFormat format;
+   /*
+    * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
+    * structures must have the same value of numMipLevels field.
+    * Otherwise, all but the first SVGA3dSurfaceFace structures must have the
+    * numMipLevels set to 0.
+    */
+   SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
+   uint32 multisampleCount;
+   SVGA3dTextureFilter autogenFilter;
+   /*
+    * Followed by an SVGA3dSize structure for each mip level in each face.
+    *
+    * A note on surface sizes: Sizes are always specified in pixels,
+    * even if the true surface size is not a multiple of the minimum
+    * block size of the surface's format. For example, a 3x3x1 DXT1
+    * compressed texture would actually be stored as a 4x4x1 image in
+    * memory.
+    */
+} SVGA3dCmdDefineSurface_v2;     /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
+
+typedef
+struct {
    uint32 sid;
 } SVGA3dCmdDestroySurface;      /* SVGA_3D_CMD_SURFACE_DESTROY */
 
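For illustration, a hedged sketch of emitting the new v2 define through the driver's FIFO helpers. vmw_fifo_reserve() and vmw_fifo_commit() are real vmwgfx helpers; the wrapper struct, the function itself and the assumption of a single 1-mip, non-cubemap surface are illustrative:

struct vmw_example_surface_define {
        SVGA3dCmdHeader header;
        SVGA3dCmdDefineSurface_v2 body;
        SVGA3dSize size;        /* one mip level of face 0 */
};

static int vmw_example_define_surface_v2(struct vmw_private *dev_priv,
                                         uint32 sid, SVGA3dSize size)
{
        struct vmw_example_surface_define *cmd;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        memset(cmd, 0, sizeof(*cmd));
        cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE_V2;
        cmd->header.size = sizeof(*cmd) - sizeof(cmd->header);
        cmd->body.sid = sid;
        cmd->body.format = SVGA3D_A8R8G8B8;
        cmd->body.face[0].numMipLevels = 1;
        cmd->size = size;       /* the trailing SVGA3dSize array */

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        return 0;
}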
@@ -1474,10 +1543,12 @@ struct {
  * SVGA3dCmdDrawPrimitives structure. In order,
  * they are:
  *
- * 1. SVGA3dVertexDecl, quantity 'numVertexDecls'
- * 2. SVGA3dPrimitiveRange, quantity 'numRanges'
+ * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
+ *    SVGA3D_MAX_VERTEX_ARRAYS;
+ * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
+ *    SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
  * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
- *    the frequency divisor for this the corresponding vertex decl)
+ *    the frequency divisor for the corresponding vertex decl).
  */
 } SVGA3dCmdDrawPrimitives;      /* SVGA_3D_CMD_DRAWPRIMITIVES */
 
@@ -1671,6 +1742,12 @@ struct {
    /* Clipping: zero or more SVGASignedRects follow */
 } SVGA3dCmdBlitSurfaceToScreen;         /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
 
+typedef
+struct {
+   uint32               sid;
+   SVGA3dTextureFilter  filter;
+} SVGA3dCmdGenerateMipmaps;             /* SVGA_3D_CMD_GENERATE_MIPMAPS */
+
 
 /*
  * Capability query index.
@@ -1774,6 +1851,32 @@ typedef enum {
    SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16           = 67,
    SVGA3D_DEVCAP_SURFACEFMT_UYVY                   = 68,
    SVGA3D_DEVCAP_SURFACEFMT_YUY2                   = 69,
+   SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES    = 70,
+   SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES       = 71,
+   SVGA3D_DEVCAP_ALPHATOCOVERAGE                   = 72,
+   SVGA3D_DEVCAP_SUPERSAMPLE                       = 73,
+   SVGA3D_DEVCAP_AUTOGENMIPMAPS                    = 74,
+   SVGA3D_DEVCAP_SURFACEFMT_NV12                   = 75,
+   SVGA3D_DEVCAP_SURFACEFMT_AYUV                   = 76,
+
+   /*
+    * This is the maximum number of SVGA context IDs that the guest
+    * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
+    */
+   SVGA3D_DEVCAP_MAX_CONTEXT_IDS                   = 77,
+
+   /*
+    * This is the maximum number of SVGA surface IDs that the guest
+    * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
+    */
+   SVGA3D_DEVCAP_MAX_SURFACE_IDS                   = 78,
+
+   SVGA3D_DEVCAP_SURFACEFMT_Z_DF16                 = 79,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_DF24                 = 80,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT            = 81,
+
+   SVGA3D_DEVCAP_SURFACEFMT_BC4_UNORM              = 82,
+   SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM              = 83,
 
    /*
    * Don't add new caps into the previous section; the values in this
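The host exposes these caps to the guest as (index, value) pairs in the extended FIFO, between SVGA_FIFO_3D_CAPS and SVGA_FIFO_3D_CAPS_LAST, using the SVGA3dCapsRecord/SVGA3dCapPair layout defined elsewhere in this header. A hedged sketch of pulling one cap out of a single record, assuming 'length' counts 32-bit words including the two-word header; record chaining and bounds checks are omitted:

static uint32 svga_example_read_devcap(const SVGA3dCapsRecord *record,
                                       uint32 devcap_index)
{
        const SVGA3dCapPair *pairs = (const SVGA3dCapPair *)record->data;
        uint32 num_pairs = (record->header.length - 2) / 2;
        uint32 i;

        for (i = 0; i < num_pairs; i++)
                if (pairs[i][0] == devcap_index)
                        return pairs[i][1];

        return 0; /* cap not advertised by the host */
}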
diff --git a/drivers/gpu/drm/vmwgfx/svga_escape.h b/drivers/gpu/drm/vmwgfx/svga_escape.h
index 7b85e9b8c854..8e8d9682e018 100644
--- a/drivers/gpu/drm/vmwgfx/svga_escape.h
+++ b/drivers/gpu/drm/vmwgfx/svga_escape.h
@@ -75,7 +75,7 @@
  */
 
 #define SVGA_ESCAPE_VMWARE_HINT               0x00030000
-#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN    0x00030001  // Deprecated
+#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN    0x00030001  /* Deprecated */
 
 typedef
 struct {
diff --git a/drivers/gpu/drm/vmwgfx/svga_overlay.h b/drivers/gpu/drm/vmwgfx/svga_overlay.h
index f753d73c14b4..f38416fcb046 100644
--- a/drivers/gpu/drm/vmwgfx/svga_overlay.h
+++ b/drivers/gpu/drm/vmwgfx/svga_overlay.h
@@ -38,9 +38,9 @@
  * Video formats we support
  */
 
-#define VMWARE_FOURCC_YV12 0x32315659 // 'Y' 'V' '1' '2'
-#define VMWARE_FOURCC_YUY2 0x32595559 // 'Y' 'U' 'Y' '2'
-#define VMWARE_FOURCC_UYVY 0x59565955 // 'U' 'Y' 'V' 'Y'
+#define VMWARE_FOURCC_YV12 0x32315659 /* 'Y' 'V' '1' '2' */
+#define VMWARE_FOURCC_YUY2 0x32595559 /* 'Y' 'U' 'Y' '2' */
+#define VMWARE_FOURCC_UYVY 0x59565955 /* 'U' 'Y' 'V' 'Y' */
 
 typedef enum {
    SVGA_OVERLAY_FORMAT_INVALID = 0,
@@ -68,7 +68,7 @@ struct SVGAEscapeVideoSetRegs {
       uint32 streamId;
    } header;
 
-   // May include zero or more items.
+   /* May include zero or more items. */
    struct {
       uint32 registerId;
      uint32 value;
@@ -134,12 +134,12 @@ struct {
  */
 
 static inline bool
-VMwareVideoGetAttributes(const SVGAOverlayFormat format,    // IN
-                         uint32 *width,                     // IN / OUT
-                         uint32 *height,                    // IN / OUT
-                         uint32 *size,                      // OUT
-                         uint32 *pitches,                   // OUT (optional)
-                         uint32 *offsets)                   // OUT (optional)
+VMwareVideoGetAttributes(const SVGAOverlayFormat format,    /* IN */
+                         uint32 *width,                     /* IN / OUT */
+                         uint32 *height,                    /* IN / OUT */
+                         uint32 *size,                      /* OUT */
+                         uint32 *pitches,                   /* OUT (optional) */
+                         uint32 *offsets)                   /* OUT (optional) */
 {
    int tmp;
 
@@ -198,4 +198,4 @@ VMwareVideoGetAttributes(const SVGAOverlayFormat format,    // IN
    return true;
 }
 
-#endif // _SVGA_OVERLAY_H_
+#endif /* _SVGA_OVERLAY_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
index ec5aad9b6ed3..01f63cb49678 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -276,7 +276,7 @@ enum {
  * possible.
  */
 #define SVGA_GMR_NULL         ((uint32) -1)
-#define SVGA_GMR_FRAMEBUFFER  ((uint32) -2)  // Guest Framebuffer (GFB)
+#define SVGA_GMR_FRAMEBUFFER  ((uint32) -2)  /* Guest Framebuffer (GFB) */
 
 typedef
 struct SVGAGuestMemDescriptor {
@@ -317,13 +317,35 @@ struct SVGAGMRImageFormat {
       struct {
          uint32 bitsPerPixel : 8;
         uint32 colorDepth   : 8;
-         uint32 reserved     : 16;  // Must be zero
+         uint32 reserved     : 16;  /* Must be zero */
       };
 
       uint32 value;
    };
 } SVGAGMRImageFormat;
 
+typedef
+struct SVGAGuestImage {
+   SVGAGuestPtr         ptr;
+
+   /*
+    * A note on interpretation of pitch: This value of pitch is the
+    * number of bytes between vertically adjacent image
+    * blocks. Normally this is the number of bytes between the first
+    * pixel of two adjacent scanlines. With compressed textures,
+    * however, this may represent the number of bytes between
+    * compression blocks rather than between rows of pixels.
+    *
+    * XXX: Compressed textures currently must be tightly packed in guest memory.
+    *
+    * If the image is 1-dimensional, pitch is ignored.
+    *
+    * If 'pitch' is zero, the SVGA3D device calculates a pitch value
+    * assuming each row of blocks is tightly packed.
+    */
+   uint32 pitch;
+} SVGAGuestImage;
+
 /*
  * SVGAColorBGRX --
  *
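An illustrative helper, not part of the header, that applies the documented pitch rule for an uncompressed format; bytes_per_pixel is an assumed caller-derived value taken from the surface format:

static inline uint32 svga_example_effective_pitch(const SVGAGuestImage *img,
                                                  uint32 width,
                                                  uint32 bytes_per_pixel)
{
        /* pitch == 0: the device assumes tightly packed rows of blocks. */
        return img->pitch ? img->pitch : width * bytes_per_pixel;
}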
@@ -339,7 +361,7 @@ struct SVGAColorBGRX {
          uint32 b : 8;
          uint32 g : 8;
          uint32 r : 8;
-         uint32 x : 8;  // Unused
+         uint32 x : 8;  /* Unused */
       };
 
       uint32 value;
@@ -395,16 +417,16 @@ struct SVGASignedPoint {
 #define SVGA_CAP_NONE               0x00000000
 #define SVGA_CAP_RECT_COPY          0x00000002
 #define SVGA_CAP_CURSOR             0x00000020
-#define SVGA_CAP_CURSOR_BYPASS      0x00000040   // Legacy (Use Cursor Bypass 3 instead)
-#define SVGA_CAP_CURSOR_BYPASS_2    0x00000080   // Legacy (Use Cursor Bypass 3 instead)
+#define SVGA_CAP_CURSOR_BYPASS      0x00000040   /* Legacy (Use Cursor Bypass 3 instead) */
+#define SVGA_CAP_CURSOR_BYPASS_2    0x00000080   /* Legacy (Use Cursor Bypass 3 instead) */
 #define SVGA_CAP_8BIT_EMULATION     0x00000100
 #define SVGA_CAP_ALPHA_CURSOR       0x00000200
 #define SVGA_CAP_3D                 0x00004000
 #define SVGA_CAP_EXTENDED_FIFO      0x00008000
-#define SVGA_CAP_MULTIMON           0x00010000   // Legacy multi-monitor support
+#define SVGA_CAP_MULTIMON           0x00010000   /* Legacy multi-monitor support */
 #define SVGA_CAP_PITCHLOCK          0x00020000
 #define SVGA_CAP_IRQMASK            0x00040000
-#define SVGA_CAP_DISPLAY_TOPOLOGY   0x00080000   // Legacy multi-monitor support
+#define SVGA_CAP_DISPLAY_TOPOLOGY   0x00080000   /* Legacy multi-monitor support */
 #define SVGA_CAP_GMR                0x00100000
 #define SVGA_CAP_TRACES             0x00200000
 #define SVGA_CAP_GMR2               0x00400000
@@ -453,7 +475,7 @@ enum {
 
    SVGA_FIFO_CAPABILITIES = 4,
    SVGA_FIFO_FLAGS,
-   // Valid with SVGA_FIFO_CAP_FENCE:
+   /* Valid with SVGA_FIFO_CAP_FENCE: */
    SVGA_FIFO_FENCE,
 
    /*
@@ -466,33 +488,47 @@ enum {
     * extended FIFO.
     */
 
-   // Valid if exists (i.e. if extended FIFO enabled):
+   /* Valid if exists (i.e. if extended FIFO enabled): */
    SVGA_FIFO_3D_HWVERSION,       /* See SVGA3dHardwareVersion in svga3d_reg.h */
-   // Valid with SVGA_FIFO_CAP_PITCHLOCK:
+   /* Valid with SVGA_FIFO_CAP_PITCHLOCK: */
    SVGA_FIFO_PITCHLOCK,
 
-   // Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3:
+   /* Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3: */
    SVGA_FIFO_CURSOR_ON,          /* Cursor bypass 3 show/hide register */
    SVGA_FIFO_CURSOR_X,           /* Cursor bypass 3 x register */
    SVGA_FIFO_CURSOR_Y,           /* Cursor bypass 3 y register */
    SVGA_FIFO_CURSOR_COUNT,       /* Incremented when any of the other 3 change */
    SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */
 
-   // Valid with SVGA_FIFO_CAP_RESERVE:
+   /* Valid with SVGA_FIFO_CAP_RESERVE: */
    SVGA_FIFO_RESERVED,           /* Bytes past NEXT_CMD with real contents */
 
    /*
-    * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT:
+    * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2:
     *
     * By default this is SVGA_ID_INVALID, to indicate that the cursor
     * coordinates are specified relative to the virtual root. If this
     * is set to a specific screen ID, cursor position is reinterpreted
-    * as a signed offset relative to that screen's origin. This is the
-    * only way to place the cursor on a non-rooted screen.
+    * as a signed offset relative to that screen's origin.
     */
    SVGA_FIFO_CURSOR_SCREEN_ID,
 
    /*
+    * Valid with SVGA_FIFO_CAP_DEAD
+    *
+    * An arbitrary value written by the host, drivers should not use it.
+    */
+   SVGA_FIFO_DEAD,
+
+   /*
+    * Valid with SVGA_FIFO_CAP_3D_HWVERSION_REVISED:
+    *
+    * Contains 3D HWVERSION (see SVGA3dHardwareVersion in svga3d_reg.h)
+    * on platforms that can enforce graphics resource limits.
+    */
+   SVGA_FIFO_3D_HWVERSION_REVISED,
+
+   /*
     * XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new
     * registers, but this must be done carefully and with judicious use of
     * capability bits, since comparisons based on SVGA_FIFO_MIN aren't
@@ -530,7 +566,7 @@ enum {
     * sets SVGA_FIFO_MIN high enough to leave room for them.
     */
 
-   // Valid if register exists:
+   /* Valid if register exists: */
    SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */
    SVGA_FIFO_FENCE_GOAL,         /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */
    SVGA_FIFO_BUSY,               /* See "FIFO Synchronization Registers" */
@@ -731,6 +767,37 @@ enum {
  *
  * - When a screen is resized, either using Screen Object commands or
  *   legacy multimon registers, its contents are preserved.
+ *
+ * SVGA_FIFO_CAP_GMR2 --
+ *
+ *    Provides new commands to define and remap guest memory regions (GMR).
+ *
+ *    New 2D commands:
+ *       DEFINE_GMR2, REMAP_GMR2.
+ *
+ * SVGA_FIFO_CAP_3D_HWVERSION_REVISED --
+ *
+ *    Indicates new register SVGA_FIFO_3D_HWVERSION_REVISED exists.
+ *    This register may replace SVGA_FIFO_3D_HWVERSION on platforms
+ *    that enforce graphics resource limits. This allows the platform
+ *    to clear SVGA_FIFO_3D_HWVERSION and disable 3D in legacy guest
+ *    drivers that do not limit their resources.
+ *
+ *    Note this is an alias to SVGA_FIFO_CAP_GMR2 because these indicators
+ *    are codependent (and thus we use a single capability bit).
+ *
+ * SVGA_FIFO_CAP_SCREEN_OBJECT_2 --
+ *
+ *    Modifies the DEFINE_SCREEN command to include a guest provided
+ *    backing store in GMR memory and the bytesPerLine for the backing
+ *    store. This capability requires the use of a backing store when
+ *    creating screen objects. However if SVGA_FIFO_CAP_SCREEN_OBJECT
+ *    is present then backing stores are optional.
+ *
+ * SVGA_FIFO_CAP_DEAD --
+ *
+ *    Drivers should not use this cap bit. This cap bit can not be
+ *    reused since some hosts already expose it.
  */
 
 #define SVGA_FIFO_CAP_NONE                  0
@@ -742,6 +809,10 @@ enum {
 #define SVGA_FIFO_CAP_ESCAPE            (1<<5)
 #define SVGA_FIFO_CAP_RESERVE           (1<<6)
 #define SVGA_FIFO_CAP_SCREEN_OBJECT     (1<<7)
+#define SVGA_FIFO_CAP_GMR2              (1<<8)
+#define SVGA_FIFO_CAP_3D_HWVERSION_REVISED  SVGA_FIFO_CAP_GMR2
+#define SVGA_FIFO_CAP_SCREEN_OBJECT_2   (1<<9)
+#define SVGA_FIFO_CAP_DEAD              (1<<10)
 
 
 /*
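Either Screen Object bit implies the Screen Object command set; a test like this is how the driver can decide whether to initialize the new screen-object display unit instead of the legacy one (helper name illustrative):

static inline bool vmw_example_has_screen_objects(uint32_t fifo_caps)
{
        return (fifo_caps & (SVGA_FIFO_CAP_SCREEN_OBJECT |
                             SVGA_FIFO_CAP_SCREEN_OBJECT_2)) != 0;
}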
@@ -752,7 +823,7 @@ enum {
 
 #define SVGA_FIFO_FLAG_NONE                 0
 #define SVGA_FIFO_FLAG_ACCELFRONT       (1<<0)
-#define SVGA_FIFO_FLAG_RESERVED        (1<<31) // Internal use only
+#define SVGA_FIFO_FLAG_RESERVED        (1<<31) /* Internal use only */
 
 /*
  * FIFO reservation sentinel value
@@ -785,22 +856,22 @@ enum {
    SVGA_VIDEO_DATA_OFFSET,
    SVGA_VIDEO_FORMAT,
    SVGA_VIDEO_COLORKEY,
-   SVGA_VIDEO_SIZE,          // Deprecated
+   SVGA_VIDEO_SIZE,          /* Deprecated */
    SVGA_VIDEO_WIDTH,
    SVGA_VIDEO_HEIGHT,
    SVGA_VIDEO_SRC_X,
    SVGA_VIDEO_SRC_Y,
    SVGA_VIDEO_SRC_WIDTH,
    SVGA_VIDEO_SRC_HEIGHT,
-   SVGA_VIDEO_DST_X,         // Signed int32
-   SVGA_VIDEO_DST_Y,         // Signed int32
+   SVGA_VIDEO_DST_X,         /* Signed int32 */
+   SVGA_VIDEO_DST_Y,         /* Signed int32 */
    SVGA_VIDEO_DST_WIDTH,
    SVGA_VIDEO_DST_HEIGHT,
    SVGA_VIDEO_PITCH_1,
    SVGA_VIDEO_PITCH_2,
    SVGA_VIDEO_PITCH_3,
-   SVGA_VIDEO_DATA_GMRID,    // Optional, defaults to SVGA_GMR_FRAMEBUFFER
-   SVGA_VIDEO_DST_SCREEN_ID, // Optional, defaults to virtual coords (SVGA_ID_INVALID)
+   SVGA_VIDEO_DATA_GMRID,    /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */
+   SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords (SVGA_ID_INVALID) */
    SVGA_VIDEO_NUM_REGS
 };
 
@@ -851,15 +922,51 @@ typedef struct SVGAOverlayUnit {
  * compatibility. New flags can be added, and the struct may grow,
  * but existing fields must retain their meaning.
  *
+ * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2 are required fields of
+ * a SVGAGuestPtr that is used to back the screen contents. This
+ * memory must come from the GFB. The guest is not allowed to
+ * access the memory and doing so will have undefined results. The
+ * backing store is required to be page aligned and the size is
+ * padded to the next page boundry. The number of pages is:
+ *       (bytesPerLine * size.width * 4 + PAGE_SIZE - 1) / PAGE_SIZE
+ *
+ * The pitch in the backingStore is required to be at least large
+ * enough to hold a 32bbp scanline. It is recommended that the
+ * driver pad bytesPerLine for a potential performance win.
+ *
+ * The cloneCount field is treated as a hint from the guest that
+ * the user wants this display to be cloned, countCount times. A
+ * value of zero means no cloning should happen.
  */
 
-#define SVGA_SCREEN_HAS_ROOT    (1 << 0)  // Screen is present in the virtual coord space
-#define SVGA_SCREEN_IS_PRIMARY  (1 << 1)  // Guest considers this screen to be 'primary'
-#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2)   // Guest is running a fullscreen app here
+#define SVGA_SCREEN_MUST_BE_SET     (1 << 0) /* Must be set or results undefined */
+#define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */
+#define SVGA_SCREEN_IS_PRIMARY      (1 << 1) /* Guest considers this screen to be 'primary' */
+#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) /* Guest is running a fullscreen app here */
+
+/*
+ * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When the screen is
+ * deactivated the base layer is defined to lose all contents and
+ * become black. When a screen is deactivated the backing store is
+ * optional. When set backingPtr and bytesPerLine will be ignored.
+ */
+#define SVGA_SCREEN_DEACTIVATE  (1 << 3)
+
+/*
+ * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2. When this flag is set
+ * the screen contents will be outputted as all black to the user
+ * though the base layer contents is preserved. The screen base layer
+ * can still be read and written to like normal though the no visible
+ * effect will be seen by the user. When the flag is changed the
+ * screen will be blanked or redrawn to the current contents as needed
+ * without any extra commands from the driver. This flag only has an
+ * effect when the screen is not deactivated.
+ */
+#define SVGA_SCREEN_BLANKING (1 << 4)
 
 typedef
 struct SVGAScreenObject {
-   uint32 structSize;   // sizeof(SVGAScreenObject)
+   uint32 structSize;   /* sizeof(SVGAScreenObject) */
    uint32 id;
    uint32 flags;
    struct {
@@ -869,7 +976,14 @@ struct SVGAScreenObject {
    struct {
       int32 x;
       int32 y;
-   } root;              // Only used if SVGA_SCREEN_HAS_ROOT is set.
+   } root;
+
+   /*
+    * Added and required by SVGA_FIFO_CAP_SCREEN_OBJECT_2, optional
+    * with SVGA_FIFO_CAP_SCREEN_OBJECT.
+    */
+   SVGAGuestImage backingStore;
+   uint32 cloneCount;
 } SVGAScreenObject;
 
 
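A hedged sketch of defining a screen backed by GFB memory, loosely modeled on the new vmwgfx_scrn.c code in this series; the command wrapper, flag selection and field values are illustrative:

static int vmw_example_define_screen(struct vmw_private *dev_priv,
                                     uint32 id, uint32 w, uint32 h,
                                     int32 x, int32 y,
                                     uint32 offset, uint32 bytes_per_line)
{
        struct {
                uint32 cmd_type;        /* SVGA_CMD_DEFINE_SCREEN */
                SVGAScreenObject obj;
        } *cmd;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        memset(cmd, 0, sizeof(*cmd));
        cmd->cmd_type = SVGA_CMD_DEFINE_SCREEN;
        cmd->obj.structSize = sizeof(SVGAScreenObject);
        cmd->obj.id = id;
        cmd->obj.flags = SVGA_SCREEN_MUST_BE_SET | SVGA_SCREEN_IS_PRIMARY;
        cmd->obj.size.width = w;
        cmd->obj.size.height = h;
        cmd->obj.root.x = x;
        cmd->obj.root.y = y;
        /* Backing store in the Guest Framebuffer, as SCREEN_OBJECT_2 requires. */
        cmd->obj.backingStore.ptr.gmrId = SVGA_GMR_FRAMEBUFFER;
        cmd->obj.backingStore.ptr.offset = offset;
        cmd->obj.backingStore.pitch = bytes_per_line;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        return 0;
}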
@@ -944,7 +1058,7 @@ typedef enum {
  */
 
 typedef
-struct {
+struct SVGAFifoCmdUpdate {
    uint32 x;
    uint32 y;
    uint32 width;
@@ -963,7 +1077,7 @@ struct {
  */
 
 typedef
-struct {
+struct SVGAFifoCmdRectCopy {
    uint32 srcX;
    uint32 srcY;
    uint32 destX;
@@ -987,14 +1101,14 @@ struct {
 
 typedef
-struct {
-   uint32 id;             // Reserved, must be zero.
+struct SVGAFifoCmdDefineCursor {
+   uint32 id;             /* Reserved, must be zero. */
    uint32 hotspotX;
    uint32 hotspotY;
    uint32 width;
    uint32 height;
-   uint32 andMaskDepth;   // Value must be 1 or equal to BITS_PER_PIXEL
-   uint32 xorMaskDepth;   // Value must be 1 or equal to BITS_PER_PIXEL
+   uint32 andMaskDepth;   /* Value must be 1 or equal to BITS_PER_PIXEL */
+   uint32 xorMaskDepth;   /* Value must be 1 or equal to BITS_PER_PIXEL */
    /*
     * Followed by scanline data for AND mask, then XOR mask.
     * Each scanline is padded to a 32-bit boundary.
@@ -1016,8 +1130,8 @@ struct {
  */
 
 typedef
-struct {
-   uint32 id;             // Reserved, must be zero.
+struct SVGAFifoCmdDefineAlphaCursor {
+   uint32 id;             /* Reserved, must be zero. */
    uint32 hotspotX;
    uint32 hotspotY;
    uint32 width;
@@ -1039,7 +1153,7 @@ struct {
  */
 
 typedef
-struct {
+struct SVGAFifoCmdUpdateVerbose {
    uint32 x;
    uint32 y;
    uint32 width;
@@ -1064,13 +1178,13 @@ struct {
 #define SVGA_ROP_COPY                    0x03
 
 typedef
-struct {
-   uint32 color;     // In the same format as the GFB
+struct SVGAFifoCmdFrontRopFill {
+   uint32 color;     /* In the same format as the GFB */
    uint32 x;
    uint32 y;
    uint32 width;
    uint32 height;
-   uint32 rop;       // Must be SVGA_ROP_COPY
+   uint32 rop;       /* Must be SVGA_ROP_COPY */
 } SVGAFifoCmdFrontRopFill;
 
 
@@ -1107,7 +1221,7 @@ struct {
  */
 
 typedef
-struct {
+struct SVGAFifoCmdEscape {
    uint32 nsid;
    uint32 size;
    /* followed by 'size' bytes of data */
@@ -1137,12 +1251,12 @@ struct {
  *    registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*).
  *
  * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
  */
 
 typedef
 struct {
-   SVGAScreenObject screen;   // Variable-length according to version
+   SVGAScreenObject screen;   /* Variable-length according to version */
 } SVGAFifoCmdDefineScreen;
 
 
@@ -1153,7 +1267,7 @@ struct {
  *    re-use.
  *
  * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
  */
 
 typedef
@@ -1206,7 +1320,7 @@ struct {
  *    GMRFB.
  *
 * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
  */
 
 typedef
@@ -1243,7 +1357,7 @@ struct {
  *    SVGA_CMD_ANNOTATION_* commands for details.
  *
 * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
  */
 
 typedef
@@ -1291,7 +1405,7 @@ struct {
  *    the time any subsequent FENCE commands are reached.
  *
 * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
  */
 
 typedef
@@ -1326,7 +1440,7 @@ struct {
  *    user's display is being remoted over a network connection.
  *
 * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
  */
 
 typedef
@@ -1358,7 +1472,7 @@ struct {
  *    undefined.
  *
 * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
  */
 
 typedef
@@ -1381,8 +1495,7 @@ typedef
 struct {
    uint32 gmrId;
    uint32 numPages;
-}
-SVGAFifoCmdDefineGMR2;
+} SVGAFifoCmdDefineGMR2;
 
 
 /*
@@ -1424,8 +1537,8 @@ typedef
 struct {
    uint32 gmrId;
    SVGARemapGMR2Flags flags;
-   uint32 offsetPages; /* offset in pages to begin remap */
-   uint32 numPages; /* number of pages to remap */
+   uint32 offsetPages;  /* offset in pages to begin remap */
+   uint32 numPages;     /* number of pages to remap */
    /*
     * Followed by additional data depending on SVGARemapGMR2Flags.
     *
@@ -1434,7 +1547,6 @@ struct {
     * (according to flag SVGA_REMAP_GMR2_PPN64) follows.  If flag
     * SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry.
     */
-}
-SVGAFifoCmdRemapGMR2;
+} SVGAFifoCmdRemapGMR2;
 
 #endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 5d665ce8cbe4..5a72ed908232 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -42,6 +42,10 @@ static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
 static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
 	TTM_PL_FLAG_CACHED;
 
+static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
+	TTM_PL_FLAG_CACHED |
+	TTM_PL_FLAG_NO_EVICT;
+
 struct ttm_placement vmw_vram_placement = {
 	.fpfn = 0,
 	.lpfn = 0,
@@ -56,6 +60,11 @@ static uint32_t vram_gmr_placement_flags[] = {
 	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
 };
 
+static uint32_t gmr_vram_placement_flags[] = {
+	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
+	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+};
+
 struct ttm_placement vmw_vram_gmr_placement = {
 	.fpfn = 0,
 	.lpfn = 0,
@@ -65,6 +74,20 @@ struct ttm_placement vmw_vram_gmr_placement = {
 	.busy_placement = &gmr_placement_flags
 };
 
+static uint32_t vram_gmr_ne_placement_flags[] = {
+	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT,
+	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
+
+struct ttm_placement vmw_vram_gmr_ne_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 2,
+	.placement = vram_gmr_ne_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &gmr_ne_placement_flags
+};
+
 struct ttm_placement vmw_vram_sys_placement = {
 	.fpfn = 0,
 	.lpfn = 0,
@@ -92,6 +115,30 @@ struct ttm_placement vmw_sys_placement = {
 	.busy_placement = &sys_placement_flags
 };
 
+static uint32_t evictable_placement_flags[] = {
+	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
+	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
+	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+};
+
+struct ttm_placement vmw_evictable_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 3,
+	.placement = evictable_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &sys_placement_flags
+};
+
+struct ttm_placement vmw_srf_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 1,
+	.num_busy_placement = 2,
+	.placement = &gmr_placement_flags,
+	.busy_placement = gmr_vram_placement_flags
+};
+
 struct vmw_ttm_backend {
 	struct ttm_backend backend;
 	struct page **pages;
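A note on how these placements are used: validating a buffer into vmw_vram_gmr_ne_placement keeps it in VRAM or GMR memory with TTM_PL_FLAG_NO_EVICT set, i.e. pinned. A sketch using vmw_dmabuf_to_placement(), the helper introduced in vmwgfx_dmabuf.c below (wrapper name illustrative):

static int vmw_example_pin_buffer(struct vmw_private *dev_priv,
                                  struct vmw_dma_buffer *buf)
{
        return vmw_dmabuf_to_placement(dev_priv, buf,
                                       &vmw_vram_gmr_ne_placement,
                                       true /* interruptible */);
}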
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c new file mode 100644 index 000000000000..3fa884db08ab --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | |||
@@ -0,0 +1,322 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | |||
28 | #include "ttm/ttm_placement.h" | ||
29 | |||
30 | #include "drmP.h" | ||
31 | #include "vmwgfx_drv.h" | ||
32 | |||
33 | |||
34 | /** | ||
35 | * vmw_dmabuf_to_placement - Validate a buffer to placement. | ||
36 | * | ||
37 | * @dev_priv: Driver private. | ||
38 | * @buf: DMA buffer to move. | ||
39 | * @placement: The placement to validate the buffer to. | ||
40 | * @interruptible: Use interruptible wait. | ||
41 | * | ||
42 | * May only be called by the current master since it assumes that the | ||
43 | * master lock is the current master's lock. | ||
44 | * This function takes the master's lock in write mode. | ||
45 | * Flushes and unpins the query bo to avoid failures. | ||
46 | * | ||
47 | * Returns | ||
48 | * -ERESTARTSYS if interrupted by a signal. | ||
49 | */ | ||
50 | int vmw_dmabuf_to_placement(struct vmw_private *dev_priv, | ||
51 | struct vmw_dma_buffer *buf, | ||
52 | struct ttm_placement *placement, | ||
53 | bool interruptible) | ||
54 | { | ||
55 | struct vmw_master *vmaster = dev_priv->active_master; | ||
56 | struct ttm_buffer_object *bo = &buf->base; | ||
57 | int ret; | ||
58 | |||
59 | ret = ttm_write_lock(&vmaster->lock, interruptible); | ||
60 | if (unlikely(ret != 0)) | ||
61 | return ret; | ||
62 | |||
63 | vmw_execbuf_release_pinned_bo(dev_priv, false, 0); | ||
64 | |||
65 | ret = ttm_bo_reserve(bo, interruptible, false, false, 0); | ||
66 | if (unlikely(ret != 0)) | ||
67 | goto err; | ||
68 | |||
69 | ret = ttm_bo_validate(bo, placement, interruptible, false, false); | ||
70 | |||
71 | ttm_bo_unreserve(bo); | ||
72 | |||
73 | err: | ||
74 | ttm_write_unlock(&vmaster->lock); | ||
75 | return ret; | ||
76 | } | ||
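The lock ordering established here (master lock, then buffer reservation, then validate) is the pattern every helper below follows. As a minimal usage sketch — assuming only the placement tables this patch exports from vmwgfx_buffer.c — a caller could pin a buffer into VRAM-or-GMR space like this (hypothetical helper, not part of the patch):

    /* Sketch: vmw_vram_gmr_ne_placement carries TTM_PL_FLAG_NO_EVICT,
     * so a successful validate leaves the buffer effectively pinned. */
    static int example_pin_vram_or_gmr(struct vmw_private *dev_priv,
                                       struct vmw_dma_buffer *buf)
    {
            return vmw_dmabuf_to_placement(dev_priv, buf,
                                           &vmw_vram_gmr_ne_placement,
                                           true /* interruptible */);
    }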
77 | |||
78 | /** | ||
79 | * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr. | ||
80 | * | ||
81 | * May only be called by the current master since it assumes that the | ||
82 | * master lock is the current master's lock. | ||
83 | * This function takes the master's lock in write mode. | ||
84 | * Flushes and unpins the query bo if @pin == true to avoid failures. | ||
85 | * | ||
86 | * @dev_priv: Driver private. | ||
87 | * @buf: DMA buffer to move. | ||
88 | * @pin: Pin buffer if true. | ||
89 | * @interruptible: Use interruptible wait. | ||
90 | * | ||
91 | * Returns | ||
92 | * -ERESTARTSYS if interrupted by a signal. | ||
93 | */ | ||
94 | int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv, | ||
95 | struct vmw_dma_buffer *buf, | ||
96 | bool pin, bool interruptible) | ||
97 | { | ||
98 | struct vmw_master *vmaster = dev_priv->active_master; | ||
99 | struct ttm_buffer_object *bo = &buf->base; | ||
100 | struct ttm_placement *placement; | ||
101 | int ret; | ||
102 | |||
103 | ret = ttm_write_lock(&vmaster->lock, interruptible); | ||
104 | if (unlikely(ret != 0)) | ||
105 | return ret; | ||
106 | |||
107 | if (pin) | ||
108 | vmw_execbuf_release_pinned_bo(dev_priv, false, 0); | ||
109 | |||
110 | ret = ttm_bo_reserve(bo, interruptible, false, false, 0); | ||
111 | if (unlikely(ret != 0)) | ||
112 | goto err; | ||
113 | |||
114 | /** | ||
115 | * Put BO in VRAM if there is space, otherwise as a GMR. | ||
116 | * If there is no space in VRAM and GMR ids are all used up, | ||
117 | * start evicting GMRs to make room. If the DMA buffer can't be | ||
118 | * used as a GMR, this will return -ENOMEM. | ||
119 | */ | ||
120 | |||
121 | if (pin) | ||
122 | placement = &vmw_vram_gmr_ne_placement; | ||
123 | else | ||
124 | placement = &vmw_vram_gmr_placement; | ||
125 | |||
126 | ret = ttm_bo_validate(bo, placement, interruptible, false, false); | ||
127 | if (likely(ret == 0) || ret == -ERESTARTSYS) | ||
128 | goto err_unreserve; | ||
129 | |||
130 | |||
131 | /** | ||
132 | * If that failed, try VRAM again, this time evicting | ||
133 | * previous contents. | ||
134 | */ | ||
135 | |||
136 | if (pin) | ||
137 | placement = &vmw_vram_ne_placement; | ||
138 | else | ||
139 | placement = &vmw_vram_placement; | ||
140 | |||
141 | ret = ttm_bo_validate(bo, placement, interruptible, false, false); | ||
142 | |||
143 | err_unreserve: | ||
144 | ttm_bo_unreserve(bo); | ||
145 | err: | ||
146 | ttm_write_unlock(&vmaster->lock); | ||
147 | return ret; | ||
148 | } | ||
149 | |||
150 | /** | ||
151 | * vmw_dmabuf_to_vram - Move a buffer to vram. | ||
152 | * | ||
153 | * May only be called by the current master since it assumes that the | ||
154 | * master lock is the current master's lock. | ||
155 | * This function takes the master's lock in write mode. | ||
156 | * | ||
157 | * @dev_priv: Driver private. | ||
158 | * @buf: DMA buffer to move. | ||
159 | * @pin: Pin buffer in vram if true. | ||
160 | * @interruptible: Use interruptible wait. | ||
161 | * | ||
162 | * Returns | ||
163 | * -ERESTARTSYS if interrupted by a signal. | ||
164 | */ | ||
165 | int vmw_dmabuf_to_vram(struct vmw_private *dev_priv, | ||
166 | struct vmw_dma_buffer *buf, | ||
167 | bool pin, bool interruptible) | ||
168 | { | ||
169 | struct ttm_placement *placement; | ||
170 | |||
171 | if (pin) | ||
172 | placement = &vmw_vram_ne_placement; | ||
173 | else | ||
174 | placement = &vmw_vram_placement; | ||
175 | |||
176 | return vmw_dmabuf_to_placement(dev_priv, buf, | ||
177 | placement, | ||
178 | interruptible); | ||
179 | } | ||
180 | |||
181 | /** | ||
182 | * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram. | ||
183 | * | ||
184 | * May only be called by the current master since it assumes that the | ||
185 | * master lock is the current master's lock. | ||
186 | * This function takes the master's lock in write mode. | ||
187 | * Flushes and unpins the query bo if @pin == true to avoid failures. | ||
188 | * | ||
189 | * @dev_priv: Driver private. | ||
190 | * @buf: DMA buffer to move. | ||
191 | * @pin: Pin buffer in vram if true. | ||
192 | * @interruptible: Use interruptible wait. | ||
193 | * | ||
194 | * Returns | ||
195 | * -ERESTARTSYS if interrupted by a signal. | ||
196 | */ | ||
197 | int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv, | ||
198 | struct vmw_dma_buffer *buf, | ||
199 | bool pin, bool interruptible) | ||
200 | { | ||
201 | struct vmw_master *vmaster = dev_priv->active_master; | ||
202 | struct ttm_buffer_object *bo = &buf->base; | ||
203 | struct ttm_placement placement; | ||
204 | int ret = 0; | ||
205 | |||
206 | if (pin) | ||
207 | placement = vmw_vram_ne_placement; | ||
208 | else | ||
209 | placement = vmw_vram_placement; | ||
210 | placement.lpfn = bo->num_pages; | ||
211 | |||
212 | ret = ttm_write_lock(&vmaster->lock, interruptible); | ||
213 | if (unlikely(ret != 0)) | ||
214 | return ret; | ||
215 | |||
216 | if (pin) | ||
217 | vmw_execbuf_release_pinned_bo(dev_priv, false, 0); | ||
218 | |||
219 | ret = ttm_bo_reserve(bo, interruptible, false, false, 0); | ||
220 | if (unlikely(ret != 0)) | ||
221 | goto err_unlock; | ||
222 | |||
223 | /* Is this buffer already in vram but not at the start of it? */ | ||
224 | if (bo->mem.mem_type == TTM_PL_VRAM && | ||
225 | bo->mem.start < bo->num_pages && | ||
226 | bo->mem.start > 0) | ||
227 | (void) ttm_bo_validate(bo, &vmw_sys_placement, false, | ||
228 | false, false); | ||
229 | |||
230 | ret = ttm_bo_validate(bo, &placement, interruptible, false, false); | ||
231 | |||
232 | /* For some reason we didn't end up at the start of vram */ | ||
233 | WARN_ON(ret == 0 && bo->offset != 0); | ||
234 | |||
235 | ttm_bo_unreserve(bo); | ||
236 | err_unlock: | ||
237 | ttm_write_unlock(&vmaster->lock); | ||
238 | |||
239 | return ret; | ||
240 | } | ||
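Worth noting how the "start of VRAM" constraint is expressed above: copying the placement and capping lpfn at the buffer's own page count means the only VRAM range that can satisfy the validate is the one beginning at offset zero. A stripped-down sketch of the same idea, names from this file and error handling elided:

    struct ttm_placement place = vmw_vram_placement;  /* local copy */
    place.lpfn = bo->num_pages;  /* must fit in pages [0, num_pages) */
    ret = ttm_bo_validate(bo, &place, false, false, false);
    /* on success, bo->offset == 0 */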
241 | |||
242 | |||
243 | /** | ||
244 | * vmw_dmabuf_unpin - Unpin the given buffer without moving it. | ||
245 | * | ||
246 | * May only be called by the current master since it assumes that the | ||
247 | * master lock is the current master's lock. | ||
248 | * This function takes the master's lock in write mode. | ||
249 | * | ||
250 | * @dev_priv: Driver private. | ||
251 | * @buf: DMA buffer to unpin. | ||
252 | * @interruptible: Use interruptible wait. | ||
253 | * | ||
254 | * Returns | ||
255 | * -ERESTARTSYS if interrupted by a signal. | ||
256 | */ | ||
257 | int vmw_dmabuf_unpin(struct vmw_private *dev_priv, | ||
258 | struct vmw_dma_buffer *buf, | ||
259 | bool interruptible) | ||
260 | { | ||
261 | /* | ||
262 | * We could in theory return early if the buffer is | ||
263 | * already unpinned, but we need to lock and reserve the | ||
264 | * buffer anyway, so we wouldn't gain much by doing so. | ||
265 | */ | ||
266 | return vmw_dmabuf_to_placement(dev_priv, buf, | ||
267 | &vmw_evictable_placement, | ||
268 | interruptible); | ||
269 | } | ||
270 | |||
271 | |||
272 | /** | ||
273 | * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement | ||
274 | * of a buffer. | ||
275 | * | ||
276 | * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved. | ||
277 | * @ptr: SVGAGuestPtr returning the result. | ||
278 | */ | ||
279 | void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo, | ||
280 | SVGAGuestPtr *ptr) | ||
281 | { | ||
282 | if (bo->mem.mem_type == TTM_PL_VRAM) { | ||
283 | ptr->gmrId = SVGA_GMR_FRAMEBUFFER; | ||
284 | ptr->offset = bo->offset; | ||
285 | } else { | ||
286 | ptr->gmrId = bo->mem.start; | ||
287 | ptr->offset = 0; | ||
288 | } | ||
289 | } | ||
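In other words, VRAM-backed buffers are addressed through the special SVGA_GMR_FRAMEBUFFER id plus a byte offset, while GMR-backed buffers are addressed by their GMR id at offset zero. A hedged sketch of a consumer filling in a FIFO command (the cmd variable and its fields stand in for whatever command is being built, e.g. an SVGA3dCmdSurfaceDMA):

    SVGAGuestPtr ptr;

    /* The bo must be pinned or reserved so its placement can't change
     * between reading it here and the device consuming the command. */
    vmw_bo_get_guest_ptr(&buf->base, &ptr);
    cmd->body.guest.ptr = ptr;  /* hypothetical command layout */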
290 | |||
291 | |||
292 | /** | ||
293 | * vmw_bo_pin - Pin or unpin a buffer object without moving it. | ||
294 | * | ||
295 | * @bo: The buffer object. Must be reserved, and present either in VRAM | ||
296 | * or GMR memory. | ||
297 | * @pin: Whether to pin or unpin. | ||
298 | * | ||
299 | */ | ||
300 | void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin) | ||
301 | { | ||
302 | uint32_t pl_flags; | ||
303 | struct ttm_placement placement; | ||
304 | uint32_t old_mem_type = bo->mem.mem_type; | ||
305 | int ret; | ||
306 | |||
307 | BUG_ON(!atomic_read(&bo->reserved)); | ||
308 | BUG_ON(old_mem_type != TTM_PL_VRAM && | ||
309 | old_mem_type != VMW_PL_GMR); | ||
310 | |||
311 | pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED; | ||
312 | if (pin) | ||
313 | pl_flags |= TTM_PL_FLAG_NO_EVICT; | ||
314 | |||
315 | memset(&placement, 0, sizeof(placement)); | ||
316 | placement.num_placement = 1; | ||
317 | placement.placement = &pl_flags; | ||
318 | |||
319 | ret = ttm_bo_validate(bo, &placement, false, true, true); | ||
320 | |||
321 | BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type); | ||
322 | } | ||
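Since the single placement entry matches both VRAM and GMR, the validate can always be satisfied in place; only the NO_EVICT bit changes. A usage sketch under the stated precondition that the buffer is reserved:

    ret = ttm_bo_reserve(bo, false, false, false, 0);
    if (likely(ret == 0)) {
            vmw_bo_pin(bo, true);   /* set NO_EVICT, no move */
            /* ... emit commands that reference the buffer ... */
            vmw_bo_pin(bo, false);  /* clear NO_EVICT again */
            ttm_bo_unreserve(bo);
    }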
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index d4829cbf326d..ddb5abd6ac56 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -94,6 +94,12 @@ | |||
94 | #define DRM_IOCTL_VMW_FENCE_UNREF \ | 94 | #define DRM_IOCTL_VMW_FENCE_UNREF \ |
95 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \ | 95 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \ |
96 | struct drm_vmw_fence_arg) | 96 | struct drm_vmw_fence_arg) |
97 | #define DRM_IOCTL_VMW_PRESENT \ | ||
98 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \ | ||
99 | struct drm_vmw_present_arg) | ||
100 | #define DRM_IOCTL_VMW_PRESENT_READBACK \ | ||
101 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \ | ||
102 | struct drm_vmw_present_readback_arg) | ||
97 | 103 | ||
98 | /** | 104 | /** |
99 | * The core DRM version of this macro doesn't account for | 105 | * The core DRM version of this macro doesn't account for |
@@ -146,6 +152,13 @@ static struct drm_ioctl_desc vmw_ioctls[] = { | |||
146 | DRM_AUTH | DRM_UNLOCKED), | 152 | DRM_AUTH | DRM_UNLOCKED), |
147 | VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, | 153 | VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, |
148 | DRM_AUTH | DRM_UNLOCKED), | 154 | DRM_AUTH | DRM_UNLOCKED), |
155 | |||
156 | /* These allow direct access to the framebuffers; mark as master-only. */ | ||
157 | VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl, | ||
158 | DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), | ||
159 | VMW_IOCTL_DEF(VMW_PRESENT_READBACK, | ||
160 | vmw_present_readback_ioctl, | ||
161 | DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), | ||
149 | }; | 162 | }; |
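With the table wired up, a master client reaches these through the usual libdrm command path. A hypothetical user-space sketch (the fields of struct drm_vmw_present_arg live in vmwgfx_drm.h and are not shown in this diff, so they are left unfilled here):

    struct drm_vmw_present_arg arg;

    memset(&arg, 0, sizeof(arg));
    /* ... fill in surface id, destination and clip rects ... */
    ret = drmCommandWrite(fd, DRM_VMW_PRESENT, &arg, sizeof(arg));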
150 | 163 | ||
151 | static struct pci_device_id vmw_pci_id_list[] = { | 164 | static struct pci_device_id vmw_pci_id_list[] = { |
@@ -200,6 +213,72 @@ static void vmw_print_capabilities(uint32_t capabilities) | |||
200 | DRM_INFO(" Screen Object 2.\n"); | 213 | DRM_INFO(" Screen Object 2.\n"); |
201 | } | 214 | } |
202 | 215 | ||
216 | |||
217 | /** | ||
218 | * vmw_dummy_query_bo_prepare - Initialize a query result structure at | ||
219 | * the start of a buffer object. | ||
220 | * | ||
221 | * @dev_priv: The device private structure. | ||
222 | * | ||
223 | * This function will idle the buffer using an uninterruptible wait, then | ||
224 | * map the first page and initialize a pending occlusion query result structure. | ||
225 | * Finally, it will unmap the buffer. | ||
226 | * | ||
227 | * TODO: Since we're only mapping a single page, we should optimize the map | ||
228 | * to use kmap_atomic / iomap_atomic. | ||
229 | */ | ||
230 | static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv) | ||
231 | { | ||
232 | struct ttm_bo_kmap_obj map; | ||
233 | volatile SVGA3dQueryResult *result; | ||
234 | bool dummy; | ||
235 | int ret; | ||
236 | struct ttm_bo_device *bdev = &dev_priv->bdev; | ||
237 | struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; | ||
238 | |||
239 | ttm_bo_reserve(bo, false, false, false, 0); | ||
240 | spin_lock(&bdev->fence_lock); | ||
241 | ret = ttm_bo_wait(bo, false, false, false, TTM_USAGE_READWRITE); | ||
242 | spin_unlock(&bdev->fence_lock); | ||
243 | if (unlikely(ret != 0)) | ||
244 | (void) vmw_fallback_wait(dev_priv, false, true, 0, false, | ||
245 | 10*HZ); | ||
246 | |||
247 | ret = ttm_bo_kmap(bo, 0, 1, &map); | ||
248 | if (likely(ret == 0)) { | ||
249 | result = ttm_kmap_obj_virtual(&map, &dummy); | ||
250 | result->totalSize = sizeof(*result); | ||
251 | result->state = SVGA3D_QUERYSTATE_PENDING; | ||
252 | result->result32 = 0xff; | ||
253 | ttm_bo_kunmap(&map); | ||
254 | } else | ||
255 | DRM_ERROR("Dummy query buffer map failed.\n"); | ||
256 | ttm_bo_unreserve(bo); | ||
257 | } | ||
258 | |||
259 | |||
260 | /** | ||
261 | * vmw_dummy_query_bo_create - create a bo to hold a dummy query result | ||
262 | * | ||
263 | * @dev_priv: A device private structure. | ||
264 | * | ||
265 | * This function creates a small buffer object that holds the query | ||
266 | * result for dummy queries emitted as query barriers. | ||
267 | * No interruptible waits are done within this function. | ||
268 | * | ||
269 | * Returns an error if bo creation fails. | ||
270 | */ | ||
271 | static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) | ||
272 | { | ||
273 | return ttm_bo_create(&dev_priv->bdev, | ||
274 | PAGE_SIZE, | ||
275 | ttm_bo_type_device, | ||
276 | &vmw_vram_sys_placement, | ||
277 | 0, 0, false, NULL, | ||
278 | &dev_priv->dummy_query_bo); | ||
279 | } | ||
280 | |||
281 | |||
203 | static int vmw_request_device(struct vmw_private *dev_priv) | 282 | static int vmw_request_device(struct vmw_private *dev_priv) |
204 | { | 283 | { |
205 | int ret; | 284 | int ret; |
@@ -210,12 +289,29 @@ static int vmw_request_device(struct vmw_private *dev_priv) | |||
210 | return ret; | 289 | return ret; |
211 | } | 290 | } |
212 | vmw_fence_fifo_up(dev_priv->fman); | 291 | vmw_fence_fifo_up(dev_priv->fman); |
292 | ret = vmw_dummy_query_bo_create(dev_priv); | ||
293 | if (unlikely(ret != 0)) | ||
294 | goto out_no_query_bo; | ||
295 | vmw_dummy_query_bo_prepare(dev_priv); | ||
213 | 296 | ||
214 | return 0; | 297 | return 0; |
298 | |||
299 | out_no_query_bo: | ||
300 | vmw_fence_fifo_down(dev_priv->fman); | ||
301 | vmw_fifo_release(dev_priv, &dev_priv->fifo); | ||
302 | return ret; | ||
215 | } | 303 | } |
216 | 304 | ||
217 | static void vmw_release_device(struct vmw_private *dev_priv) | 305 | static void vmw_release_device(struct vmw_private *dev_priv) |
218 | { | 306 | { |
307 | /* | ||
308 | * Earlier destruction paths should've released | ||
309 | * the pinned bo. | ||
310 | */ | ||
311 | |||
312 | BUG_ON(dev_priv->pinned_bo != NULL); | ||
313 | |||
314 | ttm_bo_unref(&dev_priv->dummy_query_bo); | ||
219 | vmw_fence_fifo_down(dev_priv->fman); | 315 | vmw_fence_fifo_down(dev_priv->fman); |
220 | vmw_fifo_release(dev_priv, &dev_priv->fifo); | 316 | vmw_fifo_release(dev_priv, &dev_priv->fifo); |
221 | } | 317 | } |
@@ -306,6 +402,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
306 | init_waitqueue_head(&dev_priv->fifo_queue); | 402 | init_waitqueue_head(&dev_priv->fifo_queue); |
307 | dev_priv->fence_queue_waiters = 0; | 403 | dev_priv->fence_queue_waiters = 0; |
308 | atomic_set(&dev_priv->fifo_queue_waiters, 0); | 404 | atomic_set(&dev_priv->fifo_queue_waiters, 0); |
405 | INIT_LIST_HEAD(&dev_priv->surface_lru); | ||
406 | dev_priv->used_memory_size = 0; | ||
309 | 407 | ||
310 | dev_priv->io_start = pci_resource_start(dev->pdev, 0); | 408 | dev_priv->io_start = pci_resource_start(dev->pdev, 0); |
311 | dev_priv->vram_start = pci_resource_start(dev->pdev, 1); | 409 | dev_priv->vram_start = pci_resource_start(dev->pdev, 1); |
@@ -326,6 +424,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
326 | 424 | ||
327 | dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); | 425 | dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); |
328 | 426 | ||
427 | dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); | ||
428 | dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); | ||
429 | dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH); | ||
430 | dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT); | ||
329 | if (dev_priv->capabilities & SVGA_CAP_GMR) { | 431 | if (dev_priv->capabilities & SVGA_CAP_GMR) { |
330 | dev_priv->max_gmr_descriptors = | 432 | dev_priv->max_gmr_descriptors = |
331 | vmw_read(dev_priv, | 433 | vmw_read(dev_priv, |
@@ -338,13 +440,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
338 | vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); | 440 | vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); |
339 | dev_priv->memory_size = | 441 | dev_priv->memory_size = |
340 | vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE); | 442 | vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE); |
443 | dev_priv->memory_size -= dev_priv->vram_size; | ||
444 | } else { | ||
445 | /* | ||
446 | * An arbitrary limit of 512MiB on surface | ||
447 | * memory. But all HWV8 hardware supports GMR2. | ||
448 | */ | ||
449 | dev_priv->memory_size = 512*1024*1024; | ||
341 | } | 450 | } |
342 | 451 | ||
343 | dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); | ||
344 | dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); | ||
345 | dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH); | ||
346 | dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT); | ||
347 | |||
348 | mutex_unlock(&dev_priv->hw_mutex); | 452 | mutex_unlock(&dev_priv->hw_mutex); |
349 | 453 | ||
350 | vmw_print_capabilities(dev_priv->capabilities); | 454 | vmw_print_capabilities(dev_priv->capabilities); |
@@ -358,8 +462,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
358 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { | 462 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { |
359 | DRM_INFO("Max number of GMR pages is %u\n", | 463 | DRM_INFO("Max number of GMR pages is %u\n", |
360 | (unsigned)dev_priv->max_gmr_pages); | 464 | (unsigned)dev_priv->max_gmr_pages); |
361 | DRM_INFO("Max dedicated hypervisor graphics memory is %u\n", | 465 | DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", |
362 | (unsigned)dev_priv->memory_size); | 466 | (unsigned)dev_priv->memory_size / 1024); |
363 | } | 467 | } |
364 | DRM_INFO("VRAM at 0x%08x size is %u kiB\n", | 468 | DRM_INFO("VRAM at 0x%08x size is %u kiB\n", |
365 | dev_priv->vram_start, dev_priv->vram_size / 1024); | 469 | dev_priv->vram_start, dev_priv->vram_size / 1024); |
@@ -451,22 +555,30 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
451 | dev_priv->fman = vmw_fence_manager_init(dev_priv); | 555 | dev_priv->fman = vmw_fence_manager_init(dev_priv); |
452 | if (unlikely(dev_priv->fman == NULL)) | 556 | if (unlikely(dev_priv->fman == NULL)) |
453 | goto out_no_fman; | 557 | goto out_no_fman; |
558 | |||
559 | /* Need to start the fifo to check if we can do screen objects */ | ||
560 | ret = vmw_3d_resource_inc(dev_priv, true); | ||
561 | if (unlikely(ret != 0)) | ||
562 | goto out_no_fifo; | ||
563 | vmw_kms_save_vga(dev_priv); | ||
564 | |||
565 | /* Start kms and overlay systems, needs fifo. */ | ||
454 | ret = vmw_kms_init(dev_priv); | 566 | ret = vmw_kms_init(dev_priv); |
455 | if (unlikely(ret != 0)) | 567 | if (unlikely(ret != 0)) |
456 | goto out_no_kms; | 568 | goto out_no_kms; |
457 | vmw_overlay_init(dev_priv); | 569 | vmw_overlay_init(dev_priv); |
570 | |||
571 | /* 3D Depends on Screen Objects being used. */ | ||
572 | DRM_INFO("Detected %sdevice 3D availability.\n", | ||
573 | vmw_fifo_have_3d(dev_priv) ? | ||
574 | "" : "no "); | ||
575 | |||
576 | /* We might be done with the fifo now */ | ||
458 | if (dev_priv->enable_fb) { | 577 | if (dev_priv->enable_fb) { |
459 | ret = vmw_3d_resource_inc(dev_priv, false); | ||
460 | if (unlikely(ret != 0)) | ||
461 | goto out_no_fifo; | ||
462 | vmw_kms_save_vga(dev_priv); | ||
463 | vmw_fb_init(dev_priv); | 578 | vmw_fb_init(dev_priv); |
464 | DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? | ||
465 | "Detected device 3D availability.\n" : | ||
466 | "Detected no device 3D availability.\n"); | ||
467 | } else { | 579 | } else { |
468 | DRM_INFO("Delayed 3D detection since we're not " | 580 | vmw_kms_restore_vga(dev_priv); |
469 | "running the device in SVGA mode yet.\n"); | 581 | vmw_3d_resource_dec(dev_priv, true); |
470 | } | 582 | } |
471 | 583 | ||
472 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { | 584 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { |
@@ -483,15 +595,17 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
483 | return 0; | 595 | return 0; |
484 | 596 | ||
485 | out_no_irq: | 597 | out_no_irq: |
486 | if (dev_priv->enable_fb) { | 598 | if (dev_priv->enable_fb) |
487 | vmw_fb_close(dev_priv); | 599 | vmw_fb_close(dev_priv); |
600 | vmw_overlay_close(dev_priv); | ||
601 | vmw_kms_close(dev_priv); | ||
602 | out_no_kms: | ||
603 | /* We still have a 3D resource reference held */ | ||
604 | if (dev_priv->enable_fb) { | ||
488 | vmw_kms_restore_vga(dev_priv); | 605 | vmw_kms_restore_vga(dev_priv); |
489 | vmw_3d_resource_dec(dev_priv, false); | 606 | vmw_3d_resource_dec(dev_priv, false); |
490 | } | 607 | } |
491 | out_no_fifo: | 608 | out_no_fifo: |
492 | vmw_overlay_close(dev_priv); | ||
493 | vmw_kms_close(dev_priv); | ||
494 | out_no_kms: | ||
495 | vmw_fence_manager_takedown(dev_priv->fman); | 609 | vmw_fence_manager_takedown(dev_priv->fman); |
496 | out_no_fman: | 610 | out_no_fman: |
497 | if (dev_priv->stealth) | 611 | if (dev_priv->stealth) |
@@ -771,7 +885,7 @@ static void vmw_master_drop(struct drm_device *dev, | |||
771 | 885 | ||
772 | vmw_fp->locked_master = drm_master_get(file_priv->master); | 886 | vmw_fp->locked_master = drm_master_get(file_priv->master); |
773 | ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); | 887 | ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); |
774 | vmw_kms_idle_workqueues(vmaster); | 888 | vmw_execbuf_release_pinned_bo(dev_priv, false, 0); |
775 | 889 | ||
776 | if (unlikely((ret != 0))) { | 890 | if (unlikely((ret != 0))) { |
777 | DRM_ERROR("Unable to lock TTM at VT switch.\n"); | 891 | DRM_ERROR("Unable to lock TTM at VT switch.\n"); |
@@ -823,6 +937,7 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, | |||
823 | * This empties VRAM and unbinds all GMR bindings. | 937 | * This empties VRAM and unbinds all GMR bindings. |
824 | * Buffer contents are moved to swappable memory. | 938 |
825 | */ | 939 | */ |
940 | vmw_execbuf_release_pinned_bo(dev_priv, false, 0); | ||
826 | ttm_bo_swapout_all(&dev_priv->bdev); | 941 | ttm_bo_swapout_all(&dev_priv->bdev); |
827 | 942 | ||
828 | break; | 943 | break; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 564a81582111..8cce73e7d18c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -40,9 +40,9 @@ | |||
40 | #include "ttm/ttm_module.h" | 40 | #include "ttm/ttm_module.h" |
41 | #include "vmwgfx_fence.h" | 41 | #include "vmwgfx_fence.h" |
42 | 42 | ||
43 | #define VMWGFX_DRIVER_DATE "20110901" | 43 | #define VMWGFX_DRIVER_DATE "20110927" |
44 | #define VMWGFX_DRIVER_MAJOR 2 | 44 | #define VMWGFX_DRIVER_MAJOR 2 |
45 | #define VMWGFX_DRIVER_MINOR 0 | 45 | #define VMWGFX_DRIVER_MINOR 1 |
46 | #define VMWGFX_DRIVER_PATCHLEVEL 0 | 46 | #define VMWGFX_DRIVER_PATCHLEVEL 0 |
47 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 | 47 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 |
48 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) | 48 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) |
@@ -79,9 +79,11 @@ struct vmw_resource { | |||
79 | int id; | 79 | int id; |
80 | enum ttm_object_type res_type; | 80 | enum ttm_object_type res_type; |
81 | bool avail; | 81 | bool avail; |
82 | void (*remove_from_lists) (struct vmw_resource *res); | ||
82 | void (*hw_destroy) (struct vmw_resource *res); | 83 | void (*hw_destroy) (struct vmw_resource *res); |
83 | void (*res_free) (struct vmw_resource *res); | 84 | void (*res_free) (struct vmw_resource *res); |
84 | bool on_validate_list; | 85 | struct list_head validate_head; |
86 | struct list_head query_head; /* Protected by the cmdbuf mutex */ | ||
85 | /* TODO is a generic snooper needed? */ | 87 | /* TODO is a generic snooper needed? */ |
86 | #if 0 | 88 | #if 0 |
87 | void (*snoop)(struct vmw_resource *res, | 89 | void (*snoop)(struct vmw_resource *res, |
@@ -97,8 +99,12 @@ struct vmw_cursor_snooper { | |||
97 | uint32_t *image; | 99 | uint32_t *image; |
98 | }; | 100 | }; |
99 | 101 | ||
102 | struct vmw_framebuffer; | ||
103 | struct vmw_surface_offset; | ||
104 | |||
100 | struct vmw_surface { | 105 | struct vmw_surface { |
101 | struct vmw_resource res; | 106 | struct vmw_resource res; |
107 | struct list_head lru_head; /* Protected by the resource lock */ | ||
102 | uint32_t flags; | 108 | uint32_t flags; |
103 | uint32_t format; | 109 | uint32_t format; |
104 | uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; | 110 | uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; |
@@ -109,6 +115,9 @@ struct vmw_surface { | |||
109 | 115 | ||
110 | /* TODO so far just an extra pointer */ | 116 |
111 | struct vmw_cursor_snooper snooper; | 117 | struct vmw_cursor_snooper snooper; |
118 | struct ttm_buffer_object *backup; | ||
119 | struct vmw_surface_offset *offsets; | ||
120 | uint32_t backup_size; | ||
112 | }; | 121 | }; |
113 | 122 | ||
114 | struct vmw_marker_queue { | 123 | struct vmw_marker_queue { |
@@ -139,6 +148,8 @@ struct vmw_sw_context{ | |||
139 | struct ida bo_list; | 148 | struct ida bo_list; |
140 | uint32_t last_cid; | 149 | uint32_t last_cid; |
141 | bool cid_valid; | 150 | bool cid_valid; |
151 | bool kernel; /**< is the call made from the kernel */ | ||
152 | struct vmw_resource *cur_ctx; | ||
142 | uint32_t last_sid; | 153 | uint32_t last_sid; |
143 | uint32_t sid_translation; | 154 | uint32_t sid_translation; |
144 | bool sid_valid; | 155 | bool sid_valid; |
@@ -150,8 +161,12 @@ struct vmw_sw_context{ | |||
150 | uint32_t cur_val_buf; | 161 | uint32_t cur_val_buf; |
151 | uint32_t *cmd_bounce; | 162 | uint32_t *cmd_bounce; |
152 | uint32_t cmd_bounce_size; | 163 | uint32_t cmd_bounce_size; |
153 | struct vmw_resource *resources[VMWGFX_MAX_VALIDATIONS]; | 164 | struct list_head resource_list; |
154 | uint32_t num_ref_resources; | 165 | uint32_t fence_flags; |
166 | struct list_head query_list; | ||
167 | struct ttm_buffer_object *cur_query_bo; | ||
168 | uint32_t cur_query_cid; | ||
169 | bool query_cid_valid; | ||
155 | }; | 170 | }; |
156 | 171 | ||
157 | struct vmw_legacy_display; | 172 | struct vmw_legacy_display; |
@@ -216,6 +231,7 @@ struct vmw_private { | |||
216 | 231 | ||
217 | void *fb_info; | 232 | void *fb_info; |
218 | struct vmw_legacy_display *ldu_priv; | 233 | struct vmw_legacy_display *ldu_priv; |
234 | struct vmw_screen_object_display *sou_priv; | ||
219 | struct vmw_overlay *overlay_priv; | 235 | struct vmw_overlay *overlay_priv; |
220 | 236 | ||
221 | /* | 237 | /* |
@@ -290,6 +306,26 @@ struct vmw_private { | |||
290 | 306 | ||
291 | struct mutex release_mutex; | 307 | struct mutex release_mutex; |
292 | uint32_t num_3d_resources; | 308 | uint32_t num_3d_resources; |
309 | |||
310 | /* | ||
311 | * Query processing. These members | ||
312 | * are protected by the cmdbuf mutex. | ||
313 | */ | ||
314 | |||
315 | struct ttm_buffer_object *dummy_query_bo; | ||
316 | struct ttm_buffer_object *pinned_bo; | ||
317 | uint32_t query_cid; | ||
318 | bool dummy_query_bo_pinned; | ||
319 | |||
320 | /* | ||
321 | * Surface swapping. The "surface_lru" list is protected by the | ||
322 | * resource lock in order to be able to destroy a surface and take | ||
323 | * it off the lru atomically. "used_memory_size" is currently | ||
324 | * protected by the cmdbuf mutex for simplicity. | ||
325 | */ | ||
326 | |||
327 | struct list_head surface_lru; | ||
328 | uint32_t used_memory_size; | ||
293 | }; | 329 | }; |
294 | 330 | ||
295 | static inline struct vmw_private *vmw_priv(struct drm_device *dev) | 331 | static inline struct vmw_private *vmw_priv(struct drm_device *dev) |
@@ -369,6 +405,8 @@ extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | |||
369 | extern int vmw_surface_check(struct vmw_private *dev_priv, | 405 | extern int vmw_surface_check(struct vmw_private *dev_priv, |
370 | struct ttm_object_file *tfile, | 406 | struct ttm_object_file *tfile, |
371 | uint32_t handle, int *id); | 407 | uint32_t handle, int *id); |
408 | extern int vmw_surface_validate(struct vmw_private *dev_priv, | ||
409 | struct vmw_surface *srf); | ||
372 | extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo); | 410 | extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo); |
373 | extern int vmw_dmabuf_init(struct vmw_private *dev_priv, | 411 | extern int vmw_dmabuf_init(struct vmw_private *dev_priv, |
374 | struct vmw_dma_buffer *vmw_bo, | 412 | struct vmw_dma_buffer *vmw_bo, |
@@ -384,10 +422,6 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, | |||
384 | extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); | 422 | extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); |
385 | extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, | 423 | extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, |
386 | uint32_t id, struct vmw_dma_buffer **out); | 424 | uint32_t id, struct vmw_dma_buffer **out); |
387 | extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, | ||
388 | struct vmw_dma_buffer *bo); | ||
389 | extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, | ||
390 | struct vmw_dma_buffer *bo); | ||
391 | extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, | 425 | extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, |
392 | struct drm_file *file_priv); | 426 | struct drm_file *file_priv); |
393 | extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, | 427 | extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, |
@@ -396,7 +430,30 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv, | |||
396 | struct ttm_object_file *tfile, | 430 | struct ttm_object_file *tfile, |
397 | uint32_t *inout_id, | 431 | uint32_t *inout_id, |
398 | struct vmw_resource **out); | 432 | struct vmw_resource **out); |
433 | extern void vmw_resource_unreserve(struct list_head *list); | ||
399 | 434 | ||
435 | /** | ||
436 | * DMA buffer helper routines - vmwgfx_dmabuf.c | ||
437 | */ | ||
438 | extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv, | ||
439 | struct vmw_dma_buffer *bo, | ||
440 | struct ttm_placement *placement, | ||
441 | bool interruptible); | ||
442 | extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv, | ||
443 | struct vmw_dma_buffer *buf, | ||
444 | bool pin, bool interruptible); | ||
445 | extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv, | ||
446 | struct vmw_dma_buffer *buf, | ||
447 | bool pin, bool interruptible); | ||
448 | extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, | ||
449 | struct vmw_dma_buffer *bo, | ||
450 | bool pin, bool interruptible); | ||
451 | extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv, | ||
452 | struct vmw_dma_buffer *bo, | ||
453 | bool interruptible); | ||
454 | extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf, | ||
455 | SVGAGuestPtr *ptr); | ||
456 | extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin); | ||
400 | 457 | ||
401 | /** | 458 | /** |
402 | * Misc Ioctl functionality - vmwgfx_ioctl.c | 459 | * Misc Ioctl functionality - vmwgfx_ioctl.c |
@@ -406,6 +463,10 @@ extern int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
406 | struct drm_file *file_priv); | 463 | struct drm_file *file_priv); |
407 | extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, | 464 | extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, |
408 | struct drm_file *file_priv); | 465 | struct drm_file *file_priv); |
466 | extern int vmw_present_ioctl(struct drm_device *dev, void *data, | ||
467 | struct drm_file *file_priv); | ||
468 | extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data, | ||
469 | struct drm_file *file_priv); | ||
409 | 470 | ||
410 | /** | 471 | /** |
411 | * Fifo utilities - vmwgfx_fifo.c | 472 | * Fifo utilities - vmwgfx_fifo.c |
@@ -422,6 +483,8 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv, | |||
422 | extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason); | 483 | extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason); |
423 | extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv); | 484 | extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv); |
424 | extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv); | 485 | extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv); |
486 | extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, | ||
487 | uint32_t cid); | ||
425 | 488 | ||
426 | /** | 489 | /** |
427 | * TTM glue - vmwgfx_ttm_glue.c | 490 | * TTM glue - vmwgfx_ttm_glue.c |
@@ -439,7 +502,10 @@ extern struct ttm_placement vmw_vram_placement; | |||
439 | extern struct ttm_placement vmw_vram_ne_placement; | 502 | extern struct ttm_placement vmw_vram_ne_placement; |
440 | extern struct ttm_placement vmw_vram_sys_placement; | 503 | extern struct ttm_placement vmw_vram_sys_placement; |
441 | extern struct ttm_placement vmw_vram_gmr_placement; | 504 | extern struct ttm_placement vmw_vram_gmr_placement; |
505 | extern struct ttm_placement vmw_vram_gmr_ne_placement; | ||
442 | extern struct ttm_placement vmw_sys_placement; | 506 | extern struct ttm_placement vmw_sys_placement; |
507 | extern struct ttm_placement vmw_evictable_placement; | ||
508 | extern struct ttm_placement vmw_srf_placement; | ||
443 | extern struct ttm_bo_driver vmw_bo_driver; | 509 | extern struct ttm_bo_driver vmw_bo_driver; |
444 | extern int vmw_dma_quiescent(struct drm_device *dev); | 510 | extern int vmw_dma_quiescent(struct drm_device *dev); |
445 | 511 | ||
@@ -449,6 +515,24 @@ extern int vmw_dma_quiescent(struct drm_device *dev); | |||
449 | 515 | ||
450 | extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | 516 | extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data, |
451 | struct drm_file *file_priv); | 517 | struct drm_file *file_priv); |
518 | extern int vmw_execbuf_process(struct drm_file *file_priv, | ||
519 | struct vmw_private *dev_priv, | ||
520 | void __user *user_commands, | ||
521 | void *kernel_commands, | ||
522 | uint32_t command_size, | ||
523 | uint64_t throttle_us, | ||
524 | struct drm_vmw_fence_rep __user | ||
525 | *user_fence_rep); | ||
526 | |||
527 | extern void | ||
528 | vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, | ||
529 | bool only_on_cid_match, uint32_t cid); | ||
530 | |||
531 | extern int vmw_execbuf_fence_commands(struct drm_file *file_priv, | ||
532 | struct vmw_private *dev_priv, | ||
533 | struct vmw_fence_obj **p_fence, | ||
534 | uint32_t *p_handle); | ||
535 | |||
452 | 536 | ||
453 | /** | 537 | /** |
454 | * IRQs and waiting - vmwgfx_irq.c | 538 |
@@ -520,6 +604,19 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, | |||
520 | uint32_t pitch, | 604 | uint32_t pitch, |
521 | uint32_t height); | 605 | uint32_t height); |
522 | u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc); | 606 | u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc); |
607 | int vmw_kms_present(struct vmw_private *dev_priv, | ||
608 | struct drm_file *file_priv, | ||
609 | struct vmw_framebuffer *vfb, | ||
610 | struct vmw_surface *surface, | ||
611 | uint32_t sid, int32_t destX, int32_t destY, | ||
612 | struct drm_vmw_rect *clips, | ||
613 | uint32_t num_clips); | ||
614 | int vmw_kms_readback(struct vmw_private *dev_priv, | ||
615 | struct drm_file *file_priv, | ||
616 | struct vmw_framebuffer *vfb, | ||
617 | struct drm_vmw_fence_rep __user *user_fence_rep, | ||
618 | struct drm_vmw_rect *clips, | ||
619 | uint32_t num_clips); | ||
523 | 620 | ||
524 | /** | 621 | /** |
525 | * Overlay control - vmwgfx_overlay.c | 622 | * Overlay control - vmwgfx_overlay.c |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index fa26e647f488..8a22f9d4a610 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -44,28 +44,64 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv, | |||
44 | return 0; | 44 | return 0; |
45 | } | 45 | } |
46 | 46 | ||
47 | 47 | static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context, | |
48 | static int vmw_resource_to_validate_list(struct vmw_sw_context *sw_context, | 48 | struct vmw_resource **p_res) |
49 | struct vmw_resource **p_res) | ||
50 | { | 49 | { |
51 | int ret = 0; | ||
52 | struct vmw_resource *res = *p_res; | 50 | struct vmw_resource *res = *p_res; |
53 | 51 | ||
54 | if (!res->on_validate_list) { | 52 | if (list_empty(&res->validate_head)) { |
55 | if (sw_context->num_ref_resources >= VMWGFX_MAX_VALIDATIONS) { | 53 | list_add_tail(&res->validate_head, &sw_context->resource_list); |
56 | DRM_ERROR("Too many resources referenced in " | 54 | *p_res = NULL; |
57 | "command stream.\n"); | 55 | } else |
58 | ret = -ENOMEM; | 56 | vmw_resource_unreference(p_res); |
59 | goto out; | 57 | } |
60 | } | 58 | |
61 | sw_context->resources[sw_context->num_ref_resources++] = res; | 59 | /** |
62 | res->on_validate_list = true; | 60 | * vmw_bo_to_validate_list - add a bo to a validate list |
63 | return 0; | 61 | * |
62 | * @sw_context: The software context used for this command submission batch. | ||
63 | * @bo: The buffer object to add. | ||
64 | * @fence_flags: Fence flags to be or'ed with any other fence flags for | ||
65 | * this buffer on this submission batch. | ||
66 | * @p_val_node: If non-NULL Will be updated with the validate node number | ||
67 | * on return. | ||
68 | * | ||
69 | * Returns -EINVAL if the limit on the number of buffer objects per command | ||
70 | * submission is reached. | ||
71 | */ | ||
72 | static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, | ||
73 | struct ttm_buffer_object *bo, | ||
74 | uint32_t fence_flags, | ||
75 | uint32_t *p_val_node) | ||
76 | { | ||
77 | uint32_t val_node; | ||
78 | struct ttm_validate_buffer *val_buf; | ||
79 | |||
80 | val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); | ||
81 | |||
82 | if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) { | ||
83 | DRM_ERROR("Max number of DMA buffers per submission" | ||
84 | " exceeded.\n"); | ||
85 | return -EINVAL; | ||
64 | } | 86 | } |
65 | 87 | ||
66 | out: | 88 | val_buf = &sw_context->val_bufs[val_node]; |
67 | vmw_resource_unreference(p_res); | 89 | if (unlikely(val_node == sw_context->cur_val_buf)) { |
68 | return ret; | 90 | val_buf->new_sync_obj_arg = NULL; |
91 | val_buf->bo = ttm_bo_reference(bo); | ||
92 | val_buf->usage = TTM_USAGE_READWRITE; | ||
93 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); | ||
94 | ++sw_context->cur_val_buf; | ||
95 | } | ||
96 | |||
97 | val_buf->new_sync_obj_arg = (void *) | ||
98 | ((unsigned long) val_buf->new_sync_obj_arg | fence_flags); | ||
99 | sw_context->fence_flags |= fence_flags; | ||
100 | |||
101 | if (p_val_node) | ||
102 | *p_val_node = val_node; | ||
103 | |||
104 | return 0; | ||
69 | } | 105 | } |
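Because vmw_dmabuf_validate_node() returns the existing node for a buffer that has already been added, repeated calls for the same bo are cheap and only OR in extra fence flags. A hedged caller-side sketch:

    uint32_t val_node;

    ret = vmw_bo_to_validate_list(sw_context, bo,
                                  DRM_VMW_FENCE_FLAG_EXEC, &val_node);
    if (unlikely(ret != 0))
            return ret;  /* VMWGFX_MAX_VALIDATIONS exceeded */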
70 | 106 | ||
71 | static int vmw_cmd_cid_check(struct vmw_private *dev_priv, | 107 | static int vmw_cmd_cid_check(struct vmw_private *dev_priv, |
@@ -94,7 +130,10 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv, | |||
94 | 130 | ||
95 | sw_context->last_cid = cmd->cid; | 131 | sw_context->last_cid = cmd->cid; |
96 | sw_context->cid_valid = true; | 132 | sw_context->cid_valid = true; |
97 | return vmw_resource_to_validate_list(sw_context, &ctx); | 133 | sw_context->cur_ctx = ctx; |
134 | vmw_resource_to_validate_list(sw_context, &ctx); | ||
135 | |||
136 | return 0; | ||
98 | } | 137 | } |
99 | 138 | ||
100 | static int vmw_cmd_sid_check(struct vmw_private *dev_priv, | 139 | static int vmw_cmd_sid_check(struct vmw_private *dev_priv, |
@@ -114,7 +153,8 @@ static int vmw_cmd_sid_check(struct vmw_private *dev_priv, | |||
114 | return 0; | 153 | return 0; |
115 | } | 154 | } |
116 | 155 | ||
117 | ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile, | 156 | ret = vmw_user_surface_lookup_handle(dev_priv, |
157 | sw_context->tfile, | ||
118 | *sid, &srf); | 158 | *sid, &srf); |
119 | if (unlikely(ret != 0)) { | 159 | if (unlikely(ret != 0)) { |
120 | DRM_ERROR("Could ot find or use surface 0x%08x " | 160 | DRM_ERROR("Could ot find or use surface 0x%08x " |
@@ -124,13 +164,23 @@ static int vmw_cmd_sid_check(struct vmw_private *dev_priv, | |||
124 | return ret; | 164 | return ret; |
125 | } | 165 | } |
126 | 166 | ||
167 | ret = vmw_surface_validate(dev_priv, srf); | ||
168 | if (unlikely(ret != 0)) { | ||
169 | if (ret != -ERESTARTSYS) | ||
170 | DRM_ERROR("Could not validate surface.\n"); | ||
171 | vmw_surface_unreference(&srf); | ||
172 | return ret; | ||
173 | } | ||
174 | |||
127 | sw_context->last_sid = *sid; | 175 | sw_context->last_sid = *sid; |
128 | sw_context->sid_valid = true; | 176 | sw_context->sid_valid = true; |
129 | sw_context->sid_translation = srf->res.id; | 177 | sw_context->sid_translation = srf->res.id; |
130 | *sid = sw_context->sid_translation; | 178 | *sid = sw_context->sid_translation; |
131 | 179 | ||
132 | res = &srf->res; | 180 | res = &srf->res; |
133 | return vmw_resource_to_validate_list(sw_context, &res); | 181 | vmw_resource_to_validate_list(sw_context, &res); |
182 | |||
183 | return 0; | ||
134 | } | 184 | } |
135 | 185 | ||
136 | 186 | ||
@@ -197,6 +247,12 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, | |||
197 | } *cmd; | 247 | } *cmd; |
198 | 248 | ||
199 | cmd = container_of(header, struct vmw_sid_cmd, header); | 249 | cmd = container_of(header, struct vmw_sid_cmd, header); |
250 | |||
251 | if (unlikely(!sw_context->kernel)) { | ||
252 | DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id); | ||
253 | return -EPERM; | ||
254 | } | ||
255 | |||
200 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid); | 256 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid); |
201 | } | 257 | } |
202 | 258 | ||
@@ -209,10 +265,179 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv, | |||
209 | SVGA3dCmdPresent body; | 265 | SVGA3dCmdPresent body; |
210 | } *cmd; | 266 | } *cmd; |
211 | 267 | ||
268 | |||
212 | cmd = container_of(header, struct vmw_sid_cmd, header); | 269 | cmd = container_of(header, struct vmw_sid_cmd, header); |
270 | |||
271 | if (unlikely(!sw_context->kernel)) { | ||
272 | DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id); | ||
273 | return -EPERM; | ||
274 | } | ||
275 | |||
213 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); | 276 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); |
214 | } | 277 | } |
215 | 278 | ||
279 | /** | ||
280 | * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries. | ||
281 | * | ||
282 | * @dev_priv: The device private structure. | ||
283 | * @cid: The hardware context for the next query. | ||
284 | * @new_query_bo: The new buffer holding query results. | ||
285 | * @sw_context: The software context used for this command submission. | ||
286 | * | ||
287 | * This function checks whether @new_query_bo is suitable for holding | ||
288 | * query results, and if another buffer currently is pinned for query | ||
289 | * results. If so, the function prepares the state of @sw_context for | ||
290 | * switching pinned buffers after successful submission of the current | ||
291 | * command batch. It also checks whether we're using a new query context. | ||
292 | * In that case, it makes sure we emit a query barrier for the old | ||
293 | * context before the current query buffer is fenced. | ||
294 | */ | ||
295 | static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, | ||
296 | uint32_t cid, | ||
297 | struct ttm_buffer_object *new_query_bo, | ||
298 | struct vmw_sw_context *sw_context) | ||
299 | { | ||
300 | int ret; | ||
301 | bool add_cid = false; | ||
302 | uint32_t cid_to_add; | ||
303 | |||
304 | if (unlikely(new_query_bo != sw_context->cur_query_bo)) { | ||
305 | |||
306 | if (unlikely(new_query_bo->num_pages > 4)) { | ||
307 | DRM_ERROR("Query buffer too large.\n"); | ||
308 | return -EINVAL; | ||
309 | } | ||
310 | |||
311 | if (unlikely(sw_context->cur_query_bo != NULL)) { | ||
312 | BUG_ON(!sw_context->query_cid_valid); | ||
313 | add_cid = true; | ||
314 | cid_to_add = sw_context->cur_query_cid; | ||
315 | ret = vmw_bo_to_validate_list(sw_context, | ||
316 | sw_context->cur_query_bo, | ||
317 | DRM_VMW_FENCE_FLAG_EXEC, | ||
318 | NULL); | ||
319 | if (unlikely(ret != 0)) | ||
320 | return ret; | ||
321 | } | ||
322 | sw_context->cur_query_bo = new_query_bo; | ||
323 | |||
324 | ret = vmw_bo_to_validate_list(sw_context, | ||
325 | dev_priv->dummy_query_bo, | ||
326 | DRM_VMW_FENCE_FLAG_EXEC, | ||
327 | NULL); | ||
328 | if (unlikely(ret != 0)) | ||
329 | return ret; | ||
330 | |||
331 | } | ||
332 | |||
333 | if (unlikely(cid != sw_context->cur_query_cid && | ||
334 | sw_context->query_cid_valid)) { | ||
335 | add_cid = true; | ||
336 | cid_to_add = sw_context->cur_query_cid; | ||
337 | } | ||
338 | |||
339 | sw_context->cur_query_cid = cid; | ||
340 | sw_context->query_cid_valid = true; | ||
341 | |||
342 | if (add_cid) { | ||
343 | struct vmw_resource *ctx = sw_context->cur_ctx; | ||
344 | |||
345 | if (list_empty(&ctx->query_head)) | ||
346 | list_add_tail(&ctx->query_head, | ||
347 | &sw_context->query_list); | ||
348 | ret = vmw_bo_to_validate_list(sw_context, | ||
349 | dev_priv->dummy_query_bo, | ||
350 | DRM_VMW_FENCE_FLAG_EXEC, | ||
351 | NULL); | ||
352 | if (unlikely(ret != 0)) | ||
353 | return ret; | ||
354 | } | ||
355 | return 0; | ||
356 | } | ||
357 | |||
358 | |||
359 | /** | ||
360 | * vmw_query_bo_switch_commit - Finalize switching pinned query buffer | ||
361 | * | ||
362 | * @dev_priv: The device private structure. | ||
363 | * @sw_context: The software context used for this command submission batch. | ||
364 | * | ||
365 | * This function will check if we're switching query buffers, and will then, | ||
366 | * if no other query waits are issued in this command submission batch, | ||
367 | * issue a dummy occlusion query wait used as a query barrier. When the fence | ||
368 | * object following that query wait has signaled, we are sure that all | ||
369 | * preceding queries have finished, and the old query buffer can be unpinned. | ||
370 | * However, since both the new query buffer and the old one are fenced with | ||
371 | * that fence, we can do an asynchronous unpin now, and be sure that the | ||
372 | * old query buffer won't be moved until the fence has signaled. | ||
373 | * | ||
374 | * As mentioned above, both the new and the old query buffers need to be fenced | ||
375 | * using a sequence emitted *after* calling this function. | ||
376 | */ | ||
377 | static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, | ||
378 | struct vmw_sw_context *sw_context) | ||
379 | { | ||
380 | |||
381 | struct vmw_resource *ctx, *next_ctx; | ||
382 | int ret; | ||
383 | |||
384 | /* | ||
385 | * The validate list should still hold references to all | ||
386 | * contexts here. | ||
387 | */ | ||
388 | |||
389 | list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list, | ||
390 | query_head) { | ||
391 | list_del_init(&ctx->query_head); | ||
392 | |||
393 | BUG_ON(list_empty(&ctx->validate_head)); | ||
394 | |||
395 | ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id); | ||
396 | |||
397 | if (unlikely(ret != 0)) | ||
398 | DRM_ERROR("Out of fifo space for dummy query.\n"); | ||
399 | } | ||
400 | |||
401 | if (dev_priv->pinned_bo != sw_context->cur_query_bo) { | ||
402 | if (dev_priv->pinned_bo) { | ||
403 | vmw_bo_pin(dev_priv->pinned_bo, false); | ||
404 | ttm_bo_unref(&dev_priv->pinned_bo); | ||
405 | } | ||
406 | |||
407 | vmw_bo_pin(sw_context->cur_query_bo, true); | ||
408 | |||
409 | /* | ||
410 | * We also pin the dummy_query_bo buffer so that we | ||
411 | * don't need to validate it when emitting | ||
412 | * dummy queries in context destroy paths. | ||
413 | */ | ||
414 | |||
415 | vmw_bo_pin(dev_priv->dummy_query_bo, true); | ||
416 | dev_priv->dummy_query_bo_pinned = true; | ||
417 | |||
418 | dev_priv->query_cid = sw_context->cur_query_cid; | ||
419 | dev_priv->pinned_bo = | ||
420 | ttm_bo_reference(sw_context->cur_query_bo); | ||
421 | } | ||
422 | } | ||
423 | |||
424 | /** | ||
425 | * vmw_query_switch_backoff - clear query barrier list | ||
426 | * @sw_context: The sw context used for this submission batch. | ||
427 | * | ||
428 | * This function is used as part of an error path, where a previously | ||
429 | * set up list of query barriers needs to be cleared. | ||
430 | * | ||
431 | */ | ||
432 | static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context) | ||
433 | { | ||
434 | struct list_head *list, *next; | ||
435 | |||
436 | list_for_each_safe(list, next, &sw_context->query_list) { | ||
437 | list_del_init(list); | ||
438 | } | ||
439 | } | ||
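Taken together, the three helpers give the execbuf path (not shown in this hunk) a simple shape; a hedged sketch of the intended call order, with error handling elided:

    /* While checking commands, per SVGA3dCmdEndQuery: */
    ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid,
                                      &vmw_bo->base, sw_context);
    /* After the batch has been validated and committed: */
    vmw_query_bo_switch_commit(dev_priv, sw_context);
    /* Or, if submission fails before that point: */
    vmw_query_switch_backoff(sw_context);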
440 | |||
216 | static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | 441 | static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, |
217 | struct vmw_sw_context *sw_context, | 442 | struct vmw_sw_context *sw_context, |
218 | SVGAGuestPtr *ptr, | 443 | SVGAGuestPtr *ptr, |
@@ -222,8 +447,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | |||
222 | struct ttm_buffer_object *bo; | 447 | struct ttm_buffer_object *bo; |
223 | uint32_t handle = ptr->gmrId; | 448 | uint32_t handle = ptr->gmrId; |
224 | struct vmw_relocation *reloc; | 449 | struct vmw_relocation *reloc; |
225 | uint32_t cur_validate_node; | ||
226 | struct ttm_validate_buffer *val_buf; | ||
227 | int ret; | 450 | int ret; |
228 | 451 | ||
229 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); | 452 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); |
@@ -243,23 +466,11 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | |||
243 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; | 466 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; |
244 | reloc->location = ptr; | 467 | reloc->location = ptr; |
245 | 468 | ||
246 | cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); | 469 | ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC, |
247 | if (unlikely(cur_validate_node >= VMWGFX_MAX_VALIDATIONS)) { | 470 | &reloc->index); |
248 | DRM_ERROR("Max number of DMA buffers per submission" | 471 | if (unlikely(ret != 0)) |
249 | " exceeded.\n"); | ||
250 | ret = -EINVAL; | ||
251 | goto out_no_reloc; | 472 | goto out_no_reloc; |
252 | } | ||
253 | 473 | ||
254 | reloc->index = cur_validate_node; | ||
255 | if (unlikely(cur_validate_node == sw_context->cur_val_buf)) { | ||
256 | val_buf = &sw_context->val_bufs[cur_validate_node]; | ||
257 | val_buf->bo = ttm_bo_reference(bo); | ||
258 | val_buf->usage = TTM_USAGE_READWRITE; | ||
259 | val_buf->new_sync_obj_arg = (void *) DRM_VMW_FENCE_FLAG_EXEC; | ||
260 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); | ||
261 | ++sw_context->cur_val_buf; | ||
262 | } | ||
263 | *vmw_bo_p = vmw_bo; | 474 | *vmw_bo_p = vmw_bo; |
264 | return 0; | 475 | return 0; |
265 | 476 | ||
@@ -291,8 +502,11 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv, | |||
291 | if (unlikely(ret != 0)) | 502 | if (unlikely(ret != 0)) |
292 | return ret; | 503 | return ret; |
293 | 504 | ||
505 | ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid, | ||
506 | &vmw_bo->base, sw_context); | ||
507 | |||
294 | vmw_dmabuf_unreference(&vmw_bo); | 508 | vmw_dmabuf_unreference(&vmw_bo); |
295 | return 0; | 509 | return ret; |
296 | } | 510 | } |
297 | 511 | ||
298 | static int vmw_cmd_wait_query(struct vmw_private *dev_priv, | 512 | static int vmw_cmd_wait_query(struct vmw_private *dev_priv, |
@@ -305,6 +519,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, | |||
305 | SVGA3dCmdWaitForQuery q; | 519 | SVGA3dCmdWaitForQuery q; |
306 | } *cmd; | 520 | } *cmd; |
307 | int ret; | 521 | int ret; |
522 | struct vmw_resource *ctx; | ||
308 | 523 | ||
309 | cmd = container_of(header, struct vmw_query_cmd, header); | 524 | cmd = container_of(header, struct vmw_query_cmd, header); |
310 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 525 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
@@ -318,6 +533,16 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, | |||
318 | return ret; | 533 | return ret; |
319 | 534 | ||
320 | vmw_dmabuf_unreference(&vmw_bo); | 535 | vmw_dmabuf_unreference(&vmw_bo); |
536 | |||
537 | /* | ||
538 | * This wait will act as a barrier for previous waits for this | ||
539 | * context. | ||
540 | */ | ||
541 | |||
542 | ctx = sw_context->cur_ctx; | ||
543 | if (!list_empty(&ctx->query_head)) | ||
544 | list_del_init(&ctx->query_head); | ||
545 | |||
321 | return 0; | 546 | return 0; |
322 | } | 547 | } |
323 | 548 | ||
@@ -350,6 +575,13 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
350 | goto out_no_reloc; | 575 | goto out_no_reloc; |
351 | } | 576 | } |
352 | 577 | ||
578 | ret = vmw_surface_validate(dev_priv, srf); | ||
579 | if (unlikely(ret != 0)) { | ||
580 | if (ret != -ERESTARTSYS) | ||
581 | DRM_ERROR("Could not validate surface.\n"); | ||
582 | goto out_no_validate; | ||
583 | } | ||
584 | |||
353 | /* | 585 | /* |
354 | * Patch command stream with device SID. | 586 | * Patch command stream with device SID. |
355 | */ | 587 | */ |
@@ -359,8 +591,12 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
359 | vmw_dmabuf_unreference(&vmw_bo); | 591 | vmw_dmabuf_unreference(&vmw_bo); |
360 | 592 | ||
361 | res = &srf->res; | 593 | res = &srf->res; |
362 | return vmw_resource_to_validate_list(sw_context, &res); | 594 | vmw_resource_to_validate_list(sw_context, &res); |
363 | 595 | ||
596 | return 0; | ||
597 | |||
598 | out_no_validate: | ||
599 | vmw_surface_unreference(&srf); | ||
364 | out_no_reloc: | 600 | out_no_reloc: |
365 | vmw_dmabuf_unreference(&vmw_bo); | 601 | vmw_dmabuf_unreference(&vmw_bo); |
366 | return ret; | 602 | return ret; |
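
The new out_no_validate label slots into the usual layered-unwind pattern: each label releases exactly the state acquired before the failing step, in reverse order. A minimal sketch with hypothetical demo_take_/demo_release_ helpers:

static int demo_take_first(void);       /* hypothetical */
static int demo_take_second(void);      /* hypothetical */
static void demo_release_first(void);   /* hypothetical */

static int demo_acquire_two(void)
{
    int ret;

    ret = demo_take_first();
    if (unlikely(ret != 0))
        return ret;

    ret = demo_take_second();
    if (unlikely(ret != 0))
        goto out_no_second;

    return 0;

out_no_second:
    demo_release_first();
    return ret;
}
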
@@ -450,6 +686,71 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv, | |||
450 | return 0; | 686 | return 0; |
451 | } | 687 | } |
452 | 688 | ||
689 | static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, | ||
690 | struct vmw_sw_context *sw_context, | ||
691 | void *buf) | ||
692 | { | ||
693 | struct vmw_dma_buffer *vmw_bo; | ||
694 | int ret; | ||
695 | |||
696 | struct { | ||
697 | uint32_t header; | ||
698 | SVGAFifoCmdDefineGMRFB body; | ||
699 | } *cmd = buf; | ||
700 | |||
701 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | ||
702 | &cmd->body.ptr, | ||
703 | &vmw_bo); | ||
704 | if (unlikely(ret != 0)) | ||
705 | return ret; | ||
706 | |||
707 | vmw_dmabuf_unreference(&vmw_bo); | ||
708 | |||
709 | return ret; | ||
710 | } | ||
711 | |||
712 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, | ||
713 | struct vmw_sw_context *sw_context, | ||
714 | void *buf, uint32_t *size) | ||
715 | { | ||
716 | uint32_t size_remaining = *size; | ||
717 | uint32_t cmd_id; | ||
718 | |||
719 | cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); | ||
720 | switch (cmd_id) { | ||
721 | case SVGA_CMD_UPDATE: | ||
722 | *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate); | ||
723 | break; | ||
724 | case SVGA_CMD_DEFINE_GMRFB: | ||
725 | *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB); | ||
726 | break; | ||
727 | case SVGA_CMD_BLIT_GMRFB_TO_SCREEN: | ||
728 | *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen); | ||
729 | break; | ||
730 | case SVGA_CMD_BLIT_SCREEN_TO_GMRFB: | ||
731 | *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB); | ||
732 | break; | ||
733 | default: | ||
734 | DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id); | ||
735 | return -EINVAL; | ||
736 | } | ||
737 | |||
738 | if (*size > size_remaining) { | ||
739 | DRM_ERROR("Invalid SVGA command (size mismatch):" | ||
740 | " %u.\n", cmd_id); | ||
741 | return -EINVAL; | ||
742 | } | ||
743 | |||
744 | if (unlikely(!sw_context->kernel)) { | ||
745 | DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id); | ||
746 | return -EPERM; | ||
747 | } | ||
748 | |||
749 | if (cmd_id == SVGA_CMD_DEFINE_GMRFB) | ||
750 | return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf); | ||
751 | |||
752 | return 0; | ||
753 | } | ||
453 | 754 | ||
454 | typedef int (*vmw_cmd_func) (struct vmw_private *, | 755 | typedef int (*vmw_cmd_func) (struct vmw_private *, |
455 | struct vmw_sw_context *, | 756 | struct vmw_sw_context *, |
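
Each 2D command is sized as one 32-bit id word plus a fixed-size body, which is what lets vmw_cmd_check_not_3d() above bound-check a submission without parsing command bodies. A sketch of walking a stream under that rule; body_size() is a hypothetical stand-in for the switch shown above:

static size_t body_size(uint32_t cmd_id);   /* hypothetical: the switch above */

static int demo_walk_2d_stream(const void *buf, size_t len)
{
    const uint8_t *p = buf;

    while (len >= sizeof(uint32_t)) {
        uint32_t cmd_id = le32_to_cpu(*(const uint32_t *)p);
        size_t body = body_size(cmd_id);
        size_t sz;

        if (body == 0)
            return -EINVAL;     /* unknown id, as in the default: case */
        sz = sizeof(uint32_t) + body;
        if (sz > len)
            return -EINVAL;     /* same size-mismatch rejection as above */
        p += sz;
        len -= sz;
    }
    return 0;
}
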
@@ -502,11 +803,11 @@ static int vmw_cmd_check(struct vmw_private *dev_priv, | |||
502 | SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; | 803 | SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; |
503 | int ret; | 804 | int ret; |
504 | 805 | ||
505 | cmd_id = ((uint32_t *)buf)[0]; | 806 | cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); |
506 | if (cmd_id == SVGA_CMD_UPDATE) { | 807 | /* Handle any non-3D commands */ |
507 | *size = 5 << 2; | 808 | if (unlikely(cmd_id < SVGA_CMD_MAX)) |
508 | return 0; | 809 | return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size); |
509 | } | 810 | |
510 | 811 | ||
511 | cmd_id = le32_to_cpu(header->id); | 812 | cmd_id = le32_to_cpu(header->id); |
512 | *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader); | 813 | *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader); |
@@ -531,9 +832,9 @@ out_err: | |||
531 | 832 | ||
532 | static int vmw_cmd_check_all(struct vmw_private *dev_priv, | 833 | static int vmw_cmd_check_all(struct vmw_private *dev_priv, |
533 | struct vmw_sw_context *sw_context, | 834 | struct vmw_sw_context *sw_context, |
835 | void *buf, | ||
534 | uint32_t size) | 836 | uint32_t size) |
535 | { | 837 | { |
536 | void *buf = sw_context->cmd_bounce; | ||
537 | int32_t cur_size = size; | 838 | int32_t cur_size = size; |
538 | int ret; | 839 | int ret; |
539 | 840 | ||
@@ -582,7 +883,7 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context) | |||
582 | static void vmw_clear_validations(struct vmw_sw_context *sw_context) | 883 | static void vmw_clear_validations(struct vmw_sw_context *sw_context) |
583 | { | 884 | { |
584 | struct ttm_validate_buffer *entry, *next; | 885 | struct ttm_validate_buffer *entry, *next; |
585 | uint32_t i = sw_context->num_ref_resources; | 886 | struct vmw_resource *res, *res_next; |
586 | 887 | ||
587 | /* | 888 | /* |
588 | * Drop references to DMA buffers held during command submission. | 889 | * Drop references to DMA buffers held during command submission. |
@@ -599,9 +900,11 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context) | |||
599 | /* | 900 | /* |
600 | * Drop references to resources held during command submission. | 901 | * Drop references to resources held during command submission. |
601 | */ | 902 | */ |
602 | while (i-- > 0) { | 903 | vmw_resource_unreserve(&sw_context->resource_list); |
603 | sw_context->resources[i]->on_validate_list = false; | 904 | list_for_each_entry_safe(res, res_next, &sw_context->resource_list, |
604 | vmw_resource_unreference(&sw_context->resources[i]); | 905 | validate_head) { |
906 | list_del_init(&res->validate_head); | ||
907 | vmw_resource_unreference(&res); | ||
605 | } | 908 | } |
606 | } | 909 | } |
607 | 910 | ||
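
The teardown above relies on the _safe list iterator, which caches the next node so the current one can be unlinked and its reference dropped mid-walk. A minimal sketch with an illustrative resource type:

struct demo_res {
    struct list_head validate_head;
};

static void demo_drop_all(struct list_head *resource_list)
{
    struct demo_res *res, *next;

    list_for_each_entry_safe(res, next, resource_list, validate_head) {
        list_del_init(&res->validate_head);
        /* drop the reference the list held on res here */
    }
}
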
@@ -610,6 +913,16 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | |||
610 | { | 913 | { |
611 | int ret; | 914 | int ret; |
612 | 915 | ||
916 | |||
917 | /* | ||
918 | * Don't validate pinned buffers. | ||
919 | */ | ||
920 | |||
921 | if (bo == dev_priv->pinned_bo || | ||
922 | (bo == dev_priv->dummy_query_bo && | ||
923 | dev_priv->dummy_query_bo_pinned)) | ||
924 | return 0; | ||
925 | |||
613 | /** | 926 | /** |
614 | * Put BO in VRAM if there is space, otherwise as a GMR. | 927 | * Put BO in VRAM if there is space, otherwise as a GMR. |
615 | * If there is no space in VRAM and GMR ids are all used up, | 928 | * If there is no space in VRAM and GMR ids are all used up, |
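
A sketch of the two-step placement fallback that comment describes, assuming the driver's vmw_vram_gmr_placement and vmw_vram_placement tables and the five-argument ttm_bo_validate() used elsewhere in this series; illustrative, not the literal driver function:

static int demo_validate(struct ttm_buffer_object *bo)
{
    int ret;

    /* Preferred: VRAM if there is room, otherwise a GMR. */
    ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
    if (likely(ret == 0 || ret == -ERESTARTSYS))
        return ret;

    /* Last resort: wait for VRAM space to open up. */
    return ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
}
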
@@ -681,6 +994,9 @@ static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context, | |||
681 | * Creates a fence object and submits a command stream marker. | 994 | * Creates a fence object and submits a command stream marker. |
682 | * If this fails for some reason, we sync the fifo and return NULL. | 995 | * If this fails for some reason, we sync the fifo and return NULL. |
683 | * It is then safe to fence buffers with a NULL pointer. | 996 | * It is then safe to fence buffers with a NULL pointer. |
997 | * | ||
998 | * If @p_handle is not NULL, @file_priv must also not be NULL, and a | ||
999 | * user-space handle is created for the fence; otherwise no handle is created. | ||
684 | */ | 1000 | */ |
685 | 1001 | ||
686 | int vmw_execbuf_fence_commands(struct drm_file *file_priv, | 1002 | int vmw_execbuf_fence_commands(struct drm_file *file_priv, |
@@ -692,6 +1008,8 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv, | |||
692 | int ret; | 1008 | int ret; |
693 | bool synced = false; | 1009 | bool synced = false; |
694 | 1010 | ||
1011 | /* p_handle implies file_priv. */ | ||
1012 | BUG_ON(p_handle != NULL && file_priv == NULL); | ||
695 | 1013 | ||
696 | ret = vmw_fifo_send_fence(dev_priv, &sequence); | 1014 | ret = vmw_fifo_send_fence(dev_priv, &sequence); |
697 | if (unlikely(ret != 0)) { | 1015 | if (unlikely(ret != 0)) { |
@@ -719,69 +1037,61 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv, | |||
719 | return 0; | 1037 | return 0; |
720 | } | 1038 | } |
721 | 1039 | ||
722 | int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | 1040 | int vmw_execbuf_process(struct drm_file *file_priv, |
723 | struct drm_file *file_priv) | 1041 | struct vmw_private *dev_priv, |
1042 | void __user *user_commands, | ||
1043 | void *kernel_commands, | ||
1044 | uint32_t command_size, | ||
1045 | uint64_t throttle_us, | ||
1046 | struct drm_vmw_fence_rep __user *user_fence_rep) | ||
724 | { | 1047 | { |
725 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
726 | struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data; | ||
727 | struct drm_vmw_fence_rep fence_rep; | ||
728 | struct drm_vmw_fence_rep __user *user_fence_rep; | ||
729 | int ret; | ||
730 | void *user_cmd; | ||
731 | void *cmd; | ||
732 | struct vmw_sw_context *sw_context = &dev_priv->ctx; | 1048 | struct vmw_sw_context *sw_context = &dev_priv->ctx; |
733 | struct vmw_master *vmaster = vmw_master(file_priv->master); | 1049 | struct drm_vmw_fence_rep fence_rep; |
734 | struct vmw_fence_obj *fence; | 1050 | struct vmw_fence_obj *fence; |
735 | uint32_t handle; | 1051 | uint32_t handle; |
1052 | void *cmd; | ||
1053 | int ret; | ||
736 | 1054 | ||
737 | /* | 1055 | ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); |
738 | * This will allow us to extend the ioctl argument while | ||
739 | * maintaining backwards compatibility: | ||
740 | * We take different code paths depending on the value of | ||
741 | * arg->version. | ||
742 | */ | ||
743 | |||
744 | if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) { | ||
745 | DRM_ERROR("Incorrect execbuf version.\n"); | ||
746 | DRM_ERROR("You're running outdated experimental " | ||
747 | "vmwgfx user-space drivers."); | ||
748 | return -EINVAL; | ||
749 | } | ||
750 | |||
751 | ret = ttm_read_lock(&vmaster->lock, true); | ||
752 | if (unlikely(ret != 0)) | 1056 | if (unlikely(ret != 0)) |
753 | return ret; | 1057 | return -ERESTARTSYS; |
754 | 1058 | ||
755 | ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); | 1059 | if (kernel_commands == NULL) { |
756 | if (unlikely(ret != 0)) { | 1060 | sw_context->kernel = false; |
757 | ret = -ERESTARTSYS; | ||
758 | goto out_no_cmd_mutex; | ||
759 | } | ||
760 | 1061 | ||
761 | ret = vmw_resize_cmd_bounce(sw_context, arg->command_size); | 1062 | ret = vmw_resize_cmd_bounce(sw_context, command_size); |
762 | if (unlikely(ret != 0)) | 1063 | if (unlikely(ret != 0)) |
763 | goto out_unlock; | 1064 | goto out_unlock; |
764 | 1065 | ||
765 | user_cmd = (void __user *)(unsigned long)arg->commands; | ||
766 | ret = copy_from_user(sw_context->cmd_bounce, | ||
767 | user_cmd, arg->command_size); | ||
768 | 1066 | ||
769 | if (unlikely(ret != 0)) { | 1067 | ret = copy_from_user(sw_context->cmd_bounce, |
770 | ret = -EFAULT; | 1068 | user_commands, command_size); |
771 | DRM_ERROR("Failed copying commands.\n"); | 1069 | |
772 | goto out_unlock; | 1070 | if (unlikely(ret != 0)) { |
773 | } | 1071 | ret = -EFAULT; |
1072 | DRM_ERROR("Failed copying commands.\n"); | ||
1073 | goto out_unlock; | ||
1074 | } | ||
1075 | kernel_commands = sw_context->cmd_bounce; | ||
1076 | } else | ||
1077 | sw_context->kernel = true; | ||
774 | 1078 | ||
775 | sw_context->tfile = vmw_fpriv(file_priv)->tfile; | 1079 | sw_context->tfile = vmw_fpriv(file_priv)->tfile; |
776 | sw_context->cid_valid = false; | 1080 | sw_context->cid_valid = false; |
777 | sw_context->sid_valid = false; | 1081 | sw_context->sid_valid = false; |
778 | sw_context->cur_reloc = 0; | 1082 | sw_context->cur_reloc = 0; |
779 | sw_context->cur_val_buf = 0; | 1083 | sw_context->cur_val_buf = 0; |
780 | sw_context->num_ref_resources = 0; | 1084 | sw_context->fence_flags = 0; |
1085 | INIT_LIST_HEAD(&sw_context->query_list); | ||
1086 | INIT_LIST_HEAD(&sw_context->resource_list); | ||
1087 | sw_context->cur_query_bo = dev_priv->pinned_bo; | ||
1088 | sw_context->cur_query_cid = dev_priv->query_cid; | ||
1089 | sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL); | ||
781 | 1090 | ||
782 | INIT_LIST_HEAD(&sw_context->validate_nodes); | 1091 | INIT_LIST_HEAD(&sw_context->validate_nodes); |
783 | 1092 | ||
784 | ret = vmw_cmd_check_all(dev_priv, sw_context, arg->command_size); | 1093 | ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, |
1094 | command_size); | ||
785 | if (unlikely(ret != 0)) | 1095 | if (unlikely(ret != 0)) |
786 | goto out_err; | 1096 | goto out_err; |
787 | 1097 | ||
@@ -795,26 +1105,25 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
795 | 1105 | ||
796 | vmw_apply_relocations(sw_context); | 1106 | vmw_apply_relocations(sw_context); |
797 | 1107 | ||
798 | if (arg->throttle_us) { | 1108 | if (throttle_us) { |
799 | ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, | 1109 | ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, |
800 | arg->throttle_us); | 1110 | throttle_us); |
801 | 1111 | ||
802 | if (unlikely(ret != 0)) | 1112 | if (unlikely(ret != 0)) |
803 | goto out_throttle; | 1113 | goto out_throttle; |
804 | } | 1114 | } |
805 | 1115 | ||
806 | cmd = vmw_fifo_reserve(dev_priv, arg->command_size); | 1116 | cmd = vmw_fifo_reserve(dev_priv, command_size); |
807 | if (unlikely(cmd == NULL)) { | 1117 | if (unlikely(cmd == NULL)) { |
808 | DRM_ERROR("Failed reserving fifo space for commands.\n"); | 1118 | DRM_ERROR("Failed reserving fifo space for commands.\n"); |
809 | ret = -ENOMEM; | 1119 | ret = -ENOMEM; |
810 | goto out_err; | 1120 | goto out_throttle; |
811 | } | 1121 | } |
812 | 1122 | ||
813 | memcpy(cmd, sw_context->cmd_bounce, arg->command_size); | 1123 | memcpy(cmd, kernel_commands, command_size); |
814 | vmw_fifo_commit(dev_priv, arg->command_size); | 1124 | vmw_fifo_commit(dev_priv, command_size); |
815 | 1125 | ||
816 | user_fence_rep = (struct drm_vmw_fence_rep __user *) | 1126 | vmw_query_bo_switch_commit(dev_priv, sw_context); |
817 | (unsigned long)arg->fence_rep; | ||
818 | ret = vmw_execbuf_fence_commands(file_priv, dev_priv, | 1127 | ret = vmw_execbuf_fence_commands(file_priv, dev_priv, |
819 | &fence, | 1128 | &fence, |
820 | (user_fence_rep) ? &handle : NULL); | 1129 | (user_fence_rep) ? &handle : NULL); |
@@ -831,7 +1140,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
831 | (void *) fence); | 1140 | (void *) fence); |
832 | 1141 | ||
833 | vmw_clear_validations(sw_context); | 1142 | vmw_clear_validations(sw_context); |
834 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
835 | 1143 | ||
836 | if (user_fence_rep) { | 1144 | if (user_fence_rep) { |
837 | fence_rep.error = ret; | 1145 | fence_rep.error = ret; |
@@ -868,17 +1176,165 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
868 | if (likely(fence != NULL)) | 1176 | if (likely(fence != NULL)) |
869 | vmw_fence_obj_unreference(&fence); | 1177 | vmw_fence_obj_unreference(&fence); |
870 | 1178 | ||
871 | vmw_kms_cursor_post_execbuf(dev_priv); | 1179 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
872 | ttm_read_unlock(&vmaster->lock); | ||
873 | return 0; | 1180 | return 0; |
1181 | |||
874 | out_err: | 1182 | out_err: |
875 | vmw_free_relocations(sw_context); | 1183 | vmw_free_relocations(sw_context); |
876 | out_throttle: | 1184 | out_throttle: |
1185 | vmw_query_switch_backoff(sw_context); | ||
877 | ttm_eu_backoff_reservation(&sw_context->validate_nodes); | 1186 | ttm_eu_backoff_reservation(&sw_context->validate_nodes); |
878 | vmw_clear_validations(sw_context); | 1187 | vmw_clear_validations(sw_context); |
879 | out_unlock: | 1188 | out_unlock: |
880 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 1189 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
881 | out_no_cmd_mutex: | 1190 | return ret; |
1191 | } | ||
1192 | |||
1193 | /** | ||
1194 | * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer. | ||
1195 | * | ||
1196 | * @dev_priv: The device private structure. | ||
1197 | * | ||
1198 | * This function is called to idle the fifo and unpin the query buffer | ||
1199 | * if the normal way to do this hits an error, which should typically be | ||
1200 | * extremely rare. | ||
1201 | */ | ||
1202 | static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv) | ||
1203 | { | ||
1204 | DRM_ERROR("Can't unpin query buffer. Trying to recover.\n"); | ||
1205 | |||
1206 | (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ); | ||
1207 | vmw_bo_pin(dev_priv->pinned_bo, false); | ||
1208 | vmw_bo_pin(dev_priv->dummy_query_bo, false); | ||
1209 | dev_priv->dummy_query_bo_pinned = false; | ||
1210 | } | ||
1211 | |||
1212 | |||
1213 | /** | ||
1214 | * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned | ||
1215 | * query bo. | ||
1216 | * | ||
1217 | * @dev_priv: The device private structure. | ||
1218 | * @only_on_cid_match: Only flush and unpin if the current active query cid | ||
1219 | * matches @cid. | ||
1220 | * @cid: Optional context id to match. | ||
1221 | * | ||
1222 | * This function should be used to unpin the pinned query bo, or | ||
1223 | * as a query barrier when we need to make sure that all queries have | ||
1224 | * finished before the next fifo command (for example on hardware | ||
1225 | * context destruction, where the hardware may otherwise leak unfinished | ||
1226 | * queries). | ||
1227 | * | ||
1228 | * This function does not return any failure codes, but makes attempts | ||
1229 | * to do safe unpinning in case of errors. | ||
1230 | * | ||
1231 | * The function will synchronize on the previous query barrier, and will | ||
1232 | * thus not finish until that barrier has executed. | ||
1233 | */ | ||
1234 | void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, | ||
1235 | bool only_on_cid_match, uint32_t cid) | ||
1236 | { | ||
1237 | int ret = 0; | ||
1238 | struct list_head validate_list; | ||
1239 | struct ttm_validate_buffer pinned_val, query_val; | ||
1240 | struct vmw_fence_obj *fence; | ||
1241 | |||
1242 | mutex_lock(&dev_priv->cmdbuf_mutex); | ||
1243 | |||
1244 | if (dev_priv->pinned_bo == NULL) | ||
1245 | goto out_unlock; | ||
1246 | |||
1247 | if (only_on_cid_match && cid != dev_priv->query_cid) | ||
1248 | goto out_unlock; | ||
1249 | |||
1250 | INIT_LIST_HEAD(&validate_list); | ||
1251 | |||
1252 | pinned_val.new_sync_obj_arg = (void *)(unsigned long) | ||
1253 | DRM_VMW_FENCE_FLAG_EXEC; | ||
1254 | pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo); | ||
1255 | list_add_tail(&pinned_val.head, &validate_list); | ||
1256 | |||
1257 | query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg; | ||
1258 | query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo); | ||
1259 | list_add_tail(&query_val.head, &validate_list); | ||
1260 | |||
1261 | do { | ||
1262 | ret = ttm_eu_reserve_buffers(&validate_list); | ||
1263 | } while (ret == -ERESTARTSYS); | ||
1264 | |||
1265 | if (unlikely(ret != 0)) { | ||
1266 | vmw_execbuf_unpin_panic(dev_priv); | ||
1267 | goto out_no_reserve; | ||
1268 | } | ||
1269 | |||
1270 | ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); | ||
1271 | if (unlikely(ret != 0)) { | ||
1272 | vmw_execbuf_unpin_panic(dev_priv); | ||
1273 | goto out_no_emit; | ||
1274 | } | ||
1275 | |||
1276 | vmw_bo_pin(dev_priv->pinned_bo, false); | ||
1277 | vmw_bo_pin(dev_priv->dummy_query_bo, false); | ||
1278 | dev_priv->dummy_query_bo_pinned = false; | ||
1279 | |||
1280 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); | ||
1281 | ttm_eu_fence_buffer_objects(&validate_list, (void *) fence); | ||
1282 | |||
1283 | ttm_bo_unref(&query_val.bo); | ||
1284 | ttm_bo_unref(&pinned_val.bo); | ||
1285 | ttm_bo_unref(&dev_priv->pinned_bo); | ||
1286 | |||
1287 | out_unlock: | ||
1288 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
1289 | return; | ||
1290 | |||
1291 | out_no_emit: | ||
1292 | ttm_eu_backoff_reservation(&validate_list); | ||
1293 | out_no_reserve: | ||
1294 | ttm_bo_unref(&query_val.bo); | ||
1295 | ttm_bo_unref(&pinned_val.bo); | ||
1296 | ttm_bo_unref(&dev_priv->pinned_bo); | ||
1297 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
1298 | } | ||
1299 | |||
1300 | |||
1301 | int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | ||
1302 | struct drm_file *file_priv) | ||
1303 | { | ||
1304 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
1305 | struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data; | ||
1306 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
1307 | int ret; | ||
1308 | |||
1309 | /* | ||
1310 | * This will allow us to extend the ioctl argument while | ||
1311 | * maintaining backwards compatibility: | ||
1312 | * We take different code paths depending on the value of | ||
1313 | * arg->version. | ||
1314 | */ | ||
1315 | |||
1316 | if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) { | ||
1317 | DRM_ERROR("Incorrect execbuf version.\n"); | ||
1318 | DRM_ERROR("You're running outdated experimental " | ||
1319 | "vmwgfx user-space drivers."); | ||
1320 | return -EINVAL; | ||
1321 | } | ||
1322 | |||
1323 | ret = ttm_read_lock(&vmaster->lock, true); | ||
1324 | if (unlikely(ret != 0)) | ||
1325 | return ret; | ||
1326 | |||
1327 | ret = vmw_execbuf_process(file_priv, dev_priv, | ||
1328 | (void __user *)(unsigned long)arg->commands, | ||
1329 | NULL, arg->command_size, arg->throttle_us, | ||
1330 | (void __user *)(unsigned long)arg->fence_rep); | ||
1331 | |||
1332 | if (unlikely(ret != 0)) | ||
1333 | goto out_unlock; | ||
1334 | |||
1335 | vmw_kms_cursor_post_execbuf(dev_priv); | ||
1336 | |||
1337 | out_unlock: | ||
882 | ttm_read_unlock(&vmaster->lock); | 1338 | ttm_read_unlock(&vmaster->lock); |
883 | return ret; | 1339 | return ret; |
884 | } | 1340 | } |
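
Splitting vmw_execbuf_process() out of the ioctl handler is what lets in-kernel callers submit command buffers directly, as the kms dirty paths later in this diff do. A minimal sketch of such a caller, using only the signature introduced here:

static int demo_submit_kernel_cmds(struct drm_file *file_priv,
                                   struct vmw_private *dev_priv,
                                   void *cmd, uint32_t size)
{
    /*
     * user_commands == NULL selects the kernel path: no bounce-buffer
     * copy, and sw_context->kernel permits the privileged 2D commands.
     */
    return vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
                               size, 0, NULL);
}
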
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index b1888e801e22..070797b7b03a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | |||
@@ -592,58 +592,6 @@ int vmw_fb_close(struct vmw_private *vmw_priv) | |||
592 | return 0; | 592 | return 0; |
593 | } | 593 | } |
594 | 594 | ||
595 | int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, | ||
596 | struct vmw_dma_buffer *vmw_bo) | ||
597 | { | ||
598 | struct ttm_buffer_object *bo = &vmw_bo->base; | ||
599 | int ret = 0; | ||
600 | |||
601 | ret = ttm_bo_reserve(bo, false, false, false, 0); | ||
602 | if (unlikely(ret != 0)) | ||
603 | return ret; | ||
604 | |||
605 | ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false); | ||
606 | ttm_bo_unreserve(bo); | ||
607 | |||
608 | return ret; | ||
609 | } | ||
610 | |||
611 | int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, | ||
612 | struct vmw_dma_buffer *vmw_bo) | ||
613 | { | ||
614 | struct ttm_buffer_object *bo = &vmw_bo->base; | ||
615 | struct ttm_placement ne_placement = vmw_vram_ne_placement; | ||
616 | int ret = 0; | ||
617 | |||
618 | ne_placement.lpfn = bo->num_pages; | ||
619 | |||
620 | /* interruptible? */ | ||
621 | ret = ttm_write_lock(&vmw_priv->active_master->lock, false); | ||
622 | if (unlikely(ret != 0)) | ||
623 | return ret; | ||
624 | |||
625 | ret = ttm_bo_reserve(bo, false, false, false, 0); | ||
626 | if (unlikely(ret != 0)) | ||
627 | goto err_unlock; | ||
628 | |||
629 | if (bo->mem.mem_type == TTM_PL_VRAM && | ||
630 | bo->mem.start < bo->num_pages && | ||
631 | bo->mem.start > 0) | ||
632 | (void) ttm_bo_validate(bo, &vmw_sys_placement, false, | ||
633 | false, false); | ||
634 | |||
635 | ret = ttm_bo_validate(bo, &ne_placement, false, false, false); | ||
636 | |||
637 | /* Could probably bug on */ | ||
638 | WARN_ON(bo->offset != 0); | ||
639 | |||
640 | ttm_bo_unreserve(bo); | ||
641 | err_unlock: | ||
642 | ttm_write_unlock(&vmw_priv->active_master->lock); | ||
643 | |||
644 | return ret; | ||
645 | } | ||
646 | |||
647 | int vmw_fb_off(struct vmw_private *vmw_priv) | 595 | int vmw_fb_off(struct vmw_private *vmw_priv) |
648 | { | 596 | { |
649 | struct fb_info *info; | 597 | struct fb_info *info; |
@@ -665,7 +613,7 @@ int vmw_fb_off(struct vmw_private *vmw_priv) | |||
665 | par->bo_ptr = NULL; | 613 | par->bo_ptr = NULL; |
666 | ttm_bo_kunmap(&par->map); | 614 | ttm_bo_kunmap(&par->map); |
667 | 615 | ||
668 | vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo); | 616 | vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false); |
669 | 617 | ||
670 | return 0; | 618 | return 0; |
671 | } | 619 | } |
@@ -691,7 +639,7 @@ int vmw_fb_on(struct vmw_private *vmw_priv) | |||
691 | /* Make sure that all overlays are stopped when we take over */ | 639 | /* Make sure that all overlays are stopped when we take over */ |
692 | vmw_overlay_stop_all(vmw_priv); | 640 | vmw_overlay_stop_all(vmw_priv); |
693 | 641 | ||
694 | ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo); | 642 | ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false); |
695 | if (unlikely(ret != 0)) { | 643 | if (unlikely(ret != 0)) { |
696 | DRM_ERROR("could not move buffer to start of VRAM\n"); | 644 | DRM_ERROR("could not move buffer to start of VRAM\n"); |
697 | goto err_no_buffer; | 645 | goto err_no_buffer; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 5065a140fdf8..5f60be76166e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | |||
@@ -177,6 +177,9 @@ out_unlock: | |||
177 | 177 | ||
178 | struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence) | 178 | struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence) |
179 | { | 179 | { |
180 | if (unlikely(fence == NULL)) | ||
181 | return NULL; | ||
182 | |||
180 | kref_get(&fence->kref); | 183 | kref_get(&fence->kref); |
181 | return fence; | 184 | return fence; |
182 | } | 185 | } |
@@ -191,8 +194,12 @@ struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence) | |||
191 | void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p) | 194 | void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p) |
192 | { | 195 | { |
193 | struct vmw_fence_obj *fence = *fence_p; | 196 | struct vmw_fence_obj *fence = *fence_p; |
194 | struct vmw_fence_manager *fman = fence->fman; | 197 | struct vmw_fence_manager *fman; |
198 | |||
199 | if (unlikely(fence == NULL)) | ||
200 | return; | ||
195 | 201 | ||
202 | fman = fence->fman; | ||
196 | *fence_p = NULL; | 203 | *fence_p = NULL; |
197 | spin_lock_irq(&fman->lock); | 204 | spin_lock_irq(&fman->lock); |
198 | BUG_ON(atomic_read(&fence->kref.refcount) == 0); | 205 | BUG_ON(atomic_read(&fence->kref.refcount) == 0); |
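
With reference and unreference now tolerating NULL fence objects, cleanup paths can drop a fence pointer unconditionally. A sketch; demo_make_fence() is a hypothetical helper that may fail before producing a fence:

static int demo_make_fence(struct vmw_private *dev_priv,
                           struct vmw_fence_obj **fence);  /* hypothetical */

static int demo_fence_user(struct vmw_private *dev_priv)
{
    struct vmw_fence_obj *fence = NULL;
    int ret;

    ret = demo_make_fence(dev_priv, &fence);
    /* No NULL check needed on the way out, even if creation failed: */
    vmw_fence_obj_unreference(&fence);
    return ret;
}
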
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 3ba9cac579e0..62d6377b8ee8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
@@ -45,7 +45,11 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) | |||
45 | if (hwversion == 0) | 45 | if (hwversion == 0) |
46 | return false; | 46 | return false; |
47 | 47 | ||
48 | if (hwversion < SVGA3D_HWVERSION_WS65_B1) | 48 | if (hwversion < SVGA3D_HWVERSION_WS8_B1) |
49 | return false; | ||
50 | |||
51 | /* Non-Screen Object path does not support surfaces */ | ||
52 | if (!dev_priv->sou_priv) | ||
49 | return false; | 53 | return false; |
50 | 54 | ||
51 | return true; | 55 | return true; |
@@ -277,6 +281,16 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, | |||
277 | return ret; | 281 | return ret; |
278 | } | 282 | } |
279 | 283 | ||
284 | /** | ||
285 | * Reserve @bytes bytes in the fifo. | ||
286 | * | ||
287 | * This function will return NULL (error) on two conditions: | ||
288 | * if it times out waiting for fifo space, or if @bytes is larger than the | ||
289 | * available fifo space. | ||
290 | * | ||
291 | * Returns: | ||
292 | * Pointer to the fifo, or NULL on error (possible hardware hang). | ||
293 | */ | ||
280 | void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) | 294 | void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) |
281 | { | 295 | { |
282 | struct vmw_fifo_state *fifo_state = &dev_priv->fifo; | 296 | struct vmw_fifo_state *fifo_state = &dev_priv->fifo; |
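
A sketch of the reserve/commit pairing the comment above implies, built around the SVGA_CMD_UPDATE layout used later in this series; illustrative, not a new driver entry point:

static int demo_send_update(struct vmw_private *dev_priv,
                            uint32_t x, uint32_t y, uint32_t w, uint32_t h)
{
    struct {
        uint32_t header;
        SVGAFifoCmdUpdate body;
    } *cmd;

    cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
    if (unlikely(cmd == NULL))
        return -ENOMEM;     /* timed out, or request too large */

    cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
    cmd->body.x = cpu_to_le32(x);
    cmd->body.y = cpu_to_le32(y);
    cmd->body.width = cpu_to_le32(w);
    cmd->body.height = cpu_to_le32(h);

    vmw_fifo_commit(dev_priv, sizeof(*cmd));
    return 0;
}
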
@@ -491,3 +505,60 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) | |||
491 | out_err: | 505 | out_err: |
492 | return ret; | 506 | return ret; |
493 | } | 507 | } |
508 | |||
509 | /** | ||
510 | * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo. | ||
511 | * | ||
512 | * @dev_priv: The device private structure. | ||
513 | * @cid: The hardware context id used for the query. | ||
514 | * | ||
515 | * This function is used to emit a dummy occlusion query with | ||
516 | * no primitives rendered between query begin and query end. | ||
517 | * It's used to provide a query barrier, in order to know that when | ||
518 | * this query is finished, all preceding queries are also finished. | ||
519 | * | ||
520 | * A query result structure must have been initialized at the start | ||
521 | * of the dev_priv->dummy_query_bo buffer object, and that buffer object | ||
522 | * must also be either reserved or pinned when this function is called. | ||
523 | * | ||
524 | * Returns -ENOMEM on failure to reserve fifo space. | ||
525 | */ | ||
526 | int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, | ||
527 | uint32_t cid) | ||
528 | { | ||
529 | /* | ||
530 | * A query wait without a preceding query end will | ||
531 | * actually finish all queries for this cid | ||
532 | * without writing to the query result structure. | ||
533 | */ | ||
534 | |||
535 | struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; | ||
536 | struct { | ||
537 | SVGA3dCmdHeader header; | ||
538 | SVGA3dCmdWaitForQuery body; | ||
539 | } *cmd; | ||
540 | |||
541 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
542 | |||
543 | if (unlikely(cmd == NULL)) { | ||
544 | DRM_ERROR("Out of fifo space for dummy query.\n"); | ||
545 | return -ENOMEM; | ||
546 | } | ||
547 | |||
548 | cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY; | ||
549 | cmd->header.size = sizeof(cmd->body); | ||
550 | cmd->body.cid = cid; | ||
551 | cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION; | ||
552 | |||
553 | if (bo->mem.mem_type == TTM_PL_VRAM) { | ||
554 | cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER; | ||
555 | cmd->body.guestResult.offset = bo->offset; | ||
556 | } else { | ||
557 | cmd->body.guestResult.gmrId = bo->mem.start; | ||
558 | cmd->body.guestResult.offset = 0; | ||
559 | } | ||
560 | |||
561 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
562 | |||
563 | return 0; | ||
564 | } | ||
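
The guestResult addressing above generalizes to any guest pointer: VRAM is reached through the special SVGA_GMR_FRAMEBUFFER id with a byte offset, while a GMR-backed object uses its GMR id directly. A sketch, assuming the SVGAGuestPtr type from the SVGA headers:

static void demo_set_guest_ptr(SVGAGuestPtr *ptr,
                               const struct ttm_buffer_object *bo)
{
    if (bo->mem.mem_type == TTM_PL_VRAM) {
        ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
        ptr->offset = bo->offset;   /* byte offset into VRAM */
    } else {
        ptr->gmrId = bo->mem.start; /* GMR id doubles as placement */
        ptr->offset = 0;
    }
}
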
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 5ecf96660644..c0284a4784c9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | |||
@@ -27,6 +27,7 @@ | |||
27 | 27 | ||
28 | #include "vmwgfx_drv.h" | 28 | #include "vmwgfx_drv.h" |
29 | #include "vmwgfx_drm.h" | 29 | #include "vmwgfx_drm.h" |
30 | #include "vmwgfx_kms.h" | ||
30 | 31 | ||
31 | int vmw_getparam_ioctl(struct drm_device *dev, void *data, | 32 | int vmw_getparam_ioctl(struct drm_device *dev, void *data, |
32 | struct drm_file *file_priv) | 33 | struct drm_file *file_priv) |
@@ -110,3 +111,174 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, | |||
110 | 111 | ||
111 | return ret; | 112 | return ret; |
112 | } | 113 | } |
114 | |||
115 | int vmw_present_ioctl(struct drm_device *dev, void *data, | ||
116 | struct drm_file *file_priv) | ||
117 | { | ||
118 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
119 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
120 | struct drm_vmw_present_arg *arg = | ||
121 | (struct drm_vmw_present_arg *)data; | ||
122 | struct vmw_surface *surface; | ||
123 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
124 | struct drm_vmw_rect __user *clips_ptr; | ||
125 | struct drm_vmw_rect *clips = NULL; | ||
126 | struct drm_mode_object *obj; | ||
127 | struct vmw_framebuffer *vfb; | ||
128 | uint32_t num_clips; | ||
129 | int ret; | ||
130 | |||
131 | num_clips = arg->num_clips; | ||
132 | clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr; | ||
133 | |||
134 | if (unlikely(num_clips == 0)) | ||
135 | return 0; | ||
136 | |||
137 | if (clips_ptr == NULL) { | ||
138 | DRM_ERROR("Argument clips_ptr must be specified.\n"); | ||
139 | ret = -EINVAL; | ||
140 | goto out_clips; | ||
141 | } | ||
142 | |||
143 | clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); | ||
144 | if (clips == NULL) { | ||
145 | DRM_ERROR("Failed to allocate clip rect list.\n"); | ||
146 | ret = -ENOMEM; | ||
147 | goto out_clips; | ||
148 | } | ||
149 | |||
150 | ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips)); | ||
151 | if (ret) { | ||
152 | DRM_ERROR("Failed to copy clip rects from userspace.\n"); | ||
153 | goto out_no_copy; | ||
154 | } | ||
155 | |||
156 | ret = mutex_lock_interruptible(&dev->mode_config.mutex); | ||
157 | if (unlikely(ret != 0)) { | ||
158 | ret = -ERESTARTSYS; | ||
159 | goto out_no_mode_mutex; | ||
160 | } | ||
161 | |||
162 | obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB); | ||
163 | if (!obj) { | ||
164 | DRM_ERROR("Invalid framebuffer id.\n"); | ||
165 | ret = -EINVAL; | ||
166 | goto out_no_fb; | ||
167 | } | ||
168 | |||
169 | vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj)); | ||
170 | if (!vfb->dmabuf) { | ||
171 | DRM_ERROR("Framebuffer not dmabuf backed.\n"); | ||
172 | ret = -EINVAL; | ||
173 | goto out_no_fb; | ||
174 | } | ||
175 | |||
176 | ret = ttm_read_lock(&vmaster->lock, true); | ||
177 | if (unlikely(ret != 0)) | ||
178 | goto out_no_ttm_lock; | ||
179 | |||
180 | ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid, | ||
181 | &surface); | ||
182 | if (ret) | ||
183 | goto out_no_surface; | ||
184 | |||
185 | ret = vmw_kms_present(dev_priv, file_priv, | ||
186 | vfb, surface, arg->sid, | ||
187 | arg->dest_x, arg->dest_y, | ||
188 | clips, num_clips); | ||
189 | |||
190 | /* vmw_user_surface_lookup takes one ref; so does new_fb */ | ||
191 | vmw_surface_unreference(&surface); | ||
192 | |||
193 | out_no_surface: | ||
194 | ttm_read_unlock(&vmaster->lock); | ||
195 | out_no_ttm_lock: | ||
196 | out_no_fb: | ||
197 | mutex_unlock(&dev->mode_config.mutex); | ||
198 | out_no_mode_mutex: | ||
199 | out_no_copy: | ||
200 | kfree(clips); | ||
201 | out_clips: | ||
202 | return ret; | ||
203 | } | ||
204 | |||
205 | int vmw_present_readback_ioctl(struct drm_device *dev, void *data, | ||
206 | struct drm_file *file_priv) | ||
207 | { | ||
208 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
209 | struct drm_vmw_present_readback_arg *arg = | ||
210 | (struct drm_vmw_present_readback_arg *)data; | ||
211 | struct drm_vmw_fence_rep __user *user_fence_rep = | ||
212 | (struct drm_vmw_fence_rep __user *) | ||
213 | (unsigned long)arg->fence_rep; | ||
214 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
215 | struct drm_vmw_rect __user *clips_ptr; | ||
216 | struct drm_vmw_rect *clips = NULL; | ||
217 | struct drm_mode_object *obj; | ||
218 | struct vmw_framebuffer *vfb; | ||
219 | uint32_t num_clips; | ||
220 | int ret; | ||
221 | |||
222 | num_clips = arg->num_clips; | ||
223 | clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr; | ||
224 | |||
225 | if (unlikely(num_clips == 0)) | ||
226 | return 0; | ||
227 | |||
228 | if (clips_ptr == NULL) { | ||
229 | DRM_ERROR("Argument clips_ptr must be specified.\n"); | ||
230 | ret = -EINVAL; | ||
231 | goto out_clips; | ||
232 | } | ||
233 | |||
234 | clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); | ||
235 | if (clips == NULL) { | ||
236 | DRM_ERROR("Failed to allocate clip rect list.\n"); | ||
237 | ret = -ENOMEM; | ||
238 | goto out_clips; | ||
239 | } | ||
240 | |||
241 | ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips)); | ||
242 | if (ret) { | ||
243 | DRM_ERROR("Failed to copy clip rects from userspace.\n"); | ||
244 | goto out_no_copy; | ||
245 | } | ||
246 | |||
247 | ret = mutex_lock_interruptible(&dev->mode_config.mutex); | ||
248 | if (unlikely(ret != 0)) { | ||
249 | ret = -ERESTARTSYS; | ||
250 | goto out_no_mode_mutex; | ||
251 | } | ||
252 | |||
253 | obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB); | ||
254 | if (!obj) { | ||
255 | DRM_ERROR("Invalid framebuffer id.\n"); | ||
256 | ret = -EINVAL; | ||
257 | goto out_no_fb; | ||
258 | } | ||
259 | |||
260 | vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj)); | ||
261 | if (!vfb->dmabuf) { | ||
262 | DRM_ERROR("Framebuffer not dmabuf backed.\n"); | ||
263 | ret = -EINVAL; | ||
264 | goto out_no_fb; | ||
265 | } | ||
266 | |||
267 | ret = ttm_read_lock(&vmaster->lock, true); | ||
268 | if (unlikely(ret != 0)) | ||
269 | goto out_no_ttm_lock; | ||
270 | |||
271 | ret = vmw_kms_readback(dev_priv, file_priv, | ||
272 | vfb, user_fence_rep, | ||
273 | clips, num_clips); | ||
274 | |||
275 | ttm_read_unlock(&vmaster->lock); | ||
276 | out_no_ttm_lock: | ||
277 | out_no_fb: | ||
278 | mutex_unlock(&dev->mode_config.mutex); | ||
279 | out_no_mode_mutex: | ||
280 | out_no_copy: | ||
281 | kfree(clips); | ||
282 | out_clips: | ||
283 | return ret; | ||
284 | } | ||
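
From user space, the new present ioctl is reached through the usual libdrm command path. A sketch, assuming the DRM_VMW_PRESENT command index and the struct drm_vmw_present_arg layout from vmwgfx_drm.h; error handling elided:

#include <stdint.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

static int demo_present(int fd, uint32_t fb_id, uint32_t sid,
                        struct drm_vmw_rect *clips, uint32_t num_clips)
{
    struct drm_vmw_present_arg arg = {
        .fb_id = fb_id,
        .sid = sid,
        .dest_x = 0,
        .dest_y = 0,
        .clips_ptr = (uint64_t)(uintptr_t)clips,
        .num_clips = num_clips,
    };

    return drmCommandWrite(fd, DRM_VMW_PRESENT, &arg, sizeof(arg));
}
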
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 1a4c84cecca7..fc62c8798c4c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -27,12 +27,10 @@ | |||
27 | 27 | ||
28 | #include "vmwgfx_kms.h" | 28 | #include "vmwgfx_kms.h" |
29 | 29 | ||
30 | |||
30 | /* Might need a hrtimer here? */ | 31 | /* Might need a hrtimer here? */ |
31 | #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) | 32 | #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) |
32 | 33 | ||
33 | static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb); | ||
34 | static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb); | ||
35 | |||
36 | void vmw_display_unit_cleanup(struct vmw_display_unit *du) | 34 | void vmw_display_unit_cleanup(struct vmw_display_unit *du) |
37 | { | 35 | { |
38 | if (du->cursor_surface) | 36 | if (du->cursor_surface) |
@@ -329,41 +327,10 @@ struct vmw_framebuffer_surface { | |||
329 | struct vmw_framebuffer base; | 327 | struct vmw_framebuffer base; |
330 | struct vmw_surface *surface; | 328 | struct vmw_surface *surface; |
331 | struct vmw_dma_buffer *buffer; | 329 | struct vmw_dma_buffer *buffer; |
332 | struct delayed_work d_work; | ||
333 | struct mutex work_lock; | ||
334 | bool present_fs; | ||
335 | struct list_head head; | 330 | struct list_head head; |
336 | struct drm_master *master; | 331 | struct drm_master *master; |
337 | }; | 332 | }; |
338 | 333 | ||
339 | /** | ||
340 | * vmw_kms_idle_workqueues - Flush workqueues on this master | ||
341 | * | ||
342 | * @vmaster - Pointer identifying the master, for the surfaces of which | ||
343 | * we idle the dirty work queues. | ||
344 | * | ||
345 | * This function should be called with the ttm lock held in exclusive mode | ||
346 | * to idle all dirty work queues before the fifo is taken down. | ||
347 | * | ||
348 | * The work task may actually requeue itself, but after the flush returns we're | ||
349 | * sure that there's nothing to present, since the ttm lock is held in | ||
350 | * exclusive mode, so the fifo will never get used. | ||
351 | */ | ||
352 | |||
353 | void vmw_kms_idle_workqueues(struct vmw_master *vmaster) | ||
354 | { | ||
355 | struct vmw_framebuffer_surface *entry; | ||
356 | |||
357 | mutex_lock(&vmaster->fb_surf_mutex); | ||
358 | list_for_each_entry(entry, &vmaster->fb_surf, head) { | ||
359 | if (cancel_delayed_work_sync(&entry->d_work)) | ||
360 | (void) entry->d_work.work.func(&entry->d_work.work); | ||
361 | |||
362 | (void) cancel_delayed_work_sync(&entry->d_work); | ||
363 | } | ||
364 | mutex_unlock(&vmaster->fb_surf_mutex); | ||
365 | } | ||
366 | |||
367 | void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) | 334 | void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) |
368 | { | 335 | { |
369 | struct vmw_framebuffer_surface *vfbs = | 336 | struct vmw_framebuffer_surface *vfbs = |
@@ -375,64 +342,69 @@ void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) | |||
375 | list_del(&vfbs->head); | 342 | list_del(&vfbs->head); |
376 | mutex_unlock(&vmaster->fb_surf_mutex); | 343 | mutex_unlock(&vmaster->fb_surf_mutex); |
377 | 344 | ||
378 | cancel_delayed_work_sync(&vfbs->d_work); | ||
379 | drm_master_put(&vfbs->master); | 345 | drm_master_put(&vfbs->master); |
380 | drm_framebuffer_cleanup(framebuffer); | 346 | drm_framebuffer_cleanup(framebuffer); |
381 | vmw_surface_unreference(&vfbs->surface); | 347 | vmw_surface_unreference(&vfbs->surface); |
348 | ttm_base_object_unref(&vfbs->base.user_obj); | ||
382 | 349 | ||
383 | kfree(vfbs); | 350 | kfree(vfbs); |
384 | } | 351 | } |
385 | 352 | ||
386 | static void vmw_framebuffer_present_fs_callback(struct work_struct *work) | 353 | static int do_surface_dirty_sou(struct vmw_private *dev_priv, |
354 | struct drm_file *file_priv, | ||
355 | struct vmw_framebuffer *framebuffer, | ||
356 | struct vmw_surface *surf, | ||
357 | unsigned flags, unsigned color, | ||
358 | struct drm_clip_rect *clips, | ||
359 | unsigned num_clips, int inc) | ||
387 | { | 360 | { |
388 | struct delayed_work *d_work = | 361 | int left = clips->x2, right = clips->x1; |
389 | container_of(work, struct delayed_work, work); | 362 | int top = clips->y2, bottom = clips->y1; |
390 | struct vmw_framebuffer_surface *vfbs = | 363 | size_t fifo_size; |
391 | container_of(d_work, struct vmw_framebuffer_surface, d_work); | 364 | int i, ret; |
392 | struct vmw_surface *surf = vfbs->surface; | ||
393 | struct drm_framebuffer *framebuffer = &vfbs->base.base; | ||
394 | struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); | ||
395 | 365 | ||
396 | struct { | 366 | struct { |
397 | SVGA3dCmdHeader header; | 367 | SVGA3dCmdHeader header; |
398 | SVGA3dCmdPresent body; | 368 | SVGA3dCmdBlitSurfaceToScreen body; |
399 | SVGA3dCopyRect cr; | ||
400 | } *cmd; | 369 | } *cmd; |
401 | 370 | ||
402 | /** | ||
403 | * Strictly we should take the ttm_lock in read mode before accessing | ||
404 | * the fifo, to make sure the fifo is present and up. However, | ||
405 | * instead we flush all workqueues under the ttm lock in exclusive mode | ||
406 | * before taking down the fifo. | ||
407 | */ | ||
408 | mutex_lock(&vfbs->work_lock); | ||
409 | if (!vfbs->present_fs) | ||
410 | goto out_unlock; | ||
411 | |||
412 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
413 | if (unlikely(cmd == NULL)) | ||
414 | goto out_resched; | ||
415 | |||
416 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT); | ||
417 | cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr)); | ||
418 | cmd->body.sid = cpu_to_le32(surf->res.id); | ||
419 | cmd->cr.x = cpu_to_le32(0); | ||
420 | cmd->cr.y = cpu_to_le32(0); | ||
421 | cmd->cr.srcx = cmd->cr.x; | ||
422 | cmd->cr.srcy = cmd->cr.y; | ||
423 | cmd->cr.w = cpu_to_le32(framebuffer->width); | ||
424 | cmd->cr.h = cpu_to_le32(framebuffer->height); | ||
425 | vfbs->present_fs = false; | ||
426 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
427 | out_resched: | ||
428 | /** | ||
429 | * Will not re-add if already pending. | ||
430 | */ | ||
431 | schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE); | ||
432 | out_unlock: | ||
433 | mutex_unlock(&vfbs->work_lock); | ||
434 | } | ||
435 | 371 | ||
372 | fifo_size = sizeof(*cmd); | ||
373 | cmd = kzalloc(fifo_size, GFP_KERNEL); | ||
374 | if (unlikely(cmd == NULL)) { | ||
375 | DRM_ERROR("Temporary fifo memory alloc failed.\n"); | ||
376 | return -ENOMEM; | ||
377 | } | ||
378 | |||
379 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN); | ||
380 | cmd->header.size = cpu_to_le32(sizeof(cmd->body)); | ||
381 | |||
382 | cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle); | ||
383 | cmd->body.destScreenId = SVGA_ID_INVALID; /* virtual coords */ | ||
384 | |||
385 | for (i = 0; i < num_clips; i++, clips += inc) { | ||
386 | left = min_t(int, left, (int)clips->x1); | ||
387 | right = max_t(int, right, (int)clips->x2); | ||
388 | top = min_t(int, top, (int)clips->y1); | ||
389 | bottom = max_t(int, bottom, (int)clips->y2); | ||
390 | } | ||
391 | |||
392 | cmd->body.srcRect.left = left; | ||
393 | cmd->body.srcRect.right = right; | ||
394 | cmd->body.srcRect.top = top; | ||
395 | cmd->body.srcRect.bottom = bottom; | ||
396 | |||
397 | cmd->body.destRect.left = left; | ||
398 | cmd->body.destRect.right = right; | ||
399 | cmd->body.destRect.top = top; | ||
400 | cmd->body.destRect.bottom = bottom; | ||
401 | |||
402 | ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size, | ||
403 | 0, NULL); | ||
404 | kfree(cmd); | ||
405 | |||
406 | return ret; | ||
407 | } | ||
436 | 408 | ||
437 | int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | 409 | int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, |
438 | struct drm_file *file_priv, | 410 | struct drm_file *file_priv, |
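
The min/max walk in do_surface_dirty_sou() above collapses all clip rectangles into one bounding box, so a single blit covers every dirty region. The same computation in isolation, seeded from the first clip (struct demo_bbox is illustrative):

struct demo_bbox {
    int left, right, top, bottom;
};

static struct demo_bbox demo_clip_bounds(const struct drm_clip_rect *clips,
                                         unsigned int num_clips, int inc)
{
    struct demo_bbox b = {
        .left = clips->x1, .right = clips->x2,
        .top = clips->y1, .bottom = clips->y2,
    };
    unsigned int i;

    for (i = 0; i < num_clips; i++, clips += inc) {
        b.left = min_t(int, b.left, (int)clips->x1);
        b.right = max_t(int, b.right, (int)clips->x2);
        b.top = min_t(int, b.top, (int)clips->y1);
        b.bottom = max_t(int, b.bottom, (int)clips->y2);
    }
    return b;
}
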
@@ -446,42 +418,19 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | |||
446 | vmw_framebuffer_to_vfbs(framebuffer); | 418 | vmw_framebuffer_to_vfbs(framebuffer); |
447 | struct vmw_surface *surf = vfbs->surface; | 419 | struct vmw_surface *surf = vfbs->surface; |
448 | struct drm_clip_rect norect; | 420 | struct drm_clip_rect norect; |
449 | SVGA3dCopyRect *cr; | 421 | int ret, inc = 1; |
450 | int i, inc = 1; | ||
451 | int ret; | ||
452 | |||
453 | struct { | ||
454 | SVGA3dCmdHeader header; | ||
455 | SVGA3dCmdPresent body; | ||
456 | SVGA3dCopyRect cr; | ||
457 | } *cmd; | ||
458 | 422 | ||
459 | if (unlikely(vfbs->master != file_priv->master)) | 423 | if (unlikely(vfbs->master != file_priv->master)) |
460 | return -EINVAL; | 424 | return -EINVAL; |
461 | 425 | ||
426 | /* Require ScreenObject support for 3D */ | ||
427 | if (!dev_priv->sou_priv) | ||
428 | return -EINVAL; | ||
429 | |||
462 | ret = ttm_read_lock(&vmaster->lock, true); | 430 | ret = ttm_read_lock(&vmaster->lock, true); |
463 | if (unlikely(ret != 0)) | 431 | if (unlikely(ret != 0)) |
464 | return ret; | 432 | return ret; |
465 | 433 | ||
466 | if (!num_clips || | ||
467 | !(dev_priv->fifo.capabilities & | ||
468 | SVGA_FIFO_CAP_SCREEN_OBJECT)) { | ||
469 | int ret; | ||
470 | |||
471 | mutex_lock(&vfbs->work_lock); | ||
472 | vfbs->present_fs = true; | ||
473 | ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE); | ||
474 | mutex_unlock(&vfbs->work_lock); | ||
475 | if (ret) { | ||
476 | /** | ||
477 | * No work pending, Force immediate present. | ||
478 | */ | ||
479 | vmw_framebuffer_present_fs_callback(&vfbs->d_work.work); | ||
480 | } | ||
481 | ttm_read_unlock(&vmaster->lock); | ||
482 | return 0; | ||
483 | } | ||
484 | |||
485 | if (!num_clips) { | 434 | if (!num_clips) { |
486 | num_clips = 1; | 435 | num_clips = 1; |
487 | clips = &norect; | 436 | clips = &norect; |
@@ -493,29 +442,10 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | |||
493 | inc = 2; /* skip source rects */ | 442 | inc = 2; /* skip source rects */ |
494 | } | 443 | } |
495 | 444 | ||
496 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr)); | 445 | ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base, surf, |
497 | if (unlikely(cmd == NULL)) { | 446 | flags, color, |
498 | DRM_ERROR("Fifo reserve failed.\n"); | 447 | clips, num_clips, inc); |
499 | ttm_read_unlock(&vmaster->lock); | ||
500 | return -ENOMEM; | ||
501 | } | ||
502 | |||
503 | memset(cmd, 0, sizeof(*cmd)); | ||
504 | |||
505 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT); | ||
506 | cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr)); | ||
507 | cmd->body.sid = cpu_to_le32(surf->res.id); | ||
508 | |||
509 | for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) { | ||
510 | cr->x = cpu_to_le16(clips->x1); | ||
511 | cr->y = cpu_to_le16(clips->y1); | ||
512 | cr->srcx = cr->x; | ||
513 | cr->srcy = cr->y; | ||
514 | cr->w = cpu_to_le16(clips->x2 - clips->x1); | ||
515 | cr->h = cpu_to_le16(clips->y2 - clips->y1); | ||
516 | } | ||
517 | 448 | ||
518 | vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr)); | ||
519 | ttm_read_unlock(&vmaster->lock); | 449 | ttm_read_unlock(&vmaster->lock); |
520 | return 0; | 450 | return 0; |
521 | } | 451 | } |
@@ -540,6 +470,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, | |||
540 | struct vmw_master *vmaster = vmw_master(file_priv->master); | 470 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
541 | int ret; | 471 | int ret; |
542 | 472 | ||
473 | /* 3D is only supported on HWv8 hosts, which support screen objects */ | ||
474 | if (!dev_priv->sou_priv) | ||
475 | return -ENOSYS; | ||
476 | |||
543 | /* | 477 | /* |
544 | * Sanity checks. | 478 | * Sanity checks. |
545 | */ | 479 | */ |
@@ -602,14 +536,11 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, | |||
602 | vfbs->base.base.depth = mode_cmd->depth; | 536 | vfbs->base.base.depth = mode_cmd->depth; |
603 | vfbs->base.base.width = mode_cmd->width; | 537 | vfbs->base.base.width = mode_cmd->width; |
604 | vfbs->base.base.height = mode_cmd->height; | 538 | vfbs->base.base.height = mode_cmd->height; |
605 | vfbs->base.pin = &vmw_surface_dmabuf_pin; | ||
606 | vfbs->base.unpin = &vmw_surface_dmabuf_unpin; | ||
607 | vfbs->surface = surface; | 539 | vfbs->surface = surface; |
540 | vfbs->base.user_handle = mode_cmd->handle; | ||
608 | vfbs->master = drm_master_get(file_priv->master); | 541 | vfbs->master = drm_master_get(file_priv->master); |
609 | mutex_init(&vfbs->work_lock); | ||
610 | 542 | ||
611 | mutex_lock(&vmaster->fb_surf_mutex); | 543 | mutex_lock(&vmaster->fb_surf_mutex); |
612 | INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback); | ||
613 | list_add_tail(&vfbs->head, &vmaster->fb_surf); | 544 | list_add_tail(&vfbs->head, &vmaster->fb_surf); |
614 | mutex_unlock(&vmaster->fb_surf_mutex); | 545 | mutex_unlock(&vmaster->fb_surf_mutex); |
615 | 546 | ||
@@ -644,48 +575,34 @@ void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) | |||
644 | 575 | ||
645 | drm_framebuffer_cleanup(framebuffer); | 576 | drm_framebuffer_cleanup(framebuffer); |
646 | vmw_dmabuf_unreference(&vfbd->buffer); | 577 | vmw_dmabuf_unreference(&vfbd->buffer); |
578 | ttm_base_object_unref(&vfbd->base.user_obj); | ||
647 | 579 | ||
648 | kfree(vfbd); | 580 | kfree(vfbd); |
649 | } | 581 | } |
650 | 582 | ||
651 | int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | 583 | static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv, |
652 | struct drm_file *file_priv, | 584 | struct vmw_framebuffer *framebuffer, |
653 | unsigned flags, unsigned color, | 585 | struct vmw_dma_buffer *buffer, |
654 | struct drm_clip_rect *clips, | 586 | unsigned flags, unsigned color, |
655 | unsigned num_clips) | 587 | struct drm_clip_rect *clips, |
588 | unsigned num_clips, int increment) | ||
656 | { | 589 | { |
657 | struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); | 590 | size_t fifo_size; |
658 | struct vmw_master *vmaster = vmw_master(file_priv->master); | 591 | int i; |
659 | struct drm_clip_rect norect; | 592 | |
660 | int ret; | ||
661 | struct { | 593 | struct { |
662 | uint32_t header; | 594 | uint32_t header; |
663 | SVGAFifoCmdUpdate body; | 595 | SVGAFifoCmdUpdate body; |
664 | } *cmd; | 596 | } *cmd; |
665 | int i, increment = 1; | ||
666 | |||
667 | ret = ttm_read_lock(&vmaster->lock, true); | ||
668 | if (unlikely(ret != 0)) | ||
669 | return ret; | ||
670 | 597 | ||
671 | if (!num_clips) { | 598 | fifo_size = sizeof(*cmd) * num_clips; |
672 | num_clips = 1; | 599 | cmd = vmw_fifo_reserve(dev_priv, fifo_size); |
673 | clips = &norect; | ||
674 | norect.x1 = norect.y1 = 0; | ||
675 | norect.x2 = framebuffer->width; | ||
676 | norect.y2 = framebuffer->height; | ||
677 | } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) { | ||
678 | num_clips /= 2; | ||
679 | increment = 2; | ||
680 | } | ||
681 | |||
682 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips); | ||
683 | if (unlikely(cmd == NULL)) { | 600 | if (unlikely(cmd == NULL)) { |
684 | DRM_ERROR("Fifo reserve failed.\n"); | 601 | DRM_ERROR("Fifo reserve failed.\n"); |
685 | ttm_read_unlock(&vmaster->lock); | ||
686 | return -ENOMEM; | 602 | return -ENOMEM; |
687 | } | 603 | } |
688 | 604 | ||
605 | memset(cmd, 0, fifo_size); | ||
689 | for (i = 0; i < num_clips; i++, clips += increment) { | 606 | for (i = 0; i < num_clips; i++, clips += increment) { |
690 | cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE); | 607 | cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE); |
691 | cmd[i].body.x = cpu_to_le32(clips->x1); | 608 | cmd[i].body.x = cpu_to_le32(clips->x1); |
@@ -694,57 +611,117 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | |||
694 | cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1); | 611 | cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1); |
695 | } | 612 | } |
696 | 613 | ||
697 | vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips); | 614 | vmw_fifo_commit(dev_priv, fifo_size); |
698 | ttm_read_unlock(&vmaster->lock); | ||
699 | |||
700 | return 0; | 615 | return 0; |
701 | } | 616 | } |
702 | 617 | ||
703 | static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = { | 618 | static int do_dmabuf_dirty_sou(struct drm_file *file_priv, |
704 | .destroy = vmw_framebuffer_dmabuf_destroy, | 619 | struct vmw_private *dev_priv, |
705 | .dirty = vmw_framebuffer_dmabuf_dirty, | 620 | struct vmw_framebuffer *framebuffer, |
706 | .create_handle = vmw_framebuffer_create_handle, | 621 | struct vmw_dma_buffer *buffer, |
707 | }; | 622 | unsigned flags, unsigned color, |
708 | 623 | struct drm_clip_rect *clips, | |
709 | static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb) | 624 | unsigned num_clips, int increment) |
710 | { | 625 | { |
711 | struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); | 626 | size_t fifo_size; |
712 | struct vmw_framebuffer_surface *vfbs = | 627 | int i, ret; |
713 | vmw_framebuffer_to_vfbs(&vfb->base); | 628 | |
714 | unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height; | 629 | struct { |
715 | int ret; | 630 | uint32_t header; |
631 | SVGAFifoCmdDefineGMRFB body; | ||
632 | } *cmd; | ||
633 | struct { | ||
634 | uint32_t header; | ||
635 | SVGAFifoCmdBlitGMRFBToScreen body; | ||
636 | } *blits; | ||
716 | 637 | ||
717 | vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL); | 638 | fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips; |
718 | if (unlikely(vfbs->buffer == NULL)) | 639 | cmd = kmalloc(fifo_size, GFP_KERNEL); |
640 | if (unlikely(cmd == NULL)) { | ||
641 | DRM_ERROR("Failed to allocate temporary cmd buffer.\n"); | ||
719 | return -ENOMEM; | 642 | return -ENOMEM; |
643 | } | ||
720 | 644 | ||
721 | vmw_overlay_pause_all(dev_priv); | 645 | memset(cmd, 0, fifo_size); |
722 | ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size, | 646 | cmd->header = SVGA_CMD_DEFINE_GMRFB; |
723 | &vmw_vram_ne_placement, | 647 | cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel; |
724 | false, &vmw_dmabuf_bo_free); | 648 | cmd->body.format.colorDepth = framebuffer->base.depth; |
725 | vmw_overlay_resume_all(dev_priv); | 649 | cmd->body.format.reserved = 0; |
726 | if (unlikely(ret != 0)) | 650 | cmd->body.bytesPerLine = framebuffer->base.pitch; |
727 | vfbs->buffer = NULL; | 651 | cmd->body.ptr.gmrId = framebuffer->user_handle; |
652 | cmd->body.ptr.offset = 0; | ||
653 | |||
654 | blits = (void *)&cmd[1]; | ||
655 | for (i = 0; i < num_clips; i++, clips += increment) { | ||
656 | blits[i].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN; | ||
657 | blits[i].body.srcOrigin.x = clips->x1; | ||
658 | blits[i].body.srcOrigin.y = clips->y1; | ||
659 | blits[i].body.destRect.left = clips->x1; | ||
660 | blits[i].body.destRect.top = clips->y1; | ||
661 | blits[i].body.destRect.right = clips->x2; | ||
662 | blits[i].body.destRect.bottom = clips->y2; | ||
663 | } | ||
664 | |||
665 | ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, | ||
666 | fifo_size, 0, NULL); | ||
667 | |||
668 | kfree(cmd); | ||
728 | 669 | ||
729 | return ret; | 670 | return ret; |
730 | } | 671 | } |
731 | 672 | ||
732 | static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb) | 673 | int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, |
674 | struct drm_file *file_priv, | ||
675 | unsigned flags, unsigned color, | ||
676 | struct drm_clip_rect *clips, | ||
677 | unsigned num_clips) | ||
733 | { | 678 | { |
734 | struct ttm_buffer_object *bo; | 679 | struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); |
735 | struct vmw_framebuffer_surface *vfbs = | 680 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
736 | vmw_framebuffer_to_vfbs(&vfb->base); | 681 | struct vmw_framebuffer_dmabuf *vfbd = |
682 | vmw_framebuffer_to_vfbd(framebuffer); | ||
683 | struct vmw_dma_buffer *dmabuf = vfbd->buffer; | ||
684 | struct drm_clip_rect norect; | ||
685 | int ret, increment = 1; | ||
737 | 686 | ||
738 | if (unlikely(vfbs->buffer == NULL)) | 687 | ret = ttm_read_lock(&vmaster->lock, true); |
739 | return 0; | 688 | if (unlikely(ret != 0)) |
689 | return ret; | ||
740 | 690 | ||
741 | bo = &vfbs->buffer->base; | 691 | if (!num_clips) { |
742 | ttm_bo_unref(&bo); | 692 | num_clips = 1; |
743 | vfbs->buffer = NULL; | 693 | clips = &norect; |
694 | norect.x1 = norect.y1 = 0; | ||
695 | norect.x2 = framebuffer->width; | ||
696 | norect.y2 = framebuffer->height; | ||
697 | } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) { | ||
698 | num_clips /= 2; | ||
699 | increment = 2; | ||
700 | } | ||
744 | 701 | ||
745 | return 0; | 702 | if (dev_priv->ldu_priv) { |
703 | ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base, dmabuf, | ||
704 | flags, color, | ||
705 | clips, num_clips, increment); | ||
706 | } else { | ||
707 | ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base, | ||
708 | dmabuf, flags, color, | ||
709 | clips, num_clips, increment); | ||
710 | } | ||
711 | |||
712 | ttm_read_unlock(&vmaster->lock); | ||
713 | return ret; | ||
746 | } | 714 | } |
747 | 715 | ||
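Before dispatching to the LDU or screen-object path, the dirty handler normalizes its input: an empty clip list means the whole framebuffer is dirty, and DRM_MODE_FB_DIRTY_ANNOTATE_COPY marks the list as (src, dst) pairs, so only every second rect is damage. A hedged sketch of just that normalization, with a local rect type standing in for struct drm_clip_rect and an assumed flag value:

        struct rect { int x1, y1, x2, y2; };

        #define ANNOTATE_COPY 0x02  /* assumed mirror of DRM_MODE_FB_DIRTY_ANNOTATE_COPY */

        static void normalize_clips(struct rect **clips, unsigned *num_clips,
                                    int *increment, struct rect *norect,
                                    int fb_width, int fb_height, unsigned flags)
        {
                *increment = 1;
                if (*num_clips == 0) {
                        norect->x1 = norect->y1 = 0;    /* whole framebuffer */
                        norect->x2 = fb_width;
                        norect->y2 = fb_height;
                        *clips = norect;
                        *num_clips = 1;
                } else if (flags & ANNOTATE_COPY) {
                        *num_clips /= 2;        /* rects come in (src, dst) pairs */
                        *increment = 2;         /* step over one rect of each pair */
                }
        }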
716 | static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = { | ||
717 | .destroy = vmw_framebuffer_dmabuf_destroy, | ||
718 | .dirty = vmw_framebuffer_dmabuf_dirty, | ||
719 | .create_handle = vmw_framebuffer_create_handle, | ||
720 | }; | ||
721 | |||
722 | /** | ||
723 | * Pin the dma buffer to the start of vram. | ||
724 | */ | ||
748 | static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) | 725 | static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) |
749 | { | 726 | { |
750 | struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); | 727 | struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); |
@@ -752,10 +729,12 @@ static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) | |||
752 | vmw_framebuffer_to_vfbd(&vfb->base); | 729 | vmw_framebuffer_to_vfbd(&vfb->base); |
753 | int ret; | 730 | int ret; |
754 | 731 | ||
732 | /* This code should not be used with screen objects */ | ||
733 | BUG_ON(dev_priv->sou_priv); | ||
755 | 734 | ||
756 | vmw_overlay_pause_all(dev_priv); | 735 | vmw_overlay_pause_all(dev_priv); |
757 | 736 | ||
758 | ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer); | 737 | ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false); |
759 | 738 | ||
760 | vmw_overlay_resume_all(dev_priv); | 739 | vmw_overlay_resume_all(dev_priv); |
761 | 740 | ||
@@ -775,7 +754,7 @@ static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb) | |||
775 | return 0; | 754 | return 0; |
776 | } | 755 | } |
777 | 756 | ||
778 | return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer); | 757 | return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false); |
779 | } | 758 | } |
780 | 759 | ||
781 | static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, | 760 | static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, |
@@ -797,6 +776,33 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, | |||
797 | return -EINVAL; | 776 | return -EINVAL; |
798 | } | 777 | } |
799 | 778 | ||
779 | /* Limited framebuffer color depth support for screen objects */ | ||
780 | if (dev_priv->sou_priv) { | ||
781 | switch (mode_cmd->depth) { | ||
782 | case 32: | ||
783 | case 24: | ||
784 | /* Only support 32 bpp for 32 and 24 depth fbs */ | ||
785 | if (mode_cmd->bpp == 32) | ||
786 | break; | ||
787 | |||
788 | DRM_ERROR("Invalid color depth/bpp: %d %d\n", | ||
789 | mode_cmd->depth, mode_cmd->bpp); | ||
790 | return -EINVAL; | ||
791 | case 16: | ||
792 | case 15: | ||
793 | /* Only support 16 bpp for 16 and 15 depth fbs */ | ||
794 | if (mode_cmd->bpp == 16) | ||
795 | break; | ||
796 | |||
797 | DRM_ERROR("Invalid color depth/bpp: %d %d\n", | ||
798 | mode_cmd->depth, mode_cmd->bpp); | ||
799 | return -EINVAL; | ||
800 | default: | ||
801 | DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth); | ||
802 | return -EINVAL; | ||
803 | } | ||
804 | } | ||
805 | |||
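The switch above accepts exactly two pixel layouts for screen objects: 32 bpp backing depth-32 and depth-24 framebuffers, and 16 bpp backing depth-16 and depth-15. The same rule as a standalone predicate (illustrative, not the driver's helper):

        #include <stdbool.h>

        static bool sou_depth_bpp_ok(unsigned depth, unsigned bpp)
        {
                switch (depth) {
                case 32:
                case 24:
                        return bpp == 32;       /* xrgb8888 / argb8888 style */
                case 16:
                case 15:
                        return bpp == 16;       /* rgb565 / xrgb1555 style */
                default:
                        return false;
                }
        }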
800 | vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL); | 806 | vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL); |
801 | if (!vfbd) { | 807 | if (!vfbd) { |
802 | ret = -ENOMEM; | 808 | ret = -ENOMEM; |
@@ -818,9 +824,13 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, | |||
818 | vfbd->base.base.depth = mode_cmd->depth; | 824 | vfbd->base.base.depth = mode_cmd->depth; |
819 | vfbd->base.base.width = mode_cmd->width; | 825 | vfbd->base.base.width = mode_cmd->width; |
820 | vfbd->base.base.height = mode_cmd->height; | 826 | vfbd->base.base.height = mode_cmd->height; |
821 | vfbd->base.pin = vmw_framebuffer_dmabuf_pin; | 827 | if (!dev_priv->sou_priv) { |
822 | vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin; | 828 | vfbd->base.pin = vmw_framebuffer_dmabuf_pin; |
829 | vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin; | ||
830 | } | ||
831 | vfbd->base.dmabuf = true; | ||
823 | vfbd->buffer = dmabuf; | 832 | vfbd->buffer = dmabuf; |
833 | vfbd->base.user_handle = mode_cmd->handle; | ||
824 | *out = &vfbd->base; | 834 | *out = &vfbd->base; |
825 | 835 | ||
826 | return 0; | 836 | return 0; |
@@ -846,6 +856,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, | |||
846 | struct vmw_framebuffer *vfb = NULL; | 856 | struct vmw_framebuffer *vfb = NULL; |
847 | struct vmw_surface *surface = NULL; | 857 | struct vmw_surface *surface = NULL; |
848 | struct vmw_dma_buffer *bo = NULL; | 858 | struct vmw_dma_buffer *bo = NULL; |
859 | struct ttm_base_object *user_obj; | ||
849 | u64 required_size; | 860 | u64 required_size; |
850 | int ret; | 861 | int ret; |
851 | 862 | ||
@@ -861,6 +872,21 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, | |||
861 | return NULL; | 872 | return NULL; |
862 | } | 873 | } |
863 | 874 | ||
875 | /* | ||
876 | * Take a reference on the user object of the resource | ||
877 | * backing the kms fb. This ensures that user-space handle | ||
878 | * lookups on that resource will always work as long as | ||
879 | * it's registered with a kms framebuffer. This is important, | ||
880 | * since vmw_execbuf_process identifies resources in the | ||
881 | * command stream using user-space handles. | ||
882 | */ | ||
883 | |||
884 | user_obj = ttm_base_object_lookup(tfile, mode_cmd->handle); | ||
885 | if (unlikely(user_obj == NULL)) { | ||
886 | DRM_ERROR("Could not locate requested kms frame buffer.\n"); | ||
887 | return ERR_PTR(-ENOENT); | ||
888 | } | ||
889 | |||
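The rule the comment block describes is an acquire/release pairing: every exit path after ttm_base_object_lookup() must either hand the reference over to the framebuffer (vfb->user_obj) or drop it with ttm_base_object_unref(), which is exactly what the error paths below do. A generic, self-contained sketch of the ownership hand-off with hypothetical names:

        #include <stddef.h>

        struct obj { int refcount; };
        struct fb  { struct obj *user_obj; };

        static struct obj pool;                 /* toy object store */
        static struct obj *lookup(unsigned handle)      /* returns a counted ref */
        { (void)handle; pool.refcount++; return &pool; }
        static void unref(struct obj **o)
        { if (*o) { (*o)->refcount--; *o = NULL; } }

        static int fb_create(struct fb *fb, unsigned handle, int setup_fails)
        {
                struct obj *o = lookup(handle);
                if (o == NULL)
                        return -1;      /* nothing acquired, nothing to drop */
                if (setup_fails) {
                        unref(&o);      /* error path: give the reference back */
                        return -1;
                }
                fb->user_obj = o;       /* success: fb now owns the reference */
                return 0;
        }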
864 | /** | 890 | /** |
865 | * End conditioned code. | 891 | * End conditioned code. |
866 | */ | 892 | */ |
@@ -881,8 +907,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, | |||
881 | 907 | ||
882 | if (ret) { | 908 | if (ret) { |
883 | DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); | 909 | DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); |
910 | ttm_base_object_unref(&user_obj); | ||
884 | return ERR_PTR(ret); | 911 | return ERR_PTR(ret); |
885 | } | 912 | } else |
913 | vfb->user_obj = user_obj; | ||
886 | return &vfb->base; | 914 | return &vfb->base; |
887 | 915 | ||
888 | try_dmabuf: | 916 | try_dmabuf: |
@@ -902,8 +930,10 @@ try_dmabuf: | |||
902 | 930 | ||
903 | if (ret) { | 931 | if (ret) { |
904 | DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); | 932 | DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); |
933 | ttm_base_object_unref(&user_obj); | ||
905 | return ERR_PTR(ret); | 934 | return ERR_PTR(ret); |
906 | } | 935 | } else |
936 | vfb->user_obj = user_obj; | ||
907 | 937 | ||
908 | return &vfb->base; | 938 | return &vfb->base; |
909 | 939 | ||
@@ -911,6 +941,7 @@ err_not_scanout: | |||
911 | DRM_ERROR("surface not marked as scanout\n"); | 941 | DRM_ERROR("surface not marked as scanout\n"); |
912 | /* vmw_user_surface_lookup takes one ref */ | 942 | /* vmw_user_surface_lookup takes one ref */ |
913 | vmw_surface_unreference(&surface); | 943 | vmw_surface_unreference(&surface); |
944 | ttm_base_object_unref(&user_obj); | ||
914 | 945 | ||
915 | return ERR_PTR(-EINVAL); | 946 | return ERR_PTR(-EINVAL); |
916 | } | 947 | } |
@@ -919,6 +950,175 @@ static struct drm_mode_config_funcs vmw_kms_funcs = { | |||
919 | .fb_create = vmw_kms_fb_create, | 950 | .fb_create = vmw_kms_fb_create, |
920 | }; | 951 | }; |
921 | 952 | ||
953 | int vmw_kms_present(struct vmw_private *dev_priv, | ||
954 | struct drm_file *file_priv, | ||
955 | struct vmw_framebuffer *vfb, | ||
956 | struct vmw_surface *surface, | ||
957 | uint32_t sid, | ||
958 | int32_t destX, int32_t destY, | ||
959 | struct drm_vmw_rect *clips, | ||
960 | uint32_t num_clips) | ||
961 | { | ||
962 | size_t fifo_size; | ||
963 | int i, ret; | ||
964 | |||
965 | struct { | ||
966 | SVGA3dCmdHeader header; | ||
967 | SVGA3dCmdBlitSurfaceToScreen body; | ||
968 | } *cmd; | ||
969 | SVGASignedRect *blits; | ||
970 | |||
971 | BUG_ON(surface == NULL); | ||
972 | BUG_ON(!clips || !num_clips); | ||
973 | |||
974 | fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips; | ||
975 | cmd = kmalloc(fifo_size, GFP_KERNEL); | ||
976 | if (unlikely(cmd == NULL)) { | ||
977 | DRM_ERROR("Failed to allocate temporary fifo memory.\n"); | ||
978 | return -ENOMEM; | ||
979 | } | ||
980 | |||
981 | memset(cmd, 0, fifo_size); | ||
982 | |||
983 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN); | ||
984 | cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header)); | ||
985 | |||
986 | cmd->body.srcImage.sid = sid; | ||
987 | cmd->body.destScreenId = SVGA_ID_INVALID; /* virtual coords */ | ||
988 | |||
989 | cmd->body.srcRect.left = 0; | ||
990 | cmd->body.srcRect.right = surface->sizes[0].width; | ||
991 | cmd->body.srcRect.top = 0; | ||
992 | cmd->body.srcRect.bottom = surface->sizes[0].height; | ||
993 | |||
994 | cmd->body.destRect.left = destX; | ||
995 | cmd->body.destRect.right = destX + surface->sizes[0].width; | ||
996 | cmd->body.destRect.top = destY; | ||
997 | cmd->body.destRect.bottom = destY + surface->sizes[0].height; | ||
998 | |||
999 | blits = (SVGASignedRect *)&cmd[1]; | ||
1000 | for (i = 0; i < num_clips; i++) { | ||
1001 | blits[i].left = clips[i].x; | ||
1002 | blits[i].right = clips[i].x + clips[i].w; | ||
1003 | blits[i].top = clips[i].y; | ||
1004 | blits[i].bottom = clips[i].y + clips[i].h; | ||
1005 | } | ||
1006 | |||
1007 | ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, | ||
1008 | fifo_size, 0, NULL); | ||
1009 | |||
1010 | kfree(cmd); | ||
1011 | |||
1012 | return ret; | ||
1013 | } | ||
1014 | |||
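One detail worth calling out in vmw_kms_present(): SVGA 3D commands are self-sizing, so header.size must count everything after the header, trailing blit rects included, which is why it is derived from fifo_size rather than sizeof(cmd->body). In isolation:

        #include <stddef.h>
        #include <stdint.h>

        struct hdr3d { uint32_t id, size; };    /* stand-in for SVGA3dCmdHeader */

        /* size covers the body plus the trailing clip rects, not the header */
        static uint32_t body_size(size_t fifo_size)
        {
                return (uint32_t)(fifo_size - sizeof(struct hdr3d));
        }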
1015 | int vmw_kms_readback(struct vmw_private *dev_priv, | ||
1016 | struct drm_file *file_priv, | ||
1017 | struct vmw_framebuffer *vfb, | ||
1018 | struct drm_vmw_fence_rep __user *user_fence_rep, | ||
1019 | struct drm_vmw_rect *clips, | ||
1020 | uint32_t num_clips) | ||
1021 | { | ||
1022 | struct vmw_framebuffer_dmabuf *vfbd = | ||
1023 | vmw_framebuffer_to_vfbd(&vfb->base); | ||
1024 | struct vmw_dma_buffer *dmabuf = vfbd->buffer; | ||
1025 | struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; | ||
1026 | struct drm_crtc *crtc; | ||
1027 | size_t fifo_size; | ||
1028 | int i, k, ret, num_units, blits_pos; | ||
1029 | |||
1030 | struct { | ||
1031 | uint32_t header; | ||
1032 | SVGAFifoCmdDefineGMRFB body; | ||
1033 | } *cmd; | ||
1034 | struct { | ||
1035 | uint32_t header; | ||
1036 | SVGAFifoCmdBlitScreenToGMRFB body; | ||
1037 | } *blits; | ||
1038 | |||
1039 | num_units = 0; | ||
1040 | list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) { | ||
1041 | if (crtc->fb != &vfb->base) | ||
1042 | continue; | ||
1043 | units[num_units++] = vmw_crtc_to_du(crtc); | ||
1044 | } | ||
1045 | |||
1046 | BUG_ON(dmabuf == NULL); | ||
1047 | BUG_ON(!clips || !num_clips); | ||
1048 | |||
1049 | /* take a safe upper bound for the fifo size */ | ||
1050 | fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units; | ||
1051 | cmd = kmalloc(fifo_size, GFP_KERNEL); | ||
1052 | if (unlikely(cmd == NULL)) { | ||
1053 | DRM_ERROR("Failed to allocate temporary fifo memory.\n"); | ||
1054 | return -ENOMEM; | ||
1055 | } | ||
1056 | |||
1057 | memset(cmd, 0, fifo_size); | ||
1058 | cmd->header = SVGA_CMD_DEFINE_GMRFB; | ||
1059 | cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel; | ||
1060 | cmd->body.format.colorDepth = vfb->base.depth; | ||
1061 | cmd->body.format.reserved = 0; | ||
1062 | cmd->body.bytesPerLine = vfb->base.pitch; | ||
1063 | cmd->body.ptr.gmrId = vfb->user_handle; | ||
1064 | cmd->body.ptr.offset = 0; | ||
1065 | |||
1066 | blits = (void *)&cmd[1]; | ||
1067 | blits_pos = 0; | ||
1068 | for (i = 0; i < num_units; i++) { | ||
1069 | struct drm_vmw_rect *c = clips; | ||
1070 | for (k = 0; k < num_clips; k++, c++) { | ||
1071 | /* transform clip coords to crtc origin based coords */ | ||
1072 | int clip_x1 = c->x - units[i]->crtc.x; | ||
1073 | int clip_x2 = c->x - units[i]->crtc.x + c->w; | ||
1074 | int clip_y1 = c->y - units[i]->crtc.y; | ||
1075 | int clip_y2 = c->y - units[i]->crtc.y + c->h; | ||
1076 | int dest_x = c->x; | ||
1077 | int dest_y = c->y; | ||
1078 | |||
1079 | /* compensate for clipping: shift the destination | ||
1080 | * by the amount clamped off the source's left/top. | ||
1081 | */ | ||
1082 | if (clip_x1 < 0) | ||
1083 | dest_x += -clip_x1; | ||
1084 | if (clip_y1 < 0) | ||
1085 | dest_y += -clip_y1; | ||
1086 | |||
1087 | /* clip */ | ||
1088 | clip_x1 = max(clip_x1, 0); | ||
1089 | clip_y1 = max(clip_y1, 0); | ||
1090 | clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay); | ||
1091 | clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay); | ||
1092 | |||
1093 | /* and cull any rects that miss the crtc */ | ||
1094 | if (clip_x1 >= units[i]->crtc.mode.hdisplay || | ||
1095 | clip_y1 >= units[i]->crtc.mode.vdisplay || | ||
1096 | clip_x2 <= 0 || clip_y2 <= 0) | ||
1097 | continue; | ||
1098 | |||
1099 | blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB; | ||
1100 | blits[blits_pos].body.srcScreenId = units[i]->unit; | ||
1101 | blits[blits_pos].body.destOrigin.x = dest_x; | ||
1102 | blits[blits_pos].body.destOrigin.y = dest_y; | ||
1103 | |||
1104 | blits[blits_pos].body.srcRect.left = clip_x1; | ||
1105 | blits[blits_pos].body.srcRect.top = clip_y1; | ||
1106 | blits[blits_pos].body.srcRect.right = clip_x2; | ||
1107 | blits[blits_pos].body.srcRect.bottom = clip_y2; | ||
1108 | blits_pos++; | ||
1109 | } | ||
1110 | } | ||
1111 | /* recompute the exact fifo size from the blits actually emitted */ | ||
1112 | fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos; | ||
1113 | |||
1114 | ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size, | ||
1115 | 0, user_fence_rep); | ||
1116 | |||
1117 | kfree(cmd); | ||
1118 | |||
1119 | return ret; | ||
1120 | } | ||
1121 | |||
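The per-clip transform in the loop above does three things: shift a virtual-coordinate rect into crtc-local space, advance the destination origin by whatever was clamped off the left/top edge, and cull rects that end up entirely outside the crtc. The arithmetic as a standalone, testable function:

        /* Returns 1 if the rect intersects the crtc, 0 if it is culled. */
        static int clip_to_crtc(int crtc_x, int crtc_y, int hdisplay, int vdisplay,
                                int x, int y, int w, int h,
                                int *x1, int *y1, int *x2, int *y2,
                                int *dest_x, int *dest_y)
        {
                *x1 = x - crtc_x;
                *y1 = y - crtc_y;
                *x2 = *x1 + w;
                *y2 = *y1 + h;
                *dest_x = x;
                *dest_y = y;

                if (*x1 < 0) *dest_x += -*x1;   /* compensate dest for left clamp */
                if (*y1 < 0) *dest_y += -*y1;   /* compensate dest for top clamp */

                if (*x1 < 0) *x1 = 0;           /* clip to the crtc bounds */
                if (*y1 < 0) *y1 = 0;
                if (*x2 > hdisplay) *x2 = hdisplay;
                if (*y2 > vdisplay) *y2 = vdisplay;

                return !(*x1 >= hdisplay || *y1 >= vdisplay ||
                         *x2 <= 0 || *y2 <= 0);
        }

Note also that fifo_size is recomputed from blits_pos before submission, so culled rects never pad the command stream.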
922 | int vmw_kms_init(struct vmw_private *dev_priv) | 1122 | int vmw_kms_init(struct vmw_private *dev_priv) |
923 | { | 1123 | { |
924 | struct drm_device *dev = dev_priv->dev; | 1124 | struct drm_device *dev = dev_priv->dev; |
@@ -932,7 +1132,9 @@ int vmw_kms_init(struct vmw_private *dev_priv) | |||
932 | dev->mode_config.max_width = 8192; | 1132 | dev->mode_config.max_width = 8192; |
933 | dev->mode_config.max_height = 8192; | 1133 | dev->mode_config.max_height = 8192; |
934 | 1134 | ||
935 | ret = vmw_kms_init_legacy_display_system(dev_priv); | 1135 | ret = vmw_kms_init_screen_object_display(dev_priv); |
1136 | if (ret) /* Fallback */ | ||
1137 | (void)vmw_kms_init_legacy_display_system(dev_priv); | ||
936 | 1138 | ||
937 | return 0; | 1139 | return 0; |
938 | } | 1140 | } |
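Display initialization now tries screen objects first and only falls back to the legacy system if that fails; the fallback's own return value is deliberately discarded, so vmw_kms_init() reports success either way. The shape of the pattern, with stub initializers:

        static int init_screen_objects(void) { return -1; }    /* e.g. no HW support */
        static int init_legacy(void)         { return 0; }

        static int init_display(void)
        {
                if (init_screen_objects() == 0)
                        return 0;
                /* Fallback: mirror the driver's (void) cast; a legacy
                 * failure is intentionally not propagated here. */
                (void)init_legacy();
                return 0;
        }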
@@ -1103,3 +1305,242 @@ u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc) | |||
1103 | { | 1305 | { |
1104 | return 0; | 1306 | return 0; |
1105 | } | 1307 | } |
1308 | |||
1309 | |||
1310 | /* | ||
1311 | * Small shared kms functions. | ||
1312 | */ | ||
1313 | |||
1314 | int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, | ||
1315 | struct drm_vmw_rect *rects) | ||
1316 | { | ||
1317 | struct drm_device *dev = dev_priv->dev; | ||
1318 | struct vmw_display_unit *du; | ||
1319 | struct drm_connector *con; | ||
1320 | |||
1321 | mutex_lock(&dev->mode_config.mutex); | ||
1322 | |||
1323 | #if 0 | ||
1324 | { | ||
1325 | unsigned int i; | ||
1326 | |||
1327 | DRM_INFO("%s: new layout ", __func__); | ||
1328 | for (i = 0; i < num; i++) | ||
1329 | DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y, | ||
1330 | rects[i].w, rects[i].h); | ||
1331 | DRM_INFO("\n"); | ||
1332 | } | ||
1333 | #endif | ||
1334 | |||
1335 | list_for_each_entry(con, &dev->mode_config.connector_list, head) { | ||
1336 | du = vmw_connector_to_du(con); | ||
1337 | if (num > du->unit) { | ||
1338 | du->pref_width = rects[du->unit].w; | ||
1339 | du->pref_height = rects[du->unit].h; | ||
1340 | du->pref_active = true; | ||
1341 | } else { | ||
1342 | du->pref_width = 800; | ||
1343 | du->pref_height = 600; | ||
1344 | du->pref_active = false; | ||
1345 | } | ||
1346 | con->status = vmw_du_connector_detect(con, true); | ||
1347 | } | ||
1348 | |||
1349 | mutex_unlock(&dev->mode_config.mutex); | ||
1350 | |||
1351 | return 0; | ||
1352 | } | ||
1353 | |||
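Each connector picks its preferred size from the rect whose index matches its unit number; units beyond the new layout fall back to an inactive 800x600 default. The indexing rule on its own:

        struct layout_rect { int x, y; unsigned w, h; };

        static void pref_size_for_unit(unsigned unit, unsigned num,
                                       const struct layout_rect *rects,
                                       unsigned *w, unsigned *h, int *active)
        {
                if (unit < num) {       /* same test as num > du->unit */
                        *w = rects[unit].w;
                        *h = rects[unit].h;
                        *active = 1;
                } else {
                        *w = 800;       /* default when the unit has no rect */
                        *h = 600;
                        *active = 0;
                }
        }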
1354 | void vmw_du_crtc_save(struct drm_crtc *crtc) | ||
1355 | { | ||
1356 | } | ||
1357 | |||
1358 | void vmw_du_crtc_restore(struct drm_crtc *crtc) | ||
1359 | { | ||
1360 | } | ||
1361 | |||
1362 | void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, | ||
1363 | u16 *r, u16 *g, u16 *b, | ||
1364 | uint32_t start, uint32_t size) | ||
1365 | { | ||
1366 | struct vmw_private *dev_priv = vmw_priv(crtc->dev); | ||
1367 | int i; | ||
1368 | |||
1369 | for (i = 0; i < size; i++) { | ||
1370 | DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i, | ||
1371 | r[i], g[i], b[i]); | ||
1372 | vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8); | ||
1373 | vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8); | ||
1374 | vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8); | ||
1375 | } | ||
1376 | } | ||
1377 | |||
1378 | void vmw_du_connector_dpms(struct drm_connector *connector, int mode) | ||
1379 | { | ||
1380 | } | ||
1381 | |||
1382 | void vmw_du_connector_save(struct drm_connector *connector) | ||
1383 | { | ||
1384 | } | ||
1385 | |||
1386 | void vmw_du_connector_restore(struct drm_connector *connector) | ||
1387 | { | ||
1388 | } | ||
1389 | |||
1390 | enum drm_connector_status | ||
1391 | vmw_du_connector_detect(struct drm_connector *connector, bool force) | ||
1392 | { | ||
1393 | uint32_t num_displays; | ||
1394 | struct drm_device *dev = connector->dev; | ||
1395 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
1396 | |||
1397 | mutex_lock(&dev_priv->hw_mutex); | ||
1398 | num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); | ||
1399 | mutex_unlock(&dev_priv->hw_mutex); | ||
1400 | |||
1401 | return ((vmw_connector_to_du(connector)->unit < num_displays) ? | ||
1402 | connector_status_connected : connector_status_disconnected); | ||
1403 | } | ||
1404 | |||
1405 | static struct drm_display_mode vmw_kms_connector_builtin[] = { | ||
1406 | /* 640x480@60Hz */ | ||
1407 | { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, | ||
1408 | 752, 800, 0, 480, 489, 492, 525, 0, | ||
1409 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, | ||
1410 | /* 800x600@60Hz */ | ||
1411 | { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, | ||
1412 | 968, 1056, 0, 600, 601, 605, 628, 0, | ||
1413 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1414 | /* 1024x768@60Hz */ | ||
1415 | { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, | ||
1416 | 1184, 1344, 0, 768, 771, 777, 806, 0, | ||
1417 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, | ||
1418 | /* 1152x864@75Hz */ | ||
1419 | { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, | ||
1420 | 1344, 1600, 0, 864, 865, 868, 900, 0, | ||
1421 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1422 | /* 1280x768@60Hz */ | ||
1423 | { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344, | ||
1424 | 1472, 1664, 0, 768, 771, 778, 798, 0, | ||
1425 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1426 | /* 1280x800@60Hz */ | ||
1427 | { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352, | ||
1428 | 1480, 1680, 0, 800, 803, 809, 831, 0, | ||
1429 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, | ||
1430 | /* 1280x960@60Hz */ | ||
1431 | { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376, | ||
1432 | 1488, 1800, 0, 960, 961, 964, 1000, 0, | ||
1433 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1434 | /* 1280x1024@60Hz */ | ||
1435 | { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328, | ||
1436 | 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, | ||
1437 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1438 | /* 1360x768@60Hz */ | ||
1439 | { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424, | ||
1440 | 1536, 1792, 0, 768, 771, 777, 795, 0, | ||
1441 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1442 | /* 1400x1050@60Hz */ | ||
1443 | { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488, | ||
1444 | 1632, 1864, 0, 1050, 1053, 1057, 1089, 0, | ||
1445 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1446 | /* 1440x900@60Hz */ | ||
1447 | { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520, | ||
1448 | 1672, 1904, 0, 900, 903, 909, 934, 0, | ||
1449 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1450 | /* 1600x1200@60Hz */ | ||
1451 | { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664, | ||
1452 | 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, | ||
1453 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1454 | /* 1680x1050@60Hz */ | ||
1455 | { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784, | ||
1456 | 1960, 2240, 0, 1050, 1053, 1059, 1089, 0, | ||
1457 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1458 | /* 1792x1344@60Hz */ | ||
1459 | { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920, | ||
1460 | 2120, 2448, 0, 1344, 1345, 1348, 1394, 0, | ||
1461 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1462 | /* 1856x1392@60Hz */ | ||
1463 | { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952, | ||
1464 | 2176, 2528, 0, 1392, 1393, 1396, 1439, 0, | ||
1465 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1466 | /* 1920x1200@60Hz */ | ||
1467 | { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056, | ||
1468 | 2256, 2592, 0, 1200, 1203, 1209, 1245, 0, | ||
1469 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1470 | /* 1920x1440@60Hz */ | ||
1471 | { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048, | ||
1472 | 2256, 2600, 0, 1440, 1441, 1444, 1500, 0, | ||
1473 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1474 | /* 2560x1600@60Hz */ | ||
1475 | { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752, | ||
1476 | 3032, 3504, 0, 1600, 1603, 1609, 1658, 0, | ||
1477 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
1478 | /* Terminate */ | ||
1479 | { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) }, | ||
1480 | }; | ||
1481 | |||
1482 | int vmw_du_connector_fill_modes(struct drm_connector *connector, | ||
1483 | uint32_t max_width, uint32_t max_height) | ||
1484 | { | ||
1485 | struct vmw_display_unit *du = vmw_connector_to_du(connector); | ||
1486 | struct drm_device *dev = connector->dev; | ||
1487 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
1488 | struct drm_display_mode *mode = NULL; | ||
1489 | struct drm_display_mode *bmode; | ||
1490 | struct drm_display_mode prefmode = { DRM_MODE("preferred", | ||
1491 | DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, | ||
1492 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
1493 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) | ||
1494 | }; | ||
1495 | int i; | ||
1496 | |||
1497 | /* Add preferred mode */ | ||
1498 | { | ||
1499 | mode = drm_mode_duplicate(dev, &prefmode); | ||
1500 | if (!mode) | ||
1501 | return 0; | ||
1502 | mode->hdisplay = du->pref_width; | ||
1503 | mode->vdisplay = du->pref_height; | ||
1504 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
1505 | if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2, | ||
1506 | mode->vdisplay)) { | ||
1507 | drm_mode_probed_add(connector, mode); | ||
1508 | |||
1509 | if (du->pref_mode) { | ||
1510 | list_del_init(&du->pref_mode->head); | ||
1511 | drm_mode_destroy(dev, du->pref_mode); | ||
1512 | } | ||
1513 | |||
1514 | du->pref_mode = mode; | ||
1515 | } | ||
1516 | } | ||
1517 | |||
1518 | for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) { | ||
1519 | bmode = &vmw_kms_connector_builtin[i]; | ||
1520 | if (bmode->hdisplay > max_width || | ||
1521 | bmode->vdisplay > max_height) | ||
1522 | continue; | ||
1523 | |||
1524 | if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2, | ||
1525 | bmode->vdisplay)) | ||
1526 | continue; | ||
1527 | |||
1528 | mode = drm_mode_duplicate(dev, bmode); | ||
1529 | if (!mode) | ||
1530 | return 0; | ||
1531 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
1532 | |||
1533 | drm_mode_probed_add(connector, mode); | ||
1534 | } | ||
1535 | |||
1536 | drm_mode_connector_list_update(connector); | ||
1537 | |||
1538 | return 1; | ||
1539 | } | ||
1540 | |||
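Both the preferred mode and every builtin mode are screened with vmw_kms_validate_mode_vram() before being added, passing hdisplay * 2 as the first argument, which reads as a conservative two-bytes-per-pixel pitch estimate; that interpretation is an assumption here. A sketch of a fit check under that assumption:

        #include <stdbool.h>
        #include <stdint.h>

        /* Assumed model: a mode fits if pitch * height fits in vram,
         * with pitch estimated at 2 bytes per pixel as in hdisplay * 2. */
        static bool mode_fits_vram(uint32_t hdisplay, uint32_t vdisplay,
                                   uint64_t vram_size)
        {
                uint64_t pitch = (uint64_t)hdisplay * 2;
                return pitch * vdisplay <= vram_size;
        }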
1541 | int vmw_du_connector_set_property(struct drm_connector *connector, | ||
1542 | struct drm_property *property, | ||
1543 | uint64_t val) | ||
1544 | { | ||
1545 | return 0; | ||
1546 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 8a398a0339b6..db0b901f8c3f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | |||
@@ -31,6 +31,8 @@ | |||
31 | #include "drmP.h" | 31 | #include "drmP.h" |
32 | #include "vmwgfx_drv.h" | 32 | #include "vmwgfx_drv.h" |
33 | 33 | ||
34 | #define VMWGFX_NUM_DISPLAY_UNITS 8 | ||
35 | |||
34 | 36 | ||
35 | #define vmw_framebuffer_to_vfb(x) \ | 37 | #define vmw_framebuffer_to_vfb(x) \ |
36 | container_of(x, struct vmw_framebuffer, base) | 38 | container_of(x, struct vmw_framebuffer, base) |
@@ -45,6 +47,9 @@ struct vmw_framebuffer { | |||
45 | struct drm_framebuffer base; | 47 | struct drm_framebuffer base; |
46 | int (*pin)(struct vmw_framebuffer *fb); | 48 | int (*pin)(struct vmw_framebuffer *fb); |
47 | int (*unpin)(struct vmw_framebuffer *fb); | 49 | int (*unpin)(struct vmw_framebuffer *fb); |
50 | bool dmabuf; | ||
51 | struct ttm_base_object *user_obj; | ||
52 | uint32_t user_handle; | ||
48 | }; | 53 | }; |
49 | 54 | ||
50 | 55 | ||
@@ -83,22 +88,59 @@ struct vmw_display_unit { | |||
83 | int hotspot_y; | 88 | int hotspot_y; |
84 | 89 | ||
85 | unsigned unit; | 90 | unsigned unit; |
91 | |||
92 | /* | ||
93 | * Preferred mode tracking. | ||
94 | */ | ||
95 | unsigned pref_width; | ||
96 | unsigned pref_height; | ||
97 | bool pref_active; | ||
98 | struct drm_display_mode *pref_mode; | ||
86 | }; | 99 | }; |
87 | 100 | ||
101 | #define vmw_crtc_to_du(x) \ | ||
102 | container_of(x, struct vmw_display_unit, crtc) | ||
103 | #define vmw_connector_to_du(x) \ | ||
104 | container_of(x, struct vmw_display_unit, connector) | ||
105 | |||
106 | |||
88 | /* | 107 | /* |
89 | * Shared display unit functions - vmwgfx_kms.c | 108 | * Shared display unit functions - vmwgfx_kms.c |
90 | */ | 109 | */ |
91 | void vmw_display_unit_cleanup(struct vmw_display_unit *du); | 110 | void vmw_display_unit_cleanup(struct vmw_display_unit *du); |
111 | void vmw_du_crtc_save(struct drm_crtc *crtc); | ||
112 | void vmw_du_crtc_restore(struct drm_crtc *crtc); | ||
113 | void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, | ||
114 | u16 *r, u16 *g, u16 *b, | ||
115 | uint32_t start, uint32_t size); | ||
92 | int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | 116 | int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, |
93 | uint32_t handle, uint32_t width, uint32_t height); | 117 | uint32_t handle, uint32_t width, uint32_t height); |
94 | int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); | 118 | int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); |
119 | void vmw_du_connector_dpms(struct drm_connector *connector, int mode); | ||
120 | void vmw_du_connector_save(struct drm_connector *connector); | ||
121 | void vmw_du_connector_restore(struct drm_connector *connector); | ||
122 | enum drm_connector_status | ||
123 | vmw_du_connector_detect(struct drm_connector *connector, bool force); | ||
124 | int vmw_du_connector_fill_modes(struct drm_connector *connector, | ||
125 | uint32_t max_width, uint32_t max_height); | ||
126 | int vmw_du_connector_set_property(struct drm_connector *connector, | ||
127 | struct drm_property *property, | ||
128 | uint64_t val); | ||
129 | int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, | ||
130 | struct drm_vmw_rect *rects); | ||
95 | 131 | ||
96 | /* | 132 | /* |
97 | * Legacy display unit functions - vmwgfx_ldu.c | 133 | * Legacy display unit functions - vmwgfx_ldu.c |
98 | */ | 134 | */ |
99 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv); | 135 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv); |
100 | int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv); | 136 | int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv); |
101 | int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num, | 137 | |
138 | /* | ||
139 | * Screen Objects display functions - vmwgfx_scrn.c | ||
140 | */ | ||
141 | int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv); | ||
142 | int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv); | ||
143 | int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num, | ||
102 | struct drm_vmw_rect *rects); | 144 | struct drm_vmw_rect *rects); |
103 | 145 | ||
104 | #endif | 146 | #endif |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 7e1901c4f065..7fc8e7de180b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | |||
@@ -27,7 +27,6 @@ | |||
27 | 27 | ||
28 | #include "vmwgfx_kms.h" | 28 | #include "vmwgfx_kms.h" |
29 | 29 | ||
30 | #define VMWGFX_LDU_NUM_DU 8 | ||
31 | 30 | ||
32 | #define vmw_crtc_to_ldu(x) \ | 31 | #define vmw_crtc_to_ldu(x) \ |
33 | container_of(x, struct vmw_legacy_display_unit, base.crtc) | 32 | container_of(x, struct vmw_legacy_display_unit, base.crtc) |
@@ -51,11 +50,6 @@ struct vmw_legacy_display { | |||
51 | struct vmw_legacy_display_unit { | 50 | struct vmw_legacy_display_unit { |
52 | struct vmw_display_unit base; | 51 | struct vmw_display_unit base; |
53 | 52 | ||
54 | unsigned pref_width; | ||
55 | unsigned pref_height; | ||
56 | bool pref_active; | ||
57 | struct drm_display_mode *pref_mode; | ||
58 | |||
59 | struct list_head active; | 53 | struct list_head active; |
60 | }; | 54 | }; |
61 | 55 | ||
@@ -71,29 +65,6 @@ static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu) | |||
71 | * Legacy Display Unit CRTC functions | 65 | * Legacy Display Unit CRTC functions |
72 | */ | 66 | */ |
73 | 67 | ||
74 | static void vmw_ldu_crtc_save(struct drm_crtc *crtc) | ||
75 | { | ||
76 | } | ||
77 | |||
78 | static void vmw_ldu_crtc_restore(struct drm_crtc *crtc) | ||
79 | { | ||
80 | } | ||
81 | |||
82 | static void vmw_ldu_crtc_gamma_set(struct drm_crtc *crtc, | ||
83 | u16 *r, u16 *g, u16 *b, | ||
84 | uint32_t start, uint32_t size) | ||
85 | { | ||
86 | struct vmw_private *dev_priv = vmw_priv(crtc->dev); | ||
87 | int i; | ||
88 | |||
89 | for (i = 0; i < size; i++) { | ||
90 | DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i, r[i], g[i], b[i]); | ||
91 | vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8); | ||
92 | vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8); | ||
93 | vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8); | ||
94 | } | ||
95 | } | ||
96 | |||
97 | static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc) | 68 | static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc) |
98 | { | 69 | { |
99 | vmw_ldu_destroy(vmw_crtc_to_ldu(crtc)); | 70 | vmw_ldu_destroy(vmw_crtc_to_ldu(crtc)); |
@@ -301,15 +272,16 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set) | |||
301 | } | 272 | } |
302 | 273 | ||
303 | static struct drm_crtc_funcs vmw_legacy_crtc_funcs = { | 274 | static struct drm_crtc_funcs vmw_legacy_crtc_funcs = { |
304 | .save = vmw_ldu_crtc_save, | 275 | .save = vmw_du_crtc_save, |
305 | .restore = vmw_ldu_crtc_restore, | 276 | .restore = vmw_du_crtc_restore, |
306 | .cursor_set = vmw_du_crtc_cursor_set, | 277 | .cursor_set = vmw_du_crtc_cursor_set, |
307 | .cursor_move = vmw_du_crtc_cursor_move, | 278 | .cursor_move = vmw_du_crtc_cursor_move, |
308 | .gamma_set = vmw_ldu_crtc_gamma_set, | 279 | .gamma_set = vmw_du_crtc_gamma_set, |
309 | .destroy = vmw_ldu_crtc_destroy, | 280 | .destroy = vmw_ldu_crtc_destroy, |
310 | .set_config = vmw_ldu_crtc_set_config, | 281 | .set_config = vmw_ldu_crtc_set_config, |
311 | }; | 282 | }; |
312 | 283 | ||
284 | |||
313 | /* | 285 | /* |
314 | * Legacy Display Unit encoder functions | 286 | * Legacy Display Unit encoder functions |
315 | */ | 287 | */ |
@@ -327,190 +299,18 @@ static struct drm_encoder_funcs vmw_legacy_encoder_funcs = { | |||
327 | * Legacy Display Unit connector functions | 299 | * Legacy Display Unit connector functions |
328 | */ | 300 | */ |
329 | 301 | ||
330 | static void vmw_ldu_connector_dpms(struct drm_connector *connector, int mode) | ||
331 | { | ||
332 | } | ||
333 | |||
334 | static void vmw_ldu_connector_save(struct drm_connector *connector) | ||
335 | { | ||
336 | } | ||
337 | |||
338 | static void vmw_ldu_connector_restore(struct drm_connector *connector) | ||
339 | { | ||
340 | } | ||
341 | |||
342 | static enum drm_connector_status | ||
343 | vmw_ldu_connector_detect(struct drm_connector *connector, | ||
344 | bool force) | ||
345 | { | ||
346 | uint32_t num_displays; | ||
347 | struct drm_device *dev = connector->dev; | ||
348 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
349 | |||
350 | mutex_lock(&dev_priv->hw_mutex); | ||
351 | num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); | ||
352 | mutex_unlock(&dev_priv->hw_mutex); | ||
353 | |||
354 | return ((vmw_connector_to_ldu(connector)->base.unit < num_displays) ? | ||
355 | connector_status_connected : connector_status_disconnected); | ||
356 | } | ||
357 | |||
358 | static const struct drm_display_mode vmw_ldu_connector_builtin[] = { | ||
359 | /* 640x480@60Hz */ | ||
360 | { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, | ||
361 | 752, 800, 0, 480, 489, 492, 525, 0, | ||
362 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, | ||
363 | /* 800x600@60Hz */ | ||
364 | { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, | ||
365 | 968, 1056, 0, 600, 601, 605, 628, 0, | ||
366 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
367 | /* 1024x768@60Hz */ | ||
368 | { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, | ||
369 | 1184, 1344, 0, 768, 771, 777, 806, 0, | ||
370 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, | ||
371 | /* 1152x864@75Hz */ | ||
372 | { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, | ||
373 | 1344, 1600, 0, 864, 865, 868, 900, 0, | ||
374 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
375 | /* 1280x768@60Hz */ | ||
376 | { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344, | ||
377 | 1472, 1664, 0, 768, 771, 778, 798, 0, | ||
378 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
379 | /* 1280x800@60Hz */ | ||
380 | { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352, | ||
381 | 1480, 1680, 0, 800, 803, 809, 831, 0, | ||
382 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, | ||
383 | /* 1280x960@60Hz */ | ||
384 | { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376, | ||
385 | 1488, 1800, 0, 960, 961, 964, 1000, 0, | ||
386 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
387 | /* 1280x1024@60Hz */ | ||
388 | { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328, | ||
389 | 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, | ||
390 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
391 | /* 1360x768@60Hz */ | ||
392 | { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424, | ||
393 | 1536, 1792, 0, 768, 771, 777, 795, 0, | ||
394 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
395 | /* 1400x1050@60Hz */ | ||
396 | { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488, | ||
397 | 1632, 1864, 0, 1050, 1053, 1057, 1089, 0, | ||
398 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
399 | /* 1440x900@60Hz */ | ||
400 | { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520, | ||
401 | 1672, 1904, 0, 900, 903, 909, 934, 0, | ||
402 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
403 | /* 1600x1200@60Hz */ | ||
404 | { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664, | ||
405 | 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, | ||
406 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
407 | /* 1680x1050@60Hz */ | ||
408 | { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784, | ||
409 | 1960, 2240, 0, 1050, 1053, 1059, 1089, 0, | ||
410 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
411 | /* 1792x1344@60Hz */ | ||
412 | { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920, | ||
413 | 2120, 2448, 0, 1344, 1345, 1348, 1394, 0, | ||
414 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
415 | /* 1856x1392@60Hz */ | ||
416 | { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952, | ||
417 | 2176, 2528, 0, 1392, 1393, 1396, 1439, 0, | ||
418 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
419 | /* 1920x1200@60Hz */ | ||
420 | { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056, | ||
421 | 2256, 2592, 0, 1200, 1203, 1209, 1245, 0, | ||
422 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
423 | /* 1920x1440@60Hz */ | ||
424 | { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048, | ||
425 | 2256, 2600, 0, 1440, 1441, 1444, 1500, 0, | ||
426 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
427 | /* 2560x1600@60Hz */ | ||
428 | { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752, | ||
429 | 3032, 3504, 0, 1600, 1603, 1609, 1658, 0, | ||
430 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
431 | /* Terminate */ | ||
432 | { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) }, | ||
433 | }; | ||
434 | |||
435 | static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, | ||
436 | uint32_t max_width, uint32_t max_height) | ||
437 | { | ||
438 | struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector); | ||
439 | struct drm_device *dev = connector->dev; | ||
440 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
441 | struct drm_display_mode *mode = NULL; | ||
442 | struct drm_display_mode prefmode = { DRM_MODE("preferred", | ||
443 | DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, | ||
444 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
445 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) | ||
446 | }; | ||
447 | int i; | ||
448 | |||
449 | /* Add preferred mode */ | ||
450 | { | ||
451 | mode = drm_mode_duplicate(dev, &prefmode); | ||
452 | if (!mode) | ||
453 | return 0; | ||
454 | mode->hdisplay = ldu->pref_width; | ||
455 | mode->vdisplay = ldu->pref_height; | ||
456 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
457 | if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2, | ||
458 | mode->vdisplay)) { | ||
459 | drm_mode_probed_add(connector, mode); | ||
460 | |||
461 | if (ldu->pref_mode) { | ||
462 | list_del_init(&ldu->pref_mode->head); | ||
463 | drm_mode_destroy(dev, ldu->pref_mode); | ||
464 | } | ||
465 | |||
466 | ldu->pref_mode = mode; | ||
467 | } | ||
468 | } | ||
469 | |||
470 | for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) { | ||
471 | const struct drm_display_mode *bmode; | ||
472 | |||
473 | bmode = &vmw_ldu_connector_builtin[i]; | ||
474 | if (bmode->hdisplay > max_width || | ||
475 | bmode->vdisplay > max_height) | ||
476 | continue; | ||
477 | |||
478 | if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2, | ||
479 | bmode->vdisplay)) | ||
480 | continue; | ||
481 | |||
482 | mode = drm_mode_duplicate(dev, bmode); | ||
483 | if (!mode) | ||
484 | return 0; | ||
485 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
486 | |||
487 | drm_mode_probed_add(connector, mode); | ||
488 | } | ||
489 | |||
490 | drm_mode_connector_list_update(connector); | ||
491 | |||
492 | return 1; | ||
493 | } | ||
494 | |||
495 | static int vmw_ldu_connector_set_property(struct drm_connector *connector, | ||
496 | struct drm_property *property, | ||
497 | uint64_t val) | ||
498 | { | ||
499 | return 0; | ||
500 | } | ||
501 | |||
502 | static void vmw_ldu_connector_destroy(struct drm_connector *connector) | 302 | static void vmw_ldu_connector_destroy(struct drm_connector *connector) |
503 | { | 303 | { |
504 | vmw_ldu_destroy(vmw_connector_to_ldu(connector)); | 304 | vmw_ldu_destroy(vmw_connector_to_ldu(connector)); |
505 | } | 305 | } |
506 | 306 | ||
507 | static struct drm_connector_funcs vmw_legacy_connector_funcs = { | 307 | static struct drm_connector_funcs vmw_legacy_connector_funcs = { |
508 | .dpms = vmw_ldu_connector_dpms, | 308 | .dpms = vmw_du_connector_dpms, |
509 | .save = vmw_ldu_connector_save, | 309 | .save = vmw_du_connector_save, |
510 | .restore = vmw_ldu_connector_restore, | 310 | .restore = vmw_du_connector_restore, |
511 | .detect = vmw_ldu_connector_detect, | 311 | .detect = vmw_du_connector_detect, |
512 | .fill_modes = vmw_ldu_connector_fill_modes, | 312 | .fill_modes = vmw_du_connector_fill_modes, |
513 | .set_property = vmw_ldu_connector_set_property, | 313 | .set_property = vmw_du_connector_set_property, |
514 | .destroy = vmw_ldu_connector_destroy, | 314 | .destroy = vmw_ldu_connector_destroy, |
515 | }; | 315 | }; |
516 | 316 | ||
@@ -533,14 +333,14 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) | |||
533 | 333 | ||
534 | INIT_LIST_HEAD(&ldu->active); | 334 | INIT_LIST_HEAD(&ldu->active); |
535 | 335 | ||
536 | ldu->pref_active = (unit == 0); | 336 | ldu->base.pref_active = (unit == 0); |
537 | ldu->pref_width = 800; | 337 | ldu->base.pref_width = 800; |
538 | ldu->pref_height = 600; | 338 | ldu->base.pref_height = 600; |
539 | ldu->pref_mode = NULL; | 339 | ldu->base.pref_mode = NULL; |
540 | 340 | ||
541 | drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, | 341 | drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, |
542 | DRM_MODE_CONNECTOR_LVDS); | 342 | DRM_MODE_CONNECTOR_LVDS); |
543 | connector->status = vmw_ldu_connector_detect(connector, true); | 343 | connector->status = vmw_du_connector_detect(connector, true); |
544 | 344 | ||
545 | drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, | 345 | drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, |
546 | DRM_MODE_ENCODER_LVDS); | 346 | DRM_MODE_ENCODER_LVDS); |
@@ -583,9 +383,9 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) | |||
583 | drm_mode_create_dirty_info_property(dev_priv->dev); | 383 | drm_mode_create_dirty_info_property(dev_priv->dev); |
584 | 384 | ||
585 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { | 385 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { |
586 | for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i) | 386 | for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) |
587 | vmw_ldu_init(dev_priv, i); | 387 | vmw_ldu_init(dev_priv, i); |
588 | ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU); | 388 | ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS); |
589 | } else { | 389 | } else { |
590 | /* for old hardware without multimon only enable one display */ | 390 | /* for old hardware without multimon only enable one display */ |
591 | vmw_ldu_init(dev_priv, 0); | 391 | vmw_ldu_init(dev_priv, 0); |
@@ -609,42 +409,3 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) | |||
609 | 409 | ||
610 | return 0; | 410 | return 0; |
611 | } | 411 | } |
612 | |||
613 | int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num, | ||
614 | struct drm_vmw_rect *rects) | ||
615 | { | ||
616 | struct drm_device *dev = dev_priv->dev; | ||
617 | struct vmw_legacy_display_unit *ldu; | ||
618 | struct drm_connector *con; | ||
619 | int i; | ||
620 | |||
621 | mutex_lock(&dev->mode_config.mutex); | ||
622 | |||
623 | #if 0 | ||
624 | DRM_INFO("%s: new layout ", __func__); | ||
625 | for (i = 0; i < (int)num; i++) | ||
626 | DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y, | ||
627 | rects[i].w, rects[i].h); | ||
628 | DRM_INFO("\n"); | ||
629 | #else | ||
630 | (void)i; | ||
631 | #endif | ||
632 | |||
633 | list_for_each_entry(con, &dev->mode_config.connector_list, head) { | ||
634 | ldu = vmw_connector_to_ldu(con); | ||
635 | if (num > ldu->base.unit) { | ||
636 | ldu->pref_width = rects[ldu->base.unit].w; | ||
637 | ldu->pref_height = rects[ldu->base.unit].h; | ||
638 | ldu->pref_active = true; | ||
639 | } else { | ||
640 | ldu->pref_width = 800; | ||
641 | ldu->pref_height = 600; | ||
642 | ldu->pref_active = false; | ||
643 | } | ||
644 | con->status = vmw_ldu_connector_detect(con, true); | ||
645 | } | ||
646 | |||
647 | mutex_unlock(&dev->mode_config.mutex); | ||
648 | |||
649 | return 0; | ||
650 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index 07ce02da78a4..14399eec9c3c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | |||
@@ -87,48 +87,6 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd, | |||
87 | } | 87 | } |
88 | 88 | ||
89 | /** | 89 | /** |
90 | * Pin or unpin a buffer in vram. | ||
91 | * | ||
92 | * @dev_priv: Driver private. | ||
93 | * @buf: DMA buffer to pin or unpin. | ||
94 | * @pin: Pin buffer in vram if true. | ||
95 | * @interruptible: Use interruptible wait. | ||
96 | * | ||
97 | * Takes the current masters ttm lock in read. | ||
98 | * | ||
99 | * Returns | ||
100 | * -ERESTARTSYS if interrupted by a signal. | ||
101 | */ | ||
102 | static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv, | ||
103 | struct vmw_dma_buffer *buf, | ||
104 | bool pin, bool interruptible) | ||
105 | { | ||
106 | struct ttm_buffer_object *bo = &buf->base; | ||
107 | struct ttm_placement *overlay_placement = &vmw_vram_placement; | ||
108 | int ret; | ||
109 | |||
110 | ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible); | ||
111 | if (unlikely(ret != 0)) | ||
112 | return ret; | ||
113 | |||
114 | ret = ttm_bo_reserve(bo, interruptible, false, false, 0); | ||
115 | if (unlikely(ret != 0)) | ||
116 | goto err; | ||
117 | |||
118 | if (pin) | ||
119 | overlay_placement = &vmw_vram_ne_placement; | ||
120 | |||
121 | ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false); | ||
122 | |||
123 | ttm_bo_unreserve(bo); | ||
124 | |||
125 | err: | ||
126 | ttm_read_unlock(&dev_priv->active_master->lock); | ||
127 | |||
128 | return ret; | ||
129 | } | ||
130 | |||
131 | /** | ||
132 | * Send put command to hw. | 90 | * Send put command to hw. |
133 | * | 91 | * |
134 | * Returns | 92 | * Returns |
@@ -139,68 +97,80 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv, | |||
139 | struct drm_vmw_control_stream_arg *arg, | 97 | struct drm_vmw_control_stream_arg *arg, |
140 | bool interruptible) | 98 | bool interruptible) |
141 | { | 99 | { |
100 | struct vmw_escape_video_flush *flush; | ||
101 | size_t fifo_size; | ||
102 | bool have_so = dev_priv->sou_priv ? true : false; | ||
103 | int i, num_items; | ||
104 | SVGAGuestPtr ptr; | ||
105 | |||
142 | struct { | 106 | struct { |
143 | struct vmw_escape_header escape; | 107 | struct vmw_escape_header escape; |
144 | struct { | 108 | struct { |
145 | struct { | 109 | uint32_t cmdType; |
146 | uint32_t cmdType; | 110 | uint32_t streamId; |
147 | uint32_t streamId; | 111 | } header; |
148 | } header; | ||
149 | struct { | ||
150 | uint32_t registerId; | ||
151 | uint32_t value; | ||
152 | } items[SVGA_VIDEO_PITCH_3 + 1]; | ||
153 | } body; | ||
154 | struct vmw_escape_video_flush flush; | ||
155 | } *cmds; | 112 | } *cmds; |
156 | uint32_t offset; | 113 | struct { |
157 | int i, ret; | 114 | uint32_t registerId; |
115 | uint32_t value; | ||
116 | } *items; | ||
158 | 117 | ||
159 | for (;;) { | 118 | /* register defines are indices, so the count needed is highest + 1 */ |
160 | cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds)); | 119 | if (have_so) |
161 | if (cmds) | 120 | num_items = SVGA_VIDEO_DST_SCREEN_ID + 1; |
162 | break; | 121 | else |
122 | num_items = SVGA_VIDEO_PITCH_3 + 1; | ||
163 | 123 | ||
164 | ret = vmw_fallback_wait(dev_priv, false, true, 0, | 124 | fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items; |
165 | interruptible, 3*HZ); | 125 | |
166 | if (interruptible && ret == -ERESTARTSYS) | 126 | cmds = vmw_fifo_reserve(dev_priv, fifo_size); |
167 | return ret; | 127 | /* hardware has hung, can't do anything here */ |
168 | else | 128 | if (!cmds) |
169 | BUG_ON(ret != 0); | 129 | return -ENOMEM; |
130 | |||
131 | items = (typeof(items))&cmds[1]; | ||
132 | flush = (struct vmw_escape_video_flush *)&items[num_items]; | ||
133 | |||
134 | /* the escape size is the item-sized set-regs header plus all items */ | ||
135 | fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1)); | ||
136 | |||
137 | cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS; | ||
138 | cmds->header.streamId = arg->stream_id; | ||
139 | |||
140 | /* the IDs are neatly numbered */ | ||
141 | for (i = 0; i < num_items; i++) | ||
142 | items[i].registerId = i; | ||
143 | |||
144 | vmw_bo_get_guest_ptr(&buf->base, &ptr); | ||
145 | ptr.offset += arg->offset; | ||
146 | |||
147 | items[SVGA_VIDEO_ENABLED].value = true; | ||
148 | items[SVGA_VIDEO_FLAGS].value = arg->flags; | ||
149 | items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset; | ||
150 | items[SVGA_VIDEO_FORMAT].value = arg->format; | ||
151 | items[SVGA_VIDEO_COLORKEY].value = arg->color_key; | ||
152 | items[SVGA_VIDEO_SIZE].value = arg->size; | ||
153 | items[SVGA_VIDEO_WIDTH].value = arg->width; | ||
154 | items[SVGA_VIDEO_HEIGHT].value = arg->height; | ||
155 | items[SVGA_VIDEO_SRC_X].value = arg->src.x; | ||
156 | items[SVGA_VIDEO_SRC_Y].value = arg->src.y; | ||
157 | items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w; | ||
158 | items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h; | ||
159 | items[SVGA_VIDEO_DST_X].value = arg->dst.x; | ||
160 | items[SVGA_VIDEO_DST_Y].value = arg->dst.y; | ||
161 | items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w; | ||
162 | items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h; | ||
163 | items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0]; | ||
164 | items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1]; | ||
165 | items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2]; | ||
166 | if (have_so) { | ||
167 | items[SVGA_VIDEO_DATA_GMRID].value = ptr.gmrId; | ||
168 | items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID; | ||
170 | } | 169 | } |
171 | 170 | ||
172 | fill_escape(&cmds->escape, sizeof(cmds->body)); | 171 | fill_flush(flush, arg->stream_id); |
173 | cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS; | ||
174 | cmds->body.header.streamId = arg->stream_id; | ||
175 | |||
176 | for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++) | ||
177 | cmds->body.items[i].registerId = i; | ||
178 | |||
179 | offset = buf->base.offset + arg->offset; | ||
180 | |||
181 | cmds->body.items[SVGA_VIDEO_ENABLED].value = true; | ||
182 | cmds->body.items[SVGA_VIDEO_FLAGS].value = arg->flags; | ||
183 | cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset; | ||
184 | cmds->body.items[SVGA_VIDEO_FORMAT].value = arg->format; | ||
185 | cmds->body.items[SVGA_VIDEO_COLORKEY].value = arg->color_key; | ||
186 | cmds->body.items[SVGA_VIDEO_SIZE].value = arg->size; | ||
187 | cmds->body.items[SVGA_VIDEO_WIDTH].value = arg->width; | ||
188 | cmds->body.items[SVGA_VIDEO_HEIGHT].value = arg->height; | ||
189 | cmds->body.items[SVGA_VIDEO_SRC_X].value = arg->src.x; | ||
190 | cmds->body.items[SVGA_VIDEO_SRC_Y].value = arg->src.y; | ||
191 | cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w; | ||
192 | cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h; | ||
193 | cmds->body.items[SVGA_VIDEO_DST_X].value = arg->dst.x; | ||
194 | cmds->body.items[SVGA_VIDEO_DST_Y].value = arg->dst.y; | ||
195 | cmds->body.items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w; | ||
196 | cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h; | ||
197 | cmds->body.items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0]; | ||
198 | cmds->body.items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1]; | ||
199 | cmds->body.items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2]; | ||
200 | |||
201 | fill_flush(&cmds->flush, arg->stream_id); | ||
202 | 172 | ||
203 | vmw_fifo_commit(dev_priv, sizeof(*cmds)); | 173 | vmw_fifo_commit(dev_priv, fifo_size); |
204 | 174 | ||
205 | return 0; | 175 | return 0; |
206 | } | 176 | } |
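The rewritten put command packs three variable pieces into one FIFO reservation, in order: the escape/set-regs header, num_items register items, then the flush; items and flush are located by pointer arithmetic off cmds. The size bookkeeping, reduced to its essentials with stand-in struct sizes (note the escape length counts the item-sized set-regs header as one extra item, matching the num_items + 1 above):

        #include <stddef.h>
        #include <stdint.h>

        struct put_hdr   { uint32_t escape[2], cmdType, streamId; };    /* stand-in */
        struct reg_item  { uint32_t registerId, value; };
        struct flush_cmd { uint32_t escape[2], streamId; };             /* stand-in */

        /* One reservation holds: put_hdr | items[num_items] | flush. */
        static size_t put_cmd_size(size_t num_items)
        {
                return sizeof(struct put_hdr)
                     + sizeof(struct reg_item) * num_items
                     + sizeof(struct flush_cmd);
        }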
@@ -248,6 +218,25 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv, | |||
248 | } | 218 | } |
249 | 219 | ||
250 | /** | 220 | /** |
221 | * Move a buffer to vram or gmr if @pin is set, else unpin the buffer. | ||
222 | * | ||
223 | * With the introduction of screen objects, buffers can now be | ||
224 | * backed by GMRs instead of being locked to vram. | ||
225 | */ | ||
226 | static int vmw_overlay_move_buffer(struct vmw_private *dev_priv, | ||
227 | struct vmw_dma_buffer *buf, | ||
228 | bool pin, bool inter) | ||
229 | { | ||
230 | if (!pin) | ||
231 | return vmw_dmabuf_unpin(dev_priv, buf, inter); | ||
232 | |||
233 | if (!dev_priv->sou_priv) | ||
234 | return vmw_dmabuf_to_vram(dev_priv, buf, true, inter); | ||
235 | |||
236 | return vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, inter); | ||
237 | } | ||
238 | |||
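vmw_overlay_move_buffer() centralizes the placement decision that used to be hard-coded to vram: unpinning is unconditional, legacy devices must keep overlay buffers in vram, and screen-object devices may place them in a GMR. As a decision function:

        enum placement { UNPINNED, VRAM_ONLY, VRAM_OR_GMR };

        /* Mirrors the branch structure of vmw_overlay_move_buffer(). */
        static enum placement overlay_placement(int pin, int have_screen_objects)
        {
                if (!pin)
                        return UNPINNED;
                return have_screen_objects ? VRAM_OR_GMR : VRAM_ONLY;
        }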
239 | /** | ||
251 | * Stop or pause a stream. | 240 | * Stop or pause a stream. |
252 | * | 241 | * |
253 | * If the stream is paused, the no evict flag is removed from the buffer | 242 |
@@ -279,8 +268,8 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv, | |||
279 | return ret; | 268 | return ret; |
280 | 269 | ||
281 | /* We just remove the NO_EVICT flag so no -ENOMEM */ | 270 | /* We just remove the NO_EVICT flag so no -ENOMEM */ |
282 | ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false, | 271 | ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false, |
283 | interruptible); | 272 | interruptible); |
284 | if (interruptible && ret == -ERESTARTSYS) | 273 | if (interruptible && ret == -ERESTARTSYS) |
285 | return ret; | 274 | return ret; |
286 | else | 275 | else |
@@ -342,7 +331,7 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv, | |||
342 | /* We don't start the old stream if we are interrupted. | 331 | /* We don't start the old stream if we are interrupted. |
343 | * Might return -ENOMEM if it can't fit the buffer in vram. | 332 | * Might return -ENOMEM if it can't fit the buffer in vram. |
344 | */ | 333 | */ |
345 | ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible); | 334 | ret = vmw_overlay_move_buffer(dev_priv, buf, true, interruptible); |
346 | if (ret) | 335 | if (ret) |
347 | return ret; | 336 | return ret; |
348 | 337 | ||
@@ -351,7 +340,8 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv, | |||
351 | /* This one needs to happen no matter what. We only remove | 340 | /* This one needs to happen no matter what. We only remove |
352 | * the NO_EVICT flag so this is safe from -ENOMEM. | 341 | * the NO_EVICT flag so this is safe from -ENOMEM. |
353 | */ | 342 | */ |
354 | BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0); | 343 | BUG_ON(vmw_overlay_move_buffer(dev_priv, buf, false, false) |
344 | != 0); | ||
355 | return ret; | 345 | return ret; |
356 | } | 346 | } |
357 | 347 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index c1b6ffd4ce7b..93a68a61419d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -61,6 +61,12 @@ struct vmw_user_stream { | |||
61 | struct vmw_stream stream; | 61 | struct vmw_stream stream; |
62 | }; | 62 | }; |
63 | 63 | ||
64 | struct vmw_surface_offset { | ||
65 | uint32_t face; | ||
66 | uint32_t mip; | ||
67 | uint32_t bo_offset; | ||
68 | }; | ||
69 | |||
64 | static inline struct vmw_dma_buffer * | 70 | static inline struct vmw_dma_buffer * |
65 | vmw_dma_buffer(struct ttm_buffer_object *bo) | 71 | vmw_dma_buffer(struct ttm_buffer_object *bo) |
66 | { | 72 | { |
@@ -80,13 +86,36 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res) | |||
80 | return res; | 86 | return res; |
81 | } | 87 | } |
82 | 88 | ||
89 | |||
90 | /** | ||
91 | * vmw_resource_release_id - release a resource id to the id manager. | ||
92 | * | ||
93 | * @res: Pointer to the resource. | ||
94 | * | ||
95 | * Release the resource id to the resource id manager and set @res->id to -1. | ||
96 | */ | ||
97 | static void vmw_resource_release_id(struct vmw_resource *res) | ||
98 | { | ||
99 | struct vmw_private *dev_priv = res->dev_priv; | ||
100 | |||
101 | write_lock(&dev_priv->resource_lock); | ||
102 | if (res->id != -1) | ||
103 | idr_remove(res->idr, res->id); | ||
104 | res->id = -1; | ||
105 | write_unlock(&dev_priv->resource_lock); | ||
106 | } | ||
107 | |||
83 | static void vmw_resource_release(struct kref *kref) | 108 | static void vmw_resource_release(struct kref *kref) |
84 | { | 109 | { |
85 | struct vmw_resource *res = | 110 | struct vmw_resource *res = |
86 | container_of(kref, struct vmw_resource, kref); | 111 | container_of(kref, struct vmw_resource, kref); |
87 | struct vmw_private *dev_priv = res->dev_priv; | 112 | struct vmw_private *dev_priv = res->dev_priv; |
113 | int id = res->id; | ||
114 | struct idr *idr = res->idr; | ||
88 | 115 | ||
89 | idr_remove(res->idr, res->id); | 116 | res->avail = false; |
117 | if (res->remove_from_lists != NULL) | ||
118 | res->remove_from_lists(res); | ||
90 | write_unlock(&dev_priv->resource_lock); | 119 | write_unlock(&dev_priv->resource_lock); |
91 | 120 | ||
92 | if (likely(res->hw_destroy != NULL)) | 121 | if (likely(res->hw_destroy != NULL)) |
@@ -98,6 +127,9 @@ static void vmw_resource_release(struct kref *kref) | |||
98 | kfree(res); | 127 | kfree(res); |
99 | 128 | ||
100 | write_lock(&dev_priv->resource_lock); | 129 | write_lock(&dev_priv->resource_lock); |
130 | |||
131 | if (id != -1) | ||
132 | idr_remove(idr, id); | ||
101 | } | 133 | } |
102 | 134 | ||
103 | void vmw_resource_unreference(struct vmw_resource **p_res) | 135 | void vmw_resource_unreference(struct vmw_resource **p_res) |
@@ -111,28 +143,29 @@ void vmw_resource_unreference(struct vmw_resource **p_res) | |||
111 | write_unlock(&dev_priv->resource_lock); | 143 | write_unlock(&dev_priv->resource_lock); |
112 | } | 144 | } |
113 | 145 | ||
114 | static int vmw_resource_init(struct vmw_private *dev_priv, | 146 | |
115 | struct vmw_resource *res, | 147 | /** |
116 | struct idr *idr, | 148 | * vmw_resource_alloc_id - allocate a resource id from the id manager. |
117 | enum ttm_object_type obj_type, | 149 | * |
118 | void (*res_free) (struct vmw_resource *res)) | 150 | * @dev_priv: Pointer to the device private structure. |
151 | * @res: Pointer to the resource. | ||
152 | * | ||
153 | * Allocate the lowest free resource id from the id manager, and set | ||
154 | * @res->id to that id. Returns 0 on success and -ENOMEM on failure. | ||
155 | */ | ||
156 | static int vmw_resource_alloc_id(struct vmw_private *dev_priv, | ||
157 | struct vmw_resource *res) | ||
119 | { | 158 | { |
120 | int ret; | 159 | int ret; |
121 | 160 | ||
122 | kref_init(&res->kref); | 161 | BUG_ON(res->id != -1); |
123 | res->hw_destroy = NULL; | ||
124 | res->res_free = res_free; | ||
125 | res->res_type = obj_type; | ||
126 | res->idr = idr; | ||
127 | res->avail = false; | ||
128 | res->dev_priv = dev_priv; | ||
129 | 162 | ||
130 | do { | 163 | do { |
131 | if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0)) | 164 | if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0)) |
132 | return -ENOMEM; | 165 | return -ENOMEM; |
133 | 166 | ||
134 | write_lock(&dev_priv->resource_lock); | 167 | write_lock(&dev_priv->resource_lock); |
135 | ret = idr_get_new_above(idr, res, 1, &res->id); | 168 | ret = idr_get_new_above(res->idr, res, 1, &res->id); |
136 | write_unlock(&dev_priv->resource_lock); | 169 | write_unlock(&dev_priv->resource_lock); |
137 | 170 | ||
138 | } while (ret == -EAGAIN); | 171 | } while (ret == -EAGAIN); |
@@ -140,6 +173,33 @@ static int vmw_resource_init(struct vmw_private *dev_priv, | |||
140 | return ret; | 173 | return ret; |
141 | } | 174 | } |
142 | 175 | ||
176 | |||
177 | static int vmw_resource_init(struct vmw_private *dev_priv, | ||
178 | struct vmw_resource *res, | ||
179 | struct idr *idr, | ||
180 | enum ttm_object_type obj_type, | ||
181 | bool delay_id, | ||
182 | void (*res_free) (struct vmw_resource *res), | ||
183 | void (*remove_from_lists) | ||
184 | (struct vmw_resource *res)) | ||
185 | { | ||
186 | kref_init(&res->kref); | ||
187 | res->hw_destroy = NULL; | ||
188 | res->res_free = res_free; | ||
189 | res->remove_from_lists = remove_from_lists; | ||
190 | res->res_type = obj_type; | ||
191 | res->idr = idr; | ||
192 | res->avail = false; | ||
193 | res->dev_priv = dev_priv; | ||
194 | INIT_LIST_HEAD(&res->query_head); | ||
195 | INIT_LIST_HEAD(&res->validate_head); | ||
196 | res->id = -1; | ||
197 | if (delay_id) | ||
198 | return 0; | ||
199 | else | ||
200 | return vmw_resource_alloc_id(dev_priv, res); | ||
201 | } | ||
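
The new delay_id flag splits resource setup in two: contexts and streams still take a hardware id immediately, while surfaces defer the allocation to their first validation, so an unused surface never consumes one of the limited device ids. A sketch of the two flavors, as they appear further down in this patch:

	/* Context: the id is needed before the define command is sent. */
	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, false, res_free, NULL);

	/* Surface: id allocation is deferred to vmw_surface_do_validate(),
	 * and the surface must also unlink itself from the LRU on release. */
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, true, res_free,
				vmw_surface_remove_from_lists);
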
202 | |||
143 | /** | 203 | /** |
144 | * vmw_resource_activate | 204 | * vmw_resource_activate |
145 | * | 205 | * |
@@ -194,8 +254,12 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) | |||
194 | struct { | 254 | struct { |
195 | SVGA3dCmdHeader header; | 255 | SVGA3dCmdHeader header; |
196 | SVGA3dCmdDestroyContext body; | 256 | SVGA3dCmdDestroyContext body; |
197 | } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 257 | } *cmd; |
198 | 258 | ||
259 | |||
260 | vmw_execbuf_release_pinned_bo(dev_priv, true, res->id); | ||
261 | |||
262 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
199 | if (unlikely(cmd == NULL)) { | 263 | if (unlikely(cmd == NULL)) { |
200 | DRM_ERROR("Failed reserving FIFO space for surface " | 264 | DRM_ERROR("Failed reserving FIFO space for surface " |
201 | "destruction.\n"); | 265 | "destruction.\n"); |
@@ -222,14 +286,17 @@ static int vmw_context_init(struct vmw_private *dev_priv, | |||
222 | } *cmd; | 286 | } *cmd; |
223 | 287 | ||
224 | ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr, | 288 | ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr, |
225 | VMW_RES_CONTEXT, res_free); | 289 | VMW_RES_CONTEXT, false, res_free, NULL); |
226 | 290 | ||
227 | if (unlikely(ret != 0)) { | 291 | if (unlikely(ret != 0)) { |
228 | if (res_free == NULL) | 292 | DRM_ERROR("Failed to allocate a resource id.\n"); |
229 | kfree(res); | 293 | goto out_early; |
230 | else | 294 | } |
231 | res_free(res); | 295 | |
232 | return ret; | 296 | if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) { |
297 | DRM_ERROR("Out of hw context ids.\n"); | ||
298 | vmw_resource_unreference(&res); | ||
299 | return -ENOMEM; | ||
233 | } | 300 | } |
234 | 301 | ||
235 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 302 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
@@ -247,6 +314,13 @@ static int vmw_context_init(struct vmw_private *dev_priv, | |||
247 | (void) vmw_3d_resource_inc(dev_priv, false); | 314 | (void) vmw_3d_resource_inc(dev_priv, false); |
248 | vmw_resource_activate(res, vmw_hw_context_destroy); | 315 | vmw_resource_activate(res, vmw_hw_context_destroy); |
249 | return 0; | 316 | return 0; |
317 | |||
318 | out_early: | ||
319 | if (res_free == NULL) | ||
320 | kfree(res); | ||
321 | else | ||
322 | res_free(res); | ||
323 | return ret; | ||
250 | } | 324 | } |
251 | 325 | ||
252 | struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv) | 326 | struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv) |
@@ -382,31 +456,285 @@ int vmw_context_check(struct vmw_private *dev_priv, | |||
382 | return ret; | 456 | return ret; |
383 | } | 457 | } |
384 | 458 | ||
459 | struct vmw_bpp { | ||
460 | uint8_t bpp; | ||
461 | uint8_t s_bpp; | ||
462 | }; | ||
463 | |||
464 | /* | ||
465 | * Size table for the supported SVGA3D surface formats. Each entry | ||
466 | * holds two values: bpp, and s_bpp, which is short for | ||
467 | * "stride bits per pixel". The values are chosen so that the | ||
468 | * minimum pitch of the image, in bytes, is calculated using | ||
469 | * | ||
470 | * min_pitch = (w*s_bpp + 7)/8 | ||
471 | * | ||
472 | * and the total memory requirement of the image, in bytes, is | ||
473 | * | ||
474 | * h*min_pitch*bpp/s_bpp | ||
475 | * | ||
476 | */ | ||
477 | static const struct vmw_bpp vmw_sf_bpp[] = { | ||
478 | [SVGA3D_FORMAT_INVALID] = {0, 0}, | ||
479 | [SVGA3D_X8R8G8B8] = {32, 32}, | ||
480 | [SVGA3D_A8R8G8B8] = {32, 32}, | ||
481 | [SVGA3D_R5G6B5] = {16, 16}, | ||
482 | [SVGA3D_X1R5G5B5] = {16, 16}, | ||
483 | [SVGA3D_A1R5G5B5] = {16, 16}, | ||
484 | [SVGA3D_A4R4G4B4] = {16, 16}, | ||
485 | [SVGA3D_Z_D32] = {32, 32}, | ||
486 | [SVGA3D_Z_D16] = {16, 16}, | ||
487 | [SVGA3D_Z_D24S8] = {32, 32}, | ||
488 | [SVGA3D_Z_D15S1] = {16, 16}, | ||
489 | [SVGA3D_LUMINANCE8] = {8, 8}, | ||
490 | [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8}, | ||
491 | [SVGA3D_LUMINANCE16] = {16, 16}, | ||
492 | [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16}, | ||
493 | [SVGA3D_DXT1] = {4, 16}, | ||
494 | [SVGA3D_DXT2] = {8, 32}, | ||
495 | [SVGA3D_DXT3] = {8, 32}, | ||
496 | [SVGA3D_DXT4] = {8, 32}, | ||
497 | [SVGA3D_DXT5] = {8, 32}, | ||
498 | [SVGA3D_BUMPU8V8] = {16, 16}, | ||
499 | [SVGA3D_BUMPL6V5U5] = {16, 16}, | ||
500 | [SVGA3D_BUMPX8L8V8U8] = {32, 32}, | ||
501 | [SVGA3D_ARGB_S10E5] = {16, 16}, | ||
502 | [SVGA3D_ARGB_S23E8] = {32, 32}, | ||
503 | [SVGA3D_A2R10G10B10] = {32, 32}, | ||
504 | [SVGA3D_V8U8] = {16, 16}, | ||
505 | [SVGA3D_Q8W8V8U8] = {32, 32}, | ||
506 | [SVGA3D_CxV8U8] = {16, 16}, | ||
507 | [SVGA3D_X8L8V8U8] = {32, 32}, | ||
508 | [SVGA3D_A2W10V10U10] = {32, 32}, | ||
509 | [SVGA3D_ALPHA8] = {8, 8}, | ||
510 | [SVGA3D_R_S10E5] = {16, 16}, | ||
511 | [SVGA3D_R_S23E8] = {32, 32}, | ||
512 | [SVGA3D_RG_S10E5] = {16, 16}, | ||
513 | [SVGA3D_RG_S23E8] = {32, 32}, | ||
514 | [SVGA3D_BUFFER] = {8, 8}, | ||
515 | [SVGA3D_Z_D24X8] = {32, 32}, | ||
516 | [SVGA3D_V16U16] = {32, 32}, | ||
517 | [SVGA3D_G16R16] = {32, 32}, | ||
518 | [SVGA3D_A16B16G16R16] = {64, 64}, | ||
519 | [SVGA3D_UYVY] = {12, 12}, | ||
520 | [SVGA3D_YUY2] = {12, 12}, | ||
521 | [SVGA3D_NV12] = {12, 8}, | ||
522 | [SVGA3D_AYUV] = {32, 32}, | ||
523 | [SVGA3D_BC4_UNORM] = {4, 16}, | ||
524 | [SVGA3D_BC5_UNORM] = {8, 32}, | ||
525 | [SVGA3D_Z_DF16] = {16, 16}, | ||
526 | [SVGA3D_Z_DF24] = {24, 24}, | ||
527 | [SVGA3D_Z_D24S8_INT] = {32, 32} | ||
528 | }; | ||
529 | |||
385 | 530 | ||
386 | /** | 531 | /** |
387 | * Surface management. | 532 | * Surface management. |
388 | */ | 533 | */ |
389 | 534 | ||
535 | struct vmw_surface_dma { | ||
536 | SVGA3dCmdHeader header; | ||
537 | SVGA3dCmdSurfaceDMA body; | ||
538 | SVGA3dCopyBox cb; | ||
539 | SVGA3dCmdSurfaceDMASuffix suffix; | ||
540 | }; | ||
541 | |||
542 | struct vmw_surface_define { | ||
543 | SVGA3dCmdHeader header; | ||
544 | SVGA3dCmdDefineSurface body; | ||
545 | }; | ||
546 | |||
547 | struct vmw_surface_destroy { | ||
548 | SVGA3dCmdHeader header; | ||
549 | SVGA3dCmdDestroySurface body; | ||
550 | }; | ||
551 | |||
552 | |||
553 | /** | ||
554 | * vmw_surface_dma_size - Compute fifo size for a dma command. | ||
555 | * | ||
556 | * @srf: Pointer to a struct vmw_surface | ||
557 | * | ||
558 | * Computes the required size for a surface dma command for backup or | ||
559 | * restoration of the surface represented by @srf. | ||
560 | */ | ||
561 | static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf) | ||
562 | { | ||
563 | return srf->num_sizes * sizeof(struct vmw_surface_dma); | ||
564 | } | ||
565 | |||
566 | |||
567 | /** | ||
568 | * vmw_surface_define_size - Compute fifo size for a surface define command. | ||
569 | * | ||
570 | * @srf: Pointer to a struct vmw_surface | ||
571 | * | ||
572 | * Computes the required size for a surface define command for the definition | ||
573 | * of the surface represented by @srf. | ||
574 | */ | ||
575 | static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf) | ||
576 | { | ||
577 | return sizeof(struct vmw_surface_define) + srf->num_sizes * | ||
578 | sizeof(SVGA3dSize); | ||
579 | } | ||
580 | |||
581 | |||
582 | /** | ||
583 | * vmw_surface_destroy_size - Compute fifo size for a surface destroy command. | ||
584 | * | ||
585 | * Computes the required size for a surface destroy command for the destruction | ||
586 | * of a hw surface. | ||
587 | */ | ||
588 | static inline uint32_t vmw_surface_destroy_size(void) | ||
589 | { | ||
590 | return sizeof(struct vmw_surface_destroy); | ||
591 | } | ||
592 | |||
593 | /** | ||
594 | * vmw_surface_destroy_encode - Encode a surface_destroy command. | ||
595 | * | ||
596 | * @id: The surface id | ||
597 | * @cmd_space: Pointer to memory area in which the commands should be encoded. | ||
598 | */ | ||
599 | static void vmw_surface_destroy_encode(uint32_t id, | ||
600 | void *cmd_space) | ||
601 | { | ||
602 | struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *) | ||
603 | cmd_space; | ||
604 | |||
605 | cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY; | ||
606 | cmd->header.size = sizeof(cmd->body); | ||
607 | cmd->body.sid = id; | ||
608 | } | ||
609 | |||
610 | /** | ||
611 | * vmw_surface_define_encode - Encode a surface_define command. | ||
612 | * | ||
613 | * @srf: Pointer to a struct vmw_surface object. | ||
614 | * @cmd_space: Pointer to memory area in which the commands should be encoded. | ||
615 | */ | ||
616 | static void vmw_surface_define_encode(const struct vmw_surface *srf, | ||
617 | void *cmd_space) | ||
618 | { | ||
619 | struct vmw_surface_define *cmd = (struct vmw_surface_define *) | ||
620 | cmd_space; | ||
621 | struct drm_vmw_size *src_size; | ||
622 | SVGA3dSize *cmd_size; | ||
623 | uint32_t cmd_len; | ||
624 | int i; | ||
625 | |||
626 | cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize); | ||
627 | |||
628 | cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE; | ||
629 | cmd->header.size = cmd_len; | ||
630 | cmd->body.sid = srf->res.id; | ||
631 | cmd->body.surfaceFlags = srf->flags; | ||
632 | cmd->body.format = cpu_to_le32(srf->format); | ||
633 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) | ||
634 | cmd->body.face[i].numMipLevels = srf->mip_levels[i]; | ||
635 | |||
636 | cmd += 1; | ||
637 | cmd_size = (SVGA3dSize *) cmd; | ||
638 | src_size = srf->sizes; | ||
639 | |||
640 | for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) { | ||
641 | cmd_size->width = src_size->width; | ||
642 | cmd_size->height = src_size->height; | ||
643 | cmd_size->depth = src_size->depth; | ||
644 | } | ||
645 | } | ||
646 | |||
647 | |||
648 | /** | ||
649 | * vmw_surface_dma_encode - Encode a surface_dma command. | ||
650 | * | ||
651 | * @srf: Pointer to a struct vmw_surface object. | ||
652 | * @cmd_space: Pointer to memory area in which the commands should be encoded. | ||
653 | * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents | ||
654 | * should be placed or read from. | ||
655 | * @to_surface: Boolean whether to DMA to the surface or from the surface. | ||
656 | */ | ||
657 | static void vmw_surface_dma_encode(struct vmw_surface *srf, | ||
658 | void *cmd_space, | ||
659 | const SVGAGuestPtr *ptr, | ||
660 | bool to_surface) | ||
661 | { | ||
662 | uint32_t i; | ||
663 | uint32_t bpp = vmw_sf_bpp[srf->format].bpp; | ||
664 | uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp; | ||
665 | struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space; | ||
666 | |||
667 | for (i = 0; i < srf->num_sizes; ++i) { | ||
668 | SVGA3dCmdHeader *header = &cmd->header; | ||
669 | SVGA3dCmdSurfaceDMA *body = &cmd->body; | ||
670 | SVGA3dCopyBox *cb = &cmd->cb; | ||
671 | SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix; | ||
672 | const struct vmw_surface_offset *cur_offset = &srf->offsets[i]; | ||
673 | const struct drm_vmw_size *cur_size = &srf->sizes[i]; | ||
674 | |||
675 | header->id = SVGA_3D_CMD_SURFACE_DMA; | ||
676 | header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix); | ||
677 | |||
678 | body->guest.ptr = *ptr; | ||
679 | body->guest.ptr.offset += cur_offset->bo_offset; | ||
680 | body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3; | ||
681 | body->host.sid = srf->res.id; | ||
682 | body->host.face = cur_offset->face; | ||
683 | body->host.mipmap = cur_offset->mip; | ||
684 | body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM : | ||
685 | SVGA3D_READ_HOST_VRAM); | ||
686 | cb->x = 0; | ||
687 | cb->y = 0; | ||
688 | cb->z = 0; | ||
689 | cb->srcx = 0; | ||
690 | cb->srcy = 0; | ||
691 | cb->srcz = 0; | ||
692 | cb->w = cur_size->width; | ||
693 | cb->h = cur_size->height; | ||
694 | cb->d = cur_size->depth; | ||
695 | |||
696 | suffix->suffixSize = sizeof(*suffix); | ||
697 | suffix->maximumOffset = body->guest.pitch*cur_size->height* | ||
698 | cur_size->depth*bpp / stride_bpp; | ||
699 | suffix->flags.discard = 0; | ||
700 | suffix->flags.unsynchronized = 0; | ||
701 | suffix->flags.reserved = 0; | ||
702 | ++cmd; | ||
703 | } | ||
704 | } | ||
705 | |||
706 | |||
390 | static void vmw_hw_surface_destroy(struct vmw_resource *res) | 707 | static void vmw_hw_surface_destroy(struct vmw_resource *res) |
391 | { | 708 | { |
392 | 709 | ||
393 | struct vmw_private *dev_priv = res->dev_priv; | 710 | struct vmw_private *dev_priv = res->dev_priv; |
394 | struct { | 711 | struct vmw_surface *srf; |
395 | SVGA3dCmdHeader header; | 712 | void *cmd; |
396 | SVGA3dCmdDestroySurface body; | ||
397 | } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
398 | 713 | ||
399 | if (unlikely(cmd == NULL)) { | 714 | if (res->id != -1) { |
400 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
401 | "destruction.\n"); | ||
402 | return; | ||
403 | } | ||
404 | 715 | ||
405 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY); | 716 | cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); |
406 | cmd->header.size = cpu_to_le32(sizeof(cmd->body)); | 717 | if (unlikely(cmd == NULL)) { |
407 | cmd->body.sid = cpu_to_le32(res->id); | 718 | DRM_ERROR("Failed reserving FIFO space for surface " |
719 | "destruction.\n"); | ||
720 | return; | ||
721 | } | ||
408 | 722 | ||
409 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 723 | vmw_surface_destroy_encode(res->id, cmd); |
724 | vmw_fifo_commit(dev_priv, vmw_surface_destroy_size()); | ||
725 | |||
726 | /* | ||
727 | * TODO: A used_memory_size_atomic, or a separate lock, | ||
728 | * would avoid taking dev_priv::cmdbuf_mutex in | ||
729 | * the destroy path. | ||
730 | */ | ||
731 | |||
732 | mutex_lock(&dev_priv->cmdbuf_mutex); | ||
733 | srf = container_of(res, struct vmw_surface, res); | ||
734 | dev_priv->used_memory_size -= srf->backup_size; | ||
735 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
736 | |||
737 | } | ||
410 | vmw_3d_resource_dec(dev_priv, false); | 738 | vmw_3d_resource_dec(dev_priv, false); |
411 | } | 739 | } |
412 | 740 | ||
@@ -414,70 +742,352 @@ void vmw_surface_res_free(struct vmw_resource *res) | |||
414 | { | 742 | { |
415 | struct vmw_surface *srf = container_of(res, struct vmw_surface, res); | 743 | struct vmw_surface *srf = container_of(res, struct vmw_surface, res); |
416 | 744 | ||
745 | if (srf->backup) | ||
746 | ttm_bo_unref(&srf->backup); | ||
747 | kfree(srf->offsets); | ||
417 | kfree(srf->sizes); | 748 | kfree(srf->sizes); |
418 | kfree(srf->snooper.image); | 749 | kfree(srf->snooper.image); |
419 | kfree(srf); | 750 | kfree(srf); |
420 | } | 751 | } |
421 | 752 | ||
422 | int vmw_surface_init(struct vmw_private *dev_priv, | 753 | |
423 | struct vmw_surface *srf, | 754 | /** |
424 | void (*res_free) (struct vmw_resource *res)) | 755 | * vmw_surface_do_validate - make a surface available to the device. |
756 | * | ||
757 | * @dev_priv: Pointer to a device private struct. | ||
758 | * @srf: Pointer to a struct vmw_surface. | ||
759 | * | ||
760 | * If the surface doesn't have a hw id, allocate one, and optionally | ||
761 | * DMA the backed up surface contents to the device. | ||
762 | * | ||
763 | * Returns -EBUSY if there weren't sufficient device resources to | ||
764 | * complete the validation. Retry after freeing up resources. | ||
765 | * | ||
766 | * May return other errors if the kernel is out of guest resources. | ||
767 | */ | ||
768 | int vmw_surface_do_validate(struct vmw_private *dev_priv, | ||
769 | struct vmw_surface *srf) | ||
425 | { | 770 | { |
426 | int ret; | ||
427 | struct { | ||
428 | SVGA3dCmdHeader header; | ||
429 | SVGA3dCmdDefineSurface body; | ||
430 | } *cmd; | ||
431 | SVGA3dSize *cmd_size; | ||
432 | struct vmw_resource *res = &srf->res; | 771 | struct vmw_resource *res = &srf->res; |
433 | struct drm_vmw_size *src_size; | 772 | struct list_head val_list; |
434 | size_t submit_size; | 773 | struct ttm_validate_buffer val_buf; |
435 | uint32_t cmd_len; | 774 | uint32_t submit_size; |
436 | int i; | 775 | uint8_t *cmd; |
776 | int ret; | ||
437 | 777 | ||
438 | BUG_ON(res_free == NULL); | 778 | if (likely(res->id != -1)) |
439 | ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr, | 779 | return 0; |
440 | VMW_RES_SURFACE, res_free); | 780 | |
781 | if (unlikely(dev_priv->used_memory_size + srf->backup_size >= | ||
782 | dev_priv->memory_size)) | ||
783 | return -EBUSY; | ||
784 | |||
785 | /* | ||
786 | * Reserve and validate the backup DMA bo. | ||
787 | */ | ||
788 | |||
789 | if (srf->backup) { | ||
790 | INIT_LIST_HEAD(&val_list); | ||
791 | val_buf.bo = ttm_bo_reference(srf->backup); | ||
792 | val_buf.new_sync_obj_arg = (void *)((unsigned long) | ||
793 | DRM_VMW_FENCE_FLAG_EXEC); | ||
794 | list_add_tail(&val_buf.head, &val_list); | ||
795 | ret = ttm_eu_reserve_buffers(&val_list); | ||
796 | if (unlikely(ret != 0)) | ||
797 | goto out_no_reserve; | ||
798 | |||
799 | ret = ttm_bo_validate(srf->backup, &vmw_srf_placement, | ||
800 | true, false, false); | ||
801 | if (unlikely(ret != 0)) | ||
802 | goto out_no_validate; | ||
803 | } | ||
441 | 804 | ||
805 | /* | ||
806 | * Alloc id for the resource. | ||
807 | */ | ||
808 | |||
809 | ret = vmw_resource_alloc_id(dev_priv, res); | ||
442 | if (unlikely(ret != 0)) { | 810 | if (unlikely(ret != 0)) { |
443 | res_free(res); | 811 | DRM_ERROR("Failed to allocate a surface id.\n"); |
444 | return ret; | 812 | goto out_no_id; |
813 | } | ||
814 | if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) { | ||
815 | ret = -EBUSY; | ||
816 | goto out_no_fifo; | ||
445 | } | 817 | } |
446 | 818 | ||
447 | submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize); | 819 | |
448 | cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize); | 820 | /* |
821 | * Encode the surface define and dma commands. | ||
822 | */ | ||
823 | |||
824 | submit_size = vmw_surface_define_size(srf); | ||
825 | if (srf->backup) | ||
826 | submit_size += vmw_surface_dma_size(srf); | ||
449 | 827 | ||
450 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | 828 | cmd = vmw_fifo_reserve(dev_priv, submit_size); |
451 | if (unlikely(cmd == NULL)) { | 829 | if (unlikely(cmd == NULL)) { |
452 | DRM_ERROR("Fifo reserve failed for create surface.\n"); | 830 | DRM_ERROR("Failed reserving FIFO space for surface " |
453 | vmw_resource_unreference(&res); | 831 | "validation.\n"); |
454 | return -ENOMEM; | 832 | ret = -ENOMEM; |
833 | goto out_no_fifo; | ||
455 | } | 834 | } |
456 | 835 | ||
457 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE); | 836 | vmw_surface_define_encode(srf, cmd); |
458 | cmd->header.size = cpu_to_le32(cmd_len); | 837 | if (srf->backup) { |
459 | cmd->body.sid = cpu_to_le32(res->id); | 838 | SVGAGuestPtr ptr; |
460 | cmd->body.surfaceFlags = cpu_to_le32(srf->flags); | 839 | |
461 | cmd->body.format = cpu_to_le32(srf->format); | 840 | cmd += vmw_surface_define_size(srf); |
462 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { | 841 | vmw_bo_get_guest_ptr(srf->backup, &ptr); |
463 | cmd->body.face[i].numMipLevels = | 842 | vmw_surface_dma_encode(srf, cmd, &ptr, true); |
464 | cpu_to_le32(srf->mip_levels[i]); | ||
465 | } | 843 | } |
466 | 844 | ||
467 | cmd += 1; | 845 | vmw_fifo_commit(dev_priv, submit_size); |
468 | cmd_size = (SVGA3dSize *) cmd; | ||
469 | src_size = srf->sizes; | ||
470 | 846 | ||
471 | for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) { | 847 | /* |
472 | cmd_size->width = cpu_to_le32(src_size->width); | 848 | * Create a fence object and fence the backup buffer. |
473 | cmd_size->height = cpu_to_le32(src_size->height); | 849 | */ |
474 | cmd_size->depth = cpu_to_le32(src_size->depth); | 850 | |
851 | if (srf->backup) { | ||
852 | struct vmw_fence_obj *fence; | ||
853 | |||
854 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, | ||
855 | &fence, NULL); | ||
856 | ttm_eu_fence_buffer_objects(&val_list, fence); | ||
857 | if (likely(fence != NULL)) | ||
858 | vmw_fence_obj_unreference(&fence); | ||
859 | ttm_bo_unref(&val_buf.bo); | ||
860 | ttm_bo_unref(&srf->backup); | ||
475 | } | 861 | } |
476 | 862 | ||
863 | /* | ||
864 | * Surface memory usage accounting. | ||
865 | */ | ||
866 | |||
867 | dev_priv->used_memory_size += srf->backup_size; | ||
868 | |||
869 | return 0; | ||
870 | |||
871 | out_no_fifo: | ||
872 | vmw_resource_release_id(res); | ||
873 | out_no_id: | ||
874 | out_no_validate: | ||
875 | if (srf->backup) | ||
876 | ttm_eu_backoff_reservation(&val_list); | ||
877 | out_no_reserve: | ||
878 | if (srf->backup) | ||
879 | ttm_bo_unref(&val_buf.bo); | ||
880 | return ret; | ||
881 | } | ||
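
Stripped of the surface specifics, the backup-buffer handling above is TTM's standard reserve/validate/fence sequence. A condensed sketch of the calls used in this function, with the error paths elided (bo, dev_priv, ret and fence as in the function above):

	struct list_head val_list;
	struct ttm_validate_buffer val_buf;

	INIT_LIST_HEAD(&val_list);
	val_buf.bo = ttm_bo_reference(bo);
	val_buf.new_sync_obj_arg =
		(void *)(unsigned long) DRM_VMW_FENCE_FLAG_EXEC;
	list_add_tail(&val_buf.head, &val_list);

	ret = ttm_eu_reserve_buffers(&val_list);	/* lock the bo */
	ret = ttm_bo_validate(bo, &vmw_srf_placement,	/* move into place */
			      true, false, false);

	/* ... reserve fifo space and emit commands referencing the bo ... */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	ttm_eu_fence_buffer_objects(&val_list, fence);	/* fence and unlock */
	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);
	ttm_bo_unref(&val_buf.bo);
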
882 | |||
883 | /** | ||
884 | * vmw_surface_evict - Evict a hw surface. | ||
885 | * | ||
886 | * @dev_priv: Pointer to a device private struct. | ||
887 | * @srf: Pointer to a struct vmw_surface | ||
888 | * | ||
889 | * DMA the contents of a hw surface to a backup guest buffer object, | ||
890 | * and destroy the hw surface, releasing its id. | ||
891 | */ | ||
892 | int vmw_surface_evict(struct vmw_private *dev_priv, | ||
893 | struct vmw_surface *srf) | ||
894 | { | ||
895 | struct vmw_resource *res = &srf->res; | ||
896 | struct list_head val_list; | ||
897 | struct ttm_validate_buffer val_buf; | ||
898 | uint32_t submit_size; | ||
899 | uint8_t *cmd; | ||
900 | int ret; | ||
901 | struct vmw_fence_obj *fence; | ||
902 | SVGAGuestPtr ptr; | ||
903 | |||
904 | BUG_ON(res->id == -1); | ||
905 | |||
906 | /* | ||
907 | * Create a surface backup buffer object. | ||
908 | */ | ||
909 | |||
910 | if (!srf->backup) { | ||
911 | ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size, | ||
912 | ttm_bo_type_device, | ||
913 | &vmw_srf_placement, 0, 0, true, | ||
914 | NULL, &srf->backup); | ||
915 | if (unlikely(ret != 0)) | ||
916 | return ret; | ||
917 | } | ||
918 | |||
919 | /* | ||
920 | * Reserve and validate the backup DMA bo. | ||
921 | */ | ||
922 | |||
923 | INIT_LIST_HEAD(&val_list); | ||
924 | val_buf.bo = ttm_bo_reference(srf->backup); | ||
925 | val_buf.new_sync_obj_arg = (void *)(unsigned long) | ||
926 | DRM_VMW_FENCE_FLAG_EXEC; | ||
927 | list_add_tail(&val_buf.head, &val_list); | ||
928 | ret = ttm_eu_reserve_buffers(&val_list); | ||
929 | if (unlikely(ret != 0)) | ||
930 | goto out_no_reserve; | ||
931 | |||
932 | ret = ttm_bo_validate(srf->backup, &vmw_srf_placement, | ||
933 | true, false, false); | ||
934 | if (unlikely(ret != 0)) | ||
935 | goto out_no_validate; | ||
936 | |||
937 | |||
938 | /* | ||
939 | * Encode the dma and surface destroy commands. | ||
940 | */ | ||
941 | |||
942 | submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size(); | ||
943 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | ||
944 | if (unlikely(cmd == NULL)) { | ||
945 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
946 | "eviction.\n"); | ||
947 | ret = -ENOMEM; | ||
948 | goto out_no_fifo; | ||
949 | } | ||
950 | |||
951 | vmw_bo_get_guest_ptr(srf->backup, &ptr); | ||
952 | vmw_surface_dma_encode(srf, cmd, &ptr, false); | ||
953 | cmd += vmw_surface_dma_size(srf); | ||
954 | vmw_surface_destroy_encode(res->id, cmd); | ||
477 | vmw_fifo_commit(dev_priv, submit_size); | 955 | vmw_fifo_commit(dev_priv, submit_size); |
956 | |||
957 | /* | ||
958 | * Surface memory usage accounting. | ||
959 | */ | ||
960 | |||
961 | dev_priv->used_memory_size -= srf->backup_size; | ||
962 | |||
963 | /* | ||
964 | * Create a fence object and fence the DMA buffer. | ||
965 | */ | ||
966 | |||
967 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, | ||
968 | &fence, NULL); | ||
969 | ttm_eu_fence_buffer_objects(&val_list, fence); | ||
970 | if (likely(fence != NULL)) | ||
971 | vmw_fence_obj_unreference(&fence); | ||
972 | ttm_bo_unref(&val_buf.bo); | ||
973 | |||
974 | /* | ||
975 | * Release the surface ID. | ||
976 | */ | ||
977 | |||
978 | vmw_resource_release_id(res); | ||
979 | |||
980 | return 0; | ||
981 | |||
982 | out_no_fifo: | ||
983 | out_no_validate: | ||
984 | if (srf->backup) | ||
985 | ttm_eu_backoff_reservation(&val_list); | ||
986 | out_no_reserve: | ||
987 | ttm_bo_unref(&val_buf.bo); | ||
988 | ttm_bo_unref(&srf->backup); | ||
989 | return ret; | ||
990 | } | ||
991 | |||
992 | |||
993 | /** | ||
994 | * vmw_surface_validate - make a surface available to the device, evicting | ||
995 | * other surfaces if needed. | ||
996 | * | ||
997 | * @dev_priv: Pointer to a device private struct. | ||
998 | * @srf: Pointer to a struct vmw_surface. | ||
999 | * | ||
1000 | * Try to validate a surface; if that fails due to limited device resources, | ||
1001 | * repeatedly try to evict other surfaces until the request can be | ||
1002 | * accommodated. | ||
1003 | * | ||
1004 | * May return errors if out of resources. | ||
1005 | */ | ||
1006 | int vmw_surface_validate(struct vmw_private *dev_priv, | ||
1007 | struct vmw_surface *srf) | ||
1008 | { | ||
1009 | int ret; | ||
1010 | struct vmw_surface *evict_srf; | ||
1011 | |||
1012 | do { | ||
1013 | write_lock(&dev_priv->resource_lock); | ||
1014 | list_del_init(&srf->lru_head); | ||
1015 | write_unlock(&dev_priv->resource_lock); | ||
1016 | |||
1017 | ret = vmw_surface_do_validate(dev_priv, srf); | ||
1018 | if (likely(ret != -EBUSY)) | ||
1019 | break; | ||
1020 | |||
1021 | write_lock(&dev_priv->resource_lock); | ||
1022 | if (list_empty(&dev_priv->surface_lru)) { | ||
1023 | DRM_ERROR("Out of device memory for surfaces.\n"); | ||
1024 | ret = -EBUSY; | ||
1025 | write_unlock(&dev_priv->resource_lock); | ||
1026 | break; | ||
1027 | } | ||
1028 | |||
1029 | evict_srf = vmw_surface_reference | ||
1030 | (list_first_entry(&dev_priv->surface_lru, | ||
1031 | struct vmw_surface, | ||
1032 | lru_head)); | ||
1033 | list_del_init(&evict_srf->lru_head); | ||
1034 | |||
1035 | write_unlock(&dev_priv->resource_lock); | ||
1036 | (void) vmw_surface_evict(dev_priv, evict_srf); | ||
1037 | |||
1038 | vmw_surface_unreference(&evict_srf); | ||
1039 | |||
1040 | } while (1); | ||
1041 | |||
1042 | if (unlikely(ret != 0 && srf->res.id != -1)) { | ||
1043 | write_lock(&dev_priv->resource_lock); | ||
1044 | list_add_tail(&srf->lru_head, &dev_priv->surface_lru); | ||
1045 | write_unlock(&dev_priv->resource_lock); | ||
1046 | } | ||
1047 | |||
1048 | return ret; | ||
1049 | } | ||
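
A hypothetical caller (not a call site shown in this patch) would validate before emitting commands that reference the surface id:

	ret = vmw_surface_validate(dev_priv, srf);
	if (unlikely(ret != 0))
		return ret;	/* -EBUSY: surface memory exhausted
				 * even after evicting every LRU entry */

	/* srf->res.id is now valid, and the surface stays off the LRU
	 * until vmw_resource_unreserve() makes it evictable again. */
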
1050 | |||
1051 | |||
1052 | /** | ||
1053 | * vmw_surface_remove_from_lists - Remove surface resources from lookup lists | ||
1054 | * | ||
1055 | * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface | ||
1056 | * | ||
1057 | * As part of the resource destruction, remove the surface from any | ||
1058 | * lookup lists. | ||
1059 | */ | ||
1060 | static void vmw_surface_remove_from_lists(struct vmw_resource *res) | ||
1061 | { | ||
1062 | struct vmw_surface *srf = container_of(res, struct vmw_surface, res); | ||
1063 | |||
1064 | list_del_init(&srf->lru_head); | ||
1065 | } | ||
1066 | |||
1067 | int vmw_surface_init(struct vmw_private *dev_priv, | ||
1068 | struct vmw_surface *srf, | ||
1069 | void (*res_free) (struct vmw_resource *res)) | ||
1070 | { | ||
1071 | int ret; | ||
1072 | struct vmw_resource *res = &srf->res; | ||
1073 | |||
1074 | BUG_ON(res_free == NULL); | ||
1075 | INIT_LIST_HEAD(&srf->lru_head); | ||
1076 | ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr, | ||
1077 | VMW_RES_SURFACE, true, res_free, | ||
1078 | vmw_surface_remove_from_lists); | ||
1079 | |||
1080 | if (unlikely(ret != 0)) | ||
1081 | res_free(res); | ||
1082 | |||
1083 | /* | ||
1084 | * The surface won't be visible to the hardware until the | ||
1085 | * first surface validate. | ||
1086 | */ | ||
1087 | |||
478 | (void) vmw_3d_resource_inc(dev_priv, false); | 1088 | (void) vmw_3d_resource_inc(dev_priv, false); |
479 | vmw_resource_activate(res, vmw_hw_surface_destroy); | 1089 | vmw_resource_activate(res, vmw_hw_surface_destroy); |
480 | return 0; | 1090 | return ret; |
481 | } | 1091 | } |
482 | 1092 | ||
483 | static void vmw_user_surface_free(struct vmw_resource *res) | 1093 | static void vmw_user_surface_free(struct vmw_resource *res) |
@@ -486,11 +1096,54 @@ static void vmw_user_surface_free(struct vmw_resource *res) | |||
486 | struct vmw_user_surface *user_srf = | 1096 | struct vmw_user_surface *user_srf = |
487 | container_of(srf, struct vmw_user_surface, srf); | 1097 | container_of(srf, struct vmw_user_surface, srf); |
488 | 1098 | ||
1099 | if (srf->backup) | ||
1100 | ttm_bo_unref(&srf->backup); | ||
1101 | kfree(srf->offsets); | ||
489 | kfree(srf->sizes); | 1102 | kfree(srf->sizes); |
490 | kfree(srf->snooper.image); | 1103 | kfree(srf->snooper.image); |
491 | kfree(user_srf); | 1104 | kfree(user_srf); |
492 | } | 1105 | } |
493 | 1106 | ||
1107 | /** | ||
1108 | * vmw_resource_unreserve - unreserve resources previously reserved for | ||
1109 | * command submission. | ||
1110 | * | ||
1111 | * @list: list of resources to unreserve. | ||
1112 | * | ||
1113 | * Currently only surfaces are considered: unreserving a surface | ||
1114 | * means putting it back on the device's surface LRU list, | ||
1115 | * so that it can be evicted if necessary. | ||
1116 | * The function traverses the resource list and, | ||
1117 | * for each resource that turns out to be a surface, | ||
1118 | * moves it back onto that LRU list. | ||
1119 | */ | ||
1120 | void vmw_resource_unreserve(struct list_head *list) | ||
1121 | { | ||
1122 | struct vmw_resource *res; | ||
1123 | struct vmw_surface *srf; | ||
1124 | rwlock_t *lock = NULL; | ||
1125 | |||
1126 | list_for_each_entry(res, list, validate_head) { | ||
1127 | |||
1128 | if (res->res_free != &vmw_surface_res_free && | ||
1129 | res->res_free != &vmw_user_surface_free) | ||
1130 | continue; | ||
1131 | |||
1132 | if (unlikely(lock == NULL)) { | ||
1133 | lock = &res->dev_priv->resource_lock; | ||
1134 | write_lock(lock); | ||
1135 | } | ||
1136 | |||
1137 | srf = container_of(res, struct vmw_surface, res); | ||
1138 | list_del_init(&srf->lru_head); | ||
1139 | list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru); | ||
1140 | } | ||
1141 | |||
1142 | if (lock != NULL) | ||
1143 | write_unlock(lock); | ||
1144 | } | ||
1145 | |||
1146 | |||
494 | int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv, | 1147 | int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv, |
495 | struct ttm_object_file *tfile, | 1148 | struct ttm_object_file *tfile, |
496 | uint32_t handle, struct vmw_surface **out) | 1149 | uint32_t handle, struct vmw_surface **out) |
@@ -567,7 +1220,12 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
567 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 1220 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
568 | struct drm_vmw_size __user *user_sizes; | 1221 | struct drm_vmw_size __user *user_sizes; |
569 | int ret; | 1222 | int ret; |
570 | int i; | 1223 | int i, j; |
1224 | uint32_t cur_bo_offset; | ||
1225 | struct drm_vmw_size *cur_size; | ||
1226 | struct vmw_surface_offset *cur_offset; | ||
1227 | uint32_t stride_bpp; | ||
1228 | uint32_t bpp; | ||
571 | 1229 | ||
572 | if (unlikely(user_srf == NULL)) | 1230 | if (unlikely(user_srf == NULL)) |
573 | return -ENOMEM; | 1231 | return -ENOMEM; |
@@ -578,6 +1236,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
578 | srf->flags = req->flags; | 1236 | srf->flags = req->flags; |
579 | srf->format = req->format; | 1237 | srf->format = req->format; |
580 | srf->scanout = req->scanout; | 1238 | srf->scanout = req->scanout; |
1239 | srf->backup = NULL; | ||
1240 | |||
581 | memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); | 1241 | memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); |
582 | srf->num_sizes = 0; | 1242 | srf->num_sizes = 0; |
583 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) | 1243 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) |
@@ -594,6 +1254,12 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
594 | ret = -ENOMEM; | 1254 | ret = -ENOMEM; |
595 | goto out_err0; | 1255 | goto out_err0; |
596 | } | 1256 | } |
1257 | srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets), | ||
1258 | GFP_KERNEL); | ||
1259 | if (unlikely(srf->offsets == NULL)) { | ||
1260 | ret = -ENOMEM; | ||
1261 | goto out_no_offsets; | ||
1262 | } | ||
597 | 1263 | ||
598 | user_sizes = (struct drm_vmw_size __user *)(unsigned long) | 1264 | user_sizes = (struct drm_vmw_size __user *)(unsigned long) |
599 | req->size_addr; | 1265 | req->size_addr; |
@@ -605,6 +1271,29 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
605 | goto out_err1; | 1271 | goto out_err1; |
606 | } | 1272 | } |
607 | 1273 | ||
1274 | cur_bo_offset = 0; | ||
1275 | cur_offset = srf->offsets; | ||
1276 | cur_size = srf->sizes; | ||
1277 | |||
1278 | bpp = vmw_sf_bpp[srf->format].bpp; | ||
1279 | stride_bpp = vmw_sf_bpp[srf->format].s_bpp; | ||
1280 | |||
1281 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { | ||
1282 | for (j = 0; j < srf->mip_levels[i]; ++j) { | ||
1283 | uint32_t stride = | ||
1284 | (cur_size->width * stride_bpp + 7) >> 3; | ||
1285 | |||
1286 | cur_offset->face = i; | ||
1287 | cur_offset->mip = j; | ||
1288 | cur_offset->bo_offset = cur_bo_offset; | ||
1289 | cur_bo_offset += stride * cur_size->height * | ||
1290 | cur_size->depth * bpp / stride_bpp; | ||
1291 | ++cur_offset; | ||
1292 | ++cur_size; | ||
1293 | } | ||
1294 | } | ||
1295 | srf->backup_size = cur_bo_offset; | ||
1296 | |||
608 | if (srf->scanout && | 1297 | if (srf->scanout && |
609 | srf->num_sizes == 1 && | 1298 | srf->num_sizes == 1 && |
610 | srf->sizes[0].width == 64 && | 1299 | srf->sizes[0].width == 64 && |
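
For a concrete picture of the layout the offsets loop above produces, take a hypothetical one-face SVGA3D_A8R8G8B8 surface (bpp = s_bpp = 32) whose user-supplied sizes form the usual halving mip chain (a worked illustration, not part of the patch):

	/* mip 0, 256x256: stride = (256*32 + 7) >> 3 = 1024 bytes,
	 *                 bo_offset = 0,       adds 1024*256*32/32 = 262144
	 * mip 1, 128x128: stride = 512 bytes,
	 *                 bo_offset = 262144,  adds 65536
	 * mip 2,  64x64:  stride = 256 bytes,
	 *                 bo_offset = 327680,  adds 16384
	 *
	 * srf->backup_size = 262144 + 65536 + 16384 = 344064 bytes.
	 */
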
@@ -653,6 +1342,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
653 | vmw_resource_unreference(&res); | 1342 | vmw_resource_unreference(&res); |
654 | return 0; | 1343 | return 0; |
655 | out_err1: | 1344 | out_err1: |
1345 | kfree(srf->offsets); | ||
1346 | out_no_offsets: | ||
656 | kfree(srf->sizes); | 1347 | kfree(srf->sizes); |
657 | out_err0: | 1348 | out_err0: |
658 | kfree(user_srf); | 1349 | kfree(user_srf); |
@@ -969,7 +1660,7 @@ static int vmw_stream_init(struct vmw_private *dev_priv, | |||
969 | int ret; | 1660 | int ret; |
970 | 1661 | ||
971 | ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr, | 1662 | ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr, |
972 | VMW_RES_STREAM, res_free); | 1663 | VMW_RES_STREAM, false, res_free, NULL); |
973 | 1664 | ||
974 | if (unlikely(ret != 0)) { | 1665 | if (unlikely(ret != 0)) { |
975 | if (res_free == NULL) | 1666 | if (res_free == NULL) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c new file mode 100644 index 000000000000..347e40699443 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | |||
@@ -0,0 +1,566 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | |||
28 | #include "vmwgfx_kms.h" | ||
29 | |||
30 | |||
31 | #define vmw_crtc_to_sou(x) \ | ||
32 | container_of(x, struct vmw_screen_object_unit, base.crtc) | ||
33 | #define vmw_encoder_to_sou(x) \ | ||
34 | container_of(x, struct vmw_screen_object_unit, base.encoder) | ||
35 | #define vmw_connector_to_sou(x) \ | ||
36 | container_of(x, struct vmw_screen_object_unit, base.connector) | ||
37 | |||
38 | struct vmw_screen_object_display { | ||
39 | struct list_head active; | ||
40 | |||
41 | unsigned num_active; | ||
42 | unsigned last_num_active; | ||
43 | |||
44 | struct vmw_framebuffer *fb; | ||
45 | }; | ||
46 | |||
47 | /** | ||
48 | * Display unit using screen objects. | ||
49 | */ | ||
50 | struct vmw_screen_object_unit { | ||
51 | struct vmw_display_unit base; | ||
52 | |||
53 | unsigned long buffer_size; /**< Size of allocated buffer */ | ||
54 | struct vmw_dma_buffer *buffer; /**< Backing store buffer */ | ||
55 | |||
56 | bool defined; | ||
57 | |||
58 | struct list_head active; | ||
59 | }; | ||
60 | |||
61 | static void vmw_sou_destroy(struct vmw_screen_object_unit *sou) | ||
62 | { | ||
63 | list_del_init(&sou->active); | ||
64 | vmw_display_unit_cleanup(&sou->base); | ||
65 | kfree(sou); | ||
66 | } | ||
67 | |||
68 | |||
69 | /* | ||
70 | * Screen Object Display Unit CRTC functions | ||
71 | */ | ||
72 | |||
73 | static void vmw_sou_crtc_destroy(struct drm_crtc *crtc) | ||
74 | { | ||
75 | vmw_sou_destroy(vmw_crtc_to_sou(crtc)); | ||
76 | } | ||
77 | |||
78 | static int vmw_sou_del_active(struct vmw_private *vmw_priv, | ||
79 | struct vmw_screen_object_unit *sou) | ||
80 | { | ||
81 | struct vmw_screen_object_display *ld = vmw_priv->sou_priv; | ||
82 | if (list_empty(&sou->active)) | ||
83 | return 0; | ||
84 | |||
85 | /* Must init, otherwise list_empty(&sou->active) will not work. */ | ||
86 | list_del_init(&sou->active); | ||
87 | if (--(ld->num_active) == 0) { | ||
88 | BUG_ON(!ld->fb); | ||
89 | if (ld->fb->unpin) | ||
90 | ld->fb->unpin(ld->fb); | ||
91 | ld->fb = NULL; | ||
92 | } | ||
93 | |||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | static int vmw_sou_add_active(struct vmw_private *vmw_priv, | ||
98 | struct vmw_screen_object_unit *sou, | ||
99 | struct vmw_framebuffer *vfb) | ||
100 | { | ||
101 | struct vmw_screen_object_display *ld = vmw_priv->sou_priv; | ||
102 | struct vmw_screen_object_unit *entry; | ||
103 | struct list_head *at; | ||
104 | |||
105 | BUG_ON(!ld->num_active && ld->fb); | ||
106 | if (vfb != ld->fb) { | ||
107 | if (ld->fb && ld->fb->unpin) | ||
108 | ld->fb->unpin(ld->fb); | ||
109 | if (vfb->pin) | ||
110 | vfb->pin(vfb); | ||
111 | ld->fb = vfb; | ||
112 | } | ||
113 | |||
114 | if (!list_empty(&sou->active)) | ||
115 | return 0; | ||
116 | |||
117 | at = &ld->active; | ||
118 | list_for_each_entry(entry, &ld->active, active) { | ||
119 | if (entry->base.unit > sou->base.unit) | ||
120 | break; | ||
121 | |||
122 | at = &entry->active; | ||
123 | } | ||
124 | |||
125 | list_add(&sou->active, at); | ||
126 | |||
127 | ld->num_active++; | ||
128 | |||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | /** | ||
133 | * Send the fifo command to create a screen. | ||
134 | */ | ||
135 | static int vmw_sou_fifo_create(struct vmw_private *dev_priv, | ||
136 | struct vmw_screen_object_unit *sou, | ||
137 | uint32_t x, uint32_t y, | ||
138 | struct drm_display_mode *mode) | ||
139 | { | ||
140 | size_t fifo_size; | ||
141 | |||
142 | struct { | ||
143 | struct { | ||
144 | uint32_t cmdType; | ||
145 | } header; | ||
146 | SVGAScreenObject obj; | ||
147 | } *cmd; | ||
148 | |||
149 | BUG_ON(!sou->buffer); | ||
150 | |||
151 | fifo_size = sizeof(*cmd); | ||
152 | cmd = vmw_fifo_reserve(dev_priv, fifo_size); | ||
153 | /* The hardware has hung, nothing we can do about it here. */ | ||
154 | if (unlikely(cmd == NULL)) { | ||
155 | DRM_ERROR("Fifo reserve failed.\n"); | ||
156 | return -ENOMEM; | ||
157 | } | ||
158 | |||
159 | memset(cmd, 0, fifo_size); | ||
160 | cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN; | ||
161 | cmd->obj.structSize = sizeof(SVGAScreenObject); | ||
162 | cmd->obj.id = sou->base.unit; | ||
163 | cmd->obj.flags = SVGA_SCREEN_HAS_ROOT | | ||
164 | (sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0); | ||
165 | cmd->obj.size.width = mode->hdisplay; | ||
166 | cmd->obj.size.height = mode->vdisplay; | ||
167 | cmd->obj.root.x = x; | ||
168 | cmd->obj.root.y = y; | ||
169 | |||
170 | /* Ok to assume that buffer is pinned in vram */ | ||
171 | vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr); | ||
172 | cmd->obj.backingStore.pitch = mode->hdisplay * 4; | ||
173 | |||
174 | vmw_fifo_commit(dev_priv, fifo_size); | ||
175 | |||
176 | sou->defined = true; | ||
177 | |||
178 | return 0; | ||
179 | } | ||
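
The device forces a 32-bit backing store, which is where both the pitch above and the buffer size allocated in vmw_sou_crtc_set_config() come from. A worked example for one hypothetical mode (values illustrative, not from the patch):

	/* Hypothetical 1024x768 screen object, 4 bytes per pixel: */
	uint32_t pitch = 1024 * 4;	/* 4096 bytes per scanline */
	size_t size = 1024 * 768 * 4;	/* 3145728-byte backing buffer,
					 * allocated by set_config before
					 * this define command is sent */
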
180 | |||
181 | /** | ||
182 | * Send the fifo command to destroy a screen. | ||
183 | */ | ||
184 | static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv, | ||
185 | struct vmw_screen_object_unit *sou) | ||
186 | { | ||
187 | size_t fifo_size; | ||
188 | int ret; | ||
189 | |||
190 | struct { | ||
191 | struct { | ||
192 | uint32_t cmdType; | ||
193 | } header; | ||
194 | SVGAFifoCmdDestroyScreen body; | ||
195 | } *cmd; | ||
196 | |||
197 | /* no need to do anything */ | ||
198 | if (unlikely(!sou->defined)) | ||
199 | return 0; | ||
200 | |||
201 | fifo_size = sizeof(*cmd); | ||
202 | cmd = vmw_fifo_reserve(dev_priv, fifo_size); | ||
203 | /* the hardware has hung, nothing we can do about it here */ | ||
204 | if (unlikely(cmd == NULL)) { | ||
205 | DRM_ERROR("Fifo reserve failed.\n"); | ||
206 | return -ENOMEM; | ||
207 | } | ||
208 | |||
209 | memset(cmd, 0, fifo_size); | ||
210 | cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN; | ||
211 | cmd->body.screenId = sou->base.unit; | ||
212 | |||
213 | vmw_fifo_commit(dev_priv, fifo_size); | ||
214 | |||
215 | /* Force sync */ | ||
216 | ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ); | ||
217 | if (unlikely(ret != 0)) | ||
218 | DRM_ERROR("Failed to sync with HW"); | ||
219 | else | ||
220 | sou->defined = false; | ||
221 | |||
222 | return ret; | ||
223 | } | ||
224 | |||
225 | /** | ||
226 | * Free the backing store. | ||
227 | */ | ||
228 | static void vmw_sou_backing_free(struct vmw_private *dev_priv, | ||
229 | struct vmw_screen_object_unit *sou) | ||
230 | { | ||
231 | struct ttm_buffer_object *bo; | ||
232 | |||
233 | if (unlikely(sou->buffer == NULL)) | ||
234 | return; | ||
235 | |||
236 | bo = &sou->buffer->base; | ||
237 | ttm_bo_unref(&bo); | ||
238 | sou->buffer = NULL; | ||
239 | sou->buffer_size = 0; | ||
240 | } | ||
241 | |||
242 | /** | ||
243 | * Allocate the backing store for the buffer. | ||
244 | */ | ||
245 | static int vmw_sou_backing_alloc(struct vmw_private *dev_priv, | ||
246 | struct vmw_screen_object_unit *sou, | ||
247 | unsigned long size) | ||
248 | { | ||
249 | int ret; | ||
250 | |||
251 | if (sou->buffer_size == size) | ||
252 | return 0; | ||
253 | |||
254 | if (sou->buffer) | ||
255 | vmw_sou_backing_free(dev_priv, sou); | ||
256 | |||
257 | sou->buffer = kzalloc(sizeof(*sou->buffer), GFP_KERNEL); | ||
258 | if (unlikely(sou->buffer == NULL)) | ||
259 | return -ENOMEM; | ||
260 | |||
261 | /* After we have allocated the backing store, we might not be able | ||
262 | * to resume the overlays; this is preferred to failing the alloc. | ||
263 | */ | ||
264 | vmw_overlay_pause_all(dev_priv); | ||
265 | ret = vmw_dmabuf_init(dev_priv, sou->buffer, size, | ||
266 | &vmw_vram_ne_placement, | ||
267 | false, &vmw_dmabuf_bo_free); | ||
268 | vmw_overlay_resume_all(dev_priv); | ||
269 | |||
270 | if (unlikely(ret != 0)) | ||
271 | sou->buffer = NULL; /* vmw_dmabuf_init frees on error */ | ||
272 | else | ||
273 | sou->buffer_size = size; | ||
274 | |||
275 | return ret; | ||
276 | } | ||
277 | |||
278 | static int vmw_sou_crtc_set_config(struct drm_mode_set *set) | ||
279 | { | ||
280 | struct vmw_private *dev_priv; | ||
281 | struct vmw_screen_object_unit *sou; | ||
282 | struct drm_connector *connector; | ||
283 | struct drm_display_mode *mode; | ||
284 | struct drm_encoder *encoder; | ||
285 | struct vmw_framebuffer *vfb; | ||
286 | struct drm_framebuffer *fb; | ||
287 | struct drm_crtc *crtc; | ||
288 | int ret = 0; | ||
289 | |||
290 | if (!set) | ||
291 | return -EINVAL; | ||
292 | |||
293 | if (!set->crtc) | ||
294 | return -EINVAL; | ||
295 | |||
296 | /* get the sou */ | ||
297 | crtc = set->crtc; | ||
298 | sou = vmw_crtc_to_sou(crtc); | ||
299 | vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL; | ||
300 | dev_priv = vmw_priv(crtc->dev); | ||
301 | |||
302 | if (set->num_connectors > 1) { | ||
303 | DRM_ERROR("to many connectors\n"); | ||
304 | return -EINVAL; | ||
305 | } | ||
306 | |||
307 | if (set->num_connectors == 1 && | ||
308 | set->connectors[0] != &sou->base.connector) { | ||
309 | DRM_ERROR("connector doesn't match %p %p\n", | ||
310 | set->connectors[0], &sou->base.connector); | ||
311 | return -EINVAL; | ||
312 | } | ||
313 | |||
314 | /* sou only supports one fb active at a time */ | ||
315 | if (dev_priv->sou_priv->fb && vfb && | ||
316 | !(dev_priv->sou_priv->num_active == 1 && | ||
317 | !list_empty(&sou->active)) && | ||
318 | dev_priv->sou_priv->fb != vfb) { | ||
319 | DRM_ERROR("Multiple framebuffers not supported\n"); | ||
320 | return -EINVAL; | ||
321 | } | ||
322 | |||
323 | /* since they always map one to one these are safe */ | ||
324 | connector = &sou->base.connector; | ||
325 | encoder = &sou->base.encoder; | ||
326 | |||
327 | /* should we turn the crtc off */ | ||
328 | if (set->num_connectors == 0 || !set->mode || !set->fb) { | ||
329 | ret = vmw_sou_fifo_destroy(dev_priv, sou); | ||
330 | /* the hardware has hung; don't do anything more */ | ||
331 | if (unlikely(ret != 0)) | ||
332 | return ret; | ||
333 | |||
334 | connector->encoder = NULL; | ||
335 | encoder->crtc = NULL; | ||
336 | crtc->fb = NULL; | ||
337 | crtc->x = 0; | ||
338 | crtc->y = 0; | ||
339 | |||
340 | vmw_sou_del_active(dev_priv, sou); | ||
341 | |||
342 | vmw_sou_backing_free(dev_priv, sou); | ||
343 | |||
344 | return 0; | ||
345 | } | ||
346 | |||
347 | |||
348 | /* we now know we want to set a mode */ | ||
349 | mode = set->mode; | ||
350 | fb = set->fb; | ||
351 | |||
352 | if (set->x + mode->hdisplay > fb->width || | ||
353 | set->y + mode->vdisplay > fb->height) { | ||
354 | DRM_ERROR("set outside of framebuffer\n"); | ||
355 | return -EINVAL; | ||
356 | } | ||
357 | |||
358 | vmw_fb_off(dev_priv); | ||
359 | |||
360 | if (mode->hdisplay != crtc->mode.hdisplay || | ||
361 | mode->vdisplay != crtc->mode.vdisplay) { | ||
362 | /* No need to check whether the depth differs: the backing | ||
363 | * store depth is forced to 4 bytes per pixel by the device. | ||
364 | */ | ||
365 | |||
366 | ret = vmw_sou_fifo_destroy(dev_priv, sou); | ||
367 | /* the hardware has hung don't do anything more */ | ||
368 | if (unlikely(ret != 0)) | ||
369 | return ret; | ||
370 | |||
371 | vmw_sou_backing_free(dev_priv, sou); | ||
372 | } | ||
373 | |||
374 | if (!sou->buffer) { | ||
375 | /* forced to 4 bytes per pixel by the device */ | ||
376 | size_t size = mode->hdisplay * mode->vdisplay * 4; | ||
377 | ret = vmw_sou_backing_alloc(dev_priv, sou, size); | ||
378 | if (unlikely(ret != 0)) | ||
379 | return ret; | ||
380 | } | ||
381 | |||
382 | ret = vmw_sou_fifo_create(dev_priv, sou, set->x, set->y, mode); | ||
383 | if (unlikely(ret != 0)) { | ||
384 | /* | ||
385 | * We are in a bit of a situation here: the hardware has | ||
386 | * hung and we may or may not have a buffer hanging off | ||
387 | * the screen object. The best thing to do is nothing | ||
388 | * if we were already defined; if not, just turn the crtc off. | ||
389 | * Not what userspace wants, but it will have to cope. | ||
390 | */ | ||
391 | if (sou->defined) | ||
392 | return ret; | ||
393 | |||
394 | connector->encoder = NULL; | ||
395 | encoder->crtc = NULL; | ||
396 | crtc->fb = NULL; | ||
397 | crtc->x = 0; | ||
398 | crtc->y = 0; | ||
399 | |||
400 | return ret; | ||
401 | } | ||
402 | |||
403 | vmw_sou_add_active(dev_priv, sou, vfb); | ||
404 | |||
405 | connector->encoder = encoder; | ||
406 | encoder->crtc = crtc; | ||
407 | crtc->mode = *mode; | ||
408 | crtc->fb = fb; | ||
409 | crtc->x = set->x; | ||
410 | crtc->y = set->y; | ||
411 | |||
412 | return 0; | ||
413 | } | ||
414 | |||
415 | static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = { | ||
416 | .save = vmw_du_crtc_save, | ||
417 | .restore = vmw_du_crtc_restore, | ||
418 | .cursor_set = vmw_du_crtc_cursor_set, | ||
419 | .cursor_move = vmw_du_crtc_cursor_move, | ||
420 | .gamma_set = vmw_du_crtc_gamma_set, | ||
421 | .destroy = vmw_sou_crtc_destroy, | ||
422 | .set_config = vmw_sou_crtc_set_config, | ||
423 | }; | ||
424 | |||
425 | /* | ||
426 | * Screen Object Display Unit encoder functions | ||
427 | */ | ||
428 | |||
429 | static void vmw_sou_encoder_destroy(struct drm_encoder *encoder) | ||
430 | { | ||
431 | vmw_sou_destroy(vmw_encoder_to_sou(encoder)); | ||
432 | } | ||
433 | |||
434 | static struct drm_encoder_funcs vmw_screen_object_encoder_funcs = { | ||
435 | .destroy = vmw_sou_encoder_destroy, | ||
436 | }; | ||
437 | |||
438 | /* | ||
439 | * Screen Object Display Unit connector functions | ||
440 | */ | ||
441 | |||
442 | static void vmw_sou_connector_destroy(struct drm_connector *connector) | ||
443 | { | ||
444 | vmw_sou_destroy(vmw_connector_to_sou(connector)); | ||
445 | } | ||
446 | |||
447 | static struct drm_connector_funcs vmw_legacy_connector_funcs = { | ||
448 | .dpms = vmw_du_connector_dpms, | ||
449 | .save = vmw_du_connector_save, | ||
450 | .restore = vmw_du_connector_restore, | ||
451 | .detect = vmw_du_connector_detect, | ||
452 | .fill_modes = vmw_du_connector_fill_modes, | ||
453 | .set_property = vmw_du_connector_set_property, | ||
454 | .destroy = vmw_sou_connector_destroy, | ||
455 | }; | ||
456 | |||
457 | static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) | ||
458 | { | ||
459 | struct vmw_screen_object_unit *sou; | ||
460 | struct drm_device *dev = dev_priv->dev; | ||
461 | struct drm_connector *connector; | ||
462 | struct drm_encoder *encoder; | ||
463 | struct drm_crtc *crtc; | ||
464 | |||
465 | sou = kzalloc(sizeof(*sou), GFP_KERNEL); | ||
466 | if (!sou) | ||
467 | return -ENOMEM; | ||
468 | |||
469 | sou->base.unit = unit; | ||
470 | crtc = &sou->base.crtc; | ||
471 | encoder = &sou->base.encoder; | ||
472 | connector = &sou->base.connector; | ||
473 | |||
474 | INIT_LIST_HEAD(&sou->active); | ||
475 | |||
476 | sou->base.pref_active = (unit == 0); | ||
477 | sou->base.pref_width = 800; | ||
478 | sou->base.pref_height = 600; | ||
479 | sou->base.pref_mode = NULL; | ||
480 | |||
481 | drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, | ||
482 | DRM_MODE_CONNECTOR_LVDS); | ||
483 | connector->status = vmw_du_connector_detect(connector, true); | ||
484 | |||
485 | drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs, | ||
486 | DRM_MODE_ENCODER_LVDS); | ||
487 | drm_mode_connector_attach_encoder(connector, encoder); | ||
488 | encoder->possible_crtcs = (1 << unit); | ||
489 | encoder->possible_clones = 0; | ||
490 | |||
491 | drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs); | ||
492 | |||
493 | drm_mode_crtc_set_gamma_size(crtc, 256); | ||
494 | |||
495 | drm_connector_attach_property(connector, | ||
496 | dev->mode_config.dirty_info_property, | ||
497 | 1); | ||
498 | |||
499 | return 0; | ||
500 | } | ||
501 | |||
502 | int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv) | ||
503 | { | ||
504 | struct drm_device *dev = dev_priv->dev; | ||
505 | int i; | ||
506 | int ret; | ||
507 | |||
508 | if (dev_priv->sou_priv) { | ||
509 | DRM_INFO("sou system already on\n"); | ||
510 | return -EINVAL; | ||
511 | } | ||
512 | |||
513 | if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_SCREEN_OBJECT_2)) { | ||
514 | DRM_INFO("Not using screen objects," | ||
515 | " missing cap SCREEN_OBJECT_2\n"); | ||
516 | return -ENOSYS; | ||
517 | } | ||
518 | |||
519 | ret = -ENOMEM; | ||
520 | dev_priv->sou_priv = kmalloc(sizeof(*dev_priv->sou_priv), GFP_KERNEL); | ||
521 | if (unlikely(!dev_priv->sou_priv)) | ||
522 | goto err_no_mem; | ||
523 | |||
524 | INIT_LIST_HEAD(&dev_priv->sou_priv->active); | ||
525 | dev_priv->sou_priv->num_active = 0; | ||
526 | dev_priv->sou_priv->last_num_active = 0; | ||
527 | dev_priv->sou_priv->fb = NULL; | ||
528 | |||
529 | ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS); | ||
530 | if (unlikely(ret != 0)) | ||
531 | goto err_free; | ||
532 | |||
533 | ret = drm_mode_create_dirty_info_property(dev_priv->dev); | ||
534 | if (unlikely(ret != 0)) | ||
535 | goto err_vblank_cleanup; | ||
536 | |||
537 | for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) | ||
538 | vmw_sou_init(dev_priv, i); | ||
539 | |||
540 | DRM_INFO("Screen objects system initialized\n"); | ||
541 | |||
542 | return 0; | ||
543 | |||
544 | err_vblank_cleanup: | ||
545 | drm_vblank_cleanup(dev); | ||
546 | err_free: | ||
547 | kfree(dev_priv->sou_priv); | ||
548 | err_no_mem: | ||
549 | return ret; | ||
550 | } | ||
551 | |||
552 | int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv) | ||
553 | { | ||
554 | struct drm_device *dev = dev_priv->dev; | ||
555 | |||
556 | drm_vblank_cleanup(dev); | ||
557 | if (!dev_priv->sou_priv) | ||
558 | return -ENOSYS; | ||
559 | |||
560 | if (!list_empty(&dev_priv->sou_priv->active)) | ||
561 | DRM_ERROR("Still have active outputs when unloading driver"); | ||
562 | |||
563 | kfree(dev_priv->sou_priv); | ||
564 | |||
565 | return 0; | ||
566 | } | ||