 drivers/gpu/drm/vmwgfx/Makefile               |    2
 drivers/gpu/drm/vmwgfx/svga3d_reg.h           |  718
 drivers/gpu/drm/vmwgfx/svga_reg.h             |   10
 drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c        |  174
 drivers/gpu/drm/vmwgfx/vmwgfx_context.c       |  531
 drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c        |    8
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c           |  209
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h           |  211
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c       |  872
 drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c          |  107
 drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c           |  160
 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c |   15
 drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c         |   42
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c           |    8
 drivers/gpu/drm/vmwgfx/vmwgfx_mob.c           |  659
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c      |  195
 drivers/gpu/drm/vmwgfx/vmwgfx_shader.c        |  440
 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c       |  467
 include/uapi/drm/vmwgfx_drm.h                 |  261
 19 files changed, 4716 insertions(+), 373 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 9f8b690bcf52..458cdf6d81e8 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -6,6 +6,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
 	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
 	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
-	    vmwgfx_surface.o vmwgfx_prime.o
+	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index d0e085ee8249..d95335cb90bd 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -34,6 +34,8 @@
 
 #include "svga_reg.h"
 
+typedef uint32 PPN;
+typedef __le64 PPN64;
 
 /*
  * 3D Hardware Version
@@ -71,6 +73,9 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
 #define SVGA3D_MAX_CONTEXT_IDS                256
 #define SVGA3D_MAX_SURFACE_IDS                (32 * 1024)
 
+#define SVGA3D_NUM_TEXTURE_UNITS               32
+#define SVGA3D_NUM_LIGHTS                      8
+
 /*
  * Surface formats.
  *
@@ -81,6 +86,7 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
  */
 
 typedef enum SVGA3dSurfaceFormat {
+   SVGA3D_FORMAT_MIN                   = 0,
    SVGA3D_FORMAT_INVALID               = 0,
 
    SVGA3D_X8R8G8B8                     = 1,
@@ -134,12 +140,6 @@ typedef enum SVGA3dSurfaceFormat {
    SVGA3D_RG_S10E5                     = 35,
    SVGA3D_RG_S23E8                     = 36,
 
-   /*
-    * Any surface can be used as a buffer object, but SVGA3D_BUFFER is
-    * the most efficient format to use when creating new surfaces
-    * expressly for index or vertex data.
-    */
-
    SVGA3D_BUFFER                       = 37,
 
    SVGA3D_Z_D24X8                      = 38,
@@ -159,15 +159,114 @@ typedef enum SVGA3dSurfaceFormat {
159 | /* Video format with alpha */ | 159 | /* Video format with alpha */ |
160 | SVGA3D_AYUV = 45, | 160 | SVGA3D_AYUV = 45, |
161 | 161 | ||
162 | SVGA3D_R32G32B32A32_TYPELESS = 46, | ||
163 | SVGA3D_R32G32B32A32_FLOAT = 25, | ||
164 | SVGA3D_R32G32B32A32_UINT = 47, | ||
165 | SVGA3D_R32G32B32A32_SINT = 48, | ||
166 | SVGA3D_R32G32B32_TYPELESS = 49, | ||
167 | SVGA3D_R32G32B32_FLOAT = 50, | ||
168 | SVGA3D_R32G32B32_UINT = 51, | ||
169 | SVGA3D_R32G32B32_SINT = 52, | ||
170 | SVGA3D_R16G16B16A16_TYPELESS = 53, | ||
171 | SVGA3D_R16G16B16A16_FLOAT = 24, | ||
172 | SVGA3D_R16G16B16A16_UNORM = 41, | ||
173 | SVGA3D_R16G16B16A16_UINT = 54, | ||
174 | SVGA3D_R16G16B16A16_SNORM = 55, | ||
175 | SVGA3D_R16G16B16A16_SINT = 56, | ||
176 | SVGA3D_R32G32_TYPELESS = 57, | ||
177 | SVGA3D_R32G32_FLOAT = 36, | ||
178 | SVGA3D_R32G32_UINT = 58, | ||
179 | SVGA3D_R32G32_SINT = 59, | ||
180 | SVGA3D_R32G8X24_TYPELESS = 60, | ||
181 | SVGA3D_D32_FLOAT_S8X24_UINT = 61, | ||
182 | SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62, | ||
183 | SVGA3D_X32_TYPELESS_G8X24_UINT = 63, | ||
184 | SVGA3D_R10G10B10A2_TYPELESS = 64, | ||
185 | SVGA3D_R10G10B10A2_UNORM = 26, | ||
186 | SVGA3D_R10G10B10A2_UINT = 65, | ||
187 | SVGA3D_R11G11B10_FLOAT = 66, | ||
188 | SVGA3D_R8G8B8A8_TYPELESS = 67, | ||
189 | SVGA3D_R8G8B8A8_UNORM = 68, | ||
190 | SVGA3D_R8G8B8A8_UNORM_SRGB = 69, | ||
191 | SVGA3D_R8G8B8A8_UINT = 70, | ||
192 | SVGA3D_R8G8B8A8_SNORM = 28, | ||
193 | SVGA3D_R8G8B8A8_SINT = 71, | ||
194 | SVGA3D_R16G16_TYPELESS = 72, | ||
195 | SVGA3D_R16G16_FLOAT = 35, | ||
196 | SVGA3D_R16G16_UNORM = 40, | ||
197 | SVGA3D_R16G16_UINT = 73, | ||
198 | SVGA3D_R16G16_SNORM = 39, | ||
199 | SVGA3D_R16G16_SINT = 74, | ||
200 | SVGA3D_R32_TYPELESS = 75, | ||
201 | SVGA3D_D32_FLOAT = 76, | ||
202 | SVGA3D_R32_FLOAT = 34, | ||
203 | SVGA3D_R32_UINT = 77, | ||
204 | SVGA3D_R32_SINT = 78, | ||
205 | SVGA3D_R24G8_TYPELESS = 79, | ||
206 | SVGA3D_D24_UNORM_S8_UINT = 80, | ||
207 | SVGA3D_R24_UNORM_X8_TYPELESS = 81, | ||
208 | SVGA3D_X24_TYPELESS_G8_UINT = 82, | ||
209 | SVGA3D_R8G8_TYPELESS = 83, | ||
210 | SVGA3D_R8G8_UNORM = 84, | ||
211 | SVGA3D_R8G8_UINT = 85, | ||
212 | SVGA3D_R8G8_SNORM = 27, | ||
213 | SVGA3D_R8G8_SINT = 86, | ||
214 | SVGA3D_R16_TYPELESS = 87, | ||
215 | SVGA3D_R16_FLOAT = 33, | ||
216 | SVGA3D_D16_UNORM = 8, | ||
217 | SVGA3D_R16_UNORM = 88, | ||
218 | SVGA3D_R16_UINT = 89, | ||
219 | SVGA3D_R16_SNORM = 90, | ||
220 | SVGA3D_R16_SINT = 91, | ||
221 | SVGA3D_R8_TYPELESS = 92, | ||
222 | SVGA3D_R8_UNORM = 93, | ||
223 | SVGA3D_R8_UINT = 94, | ||
224 | SVGA3D_R8_SNORM = 95, | ||
225 | SVGA3D_R8_SINT = 96, | ||
226 | SVGA3D_A8_UNORM = 32, | ||
227 | SVGA3D_R1_UNORM = 97, | ||
228 | SVGA3D_R9G9B9E5_SHAREDEXP = 98, | ||
229 | SVGA3D_R8G8_B8G8_UNORM = 99, | ||
230 | SVGA3D_G8R8_G8B8_UNORM = 100, | ||
231 | SVGA3D_BC1_TYPELESS = 101, | ||
232 | SVGA3D_BC1_UNORM = 15, | ||
233 | SVGA3D_BC1_UNORM_SRGB = 102, | ||
234 | SVGA3D_BC2_TYPELESS = 103, | ||
235 | SVGA3D_BC2_UNORM = 17, | ||
236 | SVGA3D_BC2_UNORM_SRGB = 104, | ||
237 | SVGA3D_BC3_TYPELESS = 105, | ||
238 | SVGA3D_BC3_UNORM = 19, | ||
239 | SVGA3D_BC3_UNORM_SRGB = 106, | ||
240 | SVGA3D_BC4_TYPELESS = 107, | ||
162 | SVGA3D_BC4_UNORM = 108, | 241 | SVGA3D_BC4_UNORM = 108, |
242 | SVGA3D_BC4_SNORM = 109, | ||
243 | SVGA3D_BC5_TYPELESS = 110, | ||
163 | SVGA3D_BC5_UNORM = 111, | 244 | SVGA3D_BC5_UNORM = 111, |
245 | SVGA3D_BC5_SNORM = 112, | ||
246 | SVGA3D_B5G6R5_UNORM = 3, | ||
247 | SVGA3D_B5G5R5A1_UNORM = 5, | ||
248 | SVGA3D_B8G8R8A8_UNORM = 2, | ||
249 | SVGA3D_B8G8R8X8_UNORM = 1, | ||
250 | SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113, | ||
251 | SVGA3D_B8G8R8A8_TYPELESS = 114, | ||
252 | SVGA3D_B8G8R8A8_UNORM_SRGB = 115, | ||
253 | SVGA3D_B8G8R8X8_TYPELESS = 116, | ||
254 | SVGA3D_B8G8R8X8_UNORM_SRGB = 117, | ||
164 | 255 | ||
165 | /* Advanced D3D9 depth formats. */ | 256 | /* Advanced D3D9 depth formats. */ |
166 | SVGA3D_Z_DF16 = 118, | 257 | SVGA3D_Z_DF16 = 118, |
167 | SVGA3D_Z_DF24 = 119, | 258 | SVGA3D_Z_DF24 = 119, |
168 | SVGA3D_Z_D24S8_INT = 120, | 259 | SVGA3D_Z_D24S8_INT = 120, |
169 | 260 | ||
170 | SVGA3D_FORMAT_MAX | 261 | /* Planar video formats. */ |
262 | SVGA3D_YV12 = 121, | ||
263 | |||
264 | /* Shader constant formats. */ | ||
265 | SVGA3D_SURFACE_SHADERCONST_FLOAT = 122, | ||
266 | SVGA3D_SURFACE_SHADERCONST_INT = 123, | ||
267 | SVGA3D_SURFACE_SHADERCONST_BOOL = 124, | ||
268 | |||
269 | SVGA3D_FORMAT_MAX = 125, | ||
171 | } SVGA3dSurfaceFormat; | 270 | } SVGA3dSurfaceFormat; |
172 | 271 | ||
173 | typedef uint32 SVGA3dColor; /* a, r, g, b */ | 272 | typedef uint32 SVGA3dColor; /* a, r, g, b */ |
@@ -957,15 +1056,21 @@ typedef enum {
957 | } SVGA3dCubeFace; | 1056 | } SVGA3dCubeFace; |
958 | 1057 | ||
959 | typedef enum { | 1058 | typedef enum { |
1059 | SVGA3D_SHADERTYPE_INVALID = 0, | ||
1060 | SVGA3D_SHADERTYPE_MIN = 1, | ||
960 | SVGA3D_SHADERTYPE_VS = 1, | 1061 | SVGA3D_SHADERTYPE_VS = 1, |
961 | SVGA3D_SHADERTYPE_PS = 2, | 1062 | SVGA3D_SHADERTYPE_PS = 2, |
962 | SVGA3D_SHADERTYPE_MAX | 1063 | SVGA3D_SHADERTYPE_MAX = 3, |
1064 | SVGA3D_SHADERTYPE_GS = 3, | ||
963 | } SVGA3dShaderType; | 1065 | } SVGA3dShaderType; |
964 | 1066 | ||
1067 | #define SVGA3D_NUM_SHADERTYPE (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN) | ||
1068 | |||
965 | typedef enum { | 1069 | typedef enum { |
966 | SVGA3D_CONST_TYPE_FLOAT = 0, | 1070 | SVGA3D_CONST_TYPE_FLOAT = 0, |
967 | SVGA3D_CONST_TYPE_INT = 1, | 1071 | SVGA3D_CONST_TYPE_INT = 1, |
968 | SVGA3D_CONST_TYPE_BOOL = 2, | 1072 | SVGA3D_CONST_TYPE_BOOL = 2, |
1073 | SVGA3D_CONST_TYPE_MAX | ||
969 | } SVGA3dShaderConstType; | 1074 | } SVGA3dShaderConstType; |
970 | 1075 | ||
971 | #define SVGA3D_MAX_SURFACE_FACES 6 | 1076 | #define SVGA3D_MAX_SURFACE_FACES 6 |
@@ -1056,9 +1161,74 @@ typedef enum {
1056 | #define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31 | 1161 | #define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31 |
1057 | #define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40 | 1162 | #define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40 |
1058 | #define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41 | 1163 | #define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41 |
1059 | #define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 42 | 1164 | #define SVGA_3D_CMD_SCREEN_DMA 1082 |
1060 | 1165 | #define SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE 1083 | |
1061 | #define SVGA_3D_CMD_FUTURE_MAX 2000 | 1166 | #define SVGA_3D_CMD_OPEN_CONTEXT_SURFACE 1084 |
1167 | |||
1168 | #define SVGA_3D_CMD_LOGICOPS_BITBLT 1085 | ||
1169 | #define SVGA_3D_CMD_LOGICOPS_TRANSBLT 1086 | ||
1170 | #define SVGA_3D_CMD_LOGICOPS_STRETCHBLT 1087 | ||
1171 | #define SVGA_3D_CMD_LOGICOPS_COLORFILL 1088 | ||
1172 | #define SVGA_3D_CMD_LOGICOPS_ALPHABLEND 1089 | ||
1173 | #define SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND 1090 | ||
1174 | |||
1175 | #define SVGA_3D_CMD_SET_OTABLE_BASE 1091 | ||
1176 | #define SVGA_3D_CMD_READBACK_OTABLE 1092 | ||
1177 | |||
1178 | #define SVGA_3D_CMD_DEFINE_GB_MOB 1093 | ||
1179 | #define SVGA_3D_CMD_DESTROY_GB_MOB 1094 | ||
1180 | #define SVGA_3D_CMD_REDEFINE_GB_MOB 1095 | ||
1181 | #define SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING 1096 | ||
1182 | |||
1183 | #define SVGA_3D_CMD_DEFINE_GB_SURFACE 1097 | ||
1184 | #define SVGA_3D_CMD_DESTROY_GB_SURFACE 1098 | ||
1185 | #define SVGA_3D_CMD_BIND_GB_SURFACE 1099 | ||
1186 | #define SVGA_3D_CMD_COND_BIND_GB_SURFACE 1100 | ||
1187 | #define SVGA_3D_CMD_UPDATE_GB_IMAGE 1101 | ||
1188 | #define SVGA_3D_CMD_UPDATE_GB_SURFACE 1102 | ||
1189 | #define SVGA_3D_CMD_READBACK_GB_IMAGE 1103 | ||
1190 | #define SVGA_3D_CMD_READBACK_GB_SURFACE 1104 | ||
1191 | #define SVGA_3D_CMD_INVALIDATE_GB_IMAGE 1105 | ||
1192 | #define SVGA_3D_CMD_INVALIDATE_GB_SURFACE 1106 | ||
1193 | |||
1194 | #define SVGA_3D_CMD_DEFINE_GB_CONTEXT 1107 | ||
1195 | #define SVGA_3D_CMD_DESTROY_GB_CONTEXT 1108 | ||
1196 | #define SVGA_3D_CMD_BIND_GB_CONTEXT 1109 | ||
1197 | #define SVGA_3D_CMD_READBACK_GB_CONTEXT 1110 | ||
1198 | #define SVGA_3D_CMD_INVALIDATE_GB_CONTEXT 1111 | ||
1199 | |||
1200 | #define SVGA_3D_CMD_DEFINE_GB_SHADER 1112 | ||
1201 | #define SVGA_3D_CMD_DESTROY_GB_SHADER 1113 | ||
1202 | #define SVGA_3D_CMD_BIND_GB_SHADER 1114 | ||
1203 | |||
1204 | #define SVGA_3D_CMD_SET_OTABLE_BASE64 1115 | ||
1205 | |||
1206 | #define SVGA_3D_CMD_BEGIN_GB_QUERY 1116 | ||
1207 | #define SVGA_3D_CMD_END_GB_QUERY 1117 | ||
1208 | #define SVGA_3D_CMD_WAIT_FOR_GB_QUERY 1118 | ||
1209 | |||
1210 | #define SVGA_3D_CMD_NOP 1119 | ||
1211 | |||
1212 | #define SVGA_3D_CMD_ENABLE_GART 1120 | ||
1213 | #define SVGA_3D_CMD_DISABLE_GART 1121 | ||
1214 | #define SVGA_3D_CMD_MAP_MOB_INTO_GART 1122 | ||
1215 | #define SVGA_3D_CMD_UNMAP_GART_RANGE 1123 | ||
1216 | |||
1217 | #define SVGA_3D_CMD_DEFINE_GB_SCREENTARGET 1124 | ||
1218 | #define SVGA_3D_CMD_DESTROY_GB_SCREENTARGET 1125 | ||
1219 | #define SVGA_3D_CMD_BIND_GB_SCREENTARGET 1126 | ||
1220 | #define SVGA_3D_CMD_UPDATE_GB_SCREENTARGET 1127 | ||
1221 | |||
1222 | #define SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL 1128 | ||
1223 | #define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129 | ||
1224 | |||
1225 | #define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130 | ||
1226 | |||
1227 | #define SVGA_3D_CMD_DEFINE_GB_MOB64 1135 | ||
1228 | #define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136 | ||
1229 | |||
1230 | #define SVGA_3D_CMD_MAX 1142 | ||
1231 | #define SVGA_3D_CMD_FUTURE_MAX 3000 | ||
1062 | 1232 | ||
1063 | /* | 1233 | /* |
1064 | * Common substructures used in multiple FIFO commands: | 1234 | * Common substructures used in multiple FIFO commands: |
@@ -1750,6 +1920,495 @@ struct {
1750 | 1920 | ||
1751 | 1921 | ||
1752 | /* | 1922 | /* |
1923 | * Guest-backed surface definitions. | ||
1924 | */ | ||
1925 | |||
1926 | typedef uint32 SVGAMobId; | ||
1927 | |||
1928 | typedef enum SVGAMobFormat { | ||
1929 | SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID, | ||
1930 | SVGA3D_MOBFMT_PTDEPTH_0 = 0, | ||
1931 | SVGA3D_MOBFMT_PTDEPTH_1 = 1, | ||
1932 | SVGA3D_MOBFMT_PTDEPTH_2 = 2, | ||
1933 | SVGA3D_MOBFMT_RANGE = 3, | ||
1934 | SVGA3D_MOBFMT_PTDEPTH64_0 = 4, | ||
1935 | SVGA3D_MOBFMT_PTDEPTH64_1 = 5, | ||
1936 | SVGA3D_MOBFMT_PTDEPTH64_2 = 6, | ||
1937 | SVGA3D_MOBFMT_MAX, | ||
1938 | } SVGAMobFormat; | ||
1939 | |||
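The PTDEPTH values above encode how many levels of page-table indirection back a mob, so the right format follows from the mob's size. A minimal sketch, not part of the patch and with a hypothetical helper name, assuming 4 KiB pages so one page-table page holds PAGE_SIZE / sizeof(PPN) == 1024 32-bit entries:

	static SVGAMobFormat vmw_mob_pick_pt_depth(unsigned long num_data_pages)
	{
		const unsigned long ppns_per_page = PAGE_SIZE / sizeof(PPN);

		if (num_data_pages == 1)
			return SVGA3D_MOBFMT_PTDEPTH_0;	/* base PPN is the data page itself */
		if (num_data_pages <= ppns_per_page)
			return SVGA3D_MOBFMT_PTDEPTH_1;	/* one page of PPN entries */
		return SVGA3D_MOBFMT_PTDEPTH_2;		/* two-level page table */
	}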
1940 | /* | ||
1941 | * Sizes of opaque types. | ||
1942 | */ | ||
1943 | |||
1944 | #define SVGA3D_OTABLE_MOB_ENTRY_SIZE 16 | ||
1945 | #define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE 8 | ||
1946 | #define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE 64 | ||
1947 | #define SVGA3D_OTABLE_SHADER_ENTRY_SIZE 16 | ||
1948 | #define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE 64 | ||
1949 | #define SVGA3D_CONTEXT_DATA_SIZE 16384 | ||
1950 | |||
1951 | /* | ||
1952 | * SVGA3dCmdSetOTableBase -- | ||
1953 | * | ||
1954 | * This command allows the guest to specify the base PPN of the | ||
1955 | * specified object table. | ||
1956 | */ | ||
1957 | |||
1958 | typedef enum { | ||
1959 | SVGA_OTABLE_MOB = 0, | ||
1960 | SVGA_OTABLE_MIN = 0, | ||
1961 | SVGA_OTABLE_SURFACE = 1, | ||
1962 | SVGA_OTABLE_CONTEXT = 2, | ||
1963 | SVGA_OTABLE_SHADER = 3, | ||
1964 | SVGA_OTABLE_SCREEN_TARGET = 4, | ||
1965 | SVGA_OTABLE_DX9_MAX = 5, | ||
1966 | SVGA_OTABLE_MAX = 8 | ||
1967 | } SVGAOTableType; | ||
1968 | |||
1969 | typedef | ||
1970 | struct { | ||
1971 | SVGAOTableType type; | ||
1972 | PPN baseAddress; | ||
1973 | uint32 sizeInBytes; | ||
1974 | uint32 validSizeInBytes; | ||
1975 | SVGAMobFormat ptDepth; | ||
1976 | } | ||
1977 | __attribute__((__packed__)) | ||
1978 | SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */ | ||
1979 | |||
1980 | typedef | ||
1981 | struct { | ||
1982 | SVGAOTableType type; | ||
1983 | PPN64 baseAddress; | ||
1984 | uint32 sizeInBytes; | ||
1985 | uint32 validSizeInBytes; | ||
1986 | SVGAMobFormat ptDepth; | ||
1987 | } | ||
1988 | __attribute__((__packed__)) | ||
1989 | SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */ | ||
1990 | |||
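For orientation, this is the encoding pattern every guest-backed command in this series follows: an SVGA3dCmdHeader carrying the command id and body size, followed by the packed body. A sketch, not part of the patch and with a hypothetical wrapper name, of emitting SVGA_3D_CMD_SET_OTABLE_BASE64 through the vmw_fifo_reserve()/vmw_fifo_commit() helpers used by the driver code further down:

	static int vmw_otable_setup_example(struct vmw_private *dev_priv,
					    SVGAOTableType type, u64 base_ppn,
					    u32 size_in_bytes)
	{
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdSetOTableBase64 body;
		} *cmd;

		cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
		if (unlikely(cmd == NULL))
			return -ENOMEM;

		cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.type = type;
		cmd->body.baseAddress = cpu_to_le64(base_ppn);	/* PPN64 is __le64 */
		cmd->body.sizeInBytes = size_in_bytes;
		cmd->body.validSizeInBytes = 0;
		cmd->body.ptDepth = SVGA3D_MOBFMT_RANGE;	/* assumes contiguous pages */
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
		return 0;
	}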
1991 | typedef | ||
1992 | struct { | ||
1993 | SVGAOTableType type; | ||
1994 | } | ||
1995 | __attribute__((__packed__)) | ||
1996 | SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */ | ||
1997 | |||
1998 | /* | ||
1999 | * Define a memory object (Mob) in the OTable. | ||
2000 | */ | ||
2001 | |||
2002 | typedef | ||
2003 | struct SVGA3dCmdDefineGBMob { | ||
2004 | SVGAMobId mobid; | ||
2005 | SVGAMobFormat ptDepth; | ||
2006 | PPN base; | ||
2007 | uint32 sizeInBytes; | ||
2008 | } | ||
2009 | __attribute__((__packed__)) | ||
2010 | SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ | ||
2011 | |||
2012 | |||
2013 | /* | ||
2014 | * Destroys an object in the OTable. | ||
2015 | */ | ||
2016 | |||
2017 | typedef | ||
2018 | struct SVGA3dCmdDestroyGBMob { | ||
2019 | SVGAMobId mobid; | ||
2020 | } | ||
2021 | __attribute__((__packed__)) | ||
2022 | SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */ | ||
2023 | |||
2024 | /* | ||
2025 | * Redefine an object in the OTable. | ||
2026 | */ | ||
2027 | |||
2028 | typedef | ||
2029 | struct SVGA3dCmdRedefineGBMob { | ||
2030 | SVGAMobId mobid; | ||
2031 | SVGAMobFormat ptDepth; | ||
2032 | PPN base; | ||
2033 | uint32 sizeInBytes; | ||
2034 | } | ||
2035 | __attribute__((__packed__)) | ||
2036 | SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */ | ||
2037 | |||
2038 | /* | ||
2039 | * Define a memory object (Mob) in the OTable with a PPN64 base. | ||
2040 | */ | ||
2041 | |||
2042 | typedef | ||
2043 | struct SVGA3dCmdDefineGBMob64 { | ||
2044 | SVGAMobId mobid; | ||
2045 | SVGAMobFormat ptDepth; | ||
2046 | PPN64 base; | ||
2047 | uint32 sizeInBytes; | ||
2048 | } | ||
2049 | __attribute__((__packed__)) | ||
2050 | SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */ | ||
2051 | |||
2052 | /* | ||
2053 | * Redefine an object in the OTable with PPN64 base. | ||
2054 | */ | ||
2055 | |||
2056 | typedef | ||
2057 | struct SVGA3dCmdRedefineGBMob64 { | ||
2058 | SVGAMobId mobid; | ||
2059 | SVGAMobFormat ptDepth; | ||
2060 | PPN64 base; | ||
2061 | uint32 sizeInBytes; | ||
2062 | } | ||
2063 | __attribute__((__packed__)) | ||
2064 | SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ | ||
2065 | |||
2066 | /* | ||
2067 | * Notification that the page tables have been modified. | ||
2068 | */ | ||
2069 | |||
2070 | typedef | ||
2071 | struct SVGA3dCmdUpdateGBMobMapping { | ||
2072 | SVGAMobId mobid; | ||
2073 | } | ||
2074 | __attribute__((__packed__)) | ||
2075 | SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */ | ||
2076 | |||
2077 | /* | ||
2078 | * Define a guest-backed surface. | ||
2079 | */ | ||
2080 | |||
2081 | typedef | ||
2082 | struct SVGA3dCmdDefineGBSurface { | ||
2083 | uint32 sid; | ||
2084 | SVGA3dSurfaceFlags surfaceFlags; | ||
2085 | SVGA3dSurfaceFormat format; | ||
2086 | uint32 numMipLevels; | ||
2087 | uint32 multisampleCount; | ||
2088 | SVGA3dTextureFilter autogenFilter; | ||
2089 | SVGA3dSize size; | ||
2090 | } SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ | ||
2091 | |||
2092 | /* | ||
2093 | * Destroy a guest-backed surface. | ||
2094 | */ | ||
2095 | |||
2096 | typedef | ||
2097 | struct SVGA3dCmdDestroyGBSurface { | ||
2098 | uint32 sid; | ||
2099 | } SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ | ||
2100 | |||
2101 | /* | ||
2102 | * Bind a guest-backed surface to an object. | ||
2103 | */ | ||
2104 | |||
2105 | typedef | ||
2106 | struct SVGA3dCmdBindGBSurface { | ||
2107 | uint32 sid; | ||
2108 | SVGAMobId mobid; | ||
2109 | } SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ | ||
2110 | |||
2111 | /* | ||
2112 | * Conditionally bind a mob to a guest backed surface if testMobid | ||
2113 | * matches the currently bound mob. Optionally issue a readback on | ||
2114 | * the surface while it is still bound to the old mobid if the mobid | ||
2115 | * is changed by this command. | ||
2116 | */ | ||
2117 | |||
2118 | #define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0) | ||
2119 | |||
2120 | typedef | ||
2121 | struct{ | ||
2122 | uint32 sid; | ||
2123 | SVGAMobId testMobid; | ||
2124 | SVGAMobId mobid; | ||
2125 | uint32 flags; | ||
2126 | } | ||
2127 | SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */ | ||
2128 | |||
2129 | /* | ||
2130 | * Update an image in a guest-backed surface. | ||
2131 | * (Inform the device that the guest-contents have been updated.) | ||
2132 | */ | ||
2133 | |||
2134 | typedef | ||
2135 | struct SVGA3dCmdUpdateGBImage { | ||
2136 | SVGA3dSurfaceImageId image; | ||
2137 | SVGA3dBox box; | ||
2138 | } SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ | ||
2139 | |||
2140 | /* | ||
2141 | * Update an entire guest-backed surface. | ||
2142 | * (Inform the device that the guest-contents have been updated.) | ||
2143 | */ | ||
2144 | |||
2145 | typedef | ||
2146 | struct SVGA3dCmdUpdateGBSurface { | ||
2147 | uint32 sid; | ||
2148 | } SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ | ||
2149 | |||
2150 | /* | ||
2151 | * Readback an image in a guest-backed surface. | ||
2152 | * (Request the device to flush the dirty contents into the guest.) | ||
2153 | */ | ||
2154 | |||
2155 | typedef | ||
2156 | struct SVGA3dCmdReadbackGBImage { | ||
2157 | SVGA3dSurfaceImageId image; | ||
2158 | } SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/ | ||
2159 | |||
2160 | /* | ||
2161 | * Readback an entire guest-backed surface. | ||
2162 | * (Request the device to flush the dirty contents into the guest.) | ||
2163 | */ | ||
2164 | |||
2165 | typedef | ||
2166 | struct SVGA3dCmdReadbackGBSurface { | ||
2167 | uint32 sid; | ||
2168 | } SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ | ||
2169 | |||
2170 | /* | ||
2171 | * Readback a sub rect of an image in a guest-backed surface. After | ||
2172 | * issuing this command the driver is required to issue an update call | ||
2173 | * of the same region before issuing any other commands that reference | ||
2174 | * this surface or rendering is not guaranteed. | ||
2175 | */ | ||
2176 | |||
2177 | typedef | ||
2178 | struct SVGA3dCmdReadbackGBImagePartial { | ||
2179 | SVGA3dSurfaceImageId image; | ||
2180 | SVGA3dBox box; | ||
2181 | uint32 invertBox; | ||
2182 | } | ||
2183 | SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ | ||
2184 | |||
2185 | /* | ||
2186 | * Invalidate an image in a guest-backed surface. | ||
2187 | * (Notify the device that the contents can be lost.) | ||
2188 | */ | ||
2189 | |||
2190 | typedef | ||
2191 | struct SVGA3dCmdInvalidateGBImage { | ||
2192 | SVGA3dSurfaceImageId image; | ||
2193 | } SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ | ||
2194 | |||
2195 | /* | ||
2196 | * Invalidate an entire guest-backed surface. | ||
2197 | * (Notify the device that the contents of all images can be lost.) ||
2198 | */ | ||
2199 | |||
2200 | typedef | ||
2201 | struct SVGA3dCmdInvalidateGBSurface { | ||
2202 | uint32 sid; | ||
2203 | } SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ | ||
2204 | |||
2205 | /* | ||
2206 | * Invalidate a sub rect of an image in a guest-backed surface. After | ||
2207 | * issuing this command the driver is required to issue an update call | ||
2208 | * of the same region before issuing any other commands that reference | ||
2209 | * this surface or rendering is not guaranteed. | ||
2210 | */ | ||
2211 | |||
2212 | typedef | ||
2213 | struct SVGA3dCmdInvalidateGBImagePartial { | ||
2214 | SVGA3dSurfaceImageId image; | ||
2215 | SVGA3dBox box; | ||
2216 | uint32 invertBox; | ||
2217 | } | ||
2218 | SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ | ||
2219 | |||
2220 | /* | ||
2221 | * Define a guest-backed context. | ||
2222 | */ | ||
2223 | |||
2224 | typedef | ||
2225 | struct SVGA3dCmdDefineGBContext { | ||
2226 | uint32 cid; | ||
2227 | } SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ | ||
2228 | |||
2229 | /* | ||
2230 | * Destroy a guest-backed context. | ||
2231 | */ | ||
2232 | |||
2233 | typedef | ||
2234 | struct SVGA3dCmdDestroyGBContext { | ||
2235 | uint32 cid; | ||
2236 | } SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ | ||
2237 | |||
2238 | /* | ||
2239 | * Bind a guest-backed context. | ||
2240 | * | ||
2241 | * validContents should be set to 0 for new contexts, | ||
2242 | * and 1 if this is an old context which is getting paged | ||
2243 | * back on to the device. | ||
2244 | * | ||
2245 | * For new contexts, it is recommended that the driver | ||
2246 | * issue commands to initialize all interesting state | ||
2247 | * prior to rendering. | ||
2248 | */ | ||
2249 | |||
2250 | typedef | ||
2251 | struct SVGA3dCmdBindGBContext { | ||
2252 | uint32 cid; | ||
2253 | SVGAMobId mobid; | ||
2254 | uint32 validContents; | ||
2255 | } SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ | ||
2256 | |||
2257 | /* | ||
2258 | * Readback a guest-backed context. | ||
2259 | * (Request that the device flush the contents back into guest memory.) | ||
2260 | */ | ||
2261 | |||
2262 | typedef | ||
2263 | struct SVGA3dCmdReadbackGBContext { | ||
2264 | uint32 cid; | ||
2265 | } SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ | ||
2266 | |||
2267 | /* | ||
2268 | * Invalidate a guest-backed context. | ||
2269 | */ | ||
2270 | typedef | ||
2271 | struct SVGA3dCmdInvalidateGBContext { | ||
2272 | uint32 cid; | ||
2273 | } SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ | ||
2274 | |||
2275 | /* | ||
2276 | * Define a guest-backed shader. | ||
2277 | */ | ||
2278 | |||
2279 | typedef | ||
2280 | struct SVGA3dCmdDefineGBShader { | ||
2281 | uint32 shid; | ||
2282 | SVGA3dShaderType type; | ||
2283 | uint32 sizeInBytes; | ||
2284 | } SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ | ||
2285 | |||
2286 | /* | ||
2287 | * Bind a guest-backed shader. | ||
2288 | */ | ||
2289 | |||
2290 | typedef struct SVGA3dCmdBindGBShader { | ||
2291 | uint32 shid; | ||
2292 | SVGAMobId mobid; | ||
2293 | uint32 offsetInBytes; | ||
2294 | } SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ | ||
2295 | |||
2296 | /* | ||
2297 | * Destroy a guest-backed shader. | ||
2298 | */ | ||
2299 | |||
2300 | typedef struct SVGA3dCmdDestroyGBShader { | ||
2301 | uint32 shid; | ||
2302 | } SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ | ||
2303 | |||
2304 | typedef | ||
2305 | struct { | ||
2306 | uint32 cid; | ||
2307 | uint32 regStart; | ||
2308 | SVGA3dShaderType shaderType; | ||
2309 | SVGA3dShaderConstType constType; | ||
2310 | |||
2311 | /* | ||
2312 | * Followed by a variable number of shader constants. | ||
2313 | * | ||
2314 | * Note that FLOAT and INT constants are 4-dwords in length, while | ||
2315 | * BOOL constants are 1-dword in length. | ||
2316 | */ | ||
2317 | } SVGA3dCmdSetGBShaderConstInline; | ||
2318 | /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */ | ||
2319 | |||
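The size of the trailing constant payload follows directly from the note above. A hypothetical helper, not part of the patch, making the dword accounting explicit:

	static inline u32 svga3d_shader_const_cmd_size(SVGA3dShaderConstType ctype,
						       u32 num_consts)
	{
		/* FLOAT and INT constants are four dwords each, BOOL one dword. */
		u32 dwords_per_const =
			(ctype == SVGA3D_CONST_TYPE_BOOL) ? 1 : 4;

		return sizeof(SVGA3dCmdSetGBShaderConstInline) +
			num_consts * dwords_per_const * sizeof(u32);
	}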
2320 | typedef | ||
2321 | struct { | ||
2322 | uint32 cid; | ||
2323 | SVGA3dQueryType type; | ||
2324 | } SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ | ||
2325 | |||
2326 | typedef | ||
2327 | struct { | ||
2328 | uint32 cid; | ||
2329 | SVGA3dQueryType type; | ||
2330 | SVGAMobId mobid; | ||
2331 | uint32 offset; | ||
2332 | } SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ | ||
2333 | |||
2334 | |||
2335 | /* | ||
2336 | * SVGA_3D_CMD_WAIT_FOR_GB_QUERY -- | ||
2337 | * | ||
2338 | * The semantics of this command are identical to the | ||
2339 | * SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written | ||
2340 | * to a Mob instead of a GMR. | ||
2341 | */ | ||
2342 | |||
2343 | typedef | ||
2344 | struct { | ||
2345 | uint32 cid; | ||
2346 | SVGA3dQueryType type; | ||
2347 | SVGAMobId mobid; | ||
2348 | uint32 offset; | ||
2349 | } SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ | ||
2350 | |||
2351 | typedef | ||
2352 | struct { | ||
2353 | SVGAMobId mobid; | ||
2354 | uint32 fbOffset; | ||
2355 | uint32 initalized; | ||
2356 | } | ||
2357 | SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */ | ||
2358 | |||
2359 | typedef | ||
2360 | struct { | ||
2361 | SVGAMobId mobid; | ||
2362 | uint32 gartOffset; | ||
2363 | } | ||
2364 | SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */ | ||
2365 | |||
2366 | |||
2367 | typedef | ||
2368 | struct { | ||
2369 | uint32 gartOffset; | ||
2370 | uint32 numPages; | ||
2371 | } | ||
2372 | SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */ | ||
2373 | |||
2374 | |||
2375 | /* | ||
2376 | * Screen Targets | ||
2377 | */ | ||
2378 | #define SVGA_STFLAG_PRIMARY (1 << 0) | ||
2379 | |||
2380 | typedef | ||
2381 | struct { | ||
2382 | uint32 stid; | ||
2383 | uint32 width; | ||
2384 | uint32 height; | ||
2385 | int32 xRoot; | ||
2386 | int32 yRoot; | ||
2387 | uint32 flags; | ||
2388 | } | ||
2389 | SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */ | ||
2390 | |||
2391 | typedef | ||
2392 | struct { | ||
2393 | uint32 stid; | ||
2394 | } | ||
2395 | SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */ | ||
2396 | |||
2397 | typedef | ||
2398 | struct { | ||
2399 | uint32 stid; | ||
2400 | SVGA3dSurfaceImageId image; | ||
2401 | } | ||
2402 | SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */ | ||
2403 | |||
2404 | typedef | ||
2405 | struct { | ||
2406 | uint32 stid; | ||
2407 | SVGA3dBox box; | ||
2408 | } | ||
2409 | SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */ | ||
2410 | |||
2411 | /* | ||
1753 | * Capability query index. | 2412 | * Capability query index. |
1754 | * | 2413 | * |
1755 | * Notes: | 2414 | * Notes: |
@@ -1879,10 +2538,41 @@ typedef enum {
1879 | SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83, | 2538 | SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83, |
1880 | 2539 | ||
1881 | /* | 2540 | /* |
1882 | * Don't add new caps into the previous section; the values in this | 2541 | * Deprecated. |
1883 | * enumeration must not change. You can put new values right before | ||
1884 | * SVGA3D_DEVCAP_MAX. | ||
1885 | */ | 2542 | */ |
2543 | SVGA3D_DEVCAP_VGPU10 = 84, | ||
2544 | |||
2545 | /* | ||
2546 | * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements | ||
2547 | * ored together, one for every type of video decoding supported. | ||
2548 | */ | ||
2549 | SVGA3D_DEVCAP_VIDEO_DECODE = 85, | ||
2550 | |||
2551 | /* | ||
2552 | * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements | ||
2553 | * ored together, one for every type of video processing supported. | ||
2554 | */ | ||
2555 | SVGA3D_DEVCAP_VIDEO_PROCESS = 86, | ||
2556 | |||
2557 | SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */ | ||
2558 | SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */ | ||
2559 | SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */ | ||
2560 | SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */ | ||
2561 | |||
2562 | SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91, | ||
2563 | |||
2564 | /* | ||
2565 | * Does the host support the SVGA logic ops commands? | ||
2566 | */ | ||
2567 | SVGA3D_DEVCAP_LOGICOPS = 92, | ||
2568 | |||
2569 | /* | ||
2570 | * What support does the host have for screen targets? | ||
2571 | * | ||
2572 | * See the SVGA3D_SCREENTARGET_CAP bits below. | ||
2573 | */ | ||
2574 | SVGA3D_DEVCAP_SCREENTARGETS = 93, | ||
2575 | |||
1886 | SVGA3D_DEVCAP_MAX /* This must be the last index. */ | 2576 | SVGA3D_DEVCAP_MAX /* This must be the last index. */ |
1887 | } SVGA3dDevCapIndex; | 2577 | } SVGA3dDevCapIndex; |
1888 | 2578 | ||
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
index 01f63cb49678..71defa4d2d75 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -169,7 +169,10 @@ enum {
    SVGA_REG_TRACES = 45,            /* Enable trace-based updates even when FIFO is on */
    SVGA_REG_GMRS_MAX_PAGES = 46,    /* Maximum number of 4KB pages for all GMRs */
    SVGA_REG_MEMORY_SIZE = 47,       /* Total dedicated device memory excluding FIFO */
-   SVGA_REG_TOP = 48,               /* Must be 1 more than the last register */
+   SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,   /* Max primary memory */
+   SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
+   SVGA_REG_DEV_CAP = 52,           /* Write dev cap index, read value */
+   SVGA_REG_TOP = 53,               /* Must be 1 more than the last register */
 
    SVGA_PALETTE_BASE = 1024,        /* Base of SVGA color map */
    /* Next 768 (== 256*3) registers exist for colormap */
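SVGA_REG_DEV_CAP turns the capability table into an index/data register pair: write the SVGA3dDevCapIndex, then read the value back. A sketch, not part of the patch, assuming the driver's existing vmw_write()/vmw_read() register accessors and omitting the locking the real driver would hold around register access:

	static u32 vmw_devcap_read_example(struct vmw_private *dev_priv,
					   u32 cap_index)
	{
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, cap_index);
		return vmw_read(dev_priv, SVGA_REG_DEV_CAP);
	}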
@@ -431,7 +434,10 @@ struct SVGASignedPoint {
 #define SVGA_CAP_TRACES             0x00200000
 #define SVGA_CAP_GMR2               0x00400000
 #define SVGA_CAP_SCREEN_OBJECT_2    0x00800000
-
+#define SVGA_CAP_COMMAND_BUFFERS    0x01000000
+#define SVGA_CAP_DEAD1              0x02000000
+#define SVGA_CAP_CMD_BUFFERS_2      0x04000000
+#define SVGA_CAP_GBOBJECTS          0x08000000
 
 /*
  * FIFO register indices.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 2d61a2d86bd7..6327cfc36805 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -40,6 +40,10 @@ static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
40 | static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM | | 40 | static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM | |
41 | TTM_PL_FLAG_CACHED; | 41 | TTM_PL_FLAG_CACHED; |
42 | 42 | ||
43 | static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM | | ||
44 | TTM_PL_FLAG_CACHED | | ||
45 | TTM_PL_FLAG_NO_EVICT; | ||
46 | |||
43 | static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR | | 47 | static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR | |
44 | TTM_PL_FLAG_CACHED; | 48 | TTM_PL_FLAG_CACHED; |
45 | 49 | ||
@@ -47,6 +51,9 @@ static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
47 | TTM_PL_FLAG_CACHED | | 51 | TTM_PL_FLAG_CACHED | |
48 | TTM_PL_FLAG_NO_EVICT; | 52 | TTM_PL_FLAG_NO_EVICT; |
49 | 53 | ||
54 | static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB | | ||
55 | TTM_PL_FLAG_CACHED; | ||
56 | |||
50 | struct ttm_placement vmw_vram_placement = { | 57 | struct ttm_placement vmw_vram_placement = { |
51 | .fpfn = 0, | 58 | .fpfn = 0, |
52 | .lpfn = 0, | 59 | .lpfn = 0, |
@@ -116,16 +123,26 @@ struct ttm_placement vmw_sys_placement = {
116 | .busy_placement = &sys_placement_flags | 123 | .busy_placement = &sys_placement_flags |
117 | }; | 124 | }; |
118 | 125 | ||
126 | struct ttm_placement vmw_sys_ne_placement = { | ||
127 | .fpfn = 0, | ||
128 | .lpfn = 0, | ||
129 | .num_placement = 1, | ||
130 | .placement = &sys_ne_placement_flags, | ||
131 | .num_busy_placement = 1, | ||
132 | .busy_placement = &sys_ne_placement_flags | ||
133 | }; | ||
134 | |||
119 | static uint32_t evictable_placement_flags[] = { | 135 | static uint32_t evictable_placement_flags[] = { |
120 | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED, | 136 | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED, |
121 | TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED, | 137 | TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED, |
122 | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | 138 | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED, |
139 | VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | ||
123 | }; | 140 | }; |
124 | 141 | ||
125 | struct ttm_placement vmw_evictable_placement = { | 142 | struct ttm_placement vmw_evictable_placement = { |
126 | .fpfn = 0, | 143 | .fpfn = 0, |
127 | .lpfn = 0, | 144 | .lpfn = 0, |
128 | .num_placement = 3, | 145 | .num_placement = 4, |
129 | .placement = evictable_placement_flags, | 146 | .placement = evictable_placement_flags, |
130 | .num_busy_placement = 1, | 147 | .num_busy_placement = 1, |
131 | .busy_placement = &sys_placement_flags | 148 | .busy_placement = &sys_placement_flags |
@@ -140,10 +157,21 @@ struct ttm_placement vmw_srf_placement = {
140 | .busy_placement = gmr_vram_placement_flags | 157 | .busy_placement = gmr_vram_placement_flags |
141 | }; | 158 | }; |
142 | 159 | ||
160 | struct ttm_placement vmw_mob_placement = { | ||
161 | .fpfn = 0, | ||
162 | .lpfn = 0, | ||
163 | .num_placement = 1, | ||
164 | .num_busy_placement = 1, | ||
165 | .placement = &mob_placement_flags, | ||
166 | .busy_placement = &mob_placement_flags | ||
167 | }; | ||
168 | |||
143 | struct vmw_ttm_tt { | 169 | struct vmw_ttm_tt { |
144 | struct ttm_dma_tt dma_ttm; | 170 | struct ttm_dma_tt dma_ttm; |
145 | struct vmw_private *dev_priv; | 171 | struct vmw_private *dev_priv; |
146 | int gmr_id; | 172 | int gmr_id; |
173 | struct vmw_mob *mob; | ||
174 | int mem_type; | ||
147 | struct sg_table sgt; | 175 | struct sg_table sgt; |
148 | struct vmw_sg_table vsgt; | 176 | struct vmw_sg_table vsgt; |
149 | uint64_t sg_alloc_size; | 177 | uint64_t sg_alloc_size; |
@@ -244,6 +272,7 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
244 | viter->dma_address = &__vmw_piter_dma_addr; | 272 | viter->dma_address = &__vmw_piter_dma_addr; |
245 | viter->page = &__vmw_piter_non_sg_page; | 273 | viter->page = &__vmw_piter_non_sg_page; |
246 | viter->addrs = vsgt->addrs; | 274 | viter->addrs = vsgt->addrs; |
275 | viter->pages = vsgt->pages; | ||
247 | break; | 276 | break; |
248 | case vmw_dma_map_populate: | 277 | case vmw_dma_map_populate: |
249 | case vmw_dma_map_bind: | 278 | case vmw_dma_map_bind: |
@@ -424,6 +453,63 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
424 | vmw_tt->mapped = false; | 453 | vmw_tt->mapped = false; |
425 | } | 454 | } |
426 | 455 | ||
456 | |||
457 | /** | ||
458 | * vmw_bo_map_dma - Make sure buffer object pages are visible to the device | ||
459 | * | ||
460 | * @bo: Pointer to a struct ttm_buffer_object | ||
461 | * | ||
462 | * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer | ||
463 | * instead of a pointer to a struct vmw_ttm_backend as argument. | ||
464 | * Note that the buffer object must be either pinned or reserved before | ||
465 | * calling this function. | ||
466 | */ | ||
467 | int vmw_bo_map_dma(struct ttm_buffer_object *bo) | ||
468 | { | ||
469 | struct vmw_ttm_tt *vmw_tt = | ||
470 | container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); | ||
471 | |||
472 | return vmw_ttm_map_dma(vmw_tt); | ||
473 | } | ||
474 | |||
475 | |||
476 | /** | ||
477 | * vmw_bo_unmap_dma - Make sure buffer object pages are visible to the device | ||
478 | * | ||
479 | * @bo: Pointer to a struct ttm_buffer_object | ||
480 | * | ||
481 | * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer | ||
482 | * instead of a pointer to a struct vmw_ttm_backend as argument. | ||
483 | */ | ||
484 | void vmw_bo_unmap_dma(struct ttm_buffer_object *bo) | ||
485 | { | ||
486 | struct vmw_ttm_tt *vmw_tt = | ||
487 | container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); | ||
488 | |||
489 | vmw_ttm_unmap_dma(vmw_tt); | ||
490 | } | ||
491 | |||
492 | |||
493 | /** | ||
494 | * vmw_bo_sg_table - Return a struct vmw_sg_table object for a | ||
495 | * TTM buffer object | ||
496 | * | ||
497 | * @bo: Pointer to a struct ttm_buffer_object | ||
498 | * | ||
499 | * Returns a pointer to a struct vmw_sg_table object. The object should | ||
500 | * not be freed after use. | ||
501 | * Note that for the device addresses to be valid, the buffer object must | ||
502 | * either be reserved or pinned. | ||
503 | */ | ||
504 | const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo) | ||
505 | { | ||
506 | struct vmw_ttm_tt *vmw_tt = | ||
507 | container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); | ||
508 | |||
509 | return &vmw_tt->vsgt; | ||
510 | } | ||
511 | |||
512 | |||
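As a usage note for the new accessor above: once a buffer object is reserved or pinned, the returned vmw_sg_table can be walked with the driver's vmw_piter helpers, which is how the MOB page tables are expected to be filled. A sketch, not part of the patch and with a hypothetical function name:

	static void vmw_bo_dump_dma_addrs_example(struct ttm_buffer_object *bo)
	{
		const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
		struct vmw_piter iter;

		/* Walk every backing page and print its device address. */
		vmw_piter_start(&iter, vsgt, 0);
		while (vmw_piter_next(&iter))
			DRM_INFO("page at 0x%llx\n",
				 (unsigned long long) vmw_piter_dma_addr(&iter));
	}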
427 | static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) | 513 | static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) |
428 | { | 514 | { |
429 | struct vmw_ttm_tt *vmw_be = | 515 | struct vmw_ttm_tt *vmw_be = |
@@ -435,9 +521,27 @@ static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
435 | return ret; | 521 | return ret; |
436 | 522 | ||
437 | vmw_be->gmr_id = bo_mem->start; | 523 | vmw_be->gmr_id = bo_mem->start; |
524 | vmw_be->mem_type = bo_mem->mem_type; | ||
525 | |||
526 | switch (bo_mem->mem_type) { | ||
527 | case VMW_PL_GMR: | ||
528 | return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt, | ||
529 | ttm->num_pages, vmw_be->gmr_id); | ||
530 | case VMW_PL_MOB: | ||
531 | if (unlikely(vmw_be->mob == NULL)) { | ||
532 | vmw_be->mob = | ||
533 | vmw_mob_create(ttm->num_pages); | ||
534 | if (unlikely(vmw_be->mob == NULL)) | ||
535 | return -ENOMEM; | ||
536 | } | ||
438 | 537 | ||
439 | return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt, | 538 | return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob, |
440 | ttm->num_pages, vmw_be->gmr_id); | 539 | &vmw_be->vsgt, ttm->num_pages, |
540 | vmw_be->gmr_id); | ||
541 | default: | ||
542 | BUG(); | ||
543 | } | ||
544 | return 0; | ||
441 | } | 545 | } |
442 | 546 | ||
443 | static int vmw_ttm_unbind(struct ttm_tt *ttm) | 547 | static int vmw_ttm_unbind(struct ttm_tt *ttm) |
@@ -445,7 +549,16 @@ static int vmw_ttm_unbind(struct ttm_tt *ttm)
445 | struct vmw_ttm_tt *vmw_be = | 549 | struct vmw_ttm_tt *vmw_be = |
446 | container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); | 550 | container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); |
447 | 551 | ||
448 | vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id); | 552 | switch (vmw_be->mem_type) { |
553 | case VMW_PL_GMR: | ||
554 | vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id); | ||
555 | break; | ||
556 | case VMW_PL_MOB: | ||
557 | vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob); | ||
558 | break; | ||
559 | default: | ||
560 | BUG(); | ||
561 | } | ||
449 | 562 | ||
450 | if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind) | 563 | if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind) |
451 | vmw_ttm_unmap_dma(vmw_be); | 564 | vmw_ttm_unmap_dma(vmw_be); |
@@ -453,6 +566,7 @@ static int vmw_ttm_unbind(struct ttm_tt *ttm)
453 | return 0; | 566 | return 0; |
454 | } | 567 | } |
455 | 568 | ||
569 | |||
456 | static void vmw_ttm_destroy(struct ttm_tt *ttm) | 570 | static void vmw_ttm_destroy(struct ttm_tt *ttm) |
457 | { | 571 | { |
458 | struct vmw_ttm_tt *vmw_be = | 572 | struct vmw_ttm_tt *vmw_be = |
@@ -463,9 +577,14 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm)
463 | ttm_dma_tt_fini(&vmw_be->dma_ttm); | 577 | ttm_dma_tt_fini(&vmw_be->dma_ttm); |
464 | else | 578 | else |
465 | ttm_tt_fini(ttm); | 579 | ttm_tt_fini(ttm); |
580 | |||
581 | if (vmw_be->mob) | ||
582 | vmw_mob_destroy(vmw_be->mob); | ||
583 | |||
466 | kfree(vmw_be); | 584 | kfree(vmw_be); |
467 | } | 585 | } |
468 | 586 | ||
587 | |||
469 | static int vmw_ttm_populate(struct ttm_tt *ttm) | 588 | static int vmw_ttm_populate(struct ttm_tt *ttm) |
470 | { | 589 | { |
471 | struct vmw_ttm_tt *vmw_tt = | 590 | struct vmw_ttm_tt *vmw_tt = |
@@ -500,6 +619,12 @@ static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
500 | struct vmw_private *dev_priv = vmw_tt->dev_priv; | 619 | struct vmw_private *dev_priv = vmw_tt->dev_priv; |
501 | struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); | 620 | struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); |
502 | 621 | ||
622 | |||
623 | if (vmw_tt->mob) { | ||
624 | vmw_mob_destroy(vmw_tt->mob); | ||
625 | vmw_tt->mob = NULL; | ||
626 | } | ||
627 | |||
503 | vmw_ttm_unmap_dma(vmw_tt); | 628 | vmw_ttm_unmap_dma(vmw_tt); |
504 | if (dev_priv->map_mode == vmw_dma_alloc_coherent) { | 629 | if (dev_priv->map_mode == vmw_dma_alloc_coherent) { |
505 | size_t size = | 630 | size_t size = |
@@ -530,6 +655,7 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
530 | 655 | ||
531 | vmw_be->dma_ttm.ttm.func = &vmw_ttm_func; | 656 | vmw_be->dma_ttm.ttm.func = &vmw_ttm_func; |
532 | vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev); | 657 | vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev); |
658 | vmw_be->mob = NULL; | ||
533 | 659 | ||
534 | if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) | 660 | if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) |
535 | ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags, | 661 | ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags, |
@@ -571,6 +697,7 @@ static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
571 | man->default_caching = TTM_PL_FLAG_CACHED; | 697 | man->default_caching = TTM_PL_FLAG_CACHED; |
572 | break; | 698 | break; |
573 | case VMW_PL_GMR: | 699 | case VMW_PL_GMR: |
700 | case VMW_PL_MOB: | ||
574 | /* | 701 | /* |
575 | * "Guest Memory Regions" is an aperture like feature with | 702 | * "Guest Memory Regions" is an aperture like feature with |
576 | * one slot per bo. There is an upper limit of the number of | 703 | * one slot per bo. There is an upper limit of the number of |
@@ -618,6 +745,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg
618 | switch (mem->mem_type) { | 745 | switch (mem->mem_type) { |
619 | case TTM_PL_SYSTEM: | 746 | case TTM_PL_SYSTEM: |
620 | case VMW_PL_GMR: | 747 | case VMW_PL_GMR: |
748 | case VMW_PL_MOB: | ||
621 | return 0; | 749 | return 0; |
622 | case TTM_PL_VRAM: | 750 | case TTM_PL_VRAM: |
623 | mem->bus.offset = mem->start << PAGE_SHIFT; | 751 | mem->bus.offset = mem->start << PAGE_SHIFT; |
@@ -677,6 +805,38 @@ static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
677 | VMW_FENCE_WAIT_TIMEOUT); | 805 | VMW_FENCE_WAIT_TIMEOUT); |
678 | } | 806 | } |
679 | 807 | ||
808 | /** | ||
809 | * vmw_move_notify - TTM move_notify_callback | ||
810 | * | ||
811 | * @bo: The TTM buffer object about to move. | ||
812 | * @mem: The struct ttm_mem_reg indicating to what memory | ||
813 | * region the move is taking place. | ||
814 | * | ||
815 | * Calls move_notify for all subsystems needing it. | ||
816 | * (currently only resources). | ||
817 | */ | ||
818 | static void vmw_move_notify(struct ttm_buffer_object *bo, | ||
819 | struct ttm_mem_reg *mem) | ||
820 | { | ||
821 | vmw_resource_move_notify(bo, mem); | ||
822 | } | ||
823 | |||
824 | |||
825 | /** | ||
826 | * vmw_swap_notify - TTM swap_notify callback | ||
827 | * | ||
828 | * @bo: The TTM buffer object about to be swapped out. | ||
829 | */ | ||
830 | static void vmw_swap_notify(struct ttm_buffer_object *bo) | ||
831 | { | ||
832 | struct ttm_bo_device *bdev = bo->bdev; | ||
833 | |||
834 | spin_lock(&bdev->fence_lock); | ||
835 | ttm_bo_wait(bo, false, false, false); | ||
836 | spin_unlock(&bdev->fence_lock); | ||
837 | } | ||
838 | |||
839 | |||
680 | struct ttm_bo_driver vmw_bo_driver = { | 840 | struct ttm_bo_driver vmw_bo_driver = { |
681 | .ttm_tt_create = &vmw_ttm_tt_create, | 841 | .ttm_tt_create = &vmw_ttm_tt_create, |
682 | .ttm_tt_populate = &vmw_ttm_populate, | 842 | .ttm_tt_populate = &vmw_ttm_populate, |
@@ -691,8 +851,8 @@ struct ttm_bo_driver vmw_bo_driver = {
691 | .sync_obj_flush = vmw_sync_obj_flush, | 851 | .sync_obj_flush = vmw_sync_obj_flush, |
692 | .sync_obj_unref = vmw_sync_obj_unref, | 852 | .sync_obj_unref = vmw_sync_obj_unref, |
693 | .sync_obj_ref = vmw_sync_obj_ref, | 853 | .sync_obj_ref = vmw_sync_obj_ref, |
694 | .move_notify = NULL, | 854 | .move_notify = vmw_move_notify, |
695 | .swap_notify = NULL, | 855 | .swap_notify = vmw_swap_notify, |
696 | .fault_reserve_notify = &vmw_ttm_fault_reserve_notify, | 856 | .fault_reserve_notify = &vmw_ttm_fault_reserve_notify, |
697 | .io_mem_reserve = &vmw_ttm_io_mem_reserve, | 857 | .io_mem_reserve = &vmw_ttm_io_mem_reserve, |
698 | .io_mem_free = &vmw_ttm_io_mem_free, | 858 | .io_mem_free = &vmw_ttm_io_mem_free, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 00ae0925aca8..97aa55159107 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -32,12 +32,28 @@
32 | struct vmw_user_context { | 32 | struct vmw_user_context { |
33 | struct ttm_base_object base; | 33 | struct ttm_base_object base; |
34 | struct vmw_resource res; | 34 | struct vmw_resource res; |
35 | struct vmw_ctx_binding_state cbs; | ||
35 | }; | 36 | }; |
36 | 37 | ||
38 | |||
39 | |||
40 | typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *); | ||
41 | |||
37 | static void vmw_user_context_free(struct vmw_resource *res); | 42 | static void vmw_user_context_free(struct vmw_resource *res); |
38 | static struct vmw_resource * | 43 | static struct vmw_resource * |
39 | vmw_user_context_base_to_res(struct ttm_base_object *base); | 44 | vmw_user_context_base_to_res(struct ttm_base_object *base); |
40 | 45 | ||
46 | static int vmw_gb_context_create(struct vmw_resource *res); | ||
47 | static int vmw_gb_context_bind(struct vmw_resource *res, | ||
48 | struct ttm_validate_buffer *val_buf); | ||
49 | static int vmw_gb_context_unbind(struct vmw_resource *res, | ||
50 | bool readback, | ||
51 | struct ttm_validate_buffer *val_buf); | ||
52 | static int vmw_gb_context_destroy(struct vmw_resource *res); | ||
53 | static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi); | ||
54 | static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi); | ||
55 | static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi); | ||
56 | static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs); | ||
41 | static uint64_t vmw_user_context_size; | 57 | static uint64_t vmw_user_context_size; |
42 | 58 | ||
43 | static const struct vmw_user_resource_conv user_context_conv = { | 59 | static const struct vmw_user_resource_conv user_context_conv = { |
@@ -62,6 +78,23 @@ static const struct vmw_res_func vmw_legacy_context_func = {
62 | .unbind = NULL | 78 | .unbind = NULL |
63 | }; | 79 | }; |
64 | 80 | ||
81 | static const struct vmw_res_func vmw_gb_context_func = { | ||
82 | .res_type = vmw_res_context, | ||
83 | .needs_backup = true, | ||
84 | .may_evict = true, | ||
85 | .type_name = "guest backed contexts", | ||
86 | .backup_placement = &vmw_mob_placement, | ||
87 | .create = vmw_gb_context_create, | ||
88 | .destroy = vmw_gb_context_destroy, | ||
89 | .bind = vmw_gb_context_bind, | ||
90 | .unbind = vmw_gb_context_unbind | ||
91 | }; | ||
92 | |||
93 | static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = { | ||
94 | [vmw_ctx_binding_shader] = vmw_context_scrub_shader, | ||
95 | [vmw_ctx_binding_rt] = vmw_context_scrub_render_target, | ||
96 | [vmw_ctx_binding_tex] = vmw_context_scrub_texture }; | ||
97 | |||
65 | /** | 98 | /** |
66 | * Context management: | 99 | * Context management: |
67 | */ | 100 | */ |
@@ -76,6 +109,16 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
76 | } *cmd; | 109 | } *cmd; |
77 | 110 | ||
78 | 111 | ||
112 | if (res->func->destroy == vmw_gb_context_destroy) { | ||
113 | mutex_lock(&dev_priv->cmdbuf_mutex); | ||
114 | (void) vmw_gb_context_destroy(res); | ||
115 | if (dev_priv->pinned_bo != NULL && | ||
116 | !dev_priv->query_cid_valid) | ||
117 | __vmw_execbuf_release_pinned_bo(dev_priv, NULL); | ||
118 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
119 | return; | ||
120 | } | ||
121 | |||
79 | vmw_execbuf_release_pinned_bo(dev_priv); | 122 | vmw_execbuf_release_pinned_bo(dev_priv); |
80 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 123 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
81 | if (unlikely(cmd == NULL)) { | 124 | if (unlikely(cmd == NULL)) { |
@@ -92,6 +135,33 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
92 | vmw_3d_resource_dec(dev_priv, false); | 135 | vmw_3d_resource_dec(dev_priv, false); |
93 | } | 136 | } |
94 | 137 | ||
138 | static int vmw_gb_context_init(struct vmw_private *dev_priv, | ||
139 | struct vmw_resource *res, | ||
140 | void (*res_free) (struct vmw_resource *res)) | ||
141 | { | ||
142 | int ret; | ||
143 | struct vmw_user_context *uctx = | ||
144 | container_of(res, struct vmw_user_context, res); | ||
145 | |||
146 | ret = vmw_resource_init(dev_priv, res, true, | ||
147 | res_free, &vmw_gb_context_func); | ||
148 | res->backup_size = SVGA3D_CONTEXT_DATA_SIZE; | ||
149 | |||
150 | if (unlikely(ret != 0)) { | ||
151 | if (res_free) | ||
152 | res_free(res); | ||
153 | else | ||
154 | kfree(res); | ||
155 | return ret; | ||
156 | } | ||
157 | |||
158 | memset(&uctx->cbs, 0, sizeof(uctx->cbs)); | ||
159 | INIT_LIST_HEAD(&uctx->cbs.list); | ||
160 | |||
161 | vmw_resource_activate(res, vmw_hw_context_destroy); | ||
162 | return 0; | ||
163 | } | ||
164 | |||
95 | static int vmw_context_init(struct vmw_private *dev_priv, | 165 | static int vmw_context_init(struct vmw_private *dev_priv, |
96 | struct vmw_resource *res, | 166 | struct vmw_resource *res, |
97 | void (*res_free) (struct vmw_resource *res)) | 167 | void (*res_free) (struct vmw_resource *res)) |
@@ -103,6 +173,9 @@ static int vmw_context_init(struct vmw_private *dev_priv,
103 | SVGA3dCmdDefineContext body; | 173 | SVGA3dCmdDefineContext body; |
104 | } *cmd; | 174 | } *cmd; |
105 | 175 | ||
176 | if (dev_priv->has_mob) | ||
177 | return vmw_gb_context_init(dev_priv, res, res_free); | ||
178 | |||
106 | ret = vmw_resource_init(dev_priv, res, false, | 179 | ret = vmw_resource_init(dev_priv, res, false, |
107 | res_free, &vmw_legacy_context_func); | 180 | res_free, &vmw_legacy_context_func); |
108 | 181 | ||
@@ -154,6 +227,184 @@ struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
154 | return (ret == 0) ? res : NULL; | 227 | return (ret == 0) ? res : NULL; |
155 | } | 228 | } |
156 | 229 | ||
230 | |||
231 | static int vmw_gb_context_create(struct vmw_resource *res) | ||
232 | { | ||
233 | struct vmw_private *dev_priv = res->dev_priv; | ||
234 | int ret; | ||
235 | struct { | ||
236 | SVGA3dCmdHeader header; | ||
237 | SVGA3dCmdDefineGBContext body; | ||
238 | } *cmd; | ||
239 | |||
240 | if (likely(res->id != -1)) | ||
241 | return 0; | ||
242 | |||
243 | ret = vmw_resource_alloc_id(res); | ||
244 | if (unlikely(ret != 0)) { | ||
245 | DRM_ERROR("Failed to allocate a context id.\n"); | ||
246 | goto out_no_id; | ||
247 | } | ||
248 | |||
249 | if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) { | ||
250 | ret = -EBUSY; | ||
251 | goto out_no_fifo; | ||
252 | } | ||
253 | |||
254 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
255 | if (unlikely(cmd == NULL)) { | ||
256 | DRM_ERROR("Failed reserving FIFO space for context " | ||
257 | "creation.\n"); | ||
258 | ret = -ENOMEM; | ||
259 | goto out_no_fifo; | ||
260 | } | ||
261 | |||
262 | cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT; | ||
263 | cmd->header.size = sizeof(cmd->body); | ||
264 | cmd->body.cid = res->id; | ||
265 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
266 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
267 | |||
268 | return 0; | ||
269 | |||
270 | out_no_fifo: | ||
271 | vmw_resource_release_id(res); | ||
272 | out_no_id: | ||
273 | return ret; | ||
274 | } | ||
275 | |||
276 | static int vmw_gb_context_bind(struct vmw_resource *res, | ||
277 | struct ttm_validate_buffer *val_buf) | ||
278 | { | ||
279 | struct vmw_private *dev_priv = res->dev_priv; | ||
280 | struct { | ||
281 | SVGA3dCmdHeader header; | ||
282 | SVGA3dCmdBindGBContext body; | ||
283 | } *cmd; | ||
284 | struct ttm_buffer_object *bo = val_buf->bo; | ||
285 | |||
286 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
287 | |||
288 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
289 | if (unlikely(cmd == NULL)) { | ||
290 | DRM_ERROR("Failed reserving FIFO space for context " | ||
291 | "binding.\n"); | ||
292 | return -ENOMEM; | ||
293 | } | ||
294 | |||
295 | cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; | ||
296 | cmd->header.size = sizeof(cmd->body); | ||
297 | cmd->body.cid = res->id; | ||
298 | cmd->body.mobid = bo->mem.start; | ||
299 | cmd->body.validContents = res->backup_dirty; | ||
300 | res->backup_dirty = false; | ||
301 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
302 | |||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | static int vmw_gb_context_unbind(struct vmw_resource *res, | ||
307 | bool readback, | ||
308 | struct ttm_validate_buffer *val_buf) | ||
309 | { | ||
310 | struct vmw_private *dev_priv = res->dev_priv; | ||
311 | struct ttm_buffer_object *bo = val_buf->bo; | ||
312 | struct vmw_fence_obj *fence; | ||
313 | struct vmw_user_context *uctx = | ||
314 | container_of(res, struct vmw_user_context, res); | ||
315 | |||
316 | struct { | ||
317 | SVGA3dCmdHeader header; | ||
318 | SVGA3dCmdReadbackGBContext body; | ||
319 | } *cmd1; | ||
320 | struct { | ||
321 | SVGA3dCmdHeader header; | ||
322 | SVGA3dCmdBindGBContext body; | ||
323 | } *cmd2; | ||
324 | uint32_t submit_size; | ||
325 | uint8_t *cmd; | ||
326 | |||
327 | |||
328 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
329 | |||
330 | mutex_lock(&dev_priv->binding_mutex); | ||
331 | vmw_context_binding_state_kill(&uctx->cbs); | ||
332 | |||
333 | submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); | ||
334 | |||
335 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | ||
336 | if (unlikely(cmd == NULL)) { | ||
337 | DRM_ERROR("Failed reserving FIFO space for context " | ||
338 | "unbinding.\n"); | ||
339 | mutex_unlock(&dev_priv->binding_mutex); | ||
340 | return -ENOMEM; | ||
341 | } | ||
342 | |||
343 | cmd2 = (void *) cmd; | ||
344 | if (readback) { | ||
345 | cmd1 = (void *) cmd; | ||
346 | cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT; | ||
347 | cmd1->header.size = sizeof(cmd1->body); | ||
348 | cmd1->body.cid = res->id; | ||
349 | cmd2 = (void *) (&cmd1[1]); | ||
350 | } | ||
351 | cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; | ||
352 | cmd2->header.size = sizeof(cmd2->body); | ||
353 | cmd2->body.cid = res->id; | ||
354 | cmd2->body.mobid = SVGA3D_INVALID_ID; | ||
355 | |||
356 | vmw_fifo_commit(dev_priv, submit_size); | ||
357 | mutex_unlock(&dev_priv->binding_mutex); | ||
358 | |||
359 | /* | ||
360 | * Create a fence object and fence the backup buffer. | ||
361 | */ | ||
362 | |||
363 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, | ||
364 | &fence, NULL); | ||
365 | |||
366 | vmw_fence_single_bo(bo, fence); | ||
367 | |||
368 | if (likely(fence != NULL)) | ||
369 | vmw_fence_obj_unreference(&fence); | ||
370 | |||
371 | return 0; | ||
372 | } | ||
373 | |||
374 | static int vmw_gb_context_destroy(struct vmw_resource *res) | ||
375 | { | ||
376 | struct vmw_private *dev_priv = res->dev_priv; | ||
377 | struct { | ||
378 | SVGA3dCmdHeader header; | ||
379 | SVGA3dCmdDestroyGBContext body; | ||
380 | } *cmd; | ||
381 | struct vmw_user_context *uctx = | ||
382 | container_of(res, struct vmw_user_context, res); | ||
383 | |||
384 | BUG_ON(!list_empty(&uctx->cbs.list)); | ||
385 | |||
386 | if (likely(res->id == -1)) | ||
387 | return 0; | ||
388 | |||
389 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
390 | if (unlikely(cmd == NULL)) { | ||
391 | DRM_ERROR("Failed reserving FIFO space for context " | ||
392 | "destruction.\n"); | ||
393 | return -ENOMEM; | ||
394 | } | ||
395 | |||
396 | cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT; | ||
397 | cmd->header.size = sizeof(cmd->body); | ||
398 | cmd->body.cid = res->id; | ||
399 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
400 | if (dev_priv->query_cid == res->id) | ||
401 | dev_priv->query_cid_valid = false; | ||
402 | vmw_resource_release_id(res); | ||
403 | vmw_3d_resource_dec(dev_priv, false); | ||
404 | |||
405 | return 0; | ||
406 | } | ||
407 | |||
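The four guest-backed context callbacks above plug into the driver's generic resource framework through a struct vmw_res_func. The initializer below is an illustrative sketch only; the actual vmw_gb_context_func definition is not part of this excerpt, so the exact field values are assumptions:

/* Sketch (assumed values): wiring the GB context hooks into the generic
 * resource framework. Backup buffers for GB contexts live in MOB memory. */
static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};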
157 | /** | 408 | /** |
158 | * User-space context management: | 409 | * User-space context management: |
159 | */ | 410 | */ |
@@ -272,3 +523,283 @@ out_unlock: | |||
272 | return ret; | 523 | return ret; |
273 | 524 | ||
274 | } | 525 | } |
526 | |||
527 | /** | ||
528 | * vmw_context_scrub_shader - scrub a shader binding from a context. | ||
529 | * | ||
530 | * @bi: single binding information. | ||
531 | */ | ||
532 | static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi) | ||
533 | { | ||
534 | struct vmw_private *dev_priv = bi->ctx->dev_priv; | ||
535 | struct { | ||
536 | SVGA3dCmdHeader header; | ||
537 | SVGA3dCmdSetShader body; | ||
538 | } *cmd; | ||
539 | |||
540 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
541 | if (unlikely(cmd == NULL)) { | ||
542 | DRM_ERROR("Failed reserving FIFO space for shader " | ||
543 | "unbinding.\n"); | ||
544 | return -ENOMEM; | ||
545 | } | ||
546 | |||
547 | cmd->header.id = SVGA_3D_CMD_SET_SHADER; | ||
548 | cmd->header.size = sizeof(cmd->body); | ||
549 | cmd->body.cid = bi->ctx->id; | ||
550 | cmd->body.type = bi->i1.shader_type; | ||
551 | cmd->body.shid = SVGA3D_INVALID_ID; | ||
552 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
553 | |||
554 | return 0; | ||
555 | } | ||
556 | |||
557 | /** | ||
558 | * vmw_context_scrub_render_target - scrub a render target binding | ||
559 | * from a context. | ||
560 | * | ||
561 | * @bi: single binding information. | ||
562 | */ | ||
563 | static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi) | ||
564 | { | ||
565 | struct vmw_private *dev_priv = bi->ctx->dev_priv; | ||
566 | struct { | ||
567 | SVGA3dCmdHeader header; | ||
568 | SVGA3dCmdSetRenderTarget body; | ||
569 | } *cmd; | ||
570 | |||
571 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
572 | if (unlikely(cmd == NULL)) { | ||
573 | DRM_ERROR("Failed reserving FIFO space for render target " | ||
574 | "unbinding.\n"); | ||
575 | return -ENOMEM; | ||
576 | } | ||
577 | |||
578 | cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET; | ||
579 | cmd->header.size = sizeof(cmd->body); | ||
580 | cmd->body.cid = bi->ctx->id; | ||
581 | cmd->body.type = bi->i1.rt_type; | ||
582 | cmd->body.target.sid = SVGA3D_INVALID_ID; | ||
583 | cmd->body.target.face = 0; | ||
584 | cmd->body.target.mipmap = 0; | ||
585 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
586 | |||
587 | return 0; | ||
588 | } | ||
589 | |||
590 | /** | ||
591 | * vmw_context_scrub_texture - scrub a texture binding from a context. | ||
592 | * | ||
593 | * @bi: single binding information. | ||
594 | * | ||
595 | * TODO: Possibly complement this function with a function that takes | ||
596 | * a list of texture bindings and combines them to a single command. | ||
597 | */ | ||
598 | static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi) | ||
599 | { | ||
600 | struct vmw_private *dev_priv = bi->ctx->dev_priv; | ||
601 | struct { | ||
602 | SVGA3dCmdHeader header; | ||
603 | struct { | ||
604 | SVGA3dCmdSetTextureState c; | ||
605 | SVGA3dTextureState s1; | ||
606 | } body; | ||
607 | } *cmd; | ||
608 | |||
609 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
610 | if (unlikely(cmd == NULL)) { | ||
611 | DRM_ERROR("Failed reserving FIFO space for texture " | ||
612 | "unbinding.\n"); | ||
613 | return -ENOMEM; | ||
614 | } | ||
615 | |||
616 | |||
617 | cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE; | ||
618 | cmd->header.size = sizeof(cmd->body); | ||
619 | cmd->body.c.cid = bi->ctx->id; | ||
620 | cmd->body.s1.stage = bi->i1.texture_stage; | ||
621 | cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; | ||
622 | cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID; | ||
623 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
624 | |||
625 | return 0; | ||
626 | } | ||
627 | |||
628 | /** | ||
629 | * vmw_context_binding_drop: Stop tracking a context binding | ||
630 | * | ||
631 | * @cb: Pointer to binding tracker storage. | ||
632 | * | ||
633 | * Stops tracking a context binding, and re-initializes its storage. | ||
634 | * Typically used when the context binding is replaced with a binding to | ||
635 | * another (or the same, for that matter) resource. | ||
636 | */ | ||
637 | static void vmw_context_binding_drop(struct vmw_ctx_binding *cb) | ||
638 | { | ||
639 | list_del(&cb->ctx_list); | ||
640 | if (!list_empty(&cb->res_list)) | ||
641 | list_del(&cb->res_list); | ||
642 | cb->bi.ctx = NULL; | ||
643 | } | ||
644 | |||
645 | /** | ||
646 | * vmw_context_binding_add: Start tracking a context binding | ||
647 | * | ||
648 | * @cbs: Pointer to the context binding state tracker. | ||
649 | * @bi: Information about the binding to track. | ||
650 | * | ||
651 | * Performs basic checks on the binding to make sure arguments are within | ||
652 | * bounds and then starts tracking the binding in the context binding | ||
653 | * state structure @cbs. | ||
654 | */ | ||
655 | int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, | ||
656 | const struct vmw_ctx_bindinfo *bi) | ||
657 | { | ||
658 | struct vmw_ctx_binding *loc; | ||
659 | |||
660 | switch (bi->bt) { | ||
661 | case vmw_ctx_binding_rt: | ||
662 | if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) { | ||
663 | DRM_ERROR("Illegal render target type %u.\n", | ||
664 | (unsigned) bi->i1.rt_type); | ||
665 | return -EINVAL; | ||
666 | } | ||
667 | loc = &cbs->render_targets[bi->i1.rt_type]; | ||
668 | break; | ||
669 | case vmw_ctx_binding_tex: | ||
670 | if (unlikely((unsigned)bi->i1.texture_stage >= | ||
671 | SVGA3D_NUM_TEXTURE_UNITS)) { | ||
672 | DRM_ERROR("Illegal texture/sampler unit %u.\n", | ||
673 | (unsigned) bi->i1.texture_stage); | ||
674 | return -EINVAL; | ||
675 | } | ||
676 | loc = &cbs->texture_units[bi->i1.texture_stage]; | ||
677 | break; | ||
678 | case vmw_ctx_binding_shader: | ||
679 | if (unlikely((unsigned)bi->i1.shader_type >= | ||
680 | SVGA3D_SHADERTYPE_MAX)) { | ||
681 | DRM_ERROR("Illegal shader type %u.\n", | ||
682 | (unsigned) bi->i1.shader_type); | ||
683 | return -EINVAL; | ||
684 | } | ||
685 | loc = &cbs->shaders[bi->i1.shader_type]; | ||
686 | break; | ||
687 | default: | ||
688 | BUG(); | ||
689 | } | ||
690 | |||
691 | if (loc->bi.ctx != NULL) | ||
692 | vmw_context_binding_drop(loc); | ||
693 | |||
694 | loc->bi = *bi; | ||
695 | list_add_tail(&loc->ctx_list, &cbs->list); | ||
696 | INIT_LIST_HEAD(&loc->res_list); | ||
697 | |||
698 | return 0; | ||
699 | } | ||
700 | |||
701 | /** | ||
702 | * vmw_context_binding_transfer: Transfer a context binding tracking entry. | ||
703 | * | ||
704 | * @cbs: Pointer to the persistent context binding state tracker. | ||
705 | * @bi: Information about the binding to track. | ||
706 | * | ||
707 | */ | ||
708 | static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs, | ||
709 | const struct vmw_ctx_bindinfo *bi) | ||
710 | { | ||
711 | struct vmw_ctx_binding *loc; | ||
712 | |||
713 | switch (bi->bt) { | ||
714 | case vmw_ctx_binding_rt: | ||
715 | loc = &cbs->render_targets[bi->i1.rt_type]; | ||
716 | break; | ||
717 | case vmw_ctx_binding_tex: | ||
718 | loc = &cbs->texture_units[bi->i1.texture_stage]; | ||
719 | break; | ||
720 | case vmw_ctx_binding_shader: | ||
721 | loc = &cbs->shaders[bi->i1.shader_type]; | ||
722 | break; | ||
723 | default: | ||
724 | BUG(); | ||
725 | } | ||
726 | |||
727 | if (loc->bi.ctx != NULL) | ||
728 | vmw_context_binding_drop(loc); | ||
729 | |||
730 | loc->bi = *bi; | ||
731 | list_add_tail(&loc->ctx_list, &cbs->list); | ||
732 | if (bi->res != NULL) | ||
733 | list_add_tail(&loc->res_list, &bi->res->binding_head); | ||
734 | else | ||
735 | INIT_LIST_HEAD(&loc->res_list); | ||
736 | } | ||
737 | |||
738 | /** | ||
739 | * vmw_context_binding_kill - Kill a binding on the device | ||
740 | * and stop tracking it. | ||
741 | * | ||
742 | * @cb: Pointer to binding tracker storage. | ||
743 | * | ||
744 | * Emits FIFO commands to scrub a binding represented by @cb. | ||
745 | * Then stops tracking the binding and re-initializes its storage. | ||
746 | */ | ||
747 | void vmw_context_binding_kill(struct vmw_ctx_binding *cb) | ||
748 | { | ||
749 | (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi); | ||
750 | vmw_context_binding_drop(cb); | ||
751 | } | ||
752 | |||
753 | /** | ||
754 | * vmw_context_binding_state_kill - Kill all bindings associated with a | ||
755 | * struct vmw_ctx_binding state structure, and re-initialize the structure. | ||
756 | * | ||
757 | * @cbs: Pointer to the context binding state tracker. | ||
758 | * | ||
759 | * Emits commands to scrub all bindings associated with the | ||
760 | * context binding state tracker. Then re-initializes the whole structure. | ||
761 | */ | ||
762 | static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs) | ||
763 | { | ||
764 | struct vmw_ctx_binding *entry, *next; | ||
765 | |||
766 | list_for_each_entry_safe(entry, next, &cbs->list, ctx_list) | ||
767 | vmw_context_binding_kill(entry); | ||
768 | } | ||
769 | |||
770 | /** | ||
771 | * vmw_context_binding_res_list_kill - Kill all bindings on a | ||
772 | * resource binding list | ||
773 | * | ||
774 | * @head: list head of resource binding list | ||
775 | * | ||
776 | * Kills all bindings associated with a specific resource. Typically | ||
777 | * called before the resource is destroyed. | ||
778 | */ | ||
779 | void vmw_context_binding_res_list_kill(struct list_head *head) | ||
780 | { | ||
781 | struct vmw_ctx_binding *entry, *next; | ||
782 | |||
783 | list_for_each_entry_safe(entry, next, head, res_list) | ||
784 | vmw_context_binding_kill(entry); | ||
785 | } | ||
786 | |||
787 | /** | ||
788 | * vmw_context_binding_state_transfer - Commit staged binding info | ||
789 | * | ||
790 | * @ctx: Pointer to context to commit the staged binding info to. | ||
791 | * @from: Staged binding info built during execbuf. | ||
792 | * | ||
793 | * Transfers binding info from a temporary structure to the persistent | ||
794 | * structure in the context. This can be done once commands | ||
795 | */ | ||
796 | void vmw_context_binding_state_transfer(struct vmw_resource *ctx, | ||
797 | struct vmw_ctx_binding_state *from) | ||
798 | { | ||
799 | struct vmw_user_context *uctx = | ||
800 | container_of(ctx, struct vmw_user_context, res); | ||
801 | struct vmw_ctx_binding *entry, *next; | ||
802 | |||
803 | list_for_each_entry_safe(entry, next, &from->list, ctx_list) | ||
804 | vmw_context_binding_transfer(&uctx->cbs, &entry->bi); | ||
805 | } | ||
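Taken together, the helpers above implement a two-stage scheme: while the command verifier parses a submission it stages bindings in a scratch vmw_ctx_binding_state, and only after the commands have been committed to the FIFO are they transferred into the context's persistent tracker. A minimal sketch of a caller, with a hypothetical helper name and simplified error handling (the authoritative callers are in vmwgfx_execbuf.c):

/* Hypothetical helper: stage a render-target binding found while parsing
 * a SETRENDERTARGET command. Nothing touches the context itself yet. */
static int example_stage_rt_binding(struct vmw_sw_context *sw_context,
				    struct vmw_resource *ctx,
				    struct vmw_resource *surf,
				    SVGA3dRenderTargetType rt_type)
{
	struct vmw_ctx_bindinfo bi;

	bi.ctx = ctx;
	bi.res = surf;
	bi.bt = vmw_ctx_binding_rt;
	bi.i1.rt_type = rt_type;

	return vmw_context_binding_add(&sw_context->staged_bindings, &bi);
}

/* After the batch has been committed to the device, the staged state is
 * folded into the context's persistent tracker:
 *
 *	vmw_context_binding_state_transfer(ctx, &sw_context->staged_bindings);
 */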
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c index d4e54fcc0acd..a75840211b3c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | |||
@@ -290,8 +290,7 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo, | |||
290 | /** | 290 | /** |
291 | * vmw_bo_pin - Pin or unpin a buffer object without moving it. | 291 | * vmw_bo_pin - Pin or unpin a buffer object without moving it. |
292 | * | 292 | * |
293 | * @bo: The buffer object. Must be reserved, and present either in VRAM | 293 | * @bo: The buffer object. Must be reserved. |
294 | * or GMR memory. | ||
295 | * @pin: Whether to pin or unpin. | 294 | * @pin: Whether to pin or unpin. |
296 | * | 295 | * |
297 | */ | 296 | */ |
@@ -303,10 +302,9 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin) | |||
303 | int ret; | 302 | int ret; |
304 | 303 | ||
305 | lockdep_assert_held(&bo->resv->lock.base); | 304 | lockdep_assert_held(&bo->resv->lock.base); |
306 | BUG_ON(old_mem_type != TTM_PL_VRAM && | ||
307 | old_mem_type != VMW_PL_GMR); | ||
308 | 305 | ||
309 | pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED; | 306 | pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB |
307 | | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; | ||
310 | if (pin) | 308 | if (pin) |
311 | pl_flags |= TTM_PL_FLAG_NO_EVICT; | 309 | pl_flags |= TTM_PL_FLAG_NO_EVICT; |
312 | 310 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index c7a549694e59..078b9b0d2dfe 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -112,6 +112,21 @@ | |||
112 | #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ | 112 | #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ |
113 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ | 113 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ |
114 | struct drm_vmw_update_layout_arg) | 114 | struct drm_vmw_update_layout_arg) |
115 | #define DRM_IOCTL_VMW_CREATE_SHADER \ | ||
116 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \ | ||
117 | struct drm_vmw_shader_create_arg) | ||
118 | #define DRM_IOCTL_VMW_UNREF_SHADER \ | ||
119 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \ | ||
120 | struct drm_vmw_shader_arg) | ||
121 | #define DRM_IOCTL_VMW_GB_SURFACE_CREATE \ | ||
122 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \ | ||
123 | union drm_vmw_gb_surface_create_arg) | ||
124 | #define DRM_IOCTL_VMW_GB_SURFACE_REF \ | ||
125 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \ | ||
126 | union drm_vmw_gb_surface_reference_arg) | ||
127 | #define DRM_IOCTL_VMW_SYNCCPU \ | ||
128 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ | ||
129 | struct drm_vmw_synccpu_arg) | ||
115 | 130 | ||
116 | /** | 131 | /** |
117 | * The core DRM version of this macro doesn't account for | 132 | * The core DRM version of this macro doesn't account for |
@@ -177,6 +192,21 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { | |||
177 | VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, | 192 | VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, |
178 | vmw_kms_update_layout_ioctl, | 193 | vmw_kms_update_layout_ioctl, |
179 | DRM_MASTER | DRM_UNLOCKED), | 194 | DRM_MASTER | DRM_UNLOCKED), |
195 | VMW_IOCTL_DEF(VMW_CREATE_SHADER, | ||
196 | vmw_shader_define_ioctl, | ||
197 | DRM_AUTH | DRM_UNLOCKED), | ||
198 | VMW_IOCTL_DEF(VMW_UNREF_SHADER, | ||
199 | vmw_shader_destroy_ioctl, | ||
200 | DRM_AUTH | DRM_UNLOCKED), | ||
201 | VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE, | ||
202 | vmw_gb_surface_define_ioctl, | ||
203 | DRM_AUTH | DRM_UNLOCKED), | ||
204 | VMW_IOCTL_DEF(VMW_GB_SURFACE_REF, | ||
205 | vmw_gb_surface_reference_ioctl, | ||
206 | DRM_AUTH | DRM_UNLOCKED), | ||
207 | VMW_IOCTL_DEF(VMW_SYNCCPU, | ||
208 | vmw_user_dmabuf_synccpu_ioctl, | ||
209 | DRM_AUTH | DRM_UNLOCKED), | ||
180 | }; | 210 | }; |
181 | 211 | ||
182 | static struct pci_device_id vmw_pci_id_list[] = { | 212 | static struct pci_device_id vmw_pci_id_list[] = { |
@@ -189,6 +219,7 @@ static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON); | |||
189 | static int vmw_force_iommu; | 219 | static int vmw_force_iommu; |
190 | static int vmw_restrict_iommu; | 220 | static int vmw_restrict_iommu; |
191 | static int vmw_force_coherent; | 221 | static int vmw_force_coherent; |
222 | static int vmw_restrict_dma_mask; | ||
192 | 223 | ||
193 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); | 224 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); |
194 | static void vmw_master_init(struct vmw_master *); | 225 | static void vmw_master_init(struct vmw_master *); |
@@ -203,6 +234,8 @@ MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages"); | |||
203 | module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); | 234 | module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); |
204 | MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); | 235 | MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); |
205 | module_param_named(force_coherent, vmw_force_coherent, int, 0600); | 236 | module_param_named(force_coherent, vmw_force_coherent, int, 0600); |
237 | MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); | ||
238 | module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); | ||
206 | 239 | ||
207 | 240 | ||
208 | static void vmw_print_capabilities(uint32_t capabilities) | 241 | static void vmw_print_capabilities(uint32_t capabilities) |
@@ -240,38 +273,52 @@ static void vmw_print_capabilities(uint32_t capabilities) | |||
240 | DRM_INFO(" GMR2.\n"); | 273 | DRM_INFO(" GMR2.\n"); |
241 | if (capabilities & SVGA_CAP_SCREEN_OBJECT_2) | 274 | if (capabilities & SVGA_CAP_SCREEN_OBJECT_2) |
242 | DRM_INFO(" Screen Object 2.\n"); | 275 | DRM_INFO(" Screen Object 2.\n"); |
276 | if (capabilities & SVGA_CAP_COMMAND_BUFFERS) | ||
277 | DRM_INFO(" Command Buffers.\n"); | ||
278 | if (capabilities & SVGA_CAP_CMD_BUFFERS_2) | ||
279 | DRM_INFO(" Command Buffers 2.\n"); | ||
280 | if (capabilities & SVGA_CAP_GBOBJECTS) | ||
281 | DRM_INFO(" Guest Backed Resources.\n"); | ||
243 | } | 282 | } |
244 | 283 | ||
245 | |||
246 | /** | 284 | /** |
247 | * vmw_execbuf_prepare_dummy_query - Initialize a query result structure at | 285 | * vmw_dummy_query_bo_create - create a bo to hold a dummy query result |
248 | * the start of a buffer object. | ||
249 | * | 286 | * |
250 | * @dev_priv: The device private structure. | 287 | * @dev_priv: A device private structure. |
251 | * | 288 | * |
252 | * This function will idle the buffer using an uninterruptible wait, then | 289 | * This function creates a small buffer object that holds the query |
253 | * map the first page and initialize a pending occlusion query result structure, | 290 | * result for dummy queries emitted as query barriers. |
254 | * Finally it will unmap the buffer. | 291 | * The function will then map the first page and initialize a pending |
292 | * occlusion query result structure. Finally it will unmap the buffer. | ||
293 | * No interruptible waits are done within this function. | ||
255 | * | 294 | * |
256 | * TODO: Since we're only mapping a single page, we should optimize the map | 295 | * Returns an error if bo creation or initialization fails. |
257 | * to use kmap_atomic / iomap_atomic. | ||
258 | */ | 296 | */ |
259 | static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv) | 297 | static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) |
260 | { | 298 | { |
299 | int ret; | ||
300 | struct ttm_buffer_object *bo; | ||
261 | struct ttm_bo_kmap_obj map; | 301 | struct ttm_bo_kmap_obj map; |
262 | volatile SVGA3dQueryResult *result; | 302 | volatile SVGA3dQueryResult *result; |
263 | bool dummy; | 303 | bool dummy; |
264 | int ret; | ||
265 | struct ttm_bo_device *bdev = &dev_priv->bdev; | ||
266 | struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; | ||
267 | 304 | ||
268 | ttm_bo_reserve(bo, false, false, false, 0); | 305 | /* |
269 | spin_lock(&bdev->fence_lock); | 306 | * Create the bo as pinned, so that a tryreserve will |
270 | ret = ttm_bo_wait(bo, false, false, false); | 307 | * immediately succeed. This is because we're the only |
271 | spin_unlock(&bdev->fence_lock); | 308 | * user of the bo currently. |
309 | */ | ||
310 | ret = ttm_bo_create(&dev_priv->bdev, | ||
311 | PAGE_SIZE, | ||
312 | ttm_bo_type_device, | ||
313 | &vmw_sys_ne_placement, | ||
314 | 0, false, NULL, | ||
315 | &bo); | ||
316 | |||
272 | if (unlikely(ret != 0)) | 317 | if (unlikely(ret != 0)) |
273 | (void) vmw_fallback_wait(dev_priv, false, true, 0, false, | 318 | return ret; |
274 | 10*HZ); | 319 | |
320 | ret = ttm_bo_reserve(bo, false, true, false, 0); | ||
321 | BUG_ON(ret != 0); | ||
275 | 322 | ||
276 | ret = ttm_bo_kmap(bo, 0, 1, &map); | 323 | ret = ttm_bo_kmap(bo, 0, 1, &map); |
277 | if (likely(ret == 0)) { | 324 | if (likely(ret == 0)) { |
@@ -280,34 +327,19 @@ static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv) | |||
280 | result->state = SVGA3D_QUERYSTATE_PENDING; | 327 | result->state = SVGA3D_QUERYSTATE_PENDING; |
281 | result->result32 = 0xff; | 328 | result->result32 = 0xff; |
282 | ttm_bo_kunmap(&map); | 329 | ttm_bo_kunmap(&map); |
283 | } else | 330 | } |
284 | DRM_ERROR("Dummy query buffer map failed.\n"); | 331 | vmw_bo_pin(bo, false); |
285 | ttm_bo_unreserve(bo); | 332 | ttm_bo_unreserve(bo); |
286 | } | ||
287 | 333 | ||
334 | if (unlikely(ret != 0)) { | ||
335 | DRM_ERROR("Dummy query buffer map failed.\n"); | ||
336 | ttm_bo_unref(&bo); | ||
337 | } else | ||
338 | dev_priv->dummy_query_bo = bo; | ||
288 | 339 | ||
289 | /** | 340 | return ret; |
290 | * vmw_dummy_query_bo_create - create a bo to hold a dummy query result | ||
291 | * | ||
292 | * @dev_priv: A device private structure. | ||
293 | * | ||
294 | * This function creates a small buffer object that holds the query | ||
295 | * result for dummy queries emitted as query barriers. | ||
296 | * No interruptible waits are done within this function. | ||
297 | * | ||
298 | * Returns an error if bo creation fails. | ||
299 | */ | ||
300 | static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) | ||
301 | { | ||
302 | return ttm_bo_create(&dev_priv->bdev, | ||
303 | PAGE_SIZE, | ||
304 | ttm_bo_type_device, | ||
305 | &vmw_vram_sys_placement, | ||
306 | 0, false, NULL, | ||
307 | &dev_priv->dummy_query_bo); | ||
308 | } | 341 | } |
309 | 342 | ||
310 | |||
311 | static int vmw_request_device(struct vmw_private *dev_priv) | 343 | static int vmw_request_device(struct vmw_private *dev_priv) |
312 | { | 344 | { |
313 | int ret; | 345 | int ret; |
@@ -318,14 +350,24 @@ static int vmw_request_device(struct vmw_private *dev_priv) | |||
318 | return ret; | 350 | return ret; |
319 | } | 351 | } |
320 | vmw_fence_fifo_up(dev_priv->fman); | 352 | vmw_fence_fifo_up(dev_priv->fman); |
353 | if (dev_priv->has_mob) { | ||
354 | ret = vmw_otables_setup(dev_priv); | ||
355 | if (unlikely(ret != 0)) { | ||
356 | DRM_ERROR("Unable to initialize " | ||
357 | "guest Memory OBjects.\n"); | ||
358 | goto out_no_mob; | ||
359 | } | ||
360 | } | ||
321 | ret = vmw_dummy_query_bo_create(dev_priv); | 361 | ret = vmw_dummy_query_bo_create(dev_priv); |
322 | if (unlikely(ret != 0)) | 362 | if (unlikely(ret != 0)) |
323 | goto out_no_query_bo; | 363 | goto out_no_query_bo; |
324 | vmw_dummy_query_bo_prepare(dev_priv); | ||
325 | 364 | ||
326 | return 0; | 365 | return 0; |
327 | 366 | ||
328 | out_no_query_bo: | 367 | out_no_query_bo: |
368 | if (dev_priv->has_mob) | ||
369 | vmw_otables_takedown(dev_priv); | ||
370 | out_no_mob: | ||
329 | vmw_fence_fifo_down(dev_priv->fman); | 371 | vmw_fence_fifo_down(dev_priv->fman); |
330 | vmw_fifo_release(dev_priv, &dev_priv->fifo); | 372 | vmw_fifo_release(dev_priv, &dev_priv->fifo); |
331 | return ret; | 373 | return ret; |
@@ -341,10 +383,13 @@ static void vmw_release_device(struct vmw_private *dev_priv) | |||
341 | BUG_ON(dev_priv->pinned_bo != NULL); | 383 | BUG_ON(dev_priv->pinned_bo != NULL); |
342 | 384 | ||
343 | ttm_bo_unref(&dev_priv->dummy_query_bo); | 385 | ttm_bo_unref(&dev_priv->dummy_query_bo); |
386 | if (dev_priv->has_mob) | ||
387 | vmw_otables_takedown(dev_priv); | ||
344 | vmw_fence_fifo_down(dev_priv->fman); | 388 | vmw_fence_fifo_down(dev_priv->fman); |
345 | vmw_fifo_release(dev_priv, &dev_priv->fifo); | 389 | vmw_fifo_release(dev_priv, &dev_priv->fifo); |
346 | } | 390 | } |
347 | 391 | ||
392 | |||
348 | /** | 393 | /** |
349 | * Increase the 3d resource refcount. | 394 | * Increase the 3d resource refcount. |
350 | * If the count was previously zero, initialize the fifo, switching to svga | 395 | * If the count was previously zero, initialize the fifo, switching to svga |
@@ -510,6 +555,33 @@ out_fixup: | |||
510 | return 0; | 555 | return 0; |
511 | } | 556 | } |
512 | 557 | ||
558 | /** | ||
559 | * vmw_dma_masks - set required page- and dma masks | ||
560 | * | ||
561 | * @dev_priv: Pointer to a struct vmw_private | ||
562 | * | ||
563 | * With 32-bit we can only handle 32 bit PFNs. Optionally set that | ||
564 | * restriction also for 64-bit systems. | ||
565 | */ | ||
566 | #ifdef CONFIG_INTEL_IOMMU | ||
567 | static int vmw_dma_masks(struct vmw_private *dev_priv) | ||
568 | { | ||
569 | struct drm_device *dev = dev_priv->dev; | ||
570 | |||
571 | if (intel_iommu_enabled && | ||
572 | (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { | ||
573 | DRM_INFO("Restricting DMA addresses to 44 bits.\n"); | ||
574 | return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); | ||
575 | } | ||
576 | return 0; | ||
577 | } | ||
578 | #else | ||
579 | static int vmw_dma_masks(struct vmw_private *dev_priv) | ||
580 | { | ||
581 | return 0; | ||
582 | } | ||
583 | #endif | ||
584 | |||
513 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | 585 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) |
514 | { | 586 | { |
515 | struct vmw_private *dev_priv; | 587 | struct vmw_private *dev_priv; |
@@ -532,6 +604,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
532 | mutex_init(&dev_priv->hw_mutex); | 604 | mutex_init(&dev_priv->hw_mutex); |
533 | mutex_init(&dev_priv->cmdbuf_mutex); | 605 | mutex_init(&dev_priv->cmdbuf_mutex); |
534 | mutex_init(&dev_priv->release_mutex); | 606 | mutex_init(&dev_priv->release_mutex); |
607 | mutex_init(&dev_priv->binding_mutex); | ||
535 | rwlock_init(&dev_priv->resource_lock); | 608 | rwlock_init(&dev_priv->resource_lock); |
536 | 609 | ||
537 | for (i = vmw_res_context; i < vmw_res_max; ++i) { | 610 | for (i = vmw_res_context; i < vmw_res_max; ++i) { |
@@ -578,14 +651,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
578 | 651 | ||
579 | vmw_get_initial_size(dev_priv); | 652 | vmw_get_initial_size(dev_priv); |
580 | 653 | ||
581 | if (dev_priv->capabilities & SVGA_CAP_GMR) { | 654 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { |
582 | dev_priv->max_gmr_descriptors = | ||
583 | vmw_read(dev_priv, | ||
584 | SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH); | ||
585 | dev_priv->max_gmr_ids = | 655 | dev_priv->max_gmr_ids = |
586 | vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); | 656 | vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); |
587 | } | ||
588 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { | ||
589 | dev_priv->max_gmr_pages = | 657 | dev_priv->max_gmr_pages = |
590 | vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); | 658 | vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); |
591 | dev_priv->memory_size = | 659 | dev_priv->memory_size = |
@@ -598,23 +666,40 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
598 | */ | 666 | */ |
599 | dev_priv->memory_size = 512*1024*1024; | 667 | dev_priv->memory_size = 512*1024*1024; |
600 | } | 668 | } |
669 | dev_priv->max_mob_pages = 0; | ||
670 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { | ||
671 | uint64_t mem_size = | ||
672 | vmw_read(dev_priv, | ||
673 | SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB); | ||
674 | |||
675 | dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; | ||
676 | dev_priv->prim_bb_mem = | ||
677 | vmw_read(dev_priv, | ||
678 | SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); | ||
679 | } else | ||
680 | dev_priv->prim_bb_mem = dev_priv->vram_size; | ||
681 | |||
682 | ret = vmw_dma_masks(dev_priv); | ||
683 | if (unlikely(ret != 0)) | ||
684 | goto out_err0; | ||
685 | |||
686 | if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size)) | ||
687 | dev_priv->prim_bb_mem = dev_priv->vram_size; | ||
601 | 688 | ||
602 | mutex_unlock(&dev_priv->hw_mutex); | 689 | mutex_unlock(&dev_priv->hw_mutex); |
603 | 690 | ||
604 | vmw_print_capabilities(dev_priv->capabilities); | 691 | vmw_print_capabilities(dev_priv->capabilities); |
605 | 692 | ||
606 | if (dev_priv->capabilities & SVGA_CAP_GMR) { | 693 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { |
607 | DRM_INFO("Max GMR ids is %u\n", | 694 | DRM_INFO("Max GMR ids is %u\n", |
608 | (unsigned)dev_priv->max_gmr_ids); | 695 | (unsigned)dev_priv->max_gmr_ids); |
609 | DRM_INFO("Max GMR descriptors is %u\n", | ||
610 | (unsigned)dev_priv->max_gmr_descriptors); | ||
611 | } | ||
612 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { | ||
613 | DRM_INFO("Max number of GMR pages is %u\n", | 696 | DRM_INFO("Max number of GMR pages is %u\n", |
614 | (unsigned)dev_priv->max_gmr_pages); | 697 | (unsigned)dev_priv->max_gmr_pages); |
615 | DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", | 698 | DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", |
616 | (unsigned)dev_priv->memory_size / 1024); | 699 | (unsigned)dev_priv->memory_size / 1024); |
617 | } | 700 | } |
701 | DRM_INFO("Maximum display memory size is %u kiB\n", | ||
702 | dev_priv->prim_bb_mem / 1024); | ||
618 | DRM_INFO("VRAM at 0x%08x size is %u kiB\n", | 703 | DRM_INFO("VRAM at 0x%08x size is %u kiB\n", |
619 | dev_priv->vram_start, dev_priv->vram_size / 1024); | 704 | dev_priv->vram_start, dev_priv->vram_size / 1024); |
620 | DRM_INFO("MMIO at 0x%08x size is %u kiB\n", | 705 | DRM_INFO("MMIO at 0x%08x size is %u kiB\n", |
@@ -649,12 +734,22 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
649 | dev_priv->has_gmr = true; | 734 | dev_priv->has_gmr = true; |
650 | if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || | 735 | if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || |
651 | refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, | 736 | refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, |
652 | dev_priv->max_gmr_ids) != 0) { | 737 | VMW_PL_GMR) != 0) { |
653 | DRM_INFO("No GMR memory available. " | 738 | DRM_INFO("No GMR memory available. " |
654 | "Graphics memory resources are very limited.\n"); | 739 | "Graphics memory resources are very limited.\n"); |
655 | dev_priv->has_gmr = false; | 740 | dev_priv->has_gmr = false; |
656 | } | 741 | } |
657 | 742 | ||
743 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { | ||
744 | dev_priv->has_mob = true; | ||
745 | if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, | ||
746 | VMW_PL_MOB) != 0) { | ||
747 | DRM_INFO("No MOB memory available. " | ||
748 | "3D will be disabled.\n"); | ||
749 | dev_priv->has_mob = false; | ||
750 | } | ||
751 | } | ||
752 | |||
658 | dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, | 753 | dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, |
659 | dev_priv->mmio_size); | 754 | dev_priv->mmio_size); |
660 | 755 | ||
@@ -757,6 +852,8 @@ out_err4: | |||
757 | iounmap(dev_priv->mmio_virt); | 852 | iounmap(dev_priv->mmio_virt); |
758 | out_err3: | 853 | out_err3: |
759 | arch_phys_wc_del(dev_priv->mmio_mtrr); | 854 | arch_phys_wc_del(dev_priv->mmio_mtrr); |
855 | if (dev_priv->has_mob) | ||
856 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
760 | if (dev_priv->has_gmr) | 857 | if (dev_priv->has_gmr) |
761 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | 858 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); |
762 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | 859 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); |
@@ -801,6 +898,8 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
801 | ttm_object_device_release(&dev_priv->tdev); | 898 | ttm_object_device_release(&dev_priv->tdev); |
802 | iounmap(dev_priv->mmio_virt); | 899 | iounmap(dev_priv->mmio_virt); |
803 | arch_phys_wc_del(dev_priv->mmio_mtrr); | 900 | arch_phys_wc_del(dev_priv->mmio_mtrr); |
901 | if (dev_priv->has_mob) | ||
902 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
804 | if (dev_priv->has_gmr) | 903 | if (dev_priv->has_gmr) |
805 | (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | 904 | (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); |
806 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | 905 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); |
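One detail worth calling out in the hunks above: ttm_bo_init_mm() is now passed VMW_PL_GMR and VMW_PL_MOB where a manager size used to go. The id-based manager presumably treats that argument as a type tag and looks up its own limits, roughly along these lines (a sketch under assumed names; the real code lives in vmwgfx_gmrid_manager.c, which is not shown in this excerpt):

/* Assumed illustration of how the gmrid manager could key its limits off
 * the p_size argument that now carries VMW_PL_GMR or VMW_PL_MOB. */
struct example_gmrid_man {
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
};

static void example_gmrid_man_limits(struct vmw_private *dev_priv,
				     struct example_gmrid_man *gman,
				     unsigned long p_size)
{
	switch (p_size) {
	case VMW_PL_GMR:
		gman->max_gmr_ids = dev_priv->max_gmr_ids;
		gman->max_gmr_pages = dev_priv->max_gmr_pages;
		break;
	case VMW_PL_MOB:
		gman->max_gmr_ids = VMWGFX_NUM_MOB;
		gman->max_gmr_pages = dev_priv->max_mob_pages;
		break;
	default:
		BUG();
	}
}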
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 036629dd992a..554e7fa33082 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -40,9 +40,9 @@ | |||
40 | #include <drm/ttm/ttm_module.h> | 40 | #include <drm/ttm/ttm_module.h> |
41 | #include "vmwgfx_fence.h" | 41 | #include "vmwgfx_fence.h" |
42 | 42 | ||
43 | #define VMWGFX_DRIVER_DATE "20120209" | 43 | #define VMWGFX_DRIVER_DATE "20121114" |
44 | #define VMWGFX_DRIVER_MAJOR 2 | 44 | #define VMWGFX_DRIVER_MAJOR 2 |
45 | #define VMWGFX_DRIVER_MINOR 4 | 45 | #define VMWGFX_DRIVER_MINOR 5 |
46 | #define VMWGFX_DRIVER_PATCHLEVEL 0 | 46 | #define VMWGFX_DRIVER_PATCHLEVEL 0 |
47 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 | 47 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 |
48 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) | 48 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) |
@@ -50,14 +50,30 @@ | |||
50 | #define VMWGFX_MAX_VALIDATIONS 2048 | 50 | #define VMWGFX_MAX_VALIDATIONS 2048 |
51 | #define VMWGFX_MAX_DISPLAYS 16 | 51 | #define VMWGFX_MAX_DISPLAYS 16 |
52 | #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768 | 52 | #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768 |
53 | #define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0 | ||
54 | |||
55 | /* | ||
56 | * Perhaps we should have sysfs entries for these. | ||
57 | */ | ||
58 | #define VMWGFX_NUM_GB_CONTEXT 256 | ||
59 | #define VMWGFX_NUM_GB_SHADER 20000 | ||
60 | #define VMWGFX_NUM_GB_SURFACE 32768 | ||
61 | #define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS | ||
62 | #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\ | ||
63 | VMWGFX_NUM_GB_SHADER +\ | ||
64 | VMWGFX_NUM_GB_SURFACE +\ | ||
65 | VMWGFX_NUM_GB_SCREEN_TARGET) | ||
53 | 66 | ||
54 | #define VMW_PL_GMR TTM_PL_PRIV0 | 67 | #define VMW_PL_GMR TTM_PL_PRIV0 |
55 | #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0 | 68 | #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0 |
69 | #define VMW_PL_MOB TTM_PL_PRIV1 | ||
70 | #define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1 | ||
56 | 71 | ||
57 | #define VMW_RES_CONTEXT ttm_driver_type0 | 72 | #define VMW_RES_CONTEXT ttm_driver_type0 |
58 | #define VMW_RES_SURFACE ttm_driver_type1 | 73 | #define VMW_RES_SURFACE ttm_driver_type1 |
59 | #define VMW_RES_STREAM ttm_driver_type2 | 74 | #define VMW_RES_STREAM ttm_driver_type2 |
60 | #define VMW_RES_FENCE ttm_driver_type3 | 75 | #define VMW_RES_FENCE ttm_driver_type3 |
76 | #define VMW_RES_SHADER ttm_driver_type4 | ||
61 | 77 | ||
62 | struct vmw_fpriv { | 78 | struct vmw_fpriv { |
63 | struct drm_master *locked_master; | 79 | struct drm_master *locked_master; |
@@ -82,6 +98,7 @@ struct vmw_dma_buffer { | |||
82 | struct vmw_validate_buffer { | 98 | struct vmw_validate_buffer { |
83 | struct ttm_validate_buffer base; | 99 | struct ttm_validate_buffer base; |
84 | struct drm_hash_item hash; | 100 | struct drm_hash_item hash; |
101 | bool validate_as_mob; | ||
85 | }; | 102 | }; |
86 | 103 | ||
87 | struct vmw_res_func; | 104 | struct vmw_res_func; |
@@ -98,6 +115,7 @@ struct vmw_resource { | |||
98 | const struct vmw_res_func *func; | 115 | const struct vmw_res_func *func; |
99 | struct list_head lru_head; /* Protected by the resource lock */ | 116 | struct list_head lru_head; /* Protected by the resource lock */ |
100 | struct list_head mob_head; /* Protected by @backup reserved */ | 117 | struct list_head mob_head; /* Protected by @backup reserved */ |
118 | struct list_head binding_head; /* Protected by binding_mutex */ | ||
101 | void (*res_free) (struct vmw_resource *res); | 119 | void (*res_free) (struct vmw_resource *res); |
102 | void (*hw_destroy) (struct vmw_resource *res); | 120 | void (*hw_destroy) (struct vmw_resource *res); |
103 | }; | 121 | }; |
@@ -106,6 +124,7 @@ enum vmw_res_type { | |||
106 | vmw_res_context, | 124 | vmw_res_context, |
107 | vmw_res_surface, | 125 | vmw_res_surface, |
108 | vmw_res_stream, | 126 | vmw_res_stream, |
127 | vmw_res_shader, | ||
109 | vmw_res_max | 128 | vmw_res_max |
110 | }; | 129 | }; |
111 | 130 | ||
@@ -154,6 +173,7 @@ struct vmw_fifo_state { | |||
154 | }; | 173 | }; |
155 | 174 | ||
156 | struct vmw_relocation { | 175 | struct vmw_relocation { |
176 | SVGAMobId *mob_loc; | ||
157 | SVGAGuestPtr *location; | 177 | SVGAGuestPtr *location; |
158 | uint32_t index; | 178 | uint32_t index; |
159 | }; | 179 | }; |
@@ -229,6 +249,71 @@ struct vmw_piter { | |||
229 | struct page *(*page)(struct vmw_piter *); | 249 | struct page *(*page)(struct vmw_piter *); |
230 | }; | 250 | }; |
231 | 251 | ||
252 | /* | ||
253 | * enum vmw_ctx_binding_type - abstract resource to context binding types | ||
254 | */ | ||
255 | enum vmw_ctx_binding_type { | ||
256 | vmw_ctx_binding_shader, | ||
257 | vmw_ctx_binding_rt, | ||
258 | vmw_ctx_binding_tex, | ||
259 | vmw_ctx_binding_max | ||
260 | }; | ||
261 | |||
262 | /** | ||
263 | * struct vmw_ctx_bindinfo - structure representing a single context binding | ||
264 | * | ||
265 | * @ctx: Pointer to the context structure. NULL means the binding is not | ||
266 | * active. | ||
267 | * @res: Non ref-counted pointer to the bound resource. | ||
268 | * @bt: The binding type. | ||
269 | * @i1: Union of information needed to unbind. | ||
270 | */ | ||
271 | struct vmw_ctx_bindinfo { | ||
272 | struct vmw_resource *ctx; | ||
273 | struct vmw_resource *res; | ||
274 | enum vmw_ctx_binding_type bt; | ||
275 | union { | ||
276 | SVGA3dShaderType shader_type; | ||
277 | SVGA3dRenderTargetType rt_type; | ||
278 | uint32 texture_stage; | ||
279 | } i1; | ||
280 | }; | ||
281 | |||
282 | /** | ||
283 | * struct vmw_ctx_binding - structure representing a single context binding | ||
284 | * - suitable for tracking in a context | ||
285 | * | ||
286 | * @ctx_list: List head for context. | ||
287 | * @res_list: List head for bound resource. | ||
288 | * @bi: Binding info | ||
289 | */ | ||
290 | struct vmw_ctx_binding { | ||
291 | struct list_head ctx_list; | ||
292 | struct list_head res_list; | ||
293 | struct vmw_ctx_bindinfo bi; | ||
294 | }; | ||
295 | |||
296 | |||
297 | /** | ||
298 | * struct vmw_ctx_binding_state - context binding state | ||
299 | * | ||
300 | * @list: linked list of individual bindings. | ||
301 | * @render_targets: Render target bindings. | ||
302 | * @texture_units: Texture units/samplers bindings. | ||
303 | * @shaders: Shader bindings. | ||
304 | * | ||
305 | * Note that this structure also provides storage space for the individual | ||
306 | * struct vmw_ctx_binding objects, so that no dynamic allocation is needed | ||
307 | * for individual bindings. | ||
308 | * | ||
309 | */ | ||
310 | struct vmw_ctx_binding_state { | ||
311 | struct list_head list; | ||
312 | struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX]; | ||
313 | struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS]; | ||
314 | struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX]; | ||
315 | }; | ||
316 | |||
232 | struct vmw_sw_context{ | 317 | struct vmw_sw_context{ |
233 | struct drm_open_hash res_ht; | 318 | struct drm_open_hash res_ht; |
234 | bool res_ht_initialized; | 319 | bool res_ht_initialized; |
@@ -250,6 +335,7 @@ struct vmw_sw_context{ | |||
250 | struct vmw_resource *last_query_ctx; | 335 | struct vmw_resource *last_query_ctx; |
251 | bool needs_post_query_barrier; | 336 | bool needs_post_query_barrier; |
252 | struct vmw_resource *error_resource; | 337 | struct vmw_resource *error_resource; |
338 | struct vmw_ctx_binding_state staged_bindings; | ||
253 | }; | 339 | }; |
254 | 340 | ||
255 | struct vmw_legacy_display; | 341 | struct vmw_legacy_display; |
@@ -281,6 +367,7 @@ struct vmw_private { | |||
281 | unsigned int io_start; | 367 | unsigned int io_start; |
282 | uint32_t vram_start; | 368 | uint32_t vram_start; |
283 | uint32_t vram_size; | 369 | uint32_t vram_size; |
370 | uint32_t prim_bb_mem; | ||
284 | uint32_t mmio_start; | 371 | uint32_t mmio_start; |
285 | uint32_t mmio_size; | 372 | uint32_t mmio_size; |
286 | uint32_t fb_max_width; | 373 | uint32_t fb_max_width; |
@@ -290,11 +377,12 @@ struct vmw_private { | |||
290 | __le32 __iomem *mmio_virt; | 377 | __le32 __iomem *mmio_virt; |
291 | int mmio_mtrr; | 378 | int mmio_mtrr; |
292 | uint32_t capabilities; | 379 | uint32_t capabilities; |
293 | uint32_t max_gmr_descriptors; | ||
294 | uint32_t max_gmr_ids; | 380 | uint32_t max_gmr_ids; |
295 | uint32_t max_gmr_pages; | 381 | uint32_t max_gmr_pages; |
382 | uint32_t max_mob_pages; | ||
296 | uint32_t memory_size; | 383 | uint32_t memory_size; |
297 | bool has_gmr; | 384 | bool has_gmr; |
385 | bool has_mob; | ||
298 | struct mutex hw_mutex; | 386 | struct mutex hw_mutex; |
299 | 387 | ||
300 | /* | 388 | /* |
@@ -370,6 +458,7 @@ struct vmw_private { | |||
370 | 458 | ||
371 | struct vmw_sw_context ctx; | 459 | struct vmw_sw_context ctx; |
372 | struct mutex cmdbuf_mutex; | 460 | struct mutex cmdbuf_mutex; |
461 | struct mutex binding_mutex; | ||
373 | 462 | ||
374 | /** | 463 | /** |
375 | * Operating mode. | 464 | * Operating mode. |
@@ -415,6 +504,12 @@ struct vmw_private { | |||
415 | * DMA mapping stuff. | 504 | * DMA mapping stuff. |
416 | */ | 505 | */ |
417 | enum vmw_dma_map_mode map_mode; | 506 | enum vmw_dma_map_mode map_mode; |
507 | |||
508 | /* | ||
509 | * Guest Backed stuff | ||
510 | */ | ||
511 | struct ttm_buffer_object *otable_bo; | ||
512 | struct vmw_otable *otables; | ||
418 | }; | 513 | }; |
419 | 514 | ||
420 | static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) | 515 | static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) |
@@ -471,23 +566,12 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id); | |||
471 | * Resource utilities - vmwgfx_resource.c | 566 | * Resource utilities - vmwgfx_resource.c |
472 | */ | 567 | */ |
473 | struct vmw_user_resource_conv; | 568 | struct vmw_user_resource_conv; |
474 | extern const struct vmw_user_resource_conv *user_surface_converter; | ||
475 | extern const struct vmw_user_resource_conv *user_context_converter; | ||
476 | 569 | ||
477 | extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); | ||
478 | extern void vmw_resource_unreference(struct vmw_resource **p_res); | 570 | extern void vmw_resource_unreference(struct vmw_resource **p_res); |
479 | extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); | 571 | extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); |
480 | extern int vmw_resource_validate(struct vmw_resource *res); | 572 | extern int vmw_resource_validate(struct vmw_resource *res); |
481 | extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup); | 573 | extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup); |
482 | extern bool vmw_resource_needs_backup(const struct vmw_resource *res); | 574 | extern bool vmw_resource_needs_backup(const struct vmw_resource *res); |
483 | extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, | ||
484 | struct drm_file *file_priv); | ||
485 | extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, | ||
486 | struct drm_file *file_priv); | ||
487 | extern int vmw_context_check(struct vmw_private *dev_priv, | ||
488 | struct ttm_object_file *tfile, | ||
489 | int id, | ||
490 | struct vmw_resource **p_res); | ||
491 | extern int vmw_user_lookup_handle(struct vmw_private *dev_priv, | 575 | extern int vmw_user_lookup_handle(struct vmw_private *dev_priv, |
492 | struct ttm_object_file *tfile, | 576 | struct ttm_object_file *tfile, |
493 | uint32_t handle, | 577 | uint32_t handle, |
@@ -499,18 +583,6 @@ extern int vmw_user_resource_lookup_handle( | |||
499 | uint32_t handle, | 583 | uint32_t handle, |
500 | const struct vmw_user_resource_conv *converter, | 584 | const struct vmw_user_resource_conv *converter, |
501 | struct vmw_resource **p_res); | 585 | struct vmw_resource **p_res); |
502 | extern void vmw_surface_res_free(struct vmw_resource *res); | ||
503 | extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, | ||
504 | struct drm_file *file_priv); | ||
505 | extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | ||
506 | struct drm_file *file_priv); | ||
507 | extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | ||
508 | struct drm_file *file_priv); | ||
509 | extern int vmw_surface_check(struct vmw_private *dev_priv, | ||
510 | struct ttm_object_file *tfile, | ||
511 | uint32_t handle, int *id); | ||
512 | extern int vmw_surface_validate(struct vmw_private *dev_priv, | ||
513 | struct vmw_surface *srf); | ||
514 | extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo); | 586 | extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo); |
515 | extern int vmw_dmabuf_init(struct vmw_private *dev_priv, | 587 | extern int vmw_dmabuf_init(struct vmw_private *dev_priv, |
516 | struct vmw_dma_buffer *vmw_bo, | 588 | struct vmw_dma_buffer *vmw_bo, |
@@ -519,10 +591,21 @@ extern int vmw_dmabuf_init(struct vmw_private *dev_priv, | |||
519 | void (*bo_free) (struct ttm_buffer_object *bo)); | 591 | void (*bo_free) (struct ttm_buffer_object *bo)); |
520 | extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, | 592 | extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, |
521 | struct ttm_object_file *tfile); | 593 | struct ttm_object_file *tfile); |
594 | extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, | ||
595 | struct ttm_object_file *tfile, | ||
596 | uint32_t size, | ||
597 | bool shareable, | ||
598 | uint32_t *handle, | ||
599 | struct vmw_dma_buffer **p_dma_buf); | ||
600 | extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, | ||
601 | struct vmw_dma_buffer *dma_buf, | ||
602 | uint32_t *handle); | ||
522 | extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | 603 | extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, |
523 | struct drm_file *file_priv); | 604 | struct drm_file *file_priv); |
524 | extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, | 605 | extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, |
525 | struct drm_file *file_priv); | 606 | struct drm_file *file_priv); |
607 | extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, | ||
608 | struct drm_file *file_priv); | ||
526 | extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, | 609 | extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, |
527 | uint32_t cur_validate_node); | 610 | uint32_t cur_validate_node); |
528 | extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); | 611 | extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); |
@@ -622,10 +705,16 @@ extern struct ttm_placement vmw_vram_sys_placement; | |||
622 | extern struct ttm_placement vmw_vram_gmr_placement; | 705 | extern struct ttm_placement vmw_vram_gmr_placement; |
623 | extern struct ttm_placement vmw_vram_gmr_ne_placement; | 706 | extern struct ttm_placement vmw_vram_gmr_ne_placement; |
624 | extern struct ttm_placement vmw_sys_placement; | 707 | extern struct ttm_placement vmw_sys_placement; |
708 | extern struct ttm_placement vmw_sys_ne_placement; | ||
625 | extern struct ttm_placement vmw_evictable_placement; | 709 | extern struct ttm_placement vmw_evictable_placement; |
626 | extern struct ttm_placement vmw_srf_placement; | 710 | extern struct ttm_placement vmw_srf_placement; |
711 | extern struct ttm_placement vmw_mob_placement; | ||
627 | extern struct ttm_bo_driver vmw_bo_driver; | 712 | extern struct ttm_bo_driver vmw_bo_driver; |
628 | extern int vmw_dma_quiescent(struct drm_device *dev); | 713 | extern int vmw_dma_quiescent(struct drm_device *dev); |
714 | extern int vmw_bo_map_dma(struct ttm_buffer_object *bo); | ||
715 | extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo); | ||
716 | extern const struct vmw_sg_table * | ||
717 | vmw_bo_sg_table(struct ttm_buffer_object *bo); | ||
629 | extern void vmw_piter_start(struct vmw_piter *viter, | 718 | extern void vmw_piter_start(struct vmw_piter *viter, |
630 | const struct vmw_sg_table *vsgt, | 719 | const struct vmw_sg_table *vsgt, |
631 | unsigned long p_offs); | 720 | unsigned long p_offs); |
@@ -832,6 +921,76 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev, | |||
832 | uint32_t handle, uint32_t flags, | 921 | uint32_t handle, uint32_t flags, |
833 | int *prime_fd); | 922 | int *prime_fd); |
834 | 923 | ||
924 | /* | ||
925 | * MemoryOBject management - vmwgfx_mob.c | ||
926 | */ | ||
927 | struct vmw_mob; | ||
928 | extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob, | ||
929 | const struct vmw_sg_table *vsgt, | ||
930 | unsigned long num_data_pages, int32_t mob_id); | ||
931 | extern void vmw_mob_unbind(struct vmw_private *dev_priv, | ||
932 | struct vmw_mob *mob); | ||
933 | extern void vmw_mob_destroy(struct vmw_mob *mob); | ||
934 | extern struct vmw_mob *vmw_mob_create(unsigned long data_pages); | ||
935 | extern int vmw_otables_setup(struct vmw_private *dev_priv); | ||
936 | extern void vmw_otables_takedown(struct vmw_private *dev_priv); | ||
937 | |||
938 | /* | ||
939 | * Context management - vmwgfx_context.c | ||
940 | */ | ||
941 | |||
942 | extern const struct vmw_user_resource_conv *user_context_converter; | ||
943 | |||
944 | extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); | ||
945 | |||
946 | extern int vmw_context_check(struct vmw_private *dev_priv, | ||
947 | struct ttm_object_file *tfile, | ||
948 | int id, | ||
949 | struct vmw_resource **p_res); | ||
950 | extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, | ||
951 | struct drm_file *file_priv); | ||
952 | extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, | ||
953 | struct drm_file *file_priv); | ||
954 | extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, | ||
955 | const struct vmw_ctx_bindinfo *ci); | ||
956 | extern void | ||
957 | vmw_context_binding_state_transfer(struct vmw_resource *res, | ||
958 | struct vmw_ctx_binding_state *cbs); | ||
959 | extern void vmw_context_binding_res_list_kill(struct list_head *head); | ||
960 | |||
961 | /* | ||
962 | * Surface management - vmwgfx_surface.c | ||
963 | */ | ||
964 | |||
965 | extern const struct vmw_user_resource_conv *user_surface_converter; | ||
966 | |||
967 | extern void vmw_surface_res_free(struct vmw_resource *res); | ||
968 | extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, | ||
969 | struct drm_file *file_priv); | ||
970 | extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | ||
971 | struct drm_file *file_priv); | ||
972 | extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | ||
973 | struct drm_file *file_priv); | ||
974 | extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | ||
975 | struct drm_file *file_priv); | ||
976 | extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, | ||
977 | struct drm_file *file_priv); | ||
978 | extern int vmw_surface_check(struct vmw_private *dev_priv, | ||
979 | struct ttm_object_file *tfile, | ||
980 | uint32_t handle, int *id); | ||
981 | extern int vmw_surface_validate(struct vmw_private *dev_priv, | ||
982 | struct vmw_surface *srf); | ||
983 | |||
984 | /* | ||
985 | * Shader management - vmwgfx_shader.c | ||
986 | */ | ||
987 | |||
988 | extern const struct vmw_user_resource_conv *user_shader_converter; | ||
989 | |||
990 | extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data, | ||
991 | struct drm_file *file_priv); | ||
992 | extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, | ||
993 | struct drm_file *file_priv); | ||
835 | 994 | ||
836 | /** | 995 | /** |
837 | * Inline helper functions | 996 | * Inline helper functions |
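The MOB interface declared above (vmw_mob_create/bind/unbind/destroy) implies a simple lifecycle for wiring a buffer object's pages to a device MOB id. The snippet below is a hedged illustration using only the prototypes visible here; the helper name and error handling are assumptions, not the driver's actual code:

/* Illustrative only: create a MOB for a buffer object's pages and bind it
 * to the given MOB id. Teardown is the mirror image (unbind + destroy). */
static int example_mob_bind_bo(struct vmw_private *dev_priv,
			       struct ttm_buffer_object *bo,
			       int32_t mob_id, struct vmw_mob **p_mob)
{
	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
	struct vmw_mob *mob = vmw_mob_create(bo->num_pages);
	int ret;

	if (unlikely(mob == NULL))
		return -ENOMEM;

	ret = vmw_mob_bind(dev_priv, mob, vsgt, bo->num_pages, mob_id);
	if (unlikely(ret != 0)) {
		vmw_mob_destroy(mob);
		return ret;
	}

	*p_mob = mob;
	return 0;
}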
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 599f6469a1eb..7a5f1eb55c5a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -54,6 +54,8 @@ struct vmw_resource_relocation { | |||
54 | * @res: Ref-counted pointer to the resource. | 54 | * @res: Ref-counted pointer to the resource. |
55 | * @switch_backup: Boolean whether to switch backup buffer on unreserve. | 55 | * @switch_backup: Boolean whether to switch backup buffer on unreserve. |
56 | * @new_backup: Refcounted pointer to the new backup buffer. | 56 | * @new_backup: Refcounted pointer to the new backup buffer. |
57 | * @staged_bindings: If @res is a context, tracks bindings set up during | ||
58 | * the command batch. Otherwise NULL. | ||
57 | * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL. | 59 | * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL. |
58 | * @first_usage: Set to true the first time the resource is referenced in | 60 | * @first_usage: Set to true the first time the resource is referenced in |
59 | * the command stream. | 61 | * the command stream. |
@@ -65,12 +67,32 @@ struct vmw_resource_val_node { | |||
65 | struct drm_hash_item hash; | 67 | struct drm_hash_item hash; |
66 | struct vmw_resource *res; | 68 | struct vmw_resource *res; |
67 | struct vmw_dma_buffer *new_backup; | 69 | struct vmw_dma_buffer *new_backup; |
70 | struct vmw_ctx_binding_state *staged_bindings; | ||
68 | unsigned long new_backup_offset; | 71 | unsigned long new_backup_offset; |
69 | bool first_usage; | 72 | bool first_usage; |
70 | bool no_buffer_needed; | 73 | bool no_buffer_needed; |
71 | }; | 74 | }; |
72 | 75 | ||
73 | /** | 76 | /** |
77 | * struct vmw_cmd_entry - Describe a command for the verifier | ||
78 | * | ||
79 | * @user_allow: Whether allowed from the execbuf ioctl. | ||
80 | * @gb_disable: Whether disabled if guest-backed objects are available. | ||
81 | * @gb_enable: Whether enabled iff guest-backed objects are available. | ||
82 | */ | ||
83 | struct vmw_cmd_entry { | ||
84 | int (*func) (struct vmw_private *, struct vmw_sw_context *, | ||
85 | SVGA3dCmdHeader *); | ||
86 | bool user_allow; | ||
87 | bool gb_disable; | ||
88 | bool gb_enable; | ||
89 | }; | ||
90 | |||
91 | #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \ | ||
92 | [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\ | ||
93 | (_gb_disable), (_gb_enable)} | ||
94 | |||
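For orientation, a single VMW_CMD_DEF() entry, using the SVGA_3D_CMD_BIND_GB_SURFACE slot from the table further below as an example, expands to a designated initializer of this shape (an illustration, not part of the patch):

	[SVGA_3D_CMD_BIND_GB_SURFACE - SVGA_3D_CMD_BASE] = {
		&vmw_cmd_bind_gb_surface,	/* .func       */
		true,				/* .user_allow */
		false,				/* .gb_disable */
		true				/* .gb_enable  */
	},

so the verifier can index the table directly with (cmd_id - SVGA_3D_CMD_BASE).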
95 | /** | ||
74 | * vmw_resource_unreserve - unreserve resources previously reserved for | 96 | * vmw_resource_unreserve - unreserve resources previously reserved for |
75 | * command submission. | 97 | * command submission. |
76 | * | 98 | * |
@@ -87,6 +109,16 @@ static void vmw_resource_list_unreserve(struct list_head *list, | |||
87 | struct vmw_dma_buffer *new_backup = | 109 | struct vmw_dma_buffer *new_backup = |
88 | backoff ? NULL : val->new_backup; | 110 | backoff ? NULL : val->new_backup; |
89 | 111 | ||
112 | /* | ||
113 | * Transfer staged context bindings to the | ||
114 | * persistent context binding tracker. | ||
115 | */ | ||
116 | if (unlikely(val->staged_bindings)) { | ||
117 | vmw_context_binding_state_transfer | ||
118 | (val->res, val->staged_bindings); | ||
119 | kfree(val->staged_bindings); | ||
120 | val->staged_bindings = NULL; | ||
121 | } | ||
90 | vmw_resource_unreserve(res, new_backup, | 122 | vmw_resource_unreserve(res, new_backup, |
91 | val->new_backup_offset); | 123 | val->new_backup_offset); |
92 | vmw_dmabuf_unreference(&val->new_backup); | 124 | vmw_dmabuf_unreference(&val->new_backup); |
@@ -224,6 +256,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv, | |||
224 | * | 256 | * |
225 | * @sw_context: The software context used for this command submission batch. | 257 | * @sw_context: The software context used for this command submission batch. |
226 | * @bo: The buffer object to add. | 258 | * @bo: The buffer object to add. |
259 | * @validate_as_mob: Validate this buffer as a MOB. | ||
227 | * @p_val_node: If non-NULL, will be updated with the validate node number | 260 | * @p_val_node: If non-NULL, will be updated with the validate node number |
228 | * on return. | 261 | * on return. |
229 | * | 262 | * |
@@ -232,6 +265,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv, | |||
232 | */ | 265 | */ |
233 | static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, | 266 | static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, |
234 | struct ttm_buffer_object *bo, | 267 | struct ttm_buffer_object *bo, |
268 | bool validate_as_mob, | ||
235 | uint32_t *p_val_node) | 269 | uint32_t *p_val_node) |
236 | { | 270 | { |
237 | uint32_t val_node; | 271 | uint32_t val_node; |
@@ -244,6 +278,10 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, | |||
244 | &hash) == 0)) { | 278 | &hash) == 0)) { |
245 | vval_buf = container_of(hash, struct vmw_validate_buffer, | 279 | vval_buf = container_of(hash, struct vmw_validate_buffer, |
246 | hash); | 280 | hash); |
281 | if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) { | ||
282 | DRM_ERROR("Inconsistent buffer usage.\n"); | ||
283 | return -EINVAL; | ||
284 | } | ||
247 | val_buf = &vval_buf->base; | 285 | val_buf = &vval_buf->base; |
248 | val_node = vval_buf - sw_context->val_bufs; | 286 | val_node = vval_buf - sw_context->val_bufs; |
249 | } else { | 287 | } else { |
@@ -266,6 +304,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, | |||
266 | val_buf->bo = ttm_bo_reference(bo); | 304 | val_buf->bo = ttm_bo_reference(bo); |
267 | val_buf->reserved = false; | 305 | val_buf->reserved = false; |
268 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); | 306 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); |
307 | vval_buf->validate_as_mob = validate_as_mob; | ||
269 | } | 308 | } |
270 | 309 | ||
271 | sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC; | 310 | sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC; |
@@ -302,7 +341,8 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context) | |||
302 | struct ttm_buffer_object *bo = &res->backup->base; | 341 | struct ttm_buffer_object *bo = &res->backup->base; |
303 | 342 | ||
304 | ret = vmw_bo_to_validate_list | 343 | ret = vmw_bo_to_validate_list |
305 | (sw_context, bo, NULL); | 344 | (sw_context, bo, |
345 | vmw_resource_needs_backup(res), NULL); | ||
306 | 346 | ||
307 | if (unlikely(ret != 0)) | 347 | if (unlikely(ret != 0)) |
308 | return ret; | 348 | return ret; |
@@ -362,8 +402,15 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, | |||
362 | struct vmw_resource_val_node *node; | 402 | struct vmw_resource_val_node *node; |
363 | int ret; | 403 | int ret; |
364 | 404 | ||
365 | if (*id == SVGA3D_INVALID_ID) | 405 | if (*id == SVGA3D_INVALID_ID) { |
406 | if (p_val) | ||
407 | *p_val = NULL; | ||
408 | if (res_type == vmw_res_context) { | ||
409 | DRM_ERROR("Illegal context id: SVGA3D_INVALID_ID.\n"); | ||
410 | return -EINVAL; | ||
411 | } | ||
366 | return 0; | 412 | return 0; |
413 | } | ||
367 | 414 | ||
368 | /* | 415 | /* |
369 | * Fastpath in case of repeated commands referencing the same | 416 | * Fastpath in case of repeated commands referencing the same |
@@ -411,6 +458,18 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, | |||
411 | rcache->node = node; | 458 | rcache->node = node; |
412 | if (p_val) | 459 | if (p_val) |
413 | *p_val = node; | 460 | *p_val = node; |
461 | |||
462 | if (node->first_usage && res_type == vmw_res_context) { | ||
463 | node->staged_bindings = | ||
464 | kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); | ||
465 | if (node->staged_bindings == NULL) { | ||
466 | DRM_ERROR("Failed to allocate context binding " | ||
467 | "information.\n"); | ||
468 | goto out_no_reloc; | ||
469 | } | ||
470 | INIT_LIST_HEAD(&node->staged_bindings->list); | ||
471 | } | ||
472 | |||
414 | vmw_resource_unreference(&res); | 473 | vmw_resource_unreference(&res); |
415 | return 0; | 474 | return 0; |
416 | 475 | ||
@@ -453,17 +512,35 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, | |||
453 | SVGA3dCmdHeader header; | 512 | SVGA3dCmdHeader header; |
454 | SVGA3dCmdSetRenderTarget body; | 513 | SVGA3dCmdSetRenderTarget body; |
455 | } *cmd; | 514 | } *cmd; |
515 | struct vmw_resource_val_node *ctx_node; | ||
516 | struct vmw_resource_val_node *res_node; | ||
456 | int ret; | 517 | int ret; |
457 | 518 | ||
458 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 519 | cmd = container_of(header, struct vmw_sid_cmd, header); |
520 | |||
521 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
522 | user_context_converter, &cmd->body.cid, | ||
523 | &ctx_node); | ||
459 | if (unlikely(ret != 0)) | 524 | if (unlikely(ret != 0)) |
460 | return ret; | 525 | return ret; |
461 | 526 | ||
462 | cmd = container_of(header, struct vmw_sid_cmd, header); | ||
463 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 527 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
464 | user_surface_converter, | 528 | user_surface_converter, |
465 | &cmd->body.target.sid, NULL); | 529 | &cmd->body.target.sid, &res_node); |
466 | return ret; | 530 | if (unlikely(ret != 0)) |
531 | return ret; | ||
532 | |||
533 | if (dev_priv->has_mob) { | ||
534 | struct vmw_ctx_bindinfo bi; | ||
535 | |||
536 | bi.ctx = ctx_node->res; | ||
537 | bi.res = res_node ? res_node->res : NULL; | ||
538 | bi.bt = vmw_ctx_binding_rt; | ||
539 | bi.i1.rt_type = cmd->body.type; | ||
540 | return vmw_context_binding_add(ctx_node->staged_bindings, &bi); | ||
541 | } | ||
542 | |||
543 | return 0; | ||
467 | } | 544 | } |
468 | 545 | ||
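The staging step above (fill a vmw_ctx_bindinfo and hand it to vmw_context_binding_add() against the context node's staged bindings) recurs for texture stages and shaders later in this file; in generalized form it looks roughly like this (an illustrative sketch; the binding type and the i1 union member differ per command):

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo bi;

		bi.ctx = ctx_node->res;				/* validated context    */
		bi.res = res_node ? res_node->res : NULL;	/* resource being bound */
		bi.bt = vmw_ctx_binding_rt;			/* or _tex / _shader    */
		bi.i1.rt_type = cmd->body.type;			/* or .texture_stage /
								 * .shader_type         */
		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
	}

The staged bindings are only made persistent when the resource list is unreserved after a successful submission.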
469 | static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, | 546 | static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, |
@@ -519,11 +596,6 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, | |||
519 | 596 | ||
520 | cmd = container_of(header, struct vmw_sid_cmd, header); | 597 | cmd = container_of(header, struct vmw_sid_cmd, header); |
521 | 598 | ||
522 | if (unlikely(!sw_context->kernel)) { | ||
523 | DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id); | ||
524 | return -EPERM; | ||
525 | } | ||
526 | |||
527 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 599 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
528 | user_surface_converter, | 600 | user_surface_converter, |
529 | &cmd->body.srcImage.sid, NULL); | 601 | &cmd->body.srcImage.sid, NULL); |
@@ -541,11 +613,6 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv, | |||
541 | 613 | ||
542 | cmd = container_of(header, struct vmw_sid_cmd, header); | 614 | cmd = container_of(header, struct vmw_sid_cmd, header); |
543 | 615 | ||
544 | if (unlikely(!sw_context->kernel)) { | ||
545 | DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id); | ||
546 | return -EPERM; | ||
547 | } | ||
548 | |||
549 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 616 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
550 | user_surface_converter, &cmd->body.sid, | 617 | user_surface_converter, &cmd->body.sid, |
551 | NULL); | 618 | NULL); |
@@ -586,7 +653,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, | |||
586 | sw_context->needs_post_query_barrier = true; | 653 | sw_context->needs_post_query_barrier = true; |
587 | ret = vmw_bo_to_validate_list(sw_context, | 654 | ret = vmw_bo_to_validate_list(sw_context, |
588 | sw_context->cur_query_bo, | 655 | sw_context->cur_query_bo, |
589 | NULL); | 656 | dev_priv->has_mob, NULL); |
590 | if (unlikely(ret != 0)) | 657 | if (unlikely(ret != 0)) |
591 | return ret; | 658 | return ret; |
592 | } | 659 | } |
@@ -594,7 +661,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, | |||
594 | 661 | ||
595 | ret = vmw_bo_to_validate_list(sw_context, | 662 | ret = vmw_bo_to_validate_list(sw_context, |
596 | dev_priv->dummy_query_bo, | 663 | dev_priv->dummy_query_bo, |
597 | NULL); | 664 | dev_priv->has_mob, NULL); |
598 | if (unlikely(ret != 0)) | 665 | if (unlikely(ret != 0)) |
599 | return ret; | 666 | return ret; |
600 | 667 | ||
@@ -672,6 +739,66 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, | |||
672 | } | 739 | } |
673 | 740 | ||
674 | /** | 741 | /** |
742 | * vmw_translate_mob_pointer - Prepare to translate a user-space buffer | ||
743 | * handle to a MOB id. | ||
744 | * | ||
745 | * @dev_priv: Pointer to a device private structure. | ||
746 | * @sw_context: The software context used for this command batch validation. | ||
747 | * @id: Pointer to the user-space handle to be translated. | ||
748 | * @vmw_bo_p: Points to a location that, on successful return, will carry | ||
749 | * a reference-counted pointer to the DMA buffer identified by the | ||
750 | * user-space handle in @id. | ||
751 | * | ||
752 | * This function saves information needed to translate a user-space buffer | ||
753 | * handle to a MOB id. The translation does not take place immediately, but | ||
754 | * during a call to vmw_apply_relocations(). This function builds a relocation | ||
755 | * list and a list of buffers to validate. The former needs to be freed using | ||
756 | * either vmw_apply_relocations() or vmw_free_relocations(). The latter | ||
757 | * needs to be freed using vmw_clear_validations(). | ||
758 | */ | ||
759 | static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, | ||
760 | struct vmw_sw_context *sw_context, | ||
761 | SVGAMobId *id, | ||
762 | struct vmw_dma_buffer **vmw_bo_p) | ||
763 | { | ||
764 | struct vmw_dma_buffer *vmw_bo = NULL; | ||
765 | struct ttm_buffer_object *bo; | ||
766 | uint32_t handle = *id; | ||
767 | struct vmw_relocation *reloc; | ||
768 | int ret; | ||
769 | |||
770 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); | ||
771 | if (unlikely(ret != 0)) { | ||
772 | DRM_ERROR("Could not find or use MOB buffer.\n"); | ||
773 | return -EINVAL; | ||
774 | } | ||
775 | bo = &vmw_bo->base; | ||
776 | |||
777 | if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { | ||
778 | DRM_ERROR("Max number of relocations per submission" | ||
779 | " exceeded\n"); | ||
780 | ret = -EINVAL; | ||
781 | goto out_no_reloc; | ||
782 | } | ||
783 | |||
784 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; | ||
785 | reloc->mob_loc = id; | ||
786 | reloc->location = NULL; | ||
787 | |||
788 | ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index); | ||
789 | if (unlikely(ret != 0)) | ||
790 | goto out_no_reloc; | ||
791 | |||
792 | *vmw_bo_p = vmw_bo; | ||
793 | return 0; | ||
794 | |||
795 | out_no_reloc: | ||
796 | vmw_dmabuf_unreference(&vmw_bo); | ||
797 | *vmw_bo_p = NULL; | ||
798 | return ret; | ||
799 | } | ||
800 | |||
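As a usage sketch (the command layout here is hypothetical; the real callers appear below in the guest-backed query and switch-backup paths), a checker that carries an SVGAMobId typically does:

	struct vmw_dma_buffer *vmw_bo;
	int ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->body.mobid, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* ... use vmw_bo->base while the reference is held ... */
	vmw_dmabuf_unreference(&vmw_bo);

The mobid itself is only patched later, in vmw_apply_relocations(), once the buffer's final placement (bo->mem.start) is known.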
801 | /** | ||
675 | * vmw_translate_guest_pointer - Prepare to translate a user-space buffer | 802 | * vmw_translate_guest_pointer - Prepare to translate a user-space buffer |
676 | * handle to a valid SVGAGuestPtr | 803 | * handle to a valid SVGAGuestPtr |
677 | * | 804 | * |
@@ -718,7 +845,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | |||
718 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; | 845 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; |
719 | reloc->location = ptr; | 846 | reloc->location = ptr; |
720 | 847 | ||
721 | ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index); | 848 | ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index); |
722 | if (unlikely(ret != 0)) | 849 | if (unlikely(ret != 0)) |
723 | goto out_no_reloc; | 850 | goto out_no_reloc; |
724 | 851 | ||
@@ -732,6 +859,30 @@ out_no_reloc: | |||
732 | } | 859 | } |
733 | 860 | ||
734 | /** | 861 | /** |
862 | * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command. | ||
863 | * | ||
864 | * @dev_priv: Pointer to a device private struct. | ||
865 | * @sw_context: The software context used for this command submission. | ||
866 | * @header: Pointer to the command header in the command stream. | ||
867 | */ | ||
868 | static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv, | ||
869 | struct vmw_sw_context *sw_context, | ||
870 | SVGA3dCmdHeader *header) | ||
871 | { | ||
872 | struct vmw_begin_gb_query_cmd { | ||
873 | SVGA3dCmdHeader header; | ||
874 | SVGA3dCmdBeginGBQuery q; | ||
875 | } *cmd; | ||
876 | |||
877 | cmd = container_of(header, struct vmw_begin_gb_query_cmd, | ||
878 | header); | ||
879 | |||
880 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
881 | user_context_converter, &cmd->q.cid, | ||
882 | NULL); | ||
883 | } | ||
884 | |||
885 | /** | ||
735 | * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command. | 886 | * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command. |
736 | * | 887 | * |
737 | * @dev_priv: Pointer to a device private struct. | 888 | * @dev_priv: Pointer to a device private struct. |
@@ -750,12 +901,64 @@ static int vmw_cmd_begin_query(struct vmw_private *dev_priv, | |||
750 | cmd = container_of(header, struct vmw_begin_query_cmd, | 901 | cmd = container_of(header, struct vmw_begin_query_cmd, |
751 | header); | 902 | header); |
752 | 903 | ||
904 | if (unlikely(dev_priv->has_mob)) { | ||
905 | struct { | ||
906 | SVGA3dCmdHeader header; | ||
907 | SVGA3dCmdBeginGBQuery q; | ||
908 | } gb_cmd; | ||
909 | |||
910 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); | ||
911 | |||
912 | gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY; | ||
913 | gb_cmd.header.size = cmd->header.size; | ||
914 | gb_cmd.q.cid = cmd->q.cid; | ||
915 | gb_cmd.q.type = cmd->q.type; | ||
916 | |||
917 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); | ||
918 | return vmw_cmd_begin_gb_query(dev_priv, sw_context, header); | ||
919 | } | ||
920 | |||
753 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | 921 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
754 | user_context_converter, &cmd->q.cid, | 922 | user_context_converter, &cmd->q.cid, |
755 | NULL); | 923 | NULL); |
756 | } | 924 | } |
757 | 925 | ||
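The in-place rewrite above relies on the legacy and guest-backed command structs having identical size, which the runtime BUG_ON asserts. A compile-time variant would also be possible (a hardening sketch, not part of the patch):

	BUILD_BUG_ON(sizeof(SVGA3dCmdBeginQuery) !=
		     sizeof(SVGA3dCmdBeginGBQuery));

The same conversion pattern is used for the END_QUERY and WAIT_FOR_QUERY commands below.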
758 | /** | 926 | /** |
927 | * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command. | ||
928 | * | ||
929 | * @dev_priv: Pointer to a device private struct. | ||
930 | * @sw_context: The software context used for this command submission. | ||
931 | * @header: Pointer to the command header in the command stream. | ||
932 | */ | ||
933 | static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, | ||
934 | struct vmw_sw_context *sw_context, | ||
935 | SVGA3dCmdHeader *header) | ||
936 | { | ||
937 | struct vmw_dma_buffer *vmw_bo; | ||
938 | struct vmw_query_cmd { | ||
939 | SVGA3dCmdHeader header; | ||
940 | SVGA3dCmdEndGBQuery q; | ||
941 | } *cmd; | ||
942 | int ret; | ||
943 | |||
944 | cmd = container_of(header, struct vmw_query_cmd, header); | ||
945 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | ||
946 | if (unlikely(ret != 0)) | ||
947 | return ret; | ||
948 | |||
949 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, | ||
950 | &cmd->q.mobid, | ||
951 | &vmw_bo); | ||
952 | if (unlikely(ret != 0)) | ||
953 | return ret; | ||
954 | |||
955 | ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); | ||
956 | |||
957 | vmw_dmabuf_unreference(&vmw_bo); | ||
958 | return ret; | ||
959 | } | ||
960 | |||
961 | /** | ||
759 | * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command. | 962 | * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command. |
760 | * | 963 | * |
761 | * @dev_priv: Pointer to a device private struct. | 964 | * @dev_priv: Pointer to a device private struct. |
@@ -774,6 +977,25 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv, | |||
774 | int ret; | 977 | int ret; |
775 | 978 | ||
776 | cmd = container_of(header, struct vmw_query_cmd, header); | 979 | cmd = container_of(header, struct vmw_query_cmd, header); |
980 | if (dev_priv->has_mob) { | ||
981 | struct { | ||
982 | SVGA3dCmdHeader header; | ||
983 | SVGA3dCmdEndGBQuery q; | ||
984 | } gb_cmd; | ||
985 | |||
986 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); | ||
987 | |||
988 | gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY; | ||
989 | gb_cmd.header.size = cmd->header.size; | ||
990 | gb_cmd.q.cid = cmd->q.cid; | ||
991 | gb_cmd.q.type = cmd->q.type; | ||
992 | gb_cmd.q.mobid = cmd->q.guestResult.gmrId; | ||
993 | gb_cmd.q.offset = cmd->q.guestResult.offset; | ||
994 | |||
995 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); | ||
996 | return vmw_cmd_end_gb_query(dev_priv, sw_context, header); | ||
997 | } | ||
998 | |||
777 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 999 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
778 | if (unlikely(ret != 0)) | 1000 | if (unlikely(ret != 0)) |
779 | return ret; | 1001 | return ret; |
@@ -790,7 +1012,40 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv, | |||
790 | return ret; | 1012 | return ret; |
791 | } | 1013 | } |
792 | 1014 | ||
793 | /* | 1015 | /** |
1016 | * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command. | ||
1017 | * | ||
1018 | * @dev_priv: Pointer to a device private struct. | ||
1019 | * @sw_context: The software context used for this command submission. | ||
1020 | * @header: Pointer to the command header in the command stream. | ||
1021 | */ | ||
1022 | static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, | ||
1023 | struct vmw_sw_context *sw_context, | ||
1024 | SVGA3dCmdHeader *header) | ||
1025 | { | ||
1026 | struct vmw_dma_buffer *vmw_bo; | ||
1027 | struct vmw_query_cmd { | ||
1028 | SVGA3dCmdHeader header; | ||
1029 | SVGA3dCmdWaitForGBQuery q; | ||
1030 | } *cmd; | ||
1031 | int ret; | ||
1032 | |||
1033 | cmd = container_of(header, struct vmw_query_cmd, header); | ||
1034 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | ||
1035 | if (unlikely(ret != 0)) | ||
1036 | return ret; | ||
1037 | |||
1038 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, | ||
1039 | &cmd->q.mobid, | ||
1040 | &vmw_bo); | ||
1041 | if (unlikely(ret != 0)) | ||
1042 | return ret; | ||
1043 | |||
1044 | vmw_dmabuf_unreference(&vmw_bo); | ||
1045 | return 0; | ||
1046 | } | ||
1047 | |||
1048 | /** | ||
794 | * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command. | 1049 | * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command. |
795 | * | 1050 | * |
796 | * @dev_priv: Pointer to a device private struct. | 1051 | * @dev_priv: Pointer to a device private struct. |
@@ -809,6 +1064,25 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, | |||
809 | int ret; | 1064 | int ret; |
810 | 1065 | ||
811 | cmd = container_of(header, struct vmw_query_cmd, header); | 1066 | cmd = container_of(header, struct vmw_query_cmd, header); |
1067 | if (dev_priv->has_mob) { | ||
1068 | struct { | ||
1069 | SVGA3dCmdHeader header; | ||
1070 | SVGA3dCmdWaitForGBQuery q; | ||
1071 | } gb_cmd; | ||
1072 | |||
1073 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); | ||
1074 | |||
1075 | gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY; | ||
1076 | gb_cmd.header.size = cmd->header.size; | ||
1077 | gb_cmd.q.cid = cmd->q.cid; | ||
1078 | gb_cmd.q.type = cmd->q.type; | ||
1079 | gb_cmd.q.mobid = cmd->q.guestResult.gmrId; | ||
1080 | gb_cmd.q.offset = cmd->q.guestResult.offset; | ||
1081 | |||
1082 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); | ||
1083 | return vmw_cmd_wait_gb_query(dev_priv, sw_context, header); | ||
1084 | } | ||
1085 | |||
812 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 1086 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
813 | if (unlikely(ret != 0)) | 1087 | if (unlikely(ret != 0)) |
814 | return ret; | 1088 | return ret; |
@@ -921,15 +1195,22 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv, | |||
921 | struct vmw_tex_state_cmd { | 1195 | struct vmw_tex_state_cmd { |
922 | SVGA3dCmdHeader header; | 1196 | SVGA3dCmdHeader header; |
923 | SVGA3dCmdSetTextureState state; | 1197 | SVGA3dCmdSetTextureState state; |
924 | }; | 1198 | } *cmd; |
925 | 1199 | ||
926 | SVGA3dTextureState *last_state = (SVGA3dTextureState *) | 1200 | SVGA3dTextureState *last_state = (SVGA3dTextureState *) |
927 | ((unsigned long) header + header->size + sizeof(header)); | 1201 | ((unsigned long) header + header->size + sizeof(header)); |
928 | SVGA3dTextureState *cur_state = (SVGA3dTextureState *) | 1202 | SVGA3dTextureState *cur_state = (SVGA3dTextureState *) |
929 | ((unsigned long) header + sizeof(struct vmw_tex_state_cmd)); | 1203 | ((unsigned long) header + sizeof(struct vmw_tex_state_cmd)); |
1204 | struct vmw_resource_val_node *ctx_node; | ||
1205 | struct vmw_resource_val_node *res_node; | ||
930 | int ret; | 1206 | int ret; |
931 | 1207 | ||
932 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 1208 | cmd = container_of(header, struct vmw_tex_state_cmd, |
1209 | header); | ||
1210 | |||
1211 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
1212 | user_context_converter, &cmd->state.cid, | ||
1213 | &ctx_node); | ||
933 | if (unlikely(ret != 0)) | 1214 | if (unlikely(ret != 0)) |
934 | return ret; | 1215 | return ret; |
935 | 1216 | ||
@@ -939,9 +1220,20 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv, | |||
939 | 1220 | ||
940 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 1221 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
941 | user_surface_converter, | 1222 | user_surface_converter, |
942 | &cur_state->value, NULL); | 1223 | &cur_state->value, &res_node); |
943 | if (unlikely(ret != 0)) | 1224 | if (unlikely(ret != 0)) |
944 | return ret; | 1225 | return ret; |
1226 | |||
1227 | if (dev_priv->has_mob) { | ||
1228 | struct vmw_ctx_bindinfo bi; | ||
1229 | |||
1230 | bi.ctx = ctx_node->res; | ||
1231 | bi.res = res_node ? res_node->res : NULL; | ||
1232 | bi.bt = vmw_ctx_binding_tex; | ||
1233 | bi.i1.texture_stage = cur_state->stage; | ||
1234 | vmw_context_binding_add(ctx_node->staged_bindings, | ||
1235 | &bi); | ||
1236 | } | ||
945 | } | 1237 | } |
946 | 1238 | ||
947 | return 0; | 1239 | return 0; |
@@ -971,6 +1263,222 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, | |||
971 | } | 1263 | } |
972 | 1264 | ||
973 | /** | 1265 | /** |
1266 | * vmw_cmd_switch_backup - Utility function to handle backup buffer switching | ||
1267 | * | ||
1268 | * @dev_priv: Pointer to a device private struct. | ||
1269 | * @sw_context: The software context being used for this batch. | ||
1270 | * @res_type: The resource type. | ||
1271 | * @converter: Information about user-space binding for this resource type. | ||
1272 | * @res_id: Pointer to the user-space resource handle in the command stream. | ||
1273 | * @buf_id: Pointer to the user-space backup buffer handle in the command | ||
1274 | * stream. | ||
1275 | * @backup_offset: Offset of backup into MOB. | ||
1276 | * | ||
1277 | * This function prepares for registering a switch of backup buffers | ||
1278 | * in the resource metadata just prior to unreserving. | ||
1279 | */ | ||
1280 | static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, | ||
1281 | struct vmw_sw_context *sw_context, | ||
1282 | enum vmw_res_type res_type, | ||
1283 | const struct vmw_user_resource_conv | ||
1284 | *converter, | ||
1285 | uint32_t *res_id, | ||
1286 | uint32_t *buf_id, | ||
1287 | unsigned long backup_offset) | ||
1288 | { | ||
1289 | int ret; | ||
1290 | struct vmw_dma_buffer *dma_buf; | ||
1291 | struct vmw_resource_val_node *val_node; | ||
1292 | |||
1293 | ret = vmw_cmd_res_check(dev_priv, sw_context, res_type, | ||
1294 | converter, res_id, &val_node); | ||
1295 | if (unlikely(ret != 0)) | ||
1296 | return ret; | ||
1297 | |||
1298 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf); | ||
1299 | if (unlikely(ret != 0)) | ||
1300 | return ret; | ||
1301 | |||
1302 | if (val_node->first_usage) | ||
1303 | val_node->no_buffer_needed = true; | ||
1304 | |||
1305 | vmw_dmabuf_unreference(&val_node->new_backup); | ||
1306 | val_node->new_backup = dma_buf; | ||
1307 | val_node->new_backup_offset = backup_offset; | ||
1308 | |||
1309 | return 0; | ||
1310 | } | ||
1311 | |||
1312 | /** | ||
1313 | * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE | ||
1314 | * command | ||
1315 | * | ||
1316 | * @dev_priv: Pointer to a device private struct. | ||
1317 | * @sw_context: The software context being used for this batch. | ||
1318 | * @header: Pointer to the command header in the command stream. | ||
1319 | */ | ||
1320 | static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv, | ||
1321 | struct vmw_sw_context *sw_context, | ||
1322 | SVGA3dCmdHeader *header) | ||
1323 | { | ||
1324 | struct vmw_bind_gb_surface_cmd { | ||
1325 | SVGA3dCmdHeader header; | ||
1326 | SVGA3dCmdBindGBSurface body; | ||
1327 | } *cmd; | ||
1328 | |||
1329 | cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header); | ||
1330 | |||
1331 | return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface, | ||
1332 | user_surface_converter, | ||
1333 | &cmd->body.sid, &cmd->body.mobid, | ||
1334 | 0); | ||
1335 | } | ||
1336 | |||
1337 | /** | ||
1338 | * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE | ||
1339 | * command | ||
1340 | * | ||
1341 | * @dev_priv: Pointer to a device private struct. | ||
1342 | * @sw_context: The software context being used for this batch. | ||
1343 | * @header: Pointer to the command header in the command stream. | ||
1344 | */ | ||
1345 | static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv, | ||
1346 | struct vmw_sw_context *sw_context, | ||
1347 | SVGA3dCmdHeader *header) | ||
1348 | { | ||
1349 | struct vmw_gb_surface_cmd { | ||
1350 | SVGA3dCmdHeader header; | ||
1351 | SVGA3dCmdUpdateGBImage body; | ||
1352 | } *cmd; | ||
1353 | |||
1354 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
1355 | |||
1356 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
1357 | user_surface_converter, | ||
1358 | &cmd->body.image.sid, NULL); | ||
1359 | } | ||
1360 | |||
1361 | /** | ||
1362 | * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE | ||
1363 | * command | ||
1364 | * | ||
1365 | * @dev_priv: Pointer to a device private struct. | ||
1366 | * @sw_context: The software context being used for this batch. | ||
1367 | * @header: Pointer to the command header in the command stream. | ||
1368 | */ | ||
1369 | static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv, | ||
1370 | struct vmw_sw_context *sw_context, | ||
1371 | SVGA3dCmdHeader *header) | ||
1372 | { | ||
1373 | struct vmw_gb_surface_cmd { | ||
1374 | SVGA3dCmdHeader header; | ||
1375 | SVGA3dCmdUpdateGBSurface body; | ||
1376 | } *cmd; | ||
1377 | |||
1378 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
1379 | |||
1380 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
1381 | user_surface_converter, | ||
1382 | &cmd->body.sid, NULL); | ||
1383 | } | ||
1384 | |||
1385 | /** | ||
1386 | * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE | ||
1387 | * command | ||
1388 | * | ||
1389 | * @dev_priv: Pointer to a device private struct. | ||
1390 | * @sw_context: The software context being used for this batch. | ||
1391 | * @header: Pointer to the command header in the command stream. | ||
1392 | */ | ||
1393 | static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv, | ||
1394 | struct vmw_sw_context *sw_context, | ||
1395 | SVGA3dCmdHeader *header) | ||
1396 | { | ||
1397 | struct vmw_gb_surface_cmd { | ||
1398 | SVGA3dCmdHeader header; | ||
1399 | SVGA3dCmdReadbackGBImage body; | ||
1400 | } *cmd; | ||
1401 | |||
1402 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
1403 | |||
1404 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
1405 | user_surface_converter, | ||
1406 | &cmd->body.image.sid, NULL); | ||
1407 | } | ||
1408 | |||
1409 | /** | ||
1410 | * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE | ||
1411 | * command | ||
1412 | * | ||
1413 | * @dev_priv: Pointer to a device private struct. | ||
1414 | * @sw_context: The software context being used for this batch. | ||
1415 | * @header: Pointer to the command header in the command stream. | ||
1416 | */ | ||
1417 | static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv, | ||
1418 | struct vmw_sw_context *sw_context, | ||
1419 | SVGA3dCmdHeader *header) | ||
1420 | { | ||
1421 | struct vmw_gb_surface_cmd { | ||
1422 | SVGA3dCmdHeader header; | ||
1423 | SVGA3dCmdReadbackGBSurface body; | ||
1424 | } *cmd; | ||
1425 | |||
1426 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
1427 | |||
1428 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
1429 | user_surface_converter, | ||
1430 | &cmd->body.sid, NULL); | ||
1431 | } | ||
1432 | |||
1433 | /** | ||
1434 | * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE | ||
1435 | * command | ||
1436 | * | ||
1437 | * @dev_priv: Pointer to a device private struct. | ||
1438 | * @sw_context: The software context being used for this batch. | ||
1439 | * @header: Pointer to the command header in the command stream. | ||
1440 | */ | ||
1441 | static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv, | ||
1442 | struct vmw_sw_context *sw_context, | ||
1443 | SVGA3dCmdHeader *header) | ||
1444 | { | ||
1445 | struct vmw_gb_surface_cmd { | ||
1446 | SVGA3dCmdHeader header; | ||
1447 | SVGA3dCmdInvalidateGBImage body; | ||
1448 | } *cmd; | ||
1449 | |||
1450 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
1451 | |||
1452 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
1453 | user_surface_converter, | ||
1454 | &cmd->body.image.sid, NULL); | ||
1455 | } | ||
1456 | |||
1457 | /** | ||
1458 | * vmw_cmd_invalidate_gb_surface - Validate an | ||
1459 | * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command | ||
1460 | * | ||
1461 | * @dev_priv: Pointer to a device private struct. | ||
1462 | * @sw_context: The software context being used for this batch. | ||
1463 | * @header: Pointer to the command header in the command stream. | ||
1464 | */ | ||
1465 | static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv, | ||
1466 | struct vmw_sw_context *sw_context, | ||
1467 | SVGA3dCmdHeader *header) | ||
1468 | { | ||
1469 | struct vmw_gb_surface_cmd { | ||
1470 | SVGA3dCmdHeader header; | ||
1471 | SVGA3dCmdInvalidateGBSurface body; | ||
1472 | } *cmd; | ||
1473 | |||
1474 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
1475 | |||
1476 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
1477 | user_surface_converter, | ||
1478 | &cmd->body.sid, NULL); | ||
1479 | } | ||
1480 | |||
1481 | /** | ||
974 | * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER | 1482 | * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER |
975 | * command | 1483 | * command |
976 | * | 1484 | * |
@@ -986,18 +1494,64 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, | |||
986 | SVGA3dCmdHeader header; | 1494 | SVGA3dCmdHeader header; |
987 | SVGA3dCmdSetShader body; | 1495 | SVGA3dCmdSetShader body; |
988 | } *cmd; | 1496 | } *cmd; |
1497 | struct vmw_resource_val_node *ctx_node; | ||
989 | int ret; | 1498 | int ret; |
990 | 1499 | ||
991 | cmd = container_of(header, struct vmw_set_shader_cmd, | 1500 | cmd = container_of(header, struct vmw_set_shader_cmd, |
992 | header); | 1501 | header); |
993 | 1502 | ||
994 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 1503 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
1504 | user_context_converter, &cmd->body.cid, | ||
1505 | &ctx_node); | ||
995 | if (unlikely(ret != 0)) | 1506 | if (unlikely(ret != 0)) |
996 | return ret; | 1507 | return ret; |
997 | 1508 | ||
1509 | if (dev_priv->has_mob) { | ||
1510 | struct vmw_ctx_bindinfo bi; | ||
1511 | struct vmw_resource_val_node *res_node; | ||
1512 | |||
1513 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader, | ||
1514 | user_shader_converter, | ||
1515 | &cmd->body.shid, &res_node); | ||
1516 | if (unlikely(ret != 0)) | ||
1517 | return ret; | ||
1518 | |||
1519 | bi.ctx = ctx_node->res; | ||
1520 | bi.res = res_node ? res_node->res : NULL; | ||
1521 | bi.bt = vmw_ctx_binding_shader; | ||
1522 | bi.i1.shader_type = cmd->body.type; | ||
1523 | return vmw_context_binding_add(ctx_node->staged_bindings, &bi); | ||
1524 | } | ||
1525 | |||
998 | return 0; | 1526 | return 0; |
999 | } | 1527 | } |
1000 | 1528 | ||
1529 | /** | ||
1530 | * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER | ||
1531 | * command | ||
1532 | * | ||
1533 | * @dev_priv: Pointer to a device private struct. | ||
1534 | * @sw_context: The software context being used for this batch. | ||
1535 | * @header: Pointer to the command header in the command stream. | ||
1536 | */ | ||
1537 | static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv, | ||
1538 | struct vmw_sw_context *sw_context, | ||
1539 | SVGA3dCmdHeader *header) | ||
1540 | { | ||
1541 | struct vmw_bind_gb_shader_cmd { | ||
1542 | SVGA3dCmdHeader header; | ||
1543 | SVGA3dCmdBindGBShader body; | ||
1544 | } *cmd; | ||
1545 | |||
1546 | cmd = container_of(header, struct vmw_bind_gb_shader_cmd, | ||
1547 | header); | ||
1548 | |||
1549 | return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader, | ||
1550 | user_shader_converter, | ||
1551 | &cmd->body.shid, &cmd->body.mobid, | ||
1552 | cmd->body.offsetInBytes); | ||
1553 | } | ||
1554 | |||
1001 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, | 1555 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, |
1002 | struct vmw_sw_context *sw_context, | 1556 | struct vmw_sw_context *sw_context, |
1003 | void *buf, uint32_t *size) | 1557 | void *buf, uint32_t *size) |
@@ -1041,50 +1595,173 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, | |||
1041 | return 0; | 1595 | return 0; |
1042 | } | 1596 | } |
1043 | 1597 | ||
1044 | typedef int (*vmw_cmd_func) (struct vmw_private *, | 1598 | static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { |
1045 | struct vmw_sw_context *, | 1599 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid, |
1046 | SVGA3dCmdHeader *); | 1600 | false, false, false), |
1047 | 1601 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid, | |
1048 | #define VMW_CMD_DEF(cmd, func) \ | 1602 | false, false, false), |
1049 | [cmd - SVGA_3D_CMD_BASE] = func | 1603 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check, |
1050 | 1604 | true, false, false), | |
1051 | static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = { | 1605 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check, |
1052 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid), | 1606 | true, false, false), |
1053 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid), | 1607 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma, |
1054 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check), | 1608 | true, false, false), |
1055 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check), | 1609 | VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid, |
1056 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma), | 1610 | false, false, false), |
1057 | VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid), | 1611 | VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid, |
1058 | VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid), | 1612 | false, false, false), |
1059 | VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check), | 1613 | VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check, |
1060 | VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check), | 1614 | true, false, false), |
1061 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check), | 1615 | VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check, |
1616 | true, false, false), | ||
1617 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check, | ||
1618 | true, false, false), | ||
1062 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET, | 1619 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET, |
1063 | &vmw_cmd_set_render_target_check), | 1620 | &vmw_cmd_set_render_target_check, true, false, false), |
1064 | VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state), | 1621 | VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state, |
1065 | VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check), | 1622 | true, false, false), |
1066 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check), | 1623 | VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check, |
1067 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check), | 1624 | true, false, false), |
1068 | VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check), | 1625 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check, |
1069 | VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check), | 1626 | true, false, false), |
1070 | VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check), | 1627 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check, |
1071 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check), | 1628 | true, false, false), |
1072 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check), | 1629 | VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check, |
1073 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check), | 1630 | true, false, false), |
1074 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader), | 1631 | VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check, |
1075 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check), | 1632 | true, false, false), |
1076 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), | 1633 | VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check, |
1077 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), | 1634 | true, false, false), |
1078 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query), | 1635 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check, |
1079 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query), | 1636 | false, false, false), |
1080 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query), | 1637 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check, |
1081 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), | 1638 | true, true, false), |
1639 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check, | ||
1640 | true, true, false), | ||
1641 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader, | ||
1642 | true, false, false), | ||
1643 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check, | ||
1644 | true, true, false), | ||
1645 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw, | ||
1646 | true, false, false), | ||
1647 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check, | ||
1648 | true, false, false), | ||
1649 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query, | ||
1650 | true, false, false), | ||
1651 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query, | ||
1652 | true, false, false), | ||
1653 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query, | ||
1654 | true, false, false), | ||
1655 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok, | ||
1656 | true, false, false), | ||
1082 | VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, | 1657 | VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, |
1083 | &vmw_cmd_blt_surf_screen_check), | 1658 | &vmw_cmd_blt_surf_screen_check, false, false, false), |
1084 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid), | 1659 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid, |
1085 | VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid), | 1660 | false, false, false), |
1086 | VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid), | 1661 | VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid, |
1087 | VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid), | 1662 | false, false, false), |
1663 | VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid, | ||
1664 | false, false, false), | ||
1665 | VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid, | ||
1666 | false, false, false), | ||
1667 | VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid, | ||
1668 | false, false, false), | ||
1669 | VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid, | ||
1670 | false, false, false), | ||
1671 | VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid, | ||
1672 | false, false, false), | ||
1673 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid, | ||
1674 | false, false, false), | ||
1675 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid, | ||
1676 | false, false, false), | ||
1677 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid, | ||
1678 | false, false, false), | ||
1679 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid, | ||
1680 | false, false, false), | ||
1681 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid, | ||
1682 | false, false, false), | ||
1683 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid, | ||
1684 | false, false, false), | ||
1685 | VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid, | ||
1686 | false, false, true), | ||
1687 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid, | ||
1688 | false, false, true), | ||
1689 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid, | ||
1690 | false, false, true), | ||
1691 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid, | ||
1692 | false, false, true), | ||
1693 | VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid, | ||
1694 | false, false, true), | ||
1695 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid, | ||
1696 | false, false, true), | ||
1697 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid, | ||
1698 | false, false, true), | ||
1699 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid, | ||
1700 | false, false, true), | ||
1701 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface, | ||
1702 | true, false, true), | ||
1703 | VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid, | ||
1704 | false, false, true), | ||
1705 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image, | ||
1706 | true, false, true), | ||
1707 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE, | ||
1708 | &vmw_cmd_update_gb_surface, true, false, true), | ||
1709 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE, | ||
1710 | &vmw_cmd_readback_gb_image, true, false, true), | ||
1711 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE, | ||
1712 | &vmw_cmd_readback_gb_surface, true, false, true), | ||
1713 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE, | ||
1714 | &vmw_cmd_invalidate_gb_image, true, false, true), | ||
1715 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE, | ||
1716 | &vmw_cmd_invalidate_gb_surface, true, false, true), | ||
1717 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid, | ||
1718 | false, false, true), | ||
1719 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid, | ||
1720 | false, false, true), | ||
1721 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid, | ||
1722 | false, false, true), | ||
1723 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid, | ||
1724 | false, false, true), | ||
1725 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid, | ||
1726 | false, false, true), | ||
1727 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid, | ||
1728 | false, false, true), | ||
1729 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader, | ||
1730 | true, false, true), | ||
1731 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid, | ||
1732 | false, false, true), | ||
1733 | VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid, | ||
1734 | false, false, false), | ||
1735 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query, | ||
1736 | true, false, true), | ||
1737 | VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query, | ||
1738 | true, false, true), | ||
1739 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query, | ||
1740 | true, false, true), | ||
1741 | VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok, | ||
1742 | true, false, true), | ||
1743 | VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid, | ||
1744 | false, false, true), | ||
1745 | VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid, | ||
1746 | false, false, true), | ||
1747 | VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid, | ||
1748 | false, false, true), | ||
1749 | VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid, | ||
1750 | false, false, true), | ||
1751 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid, | ||
1752 | false, false, true), | ||
1753 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid, | ||
1754 | false, false, true), | ||
1755 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid, | ||
1756 | false, false, true), | ||
1757 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid, | ||
1758 | false, false, true), | ||
1759 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, | ||
1760 | false, false, true), | ||
1761 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, | ||
1762 | false, false, true), | ||
1763 | VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check, | ||
1764 | true, false, true) | ||
1088 | }; | 1765 | }; |
1089 | 1766 | ||
1090 | static int vmw_cmd_check(struct vmw_private *dev_priv, | 1767 | static int vmw_cmd_check(struct vmw_private *dev_priv, |
@@ -1095,6 +1772,8 @@ static int vmw_cmd_check(struct vmw_private *dev_priv, | |||
1095 | uint32_t size_remaining = *size; | 1772 | uint32_t size_remaining = *size; |
1096 | SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; | 1773 | SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; |
1097 | int ret; | 1774 | int ret; |
1775 | const struct vmw_cmd_entry *entry; | ||
1776 | bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS; | ||
1098 | 1777 | ||
1099 | cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); | 1778 | cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); |
1100 | /* Handle any non-3D commands */ | 1779 | /* Handle any non-3D commands */ |
@@ -1107,18 +1786,40 @@ static int vmw_cmd_check(struct vmw_private *dev_priv, | |||
1107 | 1786 | ||
1108 | cmd_id -= SVGA_3D_CMD_BASE; | 1787 | cmd_id -= SVGA_3D_CMD_BASE; |
1109 | if (unlikely(*size > size_remaining)) | 1788 | if (unlikely(*size > size_remaining)) |
1110 | goto out_err; | 1789 | goto out_invalid; |
1111 | 1790 | ||
1112 | if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)) | 1791 | if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)) |
1113 | goto out_err; | 1792 | goto out_invalid; |
1793 | |||
1794 | entry = &vmw_cmd_entries[cmd_id]; | ||
1795 | if (unlikely(!entry->user_allow && !sw_context->kernel)) | ||
1796 | goto out_privileged; | ||
1114 | 1797 | ||
1115 | ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header); | 1798 | if (unlikely(entry->gb_disable && gb)) |
1799 | goto out_old; | ||
1800 | |||
1801 | if (unlikely(entry->gb_enable && !gb)) | ||
1802 | goto out_new; | ||
1803 | |||
1804 | ret = entry->func(dev_priv, sw_context, header); | ||
1116 | if (unlikely(ret != 0)) | 1805 | if (unlikely(ret != 0)) |
1117 | goto out_err; | 1806 | goto out_invalid; |
1118 | 1807 | ||
1119 | return 0; | 1808 | return 0; |
1120 | out_err: | 1809 | out_invalid: |
1121 | DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n", | 1810 | DRM_ERROR("Invalid SVGA3D command: %d\n", |
1811 | cmd_id + SVGA_3D_CMD_BASE); | ||
1812 | return -EINVAL; | ||
1813 | out_privileged: | ||
1814 | DRM_ERROR("Privileged SVGA3D command: %d\n", | ||
1815 | cmd_id + SVGA_3D_CMD_BASE); | ||
1816 | return -EPERM; | ||
1817 | out_old: | ||
1818 | DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n", | ||
1819 | cmd_id + SVGA_3D_CMD_BASE); | ||
1820 | return -EINVAL; | ||
1821 | out_new: | ||
1822 | DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n", | ||
1122 | cmd_id + SVGA_3D_CMD_BASE); | 1823 | cmd_id + SVGA_3D_CMD_BASE); |
1123 | return -EINVAL; | 1824 | return -EINVAL; |
1124 | } | 1825 | } |
@@ -1174,6 +1875,9 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context) | |||
1174 | case VMW_PL_GMR: | 1875 | case VMW_PL_GMR: |
1175 | reloc->location->gmrId = bo->mem.start; | 1876 | reloc->location->gmrId = bo->mem.start; |
1176 | break; | 1877 | break; |
1878 | case VMW_PL_MOB: | ||
1879 | *reloc->mob_loc = bo->mem.start; | ||
1880 | break; | ||
1177 | default: | 1881 | default: |
1178 | BUG(); | 1882 | BUG(); |
1179 | } | 1883 | } |
@@ -1198,6 +1902,8 @@ static void vmw_resource_list_unreference(struct list_head *list) | |||
1198 | list_for_each_entry_safe(val, val_next, list, head) { | 1902 | list_for_each_entry_safe(val, val_next, list, head) { |
1199 | list_del_init(&val->head); | 1903 | list_del_init(&val->head); |
1200 | vmw_resource_unreference(&val->res); | 1904 | vmw_resource_unreference(&val->res); |
1905 | if (unlikely(val->staged_bindings)) | ||
1906 | kfree(val->staged_bindings); | ||
1201 | kfree(val); | 1907 | kfree(val); |
1202 | } | 1908 | } |
1203 | } | 1909 | } |
@@ -1224,7 +1930,8 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context) | |||
1224 | } | 1930 | } |
1225 | 1931 | ||
1226 | static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | 1932 | static int vmw_validate_single_buffer(struct vmw_private *dev_priv, |
1227 | struct ttm_buffer_object *bo) | 1933 | struct ttm_buffer_object *bo, |
1934 | bool validate_as_mob) | ||
1228 | { | 1935 | { |
1229 | int ret; | 1936 | int ret; |
1230 | 1937 | ||
@@ -1238,6 +1945,9 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | |||
1238 | dev_priv->dummy_query_bo_pinned)) | 1945 | dev_priv->dummy_query_bo_pinned)) |
1239 | return 0; | 1946 | return 0; |
1240 | 1947 | ||
1948 | if (validate_as_mob) | ||
1949 | return ttm_bo_validate(bo, &vmw_mob_placement, true, false); | ||
1950 | |||
1241 | /** | 1951 | /** |
1242 | * Put BO in VRAM if there is space, otherwise as a GMR. | 1952 | * Put BO in VRAM if there is space, otherwise as a GMR. |
1243 | * If there is no space in VRAM and GMR ids are all used up, | 1953 | * If there is no space in VRAM and GMR ids are all used up, |
@@ -1259,7 +1969,6 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | |||
1259 | return ret; | 1969 | return ret; |
1260 | } | 1970 | } |
1261 | 1971 | ||
1262 | |||
1263 | static int vmw_validate_buffers(struct vmw_private *dev_priv, | 1972 | static int vmw_validate_buffers(struct vmw_private *dev_priv, |
1264 | struct vmw_sw_context *sw_context) | 1973 | struct vmw_sw_context *sw_context) |
1265 | { | 1974 | { |
@@ -1267,7 +1976,8 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv, | |||
1267 | int ret; | 1976 | int ret; |
1268 | 1977 | ||
1269 | list_for_each_entry(entry, &sw_context->validate_nodes, base.head) { | 1978 | list_for_each_entry(entry, &sw_context->validate_nodes, base.head) { |
1270 | ret = vmw_validate_single_buffer(dev_priv, entry->base.bo); | 1979 | ret = vmw_validate_single_buffer(dev_priv, entry->base.bo, |
1980 | entry->validate_as_mob); | ||
1271 | if (unlikely(ret != 0)) | 1981 | if (unlikely(ret != 0)) |
1272 | return ret; | 1982 | return ret; |
1273 | } | 1983 | } |
@@ -1509,11 +2219,17 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
1509 | goto out_err; | 2219 | goto out_err; |
1510 | } | 2220 | } |
1511 | 2221 | ||
2222 | ret = mutex_lock_interruptible(&dev_priv->binding_mutex); | ||
2223 | if (unlikely(ret != 0)) { | ||
2224 | ret = -ERESTARTSYS; | ||
2225 | goto out_err; | ||
2226 | } | ||
2227 | |||
1512 | cmd = vmw_fifo_reserve(dev_priv, command_size); | 2228 | cmd = vmw_fifo_reserve(dev_priv, command_size); |
1513 | if (unlikely(cmd == NULL)) { | 2229 | if (unlikely(cmd == NULL)) { |
1514 | DRM_ERROR("Failed reserving fifo space for commands.\n"); | 2230 | DRM_ERROR("Failed reserving fifo space for commands.\n"); |
1515 | ret = -ENOMEM; | 2231 | ret = -ENOMEM; |
1516 | goto out_err; | 2232 | goto out_unlock_binding; |
1517 | } | 2233 | } |
1518 | 2234 | ||
1519 | vmw_apply_relocations(sw_context); | 2235 | vmw_apply_relocations(sw_context); |
@@ -1538,6 +2254,8 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
1538 | DRM_ERROR("Fence submission error. Syncing.\n"); | 2254 | DRM_ERROR("Fence submission error. Syncing.\n"); |
1539 | 2255 | ||
1540 | vmw_resource_list_unreserve(&sw_context->resource_list, false); | 2256 | vmw_resource_list_unreserve(&sw_context->resource_list, false); |
2257 | mutex_unlock(&dev_priv->binding_mutex); | ||
2258 | |||
1541 | ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, | 2259 | ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, |
1542 | (void *) fence); | 2260 | (void *) fence); |
1543 | 2261 | ||
@@ -1568,6 +2286,8 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
1568 | 2286 | ||
1569 | return 0; | 2287 | return 0; |
1570 | 2288 | ||
2289 | out_unlock_binding: | ||
2290 | mutex_unlock(&dev_priv->binding_mutex); | ||
1571 | out_err: | 2291 | out_err: |
1572 | vmw_resource_relocations_free(&sw_context->res_relocations); | 2292 | vmw_resource_relocations_free(&sw_context->res_relocations); |
1573 | vmw_free_relocations(sw_context); | 2293 | vmw_free_relocations(sw_context); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 3eb148667d63..6ccd993e26bf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
@@ -35,6 +35,23 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) | |||
35 | uint32_t fifo_min, hwversion; | 35 | uint32_t fifo_min, hwversion; |
36 | const struct vmw_fifo_state *fifo = &dev_priv->fifo; | 36 | const struct vmw_fifo_state *fifo = &dev_priv->fifo; |
37 | 37 | ||
38 | if (!(dev_priv->capabilities & SVGA_CAP_3D)) | ||
39 | return false; | ||
40 | |||
41 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { | ||
42 | uint32_t result; | ||
43 | |||
44 | if (!dev_priv->has_mob) | ||
45 | return false; | ||
46 | |||
47 | mutex_lock(&dev_priv->hw_mutex); | ||
48 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); | ||
49 | result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); | ||
50 | mutex_unlock(&dev_priv->hw_mutex); | ||
51 | |||
52 | return (result != 0); | ||
53 | } | ||
54 | |||
38 | if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) | 55 | if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) |
39 | return false; | 56 | return false; |
40 | 57 | ||
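The write-then-read sequence on SVGA_REG_DEV_CAP used above is the general way to query a single device capability once guest-backed objects are present; factored out as a helper it would look roughly like this (a sketch only, not code from the patch):

	static uint32_t vmw_devcap_get(struct vmw_private *dev_priv, uint32_t cap)
	{
		uint32_t result;

		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, cap);
		result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		mutex_unlock(&dev_priv->hw_mutex);

		return result;
	}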
@@ -511,24 +528,16 @@ out_err: | |||
511 | } | 528 | } |
512 | 529 | ||
513 | /** | 530 | /** |
514 | * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo. | 531 | * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using |
532 | * legacy query commands. | ||
515 | * | 533 | * |
516 | * @dev_priv: The device private structure. | 534 | * @dev_priv: The device private structure. |
517 | * @cid: The hardware context id used for the query. | 535 | * @cid: The hardware context id used for the query. |
518 | * | 536 | * |
519 | * This function is used to emit a dummy occlusion query with | 537 | * See the vmw_fifo_emit_dummy_query documentation. |
520 | * no primitives rendered between query begin and query end. | ||
521 | * It's used to provide a query barrier, in order to know that when | ||
522 | * this query is finished, all preceding queries are also finished. | ||
523 | * | ||
524 | * A Query results structure should have been initialized at the start | ||
525 | * of the dev_priv->dummy_query_bo buffer object. And that buffer object | ||
526 | * must also be either reserved or pinned when this function is called. | ||
527 | * | ||
528 | * Returns -ENOMEM on failure to reserve fifo space. | ||
529 | */ | 538 | */ |
530 | int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, | 539 | static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv, |
531 | uint32_t cid) | 540 | uint32_t cid) |
532 | { | 541 | { |
533 | /* | 542 | /* |
534 | * A query wait without a preceding query end will | 543 | * A query wait without a preceding query end will |
@@ -566,3 +575,75 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, | |||
566 | 575 | ||
567 | return 0; | 576 | return 0; |
568 | } | 577 | } |
578 | |||
579 | /** | ||
580 | * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using | ||
581 | * guest-backed resource query commands. | ||
582 | * | ||
583 | * @dev_priv: The device private structure. | ||
584 | * @cid: The hardware context id used for the query. | ||
585 | * | ||
586 | * See the vmw_fifo_emit_dummy_query documentation. | ||
587 | */ | ||
588 | static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv, | ||
589 | uint32_t cid) | ||
590 | { | ||
591 | /* | ||
592 | * A query wait without a preceding query end will | ||
593 | * actually finish all queries for this cid | ||
594 | * without writing to the query result structure. | ||
595 | */ | ||
596 | |||
597 | struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; | ||
598 | struct { | ||
599 | SVGA3dCmdHeader header; | ||
600 | SVGA3dCmdWaitForGBQuery body; | ||
601 | } *cmd; | ||
602 | |||
603 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
604 | |||
605 | if (unlikely(cmd == NULL)) { | ||
606 | DRM_ERROR("Out of fifo space for dummy query.\n"); | ||
607 | return -ENOMEM; | ||
608 | } | ||
609 | |||
610 | cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY; | ||
611 | cmd->header.size = sizeof(cmd->body); | ||
612 | cmd->body.cid = cid; | ||
613 | cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION; | ||
614 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
615 | cmd->body.mobid = bo->mem.start; | ||
616 | cmd->body.offset = 0; | ||
617 | |||
618 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
619 | |||
620 | return 0; | ||
621 | } | ||
622 | |||
623 | |||
624 | /** | ||
625 | * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using | ||
626 | * appropriate resource query commands. | ||
627 | * | ||
628 | * @dev_priv: The device private structure. | ||
629 | * @cid: The hardware context id used for the query. | ||
630 | * | ||
631 | * This function is used to emit a dummy occlusion query with | ||
632 | * no primitives rendered between query begin and query end. | ||
633 | * It's used to provide a query barrier, in order to know that when | ||
634 | * this query is finished, all preceding queries are also finished. | ||
635 | * | ||
636 | * A Query results structure should have been initialized at the start | ||
637 | * of the dev_priv->dummy_query_bo buffer object. And that buffer object | ||
638 | * must also be either reserved or pinned when this function is called. | ||
639 | * | ||
640 | * Returns -ENOMEM on failure to reserve fifo space. | ||
641 | */ | ||
642 | int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, | ||
643 | uint32_t cid) | ||
644 | { | ||
645 | if (dev_priv->has_mob) | ||
646 | return vmw_fifo_emit_dummy_gb_query(dev_priv, cid); | ||
647 | |||
648 | return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid); | ||
649 | } | ||
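The SVGA_CAP_GBOBJECTS path above probes 3D support through the paired DEV_CAP registers: the capability index is written to SVGA_REG_DEV_CAP and the value is read back from the same register under hw_mutex. A small sketch of that register protocol, with vmw_devcap_read() as a hypothetical helper (the driver open-codes it in the hunks above):

	static uint32_t vmw_devcap_read(struct vmw_private *dev_priv, uint32_t cap)
	{
		uint32_t value;

		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, cap);	/* select the capability index */
		value = vmw_read(dev_priv, SVGA_REG_DEV_CAP);	/* read its value back */
		mutex_unlock(&dev_priv->hw_mutex);

		return value;
	}

With such a helper, the guest-backed check in vmw_fifo_have_3d() reduces to vmw_devcap_read(dev_priv, SVGA3D_DEVCAP_3D) != 0.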
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c index 6ef0b035becb..61d8d803199f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | |||
@@ -125,181 +125,27 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv, | |||
125 | } | 125 | } |
126 | 126 | ||
127 | 127 | ||
128 | static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma, | ||
129 | struct list_head *desc_pages) | ||
130 | { | ||
131 | struct page *page, *next; | ||
132 | struct svga_guest_mem_descriptor *page_virtual; | ||
133 | unsigned int desc_per_page = PAGE_SIZE / | ||
134 | sizeof(struct svga_guest_mem_descriptor) - 1; | ||
135 | |||
136 | if (list_empty(desc_pages)) | ||
137 | return; | ||
138 | |||
139 | list_for_each_entry_safe(page, next, desc_pages, lru) { | ||
140 | list_del_init(&page->lru); | ||
141 | |||
142 | if (likely(desc_dma != DMA_ADDR_INVALID)) { | ||
143 | dma_unmap_page(dev, desc_dma, PAGE_SIZE, | ||
144 | DMA_TO_DEVICE); | ||
145 | } | ||
146 | |||
147 | page_virtual = kmap_atomic(page); | ||
148 | desc_dma = (dma_addr_t) | ||
149 | le32_to_cpu(page_virtual[desc_per_page].ppn) << | ||
150 | PAGE_SHIFT; | ||
151 | kunmap_atomic(page_virtual); | ||
152 | |||
153 | __free_page(page); | ||
154 | } | ||
155 | } | ||
156 | |||
157 | /** | ||
158 | * FIXME: Adjust to the ttm lowmem / highmem storage to minimize | ||
159 | * the number of used descriptors. | ||
160 | * | ||
161 | */ | ||
162 | |||
163 | static int vmw_gmr_build_descriptors(struct device *dev, | ||
164 | struct list_head *desc_pages, | ||
165 | struct vmw_piter *iter, | ||
166 | unsigned long num_pages, | ||
167 | dma_addr_t *first_dma) | ||
168 | { | ||
169 | struct page *page; | ||
170 | struct svga_guest_mem_descriptor *page_virtual = NULL; | ||
171 | struct svga_guest_mem_descriptor *desc_virtual = NULL; | ||
172 | unsigned int desc_per_page; | ||
173 | unsigned long prev_pfn; | ||
174 | unsigned long pfn; | ||
175 | int ret; | ||
176 | dma_addr_t desc_dma; | ||
177 | |||
178 | desc_per_page = PAGE_SIZE / | ||
179 | sizeof(struct svga_guest_mem_descriptor) - 1; | ||
180 | |||
181 | while (likely(num_pages != 0)) { | ||
182 | page = alloc_page(__GFP_HIGHMEM); | ||
183 | if (unlikely(page == NULL)) { | ||
184 | ret = -ENOMEM; | ||
185 | goto out_err; | ||
186 | } | ||
187 | |||
188 | list_add_tail(&page->lru, desc_pages); | ||
189 | page_virtual = kmap_atomic(page); | ||
190 | desc_virtual = page_virtual - 1; | ||
191 | prev_pfn = ~(0UL); | ||
192 | |||
193 | while (likely(num_pages != 0)) { | ||
194 | pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT; | ||
195 | |||
196 | if (pfn != prev_pfn + 1) { | ||
197 | |||
198 | if (desc_virtual - page_virtual == | ||
199 | desc_per_page - 1) | ||
200 | break; | ||
201 | |||
202 | (++desc_virtual)->ppn = cpu_to_le32(pfn); | ||
203 | desc_virtual->num_pages = cpu_to_le32(1); | ||
204 | } else { | ||
205 | uint32_t tmp = | ||
206 | le32_to_cpu(desc_virtual->num_pages); | ||
207 | desc_virtual->num_pages = cpu_to_le32(tmp + 1); | ||
208 | } | ||
209 | prev_pfn = pfn; | ||
210 | --num_pages; | ||
211 | vmw_piter_next(iter); | ||
212 | } | ||
213 | |||
214 | (++desc_virtual)->ppn = DMA_PAGE_INVALID; | ||
215 | desc_virtual->num_pages = cpu_to_le32(0); | ||
216 | kunmap_atomic(page_virtual); | ||
217 | } | ||
218 | |||
219 | desc_dma = 0; | ||
220 | list_for_each_entry_reverse(page, desc_pages, lru) { | ||
221 | page_virtual = kmap_atomic(page); | ||
222 | page_virtual[desc_per_page].ppn = cpu_to_le32 | ||
223 | (desc_dma >> PAGE_SHIFT); | ||
224 | kunmap_atomic(page_virtual); | ||
225 | desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE, | ||
226 | DMA_TO_DEVICE); | ||
227 | |||
228 | if (unlikely(dma_mapping_error(dev, desc_dma))) | ||
229 | goto out_err; | ||
230 | } | ||
231 | *first_dma = desc_dma; | ||
232 | |||
233 | return 0; | ||
234 | out_err: | ||
235 | vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages); | ||
236 | return ret; | ||
237 | } | ||
238 | |||
239 | static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv, | ||
240 | int gmr_id, dma_addr_t desc_dma) | ||
241 | { | ||
242 | mutex_lock(&dev_priv->hw_mutex); | ||
243 | |||
244 | vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id); | ||
245 | wmb(); | ||
246 | vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT); | ||
247 | mb(); | ||
248 | |||
249 | mutex_unlock(&dev_priv->hw_mutex); | ||
250 | |||
251 | } | ||
252 | |||
253 | int vmw_gmr_bind(struct vmw_private *dev_priv, | 128 | int vmw_gmr_bind(struct vmw_private *dev_priv, |
254 | const struct vmw_sg_table *vsgt, | 129 | const struct vmw_sg_table *vsgt, |
255 | unsigned long num_pages, | 130 | unsigned long num_pages, |
256 | int gmr_id) | 131 | int gmr_id) |
257 | { | 132 | { |
258 | struct list_head desc_pages; | ||
259 | dma_addr_t desc_dma = 0; | ||
260 | struct device *dev = dev_priv->dev->dev; | ||
261 | struct vmw_piter data_iter; | 133 | struct vmw_piter data_iter; |
262 | int ret; | ||
263 | 134 | ||
264 | vmw_piter_start(&data_iter, vsgt, 0); | 135 | vmw_piter_start(&data_iter, vsgt, 0); |
265 | 136 | ||
266 | if (unlikely(!vmw_piter_next(&data_iter))) | 137 | if (unlikely(!vmw_piter_next(&data_iter))) |
267 | return 0; | 138 | return 0; |
268 | 139 | ||
269 | if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) | 140 | if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2))) |
270 | return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id); | ||
271 | |||
272 | if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR))) | ||
273 | return -EINVAL; | ||
274 | |||
275 | if (vsgt->num_regions > dev_priv->max_gmr_descriptors) | ||
276 | return -EINVAL; | 141 | return -EINVAL; |
277 | 142 | ||
278 | INIT_LIST_HEAD(&desc_pages); | 143 | return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id); |
279 | |||
280 | ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter, | ||
281 | num_pages, &desc_dma); | ||
282 | if (unlikely(ret != 0)) | ||
283 | return ret; | ||
284 | |||
285 | vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma); | ||
286 | vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages); | ||
287 | |||
288 | return 0; | ||
289 | } | 144 | } |
290 | 145 | ||
291 | 146 | ||
292 | void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id) | 147 | void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id) |
293 | { | 148 | { |
294 | if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) { | 149 | if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) |
295 | vmw_gmr2_unbind(dev_priv, gmr_id); | 150 | vmw_gmr2_unbind(dev_priv, gmr_id); |
296 | return; | ||
297 | } | ||
298 | |||
299 | mutex_lock(&dev_priv->hw_mutex); | ||
300 | vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id); | ||
301 | wmb(); | ||
302 | vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0); | ||
303 | mb(); | ||
304 | mutex_unlock(&dev_priv->hw_mutex); | ||
305 | } | 151 | } |
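With the legacy descriptor builder removed, GMR binding relies entirely on vmw_gmr2_bind() and the vmw_piter page iterator. As a sketch only, assuming vmw_piter_start()/vmw_piter_next()/vmw_piter_dma_addr() behave as used elsewhere in this patch, this hypothetical helper counts the contiguous DMA runs that the deleted code used to encode as descriptors (the quantity vsgt->num_regions summarizes):

	static unsigned long vmw_count_dma_regions(const struct vmw_sg_table *vsgt,
						   unsigned long num_pages)
	{
		struct vmw_piter iter;
		dma_addr_t prev = 0;
		unsigned long regions = 0;
		bool first = true;

		vmw_piter_start(&iter, vsgt, 0);
		while (num_pages-- && vmw_piter_next(&iter)) {
			dma_addr_t cur = vmw_piter_dma_addr(&iter);

			/* A new region starts wherever pages stop being contiguous. */
			if (first || cur != prev + PAGE_SIZE)
				++regions;
			prev = cur;
			first = false;
		}

		return regions;
	}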
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index c5c054ae9056..b1273e8e9a69 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | |||
@@ -125,10 +125,21 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man, | |||
125 | return -ENOMEM; | 125 | return -ENOMEM; |
126 | 126 | ||
127 | spin_lock_init(&gman->lock); | 127 | spin_lock_init(&gman->lock); |
128 | gman->max_gmr_pages = dev_priv->max_gmr_pages; | ||
129 | gman->used_gmr_pages = 0; | 128 | gman->used_gmr_pages = 0; |
130 | ida_init(&gman->gmr_ida); | 129 | ida_init(&gman->gmr_ida); |
131 | gman->max_gmr_ids = p_size; | 130 | |
131 | switch (p_size) { | ||
132 | case VMW_PL_GMR: | ||
133 | gman->max_gmr_ids = dev_priv->max_gmr_ids; | ||
134 | gman->max_gmr_pages = dev_priv->max_gmr_pages; | ||
135 | break; | ||
136 | case VMW_PL_MOB: | ||
137 | gman->max_gmr_ids = VMWGFX_NUM_MOB; | ||
138 | gman->max_gmr_pages = dev_priv->max_mob_pages; | ||
139 | break; | ||
140 | default: | ||
141 | BUG(); | ||
142 | } | ||
132 | man->priv = (void *) gman; | 143 | man->priv = (void *) gman; |
133 | return 0; | 144 | return 0; |
134 | } | 145 | } |
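The p_size argument, which previously carried the number of GMR ids, is now expected to carry the placement type itself so the same manager can serve both the GMR and the MOB id spaces. A sketch of the assumed caller side (the companion vmwgfx_drv.c change is not shown in this section), using ttm_bo_init_mm():

	/* Hypothetical init helper; not part of the code shown in this patch. */
	static int vmw_init_id_managers(struct vmw_private *dev_priv)
	{
		int ret;

		/* p_size carries the placement type, per the switch above. */
		ret = ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, VMW_PL_GMR);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob)
			ret = ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, VMW_PL_MOB);

		return ret;
	}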
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index a51f48e3e917..116c49736763 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | |||
@@ -53,7 +53,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
53 | param->value = dev_priv->fifo.capabilities; | 53 | param->value = dev_priv->fifo.capabilities; |
54 | break; | 54 | break; |
55 | case DRM_VMW_PARAM_MAX_FB_SIZE: | 55 | case DRM_VMW_PARAM_MAX_FB_SIZE: |
56 | param->value = dev_priv->vram_size; | 56 | param->value = dev_priv->prim_bb_mem; |
57 | break; | 57 | break; |
58 | case DRM_VMW_PARAM_FIFO_HW_VERSION: | 58 | case DRM_VMW_PARAM_FIFO_HW_VERSION: |
59 | { | 59 | { |
@@ -68,6 +68,20 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
68 | SVGA_FIFO_3D_HWVERSION)); | 68 | SVGA_FIFO_3D_HWVERSION)); |
69 | break; | 69 | break; |
70 | } | 70 | } |
71 | case DRM_VMW_PARAM_MAX_SURF_MEMORY: | ||
72 | param->value = dev_priv->memory_size; | ||
73 | break; | ||
74 | case DRM_VMW_PARAM_3D_CAPS_SIZE: | ||
75 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) | ||
76 | param->value = SVGA3D_DEVCAP_MAX; | ||
77 | else | ||
78 | param->value = (SVGA_FIFO_3D_CAPS_LAST - | ||
79 | SVGA_FIFO_3D_CAPS + 1); | ||
80 | param->value *= sizeof(uint32_t); | ||
81 | break; | ||
82 | case DRM_VMW_PARAM_MAX_MOB_MEMORY: | ||
83 | param->value = dev_priv->max_mob_pages * PAGE_SIZE; | ||
84 | break; | ||
71 | default: | 85 | default: |
72 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", | 86 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", |
73 | param->param); | 87 | param->param); |
@@ -89,13 +103,19 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, | |||
89 | void __user *buffer = (void __user *)((unsigned long)(arg->buffer)); | 103 | void __user *buffer = (void __user *)((unsigned long)(arg->buffer)); |
90 | void *bounce; | 104 | void *bounce; |
91 | int ret; | 105 | int ret; |
106 | bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS); | ||
92 | 107 | ||
93 | if (unlikely(arg->pad64 != 0)) { | 108 | if (unlikely(arg->pad64 != 0)) { |
94 | DRM_ERROR("Illegal GET_3D_CAP argument.\n"); | 109 | DRM_ERROR("Illegal GET_3D_CAP argument.\n"); |
95 | return -EINVAL; | 110 | return -EINVAL; |
96 | } | 111 | } |
97 | 112 | ||
98 | size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) << 2; | 113 | if (gb_objects) |
114 | size = SVGA3D_DEVCAP_MAX; | ||
115 | else | ||
116 | size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1); | ||
117 | |||
118 | size *= sizeof(uint32_t); | ||
99 | 119 | ||
100 | if (arg->max_size < size) | 120 | if (arg->max_size < size) |
101 | size = arg->max_size; | 121 | size = arg->max_size; |
@@ -106,8 +126,22 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, | |||
106 | return -ENOMEM; | 126 | return -ENOMEM; |
107 | } | 127 | } |
108 | 128 | ||
109 | fifo_mem = dev_priv->mmio_virt; | 129 | if (gb_objects) { |
110 | memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); | 130 | int i; |
131 | uint32_t *bounce32 = (uint32_t *) bounce; | ||
132 | |||
133 | mutex_lock(&dev_priv->hw_mutex); | ||
134 | for (i = 0; i < SVGA3D_DEVCAP_MAX; ++i) { | ||
135 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); | ||
136 | *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); | ||
137 | } | ||
138 | mutex_unlock(&dev_priv->hw_mutex); | ||
139 | |||
140 | } else { | ||
141 | |||
142 | fifo_mem = dev_priv->mmio_virt; | ||
143 | memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); | ||
144 | } | ||
111 | 145 | ||
112 | ret = copy_to_user(buffer, bounce, size); | 146 | ret = copy_to_user(buffer, bounce, size); |
113 | if (ret) | 147 | if (ret) |
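Userspace is expected to size its bounce buffer from DRM_VMW_PARAM_3D_CAPS_SIZE and then fetch the capabilities with DRM_VMW_GET_3D_CAP; on guest-backed hardware the returned array is indexed by SVGA3D_DEVCAP_* values rather than being a copy of the FIFO caps block. A hedged userspace-side sketch, assuming libdrm's drmCommandWrite() and the struct drm_vmw_get_3d_cap_arg fields (buffer, max_size, pad64) from the uapi header touched by this patch:

	#include <stdint.h>
	#include <stdlib.h>
	#include <xf86drm.h>
	#include "vmwgfx_drm.h"

	/* Hypothetical helper: 'size' comes from DRM_VMW_PARAM_3D_CAPS_SIZE. */
	static uint32_t *vmw_fetch_3d_caps(int fd, size_t size)
	{
		struct drm_vmw_get_3d_cap_arg arg = { 0 };
		uint32_t *caps = calloc(1, size);

		if (caps == NULL)
			return NULL;

		arg.buffer = (uint64_t)(uintptr_t)caps;
		arg.max_size = size;

		if (drmCommandWrite(fd, DRM_VMW_GET_3D_CAP, &arg, sizeof(arg)) != 0) {
			free(caps);
			return NULL;
		}
		/* On SVGA_CAP_GBOBJECTS devices this is indexed by SVGA3D_DEVCAP_*. */
		return caps;
	}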
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 019e2dbb46c8..8a650413dea5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -672,9 +672,9 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, | |||
672 | 672 | ||
673 | if (unlikely(surface->mip_levels[0] != 1 || | 673 | if (unlikely(surface->mip_levels[0] != 1 || |
674 | surface->num_sizes != 1 || | 674 | surface->num_sizes != 1 || |
675 | surface->sizes[0].width < mode_cmd->width || | 675 | surface->base_size.width < mode_cmd->width || |
676 | surface->sizes[0].height < mode_cmd->height || | 676 | surface->base_size.height < mode_cmd->height || |
677 | surface->sizes[0].depth != 1)) { | 677 | surface->base_size.depth != 1)) { |
678 | DRM_ERROR("Incompatible surface dimensions " | 678 | DRM_ERROR("Incompatible surface dimensions " |
679 | "for requested mode.\n"); | 679 | "for requested mode.\n"); |
680 | return -EINVAL; | 680 | return -EINVAL; |
@@ -1645,7 +1645,7 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, | |||
1645 | uint32_t pitch, | 1645 | uint32_t pitch, |
1646 | uint32_t height) | 1646 | uint32_t height) |
1647 | { | 1647 | { |
1648 | return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size; | 1648 | return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem; |
1649 | } | 1649 | } |
1650 | 1650 | ||
1651 | 1651 | ||
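The mode size check above now budgets against dev_priv->prim_bb_mem rather than VRAM size. As a worked example: a 2560x1600 mode at 32 bpp has a pitch of 2560 * 4 = 10240 bytes, so the framebuffer needs 10240 * 1600 = 16384000 bytes (roughly 15.6 MiB) of prim_bb_mem to validate.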
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c new file mode 100644 index 000000000000..ad29651a4302 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | |||
@@ -0,0 +1,659 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | |||
28 | #include "vmwgfx_drv.h" | ||
29 | |||
30 | /* | ||
31 | * If we set up the screen target otable, screen objects stop working. | ||
32 | */ | ||
33 | |||
34 | #define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1) | ||
35 | |||
36 | #ifdef CONFIG_64BIT | ||
37 | #define VMW_PPN_SIZE 8 | ||
38 | #define vmw_cmd_set_otable_base SVGA3dCmdSetOTableBase64 | ||
39 | #define VMW_ID_SET_OTABLE_BASE SVGA_3D_CMD_SET_OTABLE_BASE64 | ||
40 | #define vmw_cmd_define_gb_mob SVGA3dCmdDefineGBMob64 | ||
41 | #define VMW_ID_DEFINE_GB_MOB SVGA_3D_CMD_DEFINE_GB_MOB64 | ||
42 | #define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0 | ||
43 | #define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1 | ||
44 | #define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2 | ||
45 | #else | ||
46 | #define VMW_PPN_SIZE 4 | ||
47 | #define vmw_cmd_set_otable_base SVGA3dCmdSetOTableBase | ||
48 | #define VMW_ID_SET_OTABLE_BASE SVGA_3D_CMD_SET_OTABLE_BASE | ||
49 | #define vmw_cmd_define_gb_mob SVGA3dCmdDefineGBMob | ||
50 | #define VMW_ID_DEFINE_GB_MOB SVGA_3D_CMD_DEFINE_GB_MOB | ||
51 | #define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0 | ||
52 | #define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1 | ||
53 | #define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2 | ||
54 | #endif | ||
55 | |||
56 | /* | ||
57 | * struct vmw_mob - Structure containing page table and metadata for a | ||
58 | * Guest Memory OBject. | ||
59 | * | ||
60 | * @num_pages: Number of pages that make up the page table. | ||
61 | * @pt_level: The indirection level of the page table. 0-2. | ||
62 | * @pt_root_page: DMA address of the level 0 page of the page table. | ||
63 | */ | ||
64 | struct vmw_mob { | ||
65 | struct ttm_buffer_object *pt_bo; | ||
66 | unsigned long num_pages; | ||
67 | unsigned pt_level; | ||
68 | dma_addr_t pt_root_page; | ||
69 | uint32_t id; | ||
70 | }; | ||
71 | |||
72 | /* | ||
73 | * struct vmw_otable - Guest Memory OBject table metadata | ||
74 | * | ||
75 | * @size: Size of the table (page-aligned). | ||
76 | * @page_table: Pointer to a struct vmw_mob holding the page table. | ||
77 | */ | ||
78 | struct vmw_otable { | ||
79 | unsigned long size; | ||
80 | struct vmw_mob *page_table; | ||
81 | }; | ||
82 | |||
83 | static int vmw_mob_pt_populate(struct vmw_private *dev_priv, | ||
84 | struct vmw_mob *mob); | ||
85 | static void vmw_mob_pt_setup(struct vmw_mob *mob, | ||
86 | struct vmw_piter data_iter, | ||
87 | unsigned long num_data_pages); | ||
88 | |||
89 | /* | ||
90 | * vmw_setup_otable_base - Issue an object table base setup command to | ||
91 | * the device | ||
92 | * | ||
93 | * @dev_priv: Pointer to a device private structure | ||
94 | * @type: Type of object table base | ||
95 | * @offset: Start of table offset into dev_priv::otable_bo | ||
96 | * @otable: Pointer to otable metadata. | ||
97 | * | ||
98 | * This function returns -ENOMEM if it fails to reserve fifo space, | ||
99 | * and may block waiting for fifo space. | ||
100 | */ | ||
101 | static int vmw_setup_otable_base(struct vmw_private *dev_priv, | ||
102 | SVGAOTableType type, | ||
103 | unsigned long offset, | ||
104 | struct vmw_otable *otable) | ||
105 | { | ||
106 | struct { | ||
107 | SVGA3dCmdHeader header; | ||
108 | vmw_cmd_set_otable_base body; | ||
109 | } *cmd; | ||
110 | struct vmw_mob *mob; | ||
111 | const struct vmw_sg_table *vsgt; | ||
112 | struct vmw_piter iter; | ||
113 | int ret; | ||
114 | |||
115 | BUG_ON(otable->page_table != NULL); | ||
116 | |||
117 | vsgt = vmw_bo_sg_table(dev_priv->otable_bo); | ||
118 | vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT); | ||
119 | WARN_ON(!vmw_piter_next(&iter)); | ||
120 | |||
121 | mob = vmw_mob_create(otable->size >> PAGE_SHIFT); | ||
122 | if (unlikely(mob == NULL)) { | ||
123 | DRM_ERROR("Failed creating OTable page table.\n"); | ||
124 | return -ENOMEM; | ||
125 | } | ||
126 | |||
127 | if (otable->size <= PAGE_SIZE) { | ||
128 | mob->pt_level = VMW_MOBFMT_PTDEPTH_0; | ||
129 | mob->pt_root_page = vmw_piter_dma_addr(&iter); | ||
130 | } else if (vsgt->num_regions == 1) { | ||
131 | mob->pt_level = SVGA3D_MOBFMT_RANGE; | ||
132 | mob->pt_root_page = vmw_piter_dma_addr(&iter); | ||
133 | } else { | ||
134 | ret = vmw_mob_pt_populate(dev_priv, mob); | ||
135 | if (unlikely(ret != 0)) | ||
136 | goto out_no_populate; | ||
137 | |||
138 | vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT); | ||
139 | mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1; | ||
140 | } | ||
141 | |||
142 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
143 | if (unlikely(cmd == NULL)) { | ||
144 | DRM_ERROR("Failed reserving FIFO space for OTable setup.\n"); | ||
145 | goto out_no_fifo; | ||
146 | } | ||
147 | |||
148 | memset(cmd, 0, sizeof(*cmd)); | ||
149 | cmd->header.id = VMW_ID_SET_OTABLE_BASE; | ||
150 | cmd->header.size = sizeof(cmd->body); | ||
151 | cmd->body.type = type; | ||
152 | cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT; | ||
153 | cmd->body.sizeInBytes = otable->size; | ||
154 | cmd->body.validSizeInBytes = 0; | ||
155 | cmd->body.ptDepth = mob->pt_level; | ||
156 | |||
157 | /* | ||
158 | * The device doesn't support this, but the otable size is | ||
159 | * determined at compile-time, so this BUG shouldn't trigger | ||
160 | * randomly. | ||
161 | */ | ||
162 | BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2); | ||
163 | |||
164 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
165 | otable->page_table = mob; | ||
166 | |||
167 | return 0; | ||
168 | |||
169 | out_no_fifo: | ||
170 | out_no_populate: | ||
171 | vmw_mob_destroy(mob); | ||
172 | return ret; | ||
173 | } | ||
174 | |||
175 | /* | ||
176 | * vmw_takedown_otable_base - Issue an object table base takedown command | ||
177 | * to the device | ||
178 | * | ||
179 | * @dev_priv: Pointer to a device private structure | ||
180 | * @type: Type of object table base | ||
181 | * @otable: Pointer to the otable to take down. | ||
182 | */ | ||
183 | static void vmw_takedown_otable_base(struct vmw_private *dev_priv, | ||
184 | SVGAOTableType type, | ||
185 | struct vmw_otable *otable) | ||
186 | { | ||
187 | struct { | ||
188 | SVGA3dCmdHeader header; | ||
189 | SVGA3dCmdSetOTableBase body; | ||
190 | } *cmd; | ||
191 | struct ttm_buffer_object *bo; | ||
192 | |||
193 | if (otable->page_table == NULL) | ||
194 | return; | ||
195 | bo = otable->page_table->pt_bo; | ||
196 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
197 | if (unlikely(cmd == NULL)) | ||
198 | DRM_ERROR("Failed reserving FIFO space for OTable setup.\n"); | ||
199 | |||
200 | memset(cmd, 0, sizeof(*cmd)); | ||
201 | cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; | ||
202 | cmd->header.size = sizeof(cmd->body); | ||
203 | cmd->body.type = type; | ||
204 | cmd->body.baseAddress = 0; | ||
205 | cmd->body.sizeInBytes = 0; | ||
206 | cmd->body.validSizeInBytes = 0; | ||
207 | cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID; | ||
208 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
209 | |||
210 | if (bo) { | ||
211 | int ret; | ||
212 | |||
213 | ret = ttm_bo_reserve(bo, false, true, false, false); | ||
214 | BUG_ON(ret != 0); | ||
215 | |||
216 | vmw_fence_single_bo(bo, NULL); | ||
217 | ttm_bo_unreserve(bo); | ||
218 | } | ||
219 | |||
220 | vmw_mob_destroy(otable->page_table); | ||
221 | otable->page_table = NULL; | ||
222 | } | ||
223 | |||
224 | /* | ||
225 | * vmw_otables_setup - Set up guest backed memory object tables | ||
226 | * | ||
227 | * @dev_priv: Pointer to a device private structure | ||
228 | * | ||
229 | * Takes care of the device guest backed surface | ||
230 | * initialization by setting up the guest backed memory object tables. | ||
231 | * Returns 0 on success and various error codes on failure. A successful return | ||
232 | * means the object tables can be taken down using the vmw_otables_takedown | ||
233 | * function. | ||
234 | */ | ||
235 | int vmw_otables_setup(struct vmw_private *dev_priv) | ||
236 | { | ||
237 | unsigned long offset; | ||
238 | unsigned long bo_size; | ||
239 | struct vmw_otable *otables; | ||
240 | SVGAOTableType i; | ||
241 | int ret; | ||
242 | |||
243 | otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables), | ||
244 | GFP_KERNEL); | ||
245 | if (unlikely(otables == NULL)) { | ||
246 | DRM_ERROR("Failed to allocate space for otable " | ||
247 | "metadata.\n"); | ||
248 | return -ENOMEM; | ||
249 | } | ||
250 | |||
251 | otables[SVGA_OTABLE_MOB].size = | ||
252 | VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE; | ||
253 | otables[SVGA_OTABLE_SURFACE].size = | ||
254 | VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE; | ||
255 | otables[SVGA_OTABLE_CONTEXT].size = | ||
256 | VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE; | ||
257 | otables[SVGA_OTABLE_SHADER].size = | ||
258 | VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE; | ||
259 | otables[SVGA_OTABLE_SCREEN_TARGET].size = | ||
260 | VMWGFX_NUM_GB_SCREEN_TARGET * | ||
261 | SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE; | ||
262 | |||
263 | bo_size = 0; | ||
264 | for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) { | ||
265 | otables[i].size = | ||
266 | (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK; | ||
267 | bo_size += otables[i].size; | ||
268 | } | ||
269 | |||
270 | ret = ttm_bo_create(&dev_priv->bdev, bo_size, | ||
271 | ttm_bo_type_device, | ||
272 | &vmw_sys_ne_placement, | ||
273 | 0, false, NULL, | ||
274 | &dev_priv->otable_bo); | ||
275 | |||
276 | if (unlikely(ret != 0)) | ||
277 | goto out_no_bo; | ||
278 | |||
279 | ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, false); | ||
280 | BUG_ON(ret != 0); | ||
281 | ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm); | ||
282 | if (unlikely(ret != 0)) | ||
283 | goto out_unreserve; | ||
284 | ret = vmw_bo_map_dma(dev_priv->otable_bo); | ||
285 | if (unlikely(ret != 0)) | ||
286 | goto out_unreserve; | ||
287 | |||
288 | ttm_bo_unreserve(dev_priv->otable_bo); | ||
289 | |||
290 | offset = 0; | ||
291 | for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) { | ||
292 | ret = vmw_setup_otable_base(dev_priv, i, offset, | ||
293 | &otables[i]); | ||
294 | if (unlikely(ret != 0)) | ||
295 | goto out_no_setup; | ||
296 | offset += otables[i].size; | ||
297 | } | ||
298 | |||
299 | dev_priv->otables = otables; | ||
300 | return 0; | ||
301 | |||
302 | out_unreserve: | ||
303 | ttm_bo_unreserve(dev_priv->otable_bo); | ||
304 | out_no_setup: | ||
305 | for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) | ||
306 | vmw_takedown_otable_base(dev_priv, i, &otables[i]); | ||
307 | |||
308 | ttm_bo_unref(&dev_priv->otable_bo); | ||
309 | out_no_bo: | ||
310 | kfree(otables); | ||
311 | return ret; | ||
312 | } | ||
313 | |||
314 | |||
315 | /* | ||
316 | * vmw_otables_takedown - Take down guest backed memory object tables | ||
317 | * | ||
318 | * @dev_priv: Pointer to a device private structure | ||
319 | * | ||
320 | * Take down the Guest Memory Object tables. | ||
321 | */ | ||
322 | void vmw_otables_takedown(struct vmw_private *dev_priv) | ||
323 | { | ||
324 | SVGAOTableType i; | ||
325 | struct ttm_buffer_object *bo = dev_priv->otable_bo; | ||
326 | int ret; | ||
327 | |||
328 | for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) | ||
329 | vmw_takedown_otable_base(dev_priv, i, | ||
330 | &dev_priv->otables[i]); | ||
331 | |||
332 | ret = ttm_bo_reserve(bo, false, true, false, false); | ||
333 | BUG_ON(ret != 0); | ||
334 | |||
335 | vmw_fence_single_bo(bo, NULL); | ||
336 | ttm_bo_unreserve(bo); | ||
337 | |||
338 | ttm_bo_unref(&dev_priv->otable_bo); | ||
339 | kfree(dev_priv->otables); | ||
340 | dev_priv->otables = NULL; | ||
341 | } | ||
342 | |||
343 | |||
344 | /* | ||
345 | * vmw_mob_calculate_pt_pages - Calculate the number of page table pages | ||
346 | * needed for a guest backed memory object. | ||
347 | * | ||
348 | * @data_pages: Number of data pages in the memory object buffer. | ||
349 | */ | ||
350 | static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages) | ||
351 | { | ||
352 | unsigned long data_size = data_pages * PAGE_SIZE; | ||
353 | unsigned long tot_size = 0; | ||
354 | |||
355 | while (likely(data_size > PAGE_SIZE)) { | ||
356 | data_size = DIV_ROUND_UP(data_size, PAGE_SIZE); | ||
357 | data_size *= VMW_PPN_SIZE; | ||
358 | tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK; | ||
359 | } | ||
360 | |||
361 | return tot_size >> PAGE_SHIFT; | ||
362 | } | ||
363 | |||
364 | /* | ||
365 | * vmw_mob_create - Create a mob, but don't populate it. | ||
366 | * | ||
367 | * @data_pages: Number of data pages of the underlying buffer object. | ||
368 | */ | ||
369 | struct vmw_mob *vmw_mob_create(unsigned long data_pages) | ||
370 | { | ||
371 | struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL); | ||
372 | |||
373 | if (unlikely(mob == NULL)) | ||
374 | return NULL; | ||
375 | |||
376 | mob->num_pages = vmw_mob_calculate_pt_pages(data_pages); | ||
377 | |||
378 | return mob; | ||
379 | } | ||
380 | |||
381 | /* | ||
382 | * vmw_mob_pt_populate - Populate the mob pagetable | ||
383 | * | ||
384 | * @mob: Pointer to the mob whose pagetable we want to | ||
385 | * populate. | ||
386 | * | ||
387 | * This function allocates memory to be used for the pagetable, and | ||
388 | * adjusts TTM memory accounting accordingly. Returns -ENOMEM if | ||
389 | * memory resources aren't sufficient; the allocation may cause TTM buffer | ||
390 | * objects to be swapped out through the TTM memory accounting function. | ||
391 | */ | ||
392 | static int vmw_mob_pt_populate(struct vmw_private *dev_priv, | ||
393 | struct vmw_mob *mob) | ||
394 | { | ||
395 | int ret; | ||
396 | BUG_ON(mob->pt_bo != NULL); | ||
397 | |||
398 | ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE, | ||
399 | ttm_bo_type_device, | ||
400 | &vmw_sys_ne_placement, | ||
401 | 0, false, NULL, &mob->pt_bo); | ||
402 | if (unlikely(ret != 0)) | ||
403 | return ret; | ||
404 | |||
405 | ret = ttm_bo_reserve(mob->pt_bo, false, true, false, false); | ||
406 | |||
407 | BUG_ON(ret != 0); | ||
408 | ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm); | ||
409 | if (unlikely(ret != 0)) | ||
410 | goto out_unreserve; | ||
411 | ret = vmw_bo_map_dma(mob->pt_bo); | ||
412 | if (unlikely(ret != 0)) | ||
413 | goto out_unreserve; | ||
414 | |||
415 | ttm_bo_unreserve(mob->pt_bo); | ||
416 | |||
417 | return 0; | ||
418 | |||
419 | out_unreserve: | ||
420 | ttm_bo_unreserve(mob->pt_bo); | ||
421 | ttm_bo_unref(&mob->pt_bo); | ||
422 | |||
423 | return ret; | ||
424 | } | ||
425 | |||
426 | /** | ||
427 | * vmw_mob_assign_ppn - Assign a value to a page table entry | ||
428 | * | ||
429 | * @addr: Pointer to pointer to page table entry. | ||
430 | * @val: The page table entry | ||
431 | * | ||
432 | * Assigns a value to a page table entry pointed to by *@addr and increments | ||
433 | * *@addr according to the page table entry size. | ||
434 | */ | ||
435 | #if (VMW_PPN_SIZE == 8) | ||
436 | static void vmw_mob_assign_ppn(uint32_t **addr, dma_addr_t val) | ||
437 | { | ||
438 | *((uint64_t *) *addr) = val >> PAGE_SHIFT; | ||
439 | *addr += 2; | ||
440 | } | ||
441 | #else | ||
442 | static void vmw_mob_assign_ppn(uint32_t **addr, dma_addr_t val) | ||
443 | { | ||
444 | *(*addr)++ = val >> PAGE_SHIFT; | ||
445 | } | ||
446 | #endif | ||
447 | |||
448 | /* | ||
449 | * vmw_mob_build_pt - Build a pagetable | ||
450 | * | ||
451 | * @data_iter: Struct vmw_piter iterating over the underlying buffer | ||
452 | * object's data pages. | ||
453 | * @num_data_pages: Number of buffer object data pages. | ||
454 | * @pt_iter: Struct vmw_piter iterating over the page table pages. | ||
455 | * | ||
456 | * Returns the number of page table pages actually used. | ||
457 | * Uses atomic kmaps of highmem pages to avoid TLB thrashing. | ||
458 | */ | ||
459 | static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter, | ||
460 | unsigned long num_data_pages, | ||
461 | struct vmw_piter *pt_iter) | ||
462 | { | ||
463 | unsigned long pt_size = num_data_pages * VMW_PPN_SIZE; | ||
464 | unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE); | ||
465 | unsigned long pt_page; | ||
466 | uint32_t *addr, *save_addr; | ||
467 | unsigned long i; | ||
468 | struct page *page; | ||
469 | |||
470 | for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) { | ||
471 | page = vmw_piter_page(pt_iter); | ||
472 | |||
473 | save_addr = addr = kmap_atomic(page); | ||
474 | |||
475 | for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) { | ||
476 | vmw_mob_assign_ppn(&addr, | ||
477 | vmw_piter_dma_addr(data_iter)); | ||
478 | if (unlikely(--num_data_pages == 0)) | ||
479 | break; | ||
480 | WARN_ON(!vmw_piter_next(data_iter)); | ||
481 | } | ||
482 | kunmap_atomic(save_addr); | ||
483 | vmw_piter_next(pt_iter); | ||
484 | } | ||
485 | |||
486 | return num_pt_pages; | ||
487 | } | ||
488 | |||
489 | /* | ||
490 | * vmw_mob_pt_setup - Set up a multilevel mob pagetable | ||
491 | * | ||
492 | * @mob: Pointer to a mob whose page table needs setting up. | ||
493 | * @data_iter: Struct vmw_piter positioned at the buffer object's first | ||
494 | * data page. | ||
495 | * @num_data_pages: Number of buffer object data pages. | ||
496 | * | ||
497 | * Uses tail recursion to set up a multilevel mob page table. | ||
498 | */ | ||
499 | static void vmw_mob_pt_setup(struct vmw_mob *mob, | ||
500 | struct vmw_piter data_iter, | ||
501 | unsigned long num_data_pages) | ||
502 | { | ||
503 | unsigned long num_pt_pages = 0; | ||
504 | struct ttm_buffer_object *bo = mob->pt_bo; | ||
505 | struct vmw_piter save_pt_iter; | ||
506 | struct vmw_piter pt_iter; | ||
507 | const struct vmw_sg_table *vsgt; | ||
508 | int ret; | ||
509 | |||
510 | ret = ttm_bo_reserve(bo, false, true, false, 0); | ||
511 | BUG_ON(ret != 0); | ||
512 | |||
513 | vsgt = vmw_bo_sg_table(bo); | ||
514 | vmw_piter_start(&pt_iter, vsgt, 0); | ||
515 | BUG_ON(!vmw_piter_next(&pt_iter)); | ||
516 | mob->pt_level = 0; | ||
517 | while (likely(num_data_pages > 1)) { | ||
518 | ++mob->pt_level; | ||
519 | BUG_ON(mob->pt_level > 2); | ||
520 | save_pt_iter = pt_iter; | ||
521 | num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages, | ||
522 | &pt_iter); | ||
523 | data_iter = save_pt_iter; | ||
524 | num_data_pages = num_pt_pages; | ||
525 | } | ||
526 | |||
527 | mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter); | ||
528 | ttm_bo_unreserve(bo); | ||
529 | } | ||
530 | |||
531 | /* | ||
532 | * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary. | ||
533 | * | ||
534 | * @mob: Pointer to a mob to destroy. | ||
535 | */ | ||
536 | void vmw_mob_destroy(struct vmw_mob *mob) | ||
537 | { | ||
538 | if (mob->pt_bo) | ||
539 | ttm_bo_unref(&mob->pt_bo); | ||
540 | kfree(mob); | ||
541 | } | ||
542 | |||
543 | /* | ||
544 | * vmw_mob_unbind - Hide a mob from the device. | ||
545 | * | ||
546 | * @dev_priv: Pointer to a device private. | ||
547 | * @mob: Pointer to the mob to unbind. | ||
548 | */ | ||
549 | void vmw_mob_unbind(struct vmw_private *dev_priv, | ||
550 | struct vmw_mob *mob) | ||
551 | { | ||
552 | struct { | ||
553 | SVGA3dCmdHeader header; | ||
554 | SVGA3dCmdDestroyGBMob body; | ||
555 | } *cmd; | ||
556 | int ret; | ||
557 | struct ttm_buffer_object *bo = mob->pt_bo; | ||
558 | |||
559 | if (bo) { | ||
560 | ret = ttm_bo_reserve(bo, false, true, false, 0); | ||
561 | /* | ||
562 | * No one else should be using this buffer. | ||
563 | */ | ||
564 | BUG_ON(ret != 0); | ||
565 | } | ||
566 | |||
567 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
568 | if (unlikely(cmd == NULL)) { | ||
569 | DRM_ERROR("Failed reserving FIFO space for Memory " | ||
570 | "Object unbinding.\n"); | ||
571 | } | ||
572 | cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB; | ||
573 | cmd->header.size = sizeof(cmd->body); | ||
574 | cmd->body.mobid = mob->id; | ||
575 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
576 | if (bo) { | ||
577 | vmw_fence_single_bo(bo, NULL); | ||
578 | ttm_bo_unreserve(bo); | ||
579 | } | ||
580 | vmw_3d_resource_dec(dev_priv, false); | ||
581 | } | ||
582 | |||
583 | /* | ||
584 | * vmw_mob_bind - Make a mob visible to the device after first | ||
585 | * populating it if necessary. | ||
586 | * | ||
587 | * @dev_priv: Pointer to a device private. | ||
588 | * @mob: Pointer to the mob we're making visible. | ||
589 | * @vsgt: Pointer to a struct vmw_sg_table describing the data pages of | ||
590 | * the underlying buffer object. | ||
591 | * @num_data_pages: Number of data pages of the underlying buffer | ||
592 | * object. | ||
593 | * @mob_id: Device id of the mob to bind | ||
594 | * | ||
595 | * This function is intended to be interfaced with the ttm_tt backend | ||
596 | * code. | ||
597 | */ | ||
598 | int vmw_mob_bind(struct vmw_private *dev_priv, | ||
599 | struct vmw_mob *mob, | ||
600 | const struct vmw_sg_table *vsgt, | ||
601 | unsigned long num_data_pages, | ||
602 | int32_t mob_id) | ||
603 | { | ||
604 | int ret; | ||
605 | bool pt_set_up = false; | ||
606 | struct vmw_piter data_iter; | ||
607 | struct { | ||
608 | SVGA3dCmdHeader header; | ||
609 | vmw_cmd_define_gb_mob body; | ||
610 | } *cmd; | ||
611 | |||
612 | mob->id = mob_id; | ||
613 | vmw_piter_start(&data_iter, vsgt, 0); | ||
614 | if (unlikely(!vmw_piter_next(&data_iter))) | ||
615 | return 0; | ||
616 | |||
617 | if (likely(num_data_pages == 1)) { | ||
618 | mob->pt_level = VMW_MOBFMT_PTDEPTH_0; | ||
619 | mob->pt_root_page = vmw_piter_dma_addr(&data_iter); | ||
620 | } else if (vsgt->num_regions == 1) { | ||
621 | mob->pt_level = SVGA3D_MOBFMT_RANGE; | ||
622 | mob->pt_root_page = vmw_piter_dma_addr(&data_iter); | ||
623 | } else if (unlikely(mob->pt_bo == NULL)) { | ||
624 | ret = vmw_mob_pt_populate(dev_priv, mob); | ||
625 | if (unlikely(ret != 0)) | ||
626 | return ret; | ||
627 | |||
628 | vmw_mob_pt_setup(mob, data_iter, num_data_pages); | ||
629 | pt_set_up = true; | ||
630 | mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1; | ||
631 | } | ||
632 | |||
633 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
634 | |||
635 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
636 | if (unlikely(cmd == NULL)) { | ||
637 | DRM_ERROR("Failed reserving FIFO space for Memory " | ||
638 | "Object binding.\n"); | ||
639 | goto out_no_cmd_space; | ||
640 | } | ||
641 | |||
642 | cmd->header.id = VMW_ID_DEFINE_GB_MOB; | ||
643 | cmd->header.size = sizeof(cmd->body); | ||
644 | cmd->body.mobid = mob_id; | ||
645 | cmd->body.ptDepth = mob->pt_level; | ||
646 | cmd->body.base = mob->pt_root_page >> PAGE_SHIFT; | ||
647 | cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE; | ||
648 | |||
649 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
650 | |||
651 | return 0; | ||
652 | |||
653 | out_no_cmd_space: | ||
654 | vmw_3d_resource_dec(dev_priv, false); | ||
655 | if (pt_set_up) | ||
656 | ttm_bo_unref(&mob->pt_bo); | ||
657 | |||
658 | return -ENOMEM; | ||
659 | } | ||
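As a worked example of the page-table sizing above (assuming 4 KiB pages and the 8-byte PPN64 entries of the 64-bit build): a 16 MiB MOB covers 4096 data pages, so vmw_mob_calculate_pt_pages() and vmw_mob_pt_setup() end up with a two-level table:

	16 MiB MOB -> 4096 data pages
	level 1: DIV_ROUND_UP(4096 * 8, PAGE_SIZE) = 8 page-table pages (one PPN64 per data page)
	level 2: DIV_ROUND_UP(   8 * 8, PAGE_SIZE) = 1 page-table page  (one PPN64 per level-1 page)
	total:   9 page-table pages, pt_level = 2

Buffers that fit in a single page, or whose backing pages form one contiguous run, bypass the page table entirely (PTDEPTH_0 and SVGA3D_MOBFMT_RANGE respectively), as handled in vmw_setup_otable_base() and vmw_mob_bind() above.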
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 9b5ea2ac7ddf..6fdd82d42f65 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -215,6 +215,7 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res, | |||
215 | res->func = func; | 215 | res->func = func; |
216 | INIT_LIST_HEAD(&res->lru_head); | 216 | INIT_LIST_HEAD(&res->lru_head); |
217 | INIT_LIST_HEAD(&res->mob_head); | 217 | INIT_LIST_HEAD(&res->mob_head); |
218 | INIT_LIST_HEAD(&res->binding_head); | ||
218 | res->id = -1; | 219 | res->id = -1; |
219 | res->backup = NULL; | 220 | res->backup = NULL; |
220 | res->backup_offset = 0; | 221 | res->backup_offset = 0; |
@@ -441,6 +442,21 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) | |||
441 | ttm_bo_unref(&bo); | 442 | ttm_bo_unref(&bo); |
442 | } | 443 | } |
443 | 444 | ||
445 | static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base, | ||
446 | enum ttm_ref_type ref_type) | ||
447 | { | ||
448 | struct vmw_user_dma_buffer *user_bo; | ||
449 | user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base); | ||
450 | |||
451 | switch (ref_type) { | ||
452 | case TTM_REF_SYNCCPU_WRITE: | ||
453 | ttm_bo_synccpu_write_release(&user_bo->dma.base); | ||
454 | break; | ||
455 | default: | ||
456 | BUG(); | ||
457 | } | ||
458 | } | ||
459 | |||
444 | /** | 460 | /** |
445 | * vmw_user_dmabuf_alloc - Allocate a user dma buffer | 461 | * vmw_user_dmabuf_alloc - Allocate a user dma buffer |
446 | * | 462 | * |
@@ -471,6 +487,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, | |||
471 | } | 487 | } |
472 | 488 | ||
473 | ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size, | 489 | ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size, |
490 | (dev_priv->has_mob) ? | ||
491 | &vmw_sys_placement : | ||
474 | &vmw_vram_sys_placement, true, | 492 | &vmw_vram_sys_placement, true, |
475 | &vmw_user_dmabuf_destroy); | 493 | &vmw_user_dmabuf_destroy); |
476 | if (unlikely(ret != 0)) | 494 | if (unlikely(ret != 0)) |
@@ -482,7 +500,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, | |||
482 | &user_bo->prime, | 500 | &user_bo->prime, |
483 | shareable, | 501 | shareable, |
484 | ttm_buffer_type, | 502 | ttm_buffer_type, |
485 | &vmw_user_dmabuf_release, NULL); | 503 | &vmw_user_dmabuf_release, |
504 | &vmw_user_dmabuf_ref_obj_release); | ||
486 | if (unlikely(ret != 0)) { | 505 | if (unlikely(ret != 0)) { |
487 | ttm_bo_unref(&tmp); | 506 | ttm_bo_unref(&tmp); |
488 | goto out_no_base_object; | 507 | goto out_no_base_object; |
@@ -515,6 +534,130 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, | |||
515 | vmw_user_bo->prime.base.shareable) ? 0 : -EPERM; | 534 | vmw_user_bo->prime.base.shareable) ? 0 : -EPERM; |
516 | } | 535 | } |
517 | 536 | ||
537 | /** | ||
538 | * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu | ||
539 | * access, idling previous GPU operations on the buffer and optionally | ||
540 | * blocking it for further command submissions. | ||
541 | * | ||
542 | * @user_bo: Pointer to the buffer object being grabbed for CPU access | ||
543 | * @tfile: Identifying the caller. | ||
544 | * @flags: Flags indicating how the grab should be performed. | ||
545 | * | ||
546 | * A blocking grab will be automatically released when @tfile is closed. | ||
547 | */ | ||
548 | static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo, | ||
549 | struct ttm_object_file *tfile, | ||
550 | uint32_t flags) | ||
551 | { | ||
552 | struct ttm_buffer_object *bo = &user_bo->dma.base; | ||
553 | bool existed; | ||
554 | int ret; | ||
555 | |||
556 | if (flags & drm_vmw_synccpu_allow_cs) { | ||
557 | struct ttm_bo_device *bdev = bo->bdev; | ||
558 | |||
559 | spin_lock(&bdev->fence_lock); | ||
560 | ret = ttm_bo_wait(bo, false, true, | ||
561 | !!(flags & drm_vmw_synccpu_dontblock)); | ||
562 | spin_unlock(&bdev->fence_lock); | ||
563 | return ret; | ||
564 | } | ||
565 | |||
566 | ret = ttm_bo_synccpu_write_grab | ||
567 | (bo, !!(flags & drm_vmw_synccpu_dontblock)); | ||
568 | if (unlikely(ret != 0)) | ||
569 | return ret; | ||
570 | |||
571 | ret = ttm_ref_object_add(tfile, &user_bo->prime.base, | ||
572 | TTM_REF_SYNCCPU_WRITE, &existed); | ||
573 | if (ret != 0 || existed) | ||
574 | ttm_bo_synccpu_write_release(&user_bo->dma.base); | ||
575 | |||
576 | return ret; | ||
577 | } | ||
578 | |||
579 | /** | ||
580 | * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access, | ||
581 | * and unblock command submission on the buffer if blocked. | ||
582 | * | ||
583 | * @handle: Handle identifying the buffer object. | ||
584 | * @tfile: Identifying the caller. | ||
585 | * @flags: Flags indicating the type of release. | ||
586 | */ | ||
587 | static int vmw_user_dmabuf_synccpu_release(uint32_t handle, | ||
588 | struct ttm_object_file *tfile, | ||
589 | uint32_t flags) | ||
590 | { | ||
591 | if (!(flags & drm_vmw_synccpu_allow_cs)) | ||
592 | return ttm_ref_object_base_unref(tfile, handle, | ||
593 | TTM_REF_SYNCCPU_WRITE); | ||
594 | |||
595 | return 0; | ||
596 | } | ||
597 | |||
598 | /** | ||
599 | * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu | ||
600 | * functionality. | ||
601 | * | ||
602 | * @dev: Identifies the drm device. | ||
603 | * @data: Pointer to the ioctl argument. | ||
604 | * @file_priv: Identifies the caller. | ||
605 | * | ||
606 | * This function checks the ioctl arguments for validity and calls the | ||
607 | * relevant synccpu functions. | ||
608 | */ | ||
609 | int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, | ||
610 | struct drm_file *file_priv) | ||
611 | { | ||
612 | struct drm_vmw_synccpu_arg *arg = | ||
613 | (struct drm_vmw_synccpu_arg *) data; | ||
614 | struct vmw_dma_buffer *dma_buf; | ||
615 | struct vmw_user_dma_buffer *user_bo; | ||
616 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
617 | int ret; | ||
618 | |||
619 | if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 | ||
620 | || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write | | ||
621 | drm_vmw_synccpu_dontblock | | ||
622 | drm_vmw_synccpu_allow_cs)) != 0) { | ||
623 | DRM_ERROR("Illegal synccpu flags.\n"); | ||
624 | return -EINVAL; | ||
625 | } | ||
626 | |||
627 | switch (arg->op) { | ||
628 | case drm_vmw_synccpu_grab: | ||
629 | ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf); | ||
630 | if (unlikely(ret != 0)) | ||
631 | return ret; | ||
632 | |||
633 | user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, | ||
634 | dma); | ||
635 | ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags); | ||
636 | vmw_dmabuf_unreference(&dma_buf); | ||
637 | if (unlikely(ret != 0 && ret != -ERESTARTSYS && | ||
638 | ret != -EBUSY)) { | ||
639 | DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", | ||
640 | (unsigned int) arg->handle); | ||
641 | return ret; | ||
642 | } | ||
643 | break; | ||
644 | case drm_vmw_synccpu_release: | ||
645 | ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile, | ||
646 | arg->flags); | ||
647 | if (unlikely(ret != 0)) { | ||
648 | DRM_ERROR("Failed synccpu release on handle 0x%08x.\n", | ||
649 | (unsigned int) arg->handle); | ||
650 | return ret; | ||
651 | } | ||
652 | break; | ||
653 | default: | ||
654 | DRM_ERROR("Invalid synccpu operation.\n"); | ||
655 | return -EINVAL; | ||
656 | } | ||
657 | |||
658 | return 0; | ||
659 | } | ||
660 | |||
518 | int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | 661 | int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, |
519 | struct drm_file *file_priv) | 662 | struct drm_file *file_priv) |
520 | { | 663 | { |
@@ -591,7 +734,8 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, | |||
591 | } | 734 | } |
592 | 735 | ||
593 | int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, | 736 | int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, |
594 | struct vmw_dma_buffer *dma_buf) | 737 | struct vmw_dma_buffer *dma_buf, |
738 | uint32_t *handle) | ||
595 | { | 739 | { |
596 | struct vmw_user_dma_buffer *user_bo; | 740 | struct vmw_user_dma_buffer *user_bo; |
597 | 741 | ||
@@ -599,6 +743,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, | |||
599 | return -EINVAL; | 743 | return -EINVAL; |
600 | 744 | ||
601 | user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); | 745 | user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); |
746 | |||
747 | *handle = user_bo->prime.base.hash.key; | ||
602 | return ttm_ref_object_add(tfile, &user_bo->prime.base, | 748 | return ttm_ref_object_add(tfile, &user_bo->prime.base, |
603 | TTM_REF_USAGE, NULL); | 749 | TTM_REF_USAGE, NULL); |
604 | } | 750 | } |
@@ -1291,11 +1437,54 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo, | |||
1291 | * @mem: The truct ttm_mem_reg indicating to what memory | 1437 | * @mem: The truct ttm_mem_reg indicating to what memory |
1292 | * region the move is taking place. | 1438 | * region the move is taking place. |
1293 | * | 1439 | * |
1294 | * For now does nothing. | 1440 | * Evicts the Guest Backed hardware resource if the backup |
1441 | * buffer is being moved out of MOB memory. | ||
1442 | * Note that this function should not race with the resource | ||
1443 | * validation code as long as it accesses only members of struct | ||
1444 | * resource that remain static while bo::res is !NULL and | ||
1445 | * while we have @bo reserved. struct resource::backup is *not* a | ||
1446 | * static member. The resource validation code will take care | ||
1447 | * to set @bo::res to NULL, while having @bo reserved when the | ||
1448 | * buffer is no longer bound to the resource, so @bo::res can be | ||
1449 | * used to determine whether there is a need to unbind and whether | ||
1450 | * it is safe to unbind. | ||
1295 | */ | 1451 | */ |
1296 | void vmw_resource_move_notify(struct ttm_buffer_object *bo, | 1452 | void vmw_resource_move_notify(struct ttm_buffer_object *bo, |
1297 | struct ttm_mem_reg *mem) | 1453 | struct ttm_mem_reg *mem) |
1298 | { | 1454 | { |
1455 | struct vmw_dma_buffer *dma_buf; | ||
1456 | |||
1457 | if (mem == NULL) | ||
1458 | return; | ||
1459 | |||
1460 | if (bo->destroy != vmw_dmabuf_bo_free && | ||
1461 | bo->destroy != vmw_user_dmabuf_destroy) | ||
1462 | return; | ||
1463 | |||
1464 | dma_buf = container_of(bo, struct vmw_dma_buffer, base); | ||
1465 | |||
1466 | if (mem->mem_type != VMW_PL_MOB) { | ||
1467 | struct vmw_resource *res, *n; | ||
1468 | struct ttm_bo_device *bdev = bo->bdev; | ||
1469 | struct ttm_validate_buffer val_buf; | ||
1470 | |||
1471 | val_buf.bo = bo; | ||
1472 | |||
1473 | list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) { | ||
1474 | |||
1475 | if (unlikely(res->func->unbind == NULL)) | ||
1476 | continue; | ||
1477 | |||
1478 | (void) res->func->unbind(res, true, &val_buf); | ||
1479 | res->backup_dirty = true; | ||
1480 | res->res_dirty = false; | ||
1481 | list_del_init(&res->mob_head); | ||
1482 | } | ||
1483 | |||
1484 | spin_lock(&bdev->fence_lock); | ||
1485 | (void) ttm_bo_wait(bo, false, false, false); | ||
1486 | spin_unlock(&bdev->fence_lock); | ||
1487 | } | ||
1299 | } | 1488 | } |
1300 | 1489 | ||
1301 | /** | 1490 | /** |
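The new synccpu interface lets userspace idle the GPU on a dma buffer and, unless drm_vmw_synccpu_allow_cs is set, block further command submission until release. A hedged userspace-side sketch, assuming the DRM_VMW_SYNCCPU command and the struct drm_vmw_synccpu_arg fields (handle, op, flags) added to include/uapi/drm/vmwgfx_drm.h by this change:

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include "vmwgfx_drm.h"

	/* Hypothetical helper showing a grab/modify/release cycle on a mapped bo. */
	static int vmw_bo_cpu_fill(int fd, uint32_t handle, void *map, size_t size)
	{
		struct drm_vmw_synccpu_arg arg = { 0 };
		int ret;

		arg.handle = handle;
		arg.op = drm_vmw_synccpu_grab;
		arg.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write;

		/* Idles the GPU on the buffer and blocks new command submission. */
		ret = drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
		if (ret != 0)
			return ret;

		memset(map, 0, size);	/* CPU access is now safe */

		arg.op = drm_vmw_synccpu_release;
		return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
	}

Per the vmw_user_dmabuf_synccpu_grab() documentation above, a blocking grab that is never released is dropped automatically when the drm file is closed.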
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c new file mode 100644 index 000000000000..813bd0a2abaf --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | |||
@@ -0,0 +1,440 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | |||
28 | #include "vmwgfx_drv.h" | ||
29 | #include "vmwgfx_resource_priv.h" | ||
30 | #include "ttm/ttm_placement.h" | ||
31 | |||
32 | struct vmw_shader { | ||
33 | struct vmw_resource res; | ||
34 | SVGA3dShaderType type; | ||
35 | uint32_t size; | ||
36 | }; | ||
37 | |||
38 | struct vmw_user_shader { | ||
39 | struct ttm_base_object base; | ||
40 | struct vmw_shader shader; | ||
41 | }; | ||
42 | |||
43 | static void vmw_user_shader_free(struct vmw_resource *res); | ||
44 | static struct vmw_resource * | ||
45 | vmw_user_shader_base_to_res(struct ttm_base_object *base); | ||
46 | |||
47 | static int vmw_gb_shader_create(struct vmw_resource *res); | ||
48 | static int vmw_gb_shader_bind(struct vmw_resource *res, | ||
49 | struct ttm_validate_buffer *val_buf); | ||
50 | static int vmw_gb_shader_unbind(struct vmw_resource *res, | ||
51 | bool readback, | ||
52 | struct ttm_validate_buffer *val_buf); | ||
53 | static int vmw_gb_shader_destroy(struct vmw_resource *res); | ||
54 | |||
55 | static uint64_t vmw_user_shader_size; | ||
56 | |||
57 | static const struct vmw_user_resource_conv user_shader_conv = { | ||
58 | .object_type = VMW_RES_SHADER, | ||
59 | .base_obj_to_res = vmw_user_shader_base_to_res, | ||
60 | .res_free = vmw_user_shader_free | ||
61 | }; | ||
62 | |||
63 | const struct vmw_user_resource_conv *user_shader_converter = | ||
64 | &user_shader_conv; | ||
65 | |||
66 | |||
67 | static const struct vmw_res_func vmw_gb_shader_func = { | ||
68 | .res_type = vmw_res_shader, | ||
69 | .needs_backup = true, | ||
70 | .may_evict = true, | ||
71 | .type_name = "guest backed shaders", | ||
72 | .backup_placement = &vmw_mob_placement, | ||
73 | .create = vmw_gb_shader_create, | ||
74 | .destroy = vmw_gb_shader_destroy, | ||
75 | .bind = vmw_gb_shader_bind, | ||
76 | .unbind = vmw_gb_shader_unbind | ||
77 | }; | ||
78 | |||
79 | /** | ||
80 | * Shader management: | ||
81 | */ | ||
82 | |||
83 | static inline struct vmw_shader * | ||
84 | vmw_res_to_shader(struct vmw_resource *res) | ||
85 | { | ||
86 | return container_of(res, struct vmw_shader, res); | ||
87 | } | ||
88 | |||
89 | static void vmw_hw_shader_destroy(struct vmw_resource *res) | ||
90 | { | ||
91 | (void) vmw_gb_shader_destroy(res); | ||
92 | } | ||
93 | |||
94 | static int vmw_gb_shader_init(struct vmw_private *dev_priv, | ||
95 | struct vmw_resource *res, | ||
96 | uint32_t size, | ||
97 | uint64_t offset, | ||
98 | SVGA3dShaderType type, | ||
99 | struct vmw_dma_buffer *byte_code, | ||
100 | void (*res_free) (struct vmw_resource *res)) | ||
101 | { | ||
102 | struct vmw_shader *shader = vmw_res_to_shader(res); | ||
103 | int ret; | ||
104 | |||
105 | ret = vmw_resource_init(dev_priv, res, true, | ||
106 | res_free, &vmw_gb_shader_func); | ||
107 | |||
108 | |||
109 | if (unlikely(ret != 0)) { | ||
110 | if (res_free) | ||
111 | res_free(res); | ||
112 | else | ||
113 | kfree(res); | ||
114 | return ret; | ||
115 | } | ||
116 | |||
117 | res->backup_size = size; | ||
118 | if (byte_code) { | ||
119 | res->backup = vmw_dmabuf_reference(byte_code); | ||
120 | res->backup_offset = offset; | ||
121 | } | ||
122 | shader->size = size; | ||
123 | shader->type = type; | ||
124 | |||
125 | vmw_resource_activate(res, vmw_hw_shader_destroy); | ||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | static int vmw_gb_shader_create(struct vmw_resource *res) | ||
130 | { | ||
131 | struct vmw_private *dev_priv = res->dev_priv; | ||
132 | struct vmw_shader *shader = vmw_res_to_shader(res); | ||
133 | int ret; | ||
134 | struct { | ||
135 | SVGA3dCmdHeader header; | ||
136 | SVGA3dCmdDefineGBShader body; | ||
137 | } *cmd; | ||
138 | |||
139 | if (likely(res->id != -1)) | ||
140 | return 0; | ||
141 | |||
142 | ret = vmw_resource_alloc_id(res); | ||
143 | if (unlikely(ret != 0)) { | ||
144 | DRM_ERROR("Failed to allocate a shader id.\n"); | ||
145 | goto out_no_id; | ||
146 | } | ||
147 | |||
148 | if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) { | ||
149 | ret = -EBUSY; | ||
150 | goto out_no_fifo; | ||
151 | } | ||
152 | |||
153 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
154 | if (unlikely(cmd == NULL)) { | ||
155 | DRM_ERROR("Failed reserving FIFO space for shader " | ||
156 | "creation.\n"); | ||
157 | ret = -ENOMEM; | ||
158 | goto out_no_fifo; | ||
159 | } | ||
160 | |||
161 | cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER; | ||
162 | cmd->header.size = sizeof(cmd->body); | ||
163 | cmd->body.shid = res->id; | ||
164 | cmd->body.type = shader->type; | ||
165 | cmd->body.sizeInBytes = shader->size; | ||
166 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
167 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
168 | |||
169 | return 0; | ||
170 | |||
171 | out_no_fifo: | ||
172 | vmw_resource_release_id(res); | ||
173 | out_no_id: | ||
174 | return ret; | ||
175 | } | ||
176 | |||
177 | static int vmw_gb_shader_bind(struct vmw_resource *res, | ||
178 | struct ttm_validate_buffer *val_buf) | ||
179 | { | ||
180 | struct vmw_private *dev_priv = res->dev_priv; | ||
181 | struct { | ||
182 | SVGA3dCmdHeader header; | ||
183 | SVGA3dCmdBindGBShader body; | ||
184 | } *cmd; | ||
185 | struct ttm_buffer_object *bo = val_buf->bo; | ||
186 | |||
187 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
188 | |||
189 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
190 | if (unlikely(cmd == NULL)) { | ||
191 | DRM_ERROR("Failed reserving FIFO space for shader " | ||
192 | "binding.\n"); | ||
193 | return -ENOMEM; | ||
194 | } | ||
195 | |||
196 | cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER; | ||
197 | cmd->header.size = sizeof(cmd->body); | ||
198 | cmd->body.shid = res->id; | ||
199 | cmd->body.mobid = bo->mem.start; | ||
200 | cmd->body.offsetInBytes = 0; | ||
201 | res->backup_dirty = false; | ||
202 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
203 | |||
204 | return 0; | ||
205 | } | ||
206 | |||
207 | static int vmw_gb_shader_unbind(struct vmw_resource *res, | ||
208 | bool readback, | ||
209 | struct ttm_validate_buffer *val_buf) | ||
210 | { | ||
211 | struct vmw_private *dev_priv = res->dev_priv; | ||
212 | struct { | ||
213 | SVGA3dCmdHeader header; | ||
214 | SVGA3dCmdBindGBShader body; | ||
215 | } *cmd; | ||
216 | struct vmw_fence_obj *fence; | ||
217 | |||
218 | BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB); | ||
219 | |||
220 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
221 | if (unlikely(cmd == NULL)) { | ||
222 | DRM_ERROR("Failed reserving FIFO space for shader " | ||
223 | "unbinding.\n"); | ||
224 | return -ENOMEM; | ||
225 | } | ||
226 | |||
227 | cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER; | ||
228 | cmd->header.size = sizeof(cmd->body); | ||
229 | cmd->body.shid = res->id; | ||
230 | cmd->body.mobid = SVGA3D_INVALID_ID; | ||
231 | cmd->body.offsetInBytes = 0; | ||
232 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
233 | |||
234 | /* | ||
235 | * Create a fence object and fence the backup buffer. | ||
236 | */ | ||
237 | |||
238 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, | ||
239 | &fence, NULL); | ||
240 | |||
241 | vmw_fence_single_bo(val_buf->bo, fence); | ||
242 | |||
243 | if (likely(fence != NULL)) | ||
244 | vmw_fence_obj_unreference(&fence); | ||
245 | |||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | static int vmw_gb_shader_destroy(struct vmw_resource *res) | ||
250 | { | ||
251 | struct vmw_private *dev_priv = res->dev_priv; | ||
252 | struct { | ||
253 | SVGA3dCmdHeader header; | ||
254 | SVGA3dCmdDestroyGBShader body; | ||
255 | } *cmd; | ||
256 | |||
257 | if (likely(res->id == -1)) | ||
258 | return 0; | ||
259 | |||
260 | mutex_lock(&dev_priv->binding_mutex); | ||
261 | vmw_context_binding_res_list_kill(&res->binding_head); | ||
262 | |||
263 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
264 | if (unlikely(cmd == NULL)) { | ||
265 | DRM_ERROR("Failed reserving FIFO space for shader " | ||
266 | "destruction.\n"); | ||
267 | return -ENOMEM; | ||
268 | } | ||
269 | |||
270 | cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER; | ||
271 | cmd->header.size = sizeof(cmd->body); | ||
272 | cmd->body.shid = res->id; | ||
273 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
274 | mutex_unlock(&dev_priv->binding_mutex); | ||
275 | vmw_resource_release_id(res); | ||
276 | vmw_3d_resource_dec(dev_priv, false); | ||
277 | |||
278 | return 0; | ||
279 | } | ||
280 | |||
281 | /** | ||
282 | * User-space shader management: | ||
283 | */ | ||
284 | |||
285 | static struct vmw_resource * | ||
286 | vmw_user_shader_base_to_res(struct ttm_base_object *base) | ||
287 | { | ||
288 | return &(container_of(base, struct vmw_user_shader, base)-> | ||
289 | shader.res); | ||
290 | } | ||
291 | |||
292 | static void vmw_user_shader_free(struct vmw_resource *res) | ||
293 | { | ||
294 | struct vmw_user_shader *ushader = | ||
295 | container_of(res, struct vmw_user_shader, shader.res); | ||
296 | struct vmw_private *dev_priv = res->dev_priv; | ||
297 | |||
298 | ttm_base_object_kfree(ushader, base); | ||
299 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | ||
300 | vmw_user_shader_size); | ||
301 | } | ||
302 | |||
303 | /** | ||
304 | * This function is called when user space has no more references on the | ||
305 | * base object. It releases the base-object's reference on the resource object. | ||
306 | */ | ||
307 | |||
308 | static void vmw_user_shader_base_release(struct ttm_base_object **p_base) | ||
309 | { | ||
310 | struct ttm_base_object *base = *p_base; | ||
311 | struct vmw_resource *res = vmw_user_shader_base_to_res(base); | ||
312 | |||
313 | *p_base = NULL; | ||
314 | vmw_resource_unreference(&res); | ||
315 | } | ||
316 | |||
317 | int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, | ||
318 | struct drm_file *file_priv) | ||
319 | { | ||
320 | struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data; | ||
321 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
322 | |||
323 | return ttm_ref_object_base_unref(tfile, arg->handle, | ||
324 | TTM_REF_USAGE); | ||
325 | } | ||
326 | |||
327 | int vmw_shader_define_ioctl(struct drm_device *dev, void *data, | ||
328 | struct drm_file *file_priv) | ||
329 | { | ||
330 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
331 | struct vmw_user_shader *ushader; | ||
332 | struct vmw_resource *res; | ||
333 | struct vmw_resource *tmp; | ||
334 | struct drm_vmw_shader_create_arg *arg = | ||
335 | (struct drm_vmw_shader_create_arg *)data; | ||
336 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
337 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
338 | struct vmw_dma_buffer *buffer = NULL; | ||
339 | SVGA3dShaderType shader_type; | ||
340 | int ret; | ||
341 | |||
342 | if (arg->buffer_handle != SVGA3D_INVALID_ID) { | ||
343 | ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle, | ||
344 | &buffer); | ||
345 | if (unlikely(ret != 0)) { | ||
346 | DRM_ERROR("Could not find buffer for shader " | ||
347 | "creation.\n"); | ||
348 | return ret; | ||
349 | } | ||
350 | |||
351 | if ((u64)buffer->base.num_pages * PAGE_SIZE < | ||
352 | (u64)arg->size + (u64)arg->offset) { | ||
353 | DRM_ERROR("Illegal buffer- or shader size.\n"); | ||
354 | ret = -EINVAL; | ||
355 | goto out_bad_arg; | ||
356 | } | ||
357 | } | ||
358 | |||
359 | switch (arg->shader_type) { | ||
360 | case drm_vmw_shader_type_vs: | ||
361 | shader_type = SVGA3D_SHADERTYPE_VS; | ||
362 | break; | ||
363 | case drm_vmw_shader_type_ps: | ||
364 | shader_type = SVGA3D_SHADERTYPE_PS; | ||
365 | break; | ||
366 | case drm_vmw_shader_type_gs: | ||
367 | shader_type = SVGA3D_SHADERTYPE_GS; | ||
368 | break; | ||
369 | default: | ||
370 | DRM_ERROR("Illegal shader type.\n"); | ||
371 | ret = -EINVAL; | ||
372 | goto out_bad_arg; | ||
373 | } | ||
374 | |||
375 | /* | ||
376 | * Approximate idr memory usage with 128 bytes. It will be limited | ||
377 | 	 * by the maximum number of shaders anyway. | ||
378 | */ | ||
379 | |||
380 | if (unlikely(vmw_user_shader_size == 0)) | ||
381 | vmw_user_shader_size = ttm_round_pot(sizeof(*ushader)) | ||
382 | + 128; | ||
383 | |||
384 | ret = ttm_read_lock(&vmaster->lock, true); | ||
385 | if (unlikely(ret != 0)) | ||
386 | return ret; | ||
387 | |||
388 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), | ||
389 | vmw_user_shader_size, | ||
390 | false, true); | ||
391 | if (unlikely(ret != 0)) { | ||
392 | if (ret != -ERESTARTSYS) | ||
393 | DRM_ERROR("Out of graphics memory for shader" | ||
394 | " creation.\n"); | ||
395 | goto out_unlock; | ||
396 | } | ||
397 | |||
398 | ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); | ||
399 | if (unlikely(ushader == NULL)) { | ||
400 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | ||
401 | vmw_user_shader_size); | ||
402 | ret = -ENOMEM; | ||
403 | goto out_unlock; | ||
404 | } | ||
405 | |||
406 | res = &ushader->shader.res; | ||
407 | ushader->base.shareable = false; | ||
408 | ushader->base.tfile = NULL; | ||
409 | |||
410 | /* | ||
411 | * From here on, the destructor takes over resource freeing. | ||
412 | */ | ||
413 | |||
414 | ret = vmw_gb_shader_init(dev_priv, res, arg->size, | ||
415 | arg->offset, shader_type, buffer, | ||
416 | vmw_user_shader_free); | ||
417 | if (unlikely(ret != 0)) | ||
418 | goto out_unlock; | ||
419 | |||
420 | tmp = vmw_resource_reference(res); | ||
421 | ret = ttm_base_object_init(tfile, &ushader->base, false, | ||
422 | VMW_RES_SHADER, | ||
423 | &vmw_user_shader_base_release, NULL); | ||
424 | |||
425 | if (unlikely(ret != 0)) { | ||
426 | vmw_resource_unreference(&tmp); | ||
427 | goto out_err; | ||
428 | } | ||
429 | |||
430 | arg->shader_handle = ushader->base.hash.key; | ||
431 | out_err: | ||
432 | vmw_resource_unreference(&res); | ||
433 | out_unlock: | ||
434 | ttm_read_unlock(&vmaster->lock); | ||
435 | out_bad_arg: | ||
436 | vmw_dmabuf_unreference(&buffer); | ||
437 | |||
438 | return ret; | ||
439 | |||
440 | } | ||
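
The two ioctl entry points above (vmw_shader_define_ioctl and vmw_shader_destroy_ioctl) are reached from user space through the DRM_VMW_CREATE_SHADER and DRM_VMW_UNREF_SHADER commands added to the uapi header further down. Below is a minimal user-space sketch of the expected call sequence, assuming libdrm's drmCommandWriteRead()/drmCommandWrite() helpers and a dma buffer that already holds the shader byte-code; the helper name, include path and buffer setup are illustrative and not part of the patch.

#include <string.h>
#include <stdint.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"	/* the uapi header extended by this patch */

/* Illustrative helper, not part of the patch. */
static int create_and_unref_shader(int fd, uint32_t buffer_handle,
				   uint32_t byte_code_size)
{
	struct drm_vmw_shader_create_arg carg;
	struct drm_vmw_shader_arg darg;
	int ret;

	memset(&carg, 0, sizeof(carg));
	carg.shader_type = drm_vmw_shader_type_vs;
	carg.size = byte_code_size;
	carg.buffer_handle = buffer_handle;	/* buffer holding the byte-code */
	carg.offset = 0;			/* byte-code starts at offset 0 */

	ret = drmCommandWriteRead(fd, DRM_VMW_CREATE_SHADER,
				  &carg, sizeof(carg));
	if (ret)
		return ret;

	/* carg.shader_handle now identifies the shader to the kernel. */
	memset(&darg, 0, sizeof(darg));
	darg.handle = carg.shader_handle;
	return drmCommandWrite(fd, DRM_VMW_UNREF_SHADER, &darg, sizeof(darg));
}

The create path corresponds to vmw_gb_shader_init() above: the byte-code buffer becomes the resource's backup buffer, and vmw_gb_shader_bind() later points the shader at the MOB backing that buffer when the resource is validated.
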
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 0fc93398bba2..3bb3331acdaf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | |||
@@ -41,7 +41,6 @@ struct vmw_user_surface { | |||
41 | struct ttm_prime_object prime; | 41 | struct ttm_prime_object prime; |
42 | struct vmw_surface srf; | 42 | struct vmw_surface srf; |
43 | uint32_t size; | 43 | uint32_t size; |
44 | uint32_t backup_handle; | ||
45 | }; | 44 | }; |
46 | 45 | ||
47 | /** | 46 | /** |
@@ -68,6 +67,14 @@ static int vmw_legacy_srf_unbind(struct vmw_resource *res, | |||
68 | struct ttm_validate_buffer *val_buf); | 67 | struct ttm_validate_buffer *val_buf); |
69 | static int vmw_legacy_srf_create(struct vmw_resource *res); | 68 | static int vmw_legacy_srf_create(struct vmw_resource *res); |
70 | static int vmw_legacy_srf_destroy(struct vmw_resource *res); | 69 | static int vmw_legacy_srf_destroy(struct vmw_resource *res); |
70 | static int vmw_gb_surface_create(struct vmw_resource *res); | ||
71 | static int vmw_gb_surface_bind(struct vmw_resource *res, | ||
72 | struct ttm_validate_buffer *val_buf); | ||
73 | static int vmw_gb_surface_unbind(struct vmw_resource *res, | ||
74 | bool readback, | ||
75 | struct ttm_validate_buffer *val_buf); | ||
76 | static int vmw_gb_surface_destroy(struct vmw_resource *res); | ||
77 | |||
71 | 78 | ||
72 | static const struct vmw_user_resource_conv user_surface_conv = { | 79 | static const struct vmw_user_resource_conv user_surface_conv = { |
73 | .object_type = VMW_RES_SURFACE, | 80 | .object_type = VMW_RES_SURFACE, |
@@ -93,6 +100,18 @@ static const struct vmw_res_func vmw_legacy_surface_func = { | |||
93 | .unbind = &vmw_legacy_srf_unbind | 100 | .unbind = &vmw_legacy_srf_unbind |
94 | }; | 101 | }; |
95 | 102 | ||
103 | static const struct vmw_res_func vmw_gb_surface_func = { | ||
104 | .res_type = vmw_res_surface, | ||
105 | .needs_backup = true, | ||
106 | .may_evict = true, | ||
107 | .type_name = "guest backed surfaces", | ||
108 | .backup_placement = &vmw_mob_placement, | ||
109 | .create = vmw_gb_surface_create, | ||
110 | .destroy = vmw_gb_surface_destroy, | ||
111 | .bind = vmw_gb_surface_bind, | ||
112 | .unbind = vmw_gb_surface_unbind | ||
113 | }; | ||
114 | |||
96 | /** | 115 | /** |
97 | * struct vmw_surface_dma - SVGA3D DMA command | 116 | * struct vmw_surface_dma - SVGA3D DMA command |
98 | */ | 117 | */ |
@@ -291,6 +310,11 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) | |||
291 | struct vmw_surface *srf; | 310 | struct vmw_surface *srf; |
292 | void *cmd; | 311 | void *cmd; |
293 | 312 | ||
313 | if (res->func->destroy == vmw_gb_surface_destroy) { | ||
314 | (void) vmw_gb_surface_destroy(res); | ||
315 | return; | ||
316 | } | ||
317 | |||
294 | if (res->id != -1) { | 318 | if (res->id != -1) { |
295 | 319 | ||
296 | cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); | 320 | cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); |
@@ -549,12 +573,15 @@ static int vmw_surface_init(struct vmw_private *dev_priv, | |||
549 | struct vmw_resource *res = &srf->res; | 573 | struct vmw_resource *res = &srf->res; |
550 | 574 | ||
551 | BUG_ON(res_free == NULL); | 575 | BUG_ON(res_free == NULL); |
552 | (void) vmw_3d_resource_inc(dev_priv, false); | 576 | if (!dev_priv->has_mob) |
577 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
553 | ret = vmw_resource_init(dev_priv, res, true, res_free, | 578 | ret = vmw_resource_init(dev_priv, res, true, res_free, |
579 | (dev_priv->has_mob) ? &vmw_gb_surface_func : | ||
554 | &vmw_legacy_surface_func); | 580 | &vmw_legacy_surface_func); |
555 | 581 | ||
556 | if (unlikely(ret != 0)) { | 582 | if (unlikely(ret != 0)) { |
557 | vmw_3d_resource_dec(dev_priv, false); | 583 | if (!dev_priv->has_mob) |
584 | vmw_3d_resource_dec(dev_priv, false); | ||
558 | res_free(res); | 585 | res_free(res); |
559 | return ret; | 586 | return ret; |
560 | } | 587 | } |
@@ -750,7 +777,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
750 | 777 | ||
751 | srf->base_size = *srf->sizes; | 778 | srf->base_size = *srf->sizes; |
752 | srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; | 779 | srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; |
753 | srf->multisample_count = 1; | 780 | srf->multisample_count = 0; |
754 | 781 | ||
755 | cur_bo_offset = 0; | 782 | cur_bo_offset = 0; |
756 | cur_offset = srf->offsets; | 783 | cur_offset = srf->offsets; |
@@ -894,3 +921,435 @@ out_no_reference: | |||
894 | 921 | ||
895 | return ret; | 922 | return ret; |
896 | } | 923 | } |
924 | |||
925 | /** | ||
926 |  * vmw_gb_surface_create - Create a guest-backed surface on the device. | ||
927 |  * | ||
928 |  * @res: Pointer to the struct vmw_resource embedded in the struct | ||
929 |  * vmw_surface to be created. | ||
930 | */ | ||
931 | static int vmw_gb_surface_create(struct vmw_resource *res) | ||
932 | { | ||
933 | struct vmw_private *dev_priv = res->dev_priv; | ||
934 | struct vmw_surface *srf = vmw_res_to_srf(res); | ||
935 | uint32_t cmd_len, submit_len; | ||
936 | int ret; | ||
937 | struct { | ||
938 | SVGA3dCmdHeader header; | ||
939 | SVGA3dCmdDefineGBSurface body; | ||
940 | } *cmd; | ||
941 | |||
942 | if (likely(res->id != -1)) | ||
943 | return 0; | ||
944 | |||
945 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
946 | ret = vmw_resource_alloc_id(res); | ||
947 | if (unlikely(ret != 0)) { | ||
948 | DRM_ERROR("Failed to allocate a surface id.\n"); | ||
949 | goto out_no_id; | ||
950 | } | ||
951 | |||
952 | if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) { | ||
953 | ret = -EBUSY; | ||
954 | goto out_no_fifo; | ||
955 | } | ||
956 | |||
957 | cmd_len = sizeof(cmd->body); | ||
958 | submit_len = sizeof(*cmd); | ||
959 | cmd = vmw_fifo_reserve(dev_priv, submit_len); | ||
960 | if (unlikely(cmd == NULL)) { | ||
961 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
962 | "creation.\n"); | ||
963 | ret = -ENOMEM; | ||
964 | goto out_no_fifo; | ||
965 | } | ||
966 | |||
967 | cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE; | ||
968 | cmd->header.size = cmd_len; | ||
969 | cmd->body.sid = srf->res.id; | ||
970 | cmd->body.surfaceFlags = srf->flags; | ||
971 | cmd->body.format = cpu_to_le32(srf->format); | ||
972 | cmd->body.numMipLevels = srf->mip_levels[0]; | ||
973 | cmd->body.multisampleCount = srf->multisample_count; | ||
974 | cmd->body.autogenFilter = srf->autogen_filter; | ||
975 | cmd->body.size.width = srf->base_size.width; | ||
976 | cmd->body.size.height = srf->base_size.height; | ||
977 | cmd->body.size.depth = srf->base_size.depth; | ||
978 | vmw_fifo_commit(dev_priv, submit_len); | ||
979 | |||
980 | return 0; | ||
981 | |||
982 | out_no_fifo: | ||
983 | vmw_resource_release_id(res); | ||
984 | out_no_id: | ||
985 | vmw_3d_resource_dec(dev_priv, false); | ||
986 | return ret; | ||
987 | } | ||
988 | |||
989 | |||
990 | static int vmw_gb_surface_bind(struct vmw_resource *res, | ||
991 | struct ttm_validate_buffer *val_buf) | ||
992 | { | ||
993 | struct vmw_private *dev_priv = res->dev_priv; | ||
994 | struct { | ||
995 | SVGA3dCmdHeader header; | ||
996 | SVGA3dCmdBindGBSurface body; | ||
997 | } *cmd1; | ||
998 | struct { | ||
999 | SVGA3dCmdHeader header; | ||
1000 | SVGA3dCmdUpdateGBSurface body; | ||
1001 | } *cmd2; | ||
1002 | uint32_t submit_size; | ||
1003 | struct ttm_buffer_object *bo = val_buf->bo; | ||
1004 | |||
1005 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
1006 | |||
1007 | submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0); | ||
1008 | |||
1009 | cmd1 = vmw_fifo_reserve(dev_priv, submit_size); | ||
1010 | if (unlikely(cmd1 == NULL)) { | ||
1011 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
1012 | "binding.\n"); | ||
1013 | return -ENOMEM; | ||
1014 | } | ||
1015 | |||
1016 | cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE; | ||
1017 | cmd1->header.size = sizeof(cmd1->body); | ||
1018 | cmd1->body.sid = res->id; | ||
1019 | cmd1->body.mobid = bo->mem.start; | ||
1020 | if (res->backup_dirty) { | ||
1021 | cmd2 = (void *) &cmd1[1]; | ||
1022 | cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE; | ||
1023 | cmd2->header.size = sizeof(cmd2->body); | ||
1024 | cmd2->body.sid = res->id; | ||
1025 | res->backup_dirty = false; | ||
1026 | } | ||
1027 | vmw_fifo_commit(dev_priv, submit_size); | ||
1028 | |||
1029 | return 0; | ||
1030 | } | ||
1031 | |||
1032 | static int vmw_gb_surface_unbind(struct vmw_resource *res, | ||
1033 | bool readback, | ||
1034 | struct ttm_validate_buffer *val_buf) | ||
1035 | { | ||
1036 | struct vmw_private *dev_priv = res->dev_priv; | ||
1037 | struct ttm_buffer_object *bo = val_buf->bo; | ||
1038 | struct vmw_fence_obj *fence; | ||
1039 | |||
1040 | struct { | ||
1041 | SVGA3dCmdHeader header; | ||
1042 | SVGA3dCmdReadbackGBSurface body; | ||
1043 | } *cmd1; | ||
1044 | struct { | ||
1045 | SVGA3dCmdHeader header; | ||
1046 | SVGA3dCmdInvalidateGBSurface body; | ||
1047 | } *cmd2; | ||
1048 | struct { | ||
1049 | SVGA3dCmdHeader header; | ||
1050 | SVGA3dCmdBindGBSurface body; | ||
1051 | } *cmd3; | ||
1052 | uint32_t submit_size; | ||
1053 | uint8_t *cmd; | ||
1054 | |||
1055 | |||
1056 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
1057 | |||
1058 | submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2)); | ||
1059 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | ||
1060 | if (unlikely(cmd == NULL)) { | ||
1061 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
1062 | "unbinding.\n"); | ||
1063 | return -ENOMEM; | ||
1064 | } | ||
1065 | |||
1066 | if (readback) { | ||
1067 | cmd1 = (void *) cmd; | ||
1068 | cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE; | ||
1069 | cmd1->header.size = sizeof(cmd1->body); | ||
1070 | cmd1->body.sid = res->id; | ||
1071 | cmd3 = (void *) &cmd1[1]; | ||
1072 | } else { | ||
1073 | cmd2 = (void *) cmd; | ||
1074 | cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE; | ||
1075 | cmd2->header.size = sizeof(cmd2->body); | ||
1076 | cmd2->body.sid = res->id; | ||
1077 | cmd3 = (void *) &cmd2[1]; | ||
1078 | } | ||
1079 | |||
1080 | cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE; | ||
1081 | cmd3->header.size = sizeof(cmd3->body); | ||
1082 | cmd3->body.sid = res->id; | ||
1083 | cmd3->body.mobid = SVGA3D_INVALID_ID; | ||
1084 | |||
1085 | vmw_fifo_commit(dev_priv, submit_size); | ||
1086 | |||
1087 | /* | ||
1088 | * Create a fence object and fence the backup buffer. | ||
1089 | */ | ||
1090 | |||
1091 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, | ||
1092 | &fence, NULL); | ||
1093 | |||
1094 | vmw_fence_single_bo(val_buf->bo, fence); | ||
1095 | |||
1096 | if (likely(fence != NULL)) | ||
1097 | vmw_fence_obj_unreference(&fence); | ||
1098 | |||
1099 | return 0; | ||
1100 | } | ||
1101 | |||
1102 | static int vmw_gb_surface_destroy(struct vmw_resource *res) | ||
1103 | { | ||
1104 | struct vmw_private *dev_priv = res->dev_priv; | ||
1105 | struct { | ||
1106 | SVGA3dCmdHeader header; | ||
1107 | SVGA3dCmdDestroyGBSurface body; | ||
1108 | } *cmd; | ||
1109 | |||
1110 | if (likely(res->id == -1)) | ||
1111 | return 0; | ||
1112 | |||
1113 | mutex_lock(&dev_priv->binding_mutex); | ||
1114 | vmw_context_binding_res_list_kill(&res->binding_head); | ||
1115 | |||
1116 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
1117 | if (unlikely(cmd == NULL)) { | ||
1118 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
1119 | "destruction.\n"); | ||
1120 | return -ENOMEM; | ||
1121 | } | ||
1122 | |||
1123 | cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE; | ||
1124 | cmd->header.size = sizeof(cmd->body); | ||
1125 | cmd->body.sid = res->id; | ||
1126 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
1127 | mutex_unlock(&dev_priv->binding_mutex); | ||
1128 | vmw_resource_release_id(res); | ||
1129 | vmw_3d_resource_dec(dev_priv, false); | ||
1130 | |||
1131 | return 0; | ||
1132 | } | ||
1133 | |||
1134 | /** | ||
1135 | * vmw_gb_surface_define_ioctl - Ioctl function implementing | ||
1136 | * the user surface define functionality. | ||
1137 | * | ||
1138 | * @dev: Pointer to a struct drm_device. | ||
1139 | * @data: Pointer to data copied from / to user-space. | ||
1140 | * @file_priv: Pointer to a drm file private structure. | ||
1141 | */ | ||
1142 | int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | ||
1143 | struct drm_file *file_priv) | ||
1144 | { | ||
1145 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
1146 | struct vmw_user_surface *user_srf; | ||
1147 | struct vmw_surface *srf; | ||
1148 | struct vmw_resource *res; | ||
1149 | struct vmw_resource *tmp; | ||
1150 | union drm_vmw_gb_surface_create_arg *arg = | ||
1151 | (union drm_vmw_gb_surface_create_arg *)data; | ||
1152 | struct drm_vmw_gb_surface_create_req *req = &arg->req; | ||
1153 | struct drm_vmw_gb_surface_create_rep *rep = &arg->rep; | ||
1154 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
1155 | int ret; | ||
1156 | uint32_t size; | ||
1157 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
1158 | const struct svga3d_surface_desc *desc; | ||
1159 | uint32_t backup_handle; | ||
1160 | |||
1161 | if (unlikely(vmw_user_surface_size == 0)) | ||
1162 | vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + | ||
1163 | 128; | ||
1164 | |||
1165 | size = vmw_user_surface_size + 128; | ||
1166 | |||
1167 | desc = svga3dsurface_get_desc(req->format); | ||
1168 | if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) { | ||
1169 | DRM_ERROR("Invalid surface format for surface creation.\n"); | ||
1170 | return -EINVAL; | ||
1171 | } | ||
1172 | |||
1173 | ret = ttm_read_lock(&vmaster->lock, true); | ||
1174 | if (unlikely(ret != 0)) | ||
1175 | return ret; | ||
1176 | |||
1177 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), | ||
1178 | size, false, true); | ||
1179 | if (unlikely(ret != 0)) { | ||
1180 | if (ret != -ERESTARTSYS) | ||
1181 | DRM_ERROR("Out of graphics memory for surface" | ||
1182 | " creation.\n"); | ||
1183 | goto out_unlock; | ||
1184 | } | ||
1185 | |||
1186 | user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); | ||
1187 | if (unlikely(user_srf == NULL)) { | ||
1188 | ret = -ENOMEM; | ||
1189 | goto out_no_user_srf; | ||
1190 | } | ||
1191 | |||
1192 | srf = &user_srf->srf; | ||
1193 | res = &srf->res; | ||
1194 | |||
1195 | srf->flags = req->svga3d_flags; | ||
1196 | srf->format = req->format; | ||
1197 | srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout; | ||
1198 | srf->mip_levels[0] = req->mip_levels; | ||
1199 | srf->num_sizes = 1; | ||
1200 | srf->sizes = NULL; | ||
1201 | srf->offsets = NULL; | ||
1202 | user_srf->size = size; | ||
1203 | srf->base_size = req->base_size; | ||
1204 | srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; | ||
1205 | srf->multisample_count = req->multisample_count; | ||
1206 | res->backup_size = svga3dsurface_get_serialized_size | ||
1207 | (srf->format, srf->base_size, srf->mip_levels[0], | ||
1208 | srf->flags & SVGA3D_SURFACE_CUBEMAP); | ||
1209 | |||
1210 | user_srf->prime.base.shareable = false; | ||
1211 | user_srf->prime.base.tfile = NULL; | ||
1212 | |||
1213 | /** | ||
1214 | * From this point, the generic resource management functions | ||
1215 | * destroy the object on failure. | ||
1216 | */ | ||
1217 | |||
1218 | ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); | ||
1219 | if (unlikely(ret != 0)) | ||
1220 | goto out_unlock; | ||
1221 | |||
1222 | if (req->buffer_handle != SVGA3D_INVALID_ID) { | ||
1223 | ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, | ||
1224 | &res->backup); | ||
1225 | } else if (req->drm_surface_flags & | ||
1226 | drm_vmw_surface_flag_create_buffer) | ||
1227 | ret = vmw_user_dmabuf_alloc(dev_priv, tfile, | ||
1228 | res->backup_size, | ||
1229 | req->drm_surface_flags & | ||
1230 | drm_vmw_surface_flag_shareable, | ||
1231 | &backup_handle, | ||
1232 | &res->backup); | ||
1233 | |||
1234 | if (unlikely(ret != 0)) { | ||
1235 | vmw_resource_unreference(&res); | ||
1236 | goto out_unlock; | ||
1237 | } | ||
1238 | |||
1239 | tmp = vmw_resource_reference(&srf->res); | ||
1240 | ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, | ||
1241 | req->drm_surface_flags & | ||
1242 | drm_vmw_surface_flag_shareable, | ||
1243 | VMW_RES_SURFACE, | ||
1244 | &vmw_user_surface_base_release, NULL); | ||
1245 | |||
1246 | if (unlikely(ret != 0)) { | ||
1247 | vmw_resource_unreference(&tmp); | ||
1248 | vmw_resource_unreference(&res); | ||
1249 | goto out_unlock; | ||
1250 | } | ||
1251 | |||
1252 | rep->handle = user_srf->prime.base.hash.key; | ||
1253 | rep->backup_size = res->backup_size; | ||
1254 | if (res->backup) { | ||
1255 | rep->buffer_map_handle = | ||
1256 | drm_vma_node_offset_addr(&res->backup->base.vma_node); | ||
1257 | rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE; | ||
1258 | rep->buffer_handle = backup_handle; | ||
1259 | } else { | ||
1260 | rep->buffer_map_handle = 0; | ||
1261 | rep->buffer_size = 0; | ||
1262 | rep->buffer_handle = SVGA3D_INVALID_ID; | ||
1263 | } | ||
1264 | |||
1265 | vmw_resource_unreference(&res); | ||
1266 | |||
1267 | ttm_read_unlock(&vmaster->lock); | ||
1268 | return 0; | ||
1269 | out_no_user_srf: | ||
1270 | ttm_mem_global_free(vmw_mem_glob(dev_priv), size); | ||
1271 | out_unlock: | ||
1272 | ttm_read_unlock(&vmaster->lock); | ||
1273 | return ret; | ||
1274 | } | ||
1275 | |||
1276 | /** | ||
1277 | * vmw_gb_surface_reference_ioctl - Ioctl function implementing | ||
1278 | * the user surface reference functionality. | ||
1279 | * | ||
1280 | * @dev: Pointer to a struct drm_device. | ||
1281 | * @data: Pointer to data copied from / to user-space. | ||
1282 | * @file_priv: Pointer to a drm file private structure. | ||
1283 | */ | ||
1284 | int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, | ||
1285 | struct drm_file *file_priv) | ||
1286 | { | ||
1287 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
1288 | union drm_vmw_gb_surface_reference_arg *arg = | ||
1289 | (union drm_vmw_gb_surface_reference_arg *)data; | ||
1290 | struct drm_vmw_surface_arg *req = &arg->req; | ||
1291 | struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep; | ||
1292 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
1293 | struct vmw_surface *srf; | ||
1294 | struct vmw_user_surface *user_srf; | ||
1295 | struct ttm_base_object *base; | ||
1296 | uint32_t backup_handle; | ||
1297 | int ret = -EINVAL; | ||
1298 | |||
1299 | base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid); | ||
1300 | if (unlikely(base == NULL)) { | ||
1301 | DRM_ERROR("Could not find surface to reference.\n"); | ||
1302 | return -EINVAL; | ||
1303 | } | ||
1304 | |||
1305 | if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) | ||
1306 | goto out_bad_resource; | ||
1307 | |||
1308 | user_srf = container_of(base, struct vmw_user_surface, prime.base); | ||
1309 | srf = &user_srf->srf; | ||
1310 | if (srf->res.backup == NULL) { | ||
1311 | DRM_ERROR("Shared GB surface is missing a backup buffer.\n"); | ||
1312 | goto out_bad_resource; | ||
1313 | } | ||
1314 | |||
1315 | ret = ttm_ref_object_add(tfile, &user_srf->prime.base, | ||
1316 | TTM_REF_USAGE, NULL); | ||
1317 | if (unlikely(ret != 0)) { | ||
1318 | DRM_ERROR("Could not add a reference to a GB surface.\n"); | ||
1319 | goto out_bad_resource; | ||
1320 | } | ||
1321 | |||
1322 | mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ | ||
1323 | ret = vmw_user_dmabuf_reference(tfile, srf->res.backup, | ||
1324 | &backup_handle); | ||
1325 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
1326 | |||
1327 | if (unlikely(ret != 0)) { | ||
1328 | DRM_ERROR("Could not add a reference to a GB surface " | ||
1329 | "backup buffer.\n"); | ||
1330 | (void) ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, | ||
1331 | req->sid, | ||
1332 | TTM_REF_USAGE); | ||
1333 | goto out_bad_resource; | ||
1334 | } | ||
1335 | |||
1336 | rep->creq.svga3d_flags = srf->flags; | ||
1337 | rep->creq.format = srf->format; | ||
1338 | rep->creq.mip_levels = srf->mip_levels[0]; | ||
1339 | rep->creq.drm_surface_flags = 0; | ||
1340 | rep->creq.multisample_count = srf->multisample_count; | ||
1341 | rep->creq.autogen_filter = srf->autogen_filter; | ||
1342 | rep->creq.buffer_handle = backup_handle; | ||
1343 | rep->creq.base_size = srf->base_size; | ||
1344 | rep->crep.handle = user_srf->prime.base.hash.key; | ||
1345 | rep->crep.backup_size = srf->res.backup_size; | ||
1346 | rep->crep.buffer_handle = backup_handle; | ||
1347 | rep->crep.buffer_map_handle = | ||
1348 | drm_vma_node_offset_addr(&srf->res.backup->base.vma_node); | ||
1349 | rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE; | ||
1350 | |||
1351 | out_bad_resource: | ||
1352 | ttm_base_object_unref(&base); | ||
1353 | |||
1354 | return ret; | ||
1355 | } | ||
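
The guest-backed surface path above is driven from user space through the DRM_VMW_GB_SURFACE_CREATE ioctl defined in the uapi header below. A minimal sketch of how a client might create such a surface, letting the kernel allocate the backup buffer, and then map that buffer for CPU access; it assumes libdrm, and the helper name, format value and sizes are illustrative only.

#include <string.h>
#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"	/* the uapi header extended by this patch */

/* Illustrative helper, not part of the patch. */
static void *create_gb_surface(int fd, uint32_t *handle_out)
{
	union drm_vmw_gb_surface_create_arg arg;
	struct drm_vmw_gb_surface_create_req *req = &arg.req;

	memset(&arg, 0, sizeof(arg));
	req->svga3d_flags = 0;
	req->format = 1;			/* SVGA3D_X8R8G8B8 */
	req->mip_levels = 1;
	req->drm_surface_flags = drm_vmw_surface_flag_create_buffer;
	req->buffer_handle = ~0u;		/* SVGA3D_INVALID_ID: no existing buffer */
	req->base_size.width = 256;
	req->base_size.height = 256;
	req->base_size.depth = 1;

	if (drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE,
				&arg, sizeof(arg)))
		return NULL;

	/* req and rep overlay each other in the union; rep is valid now. */
	*handle_out = arg.rep.handle;

	/* buffer_map_handle is an mmap offset into the device file. */
	return mmap(NULL, arg.rep.buffer_size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, arg.rep.buffer_map_handle);
}

Given a shareable surface handle, another client can pass it to DRM_VMW_GB_SURFACE_REF (handled by vmw_gb_surface_reference_ioctl() above) to obtain the same creation parameters plus a handle to the backup buffer.
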
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index bcb0912afe7a..9971c560ed9a 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h | |||
@@ -28,6 +28,10 @@ | |||
28 | #ifndef __VMWGFX_DRM_H__ | 28 | #ifndef __VMWGFX_DRM_H__ |
29 | #define __VMWGFX_DRM_H__ | 29 | #define __VMWGFX_DRM_H__ |
30 | 30 | ||
31 | #ifndef __KERNEL__ | ||
32 | #include <drm.h> | ||
33 | #endif | ||
34 | |||
31 | #define DRM_VMW_MAX_SURFACE_FACES 6 | 35 | #define DRM_VMW_MAX_SURFACE_FACES 6 |
32 | #define DRM_VMW_MAX_MIP_LEVELS 24 | 36 | #define DRM_VMW_MAX_MIP_LEVELS 24 |
33 | 37 | ||
@@ -55,6 +59,11 @@ | |||
55 | #define DRM_VMW_PRESENT 18 | 59 | #define DRM_VMW_PRESENT 18 |
56 | #define DRM_VMW_PRESENT_READBACK 19 | 60 | #define DRM_VMW_PRESENT_READBACK 19 |
57 | #define DRM_VMW_UPDATE_LAYOUT 20 | 61 | #define DRM_VMW_UPDATE_LAYOUT 20 |
62 | #define DRM_VMW_CREATE_SHADER 21 | ||
63 | #define DRM_VMW_UNREF_SHADER 22 | ||
64 | #define DRM_VMW_GB_SURFACE_CREATE 23 | ||
65 | #define DRM_VMW_GB_SURFACE_REF 24 | ||
66 | #define DRM_VMW_SYNCCPU 25 | ||
58 | 67 | ||
59 | /*************************************************************************/ | 68 | /*************************************************************************/ |
60 | /** | 69 | /** |
@@ -75,6 +84,9 @@ | |||
75 | #define DRM_VMW_PARAM_FIFO_CAPS 4 | 84 | #define DRM_VMW_PARAM_FIFO_CAPS 4 |
76 | #define DRM_VMW_PARAM_MAX_FB_SIZE 5 | 85 | #define DRM_VMW_PARAM_MAX_FB_SIZE 5 |
77 | #define DRM_VMW_PARAM_FIFO_HW_VERSION 6 | 86 | #define DRM_VMW_PARAM_FIFO_HW_VERSION 6 |
87 | #define DRM_VMW_PARAM_MAX_SURF_MEMORY 7 | ||
88 | #define DRM_VMW_PARAM_3D_CAPS_SIZE 8 | ||
89 | #define DRM_VMW_PARAM_MAX_MOB_MEMORY 9 | ||
78 | 90 | ||
79 | /** | 91 | /** |
80 | * struct drm_vmw_getparam_arg | 92 | * struct drm_vmw_getparam_arg |
@@ -787,4 +799,253 @@ struct drm_vmw_update_layout_arg { | |||
787 | uint64_t rects; | 799 | uint64_t rects; |
788 | }; | 800 | }; |
789 | 801 | ||
802 | |||
803 | /*************************************************************************/ | ||
804 | /** | ||
805 | * DRM_VMW_CREATE_SHADER - Create shader | ||
806 | * | ||
807 | * Creates a shader and optionally binds it to a dma buffer containing | ||
808 | * the shader byte-code. | ||
809 | */ | ||
810 | |||
811 | /** | ||
812 | * enum drm_vmw_shader_type - Shader types | ||
813 | */ | ||
814 | enum drm_vmw_shader_type { | ||
815 | drm_vmw_shader_type_vs = 0, | ||
816 | drm_vmw_shader_type_ps, | ||
817 | drm_vmw_shader_type_gs | ||
818 | }; | ||
819 | |||
820 | |||
821 | /** | ||
822 | * struct drm_vmw_shader_create_arg | ||
823 | * | ||
824 | * @shader_type: Shader type of the shader to create. | ||
825 |  * @size: Size of the byte-code in bytes. | ||
826 |  * @buffer_handle: Buffer handle identifying the buffer containing the | ||
827 |  * shader byte-code. | ||
828 |  * @shader_handle: On successful completion contains a handle that | ||
829 |  * can be used to subsequently identify the shader. | ||
830 |  * @offset: Offset in bytes into the buffer given by @buffer_handle, | ||
831 |  * where the shader byte-code starts. | ||
832 | * | ||
833 | * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl. | ||
834 | */ | ||
835 | struct drm_vmw_shader_create_arg { | ||
836 | enum drm_vmw_shader_type shader_type; | ||
837 | uint32_t size; | ||
838 | uint32_t buffer_handle; | ||
839 | uint32_t shader_handle; | ||
840 | uint64_t offset; | ||
841 | }; | ||
842 | |||
843 | /*************************************************************************/ | ||
844 | /** | ||
845 | * DRM_VMW_UNREF_SHADER - Unreferences a shader | ||
846 | * | ||
847 |  * Drops a user-space reference to a shader; the shader itself is | ||
848 |  * destroyed when the last reference is released. | ||
849 | */ | ||
850 | |||
851 | /** | ||
852 | * struct drm_vmw_shader_arg | ||
853 | * | ||
854 | * @handle: Handle identifying the shader to destroy. | ||
855 | * | ||
856 | * Input argument to the DRM_VMW_UNREF_SHADER ioctl. | ||
857 | */ | ||
858 | struct drm_vmw_shader_arg { | ||
859 | uint32_t handle; | ||
860 | uint32_t pad64; | ||
861 | }; | ||
862 | |||
863 | /*************************************************************************/ | ||
864 | /** | ||
865 | * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface. | ||
866 | * | ||
867 | * Allocates a surface handle and queues a create surface command | ||
868 |  * for the host on the first use of the surface. The returned handle can | ||
869 |  * be used as the surface ID in commands referencing the surface. | ||
870 | */ | ||
871 | |||
872 | /** | ||
873 | * enum drm_vmw_surface_flags | ||
874 | * | ||
875 | * @drm_vmw_surface_flag_shareable: Whether the surface is shareable | ||
876 | * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout | ||
877 | * surface. | ||
878 | * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is | ||
879 | * given. | ||
880 | */ | ||
881 | enum drm_vmw_surface_flags { | ||
882 | drm_vmw_surface_flag_shareable = (1 << 0), | ||
883 | drm_vmw_surface_flag_scanout = (1 << 1), | ||
884 | drm_vmw_surface_flag_create_buffer = (1 << 2) | ||
885 | }; | ||
886 | |||
887 | /** | ||
888 | * struct drm_vmw_gb_surface_create_req | ||
889 | * | ||
890 | * @svga3d_flags: SVGA3d surface flags for the device. | ||
891 | * @format: SVGA3d format. | ||
892 |  * @mip_levels: Number of mip levels for all faces. | ||
893 |  * @drm_surface_flags: Flags as described above. | ||
894 |  * @multisample_count: Future use. Set to 0. | ||
895 |  * @autogen_filter: Future use. Set to 0. | ||
896 |  * @buffer_handle: Buffer handle of backup buffer. SVGA3D_INVALID_ID | ||
897 |  * if none. | ||
898 |  * @base_size: Size of the base mip level for all faces. | ||
899 | * | ||
900 | * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl. | ||
901 | * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl. | ||
902 | */ | ||
903 | struct drm_vmw_gb_surface_create_req { | ||
904 | uint32_t svga3d_flags; | ||
905 | uint32_t format; | ||
906 | uint32_t mip_levels; | ||
907 | enum drm_vmw_surface_flags drm_surface_flags; | ||
908 | uint32_t multisample_count; | ||
909 | uint32_t autogen_filter; | ||
910 | uint32_t buffer_handle; | ||
911 | uint32_t pad64; | ||
912 | struct drm_vmw_size base_size; | ||
913 | }; | ||
914 | |||
915 | /** | ||
916 | * struct drm_vmw_gb_surface_create_rep | ||
917 | * | ||
918 | * @handle: Surface handle. | ||
919 | * @backup_size: Size of backup buffers for this surface. | ||
920 | * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none. | ||
921 | * @buffer_size: Actual size of the buffer identified by | ||
922 | * @buffer_handle | ||
923 | * @buffer_map_handle: Offset into device address space for the buffer | ||
924 | * identified by @buffer_handle. | ||
925 | * | ||
926 | * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl. | ||
927 | * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl. | ||
928 | */ | ||
929 | struct drm_vmw_gb_surface_create_rep { | ||
930 | uint32_t handle; | ||
931 | uint32_t backup_size; | ||
932 | uint32_t buffer_handle; | ||
933 | uint32_t buffer_size; | ||
934 | uint64_t buffer_map_handle; | ||
935 | }; | ||
936 | |||
937 | /** | ||
938 | * union drm_vmw_gb_surface_create_arg | ||
939 | * | ||
940 | * @req: Input argument as described above. | ||
941 | * @rep: Output argument as described above. | ||
942 | * | ||
943 | * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl. | ||
944 | */ | ||
945 | union drm_vmw_gb_surface_create_arg { | ||
946 | struct drm_vmw_gb_surface_create_rep rep; | ||
947 | struct drm_vmw_gb_surface_create_req req; | ||
948 | }; | ||
949 | |||
950 | /*************************************************************************/ | ||
951 | /** | ||
952 | * DRM_VMW_GB_SURFACE_REF - Reference a host surface. | ||
953 | * | ||
954 | * Puts a reference on a host surface with a given handle, as previously | ||
955 | * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl. | ||
956 | * A reference will make sure the surface isn't destroyed while we hold | ||
957 | * it and will allow the calling client to use the surface handle in | ||
958 | * the command stream. | ||
959 | * | ||
960 | * On successful return, the Ioctl returns the surface information given | ||
961 | * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl. | ||
962 | */ | ||
963 | |||
964 | /** | ||
965 | * struct drm_vmw_gb_surface_reference_arg | ||
966 | * | ||
967 | * @creq: The data used as input when the surface was created, as described | ||
968 | * above at "struct drm_vmw_gb_surface_create_req" | ||
969 | * @crep: Additional data output when the surface was created, as described | ||
970 | * above at "struct drm_vmw_gb_surface_create_rep" | ||
971 | * | ||
972 | * Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl. | ||
973 | */ | ||
974 | struct drm_vmw_gb_surface_ref_rep { | ||
975 | struct drm_vmw_gb_surface_create_req creq; | ||
976 | struct drm_vmw_gb_surface_create_rep crep; | ||
977 | }; | ||
978 | |||
979 | /** | ||
980 | * union drm_vmw_gb_surface_reference_arg | ||
981 | * | ||
982 | * @req: Input data as described above at "struct drm_vmw_surface_arg" | ||
983 | * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep" | ||
984 | * | ||
985 | * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl. | ||
986 | */ | ||
987 | union drm_vmw_gb_surface_reference_arg { | ||
988 | struct drm_vmw_gb_surface_ref_rep rep; | ||
989 | struct drm_vmw_surface_arg req; | ||
990 | }; | ||
991 | |||
992 | |||
993 | /*************************************************************************/ | ||
994 | /** | ||
995 | * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access. | ||
996 | * | ||
997 | * Idles any previously submitted GPU operations on the buffer and | ||
998 | * by default blocks command submissions that reference the buffer. | ||
999 | * If the file descriptor used to grab a blocking CPU sync is closed, the | ||
1000 |  * CPU sync is released. | ||
1001 | * The flags argument indicates how the grab / release operation should be | ||
1002 | * performed: | ||
1003 | */ | ||
1004 | |||
1005 | /** | ||
1006 | * enum drm_vmw_synccpu_flags - Synccpu flags: | ||
1007 | * | ||
1008 | * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a | ||
1009 |  * hint to the kernel to allow command submissions that reference the buffer | ||
1010 |  * for read-only access. | ||
1011 | * @drm_vmw_synccpu_write: Sync for write. Block all command submissions | ||
1012 | * referencing this buffer. | ||
1013 |  * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return | ||
1014 | * -EBUSY should the buffer be busy. | ||
1015 | * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer | ||
1016 | * while the buffer is synced for CPU. This is similar to the GEM bo idle | ||
1017 | * behavior. | ||
1018 | */ | ||
1019 | enum drm_vmw_synccpu_flags { | ||
1020 | drm_vmw_synccpu_read = (1 << 0), | ||
1021 | drm_vmw_synccpu_write = (1 << 1), | ||
1022 | drm_vmw_synccpu_dontblock = (1 << 2), | ||
1023 | drm_vmw_synccpu_allow_cs = (1 << 3) | ||
1024 | }; | ||
1025 | |||
1026 | /** | ||
1027 | * enum drm_vmw_synccpu_op - Synccpu operations: | ||
1028 | * | ||
1029 | * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations | ||
1030 | * @drm_vmw_synccpu_release: Release a previous grab. | ||
1031 | */ | ||
1032 | enum drm_vmw_synccpu_op { | ||
1033 | drm_vmw_synccpu_grab, | ||
1034 | drm_vmw_synccpu_release | ||
1035 | }; | ||
1036 | |||
1037 | /** | ||
1038 | * struct drm_vmw_synccpu_arg | ||
1039 | * | ||
1040 | * @op: The synccpu operation as described above. | ||
1041 | * @handle: Handle identifying the buffer object. | ||
1042 | * @flags: Flags as described above. | ||
1043 | */ | ||
1044 | struct drm_vmw_synccpu_arg { | ||
1045 | enum drm_vmw_synccpu_op op; | ||
1046 | enum drm_vmw_synccpu_flags flags; | ||
1047 | uint32_t handle; | ||
1048 | uint32_t pad64; | ||
1049 | }; | ||
1050 | |||
790 | #endif | 1051 | #endif |
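
The synccpu argument structure above pairs with a grab/release protocol: grab the buffer before touching it from the CPU, release it when done. A minimal user-space sketch of that sequence, assuming libdrm; the helper name and include path are illustrative, and the kernel-side handler for DRM_VMW_SYNCCPU is not shown in this excerpt.

#include <string.h>
#include <stdint.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"	/* the uapi header extended by this patch */

/* Illustrative helper, not part of the patch. */
static int vmw_synccpu(int fd, uint32_t bo_handle, int grab)
{
	struct drm_vmw_synccpu_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.op = grab ? drm_vmw_synccpu_grab : drm_vmw_synccpu_release;
	arg.flags = drm_vmw_synccpu_write;	/* block conflicting command submission */
	arg.handle = bo_handle;

	/* Grab idles pending GPU work on the buffer; release undoes the grab. */
	return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}

Per the comment block above, closing the file descriptor that performed a blocking grab releases the CPU sync automatically.
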