Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
21 files changed, 5435 insertions, 429 deletions
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index 9f8b690bcf52..458cdf6d81e8 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile | |||
| @@ -6,6 +6,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ | |||
| 6 | vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ | 6 | vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ |
| 7 | vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ | 7 | vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ |
| 8 | vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \ | 8 | vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \ |
| 9 | vmwgfx_surface.o vmwgfx_prime.o | 9 | vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o |
| 10 | 10 | ||
| 11 | obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o | 11 | obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o |
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h index d0e085ee8249..f58dc7dd15c5 100644 --- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h +++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h | |||
| @@ -34,6 +34,8 @@ | |||
| 34 | 34 | ||
| 35 | #include "svga_reg.h" | 35 | #include "svga_reg.h" |
| 36 | 36 | ||
| 37 | typedef uint32 PPN; | ||
| 38 | typedef __le64 PPN64; | ||
| 37 | 39 | ||
| 38 | /* | 40 | /* |
| 39 | * 3D Hardware Version | 41 | * 3D Hardware Version |
| @@ -71,6 +73,9 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */ | |||
| 71 | #define SVGA3D_MAX_CONTEXT_IDS 256 | 73 | #define SVGA3D_MAX_CONTEXT_IDS 256 |
| 72 | #define SVGA3D_MAX_SURFACE_IDS (32 * 1024) | 74 | #define SVGA3D_MAX_SURFACE_IDS (32 * 1024) |
| 73 | 75 | ||
| 76 | #define SVGA3D_NUM_TEXTURE_UNITS 32 | ||
| 77 | #define SVGA3D_NUM_LIGHTS 8 | ||
| 78 | |||
| 74 | /* | 79 | /* |
| 75 | * Surface formats. | 80 | * Surface formats. |
| 76 | * | 81 | * |
| @@ -81,6 +86,7 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */ | |||
| 81 | */ | 86 | */ |
| 82 | 87 | ||
| 83 | typedef enum SVGA3dSurfaceFormat { | 88 | typedef enum SVGA3dSurfaceFormat { |
| 89 | SVGA3D_FORMAT_MIN = 0, | ||
| 84 | SVGA3D_FORMAT_INVALID = 0, | 90 | SVGA3D_FORMAT_INVALID = 0, |
| 85 | 91 | ||
| 86 | SVGA3D_X8R8G8B8 = 1, | 92 | SVGA3D_X8R8G8B8 = 1, |
| @@ -134,12 +140,6 @@ typedef enum SVGA3dSurfaceFormat { | |||
| 134 | SVGA3D_RG_S10E5 = 35, | 140 | SVGA3D_RG_S10E5 = 35, |
| 135 | SVGA3D_RG_S23E8 = 36, | 141 | SVGA3D_RG_S23E8 = 36, |
| 136 | 142 | ||
| 137 | /* | ||
| 138 | * Any surface can be used as a buffer object, but SVGA3D_BUFFER is | ||
| 139 | * the most efficient format to use when creating new surfaces | ||
| 140 | * expressly for index or vertex data. | ||
| 141 | */ | ||
| 142 | |||
| 143 | SVGA3D_BUFFER = 37, | 143 | SVGA3D_BUFFER = 37, |
| 144 | 144 | ||
| 145 | SVGA3D_Z_D24X8 = 38, | 145 | SVGA3D_Z_D24X8 = 38, |
| @@ -159,15 +159,109 @@ typedef enum SVGA3dSurfaceFormat { | |||
| 159 | /* Video format with alpha */ | 159 | /* Video format with alpha */ |
| 160 | SVGA3D_AYUV = 45, | 160 | SVGA3D_AYUV = 45, |
| 161 | 161 | ||
| 162 | SVGA3D_R32G32B32A32_TYPELESS = 46, | ||
| 163 | SVGA3D_R32G32B32A32_FLOAT = 25, | ||
| 164 | SVGA3D_R32G32B32A32_UINT = 47, | ||
| 165 | SVGA3D_R32G32B32A32_SINT = 48, | ||
| 166 | SVGA3D_R32G32B32_TYPELESS = 49, | ||
| 167 | SVGA3D_R32G32B32_FLOAT = 50, | ||
| 168 | SVGA3D_R32G32B32_UINT = 51, | ||
| 169 | SVGA3D_R32G32B32_SINT = 52, | ||
| 170 | SVGA3D_R16G16B16A16_TYPELESS = 53, | ||
| 171 | SVGA3D_R16G16B16A16_FLOAT = 24, | ||
| 172 | SVGA3D_R16G16B16A16_UNORM = 41, | ||
| 173 | SVGA3D_R16G16B16A16_UINT = 54, | ||
| 174 | SVGA3D_R16G16B16A16_SNORM = 55, | ||
| 175 | SVGA3D_R16G16B16A16_SINT = 56, | ||
| 176 | SVGA3D_R32G32_TYPELESS = 57, | ||
| 177 | SVGA3D_R32G32_FLOAT = 36, | ||
| 178 | SVGA3D_R32G32_UINT = 58, | ||
| 179 | SVGA3D_R32G32_SINT = 59, | ||
| 180 | SVGA3D_R32G8X24_TYPELESS = 60, | ||
| 181 | SVGA3D_D32_FLOAT_S8X24_UINT = 61, | ||
| 182 | SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62, | ||
| 183 | SVGA3D_X32_TYPELESS_G8X24_UINT = 63, | ||
| 184 | SVGA3D_R10G10B10A2_TYPELESS = 64, | ||
| 185 | SVGA3D_R10G10B10A2_UNORM = 26, | ||
| 186 | SVGA3D_R10G10B10A2_UINT = 65, | ||
| 187 | SVGA3D_R11G11B10_FLOAT = 66, | ||
| 188 | SVGA3D_R8G8B8A8_TYPELESS = 67, | ||
| 189 | SVGA3D_R8G8B8A8_UNORM = 68, | ||
| 190 | SVGA3D_R8G8B8A8_UNORM_SRGB = 69, | ||
| 191 | SVGA3D_R8G8B8A8_UINT = 70, | ||
| 192 | SVGA3D_R8G8B8A8_SNORM = 28, | ||
| 193 | SVGA3D_R8G8B8A8_SINT = 71, | ||
| 194 | SVGA3D_R16G16_TYPELESS = 72, | ||
| 195 | SVGA3D_R16G16_FLOAT = 35, | ||
| 196 | SVGA3D_R16G16_UNORM = 40, | ||
| 197 | SVGA3D_R16G16_UINT = 73, | ||
| 198 | SVGA3D_R16G16_SNORM = 39, | ||
| 199 | SVGA3D_R16G16_SINT = 74, | ||
| 200 | SVGA3D_R32_TYPELESS = 75, | ||
| 201 | SVGA3D_D32_FLOAT = 76, | ||
| 202 | SVGA3D_R32_FLOAT = 34, | ||
| 203 | SVGA3D_R32_UINT = 77, | ||
| 204 | SVGA3D_R32_SINT = 78, | ||
| 205 | SVGA3D_R24G8_TYPELESS = 79, | ||
| 206 | SVGA3D_D24_UNORM_S8_UINT = 80, | ||
| 207 | SVGA3D_R24_UNORM_X8_TYPELESS = 81, | ||
| 208 | SVGA3D_X24_TYPELESS_G8_UINT = 82, | ||
| 209 | SVGA3D_R8G8_TYPELESS = 83, | ||
| 210 | SVGA3D_R8G8_UNORM = 84, | ||
| 211 | SVGA3D_R8G8_UINT = 85, | ||
| 212 | SVGA3D_R8G8_SNORM = 27, | ||
| 213 | SVGA3D_R8G8_SINT = 86, | ||
| 214 | SVGA3D_R16_TYPELESS = 87, | ||
| 215 | SVGA3D_R16_FLOAT = 33, | ||
| 216 | SVGA3D_D16_UNORM = 8, | ||
| 217 | SVGA3D_R16_UNORM = 88, | ||
| 218 | SVGA3D_R16_UINT = 89, | ||
| 219 | SVGA3D_R16_SNORM = 90, | ||
| 220 | SVGA3D_R16_SINT = 91, | ||
| 221 | SVGA3D_R8_TYPELESS = 92, | ||
| 222 | SVGA3D_R8_UNORM = 93, | ||
| 223 | SVGA3D_R8_UINT = 94, | ||
| 224 | SVGA3D_R8_SNORM = 95, | ||
| 225 | SVGA3D_R8_SINT = 96, | ||
| 226 | SVGA3D_A8_UNORM = 32, | ||
| 227 | SVGA3D_R1_UNORM = 97, | ||
| 228 | SVGA3D_R9G9B9E5_SHAREDEXP = 98, | ||
| 229 | SVGA3D_R8G8_B8G8_UNORM = 99, | ||
| 230 | SVGA3D_G8R8_G8B8_UNORM = 100, | ||
| 231 | SVGA3D_BC1_TYPELESS = 101, | ||
| 232 | SVGA3D_BC1_UNORM = 15, | ||
| 233 | SVGA3D_BC1_UNORM_SRGB = 102, | ||
| 234 | SVGA3D_BC2_TYPELESS = 103, | ||
| 235 | SVGA3D_BC2_UNORM = 17, | ||
| 236 | SVGA3D_BC2_UNORM_SRGB = 104, | ||
| 237 | SVGA3D_BC3_TYPELESS = 105, | ||
| 238 | SVGA3D_BC3_UNORM = 19, | ||
| 239 | SVGA3D_BC3_UNORM_SRGB = 106, | ||
| 240 | SVGA3D_BC4_TYPELESS = 107, | ||
| 162 | SVGA3D_BC4_UNORM = 108, | 241 | SVGA3D_BC4_UNORM = 108, |
| 242 | SVGA3D_BC4_SNORM = 109, | ||
| 243 | SVGA3D_BC5_TYPELESS = 110, | ||
| 163 | SVGA3D_BC5_UNORM = 111, | 244 | SVGA3D_BC5_UNORM = 111, |
| 245 | SVGA3D_BC5_SNORM = 112, | ||
| 246 | SVGA3D_B5G6R5_UNORM = 3, | ||
| 247 | SVGA3D_B5G5R5A1_UNORM = 5, | ||
| 248 | SVGA3D_B8G8R8A8_UNORM = 2, | ||
| 249 | SVGA3D_B8G8R8X8_UNORM = 1, | ||
| 250 | SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113, | ||
| 251 | SVGA3D_B8G8R8A8_TYPELESS = 114, | ||
| 252 | SVGA3D_B8G8R8A8_UNORM_SRGB = 115, | ||
| 253 | SVGA3D_B8G8R8X8_TYPELESS = 116, | ||
| 254 | SVGA3D_B8G8R8X8_UNORM_SRGB = 117, | ||
| 164 | 255 | ||
| 165 | /* Advanced D3D9 depth formats. */ | 256 | /* Advanced D3D9 depth formats. */ |
| 166 | SVGA3D_Z_DF16 = 118, | 257 | SVGA3D_Z_DF16 = 118, |
| 167 | SVGA3D_Z_DF24 = 119, | 258 | SVGA3D_Z_DF24 = 119, |
| 168 | SVGA3D_Z_D24S8_INT = 120, | 259 | SVGA3D_Z_D24S8_INT = 120, |
| 169 | 260 | ||
| 170 | SVGA3D_FORMAT_MAX | 261 | /* Planar video formats. */ |
| 262 | SVGA3D_YV12 = 121, | ||
| 263 | |||
| 264 | SVGA3D_FORMAT_MAX = 122, | ||
| 171 | } SVGA3dSurfaceFormat; | 265 | } SVGA3dSurfaceFormat; |
| 172 | 266 | ||
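The new SVGA3D_FORMAT_MIN alias and the explicit SVGA3D_FORMAT_MAX = 122 bound the enumeration so format values coming from user space can be range-checked. A minimal sketch of such a check (the helper name is illustrative, not part of this patch):

static inline bool svga3d_format_in_range(SVGA3dSurfaceFormat format)
{
	/* SVGA3D_FORMAT_MIN aliases SVGA3D_FORMAT_INVALID (0). */
	return format > SVGA3D_FORMAT_MIN && format < SVGA3D_FORMAT_MAX;
}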
| 173 | typedef uint32 SVGA3dColor; /* a, r, g, b */ | 267 | typedef uint32 SVGA3dColor; /* a, r, g, b */ |
| @@ -957,15 +1051,21 @@ typedef enum { | |||
| 957 | } SVGA3dCubeFace; | 1051 | } SVGA3dCubeFace; |
| 958 | 1052 | ||
| 959 | typedef enum { | 1053 | typedef enum { |
| 1054 | SVGA3D_SHADERTYPE_INVALID = 0, | ||
| 1055 | SVGA3D_SHADERTYPE_MIN = 1, | ||
| 960 | SVGA3D_SHADERTYPE_VS = 1, | 1056 | SVGA3D_SHADERTYPE_VS = 1, |
| 961 | SVGA3D_SHADERTYPE_PS = 2, | 1057 | SVGA3D_SHADERTYPE_PS = 2, |
| 962 | SVGA3D_SHADERTYPE_MAX | 1058 | SVGA3D_SHADERTYPE_MAX = 3, |
| 1059 | SVGA3D_SHADERTYPE_GS = 3, | ||
| 963 | } SVGA3dShaderType; | 1060 | } SVGA3dShaderType; |
| 964 | 1061 | ||
| 1062 | #define SVGA3D_NUM_SHADERTYPE (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN) | ||
| 1063 | |||
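SVGA3D_NUM_SHADERTYPE (currently 2: vertex and pixel shaders) is meant for sizing per-context arrays indexed by shader type. A sketch of that pattern; the structure and helper below are hypothetical, not taken from the patch:

struct vmw_shader_slots {
	uint32 shid[SVGA3D_NUM_SHADERTYPE];	/* indexed by type - SVGA3D_SHADERTYPE_MIN */
};

static void vmw_set_shader_slot(struct vmw_shader_slots *slots,
				SVGA3dShaderType type, uint32 shid)
{
	if (type >= SVGA3D_SHADERTYPE_MIN && type < SVGA3D_SHADERTYPE_MAX)
		slots->shid[type - SVGA3D_SHADERTYPE_MIN] = shid;
}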
| 965 | typedef enum { | 1064 | typedef enum { |
| 966 | SVGA3D_CONST_TYPE_FLOAT = 0, | 1065 | SVGA3D_CONST_TYPE_FLOAT = 0, |
| 967 | SVGA3D_CONST_TYPE_INT = 1, | 1066 | SVGA3D_CONST_TYPE_INT = 1, |
| 968 | SVGA3D_CONST_TYPE_BOOL = 2, | 1067 | SVGA3D_CONST_TYPE_BOOL = 2, |
| 1068 | SVGA3D_CONST_TYPE_MAX | ||
| 969 | } SVGA3dShaderConstType; | 1069 | } SVGA3dShaderConstType; |
| 970 | 1070 | ||
| 971 | #define SVGA3D_MAX_SURFACE_FACES 6 | 1071 | #define SVGA3D_MAX_SURFACE_FACES 6 |
| @@ -1056,9 +1156,84 @@ typedef enum { | |||
| 1056 | #define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31 | 1156 | #define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31 |
| 1057 | #define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40 | 1157 | #define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40 |
| 1058 | #define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41 | 1158 | #define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41 |
| 1059 | #define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 42 | 1159 | #define SVGA_3D_CMD_SCREEN_DMA 1082 |
| 1060 | 1160 | #define SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE 1083 | |
| 1061 | #define SVGA_3D_CMD_FUTURE_MAX 2000 | 1161 | #define SVGA_3D_CMD_OPEN_CONTEXT_SURFACE 1084 |
| 1162 | |||
| 1163 | #define SVGA_3D_CMD_LOGICOPS_BITBLT 1085 | ||
| 1164 | #define SVGA_3D_CMD_LOGICOPS_TRANSBLT 1086 | ||
| 1165 | #define SVGA_3D_CMD_LOGICOPS_STRETCHBLT 1087 | ||
| 1166 | #define SVGA_3D_CMD_LOGICOPS_COLORFILL 1088 | ||
| 1167 | #define SVGA_3D_CMD_LOGICOPS_ALPHABLEND 1089 | ||
| 1168 | #define SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND 1090 | ||
| 1169 | |||
| 1170 | #define SVGA_3D_CMD_SET_OTABLE_BASE 1091 | ||
| 1171 | #define SVGA_3D_CMD_READBACK_OTABLE 1092 | ||
| 1172 | |||
| 1173 | #define SVGA_3D_CMD_DEFINE_GB_MOB 1093 | ||
| 1174 | #define SVGA_3D_CMD_DESTROY_GB_MOB 1094 | ||
| 1175 | #define SVGA_3D_CMD_REDEFINE_GB_MOB 1095 | ||
| 1176 | #define SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING 1096 | ||
| 1177 | |||
| 1178 | #define SVGA_3D_CMD_DEFINE_GB_SURFACE 1097 | ||
| 1179 | #define SVGA_3D_CMD_DESTROY_GB_SURFACE 1098 | ||
| 1180 | #define SVGA_3D_CMD_BIND_GB_SURFACE 1099 | ||
| 1181 | #define SVGA_3D_CMD_COND_BIND_GB_SURFACE 1100 | ||
| 1182 | #define SVGA_3D_CMD_UPDATE_GB_IMAGE 1101 | ||
| 1183 | #define SVGA_3D_CMD_UPDATE_GB_SURFACE 1102 | ||
| 1184 | #define SVGA_3D_CMD_READBACK_GB_IMAGE 1103 | ||
| 1185 | #define SVGA_3D_CMD_READBACK_GB_SURFACE 1104 | ||
| 1186 | #define SVGA_3D_CMD_INVALIDATE_GB_IMAGE 1105 | ||
| 1187 | #define SVGA_3D_CMD_INVALIDATE_GB_SURFACE 1106 | ||
| 1188 | |||
| 1189 | #define SVGA_3D_CMD_DEFINE_GB_CONTEXT 1107 | ||
| 1190 | #define SVGA_3D_CMD_DESTROY_GB_CONTEXT 1108 | ||
| 1191 | #define SVGA_3D_CMD_BIND_GB_CONTEXT 1109 | ||
| 1192 | #define SVGA_3D_CMD_READBACK_GB_CONTEXT 1110 | ||
| 1193 | #define SVGA_3D_CMD_INVALIDATE_GB_CONTEXT 1111 | ||
| 1194 | |||
| 1195 | #define SVGA_3D_CMD_DEFINE_GB_SHADER 1112 | ||
| 1196 | #define SVGA_3D_CMD_DESTROY_GB_SHADER 1113 | ||
| 1197 | #define SVGA_3D_CMD_BIND_GB_SHADER 1114 | ||
| 1198 | |||
| 1199 | #define SVGA_3D_CMD_SET_OTABLE_BASE64 1115 | ||
| 1200 | |||
| 1201 | #define SVGA_3D_CMD_BEGIN_GB_QUERY 1116 | ||
| 1202 | #define SVGA_3D_CMD_END_GB_QUERY 1117 | ||
| 1203 | #define SVGA_3D_CMD_WAIT_FOR_GB_QUERY 1118 | ||
| 1204 | |||
| 1205 | #define SVGA_3D_CMD_NOP 1119 | ||
| 1206 | |||
| 1207 | #define SVGA_3D_CMD_ENABLE_GART 1120 | ||
| 1208 | #define SVGA_3D_CMD_DISABLE_GART 1121 | ||
| 1209 | #define SVGA_3D_CMD_MAP_MOB_INTO_GART 1122 | ||
| 1210 | #define SVGA_3D_CMD_UNMAP_GART_RANGE 1123 | ||
| 1211 | |||
| 1212 | #define SVGA_3D_CMD_DEFINE_GB_SCREENTARGET 1124 | ||
| 1213 | #define SVGA_3D_CMD_DESTROY_GB_SCREENTARGET 1125 | ||
| 1214 | #define SVGA_3D_CMD_BIND_GB_SCREENTARGET 1126 | ||
| 1215 | #define SVGA_3D_CMD_UPDATE_GB_SCREENTARGET 1127 | ||
| 1216 | |||
| 1217 | #define SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL 1128 | ||
| 1218 | #define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129 | ||
| 1219 | |||
| 1220 | #define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130 | ||
| 1221 | #define SVGA_3D_CMD_GB_SCREEN_DMA 1131 | ||
| 1222 | #define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH 1132 | ||
| 1223 | #define SVGA_3D_CMD_GB_MOB_FENCE 1133 | ||
| 1224 | #define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 1134 | ||
| 1225 | #define SVGA_3D_CMD_DEFINE_GB_MOB64 1135 | ||
| 1226 | #define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136 | ||
| 1227 | #define SVGA_3D_CMD_NOP_ERROR 1137 | ||
| 1228 | |||
| 1229 | #define SVGA_3D_CMD_RESERVED1 1138 | ||
| 1230 | #define SVGA_3D_CMD_RESERVED2 1139 | ||
| 1231 | #define SVGA_3D_CMD_RESERVED3 1140 | ||
| 1232 | #define SVGA_3D_CMD_RESERVED4 1141 | ||
| 1233 | #define SVGA_3D_CMD_RESERVED5 1142 | ||
| 1234 | |||
| 1235 | #define SVGA_3D_CMD_MAX 1142 | ||
| 1236 | #define SVGA_3D_CMD_FUTURE_MAX 3000 | ||
| 1062 | 1237 | ||
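Each of the guest-backed opcodes above is submitted as an SVGA3dCmdHeader (id, size) followed by the matching body structure defined later in this header. A sketch of the submission pattern using the driver's existing vmw_fifo_reserve()/vmw_fifo_commit() helpers; the wrapper function itself is hypothetical:

static int vmw_send_define_gb_context(struct vmw_private *dev_priv, uint32 cid)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	return 0;
}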
| 1063 | /* | 1238 | /* |
| 1064 | * Common substructures used in multiple FIFO commands: | 1239 | * Common substructures used in multiple FIFO commands: |
| @@ -1750,6 +1925,507 @@ struct { | |||
| 1750 | 1925 | ||
| 1751 | 1926 | ||
| 1752 | /* | 1927 | /* |
| 1928 | * Guest-backed surface definitions. | ||
| 1929 | */ | ||
| 1930 | |||
| 1931 | typedef uint32 SVGAMobId; | ||
| 1932 | |||
| 1933 | typedef enum SVGAMobFormat { | ||
| 1934 | SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID, | ||
| 1935 | SVGA3D_MOBFMT_PTDEPTH_0 = 0, | ||
| 1936 | SVGA3D_MOBFMT_PTDEPTH_1 = 1, | ||
| 1937 | SVGA3D_MOBFMT_PTDEPTH_2 = 2, | ||
| 1938 | SVGA3D_MOBFMT_RANGE = 3, | ||
| 1939 | SVGA3D_MOBFMT_PTDEPTH64_0 = 4, | ||
| 1940 | SVGA3D_MOBFMT_PTDEPTH64_1 = 5, | ||
| 1941 | SVGA3D_MOBFMT_PTDEPTH64_2 = 6, | ||
| 1942 | SVGA3D_MOBFMT_MAX, | ||
| 1943 | } SVGAMobFormat; | ||
| 1944 | |||
| 1945 | /* | ||
| 1946 | * Sizes of opaque types. | ||
| 1947 | */ | ||
| 1948 | |||
| 1949 | #define SVGA3D_OTABLE_MOB_ENTRY_SIZE 16 | ||
| 1950 | #define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE 8 | ||
| 1951 | #define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE 64 | ||
| 1952 | #define SVGA3D_OTABLE_SHADER_ENTRY_SIZE 16 | ||
| 1953 | #define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE 64 | ||
| 1954 | #define SVGA3D_CONTEXT_DATA_SIZE 16384 | ||
| 1955 | |||
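The SVGAMobFormat values above encode the depth of a MOB's page table: PTDEPTH_0 means the base PPN points directly at a single data page, PTDEPTH_1 means one page of PPN entries, PTDEPTH_2 two levels, and the PTDEPTH64_* variants use 64-bit PPN64 entries instead. A sketch of how a driver might pick a depth from the number of backing pages (an assumption for illustration; this is not the new vmwgfx_mob.c code):

static SVGAMobFormat vmw_pick_pt_depth(unsigned long num_data_pages)
{
	/* With 4 KiB pages and 32-bit PPNs, one page-table page holds 1024 entries. */
	const unsigned long ppns_per_page = PAGE_SIZE / sizeof(PPN);

	if (num_data_pages == 1)
		return SVGA3D_MOBFMT_PTDEPTH_0;
	if (num_data_pages <= ppns_per_page)
		return SVGA3D_MOBFMT_PTDEPTH_1;
	if (num_data_pages <= ppns_per_page * ppns_per_page)
		return SVGA3D_MOBFMT_PTDEPTH_2;
	return SVGA3D_MOBFMT_INVALID;
}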
| 1956 | /* | ||
| 1957 | * SVGA3dCmdSetOTableBase -- | ||
| 1958 | * | ||
| 1959 | * This command allows the guest to specify the base PPN of the | ||
| 1960 | * specified object table. | ||
| 1961 | */ | ||
| 1962 | |||
| 1963 | typedef enum { | ||
| 1964 | SVGA_OTABLE_MOB = 0, | ||
| 1965 | SVGA_OTABLE_MIN = 0, | ||
| 1966 | SVGA_OTABLE_SURFACE = 1, | ||
| 1967 | SVGA_OTABLE_CONTEXT = 2, | ||
| 1968 | SVGA_OTABLE_SHADER = 3, | ||
| 1969 | SVGA_OTABLE_SCREEN_TARGET = 4, | ||
| 1970 | SVGA_OTABLE_DX9_MAX = 5, | ||
| 1971 | SVGA_OTABLE_MAX = 8 | ||
| 1972 | } SVGAOTableType; | ||
| 1973 | |||
| 1974 | typedef | ||
| 1975 | struct { | ||
| 1976 | SVGAOTableType type; | ||
| 1977 | PPN baseAddress; | ||
| 1978 | uint32 sizeInBytes; | ||
| 1979 | uint32 validSizeInBytes; | ||
| 1980 | SVGAMobFormat ptDepth; | ||
| 1981 | } __packed | ||
| 1982 | SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */ | ||
| 1983 | |||
| 1984 | typedef | ||
| 1985 | struct { | ||
| 1986 | SVGAOTableType type; | ||
| 1987 | PPN64 baseAddress; | ||
| 1988 | uint32 sizeInBytes; | ||
| 1989 | uint32 validSizeInBytes; | ||
| 1990 | SVGAMobFormat ptDepth; | ||
| 1991 | } __packed | ||
| 1992 | SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */ | ||
| 1993 | |||
| 1994 | typedef | ||
| 1995 | struct { | ||
| 1996 | SVGAOTableType type; | ||
| 1997 | } __packed | ||
| 1998 | SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */ | ||
| 1999 | |||
| 2000 | /* | ||
| 2001 | * Define a memory object (Mob) in the OTable. | ||
| 2002 | */ | ||
| 2003 | |||
| 2004 | typedef | ||
| 2005 | struct SVGA3dCmdDefineGBMob { | ||
| 2006 | SVGAMobId mobid; | ||
| 2007 | SVGAMobFormat ptDepth; | ||
| 2008 | PPN base; | ||
| 2009 | uint32 sizeInBytes; | ||
| 2010 | } __packed | ||
| 2011 | SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ | ||
| 2012 | |||
| 2013 | |||
| 2014 | /* | ||
| 2015 | * Destroys an object in the OTable. | ||
| 2016 | */ | ||
| 2017 | |||
| 2018 | typedef | ||
| 2019 | struct SVGA3dCmdDestroyGBMob { | ||
| 2020 | SVGAMobId mobid; | ||
| 2021 | } __packed | ||
| 2022 | SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */ | ||
| 2023 | |||
| 2024 | /* | ||
| 2025 | * Redefine an object in the OTable. | ||
| 2026 | */ | ||
| 2027 | |||
| 2028 | typedef | ||
| 2029 | struct SVGA3dCmdRedefineGBMob { | ||
| 2030 | SVGAMobId mobid; | ||
| 2031 | SVGAMobFormat ptDepth; | ||
| 2032 | PPN base; | ||
| 2033 | uint32 sizeInBytes; | ||
| 2034 | } __packed | ||
| 2035 | SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */ | ||
| 2036 | |||
| 2037 | /* | ||
| 2038 | * Define a memory object (Mob) in the OTable with a PPN64 base. | ||
| 2039 | */ | ||
| 2040 | |||
| 2041 | typedef | ||
| 2042 | struct SVGA3dCmdDefineGBMob64 { | ||
| 2043 | SVGAMobId mobid; | ||
| 2044 | SVGAMobFormat ptDepth; | ||
| 2045 | PPN64 base; | ||
| 2046 | uint32 sizeInBytes; | ||
| 2047 | } __packed | ||
| 2048 | SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */ | ||
| 2049 | |||
| 2050 | /* | ||
| 2051 | * Redefine an object in the OTable with PPN64 base. | ||
| 2052 | */ | ||
| 2053 | |||
| 2054 | typedef | ||
| 2055 | struct SVGA3dCmdRedefineGBMob64 { | ||
| 2056 | SVGAMobId mobid; | ||
| 2057 | SVGAMobFormat ptDepth; | ||
| 2058 | PPN64 base; | ||
| 2059 | uint32 sizeInBytes; | ||
| 2060 | } __packed | ||
| 2061 | SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ | ||
| 2062 | |||
| 2063 | /* | ||
| 2064 | * Notification that the page tables have been modified. | ||
| 2065 | */ | ||
| 2066 | |||
| 2067 | typedef | ||
| 2068 | struct SVGA3dCmdUpdateGBMobMapping { | ||
| 2069 | SVGAMobId mobid; | ||
| 2070 | } __packed | ||
| 2071 | SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */ | ||
| 2072 | |||
| 2073 | /* | ||
| 2074 | * Define a guest-backed surface. | ||
| 2075 | */ | ||
| 2076 | |||
| 2077 | typedef | ||
| 2078 | struct SVGA3dCmdDefineGBSurface { | ||
| 2079 | uint32 sid; | ||
| 2080 | SVGA3dSurfaceFlags surfaceFlags; | ||
| 2081 | SVGA3dSurfaceFormat format; | ||
| 2082 | uint32 numMipLevels; | ||
| 2083 | uint32 multisampleCount; | ||
| 2084 | SVGA3dTextureFilter autogenFilter; | ||
| 2085 | SVGA3dSize size; | ||
| 2086 | } __packed | ||
| 2087 | SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ | ||
| 2088 | |||
| 2089 | /* | ||
| 2090 | * Destroy a guest-backed surface. | ||
| 2091 | */ | ||
| 2092 | |||
| 2093 | typedef | ||
| 2094 | struct SVGA3dCmdDestroyGBSurface { | ||
| 2095 | uint32 sid; | ||
| 2096 | } __packed | ||
| 2097 | SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ | ||
| 2098 | |||
| 2099 | /* | ||
| 2100 | * Bind a guest-backed surface to an object. | ||
| 2101 | */ | ||
| 2102 | |||
| 2103 | typedef | ||
| 2104 | struct SVGA3dCmdBindGBSurface { | ||
| 2105 | uint32 sid; | ||
| 2106 | SVGAMobId mobid; | ||
| 2107 | } __packed | ||
| 2108 | SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ | ||
| 2109 | |||
| 2110 | /* | ||
| 2111 | * Conditionally bind a mob to a guest backed surface if testMobid | ||
| 2112 | * matches the currently bound mob. Optionally issue a readback on | ||
| 2113 | * the surface while it is still bound to the old mobid if the mobid | ||
| 2114 | * is changed by this command. | ||
| 2115 | */ | ||
| 2116 | |||
| 2117 | #define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0) | ||
| 2118 | |||
| 2119 | typedef | ||
| 2120 | struct{ | ||
| 2121 | uint32 sid; | ||
| 2122 | SVGAMobId testMobid; | ||
| 2123 | SVGAMobId mobid; | ||
| 2124 | uint32 flags; | ||
| 2125 | } __packed | ||
| 2126 | SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */ | ||
| 2127 | |||
| 2128 | /* | ||
| 2129 | * Update an image in a guest-backed surface. | ||
| 2130 | * (Inform the device that the guest-contents have been updated.) | ||
| 2131 | */ | ||
| 2132 | |||
| 2133 | typedef | ||
| 2134 | struct SVGA3dCmdUpdateGBImage { | ||
| 2135 | SVGA3dSurfaceImageId image; | ||
| 2136 | SVGA3dBox box; | ||
| 2137 | } __packed | ||
| 2138 | SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ | ||
| 2139 | |||
| 2140 | /* | ||
| 2141 | * Update an entire guest-backed surface. | ||
| 2142 | * (Inform the device that the guest-contents have been updated.) | ||
| 2143 | */ | ||
| 2144 | |||
| 2145 | typedef | ||
| 2146 | struct SVGA3dCmdUpdateGBSurface { | ||
| 2147 | uint32 sid; | ||
| 2148 | } __packed | ||
| 2149 | SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ | ||
| 2150 | |||
| 2151 | /* | ||
| 2152 | * Readback an image in a guest-backed surface. | ||
| 2153 | * (Request the device to flush the dirty contents into the guest.) | ||
| 2154 | */ | ||
| 2155 | |||
| 2156 | typedef | ||
| 2157 | struct SVGA3dCmdReadbackGBImage { | ||
| 2158 | SVGA3dSurfaceImageId image; | ||
| 2159 | } __packed | ||
| 2160 | SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/ | ||
| 2161 | |||
| 2162 | /* | ||
| 2163 | * Readback an entire guest-backed surface. | ||
| 2164 | * (Request the device to flush the dirty contents into the guest.) | ||
| 2165 | */ | ||
| 2166 | |||
| 2167 | typedef | ||
| 2168 | struct SVGA3dCmdReadbackGBSurface { | ||
| 2169 | uint32 sid; | ||
| 2170 | } __packed | ||
| 2171 | SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ | ||
| 2172 | |||
| 2173 | /* | ||
| 2174 | * Readback a sub rect of an image in a guest-backed surface. After | ||
| 2175 | * issuing this command the driver is required to issue an update call | ||
| 2176 | * of the same region before issuing any other commands that reference | ||
| 2177 | * this surface or rendering is not guaranteed. | ||
| 2178 | */ | ||
| 2179 | |||
| 2180 | typedef | ||
| 2181 | struct SVGA3dCmdReadbackGBImagePartial { | ||
| 2182 | SVGA3dSurfaceImageId image; | ||
| 2183 | SVGA3dBox box; | ||
| 2184 | uint32 invertBox; | ||
| 2185 | } __packed | ||
| 2186 | SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ | ||
| 2187 | |||
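The rule in the comment above pairs two commands: a partial readback must be followed by an update of the same box before anything else references the surface. An illustrative sketch only (error handling simplified; the helper is not part of the patch):

static int vmw_readback_partial_then_update(struct vmw_private *dev_priv,
					    const SVGA3dSurfaceImageId *image,
					    const SVGA3dBox *box)
{
	struct {
		SVGA3dCmdHeader rb_header;
		SVGA3dCmdReadbackGBImagePartial rb;
		SVGA3dCmdHeader upd_header;
		SVGA3dCmdUpdateGBImage upd;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (cmd == NULL)
		return -ENOMEM;

	cmd->rb_header.id = SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL;
	cmd->rb_header.size = sizeof(cmd->rb);
	cmd->rb.image = *image;
	cmd->rb.box = *box;
	cmd->rb.invertBox = 0;

	cmd->upd_header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
	cmd->upd_header.size = sizeof(cmd->upd);
	cmd->upd.image = *image;
	cmd->upd.box = *box;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	return 0;
}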
| 2188 | /* | ||
| 2189 | * Invalidate an image in a guest-backed surface. | ||
| 2190 | * (Notify the device that the contents can be lost.) | ||
| 2191 | */ | ||
| 2192 | |||
| 2193 | typedef | ||
| 2194 | struct SVGA3dCmdInvalidateGBImage { | ||
| 2195 | SVGA3dSurfaceImageId image; | ||
| 2196 | } __packed | ||
| 2197 | SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ | ||
| 2198 | |||
| 2199 | /* | ||
| 2200 | * Invalidate an entire guest-backed surface. | ||
| 2201 | * (Notify the device that the contents of all images can be lost.) | ||
| 2202 | */ | ||
| 2203 | |||
| 2204 | typedef | ||
| 2205 | struct SVGA3dCmdInvalidateGBSurface { | ||
| 2206 | uint32 sid; | ||
| 2207 | } __packed | ||
| 2208 | SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ | ||
| 2209 | |||
| 2210 | /* | ||
| 2211 | * Invalidate a sub rect of an image in a guest-backed surface. After | ||
| 2212 | * issuing this command the driver is required to issue an update call | ||
| 2213 | * of the same region before issuing any other commands that reference | ||
| 2214 | * this surface or rendering is not guaranteed. | ||
| 2215 | */ | ||
| 2216 | |||
| 2217 | typedef | ||
| 2218 | struct SVGA3dCmdInvalidateGBImagePartial { | ||
| 2219 | SVGA3dSurfaceImageId image; | ||
| 2220 | SVGA3dBox box; | ||
| 2221 | uint32 invertBox; | ||
| 2222 | } __packed | ||
| 2223 | SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ | ||
| 2224 | |||
| 2225 | /* | ||
| 2226 | * Define a guest-backed context. | ||
| 2227 | */ | ||
| 2228 | |||
| 2229 | typedef | ||
| 2230 | struct SVGA3dCmdDefineGBContext { | ||
| 2231 | uint32 cid; | ||
| 2232 | } __packed | ||
| 2233 | SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ | ||
| 2234 | |||
| 2235 | /* | ||
| 2236 | * Destroy a guest-backed context. | ||
| 2237 | */ | ||
| 2238 | |||
| 2239 | typedef | ||
| 2240 | struct SVGA3dCmdDestroyGBContext { | ||
| 2241 | uint32 cid; | ||
| 2242 | } __packed | ||
| 2243 | SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ | ||
| 2244 | |||
| 2245 | /* | ||
| 2246 | * Bind a guest-backed context. | ||
| 2247 | * | ||
| 2248 | * validContents should be set to 0 for new contexts, | ||
| 2249 | * and 1 if this is an old context which is getting paged | ||
| 2250 | * back on to the device. | ||
| 2251 | * | ||
| 2252 | * For new contexts, it is recommended that the driver | ||
| 2253 | * issue commands to initialize all interesting state | ||
| 2254 | * prior to rendering. | ||
| 2255 | */ | ||
| 2256 | |||
| 2257 | typedef | ||
| 2258 | struct SVGA3dCmdBindGBContext { | ||
| 2259 | uint32 cid; | ||
| 2260 | SVGAMobId mobid; | ||
| 2261 | uint32 validContents; | ||
| 2262 | } __packed | ||
| 2263 | SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ | ||
| 2264 | |||
| 2265 | /* | ||
| 2266 | * Readback a guest-backed context. | ||
| 2267 | * (Request that the device flush the contents back into guest memory.) | ||
| 2268 | */ | ||
| 2269 | |||
| 2270 | typedef | ||
| 2271 | struct SVGA3dCmdReadbackGBContext { | ||
| 2272 | uint32 cid; | ||
| 2273 | } __packed | ||
| 2274 | SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ | ||
| 2275 | |||
| 2276 | /* | ||
| 2277 | * Invalidate a guest-backed context. | ||
| 2278 | */ | ||
| 2279 | typedef | ||
| 2280 | struct SVGA3dCmdInvalidateGBContext { | ||
| 2281 | uint32 cid; | ||
| 2282 | } __packed | ||
| 2283 | SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ | ||
| 2284 | |||
| 2285 | /* | ||
| 2286 | * Define a guest-backed shader. | ||
| 2287 | */ | ||
| 2288 | |||
| 2289 | typedef | ||
| 2290 | struct SVGA3dCmdDefineGBShader { | ||
| 2291 | uint32 shid; | ||
| 2292 | SVGA3dShaderType type; | ||
| 2293 | uint32 sizeInBytes; | ||
| 2294 | } __packed | ||
| 2295 | SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ | ||
| 2296 | |||
| 2297 | /* | ||
| 2298 | * Bind a guest-backed shader. | ||
| 2299 | */ | ||
| 2300 | |||
| 2301 | typedef struct SVGA3dCmdBindGBShader { | ||
| 2302 | uint32 shid; | ||
| 2303 | SVGAMobId mobid; | ||
| 2304 | uint32 offsetInBytes; | ||
| 2305 | } __packed | ||
| 2306 | SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ | ||
| 2307 | |||
| 2308 | /* | ||
| 2309 | * Destroy a guest-backed shader. | ||
| 2310 | */ | ||
| 2311 | |||
| 2312 | typedef struct SVGA3dCmdDestroyGBShader { | ||
| 2313 | uint32 shid; | ||
| 2314 | } __packed | ||
| 2315 | SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ | ||
| 2316 | |||
| 2317 | typedef | ||
| 2318 | struct { | ||
| 2319 | uint32 cid; | ||
| 2320 | uint32 regStart; | ||
| 2321 | SVGA3dShaderType shaderType; | ||
| 2322 | SVGA3dShaderConstType constType; | ||
| 2323 | |||
| 2324 | /* | ||
| 2325 | * Followed by a variable number of shader constants. | ||
| 2326 | * | ||
| 2327 | * Note that FLOAT and INT constants are 4-dwords in length, while | ||
| 2328 | * BOOL constants are 1-dword in length. | ||
| 2329 | */ | ||
| 2330 | } __packed | ||
| 2331 | SVGA3dCmdSetGBShaderConstInline; | ||
| 2332 | /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */ | ||
| 2333 | |||
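The variable-length tail described above determines the total command size: FLOAT and INT constants occupy four dwords each, BOOL constants one dword. A sketch of the size computation (helper name illustrative):

static uint32 vmw_gb_shader_consts_size(SVGA3dShaderConstType const_type,
					uint32 num_consts)
{
	uint32 bytes_per_const = (const_type == SVGA3D_CONST_TYPE_BOOL) ?
		sizeof(uint32) : 4 * sizeof(uint32);

	return sizeof(SVGA3dCmdSetGBShaderConstInline) +
		num_consts * bytes_per_const;
}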
| 2334 | typedef | ||
| 2335 | struct { | ||
| 2336 | uint32 cid; | ||
| 2337 | SVGA3dQueryType type; | ||
| 2338 | } __packed | ||
| 2339 | SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ | ||
| 2340 | |||
| 2341 | typedef | ||
| 2342 | struct { | ||
| 2343 | uint32 cid; | ||
| 2344 | SVGA3dQueryType type; | ||
| 2345 | SVGAMobId mobid; | ||
| 2346 | uint32 offset; | ||
| 2347 | } __packed | ||
| 2348 | SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ | ||
| 2349 | |||
| 2350 | |||
| 2351 | /* | ||
| 2352 | * SVGA_3D_CMD_WAIT_FOR_GB_QUERY -- | ||
| 2353 | * | ||
| 2354 | * The semantics of this command are identical to the | ||
| 2355 | * SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written | ||
| 2356 | * to a Mob instead of a GMR. | ||
| 2357 | */ | ||
| 2358 | |||
| 2359 | typedef | ||
| 2360 | struct { | ||
| 2361 | uint32 cid; | ||
| 2362 | SVGA3dQueryType type; | ||
| 2363 | SVGAMobId mobid; | ||
| 2364 | uint32 offset; | ||
| 2365 | } __packed | ||
| 2366 | SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ | ||
| 2367 | |||
| 2368 | typedef | ||
| 2369 | struct { | ||
| 2370 | SVGAMobId mobid; | ||
| 2371 | uint32 fbOffset; | ||
| 2372 | uint32 initalized; | ||
| 2373 | } __packed | ||
| 2374 | SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */ | ||
| 2375 | |||
| 2376 | typedef | ||
| 2377 | struct { | ||
| 2378 | SVGAMobId mobid; | ||
| 2379 | uint32 gartOffset; | ||
| 2380 | } __packed | ||
| 2381 | SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */ | ||
| 2382 | |||
| 2383 | |||
| 2384 | typedef | ||
| 2385 | struct { | ||
| 2386 | uint32 gartOffset; | ||
| 2387 | uint32 numPages; | ||
| 2388 | } __packed | ||
| 2389 | SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */ | ||
| 2390 | |||
| 2391 | |||
| 2392 | /* | ||
| 2393 | * Screen Targets | ||
| 2394 | */ | ||
| 2395 | #define SVGA_STFLAG_PRIMARY (1 << 0) | ||
| 2396 | |||
| 2397 | typedef | ||
| 2398 | struct { | ||
| 2399 | uint32 stid; | ||
| 2400 | uint32 width; | ||
| 2401 | uint32 height; | ||
| 2402 | int32 xRoot; | ||
| 2403 | int32 yRoot; | ||
| 2404 | uint32 flags; | ||
| 2405 | } __packed | ||
| 2406 | SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */ | ||
| 2407 | |||
| 2408 | typedef | ||
| 2409 | struct { | ||
| 2410 | uint32 stid; | ||
| 2411 | } __packed | ||
| 2412 | SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */ | ||
| 2413 | |||
| 2414 | typedef | ||
| 2415 | struct { | ||
| 2416 | uint32 stid; | ||
| 2417 | SVGA3dSurfaceImageId image; | ||
| 2418 | } __packed | ||
| 2419 | SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */ | ||
| 2420 | |||
| 2421 | typedef | ||
| 2422 | struct { | ||
| 2423 | uint32 stid; | ||
| 2424 | SVGA3dBox box; | ||
| 2425 | } __packed | ||
| 2426 | SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */ | ||
| 2427 | |||
| 2428 | /* | ||
| 1753 | * Capability query index. | 2429 | * Capability query index. |
| 1754 | * | 2430 | * |
| 1755 | * Notes: | 2431 | * Notes: |
| @@ -1879,10 +2555,41 @@ typedef enum { | |||
| 1879 | SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83, | 2555 | SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83, |
| 1880 | 2556 | ||
| 1881 | /* | 2557 | /* |
| 1882 | * Don't add new caps into the previous section; the values in this | 2558 | * Deprecated. |
| 1883 | * enumeration must not change. You can put new values right before | 2559 | */ |
| 1884 | * SVGA3D_DEVCAP_MAX. | 2560 | SVGA3D_DEVCAP_VGPU10 = 84, |
| 2561 | |||
| 2562 | /* | ||
| 2563 | * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements | ||
| 2564 | * ored together, one for every type of video decoding supported. | ||
| 2565 | */ | ||
| 2566 | SVGA3D_DEVCAP_VIDEO_DECODE = 85, | ||
| 2567 | |||
| 2568 | /* | ||
| 2569 | * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements | ||
| 2570 | * ored together, one for every type of video processing supported. | ||
| 2571 | */ | ||
| 2572 | SVGA3D_DEVCAP_VIDEO_PROCESS = 86, | ||
| 2573 | |||
| 2574 | SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */ | ||
| 2575 | SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */ | ||
| 2576 | SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */ | ||
| 2577 | SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */ | ||
| 2578 | |||
| 2579 | SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91, | ||
| 2580 | |||
| 2581 | /* | ||
| 2582 | * Does the host support the SVGA logic ops commands? | ||
| 2583 | */ | ||
| 2584 | SVGA3D_DEVCAP_LOGICOPS = 92, | ||
| 2585 | |||
| 2586 | /* | ||
| 2587 | * What support does the host have for screen targets? | ||
| 2588 | * | ||
| 2589 | * See the SVGA3D_SCREENTARGET_CAP bits below. | ||
| 1885 | */ | 2590 | */ |
| 2591 | SVGA3D_DEVCAP_SCREENTARGETS = 93, | ||
| 2592 | |||
| 1886 | SVGA3D_DEVCAP_MAX /* This must be the last index. */ | 2593 | SVGA3D_DEVCAP_MAX /* This must be the last index. */ |
| 1887 | } SVGA3dDevCapIndex; | 2594 | } SVGA3dDevCapIndex; |
| 1888 | 2595 | ||
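With guest-backed objects, individual caps from the enumeration above can also be queried through the SVGA_REG_DEV_CAP register added to svga_reg.h further down: write the cap index, then read the value back. A sketch using the driver's existing vmw_write()/vmw_read() register accessors (the wrapper is hypothetical):

static uint32 vmw_query_3d_cap(struct vmw_private *dev_priv,
			       SVGA3dDevCapIndex idx)
{
	vmw_write(dev_priv, SVGA_REG_DEV_CAP, idx);
	return vmw_read(dev_priv, SVGA_REG_DEV_CAP);
}

/* Usage: screen targets are supported iff
 * vmw_query_3d_cap(dev_priv, SVGA3D_DEVCAP_SCREENTARGETS) != 0. */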
| @@ -1893,4 +2600,28 @@ typedef union { | |||
| 1893 | float f; | 2600 | float f; |
| 1894 | } SVGA3dDevCapResult; | 2601 | } SVGA3dDevCapResult; |
| 1895 | 2602 | ||
| 2603 | typedef enum { | ||
| 2604 | SVGA3DCAPS_RECORD_UNKNOWN = 0, | ||
| 2605 | SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100, | ||
| 2606 | SVGA3DCAPS_RECORD_DEVCAPS = 0x100, | ||
| 2607 | SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff, | ||
| 2608 | } SVGA3dCapsRecordType; | ||
| 2609 | |||
| 2610 | typedef | ||
| 2611 | struct SVGA3dCapsRecordHeader { | ||
| 2612 | uint32 length; | ||
| 2613 | SVGA3dCapsRecordType type; | ||
| 2614 | } | ||
| 2615 | SVGA3dCapsRecordHeader; | ||
| 2616 | |||
| 2617 | typedef | ||
| 2618 | struct SVGA3dCapsRecord { | ||
| 2619 | SVGA3dCapsRecordHeader header; | ||
| 2620 | uint32 data[1]; | ||
| 2621 | } | ||
| 2622 | SVGA3dCapsRecord; | ||
| 2623 | |||
| 2624 | |||
| 2625 | typedef uint32 SVGA3dCapPair[2]; | ||
| 2626 | |||
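SVGA3dCapsRecord is a length-prefixed record: header.length counts dwords including the header itself, and for DEVCAPS records the data area holds SVGA3dCapPair entries of (cap index, cap value). A sketch of walking such records; the layout details are an assumption, and the real parsing lives in the driver's FIFO caps handling:

static uint32 vmw_find_devcap(const uint32 *caps, uint32 num_dwords,
			      uint32 wanted_index)
{
	uint32 pos = 0;

	while (pos + 2 <= num_dwords) {
		const SVGA3dCapsRecord *rec =
			(const SVGA3dCapsRecord *)&caps[pos];
		uint32 len = rec->header.length;	/* in dwords, incl. header */
		uint32 i, num_pairs;

		if (len < 2 || pos + len > num_dwords)
			break;
		if (rec->header.type == SVGA3DCAPS_RECORD_DEVCAPS) {
			num_pairs = (len - 2) / 2;
			for (i = 0; i < num_pairs; i++) {
				const uint32 *pair = &rec->data[i * 2];

				if (pair[0] == wanted_index)
					return pair[1];
			}
		}
		pos += len;
	}
	return 0;
}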
| 1896 | #endif /* _SVGA3D_REG_H_ */ | 2627 | #endif /* _SVGA3D_REG_H_ */ |
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h index 8369c3ba10fe..ef3385096145 100644 --- a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h +++ b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h | |||
| @@ -38,8 +38,11 @@ | |||
| 38 | 38 | ||
| 39 | #define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y)) | 39 | #define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y)) |
| 40 | #define max_t(type, x, y) ((x) > (y) ? (x) : (y)) | 40 | #define max_t(type, x, y) ((x) > (y) ? (x) : (y)) |
| 41 | #define min_t(type, x, y) ((x) < (y) ? (x) : (y)) | ||
| 41 | #define surf_size_struct SVGA3dSize | 42 | #define surf_size_struct SVGA3dSize |
| 42 | #define u32 uint32 | 43 | #define u32 uint32 |
| 44 | #define u64 uint64_t | ||
| 45 | #define U32_MAX ((u32)~0U) | ||
| 43 | 46 | ||
| 44 | #endif /* __KERNEL__ */ | 47 | #endif /* __KERNEL__ */ |
| 45 | 48 | ||
| @@ -704,8 +707,8 @@ static const struct svga3d_surface_desc svga3d_surface_descs[] = { | |||
| 704 | 707 | ||
| 705 | static inline u32 clamped_umul32(u32 a, u32 b) | 708 | static inline u32 clamped_umul32(u32 a, u32 b) |
| 706 | { | 709 | { |
| 707 | uint64_t tmp = (uint64_t) a*b; | 710 | u64 tmp = (u64) a*b; |
| 708 | return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp; | 711 | return (tmp > (u64) U32_MAX) ? U32_MAX : tmp; |
| 709 | } | 712 | } |
| 710 | 713 | ||
| 711 | static inline const struct svga3d_surface_desc * | 714 | static inline const struct svga3d_surface_desc * |
| @@ -834,7 +837,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format, | |||
| 834 | bool cubemap) | 837 | bool cubemap) |
| 835 | { | 838 | { |
| 836 | const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format); | 839 | const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format); |
| 837 | u32 total_size = 0; | 840 | u64 total_size = 0; |
| 838 | u32 mip; | 841 | u32 mip; |
| 839 | 842 | ||
| 840 | for (mip = 0; mip < num_mip_levels; mip++) { | 843 | for (mip = 0; mip < num_mip_levels; mip++) { |
| @@ -847,7 +850,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format, | |||
| 847 | if (cubemap) | 850 | if (cubemap) |
| 848 | total_size *= SVGA3D_MAX_SURFACE_FACES; | 851 | total_size *= SVGA3D_MAX_SURFACE_FACES; |
| 849 | 852 | ||
| 850 | return total_size; | 853 | return (u32) min_t(u64, total_size, (u64) U32_MAX); |
| 851 | } | 854 | } |
| 852 | 855 | ||
| 853 | 856 | ||
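The switch to a u64 accumulator and the min_t() clamp above guard against 32-bit overflow: for example, a (hypothetical) 16384x16384, 32-bpp cubemap with a full mip chain serializes to roughly 8 GiB, which would silently wrap in a u32. A sketch of how a caller could treat a clamped result as "too big" instead of trusting a wrapped value (assumption, not part of the patch):

static inline bool vmw_surface_size_sane(SVGA3dSurfaceFormat format,
					 surf_size_struct base_level_size,
					 u32 num_mip_levels, bool cubemap)
{
	u32 size = svga3dsurface_get_serialized_size(format, base_level_size,
						     num_mip_levels, cubemap);

	return size != 0 && size < U32_MAX;
}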
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h index 01f63cb49678..11323dd5196f 100644 --- a/drivers/gpu/drm/vmwgfx/svga_reg.h +++ b/drivers/gpu/drm/vmwgfx/svga_reg.h | |||
| @@ -169,7 +169,17 @@ enum { | |||
| 169 | SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */ | 169 | SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */ |
| 170 | SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */ | 170 | SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */ |
| 171 | SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */ | 171 | SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */ |
| 172 | SVGA_REG_TOP = 48, /* Must be 1 more than the last register */ | 172 | SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */ |
| 173 | SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */ | ||
| 174 | SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */ | ||
| 175 | SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */ | ||
| 176 | SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */ | ||
| 177 | SVGA_REG_CMD_PREPEND_LOW = 53, | ||
| 178 | SVGA_REG_CMD_PREPEND_HIGH = 54, | ||
| 179 | SVGA_REG_SCREENTARGET_MAX_WIDTH = 55, | ||
| 180 | SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56, | ||
| 181 | SVGA_REG_MOB_MAX_SIZE = 57, | ||
| 182 | SVGA_REG_TOP = 58, /* Must be 1 more than the last register */ | ||
| 173 | 183 | ||
| 174 | SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ | 184 | SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ |
| 175 | /* Next 768 (== 256*3) registers exist for colormap */ | 185 | /* Next 768 (== 256*3) registers exist for colormap */ |
| @@ -431,7 +441,10 @@ struct SVGASignedPoint { | |||
| 431 | #define SVGA_CAP_TRACES 0x00200000 | 441 | #define SVGA_CAP_TRACES 0x00200000 |
| 432 | #define SVGA_CAP_GMR2 0x00400000 | 442 | #define SVGA_CAP_GMR2 0x00400000 |
| 433 | #define SVGA_CAP_SCREEN_OBJECT_2 0x00800000 | 443 | #define SVGA_CAP_SCREEN_OBJECT_2 0x00800000 |
| 434 | 444 | #define SVGA_CAP_COMMAND_BUFFERS 0x01000000 | |
| 445 | #define SVGA_CAP_DEAD1 0x02000000 | ||
| 446 | #define SVGA_CAP_CMD_BUFFERS_2 0x04000000 | ||
| 447 | #define SVGA_CAP_GBOBJECTS 0x08000000 | ||
| 435 | 448 | ||
| 436 | /* | 449 | /* |
| 437 | * FIFO register indices. | 450 | * FIFO register indices. |
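Taken together, the new SVGA_CAP_GBOBJECTS bit and the registers above let the driver detect guest-backed object support at probe time and read its size limits. A sketch (the dev_priv field names below are assumptions; the real probe code is in vmwgfx_drv.c):

static void vmw_read_gb_limits(struct vmw_private *dev_priv)
{
	if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
		return;		/* legacy GMR-only device */

	dev_priv->max_mob_pages =
		vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE) >> PAGE_SHIFT;
	dev_priv->max_gb_mem_kb =
		vmw_read(dev_priv, SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
}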
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 0489c6152482..6327cfc36805 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | |||
| @@ -40,6 +40,10 @@ static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM | | |||
| 40 | static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM | | 40 | static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM | |
| 41 | TTM_PL_FLAG_CACHED; | 41 | TTM_PL_FLAG_CACHED; |
| 42 | 42 | ||
| 43 | static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM | | ||
| 44 | TTM_PL_FLAG_CACHED | | ||
| 45 | TTM_PL_FLAG_NO_EVICT; | ||
| 46 | |||
| 43 | static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR | | 47 | static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR | |
| 44 | TTM_PL_FLAG_CACHED; | 48 | TTM_PL_FLAG_CACHED; |
| 45 | 49 | ||
| @@ -47,6 +51,9 @@ static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR | | |||
| 47 | TTM_PL_FLAG_CACHED | | 51 | TTM_PL_FLAG_CACHED | |
| 48 | TTM_PL_FLAG_NO_EVICT; | 52 | TTM_PL_FLAG_NO_EVICT; |
| 49 | 53 | ||
| 54 | static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB | | ||
| 55 | TTM_PL_FLAG_CACHED; | ||
| 56 | |||
| 50 | struct ttm_placement vmw_vram_placement = { | 57 | struct ttm_placement vmw_vram_placement = { |
| 51 | .fpfn = 0, | 58 | .fpfn = 0, |
| 52 | .lpfn = 0, | 59 | .lpfn = 0, |
| @@ -116,16 +123,26 @@ struct ttm_placement vmw_sys_placement = { | |||
| 116 | .busy_placement = &sys_placement_flags | 123 | .busy_placement = &sys_placement_flags |
| 117 | }; | 124 | }; |
| 118 | 125 | ||
| 126 | struct ttm_placement vmw_sys_ne_placement = { | ||
| 127 | .fpfn = 0, | ||
| 128 | .lpfn = 0, | ||
| 129 | .num_placement = 1, | ||
| 130 | .placement = &sys_ne_placement_flags, | ||
| 131 | .num_busy_placement = 1, | ||
| 132 | .busy_placement = &sys_ne_placement_flags | ||
| 133 | }; | ||
| 134 | |||
| 119 | static uint32_t evictable_placement_flags[] = { | 135 | static uint32_t evictable_placement_flags[] = { |
| 120 | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED, | 136 | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED, |
| 121 | TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED, | 137 | TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED, |
| 122 | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | 138 | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED, |
| 139 | VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | ||
| 123 | }; | 140 | }; |
| 124 | 141 | ||
| 125 | struct ttm_placement vmw_evictable_placement = { | 142 | struct ttm_placement vmw_evictable_placement = { |
| 126 | .fpfn = 0, | 143 | .fpfn = 0, |
| 127 | .lpfn = 0, | 144 | .lpfn = 0, |
| 128 | .num_placement = 3, | 145 | .num_placement = 4, |
| 129 | .placement = evictable_placement_flags, | 146 | .placement = evictable_placement_flags, |
| 130 | .num_busy_placement = 1, | 147 | .num_busy_placement = 1, |
| 131 | .busy_placement = &sys_placement_flags | 148 | .busy_placement = &sys_placement_flags |
| @@ -140,10 +157,21 @@ struct ttm_placement vmw_srf_placement = { | |||
| 140 | .busy_placement = gmr_vram_placement_flags | 157 | .busy_placement = gmr_vram_placement_flags |
| 141 | }; | 158 | }; |
| 142 | 159 | ||
| 160 | struct ttm_placement vmw_mob_placement = { | ||
| 161 | .fpfn = 0, | ||
| 162 | .lpfn = 0, | ||
| 163 | .num_placement = 1, | ||
| 164 | .num_busy_placement = 1, | ||
| 165 | .placement = &mob_placement_flags, | ||
| 166 | .busy_placement = &mob_placement_flags | ||
| 167 | }; | ||
| 168 | |||
| 143 | struct vmw_ttm_tt { | 169 | struct vmw_ttm_tt { |
| 144 | struct ttm_dma_tt dma_ttm; | 170 | struct ttm_dma_tt dma_ttm; |
| 145 | struct vmw_private *dev_priv; | 171 | struct vmw_private *dev_priv; |
| 146 | int gmr_id; | 172 | int gmr_id; |
| 173 | struct vmw_mob *mob; | ||
| 174 | int mem_type; | ||
| 147 | struct sg_table sgt; | 175 | struct sg_table sgt; |
| 148 | struct vmw_sg_table vsgt; | 176 | struct vmw_sg_table vsgt; |
| 149 | uint64_t sg_alloc_size; | 177 | uint64_t sg_alloc_size; |
| @@ -244,6 +272,7 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt, | |||
| 244 | viter->dma_address = &__vmw_piter_dma_addr; | 272 | viter->dma_address = &__vmw_piter_dma_addr; |
| 245 | viter->page = &__vmw_piter_non_sg_page; | 273 | viter->page = &__vmw_piter_non_sg_page; |
| 246 | viter->addrs = vsgt->addrs; | 274 | viter->addrs = vsgt->addrs; |
| 275 | viter->pages = vsgt->pages; | ||
| 247 | break; | 276 | break; |
| 248 | case vmw_dma_map_populate: | 277 | case vmw_dma_map_populate: |
| 249 | case vmw_dma_map_bind: | 278 | case vmw_dma_map_bind: |
| @@ -424,6 +453,63 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt) | |||
| 424 | vmw_tt->mapped = false; | 453 | vmw_tt->mapped = false; |
| 425 | } | 454 | } |
| 426 | 455 | ||
| 456 | |||
| 457 | /** | ||
| 458 | * vmw_bo_map_dma - Make sure buffer object pages are visible to the device | ||
| 459 | * | ||
| 460 | * @bo: Pointer to a struct ttm_buffer_object | ||
| 461 | * | ||
| 462 | * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer | ||
| 463 | * instead of a pointer to a struct vmw_ttm_backend as argument. | ||
| 464 | * instead of a pointer to a struct vmw_ttm_tt as argument. | ||
| 465 | * calling this function. | ||
| 466 | */ | ||
| 467 | int vmw_bo_map_dma(struct ttm_buffer_object *bo) | ||
| 468 | { | ||
| 469 | struct vmw_ttm_tt *vmw_tt = | ||
| 470 | container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); | ||
| 471 | |||
| 472 | return vmw_ttm_map_dma(vmw_tt); | ||
| 473 | } | ||
| 474 | |||
| 475 | |||
| 476 | /** | ||
| 477 | * vmw_bo_unmap_dma - Make sure buffer object pages are no longer mapped for the device | ||
| 478 | * | ||
| 479 | * @bo: Pointer to a struct ttm_buffer_object | ||
| 480 | * | ||
| 481 | * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer | ||
| 482 | * instead of a pointer to a struct vmw_ttm_backend as argument. | ||
| 483 | * instead of a pointer to a struct vmw_ttm_tt as argument. | ||
| 484 | void vmw_bo_unmap_dma(struct ttm_buffer_object *bo) | ||
| 485 | { | ||
| 486 | struct vmw_ttm_tt *vmw_tt = | ||
| 487 | container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); | ||
| 488 | |||
| 489 | vmw_ttm_unmap_dma(vmw_tt); | ||
| 490 | } | ||
| 491 | |||
| 492 | |||
| 493 | /** | ||
| 494 | * vmw_bo_sg_table - Return a struct vmw_sg_table object for a | ||
| 495 | * TTM buffer object | ||
| 496 | * | ||
| 497 | * @bo: Pointer to a struct ttm_buffer_object | ||
| 498 | * | ||
| 499 | * Returns a pointer to a struct vmw_sg_table object. The object should | ||
| 500 | * not be freed after use. | ||
| 501 | * Note that for the device addresses to be valid, the buffer object must | ||
| 502 | * either be reserved or pinned. | ||
| 503 | */ | ||
| 504 | const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo) | ||
| 505 | { | ||
| 506 | struct vmw_ttm_tt *vmw_tt = | ||
| 507 | container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); | ||
| 508 | |||
| 509 | return &vmw_tt->vsgt; | ||
| 510 | } | ||
| 511 | |||
| 512 | |||
| 427 | static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) | 513 | static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) |
| 428 | { | 514 | { |
| 429 | struct vmw_ttm_tt *vmw_be = | 515 | struct vmw_ttm_tt *vmw_be = |
| @@ -435,9 +521,27 @@ static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) | |||
| 435 | return ret; | 521 | return ret; |
| 436 | 522 | ||
| 437 | vmw_be->gmr_id = bo_mem->start; | 523 | vmw_be->gmr_id = bo_mem->start; |
| 524 | vmw_be->mem_type = bo_mem->mem_type; | ||
| 525 | |||
| 526 | switch (bo_mem->mem_type) { | ||
| 527 | case VMW_PL_GMR: | ||
| 528 | return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt, | ||
| 529 | ttm->num_pages, vmw_be->gmr_id); | ||
| 530 | case VMW_PL_MOB: | ||
| 531 | if (unlikely(vmw_be->mob == NULL)) { | ||
| 532 | vmw_be->mob = | ||
| 533 | vmw_mob_create(ttm->num_pages); | ||
| 534 | if (unlikely(vmw_be->mob == NULL)) | ||
| 535 | return -ENOMEM; | ||
| 536 | } | ||
| 438 | 537 | ||
| 439 | return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt, | 538 | return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob, |
| 440 | ttm->num_pages, vmw_be->gmr_id); | 539 | &vmw_be->vsgt, ttm->num_pages, |
| 540 | vmw_be->gmr_id); | ||
| 541 | default: | ||
| 542 | BUG(); | ||
| 543 | } | ||
| 544 | return 0; | ||
| 441 | } | 545 | } |
| 442 | 546 | ||
| 443 | static int vmw_ttm_unbind(struct ttm_tt *ttm) | 547 | static int vmw_ttm_unbind(struct ttm_tt *ttm) |
| @@ -445,7 +549,16 @@ static int vmw_ttm_unbind(struct ttm_tt *ttm) | |||
| 445 | struct vmw_ttm_tt *vmw_be = | 549 | struct vmw_ttm_tt *vmw_be = |
| 446 | container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); | 550 | container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); |
| 447 | 551 | ||
| 448 | vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id); | 552 | switch (vmw_be->mem_type) { |
| 553 | case VMW_PL_GMR: | ||
| 554 | vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id); | ||
| 555 | break; | ||
| 556 | case VMW_PL_MOB: | ||
| 557 | vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob); | ||
| 558 | break; | ||
| 559 | default: | ||
| 560 | BUG(); | ||
| 561 | } | ||
| 449 | 562 | ||
| 450 | if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind) | 563 | if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind) |
| 451 | vmw_ttm_unmap_dma(vmw_be); | 564 | vmw_ttm_unmap_dma(vmw_be); |
| @@ -453,6 +566,7 @@ static int vmw_ttm_unbind(struct ttm_tt *ttm) | |||
| 453 | return 0; | 566 | return 0; |
| 454 | } | 567 | } |
| 455 | 568 | ||
| 569 | |||
| 456 | static void vmw_ttm_destroy(struct ttm_tt *ttm) | 570 | static void vmw_ttm_destroy(struct ttm_tt *ttm) |
| 457 | { | 571 | { |
| 458 | struct vmw_ttm_tt *vmw_be = | 572 | struct vmw_ttm_tt *vmw_be = |
| @@ -463,9 +577,14 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm) | |||
| 463 | ttm_dma_tt_fini(&vmw_be->dma_ttm); | 577 | ttm_dma_tt_fini(&vmw_be->dma_ttm); |
| 464 | else | 578 | else |
| 465 | ttm_tt_fini(ttm); | 579 | ttm_tt_fini(ttm); |
| 580 | |||
| 581 | if (vmw_be->mob) | ||
| 582 | vmw_mob_destroy(vmw_be->mob); | ||
| 583 | |||
| 466 | kfree(vmw_be); | 584 | kfree(vmw_be); |
| 467 | } | 585 | } |
| 468 | 586 | ||
| 587 | |||
| 469 | static int vmw_ttm_populate(struct ttm_tt *ttm) | 588 | static int vmw_ttm_populate(struct ttm_tt *ttm) |
| 470 | { | 589 | { |
| 471 | struct vmw_ttm_tt *vmw_tt = | 590 | struct vmw_ttm_tt *vmw_tt = |
| @@ -500,6 +619,12 @@ static void vmw_ttm_unpopulate(struct ttm_tt *ttm) | |||
| 500 | struct vmw_private *dev_priv = vmw_tt->dev_priv; | 619 | struct vmw_private *dev_priv = vmw_tt->dev_priv; |
| 501 | struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); | 620 | struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); |
| 502 | 621 | ||
| 622 | |||
| 623 | if (vmw_tt->mob) { | ||
| 624 | vmw_mob_destroy(vmw_tt->mob); | ||
| 625 | vmw_tt->mob = NULL; | ||
| 626 | } | ||
| 627 | |||
| 503 | vmw_ttm_unmap_dma(vmw_tt); | 628 | vmw_ttm_unmap_dma(vmw_tt); |
| 504 | if (dev_priv->map_mode == vmw_dma_alloc_coherent) { | 629 | if (dev_priv->map_mode == vmw_dma_alloc_coherent) { |
| 505 | size_t size = | 630 | size_t size = |
| @@ -517,7 +642,7 @@ static struct ttm_backend_func vmw_ttm_func = { | |||
| 517 | .destroy = vmw_ttm_destroy, | 642 | .destroy = vmw_ttm_destroy, |
| 518 | }; | 643 | }; |
| 519 | 644 | ||
| 520 | struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev, | 645 | static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev, |
| 521 | unsigned long size, uint32_t page_flags, | 646 | unsigned long size, uint32_t page_flags, |
| 522 | struct page *dummy_read_page) | 647 | struct page *dummy_read_page) |
| 523 | { | 648 | { |
| @@ -530,6 +655,7 @@ struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev, | |||
| 530 | 655 | ||
| 531 | vmw_be->dma_ttm.ttm.func = &vmw_ttm_func; | 656 | vmw_be->dma_ttm.ttm.func = &vmw_ttm_func; |
| 532 | vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev); | 657 | vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev); |
| 658 | vmw_be->mob = NULL; | ||
| 533 | 659 | ||
| 534 | if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) | 660 | if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) |
| 535 | ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags, | 661 | ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags, |
| @@ -546,12 +672,12 @@ out_no_init: | |||
| 546 | return NULL; | 672 | return NULL; |
| 547 | } | 673 | } |
| 548 | 674 | ||
| 549 | int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) | 675 | static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) |
| 550 | { | 676 | { |
| 551 | return 0; | 677 | return 0; |
| 552 | } | 678 | } |
| 553 | 679 | ||
| 554 | int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | 680 | static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, |
| 555 | struct ttm_mem_type_manager *man) | 681 | struct ttm_mem_type_manager *man) |
| 556 | { | 682 | { |
| 557 | switch (type) { | 683 | switch (type) { |
| @@ -571,6 +697,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
| 571 | man->default_caching = TTM_PL_FLAG_CACHED; | 697 | man->default_caching = TTM_PL_FLAG_CACHED; |
| 572 | break; | 698 | break; |
| 573 | case VMW_PL_GMR: | 699 | case VMW_PL_GMR: |
| 700 | case VMW_PL_MOB: | ||
| 574 | /* | 701 | /* |
| 575 | * "Guest Memory Regions" is an aperture like feature with | 702 | * "Guest Memory Regions" is an aperture like feature with |
| 576 | * one slot per bo. There is an upper limit of the number of | 703 | * one slot per bo. There is an upper limit of the number of |
| @@ -589,7 +716,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
| 589 | return 0; | 716 | return 0; |
| 590 | } | 717 | } |
| 591 | 718 | ||
| 592 | void vmw_evict_flags(struct ttm_buffer_object *bo, | 719 | static void vmw_evict_flags(struct ttm_buffer_object *bo, |
| 593 | struct ttm_placement *placement) | 720 | struct ttm_placement *placement) |
| 594 | { | 721 | { |
| 595 | *placement = vmw_sys_placement; | 722 | *placement = vmw_sys_placement; |
| @@ -618,6 +745,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg | |||
| 618 | switch (mem->mem_type) { | 745 | switch (mem->mem_type) { |
| 619 | case TTM_PL_SYSTEM: | 746 | case TTM_PL_SYSTEM: |
| 620 | case VMW_PL_GMR: | 747 | case VMW_PL_GMR: |
| 748 | case VMW_PL_MOB: | ||
| 621 | return 0; | 749 | return 0; |
| 622 | case TTM_PL_VRAM: | 750 | case TTM_PL_VRAM: |
| 623 | mem->bus.offset = mem->start << PAGE_SHIFT; | 751 | mem->bus.offset = mem->start << PAGE_SHIFT; |
| @@ -677,6 +805,38 @@ static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible) | |||
| 677 | VMW_FENCE_WAIT_TIMEOUT); | 805 | VMW_FENCE_WAIT_TIMEOUT); |
| 678 | } | 806 | } |
| 679 | 807 | ||
| 808 | /** | ||
| 809 | * vmw_move_notify - TTM move_notify callback | ||
| 810 | * | ||
| 811 | * @bo: The TTM buffer object about to move. | ||
| 812 | * @mem: The struct ttm_mem_reg indicating to what memory | ||
| 813 | * region the move is taking place. | ||
| 814 | * | ||
| 815 | * Calls move_notify for all subsystems needing it. | ||
| 816 | * (currently only resources). | ||
| 817 | */ | ||
| 818 | static void vmw_move_notify(struct ttm_buffer_object *bo, | ||
| 819 | struct ttm_mem_reg *mem) | ||
| 820 | { | ||
| 821 | vmw_resource_move_notify(bo, mem); | ||
| 822 | } | ||
| 823 | |||
| 824 | |||
| 825 | /** | ||
| 826 | * vmw_swap_notify - TTM swap_notify callback | ||
| 827 | * | ||
| 828 | * @bo: The TTM buffer object about to be swapped out. | ||
| 829 | */ | ||
| 830 | static void vmw_swap_notify(struct ttm_buffer_object *bo) | ||
| 831 | { | ||
| 832 | struct ttm_bo_device *bdev = bo->bdev; | ||
| 833 | |||
| 834 | spin_lock(&bdev->fence_lock); | ||
| 835 | ttm_bo_wait(bo, false, false, false); | ||
| 836 | spin_unlock(&bdev->fence_lock); | ||
| 837 | } | ||
| 838 | |||
| 839 | |||
| 680 | struct ttm_bo_driver vmw_bo_driver = { | 840 | struct ttm_bo_driver vmw_bo_driver = { |
| 681 | .ttm_tt_create = &vmw_ttm_tt_create, | 841 | .ttm_tt_create = &vmw_ttm_tt_create, |
| 682 | .ttm_tt_populate = &vmw_ttm_populate, | 842 | .ttm_tt_populate = &vmw_ttm_populate, |
| @@ -691,8 +851,8 @@ struct ttm_bo_driver vmw_bo_driver = { | |||
| 691 | .sync_obj_flush = vmw_sync_obj_flush, | 851 | .sync_obj_flush = vmw_sync_obj_flush, |
| 692 | .sync_obj_unref = vmw_sync_obj_unref, | 852 | .sync_obj_unref = vmw_sync_obj_unref, |
| 693 | .sync_obj_ref = vmw_sync_obj_ref, | 853 | .sync_obj_ref = vmw_sync_obj_ref, |
| 694 | .move_notify = NULL, | 854 | .move_notify = vmw_move_notify, |
| 695 | .swap_notify = NULL, | 855 | .swap_notify = vmw_swap_notify, |
| 696 | .fault_reserve_notify = &vmw_ttm_fault_reserve_notify, | 856 | .fault_reserve_notify = &vmw_ttm_fault_reserve_notify, |
| 697 | .io_mem_reserve = &vmw_ttm_io_mem_reserve, | 857 | .io_mem_reserve = &vmw_ttm_io_mem_reserve, |
| 698 | .io_mem_free = &vmw_ttm_io_mem_free, | 858 | .io_mem_free = &vmw_ttm_io_mem_free, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index 00ae0925aca8..1e80152674b5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c | |||
| @@ -32,12 +32,30 @@ | |||
| 32 | struct vmw_user_context { | 32 | struct vmw_user_context { |
| 33 | struct ttm_base_object base; | 33 | struct ttm_base_object base; |
| 34 | struct vmw_resource res; | 34 | struct vmw_resource res; |
| 35 | struct vmw_ctx_binding_state cbs; | ||
| 35 | }; | 36 | }; |
| 36 | 37 | ||
| 38 | |||
| 39 | |||
| 40 | typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool); | ||
| 41 | |||
| 37 | static void vmw_user_context_free(struct vmw_resource *res); | 42 | static void vmw_user_context_free(struct vmw_resource *res); |
| 38 | static struct vmw_resource * | 43 | static struct vmw_resource * |
| 39 | vmw_user_context_base_to_res(struct ttm_base_object *base); | 44 | vmw_user_context_base_to_res(struct ttm_base_object *base); |
| 40 | 45 | ||
| 46 | static int vmw_gb_context_create(struct vmw_resource *res); | ||
| 47 | static int vmw_gb_context_bind(struct vmw_resource *res, | ||
| 48 | struct ttm_validate_buffer *val_buf); | ||
| 49 | static int vmw_gb_context_unbind(struct vmw_resource *res, | ||
| 50 | bool readback, | ||
| 51 | struct ttm_validate_buffer *val_buf); | ||
| 52 | static int vmw_gb_context_destroy(struct vmw_resource *res); | ||
| 53 | static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind); | ||
| 54 | static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi, | ||
| 55 | bool rebind); | ||
| 56 | static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind); | ||
| 57 | static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs); | ||
| 58 | static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs); | ||
| 41 | static uint64_t vmw_user_context_size; | 59 | static uint64_t vmw_user_context_size; |
| 42 | 60 | ||
| 43 | static const struct vmw_user_resource_conv user_context_conv = { | 61 | static const struct vmw_user_resource_conv user_context_conv = { |
| @@ -62,6 +80,23 @@ static const struct vmw_res_func vmw_legacy_context_func = { | |||
| 62 | .unbind = NULL | 80 | .unbind = NULL |
| 63 | }; | 81 | }; |
| 64 | 82 | ||
| 83 | static const struct vmw_res_func vmw_gb_context_func = { | ||
| 84 | .res_type = vmw_res_context, | ||
| 85 | .needs_backup = true, | ||
| 86 | .may_evict = true, | ||
| 87 | .type_name = "guest backed contexts", | ||
| 88 | .backup_placement = &vmw_mob_placement, | ||
| 89 | .create = vmw_gb_context_create, | ||
| 90 | .destroy = vmw_gb_context_destroy, | ||
| 91 | .bind = vmw_gb_context_bind, | ||
| 92 | .unbind = vmw_gb_context_unbind | ||
| 93 | }; | ||
| 94 | |||
| 95 | static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = { | ||
| 96 | [vmw_ctx_binding_shader] = vmw_context_scrub_shader, | ||
| 97 | [vmw_ctx_binding_rt] = vmw_context_scrub_render_target, | ||
| 98 | [vmw_ctx_binding_tex] = vmw_context_scrub_texture }; | ||
| 99 | |||
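The vmw_scrub_funcs table above maps each binding type to its scrub handler. A minimal sketch of the dispatch pattern the kill/scrub helpers later in this file rely on (the wrapper name is illustrative, and bi->bt is assumed to have been range-checked when the binding was added):

    static int example_scrub_binding(struct vmw_ctx_bindinfo *bi, bool rebind)
    {
            /* rebind == false emits the scrub (unbind) command,
             * rebind == true re-emits the bind for a scrubbed entry. */
            return vmw_scrub_funcs[bi->bt](bi, rebind);
    }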
| 65 | /** | 100 | /** |
| 66 | * Context management: | 101 | * Context management: |
| 67 | */ | 102 | */ |
| @@ -76,6 +111,20 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) | |||
| 76 | } *cmd; | 111 | } *cmd; |
| 77 | 112 | ||
| 78 | 113 | ||
| 114 | if (res->func->destroy == vmw_gb_context_destroy) { | ||
| 115 | mutex_lock(&dev_priv->cmdbuf_mutex); | ||
| 116 | mutex_lock(&dev_priv->binding_mutex); | ||
| 117 | (void) vmw_context_binding_state_kill | ||
| 118 | (&container_of(res, struct vmw_user_context, res)->cbs); | ||
| 119 | (void) vmw_gb_context_destroy(res); | ||
| 120 | if (dev_priv->pinned_bo != NULL && | ||
| 121 | !dev_priv->query_cid_valid) | ||
| 122 | __vmw_execbuf_release_pinned_bo(dev_priv, NULL); | ||
| 123 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 124 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
| 125 | return; | ||
| 126 | } | ||
| 127 | |||
| 79 | vmw_execbuf_release_pinned_bo(dev_priv); | 128 | vmw_execbuf_release_pinned_bo(dev_priv); |
| 80 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 129 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
| 81 | if (unlikely(cmd == NULL)) { | 130 | if (unlikely(cmd == NULL)) { |
| @@ -92,6 +141,33 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) | |||
| 92 | vmw_3d_resource_dec(dev_priv, false); | 141 | vmw_3d_resource_dec(dev_priv, false); |
| 93 | } | 142 | } |
| 94 | 143 | ||
| 144 | static int vmw_gb_context_init(struct vmw_private *dev_priv, | ||
| 145 | struct vmw_resource *res, | ||
| 146 | void (*res_free) (struct vmw_resource *res)) | ||
| 147 | { | ||
| 148 | int ret; | ||
| 149 | struct vmw_user_context *uctx = | ||
| 150 | container_of(res, struct vmw_user_context, res); | ||
| 151 | |||
| 152 | ret = vmw_resource_init(dev_priv, res, true, | ||
| 153 | res_free, &vmw_gb_context_func); | ||
| 154 | res->backup_size = SVGA3D_CONTEXT_DATA_SIZE; | ||
| 155 | |||
| 156 | if (unlikely(ret != 0)) { | ||
| 157 | if (res_free) | ||
| 158 | res_free(res); | ||
| 159 | else | ||
| 160 | kfree(res); | ||
| 161 | return ret; | ||
| 162 | } | ||
| 163 | |||
| 164 | memset(&uctx->cbs, 0, sizeof(uctx->cbs)); | ||
| 165 | INIT_LIST_HEAD(&uctx->cbs.list); | ||
| 166 | |||
| 167 | vmw_resource_activate(res, vmw_hw_context_destroy); | ||
| 168 | return 0; | ||
| 169 | } | ||
| 170 | |||
| 95 | static int vmw_context_init(struct vmw_private *dev_priv, | 171 | static int vmw_context_init(struct vmw_private *dev_priv, |
| 96 | struct vmw_resource *res, | 172 | struct vmw_resource *res, |
| 97 | void (*res_free) (struct vmw_resource *res)) | 173 | void (*res_free) (struct vmw_resource *res)) |
| @@ -103,6 +179,9 @@ static int vmw_context_init(struct vmw_private *dev_priv, | |||
| 103 | SVGA3dCmdDefineContext body; | 179 | SVGA3dCmdDefineContext body; |
| 104 | } *cmd; | 180 | } *cmd; |
| 105 | 181 | ||
| 182 | if (dev_priv->has_mob) | ||
| 183 | return vmw_gb_context_init(dev_priv, res, res_free); | ||
| 184 | |||
| 106 | ret = vmw_resource_init(dev_priv, res, false, | 185 | ret = vmw_resource_init(dev_priv, res, false, |
| 107 | res_free, &vmw_legacy_context_func); | 186 | res_free, &vmw_legacy_context_func); |
| 108 | 187 | ||
| @@ -154,6 +233,180 @@ struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv) | |||
| 154 | return (ret == 0) ? res : NULL; | 233 | return (ret == 0) ? res : NULL; |
| 155 | } | 234 | } |
| 156 | 235 | ||
| 236 | |||
| 237 | static int vmw_gb_context_create(struct vmw_resource *res) | ||
| 238 | { | ||
| 239 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 240 | int ret; | ||
| 241 | struct { | ||
| 242 | SVGA3dCmdHeader header; | ||
| 243 | SVGA3dCmdDefineGBContext body; | ||
| 244 | } *cmd; | ||
| 245 | |||
| 246 | if (likely(res->id != -1)) | ||
| 247 | return 0; | ||
| 248 | |||
| 249 | ret = vmw_resource_alloc_id(res); | ||
| 250 | if (unlikely(ret != 0)) { | ||
| 251 | DRM_ERROR("Failed to allocate a context id.\n"); | ||
| 252 | goto out_no_id; | ||
| 253 | } | ||
| 254 | |||
| 255 | if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) { | ||
| 256 | ret = -EBUSY; | ||
| 257 | goto out_no_fifo; | ||
| 258 | } | ||
| 259 | |||
| 260 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 261 | if (unlikely(cmd == NULL)) { | ||
| 262 | DRM_ERROR("Failed reserving FIFO space for context " | ||
| 263 | "creation.\n"); | ||
| 264 | ret = -ENOMEM; | ||
| 265 | goto out_no_fifo; | ||
| 266 | } | ||
| 267 | |||
| 268 | cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT; | ||
| 269 | cmd->header.size = sizeof(cmd->body); | ||
| 270 | cmd->body.cid = res->id; | ||
| 271 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 272 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
| 273 | |||
| 274 | return 0; | ||
| 275 | |||
| 276 | out_no_fifo: | ||
| 277 | vmw_resource_release_id(res); | ||
| 278 | out_no_id: | ||
| 279 | return ret; | ||
| 280 | } | ||
| 281 | |||
| 282 | static int vmw_gb_context_bind(struct vmw_resource *res, | ||
| 283 | struct ttm_validate_buffer *val_buf) | ||
| 284 | { | ||
| 285 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 286 | struct { | ||
| 287 | SVGA3dCmdHeader header; | ||
| 288 | SVGA3dCmdBindGBContext body; | ||
| 289 | } *cmd; | ||
| 290 | struct ttm_buffer_object *bo = val_buf->bo; | ||
| 291 | |||
| 292 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
| 293 | |||
| 294 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 295 | if (unlikely(cmd == NULL)) { | ||
| 296 | DRM_ERROR("Failed reserving FIFO space for context " | ||
| 297 | "binding.\n"); | ||
| 298 | return -ENOMEM; | ||
| 299 | } | ||
| 300 | |||
| 301 | cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; | ||
| 302 | cmd->header.size = sizeof(cmd->body); | ||
| 303 | cmd->body.cid = res->id; | ||
| 304 | cmd->body.mobid = bo->mem.start; | ||
| 305 | cmd->body.validContents = res->backup_dirty; | ||
| 306 | res->backup_dirty = false; | ||
| 307 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 308 | |||
| 309 | return 0; | ||
| 310 | } | ||
| 311 | |||
| 312 | static int vmw_gb_context_unbind(struct vmw_resource *res, | ||
| 313 | bool readback, | ||
| 314 | struct ttm_validate_buffer *val_buf) | ||
| 315 | { | ||
| 316 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 317 | struct ttm_buffer_object *bo = val_buf->bo; | ||
| 318 | struct vmw_fence_obj *fence; | ||
| 319 | struct vmw_user_context *uctx = | ||
| 320 | container_of(res, struct vmw_user_context, res); | ||
| 321 | |||
| 322 | struct { | ||
| 323 | SVGA3dCmdHeader header; | ||
| 324 | SVGA3dCmdReadbackGBContext body; | ||
| 325 | } *cmd1; | ||
| 326 | struct { | ||
| 327 | SVGA3dCmdHeader header; | ||
| 328 | SVGA3dCmdBindGBContext body; | ||
| 329 | } *cmd2; | ||
| 330 | uint32_t submit_size; | ||
| 331 | uint8_t *cmd; | ||
| 332 | |||
| 333 | |||
| 334 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
| 335 | |||
| 336 | mutex_lock(&dev_priv->binding_mutex); | ||
| 337 | vmw_context_binding_state_scrub(&uctx->cbs); | ||
| 338 | |||
| 339 | submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); | ||
| 340 | |||
| 341 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | ||
| 342 | if (unlikely(cmd == NULL)) { | ||
| 343 | DRM_ERROR("Failed reserving FIFO space for context " | ||
| 344 | "unbinding.\n"); | ||
| 345 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 346 | return -ENOMEM; | ||
| 347 | } | ||
| 348 | |||
| 349 | cmd2 = (void *) cmd; | ||
| 350 | if (readback) { | ||
| 351 | cmd1 = (void *) cmd; | ||
| 352 | cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT; | ||
| 353 | cmd1->header.size = sizeof(cmd1->body); | ||
| 354 | cmd1->body.cid = res->id; | ||
| 355 | cmd2 = (void *) (&cmd1[1]); | ||
| 356 | } | ||
| 357 | cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; | ||
| 358 | cmd2->header.size = sizeof(cmd2->body); | ||
| 359 | cmd2->body.cid = res->id; | ||
| 360 | cmd2->body.mobid = SVGA3D_INVALID_ID; | ||
| 361 | |||
| 362 | vmw_fifo_commit(dev_priv, submit_size); | ||
| 363 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 364 | |||
| 365 | /* | ||
| 366 | * Create a fence object and fence the backup buffer. | ||
| 367 | */ | ||
| 368 | |||
| 369 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, | ||
| 370 | &fence, NULL); | ||
| 371 | |||
| 372 | vmw_fence_single_bo(bo, fence); | ||
| 373 | |||
| 374 | if (likely(fence != NULL)) | ||
| 375 | vmw_fence_obj_unreference(&fence); | ||
| 376 | |||
| 377 | return 0; | ||
| 378 | } | ||
| 379 | |||
| 380 | static int vmw_gb_context_destroy(struct vmw_resource *res) | ||
| 381 | { | ||
| 382 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 383 | struct { | ||
| 384 | SVGA3dCmdHeader header; | ||
| 385 | SVGA3dCmdDestroyGBContext body; | ||
| 386 | } *cmd; | ||
| 387 | |||
| 388 | if (likely(res->id == -1)) | ||
| 389 | return 0; | ||
| 390 | |||
| 391 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 392 | if (unlikely(cmd == NULL)) { | ||
| 393 | DRM_ERROR("Failed reserving FIFO space for context " | ||
| 394 | "destruction.\n"); | ||
| 395 | return -ENOMEM; | ||
| 396 | } | ||
| 397 | |||
| 398 | cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT; | ||
| 399 | cmd->header.size = sizeof(cmd->body); | ||
| 400 | cmd->body.cid = res->id; | ||
| 401 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 402 | if (dev_priv->query_cid == res->id) | ||
| 403 | dev_priv->query_cid_valid = false; | ||
| 404 | vmw_resource_release_id(res); | ||
| 405 | vmw_3d_resource_dec(dev_priv, false); | ||
| 406 | |||
| 407 | return 0; | ||
| 408 | } | ||
| 409 | |||
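Taken together, the create/bind/unbind/destroy callbacks above implement the guest-backed context lifecycle advertised by vmw_gb_context_func. A hedged flow sketch of how the generic resource code is assumed to drive them during validation and eviction (function names here are illustrative; the driver really goes through the vmwgfx_resource.c helpers):

    static int example_gb_context_validate(struct vmw_resource *res,
                                           struct ttm_validate_buffer *val_buf)
    {
            int ret;

            ret = res->func->create(res);          /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
            if (unlikely(ret != 0))
                    return ret;

            return res->func->bind(res, val_buf);  /* bind the backup MOB */
    }

    static int example_gb_context_evict(struct vmw_resource *res,
                                        struct ttm_validate_buffer *val_buf)
    {
            /* Read contents back into the MOB before detaching it. */
            return res->func->unbind(res, true, val_buf);
    }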
| 157 | /** | 410 | /** |
| 158 | * User-space context management: | 411 | * User-space context management: |
| 159 | */ | 412 | */ |
| @@ -272,3 +525,380 @@ out_unlock: | |||
| 272 | return ret; | 525 | return ret; |
| 273 | 526 | ||
| 274 | } | 527 | } |
| 528 | |||
| 529 | /** | ||
| 530 | * vmw_context_scrub_shader - scrub a shader binding from a context. | ||
| 531 | * | ||
| 532 | * @bi: single binding information. | ||
| 533 | * @rebind: Whether to issue a bind instead of a scrub command. | ||
| 534 | */ | ||
| 535 | static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind) | ||
| 536 | { | ||
| 537 | struct vmw_private *dev_priv = bi->ctx->dev_priv; | ||
| 538 | struct { | ||
| 539 | SVGA3dCmdHeader header; | ||
| 540 | SVGA3dCmdSetShader body; | ||
| 541 | } *cmd; | ||
| 542 | |||
| 543 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 544 | if (unlikely(cmd == NULL)) { | ||
| 545 | DRM_ERROR("Failed reserving FIFO space for shader " | ||
| 546 | "unbinding.\n"); | ||
| 547 | return -ENOMEM; | ||
| 548 | } | ||
| 549 | |||
| 550 | cmd->header.id = SVGA_3D_CMD_SET_SHADER; | ||
| 551 | cmd->header.size = sizeof(cmd->body); | ||
| 552 | cmd->body.cid = bi->ctx->id; | ||
| 553 | cmd->body.type = bi->i1.shader_type; | ||
| 554 | cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); | ||
| 555 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 556 | |||
| 557 | return 0; | ||
| 558 | } | ||
| 559 | |||
| 560 | /** | ||
| 561 | * vmw_context_scrub_render_target - scrub a render target binding | ||
| 562 | * from a context. | ||
| 563 | * | ||
| 564 | * @bi: single binding information. | ||
| 565 | * @rebind: Whether to issue a bind instead of a scrub command. | ||
| 566 | */ | ||
| 567 | static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi, | ||
| 568 | bool rebind) | ||
| 569 | { | ||
| 570 | struct vmw_private *dev_priv = bi->ctx->dev_priv; | ||
| 571 | struct { | ||
| 572 | SVGA3dCmdHeader header; | ||
| 573 | SVGA3dCmdSetRenderTarget body; | ||
| 574 | } *cmd; | ||
| 575 | |||
| 576 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 577 | if (unlikely(cmd == NULL)) { | ||
| 578 | DRM_ERROR("Failed reserving FIFO space for render target " | ||
| 579 | "unbinding.\n"); | ||
| 580 | return -ENOMEM; | ||
| 581 | } | ||
| 582 | |||
| 583 | cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET; | ||
| 584 | cmd->header.size = sizeof(cmd->body); | ||
| 585 | cmd->body.cid = bi->ctx->id; | ||
| 586 | cmd->body.type = bi->i1.rt_type; | ||
| 587 | cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); | ||
| 588 | cmd->body.target.face = 0; | ||
| 589 | cmd->body.target.mipmap = 0; | ||
| 590 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 591 | |||
| 592 | return 0; | ||
| 593 | } | ||
| 594 | |||
| 595 | /** | ||
| 596 | * vmw_context_scrub_texture - scrub a texture binding from a context. | ||
| 597 | * | ||
| 598 | * @bi: single binding information. | ||
| 599 | * @rebind: Whether to issue a bind instead of a scrub command. | ||
| 600 | * | ||
| 601 | * TODO: Possibly complement this function with a function that takes | ||
| 602 | * a list of texture bindings and combines them to a single command. | ||
| 603 | */ | ||
| 604 | static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, | ||
| 605 | bool rebind) | ||
| 606 | { | ||
| 607 | struct vmw_private *dev_priv = bi->ctx->dev_priv; | ||
| 608 | struct { | ||
| 609 | SVGA3dCmdHeader header; | ||
| 610 | struct { | ||
| 611 | SVGA3dCmdSetTextureState c; | ||
| 612 | SVGA3dTextureState s1; | ||
| 613 | } body; | ||
| 614 | } *cmd; | ||
| 615 | |||
| 616 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 617 | if (unlikely(cmd == NULL)) { | ||
| 618 | DRM_ERROR("Failed reserving FIFO space for texture " | ||
| 619 | "unbinding.\n"); | ||
| 620 | return -ENOMEM; | ||
| 621 | } | ||
| 622 | |||
| 623 | |||
| 624 | cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE; | ||
| 625 | cmd->header.size = sizeof(cmd->body); | ||
| 626 | cmd->body.c.cid = bi->ctx->id; | ||
| 627 | cmd->body.s1.stage = bi->i1.texture_stage; | ||
| 628 | cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; | ||
| 629 | cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); | ||
| 630 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 631 | |||
| 632 | return 0; | ||
| 633 | } | ||
| 634 | |||
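A possible shape for the TODO above: scrubbing several texture stages with a single SVGA_3D_CMD_SETTEXTURESTATE command. This is only a sketch under the assumption that the fixed SVGA3dCmdSetTextureState part may be followed by one SVGA3dTextureState entry per stage, exactly as in the single-stage version above; the function name is hypothetical:

    static int example_scrub_textures(struct vmw_ctx_bindinfo **bis,
                                      unsigned int count)
    {
            struct vmw_private *dev_priv = bis[0]->ctx->dev_priv;
            SVGA3dCmdHeader *header;
            SVGA3dCmdSetTextureState *c;
            SVGA3dTextureState *s;
            uint32_t body_size = sizeof(*c) + count * sizeof(*s);
            unsigned int i;

            header = vmw_fifo_reserve(dev_priv, sizeof(*header) + body_size);
            if (unlikely(header == NULL))
                    return -ENOMEM;

            header->id = SVGA_3D_CMD_SETTEXTURESTATE;
            header->size = body_size;
            c = (SVGA3dCmdSetTextureState *) &header[1];
            c->cid = bis[0]->ctx->id;
            s = (SVGA3dTextureState *) &c[1];
            for (i = 0; i < count; ++i, ++s) {
                    s->stage = bis[i]->i1.texture_stage;
                    s->name = SVGA3D_TS_BIND_TEXTURE;
                    s->value = SVGA3D_INVALID_ID;
            }
            vmw_fifo_commit(dev_priv, sizeof(*header) + body_size);

            return 0;
    }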
| 635 | /** | ||
| 636 | * vmw_context_binding_drop - Stop tracking a context binding | ||
| 637 | * | ||
| 638 | * @cb: Pointer to binding tracker storage. | ||
| 639 | * | ||
| 640 | * Stops tracking a context binding, and re-initializes its storage. | ||
| 641 | * Typically used when the context binding is replaced with a binding to | ||
| 642 | * another (or the same, for that matter) resource. | ||
| 643 | */ | ||
| 644 | static void vmw_context_binding_drop(struct vmw_ctx_binding *cb) | ||
| 645 | { | ||
| 646 | list_del(&cb->ctx_list); | ||
| 647 | if (!list_empty(&cb->res_list)) | ||
| 648 | list_del(&cb->res_list); | ||
| 649 | cb->bi.ctx = NULL; | ||
| 650 | } | ||
| 651 | |||
| 652 | /** | ||
| 653 | * vmw_context_binding_add - Start tracking a context binding | ||
| 654 | * | ||
| 655 | * @cbs: Pointer to the context binding state tracker. | ||
| 656 | * @bi: Information about the binding to track. | ||
| 657 | * | ||
| 658 | * Performs basic checks on the binding to make sure arguments are within | ||
| 659 | * bounds and then starts tracking the binding in the context binding | ||
| 660 | * state structure @cbs. | ||
| 661 | */ | ||
| 662 | int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, | ||
| 663 | const struct vmw_ctx_bindinfo *bi) | ||
| 664 | { | ||
| 665 | struct vmw_ctx_binding *loc; | ||
| 666 | |||
| 667 | switch (bi->bt) { | ||
| 668 | case vmw_ctx_binding_rt: | ||
| 669 | if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) { | ||
| 670 | DRM_ERROR("Illegal render target type %u.\n", | ||
| 671 | (unsigned) bi->i1.rt_type); | ||
| 672 | return -EINVAL; | ||
| 673 | } | ||
| 674 | loc = &cbs->render_targets[bi->i1.rt_type]; | ||
| 675 | break; | ||
| 676 | case vmw_ctx_binding_tex: | ||
| 677 | if (unlikely((unsigned)bi->i1.texture_stage >= | ||
| 678 | SVGA3D_NUM_TEXTURE_UNITS)) { | ||
| 679 | DRM_ERROR("Illegal texture/sampler unit %u.\n", | ||
| 680 | (unsigned) bi->i1.texture_stage); | ||
| 681 | return -EINVAL; | ||
| 682 | } | ||
| 683 | loc = &cbs->texture_units[bi->i1.texture_stage]; | ||
| 684 | break; | ||
| 685 | case vmw_ctx_binding_shader: | ||
| 686 | if (unlikely((unsigned)bi->i1.shader_type >= | ||
| 687 | SVGA3D_SHADERTYPE_MAX)) { | ||
| 688 | DRM_ERROR("Illegal shader type %u.\n", | ||
| 689 | (unsigned) bi->i1.shader_type); | ||
| 690 | return -EINVAL; | ||
| 691 | } | ||
| 692 | loc = &cbs->shaders[bi->i1.shader_type]; | ||
| 693 | break; | ||
| 694 | default: | ||
| 695 | BUG(); | ||
| 696 | } | ||
| 697 | |||
| 698 | if (loc->bi.ctx != NULL) | ||
| 699 | vmw_context_binding_drop(loc); | ||
| 700 | |||
| 701 | loc->bi = *bi; | ||
| 702 | loc->bi.scrubbed = false; | ||
| 703 | list_add_tail(&loc->ctx_list, &cbs->list); | ||
| 704 | INIT_LIST_HEAD(&loc->res_list); | ||
| 705 | |||
| 706 | return 0; | ||
| 707 | } | ||
| 708 | |||
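A hypothetical caller sketch for the function above: staging a vertex-shader binding while the execbuf code parses an SVGA_3D_CMD_SET_SHADER command. The surrounding variable names (sw_context, ctx_res, shader_res) and the wrapper itself are illustrative, not taken from the actual command verifier:

    static int example_stage_shader_binding(struct vmw_sw_context *sw_context,
                                            struct vmw_resource *ctx_res,
                                            struct vmw_resource *shader_res)
    {
            struct vmw_ctx_bindinfo bi = {
                    .ctx = ctx_res,
                    .res = shader_res,
                    .bt = vmw_ctx_binding_shader,
                    .i1.shader_type = SVGA3D_SHADERTYPE_VS,
            };

            /* Tracked in the staged state first; committed to the context
             * only once the command batch has been accepted. */
            return vmw_context_binding_add(&sw_context->staged_bindings, &bi);
    }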
| 709 | /** | ||
| 710 | * vmw_context_binding_transfer - Transfer a context binding tracking entry. | ||
| 711 | * | ||
| 712 | * @cbs: Pointer to the persistent context binding state tracker. | ||
| 713 | * @bi: Information about the binding to track. | ||
| 714 | * | ||
| 715 | */ | ||
| 716 | static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs, | ||
| 717 | const struct vmw_ctx_bindinfo *bi) | ||
| 718 | { | ||
| 719 | struct vmw_ctx_binding *loc; | ||
| 720 | |||
| 721 | switch (bi->bt) { | ||
| 722 | case vmw_ctx_binding_rt: | ||
| 723 | loc = &cbs->render_targets[bi->i1.rt_type]; | ||
| 724 | break; | ||
| 725 | case vmw_ctx_binding_tex: | ||
| 726 | loc = &cbs->texture_units[bi->i1.texture_stage]; | ||
| 727 | break; | ||
| 728 | case vmw_ctx_binding_shader: | ||
| 729 | loc = &cbs->shaders[bi->i1.shader_type]; | ||
| 730 | break; | ||
| 731 | default: | ||
| 732 | BUG(); | ||
| 733 | } | ||
| 734 | |||
| 735 | if (loc->bi.ctx != NULL) | ||
| 736 | vmw_context_binding_drop(loc); | ||
| 737 | |||
| 738 | if (bi->res != NULL) { | ||
| 739 | loc->bi = *bi; | ||
| 740 | list_add_tail(&loc->ctx_list, &cbs->list); | ||
| 741 | list_add_tail(&loc->res_list, &bi->res->binding_head); | ||
| 742 | } | ||
| 743 | } | ||
| 744 | |||
| 745 | /** | ||
| 746 | * vmw_context_binding_kill - Kill a binding on the device | ||
| 747 | * and stop tracking it. | ||
| 748 | * | ||
| 749 | * @cb: Pointer to binding tracker storage. | ||
| 750 | * | ||
| 751 | * Emits FIFO commands to scrub a binding represented by @cb. | ||
| 752 | * Then stops tracking the binding and re-initializes its storage. | ||
| 753 | */ | ||
| 754 | static void vmw_context_binding_kill(struct vmw_ctx_binding *cb) | ||
| 755 | { | ||
| 756 | if (!cb->bi.scrubbed) { | ||
| 757 | (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false); | ||
| 758 | cb->bi.scrubbed = true; | ||
| 759 | } | ||
| 760 | vmw_context_binding_drop(cb); | ||
| 761 | } | ||
| 762 | |||
| 763 | /** | ||
| 764 | * vmw_context_binding_state_kill - Kill all bindings associated with a | ||
| 765 | * struct vmw_ctx_binding state structure, and re-initialize the structure. | ||
| 766 | * | ||
| 767 | * @cbs: Pointer to the context binding state tracker. | ||
| 768 | * | ||
| 769 | * Emits commands to scrub all bindings associated with the | ||
| 770 | * context binding state tracker. Then re-initializes the whole structure. | ||
| 771 | */ | ||
| 772 | static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs) | ||
| 773 | { | ||
| 774 | struct vmw_ctx_binding *entry, *next; | ||
| 775 | |||
| 776 | list_for_each_entry_safe(entry, next, &cbs->list, ctx_list) | ||
| 777 | vmw_context_binding_kill(entry); | ||
| 778 | } | ||
| 779 | |||
| 780 | /** | ||
| 781 | * vmw_context_binding_state_scrub - Scrub all bindings associated with a | ||
| 782 | * struct vmw_ctx_binding state structure. | ||
| 783 | * | ||
| 784 | * @cbs: Pointer to the context binding state tracker. | ||
| 785 | * | ||
| 786 | * Emits commands to scrub all bindings associated with the | ||
| 787 | * context binding state tracker. | ||
| 788 | */ | ||
| 789 | static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs) | ||
| 790 | { | ||
| 791 | struct vmw_ctx_binding *entry; | ||
| 792 | |||
| 793 | list_for_each_entry(entry, &cbs->list, ctx_list) { | ||
| 794 | if (!entry->bi.scrubbed) { | ||
| 795 | (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false); | ||
| 796 | entry->bi.scrubbed = true; | ||
| 797 | } | ||
| 798 | } | ||
| 799 | } | ||
| 800 | |||
| 801 | /** | ||
| 802 | * vmw_context_binding_res_list_kill - Kill all bindings on a | ||
| 803 | * resource binding list | ||
| 804 | * | ||
| 805 | * @head: list head of resource binding list | ||
| 806 | * | ||
| 807 | * Kills all bindings associated with a specific resource. Typically | ||
| 808 | * called before the resource is destroyed. | ||
| 809 | */ | ||
| 810 | void vmw_context_binding_res_list_kill(struct list_head *head) | ||
| 811 | { | ||
| 812 | struct vmw_ctx_binding *entry, *next; | ||
| 813 | |||
| 814 | list_for_each_entry_safe(entry, next, head, res_list) | ||
| 815 | vmw_context_binding_kill(entry); | ||
| 816 | } | ||
| 817 | |||
| 818 | /** | ||
| 819 | * vmw_context_binding_res_list_scrub - Scrub all bindings on a | ||
| 820 | * resource binding list | ||
| 821 | * | ||
| 822 | * @head: list head of resource binding list | ||
| 823 | * | ||
| 824 | * Scrubs all bindings associated with a specific resource. Typically | ||
| 825 | * called before the resource is evicted. | ||
| 826 | */ | ||
| 827 | void vmw_context_binding_res_list_scrub(struct list_head *head) | ||
| 828 | { | ||
| 829 | struct vmw_ctx_binding *entry; | ||
| 830 | |||
| 831 | list_for_each_entry(entry, head, res_list) { | ||
| 832 | if (!entry->bi.scrubbed) { | ||
| 833 | (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false); | ||
| 834 | entry->bi.scrubbed = true; | ||
| 835 | } | ||
| 836 | } | ||
| 837 | } | ||
| 838 | |||
| 839 | /** | ||
| 840 | * vmw_context_binding_state_transfer - Commit staged binding info | ||
| 841 | * | ||
| 842 | * @ctx: Pointer to context to commit the staged binding info to. | ||
| 843 | * @from: Staged binding info built during execbuf. | ||
| 844 | * | ||
| 845 | * Transfers binding info from a temporary structure to the persistent | ||
| 846 | * structure in the context. This can be done once commands have been submitted to hardware. | ||
| 847 | */ | ||
| 848 | void vmw_context_binding_state_transfer(struct vmw_resource *ctx, | ||
| 849 | struct vmw_ctx_binding_state *from) | ||
| 850 | { | ||
| 851 | struct vmw_user_context *uctx = | ||
| 852 | container_of(ctx, struct vmw_user_context, res); | ||
| 853 | struct vmw_ctx_binding *entry, *next; | ||
| 854 | |||
| 855 | list_for_each_entry_safe(entry, next, &from->list, ctx_list) | ||
| 856 | vmw_context_binding_transfer(&uctx->cbs, &entry->bi); | ||
| 857 | } | ||
| 858 | |||
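A hedged sketch of the commit step implied above, performed once the submitted commands have been accepted. That the caller takes binding_mutex here mirrors the locking comments in vmwgfx_drv.h and is an assumption of this sketch, as is the function name:

    static void example_commit_staged_bindings(struct vmw_private *dev_priv,
                                               struct vmw_resource *ctx_res,
                                               struct vmw_sw_context *sw_context)
    {
            mutex_lock(&dev_priv->binding_mutex);
            vmw_context_binding_state_transfer(ctx_res,
                                               &sw_context->staged_bindings);
            mutex_unlock(&dev_priv->binding_mutex);
    }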
| 859 | /** | ||
| 860 | * vmw_context_rebind_all - Rebind all scrubbed bindings of a context | ||
| 861 | * | ||
| 862 | * @ctx: The context resource | ||
| 863 | * | ||
| 864 | * Walks through the context binding list and rebinds all scrubbed | ||
| 865 | * resources. | ||
| 866 | */ | ||
| 867 | int vmw_context_rebind_all(struct vmw_resource *ctx) | ||
| 868 | { | ||
| 869 | struct vmw_ctx_binding *entry; | ||
| 870 | struct vmw_user_context *uctx = | ||
| 871 | container_of(ctx, struct vmw_user_context, res); | ||
| 872 | struct vmw_ctx_binding_state *cbs = &uctx->cbs; | ||
| 873 | int ret; | ||
| 874 | |||
| 875 | list_for_each_entry(entry, &cbs->list, ctx_list) { | ||
| 876 | if (likely(!entry->bi.scrubbed)) | ||
| 877 | continue; | ||
| 878 | |||
| 879 | if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id == | ||
| 880 | SVGA3D_INVALID_ID)) | ||
| 881 | continue; | ||
| 882 | |||
| 883 | ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true); | ||
| 884 | if (unlikely(ret != 0)) | ||
| 885 | return ret; | ||
| 886 | |||
| 887 | entry->bi.scrubbed = false; | ||
| 888 | } | ||
| 889 | |||
| 890 | return 0; | ||
| 891 | } | ||
| 892 | |||
| 893 | /** | ||
| 894 | * vmw_context_binding_list - Return a list of context bindings | ||
| 895 | * | ||
| 896 | * @ctx: The context resource | ||
| 897 | * | ||
| 898 | * Returns the current list of bindings of the given context. Note that | ||
| 899 | * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked. | ||
| 900 | */ | ||
| 901 | struct list_head *vmw_context_binding_list(struct vmw_resource *ctx) | ||
| 902 | { | ||
| 903 | return &(container_of(ctx, struct vmw_user_context, res)->cbs.list); | ||
| 904 | } | ||
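Hedged usage sketch for the helper above; as its comment notes, the returned list is only stable while dev_priv->binding_mutex is held, so an illustrative walker would look like:

    static void example_walk_bindings(struct vmw_private *dev_priv,
                                      struct vmw_resource *ctx)
    {
            struct vmw_ctx_binding *entry;

            mutex_lock(&dev_priv->binding_mutex);
            list_for_each_entry(entry, vmw_context_binding_list(ctx), ctx_list) {
                    /* Inspect entry->bi here. */
            }
            mutex_unlock(&dev_priv->binding_mutex);
    }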
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c index d4e54fcc0acd..a75840211b3c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | |||
| @@ -290,8 +290,7 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo, | |||
| 290 | /** | 290 | /** |
| 291 | * vmw_bo_pin - Pin or unpin a buffer object without moving it. | 291 | * vmw_bo_pin - Pin or unpin a buffer object without moving it. |
| 292 | * | 292 | * |
| 293 | * @bo: The buffer object. Must be reserved, and present either in VRAM | 293 | * @bo: The buffer object. Must be reserved. |
| 294 | * or GMR memory. | ||
| 295 | * @pin: Whether to pin or unpin. | 294 | * @pin: Whether to pin or unpin. |
| 296 | * | 295 | * |
| 297 | */ | 296 | */ |
| @@ -303,10 +302,9 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin) | |||
| 303 | int ret; | 302 | int ret; |
| 304 | 303 | ||
| 305 | lockdep_assert_held(&bo->resv->lock.base); | 304 | lockdep_assert_held(&bo->resv->lock.base); |
| 306 | BUG_ON(old_mem_type != TTM_PL_VRAM && | ||
| 307 | old_mem_type != VMW_PL_GMR); | ||
| 308 | 305 | ||
| 309 | pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED; | 306 | pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB |
| 307 | | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; | ||
| 310 | if (pin) | 308 | if (pin) |
| 311 | pl_flags |= TTM_PL_FLAG_NO_EVICT; | 309 | pl_flags |= TTM_PL_FLAG_NO_EVICT; |
| 312 | 310 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index c7a549694e59..0083cbf99edf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
| @@ -112,6 +112,21 @@ | |||
| 112 | #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ | 112 | #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ |
| 113 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ | 113 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ |
| 114 | struct drm_vmw_update_layout_arg) | 114 | struct drm_vmw_update_layout_arg) |
| 115 | #define DRM_IOCTL_VMW_CREATE_SHADER \ | ||
| 116 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \ | ||
| 117 | struct drm_vmw_shader_create_arg) | ||
| 118 | #define DRM_IOCTL_VMW_UNREF_SHADER \ | ||
| 119 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \ | ||
| 120 | struct drm_vmw_shader_arg) | ||
| 121 | #define DRM_IOCTL_VMW_GB_SURFACE_CREATE \ | ||
| 122 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \ | ||
| 123 | union drm_vmw_gb_surface_create_arg) | ||
| 124 | #define DRM_IOCTL_VMW_GB_SURFACE_REF \ | ||
| 125 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \ | ||
| 126 | union drm_vmw_gb_surface_reference_arg) | ||
| 127 | #define DRM_IOCTL_VMW_SYNCCPU \ | ||
| 128 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ | ||
| 129 | struct drm_vmw_synccpu_arg) | ||
| 115 | 130 | ||
| 116 | /** | 131 | /** |
| 117 | * The core DRM version of this macro doesn't account for | 132 | * The core DRM version of this macro doesn't account for |
| @@ -177,6 +192,21 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { | |||
| 177 | VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, | 192 | VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, |
| 178 | vmw_kms_update_layout_ioctl, | 193 | vmw_kms_update_layout_ioctl, |
| 179 | DRM_MASTER | DRM_UNLOCKED), | 194 | DRM_MASTER | DRM_UNLOCKED), |
| 195 | VMW_IOCTL_DEF(VMW_CREATE_SHADER, | ||
| 196 | vmw_shader_define_ioctl, | ||
| 197 | DRM_AUTH | DRM_UNLOCKED), | ||
| 198 | VMW_IOCTL_DEF(VMW_UNREF_SHADER, | ||
| 199 | vmw_shader_destroy_ioctl, | ||
| 200 | DRM_AUTH | DRM_UNLOCKED), | ||
| 201 | VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE, | ||
| 202 | vmw_gb_surface_define_ioctl, | ||
| 203 | DRM_AUTH | DRM_UNLOCKED), | ||
| 204 | VMW_IOCTL_DEF(VMW_GB_SURFACE_REF, | ||
| 205 | vmw_gb_surface_reference_ioctl, | ||
| 206 | DRM_AUTH | DRM_UNLOCKED), | ||
| 207 | VMW_IOCTL_DEF(VMW_SYNCCPU, | ||
| 208 | vmw_user_dmabuf_synccpu_ioctl, | ||
| 209 | DRM_AUTH | DRM_UNLOCKED), | ||
| 180 | }; | 210 | }; |
| 181 | 211 | ||
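For the new entries above, a hypothetical user-space sketch (libdrm) of exercising DRM_VMW_CREATE_SHADER; the argument layout is defined by the vmwgfx_drm.h UAPI header and is deliberately not reproduced here, so the field assignments are left to that header:

    #include <string.h>
    #include <xf86drm.h>
    #include "vmwgfx_drm.h"

    static int example_create_shader(int fd)
    {
            struct drm_vmw_shader_create_arg arg;

            memset(&arg, 0, sizeof(arg));
            /* Fill in the backing buffer handle, byte size and shader type
             * as defined by vmwgfx_drm.h before submitting. */
            return drmCommandWriteRead(fd, DRM_VMW_CREATE_SHADER,
                                       &arg, sizeof(arg));
    }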
| 182 | static struct pci_device_id vmw_pci_id_list[] = { | 212 | static struct pci_device_id vmw_pci_id_list[] = { |
| @@ -189,6 +219,7 @@ static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON); | |||
| 189 | static int vmw_force_iommu; | 219 | static int vmw_force_iommu; |
| 190 | static int vmw_restrict_iommu; | 220 | static int vmw_restrict_iommu; |
| 191 | static int vmw_force_coherent; | 221 | static int vmw_force_coherent; |
| 222 | static int vmw_restrict_dma_mask; | ||
| 192 | 223 | ||
| 193 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); | 224 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); |
| 194 | static void vmw_master_init(struct vmw_master *); | 225 | static void vmw_master_init(struct vmw_master *); |
| @@ -203,6 +234,8 @@ MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages"); | |||
| 203 | module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); | 234 | module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); |
| 204 | MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); | 235 | MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); |
| 205 | module_param_named(force_coherent, vmw_force_coherent, int, 0600); | 236 | module_param_named(force_coherent, vmw_force_coherent, int, 0600); |
| 237 | MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); | ||
| 238 | module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); | ||
| 206 | 239 | ||
| 207 | 240 | ||
| 208 | static void vmw_print_capabilities(uint32_t capabilities) | 241 | static void vmw_print_capabilities(uint32_t capabilities) |
| @@ -240,38 +273,52 @@ static void vmw_print_capabilities(uint32_t capabilities) | |||
| 240 | DRM_INFO(" GMR2.\n"); | 273 | DRM_INFO(" GMR2.\n"); |
| 241 | if (capabilities & SVGA_CAP_SCREEN_OBJECT_2) | 274 | if (capabilities & SVGA_CAP_SCREEN_OBJECT_2) |
| 242 | DRM_INFO(" Screen Object 2.\n"); | 275 | DRM_INFO(" Screen Object 2.\n"); |
| 276 | if (capabilities & SVGA_CAP_COMMAND_BUFFERS) | ||
| 277 | DRM_INFO(" Command Buffers.\n"); | ||
| 278 | if (capabilities & SVGA_CAP_CMD_BUFFERS_2) | ||
| 279 | DRM_INFO(" Command Buffers 2.\n"); | ||
| 280 | if (capabilities & SVGA_CAP_GBOBJECTS) | ||
| 281 | DRM_INFO(" Guest Backed Resources.\n"); | ||
| 243 | } | 282 | } |
| 244 | 283 | ||
| 245 | |||
| 246 | /** | 284 | /** |
| 247 | * vmw_execbuf_prepare_dummy_query - Initialize a query result structure at | 285 | * vmw_dummy_query_bo_create - create a bo to hold a dummy query result |
| 248 | * the start of a buffer object. | ||
| 249 | * | 286 | * |
| 250 | * @dev_priv: The device private structure. | 287 | * @dev_priv: A device private structure. |
| 251 | * | 288 | * |
| 252 | * This function will idle the buffer using an uninterruptible wait, then | 289 | * This function creates a small buffer object that holds the query |
| 253 | * map the first page and initialize a pending occlusion query result structure, | 290 | * result for dummy queries emitted as query barriers. |
| 254 | * Finally it will unmap the buffer. | 291 | * The function will then map the first page and initialize a pending |
| 292 | * occlusion query result structure, Finally it will unmap the buffer. | ||
| 293 | * No interruptible waits are done within this function. | ||
| 255 | * | 294 | * |
| 256 | * TODO: Since we're only mapping a single page, we should optimize the map | 295 | * Returns an error if bo creation or initialization fails. |
| 257 | * to use kmap_atomic / iomap_atomic. | ||
| 258 | */ | 296 | */ |
| 259 | static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv) | 297 | static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) |
| 260 | { | 298 | { |
| 299 | int ret; | ||
| 300 | struct ttm_buffer_object *bo; | ||
| 261 | struct ttm_bo_kmap_obj map; | 301 | struct ttm_bo_kmap_obj map; |
| 262 | volatile SVGA3dQueryResult *result; | 302 | volatile SVGA3dQueryResult *result; |
| 263 | bool dummy; | 303 | bool dummy; |
| 264 | int ret; | ||
| 265 | struct ttm_bo_device *bdev = &dev_priv->bdev; | ||
| 266 | struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; | ||
| 267 | 304 | ||
| 268 | ttm_bo_reserve(bo, false, false, false, 0); | 305 | /* |
| 269 | spin_lock(&bdev->fence_lock); | 306 | * Create the bo as pinned, so that a tryreserve will |
| 270 | ret = ttm_bo_wait(bo, false, false, false); | 307 | * immediately succeed. This is because we're the only |
| 271 | spin_unlock(&bdev->fence_lock); | 308 | * user of the bo currently. |
| 309 | */ | ||
| 310 | ret = ttm_bo_create(&dev_priv->bdev, | ||
| 311 | PAGE_SIZE, | ||
| 312 | ttm_bo_type_device, | ||
| 313 | &vmw_sys_ne_placement, | ||
| 314 | 0, false, NULL, | ||
| 315 | &bo); | ||
| 316 | |||
| 272 | if (unlikely(ret != 0)) | 317 | if (unlikely(ret != 0)) |
| 273 | (void) vmw_fallback_wait(dev_priv, false, true, 0, false, | 318 | return ret; |
| 274 | 10*HZ); | 319 | |
| 320 | ret = ttm_bo_reserve(bo, false, true, false, 0); | ||
| 321 | BUG_ON(ret != 0); | ||
| 275 | 322 | ||
| 276 | ret = ttm_bo_kmap(bo, 0, 1, &map); | 323 | ret = ttm_bo_kmap(bo, 0, 1, &map); |
| 277 | if (likely(ret == 0)) { | 324 | if (likely(ret == 0)) { |
| @@ -280,34 +327,19 @@ static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv) | |||
| 280 | result->state = SVGA3D_QUERYSTATE_PENDING; | 327 | result->state = SVGA3D_QUERYSTATE_PENDING; |
| 281 | result->result32 = 0xff; | 328 | result->result32 = 0xff; |
| 282 | ttm_bo_kunmap(&map); | 329 | ttm_bo_kunmap(&map); |
| 283 | } else | 330 | } |
| 284 | DRM_ERROR("Dummy query buffer map failed.\n"); | 331 | vmw_bo_pin(bo, false); |
| 285 | ttm_bo_unreserve(bo); | 332 | ttm_bo_unreserve(bo); |
| 286 | } | ||
| 287 | 333 | ||
| 334 | if (unlikely(ret != 0)) { | ||
| 335 | DRM_ERROR("Dummy query buffer map failed.\n"); | ||
| 336 | ttm_bo_unref(&bo); | ||
| 337 | } else | ||
| 338 | dev_priv->dummy_query_bo = bo; | ||
| 288 | 339 | ||
| 289 | /** | 340 | return ret; |
| 290 | * vmw_dummy_query_bo_create - create a bo to hold a dummy query result | ||
| 291 | * | ||
| 292 | * @dev_priv: A device private structure. | ||
| 293 | * | ||
| 294 | * This function creates a small buffer object that holds the query | ||
| 295 | * result for dummy queries emitted as query barriers. | ||
| 296 | * No interruptible waits are done within this function. | ||
| 297 | * | ||
| 298 | * Returns an error if bo creation fails. | ||
| 299 | */ | ||
| 300 | static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) | ||
| 301 | { | ||
| 302 | return ttm_bo_create(&dev_priv->bdev, | ||
| 303 | PAGE_SIZE, | ||
| 304 | ttm_bo_type_device, | ||
| 305 | &vmw_vram_sys_placement, | ||
| 306 | 0, false, NULL, | ||
| 307 | &dev_priv->dummy_query_bo); | ||
| 308 | } | 341 | } |
| 309 | 342 | ||
| 310 | |||
| 311 | static int vmw_request_device(struct vmw_private *dev_priv) | 343 | static int vmw_request_device(struct vmw_private *dev_priv) |
| 312 | { | 344 | { |
| 313 | int ret; | 345 | int ret; |
| @@ -318,14 +350,24 @@ static int vmw_request_device(struct vmw_private *dev_priv) | |||
| 318 | return ret; | 350 | return ret; |
| 319 | } | 351 | } |
| 320 | vmw_fence_fifo_up(dev_priv->fman); | 352 | vmw_fence_fifo_up(dev_priv->fman); |
| 353 | if (dev_priv->has_mob) { | ||
| 354 | ret = vmw_otables_setup(dev_priv); | ||
| 355 | if (unlikely(ret != 0)) { | ||
| 356 | DRM_ERROR("Unable to initialize " | ||
| 357 | "guest Memory OBjects.\n"); | ||
| 358 | goto out_no_mob; | ||
| 359 | } | ||
| 360 | } | ||
| 321 | ret = vmw_dummy_query_bo_create(dev_priv); | 361 | ret = vmw_dummy_query_bo_create(dev_priv); |
| 322 | if (unlikely(ret != 0)) | 362 | if (unlikely(ret != 0)) |
| 323 | goto out_no_query_bo; | 363 | goto out_no_query_bo; |
| 324 | vmw_dummy_query_bo_prepare(dev_priv); | ||
| 325 | 364 | ||
| 326 | return 0; | 365 | return 0; |
| 327 | 366 | ||
| 328 | out_no_query_bo: | 367 | out_no_query_bo: |
| 368 | if (dev_priv->has_mob) | ||
| 369 | vmw_otables_takedown(dev_priv); | ||
| 370 | out_no_mob: | ||
| 329 | vmw_fence_fifo_down(dev_priv->fman); | 371 | vmw_fence_fifo_down(dev_priv->fman); |
| 330 | vmw_fifo_release(dev_priv, &dev_priv->fifo); | 372 | vmw_fifo_release(dev_priv, &dev_priv->fifo); |
| 331 | return ret; | 373 | return ret; |
| @@ -341,10 +383,13 @@ static void vmw_release_device(struct vmw_private *dev_priv) | |||
| 341 | BUG_ON(dev_priv->pinned_bo != NULL); | 383 | BUG_ON(dev_priv->pinned_bo != NULL); |
| 342 | 384 | ||
| 343 | ttm_bo_unref(&dev_priv->dummy_query_bo); | 385 | ttm_bo_unref(&dev_priv->dummy_query_bo); |
| 386 | if (dev_priv->has_mob) | ||
| 387 | vmw_otables_takedown(dev_priv); | ||
| 344 | vmw_fence_fifo_down(dev_priv->fman); | 388 | vmw_fence_fifo_down(dev_priv->fman); |
| 345 | vmw_fifo_release(dev_priv, &dev_priv->fifo); | 389 | vmw_fifo_release(dev_priv, &dev_priv->fifo); |
| 346 | } | 390 | } |
| 347 | 391 | ||
| 392 | |||
| 348 | /** | 393 | /** |
| 349 | * Increase the 3d resource refcount. | 394 | * Increase the 3d resource refcount. |
| 350 | * If the count was previously zero, initialize the fifo, switching to svga | 395 | * If the count was previously zero, initialize the fifo, switching to svga |
| @@ -510,6 +555,33 @@ out_fixup: | |||
| 510 | return 0; | 555 | return 0; |
| 511 | } | 556 | } |
| 512 | 557 | ||
| 558 | /** | ||
| 559 | * vmw_dma_masks - set required page- and dma masks | ||
| 560 | * | ||
| 561 | * @dev: Pointer to struct drm-device | ||
| 562 | * | ||
| 563 | * With 32-bit we can only handle 32 bit PFNs. Optionally set that | ||
| 564 | * restriction also for 64-bit systems. | ||
| 565 | */ | ||
| 566 | #ifdef CONFIG_INTEL_IOMMU | ||
| 567 | static int vmw_dma_masks(struct vmw_private *dev_priv) | ||
| 568 | { | ||
| 569 | struct drm_device *dev = dev_priv->dev; | ||
| 570 | |||
| 571 | if (intel_iommu_enabled && | ||
| 572 | (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { | ||
| 573 | DRM_INFO("Restricting DMA addresses to 44 bits.\n"); | ||
| 574 | return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); | ||
| 575 | } | ||
| 576 | return 0; | ||
| 577 | } | ||
| 578 | #else | ||
| 579 | static int vmw_dma_masks(struct vmw_private *dev_priv) | ||
| 580 | { | ||
| 581 | return 0; | ||
| 582 | } | ||
| 583 | #endif | ||
| 584 | |||
| 513 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | 585 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) |
| 514 | { | 586 | { |
| 515 | struct vmw_private *dev_priv; | 587 | struct vmw_private *dev_priv; |
| @@ -532,6 +604,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 532 | mutex_init(&dev_priv->hw_mutex); | 604 | mutex_init(&dev_priv->hw_mutex); |
| 533 | mutex_init(&dev_priv->cmdbuf_mutex); | 605 | mutex_init(&dev_priv->cmdbuf_mutex); |
| 534 | mutex_init(&dev_priv->release_mutex); | 606 | mutex_init(&dev_priv->release_mutex); |
| 607 | mutex_init(&dev_priv->binding_mutex); | ||
| 535 | rwlock_init(&dev_priv->resource_lock); | 608 | rwlock_init(&dev_priv->resource_lock); |
| 536 | 609 | ||
| 537 | for (i = vmw_res_context; i < vmw_res_max; ++i) { | 610 | for (i = vmw_res_context; i < vmw_res_max; ++i) { |
| @@ -578,14 +651,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 578 | 651 | ||
| 579 | vmw_get_initial_size(dev_priv); | 652 | vmw_get_initial_size(dev_priv); |
| 580 | 653 | ||
| 581 | if (dev_priv->capabilities & SVGA_CAP_GMR) { | 654 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { |
| 582 | dev_priv->max_gmr_descriptors = | ||
| 583 | vmw_read(dev_priv, | ||
| 584 | SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH); | ||
| 585 | dev_priv->max_gmr_ids = | 655 | dev_priv->max_gmr_ids = |
| 586 | vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); | 656 | vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); |
| 587 | } | ||
| 588 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { | ||
| 589 | dev_priv->max_gmr_pages = | 657 | dev_priv->max_gmr_pages = |
| 590 | vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); | 658 | vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); |
| 591 | dev_priv->memory_size = | 659 | dev_priv->memory_size = |
| @@ -598,23 +666,45 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 598 | */ | 666 | */ |
| 599 | dev_priv->memory_size = 512*1024*1024; | 667 | dev_priv->memory_size = 512*1024*1024; |
| 600 | } | 668 | } |
| 669 | dev_priv->max_mob_pages = 0; | ||
| 670 | dev_priv->max_mob_size = 0; | ||
| 671 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { | ||
| 672 | uint64_t mem_size = | ||
| 673 | vmw_read(dev_priv, | ||
| 674 | SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB); | ||
| 675 | |||
| 676 | dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; | ||
| 677 | dev_priv->prim_bb_mem = | ||
| 678 | vmw_read(dev_priv, | ||
| 679 | SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); | ||
| 680 | dev_priv->max_mob_size = | ||
| 681 | vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); | ||
| 682 | } else | ||
| 683 | dev_priv->prim_bb_mem = dev_priv->vram_size; | ||
| 684 | |||
| 685 | ret = vmw_dma_masks(dev_priv); | ||
| 686 | if (unlikely(ret != 0)) { | ||
| 687 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 688 | goto out_err0; | ||
| 689 | } | ||
| 690 | |||
| 691 | if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size)) | ||
| 692 | dev_priv->prim_bb_mem = dev_priv->vram_size; | ||
| 601 | 693 | ||
| 602 | mutex_unlock(&dev_priv->hw_mutex); | 694 | mutex_unlock(&dev_priv->hw_mutex); |
| 603 | 695 | ||
| 604 | vmw_print_capabilities(dev_priv->capabilities); | 696 | vmw_print_capabilities(dev_priv->capabilities); |
| 605 | 697 | ||
| 606 | if (dev_priv->capabilities & SVGA_CAP_GMR) { | 698 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { |
| 607 | DRM_INFO("Max GMR ids is %u\n", | 699 | DRM_INFO("Max GMR ids is %u\n", |
| 608 | (unsigned)dev_priv->max_gmr_ids); | 700 | (unsigned)dev_priv->max_gmr_ids); |
| 609 | DRM_INFO("Max GMR descriptors is %u\n", | ||
| 610 | (unsigned)dev_priv->max_gmr_descriptors); | ||
| 611 | } | ||
| 612 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { | ||
| 613 | DRM_INFO("Max number of GMR pages is %u\n", | 701 | DRM_INFO("Max number of GMR pages is %u\n", |
| 614 | (unsigned)dev_priv->max_gmr_pages); | 702 | (unsigned)dev_priv->max_gmr_pages); |
| 615 | DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", | 703 | DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", |
| 616 | (unsigned)dev_priv->memory_size / 1024); | 704 | (unsigned)dev_priv->memory_size / 1024); |
| 617 | } | 705 | } |
| 706 | DRM_INFO("Maximum display memory size is %u kiB\n", | ||
| 707 | dev_priv->prim_bb_mem / 1024); | ||
| 618 | DRM_INFO("VRAM at 0x%08x size is %u kiB\n", | 708 | DRM_INFO("VRAM at 0x%08x size is %u kiB\n", |
| 619 | dev_priv->vram_start, dev_priv->vram_size / 1024); | 709 | dev_priv->vram_start, dev_priv->vram_size / 1024); |
| 620 | DRM_INFO("MMIO at 0x%08x size is %u kiB\n", | 710 | DRM_INFO("MMIO at 0x%08x size is %u kiB\n", |
| @@ -649,12 +739,22 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 649 | dev_priv->has_gmr = true; | 739 | dev_priv->has_gmr = true; |
| 650 | if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || | 740 | if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || |
| 651 | refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, | 741 | refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, |
| 652 | dev_priv->max_gmr_ids) != 0) { | 742 | VMW_PL_GMR) != 0) { |
| 653 | DRM_INFO("No GMR memory available. " | 743 | DRM_INFO("No GMR memory available. " |
| 654 | "Graphics memory resources are very limited.\n"); | 744 | "Graphics memory resources are very limited.\n"); |
| 655 | dev_priv->has_gmr = false; | 745 | dev_priv->has_gmr = false; |
| 656 | } | 746 | } |
| 657 | 747 | ||
| 748 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { | ||
| 749 | dev_priv->has_mob = true; | ||
| 750 | if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, | ||
| 751 | VMW_PL_MOB) != 0) { | ||
| 752 | DRM_INFO("No MOB memory available. " | ||
| 753 | "3D will be disabled.\n"); | ||
| 754 | dev_priv->has_mob = false; | ||
| 755 | } | ||
| 756 | } | ||
| 757 | |||
| 658 | dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, | 758 | dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, |
| 659 | dev_priv->mmio_size); | 759 | dev_priv->mmio_size); |
| 660 | 760 | ||
| @@ -757,6 +857,8 @@ out_err4: | |||
| 757 | iounmap(dev_priv->mmio_virt); | 857 | iounmap(dev_priv->mmio_virt); |
| 758 | out_err3: | 858 | out_err3: |
| 759 | arch_phys_wc_del(dev_priv->mmio_mtrr); | 859 | arch_phys_wc_del(dev_priv->mmio_mtrr); |
| 860 | if (dev_priv->has_mob) | ||
| 861 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
| 760 | if (dev_priv->has_gmr) | 862 | if (dev_priv->has_gmr) |
| 761 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | 863 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); |
| 762 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | 864 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); |
| @@ -801,6 +903,8 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
| 801 | ttm_object_device_release(&dev_priv->tdev); | 903 | ttm_object_device_release(&dev_priv->tdev); |
| 802 | iounmap(dev_priv->mmio_virt); | 904 | iounmap(dev_priv->mmio_virt); |
| 803 | arch_phys_wc_del(dev_priv->mmio_mtrr); | 905 | arch_phys_wc_del(dev_priv->mmio_mtrr); |
| 906 | if (dev_priv->has_mob) | ||
| 907 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
| 804 | if (dev_priv->has_gmr) | 908 | if (dev_priv->has_gmr) |
| 805 | (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | 909 | (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); |
| 806 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | 910 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); |
| @@ -840,6 +944,7 @@ static void vmw_postclose(struct drm_device *dev, | |||
| 840 | drm_master_put(&vmw_fp->locked_master); | 944 | drm_master_put(&vmw_fp->locked_master); |
| 841 | } | 945 | } |
| 842 | 946 | ||
| 947 | vmw_compat_shader_man_destroy(vmw_fp->shman); | ||
| 843 | ttm_object_file_release(&vmw_fp->tfile); | 948 | ttm_object_file_release(&vmw_fp->tfile); |
| 844 | kfree(vmw_fp); | 949 | kfree(vmw_fp); |
| 845 | } | 950 | } |
| @@ -859,11 +964,17 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) | |||
| 859 | if (unlikely(vmw_fp->tfile == NULL)) | 964 | if (unlikely(vmw_fp->tfile == NULL)) |
| 860 | goto out_no_tfile; | 965 | goto out_no_tfile; |
| 861 | 966 | ||
| 967 | vmw_fp->shman = vmw_compat_shader_man_create(dev_priv); | ||
| 968 | if (IS_ERR(vmw_fp->shman)) | ||
| 969 | goto out_no_shman; | ||
| 970 | |||
| 862 | file_priv->driver_priv = vmw_fp; | 971 | file_priv->driver_priv = vmw_fp; |
| 863 | dev_priv->bdev.dev_mapping = dev->dev_mapping; | 972 | dev_priv->bdev.dev_mapping = dev->dev_mapping; |
| 864 | 973 | ||
| 865 | return 0; | 974 | return 0; |
| 866 | 975 | ||
| 976 | out_no_shman: | ||
| 977 | ttm_object_file_release(&vmw_fp->tfile); | ||
| 867 | out_no_tfile: | 978 | out_no_tfile: |
| 868 | kfree(vmw_fp); | 979 | kfree(vmw_fp); |
| 869 | return ret; | 980 | return ret; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 20890ad8408b..07831554dad7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
| @@ -40,9 +40,9 @@ | |||
| 40 | #include <drm/ttm/ttm_module.h> | 40 | #include <drm/ttm/ttm_module.h> |
| 41 | #include "vmwgfx_fence.h" | 41 | #include "vmwgfx_fence.h" |
| 42 | 42 | ||
| 43 | #define VMWGFX_DRIVER_DATE "20120209" | 43 | #define VMWGFX_DRIVER_DATE "20140228" |
| 44 | #define VMWGFX_DRIVER_MAJOR 2 | 44 | #define VMWGFX_DRIVER_MAJOR 2 |
| 45 | #define VMWGFX_DRIVER_MINOR 4 | 45 | #define VMWGFX_DRIVER_MINOR 5 |
| 46 | #define VMWGFX_DRIVER_PATCHLEVEL 0 | 46 | #define VMWGFX_DRIVER_PATCHLEVEL 0 |
| 47 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 | 47 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 |
| 48 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) | 48 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) |
| @@ -50,19 +50,39 @@ | |||
| 50 | #define VMWGFX_MAX_VALIDATIONS 2048 | 50 | #define VMWGFX_MAX_VALIDATIONS 2048 |
| 51 | #define VMWGFX_MAX_DISPLAYS 16 | 51 | #define VMWGFX_MAX_DISPLAYS 16 |
| 52 | #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768 | 52 | #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768 |
| 53 | #define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0 | ||
| 54 | |||
| 55 | /* | ||
| 56 | * Perhaps we should have sysfs entries for these. | ||
| 57 | */ | ||
| 58 | #define VMWGFX_NUM_GB_CONTEXT 256 | ||
| 59 | #define VMWGFX_NUM_GB_SHADER 20000 | ||
| 60 | #define VMWGFX_NUM_GB_SURFACE 32768 | ||
| 61 | #define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS | ||
| 62 | #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\ | ||
| 63 | VMWGFX_NUM_GB_SHADER +\ | ||
| 64 | VMWGFX_NUM_GB_SURFACE +\ | ||
| 65 | VMWGFX_NUM_GB_SCREEN_TARGET) | ||
| 53 | 66 | ||
| 54 | #define VMW_PL_GMR TTM_PL_PRIV0 | 67 | #define VMW_PL_GMR TTM_PL_PRIV0 |
| 55 | #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0 | 68 | #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0 |
| 69 | #define VMW_PL_MOB TTM_PL_PRIV1 | ||
| 70 | #define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1 | ||
| 56 | 71 | ||
| 57 | #define VMW_RES_CONTEXT ttm_driver_type0 | 72 | #define VMW_RES_CONTEXT ttm_driver_type0 |
| 58 | #define VMW_RES_SURFACE ttm_driver_type1 | 73 | #define VMW_RES_SURFACE ttm_driver_type1 |
| 59 | #define VMW_RES_STREAM ttm_driver_type2 | 74 | #define VMW_RES_STREAM ttm_driver_type2 |
| 60 | #define VMW_RES_FENCE ttm_driver_type3 | 75 | #define VMW_RES_FENCE ttm_driver_type3 |
| 76 | #define VMW_RES_SHADER ttm_driver_type4 | ||
| 77 | |||
| 78 | struct vmw_compat_shader_manager; | ||
| 61 | 79 | ||
| 62 | struct vmw_fpriv { | 80 | struct vmw_fpriv { |
| 63 | struct drm_master *locked_master; | 81 | struct drm_master *locked_master; |
| 64 | struct ttm_object_file *tfile; | 82 | struct ttm_object_file *tfile; |
| 65 | struct list_head fence_events; | 83 | struct list_head fence_events; |
| 84 | bool gb_aware; | ||
| 85 | struct vmw_compat_shader_manager *shman; | ||
| 66 | }; | 86 | }; |
| 67 | 87 | ||
| 68 | struct vmw_dma_buffer { | 88 | struct vmw_dma_buffer { |
| @@ -82,6 +102,7 @@ struct vmw_dma_buffer { | |||
| 82 | struct vmw_validate_buffer { | 102 | struct vmw_validate_buffer { |
| 83 | struct ttm_validate_buffer base; | 103 | struct ttm_validate_buffer base; |
| 84 | struct drm_hash_item hash; | 104 | struct drm_hash_item hash; |
| 105 | bool validate_as_mob; | ||
| 85 | }; | 106 | }; |
| 86 | 107 | ||
| 87 | struct vmw_res_func; | 108 | struct vmw_res_func; |
| @@ -98,6 +119,7 @@ struct vmw_resource { | |||
| 98 | const struct vmw_res_func *func; | 119 | const struct vmw_res_func *func; |
| 99 | struct list_head lru_head; /* Protected by the resource lock */ | 120 | struct list_head lru_head; /* Protected by the resource lock */ |
| 100 | struct list_head mob_head; /* Protected by @backup reserved */ | 121 | struct list_head mob_head; /* Protected by @backup reserved */ |
| 122 | struct list_head binding_head; /* Protected by binding_mutex */ | ||
| 101 | void (*res_free) (struct vmw_resource *res); | 123 | void (*res_free) (struct vmw_resource *res); |
| 102 | void (*hw_destroy) (struct vmw_resource *res); | 124 | void (*hw_destroy) (struct vmw_resource *res); |
| 103 | }; | 125 | }; |
| @@ -106,6 +128,7 @@ enum vmw_res_type { | |||
| 106 | vmw_res_context, | 128 | vmw_res_context, |
| 107 | vmw_res_surface, | 129 | vmw_res_surface, |
| 108 | vmw_res_stream, | 130 | vmw_res_stream, |
| 131 | vmw_res_shader, | ||
| 109 | vmw_res_max | 132 | vmw_res_max |
| 110 | }; | 133 | }; |
| 111 | 134 | ||
| @@ -154,6 +177,7 @@ struct vmw_fifo_state { | |||
| 154 | }; | 177 | }; |
| 155 | 178 | ||
| 156 | struct vmw_relocation { | 179 | struct vmw_relocation { |
| 180 | SVGAMobId *mob_loc; | ||
| 157 | SVGAGuestPtr *location; | 181 | SVGAGuestPtr *location; |
| 158 | uint32_t index; | 182 | uint32_t index; |
| 159 | }; | 183 | }; |
| @@ -229,11 +253,77 @@ struct vmw_piter { | |||
| 229 | struct page *(*page)(struct vmw_piter *); | 253 | struct page *(*page)(struct vmw_piter *); |
| 230 | }; | 254 | }; |
| 231 | 255 | ||
| 256 | /* | ||
| 257 | * enum vmw_ctx_binding_type - abstract resource to context binding types | ||
| 258 | */ | ||
| 259 | enum vmw_ctx_binding_type { | ||
| 260 | vmw_ctx_binding_shader, | ||
| 261 | vmw_ctx_binding_rt, | ||
| 262 | vmw_ctx_binding_tex, | ||
| 263 | vmw_ctx_binding_max | ||
| 264 | }; | ||
| 265 | |||
| 266 | /** | ||
| 267 | * struct vmw_ctx_bindinfo - structure representing a single context binding | ||
| 268 | * | ||
| 269 | * @ctx: Pointer to the context structure. NULL means the binding is not | ||
| 270 | * active. | ||
| 271 | * @res: Non ref-counted pointer to the bound resource. | ||
| 272 | * @bt: The binding type. | ||
| 273 | * @i1: Union of information needed to unbind. | ||
| 274 | */ | ||
| 275 | struct vmw_ctx_bindinfo { | ||
| 276 | struct vmw_resource *ctx; | ||
| 277 | struct vmw_resource *res; | ||
| 278 | enum vmw_ctx_binding_type bt; | ||
| 279 | bool scrubbed; | ||
| 280 | union { | ||
| 281 | SVGA3dShaderType shader_type; | ||
| 282 | SVGA3dRenderTargetType rt_type; | ||
| 283 | uint32 texture_stage; | ||
| 284 | } i1; | ||
| 285 | }; | ||
| 286 | |||
| 287 | /** | ||
| 288 | * struct vmw_ctx_binding - structure representing a single context binding | ||
| 289 | * - suitable for tracking in a context | ||
| 290 | * | ||
| 291 | * @ctx_list: List head for context. | ||
| 292 | * @res_list: List head for bound resource. | ||
| 293 | * @bi: Binding info | ||
| 294 | */ | ||
| 295 | struct vmw_ctx_binding { | ||
| 296 | struct list_head ctx_list; | ||
| 297 | struct list_head res_list; | ||
| 298 | struct vmw_ctx_bindinfo bi; | ||
| 299 | }; | ||
| 300 | |||
| 301 | |||
| 302 | /** | ||
| 303 | * struct vmw_ctx_binding_state - context binding state | ||
| 304 | * | ||
| 305 | * @list: linked list of individual bindings. | ||
| 306 | * @render_targets: Render target bindings. | ||
| 307 | * @texture_units: Texture units/samplers bindings. | ||
| 308 | * @shaders: Shader bindings. | ||
| 309 | * | ||
| 310 | * Note that this structure also provides storage space for the individual | ||
| 311 | * struct vmw_ctx_binding objects, so that no dynamic allocation is needed | ||
| 312 | * for individual bindings. | ||
| 313 | * | ||
| 314 | */ | ||
| 315 | struct vmw_ctx_binding_state { | ||
| 316 | struct list_head list; | ||
| 317 | struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX]; | ||
| 318 | struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS]; | ||
| 319 | struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX]; | ||
| 320 | }; | ||
| 321 | |||
| 232 | struct vmw_sw_context{ | 322 | struct vmw_sw_context{ |
| 233 | struct drm_open_hash res_ht; | 323 | struct drm_open_hash res_ht; |
| 234 | bool res_ht_initialized; | 324 | bool res_ht_initialized; |
| 235 | bool kernel; /**< is the call made from the kernel */ | 325 | bool kernel; /**< is the call made from the kernel */ |
| 236 | struct ttm_object_file *tfile; | 326 | struct vmw_fpriv *fp; |
| 237 | struct list_head validate_nodes; | 327 | struct list_head validate_nodes; |
| 238 | struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS]; | 328 | struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS]; |
| 239 | uint32_t cur_reloc; | 329 | uint32_t cur_reloc; |
| @@ -250,6 +340,8 @@ struct vmw_sw_context{ | |||
| 250 | struct vmw_resource *last_query_ctx; | 340 | struct vmw_resource *last_query_ctx; |
| 251 | bool needs_post_query_barrier; | 341 | bool needs_post_query_barrier; |
| 252 | struct vmw_resource *error_resource; | 342 | struct vmw_resource *error_resource; |
| 343 | struct vmw_ctx_binding_state staged_bindings; | ||
| 344 | struct list_head staged_shaders; | ||
| 253 | }; | 345 | }; |
| 254 | 346 | ||
| 255 | struct vmw_legacy_display; | 347 | struct vmw_legacy_display; |
| @@ -281,6 +373,7 @@ struct vmw_private { | |||
| 281 | unsigned int io_start; | 373 | unsigned int io_start; |
| 282 | uint32_t vram_start; | 374 | uint32_t vram_start; |
| 283 | uint32_t vram_size; | 375 | uint32_t vram_size; |
| 376 | uint32_t prim_bb_mem; | ||
| 284 | uint32_t mmio_start; | 377 | uint32_t mmio_start; |
| 285 | uint32_t mmio_size; | 378 | uint32_t mmio_size; |
| 286 | uint32_t fb_max_width; | 379 | uint32_t fb_max_width; |
| @@ -290,11 +383,13 @@ struct vmw_private { | |||
| 290 | __le32 __iomem *mmio_virt; | 383 | __le32 __iomem *mmio_virt; |
| 291 | int mmio_mtrr; | 384 | int mmio_mtrr; |
| 292 | uint32_t capabilities; | 385 | uint32_t capabilities; |
| 293 | uint32_t max_gmr_descriptors; | ||
| 294 | uint32_t max_gmr_ids; | 386 | uint32_t max_gmr_ids; |
| 295 | uint32_t max_gmr_pages; | 387 | uint32_t max_gmr_pages; |
| 388 | uint32_t max_mob_pages; | ||
| 389 | uint32_t max_mob_size; | ||
| 296 | uint32_t memory_size; | 390 | uint32_t memory_size; |
| 297 | bool has_gmr; | 391 | bool has_gmr; |
| 392 | bool has_mob; | ||
| 298 | struct mutex hw_mutex; | 393 | struct mutex hw_mutex; |
| 299 | 394 | ||
| 300 | /* | 395 | /* |
| @@ -370,6 +465,7 @@ struct vmw_private { | |||
| 370 | 465 | ||
| 371 | struct vmw_sw_context ctx; | 466 | struct vmw_sw_context ctx; |
| 372 | struct mutex cmdbuf_mutex; | 467 | struct mutex cmdbuf_mutex; |
| 468 | struct mutex binding_mutex; | ||
| 373 | 469 | ||
| 374 | /** | 470 | /** |
| 375 | * Operating mode. | 471 | * Operating mode. |
| @@ -415,6 +511,12 @@ struct vmw_private { | |||
| 415 | * DMA mapping stuff. | 511 | * DMA mapping stuff. |
| 416 | */ | 512 | */ |
| 417 | enum vmw_dma_map_mode map_mode; | 513 | enum vmw_dma_map_mode map_mode; |
| 514 | |||
| 515 | /* | ||
| 516 | * Guest Backed stuff | ||
| 517 | */ | ||
| 518 | struct ttm_buffer_object *otable_bo; | ||
| 519 | struct vmw_otable *otables; | ||
| 418 | }; | 520 | }; |
| 419 | 521 | ||
| 420 | static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) | 522 | static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) |
| @@ -471,23 +573,14 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id); | |||
| 471 | * Resource utilities - vmwgfx_resource.c | 573 | * Resource utilities - vmwgfx_resource.c |
| 472 | */ | 574 | */ |
| 473 | struct vmw_user_resource_conv; | 575 | struct vmw_user_resource_conv; |
| 474 | extern const struct vmw_user_resource_conv *user_surface_converter; | ||
| 475 | extern const struct vmw_user_resource_conv *user_context_converter; | ||
| 476 | 576 | ||
| 477 | extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); | ||
| 478 | extern void vmw_resource_unreference(struct vmw_resource **p_res); | 577 | extern void vmw_resource_unreference(struct vmw_resource **p_res); |
| 479 | extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); | 578 | extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); |
| 579 | extern struct vmw_resource * | ||
| 580 | vmw_resource_reference_unless_doomed(struct vmw_resource *res); | ||
| 480 | extern int vmw_resource_validate(struct vmw_resource *res); | 581 | extern int vmw_resource_validate(struct vmw_resource *res); |
| 481 | extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup); | 582 | extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup); |
| 482 | extern bool vmw_resource_needs_backup(const struct vmw_resource *res); | 583 | extern bool vmw_resource_needs_backup(const struct vmw_resource *res); |
| 483 | extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, | ||
| 484 | struct drm_file *file_priv); | ||
| 485 | extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, | ||
| 486 | struct drm_file *file_priv); | ||
| 487 | extern int vmw_context_check(struct vmw_private *dev_priv, | ||
| 488 | struct ttm_object_file *tfile, | ||
| 489 | int id, | ||
| 490 | struct vmw_resource **p_res); | ||
| 491 | extern int vmw_user_lookup_handle(struct vmw_private *dev_priv, | 584 | extern int vmw_user_lookup_handle(struct vmw_private *dev_priv, |
| 492 | struct ttm_object_file *tfile, | 585 | struct ttm_object_file *tfile, |
| 493 | uint32_t handle, | 586 | uint32_t handle, |
| @@ -499,18 +592,6 @@ extern int vmw_user_resource_lookup_handle( | |||
| 499 | uint32_t handle, | 592 | uint32_t handle, |
| 500 | const struct vmw_user_resource_conv *converter, | 593 | const struct vmw_user_resource_conv *converter, |
| 501 | struct vmw_resource **p_res); | 594 | struct vmw_resource **p_res); |
| 502 | extern void vmw_surface_res_free(struct vmw_resource *res); | ||
| 503 | extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, | ||
| 504 | struct drm_file *file_priv); | ||
| 505 | extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | ||
| 506 | struct drm_file *file_priv); | ||
| 507 | extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | ||
| 508 | struct drm_file *file_priv); | ||
| 509 | extern int vmw_surface_check(struct vmw_private *dev_priv, | ||
| 510 | struct ttm_object_file *tfile, | ||
| 511 | uint32_t handle, int *id); | ||
| 512 | extern int vmw_surface_validate(struct vmw_private *dev_priv, | ||
| 513 | struct vmw_surface *srf); | ||
| 514 | extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo); | 595 | extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo); |
| 515 | extern int vmw_dmabuf_init(struct vmw_private *dev_priv, | 596 | extern int vmw_dmabuf_init(struct vmw_private *dev_priv, |
| 516 | struct vmw_dma_buffer *vmw_bo, | 597 | struct vmw_dma_buffer *vmw_bo, |
| @@ -519,10 +600,21 @@ extern int vmw_dmabuf_init(struct vmw_private *dev_priv, | |||
| 519 | void (*bo_free) (struct ttm_buffer_object *bo)); | 600 | void (*bo_free) (struct ttm_buffer_object *bo)); |
| 520 | extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, | 601 | extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, |
| 521 | struct ttm_object_file *tfile); | 602 | struct ttm_object_file *tfile); |
| 603 | extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, | ||
| 604 | struct ttm_object_file *tfile, | ||
| 605 | uint32_t size, | ||
| 606 | bool shareable, | ||
| 607 | uint32_t *handle, | ||
| 608 | struct vmw_dma_buffer **p_dma_buf); | ||
| 609 | extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, | ||
| 610 | struct vmw_dma_buffer *dma_buf, | ||
| 611 | uint32_t *handle); | ||
| 522 | extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | 612 | extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, |
| 523 | struct drm_file *file_priv); | 613 | struct drm_file *file_priv); |
| 524 | extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, | 614 | extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, |
| 525 | struct drm_file *file_priv); | 615 | struct drm_file *file_priv); |
| 616 | extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, | ||
| 617 | struct drm_file *file_priv); | ||
| 526 | extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, | 618 | extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, |
| 527 | uint32_t cur_validate_node); | 619 | uint32_t cur_validate_node); |
| 528 | extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); | 620 | extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); |
| @@ -622,10 +714,16 @@ extern struct ttm_placement vmw_vram_sys_placement; | |||
| 622 | extern struct ttm_placement vmw_vram_gmr_placement; | 714 | extern struct ttm_placement vmw_vram_gmr_placement; |
| 623 | extern struct ttm_placement vmw_vram_gmr_ne_placement; | 715 | extern struct ttm_placement vmw_vram_gmr_ne_placement; |
| 624 | extern struct ttm_placement vmw_sys_placement; | 716 | extern struct ttm_placement vmw_sys_placement; |
| 717 | extern struct ttm_placement vmw_sys_ne_placement; | ||
| 625 | extern struct ttm_placement vmw_evictable_placement; | 718 | extern struct ttm_placement vmw_evictable_placement; |
| 626 | extern struct ttm_placement vmw_srf_placement; | 719 | extern struct ttm_placement vmw_srf_placement; |
| 720 | extern struct ttm_placement vmw_mob_placement; | ||
| 627 | extern struct ttm_bo_driver vmw_bo_driver; | 721 | extern struct ttm_bo_driver vmw_bo_driver; |
| 628 | extern int vmw_dma_quiescent(struct drm_device *dev); | 722 | extern int vmw_dma_quiescent(struct drm_device *dev); |
| 723 | extern int vmw_bo_map_dma(struct ttm_buffer_object *bo); | ||
| 724 | extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo); | ||
| 725 | extern const struct vmw_sg_table * | ||
| 726 | vmw_bo_sg_table(struct ttm_buffer_object *bo); | ||
| 629 | extern void vmw_piter_start(struct vmw_piter *viter, | 727 | extern void vmw_piter_start(struct vmw_piter *viter, |
| 630 | const struct vmw_sg_table *vsgt, | 728 | const struct vmw_sg_table *vsgt, |
| 631 | unsigned long p_offs); | 729 | unsigned long p_offs); |
| @@ -701,7 +799,7 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, | |||
| 701 | * IRQs and waiting - vmwgfx_irq.c | 799 | * IRQs and waiting - vmwgfx_irq.c |
| 702 | */ | 800 | */ |
| 703 | 801 | ||
| 704 | extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS); | 802 | extern irqreturn_t vmw_irq_handler(int irq, void *arg); |
| 705 | extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy, | 803 | extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy, |
| 706 | uint32_t seqno, bool interruptible, | 804 | uint32_t seqno, bool interruptible, |
| 707 | unsigned long timeout); | 805 | unsigned long timeout); |
| @@ -832,6 +930,101 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev, | |||
| 832 | uint32_t handle, uint32_t flags, | 930 | uint32_t handle, uint32_t flags, |
| 833 | int *prime_fd); | 931 | int *prime_fd); |
| 834 | 932 | ||
| 933 | /* | ||
| 934 | * MemoryOBject management - vmwgfx_mob.c | ||
| 935 | */ | ||
| 936 | struct vmw_mob; | ||
| 937 | extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob, | ||
| 938 | const struct vmw_sg_table *vsgt, | ||
| 939 | unsigned long num_data_pages, int32_t mob_id); | ||
| 940 | extern void vmw_mob_unbind(struct vmw_private *dev_priv, | ||
| 941 | struct vmw_mob *mob); | ||
| 942 | extern void vmw_mob_destroy(struct vmw_mob *mob); | ||
| 943 | extern struct vmw_mob *vmw_mob_create(unsigned long data_pages); | ||
| 944 | extern int vmw_otables_setup(struct vmw_private *dev_priv); | ||
| 945 | extern void vmw_otables_takedown(struct vmw_private *dev_priv); | ||
| 946 | |||
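These declarations describe a create/bind/unbind/destroy lifecycle built around a buffer object's scatter-gather table (obtained through vmw_bo_sg_table() above). A hedged sketch of the expected call order, with error paths trimmed and bo, mob_id and dev_priv supplied by the caller:

	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
	struct vmw_mob *mob;
	int ret;

	mob = vmw_mob_create(bo->num_pages);       /* allocate MOB bookkeeping */
	if (unlikely(mob == NULL))
		return -ENOMEM;

	/* Publish the page tables to the device under the given MOB id. */
	ret = vmw_mob_bind(dev_priv, mob, vsgt, bo->num_pages, mob_id);
	if (unlikely(ret != 0)) {
		vmw_mob_destroy(mob);
		return ret;
	}

	/* ... MOB usable by guest-backed resources here ... */

	vmw_mob_unbind(dev_priv, mob);             /* detach from the device */
	vmw_mob_destroy(mob);

The device-global object tables, by contrast, are set up once with vmw_otables_setup() and torn down with vmw_otables_takedown().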
| 947 | /* | ||
| 948 | * Context management - vmwgfx_context.c | ||
| 949 | */ | ||
| 950 | |||
| 951 | extern const struct vmw_user_resource_conv *user_context_converter; | ||
| 952 | |||
| 953 | extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); | ||
| 954 | |||
| 955 | extern int vmw_context_check(struct vmw_private *dev_priv, | ||
| 956 | struct ttm_object_file *tfile, | ||
| 957 | int id, | ||
| 958 | struct vmw_resource **p_res); | ||
| 959 | extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, | ||
| 960 | struct drm_file *file_priv); | ||
| 961 | extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, | ||
| 962 | struct drm_file *file_priv); | ||
| 963 | extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, | ||
| 964 | const struct vmw_ctx_bindinfo *ci); | ||
| 965 | extern void | ||
| 966 | vmw_context_binding_state_transfer(struct vmw_resource *res, | ||
| 967 | struct vmw_ctx_binding_state *cbs); | ||
| 968 | extern void vmw_context_binding_res_list_kill(struct list_head *head); | ||
| 969 | extern void vmw_context_binding_res_list_scrub(struct list_head *head); | ||
| 970 | extern int vmw_context_rebind_all(struct vmw_resource *ctx); | ||
| 971 | extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx); | ||
| 972 | |||
| 973 | /* | ||
| 974 | * Surface management - vmwgfx_surface.c | ||
| 975 | */ | ||
| 976 | |||
| 977 | extern const struct vmw_user_resource_conv *user_surface_converter; | ||
| 978 | |||
| 979 | extern void vmw_surface_res_free(struct vmw_resource *res); | ||
| 980 | extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, | ||
| 981 | struct drm_file *file_priv); | ||
| 982 | extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | ||
| 983 | struct drm_file *file_priv); | ||
| 984 | extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | ||
| 985 | struct drm_file *file_priv); | ||
| 986 | extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | ||
| 987 | struct drm_file *file_priv); | ||
| 988 | extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, | ||
| 989 | struct drm_file *file_priv); | ||
| 990 | extern int vmw_surface_check(struct vmw_private *dev_priv, | ||
| 991 | struct ttm_object_file *tfile, | ||
| 992 | uint32_t handle, int *id); | ||
| 993 | extern int vmw_surface_validate(struct vmw_private *dev_priv, | ||
| 994 | struct vmw_surface *srf); | ||
| 995 | |||
| 996 | /* | ||
| 997 | * Shader management - vmwgfx_shader.c | ||
| 998 | */ | ||
| 999 | |||
| 1000 | extern const struct vmw_user_resource_conv *user_shader_converter; | ||
| 1001 | |||
| 1002 | extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data, | ||
| 1003 | struct drm_file *file_priv); | ||
| 1004 | extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, | ||
| 1005 | struct drm_file *file_priv); | ||
| 1006 | extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man, | ||
| 1007 | SVGA3dShaderType shader_type, | ||
| 1008 | u32 *user_key); | ||
| 1009 | extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man, | ||
| 1010 | struct list_head *list); | ||
| 1011 | extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man, | ||
| 1012 | struct list_head *list); | ||
| 1013 | extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man, | ||
| 1014 | u32 user_key, | ||
| 1015 | SVGA3dShaderType shader_type, | ||
| 1016 | struct list_head *list); | ||
| 1017 | extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man, | ||
| 1018 | u32 user_key, const void *bytecode, | ||
| 1019 | SVGA3dShaderType shader_type, | ||
| 1020 | size_t size, | ||
| 1021 | struct ttm_object_file *tfile, | ||
| 1022 | struct list_head *list); | ||
| 1023 | extern struct vmw_compat_shader_manager * | ||
| 1024 | vmw_compat_shader_man_create(struct vmw_private *dev_priv); | ||
| 1025 | extern void | ||
| 1026 | vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man); | ||
| 1027 | |||
| 835 | 1028 | ||
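The compat-shader calls are transactional: add and remove operations are staged on a caller-supplied list and only take effect on commit, or are rolled back on revert. A rough usage sketch, assuming a manager created with vmw_compat_shader_man_create() and later destroyed with vmw_compat_shader_man_destroy(); the surrounding variable names are illustrative:

	struct list_head staged;
	int ret;

	INIT_LIST_HEAD(&staged);

	ret = vmw_compat_shader_add(man, user_key, bytecode, shader_type,
				    size, tfile, &staged);
	if (unlikely(ret != 0)) {
		vmw_compat_shaders_revert(man, &staged);  /* undo staged work */
		return ret;
	}

	vmw_compat_shaders_commit(man, &staged);          /* make the shader visible */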
| 836 | /** | 1029 | /** |
| 837 | * Inline helper functions | 1030 | * Inline helper functions |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 599f6469a1eb..efb575a7996c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
| @@ -54,6 +54,8 @@ struct vmw_resource_relocation { | |||
| 54 | * @res: Ref-counted pointer to the resource. | 54 | * @res: Ref-counted pointer to the resource. |
| 55 | * @switch_backup: Boolean whether to switch backup buffer on unreserve. | 55 | * @switch_backup: Boolean whether to switch backup buffer on unreserve. |
| 56 | * @new_backup: Refcounted pointer to the new backup buffer. | 56 | * @new_backup: Refcounted pointer to the new backup buffer. |
| 57 | * @staged_bindings: If @res is a context, tracks bindings set up during | ||
| 58 | * the command batch. Otherwise NULL. | ||
| 57 | * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL. | 59 | * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL. |
| 58 | * @first_usage: Set to true the first time the resource is referenced in | 60 | * @first_usage: Set to true the first time the resource is referenced in |
| 59 | * the command stream. | 61 | * the command stream. |
| @@ -65,12 +67,32 @@ struct vmw_resource_val_node { | |||
| 65 | struct drm_hash_item hash; | 67 | struct drm_hash_item hash; |
| 66 | struct vmw_resource *res; | 68 | struct vmw_resource *res; |
| 67 | struct vmw_dma_buffer *new_backup; | 69 | struct vmw_dma_buffer *new_backup; |
| 70 | struct vmw_ctx_binding_state *staged_bindings; | ||
| 68 | unsigned long new_backup_offset; | 71 | unsigned long new_backup_offset; |
| 69 | bool first_usage; | 72 | bool first_usage; |
| 70 | bool no_buffer_needed; | 73 | bool no_buffer_needed; |
| 71 | }; | 74 | }; |
| 72 | 75 | ||
| 73 | /** | 76 | /** |
| 77 | * struct vmw_cmd_entry - Describe a command for the verifier | ||
| 78 | * | ||
| 79 | * @user_allow: Whether allowed from the execbuf ioctl. | ||
| 80 | * @gb_disable: Whether disabled if guest-backed objects are available. | ||
| 81 | * @gb_enable: Whether enabled iff guest-backed objects are available. | ||
| 82 | */ | ||
| 83 | struct vmw_cmd_entry { | ||
| 84 | int (*func) (struct vmw_private *, struct vmw_sw_context *, | ||
| 85 | SVGA3dCmdHeader *); | ||
| 86 | bool user_allow; | ||
| 87 | bool gb_disable; | ||
| 88 | bool gb_enable; | ||
| 89 | }; | ||
| 90 | |||
| 91 | #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \ | ||
| 92 | [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\ | ||
| 93 | (_gb_disable), (_gb_enable)} | ||
| 94 | |||
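VMW_CMD_DEF is meant to build a lookup table of struct vmw_cmd_entry indexed by SVGA3D command id relative to SVGA_3D_CMD_BASE. A plausible use is sketched below; the table name and the flag values are illustrative and not part of this hunk, with the flags chosen per the user_allow/gb_disable/gb_enable semantics documented above:

	static const struct vmw_cmd_entry vmw_cmd_entries[] = {
		VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
			    &vmw_cmd_set_render_target_check, true, false, false),
		VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY,
			    &vmw_cmd_begin_gb_query, true, false, true),
	};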
| 95 | /** | ||
| 74 | * vmw_resource_unreserve - unreserve resources previously reserved for | 96 | * vmw_resource_unreserve - unreserve resources previously reserved for |
| 75 | * command submission. | 97 | * command submission. |
| 76 | * | 98 | * |
| @@ -87,6 +109,18 @@ static void vmw_resource_list_unreserve(struct list_head *list, | |||
| 87 | struct vmw_dma_buffer *new_backup = | 109 | struct vmw_dma_buffer *new_backup = |
| 88 | backoff ? NULL : val->new_backup; | 110 | backoff ? NULL : val->new_backup; |
| 89 | 111 | ||
| 112 | /* | ||
| 113 | * Transfer staged context bindings to the | ||
| 114 | * persistent context binding tracker. | ||
| 115 | */ | ||
| 116 | if (unlikely(val->staged_bindings)) { | ||
| 117 | if (!backoff) { | ||
| 118 | vmw_context_binding_state_transfer | ||
| 119 | (val->res, val->staged_bindings); | ||
| 120 | } | ||
| 121 | kfree(val->staged_bindings); | ||
| 122 | val->staged_bindings = NULL; | ||
| 123 | } | ||
| 90 | vmw_resource_unreserve(res, new_backup, | 124 | vmw_resource_unreserve(res, new_backup, |
| 91 | val->new_backup_offset); | 125 | val->new_backup_offset); |
| 92 | vmw_dmabuf_unreference(&val->new_backup); | 126 | vmw_dmabuf_unreference(&val->new_backup); |
| @@ -146,6 +180,44 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context, | |||
| 146 | } | 180 | } |
| 147 | 181 | ||
| 148 | /** | 182 | /** |
| 183 | * vmw_resource_context_res_add - Put resources previously bound to a context on | ||
| 184 | * the validation list | ||
| 185 | * | ||
| 186 | * @dev_priv: Pointer to a device private structure | ||
| 187 | * @sw_context: Pointer to a software context used for this command submission | ||
| 188 | * @ctx: Pointer to the context resource | ||
| 189 | * | ||
| 190 | * This function puts all resources that were previously bound to @ctx on | ||
| 191 | * the resource validation list. This is part of the context state reemission | ||
| 192 | */ | ||
| 193 | static int vmw_resource_context_res_add(struct vmw_private *dev_priv, | ||
| 194 | struct vmw_sw_context *sw_context, | ||
| 195 | struct vmw_resource *ctx) | ||
| 196 | { | ||
| 197 | struct list_head *binding_list; | ||
| 198 | struct vmw_ctx_binding *entry; | ||
| 199 | int ret = 0; | ||
| 200 | struct vmw_resource *res; | ||
| 201 | |||
| 202 | mutex_lock(&dev_priv->binding_mutex); | ||
| 203 | binding_list = vmw_context_binding_list(ctx); | ||
| 204 | |||
| 205 | list_for_each_entry(entry, binding_list, ctx_list) { | ||
| 206 | res = vmw_resource_reference_unless_doomed(entry->bi.res); | ||
| 207 | if (unlikely(res == NULL)) | ||
| 208 | continue; | ||
| 209 | |||
| 210 | ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL); | ||
| 211 | vmw_resource_unreference(&res); | ||
| 212 | if (unlikely(ret != 0)) | ||
| 213 | break; | ||
| 214 | } | ||
| 215 | |||
| 216 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 217 | return ret; | ||
| 218 | } | ||
| 219 | |||
| 220 | /** | ||
| 149 | * vmw_resource_relocation_add - Add a relocation to the relocation list | 221 | * vmw_resource_relocation_add - Add a relocation to the relocation list |
| 150 | * | 222 | * |
| 151 | * @list: Pointer to head of relocation list. | 223 | * @list: Pointer to head of relocation list. |
| @@ -201,8 +273,12 @@ static void vmw_resource_relocations_apply(uint32_t *cb, | |||
| 201 | { | 273 | { |
| 202 | struct vmw_resource_relocation *rel; | 274 | struct vmw_resource_relocation *rel; |
| 203 | 275 | ||
| 204 | list_for_each_entry(rel, list, head) | 276 | list_for_each_entry(rel, list, head) { |
| 205 | cb[rel->offset] = rel->res->id; | 277 | if (likely(rel->res != NULL)) |
| 278 | cb[rel->offset] = rel->res->id; | ||
| 279 | else | ||
| 280 | cb[rel->offset] = SVGA_3D_CMD_NOP; | ||
| 281 | } | ||
| 206 | } | 282 | } |
| 207 | 283 | ||
| 208 | static int vmw_cmd_invalid(struct vmw_private *dev_priv, | 284 | static int vmw_cmd_invalid(struct vmw_private *dev_priv, |
| @@ -224,6 +300,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv, | |||
| 224 | * | 300 | * |
| 225 | * @sw_context: The software context used for this command submission batch. | 301 | * @sw_context: The software context used for this command submission batch. |
| 226 | * @bo: The buffer object to add. | 302 | * @bo: The buffer object to add. |
| 303 | * @validate_as_mob: Validate this buffer as a MOB. | ||
| 227 | * @p_val_node: If non-NULL Will be updated with the validate node number | 304 | * @p_val_node: If non-NULL Will be updated with the validate node number |
| 228 | * on return. | 305 | * on return. |
| 229 | * | 306 | * |
| @@ -232,6 +309,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv, | |||
| 232 | */ | 309 | */ |
| 233 | static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, | 310 | static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, |
| 234 | struct ttm_buffer_object *bo, | 311 | struct ttm_buffer_object *bo, |
| 312 | bool validate_as_mob, | ||
| 235 | uint32_t *p_val_node) | 313 | uint32_t *p_val_node) |
| 236 | { | 314 | { |
| 237 | uint32_t val_node; | 315 | uint32_t val_node; |
| @@ -244,6 +322,10 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, | |||
| 244 | &hash) == 0)) { | 322 | &hash) == 0)) { |
| 245 | vval_buf = container_of(hash, struct vmw_validate_buffer, | 323 | vval_buf = container_of(hash, struct vmw_validate_buffer, |
| 246 | hash); | 324 | hash); |
| 325 | if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) { | ||
| 326 | DRM_ERROR("Inconsistent buffer usage.\n"); | ||
| 327 | return -EINVAL; | ||
| 328 | } | ||
| 247 | val_buf = &vval_buf->base; | 329 | val_buf = &vval_buf->base; |
| 248 | val_node = vval_buf - sw_context->val_bufs; | 330 | val_node = vval_buf - sw_context->val_bufs; |
| 249 | } else { | 331 | } else { |
| @@ -266,6 +348,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, | |||
| 266 | val_buf->bo = ttm_bo_reference(bo); | 348 | val_buf->bo = ttm_bo_reference(bo); |
| 267 | val_buf->reserved = false; | 349 | val_buf->reserved = false; |
| 268 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); | 350 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); |
| 351 | vval_buf->validate_as_mob = validate_as_mob; | ||
| 269 | } | 352 | } |
| 270 | 353 | ||
| 271 | sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC; | 354 | sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC; |
| @@ -302,7 +385,8 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context) | |||
| 302 | struct ttm_buffer_object *bo = &res->backup->base; | 385 | struct ttm_buffer_object *bo = &res->backup->base; |
| 303 | 386 | ||
| 304 | ret = vmw_bo_to_validate_list | 387 | ret = vmw_bo_to_validate_list |
| 305 | (sw_context, bo, NULL); | 388 | (sw_context, bo, |
| 389 | vmw_resource_needs_backup(res), NULL); | ||
| 306 | 390 | ||
| 307 | if (unlikely(ret != 0)) | 391 | if (unlikely(ret != 0)) |
| 308 | return ret; | 392 | return ret; |
| @@ -339,22 +423,27 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context) | |||
| 339 | } | 423 | } |
| 340 | 424 | ||
| 341 | /** | 425 | /** |
| 342 | * vmw_cmd_res_check - Check that a resource is present and if so, put it | 426 | * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it |
| 343 | * on the resource validate list unless it's already there. | 427 | * on the resource validate list unless it's already there. |
| 344 | * | 428 | * |
| 345 | * @dev_priv: Pointer to a device private structure. | 429 | * @dev_priv: Pointer to a device private structure. |
| 346 | * @sw_context: Pointer to the software context. | 430 | * @sw_context: Pointer to the software context. |
| 347 | * @res_type: Resource type. | 431 | * @res_type: Resource type. |
| 348 | * @converter: User-space visible, type-specific information. | 432 | * @converter: User-space visible, type-specific information. |
| 349 | * @id: Pointer to the location in the command buffer currently being | 433 | * @id: user-space resource id handle. |
| 434 | * @id_loc: Pointer to the location in the command buffer currently being | ||
| 350 | * parsed from where the user-space resource id handle is located. | 435 | * parsed from where the user-space resource id handle is located. |
| 436 | * @p_val: Pointer to pointer to resource validation node. Populated ||
| 437 | * on exit. | ||
| 351 | */ | 438 | */ |
| 352 | static int vmw_cmd_res_check(struct vmw_private *dev_priv, | 439 | static int |
| 353 | struct vmw_sw_context *sw_context, | 440 | vmw_cmd_compat_res_check(struct vmw_private *dev_priv, |
| 354 | enum vmw_res_type res_type, | 441 | struct vmw_sw_context *sw_context, |
| 355 | const struct vmw_user_resource_conv *converter, | 442 | enum vmw_res_type res_type, |
| 356 | uint32_t *id, | 443 | const struct vmw_user_resource_conv *converter, |
| 357 | struct vmw_resource_val_node **p_val) | 444 | uint32_t id, |
| 445 | uint32_t *id_loc, | ||
| 446 | struct vmw_resource_val_node **p_val) | ||
| 358 | { | 447 | { |
| 359 | struct vmw_res_cache_entry *rcache = | 448 | struct vmw_res_cache_entry *rcache = |
| 360 | &sw_context->res_cache[res_type]; | 449 | &sw_context->res_cache[res_type]; |
| @@ -362,15 +451,22 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, | |||
| 362 | struct vmw_resource_val_node *node; | 451 | struct vmw_resource_val_node *node; |
| 363 | int ret; | 452 | int ret; |
| 364 | 453 | ||
| 365 | if (*id == SVGA3D_INVALID_ID) | 454 | if (id == SVGA3D_INVALID_ID) { |
| 455 | if (p_val) | ||
| 456 | *p_val = NULL; | ||
| 457 | if (res_type == vmw_res_context) { | ||
| 458 | DRM_ERROR("Illegal context invalid id.\n"); | ||
| 459 | return -EINVAL; | ||
| 460 | } | ||
| 366 | return 0; | 461 | return 0; |
| 462 | } | ||
| 367 | 463 | ||
| 368 | /* | 464 | /* |
| 369 | * Fastpath in case of repeated commands referencing the same | 465 | * Fastpath in case of repeated commands referencing the same |
| 370 | * resource | 466 | * resource |
| 371 | */ | 467 | */ |
| 372 | 468 | ||
| 373 | if (likely(rcache->valid && *id == rcache->handle)) { | 469 | if (likely(rcache->valid && id == rcache->handle)) { |
| 374 | const struct vmw_resource *res = rcache->res; | 470 | const struct vmw_resource *res = rcache->res; |
| 375 | 471 | ||
| 376 | rcache->node->first_usage = false; | 472 | rcache->node->first_usage = false; |
| @@ -379,28 +475,28 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, | |||
| 379 | 475 | ||
| 380 | return vmw_resource_relocation_add | 476 | return vmw_resource_relocation_add |
| 381 | (&sw_context->res_relocations, res, | 477 | (&sw_context->res_relocations, res, |
| 382 | id - sw_context->buf_start); | 478 | id_loc - sw_context->buf_start); |
| 383 | } | 479 | } |
| 384 | 480 | ||
| 385 | ret = vmw_user_resource_lookup_handle(dev_priv, | 481 | ret = vmw_user_resource_lookup_handle(dev_priv, |
| 386 | sw_context->tfile, | 482 | sw_context->fp->tfile, |
| 387 | *id, | 483 | id, |
| 388 | converter, | 484 | converter, |
| 389 | &res); | 485 | &res); |
| 390 | if (unlikely(ret != 0)) { | 486 | if (unlikely(ret != 0)) { |
| 391 | DRM_ERROR("Could not find or use resource 0x%08x.\n", | 487 | DRM_ERROR("Could not find or use resource 0x%08x.\n", |
| 392 | (unsigned) *id); | 488 | (unsigned) id); |
| 393 | dump_stack(); | 489 | dump_stack(); |
| 394 | return ret; | 490 | return ret; |
| 395 | } | 491 | } |
| 396 | 492 | ||
| 397 | rcache->valid = true; | 493 | rcache->valid = true; |
| 398 | rcache->res = res; | 494 | rcache->res = res; |
| 399 | rcache->handle = *id; | 495 | rcache->handle = id; |
| 400 | 496 | ||
| 401 | ret = vmw_resource_relocation_add(&sw_context->res_relocations, | 497 | ret = vmw_resource_relocation_add(&sw_context->res_relocations, |
| 402 | res, | 498 | res, |
| 403 | id - sw_context->buf_start); | 499 | id_loc - sw_context->buf_start); |
| 404 | if (unlikely(ret != 0)) | 500 | if (unlikely(ret != 0)) |
| 405 | goto out_no_reloc; | 501 | goto out_no_reloc; |
| 406 | 502 | ||
| @@ -411,6 +507,22 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, | |||
| 411 | rcache->node = node; | 507 | rcache->node = node; |
| 412 | if (p_val) | 508 | if (p_val) |
| 413 | *p_val = node; | 509 | *p_val = node; |
| 510 | |||
| 511 | if (dev_priv->has_mob && node->first_usage && | ||
| 512 | res_type == vmw_res_context) { | ||
| 513 | ret = vmw_resource_context_res_add(dev_priv, sw_context, res); | ||
| 514 | if (unlikely(ret != 0)) | ||
| 515 | goto out_no_reloc; | ||
| 516 | node->staged_bindings = | ||
| 517 | kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); | ||
| 518 | if (node->staged_bindings == NULL) { | ||
| 519 | DRM_ERROR("Failed to allocate context binding " | ||
| 520 | "information.\n"); | ||
| 521 | ret = -ENOMEM; ||
| 522 | goto out_no_reloc; ||
| 522 | } | ||
| 523 | INIT_LIST_HEAD(&node->staged_bindings->list); | ||
| 524 | } | ||
| 525 | |||
| 414 | vmw_resource_unreference(&res); | 526 | vmw_resource_unreference(&res); |
| 415 | return 0; | 527 | return 0; |
| 416 | 528 | ||
| @@ -422,6 +534,59 @@ out_no_reloc: | |||
| 422 | } | 534 | } |
| 423 | 535 | ||
| 424 | /** | 536 | /** |
| 537 | * vmw_cmd_res_check - Check that a resource is present and if so, put it | ||
| 538 | * on the resource validate list unless it's already there. | ||
| 539 | * | ||
| 540 | * @dev_priv: Pointer to a device private structure. | ||
| 541 | * @sw_context: Pointer to the software context. | ||
| 542 | * @res_type: Resource type. | ||
| 543 | * @converter: User-space visible, type-specific information. ||
| 544 | * @id_loc: Pointer to the location in the command buffer currently being | ||
| 545 | * parsed from where the user-space resource id handle is located. | ||
| 546 | * @p_val: Pointer to pointer to resource validation node. Populated ||
| 547 | * on exit. | ||
| 548 | */ | ||
| 549 | static int | ||
| 550 | vmw_cmd_res_check(struct vmw_private *dev_priv, | ||
| 551 | struct vmw_sw_context *sw_context, | ||
| 552 | enum vmw_res_type res_type, | ||
| 553 | const struct vmw_user_resource_conv *converter, | ||
| 554 | uint32_t *id_loc, | ||
| 555 | struct vmw_resource_val_node **p_val) | ||
| 556 | { | ||
| 557 | return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type, | ||
| 558 | converter, *id_loc, id_loc, p_val); | ||
| 559 | } | ||
| 560 | |||
| 561 | /** | ||
| 562 | * vmw_rebind_contexts - Rebind all resources previously bound to | ||
| 563 | * referenced contexts. | ||
| 564 | * | ||
| 565 | * @sw_context: Pointer to the software context. | ||
| 566 | * | ||
| 567 | * Rebind context binding points that have been scrubbed because of eviction. | ||
| 568 | */ | ||
| 569 | static int vmw_rebind_contexts(struct vmw_sw_context *sw_context) | ||
| 570 | { | ||
| 571 | struct vmw_resource_val_node *val; | ||
| 572 | int ret; | ||
| 573 | |||
| 574 | list_for_each_entry(val, &sw_context->resource_list, head) { | ||
| 575 | if (likely(!val->staged_bindings)) | ||
| 576 | continue; | ||
| 577 | |||
| 578 | ret = vmw_context_rebind_all(val->res); | ||
| 579 | if (unlikely(ret != 0)) { | ||
| 580 | if (ret != -ERESTARTSYS) | ||
| 581 | DRM_ERROR("Failed to rebind context.\n"); | ||
| 582 | return ret; | ||
| 583 | } | ||
| 584 | } | ||
| 585 | |||
| 586 | return 0; | ||
| 587 | } | ||
| 588 | |||
| 589 | /** | ||
| 425 | * vmw_cmd_cid_check - Check a command header for valid context information. | 590 | * vmw_cmd_cid_check - Check a command header for valid context information. |
| 426 | * | 591 | * |
| 427 | * @dev_priv: Pointer to a device private structure. | 592 | * @dev_priv: Pointer to a device private structure. |
| @@ -437,7 +602,7 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv, | |||
| 437 | { | 602 | { |
| 438 | struct vmw_cid_cmd { | 603 | struct vmw_cid_cmd { |
| 439 | SVGA3dCmdHeader header; | 604 | SVGA3dCmdHeader header; |
| 440 | __le32 cid; | 605 | uint32_t cid; |
| 441 | } *cmd; | 606 | } *cmd; |
| 442 | 607 | ||
| 443 | cmd = container_of(header, struct vmw_cid_cmd, header); | 608 | cmd = container_of(header, struct vmw_cid_cmd, header); |
| @@ -453,17 +618,35 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, | |||
| 453 | SVGA3dCmdHeader header; | 618 | SVGA3dCmdHeader header; |
| 454 | SVGA3dCmdSetRenderTarget body; | 619 | SVGA3dCmdSetRenderTarget body; |
| 455 | } *cmd; | 620 | } *cmd; |
| 621 | struct vmw_resource_val_node *ctx_node; | ||
| 622 | struct vmw_resource_val_node *res_node; | ||
| 456 | int ret; | 623 | int ret; |
| 457 | 624 | ||
| 458 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 625 | cmd = container_of(header, struct vmw_sid_cmd, header); |
| 626 | |||
| 627 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
| 628 | user_context_converter, &cmd->body.cid, | ||
| 629 | &ctx_node); | ||
| 459 | if (unlikely(ret != 0)) | 630 | if (unlikely(ret != 0)) |
| 460 | return ret; | 631 | return ret; |
| 461 | 632 | ||
| 462 | cmd = container_of(header, struct vmw_sid_cmd, header); | ||
| 463 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 633 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
| 464 | user_surface_converter, | 634 | user_surface_converter, |
| 465 | &cmd->body.target.sid, NULL); | 635 | &cmd->body.target.sid, &res_node); |
| 466 | return ret; | 636 | if (unlikely(ret != 0)) |
| 637 | return ret; | ||
| 638 | |||
| 639 | if (dev_priv->has_mob) { | ||
| 640 | struct vmw_ctx_bindinfo bi; | ||
| 641 | |||
| 642 | bi.ctx = ctx_node->res; | ||
| 643 | bi.res = res_node ? res_node->res : NULL; | ||
| 644 | bi.bt = vmw_ctx_binding_rt; | ||
| 645 | bi.i1.rt_type = cmd->body.type; | ||
| 646 | return vmw_context_binding_add(ctx_node->staged_bindings, &bi); | ||
| 647 | } | ||
| 648 | |||
| 649 | return 0; | ||
| 467 | } | 650 | } |
| 468 | 651 | ||
| 469 | static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, | 652 | static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, |
| @@ -519,11 +702,6 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, | |||
| 519 | 702 | ||
| 520 | cmd = container_of(header, struct vmw_sid_cmd, header); | 703 | cmd = container_of(header, struct vmw_sid_cmd, header); |
| 521 | 704 | ||
| 522 | if (unlikely(!sw_context->kernel)) { | ||
| 523 | DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id); | ||
| 524 | return -EPERM; | ||
| 525 | } | ||
| 526 | |||
| 527 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 705 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
| 528 | user_surface_converter, | 706 | user_surface_converter, |
| 529 | &cmd->body.srcImage.sid, NULL); | 707 | &cmd->body.srcImage.sid, NULL); |
| @@ -541,11 +719,6 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv, | |||
| 541 | 719 | ||
| 542 | cmd = container_of(header, struct vmw_sid_cmd, header); | 720 | cmd = container_of(header, struct vmw_sid_cmd, header); |
| 543 | 721 | ||
| 544 | if (unlikely(!sw_context->kernel)) { | ||
| 545 | DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id); | ||
| 546 | return -EPERM; | ||
| 547 | } | ||
| 548 | |||
| 549 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 722 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
| 550 | user_surface_converter, &cmd->body.sid, | 723 | user_surface_converter, &cmd->body.sid, |
| 551 | NULL); | 724 | NULL); |
| @@ -586,7 +759,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, | |||
| 586 | sw_context->needs_post_query_barrier = true; | 759 | sw_context->needs_post_query_barrier = true; |
| 587 | ret = vmw_bo_to_validate_list(sw_context, | 760 | ret = vmw_bo_to_validate_list(sw_context, |
| 588 | sw_context->cur_query_bo, | 761 | sw_context->cur_query_bo, |
| 589 | NULL); | 762 | dev_priv->has_mob, NULL); |
| 590 | if (unlikely(ret != 0)) | 763 | if (unlikely(ret != 0)) |
| 591 | return ret; | 764 | return ret; |
| 592 | } | 765 | } |
| @@ -594,7 +767,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, | |||
| 594 | 767 | ||
| 595 | ret = vmw_bo_to_validate_list(sw_context, | 768 | ret = vmw_bo_to_validate_list(sw_context, |
| 596 | dev_priv->dummy_query_bo, | 769 | dev_priv->dummy_query_bo, |
| 597 | NULL); | 770 | dev_priv->has_mob, NULL); |
| 598 | if (unlikely(ret != 0)) | 771 | if (unlikely(ret != 0)) |
| 599 | return ret; | 772 | return ret; |
| 600 | 773 | ||
| @@ -672,6 +845,66 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, | |||
| 672 | } | 845 | } |
| 673 | 846 | ||
| 674 | /** | 847 | /** |
| 848 | * vmw_translate_mob_ptr - Prepare to translate a user-space buffer ||
| 849 | * handle to a MOB id. | ||
| 850 | * | ||
| 851 | * @dev_priv: Pointer to a device private structure. | ||
| 852 | * @sw_context: The software context used for this command batch validation. | ||
| 853 | * @id: Pointer to the user-space handle to be translated. | ||
| 854 | * @vmw_bo_p: Points to a location that, on successful return will carry | ||
| 855 | * a reference-counted pointer to the DMA buffer identified by the | ||
| 856 | * user-space handle in @id. | ||
| 857 | * | ||
| 858 | * This function saves information needed to translate a user-space buffer | ||
| 859 | * handle to a MOB id. The translation does not take place immediately, but | ||
| 860 | * during a call to vmw_apply_relocations(). This function builds a relocation | ||
| 861 | * list and a list of buffers to validate. The former needs to be freed using | ||
| 862 | * either vmw_apply_relocations() or vmw_free_relocations(). The latter | ||
| 863 | * needs to be freed using vmw_clear_validations. | ||
| 864 | */ | ||
| 865 | static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, | ||
| 866 | struct vmw_sw_context *sw_context, | ||
| 867 | SVGAMobId *id, | ||
| 868 | struct vmw_dma_buffer **vmw_bo_p) | ||
| 869 | { | ||
| 870 | struct vmw_dma_buffer *vmw_bo = NULL; | ||
| 871 | struct ttm_buffer_object *bo; | ||
| 872 | uint32_t handle = *id; | ||
| 873 | struct vmw_relocation *reloc; | ||
| 874 | int ret; | ||
| 875 | |||
| 876 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); | ||
| 877 | if (unlikely(ret != 0)) { | ||
| 878 | DRM_ERROR("Could not find or use MOB buffer.\n"); | ||
| 879 | return -EINVAL; | ||
| 880 | } | ||
| 881 | bo = &vmw_bo->base; | ||
| 882 | |||
| 883 | if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { | ||
| 884 | DRM_ERROR("Max number relocations per submission" | ||
| 885 | " exceeded\n"); | ||
| 886 | ret = -EINVAL; | ||
| 887 | goto out_no_reloc; | ||
| 888 | } | ||
| 889 | |||
| 890 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; | ||
| 891 | reloc->mob_loc = id; | ||
| 892 | reloc->location = NULL; | ||
| 893 | |||
| 894 | ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index); | ||
| 895 | if (unlikely(ret != 0)) | ||
| 896 | goto out_no_reloc; | ||
| 897 | |||
| 898 | *vmw_bo_p = vmw_bo; | ||
| 899 | return 0; | ||
| 900 | |||
| 901 | out_no_reloc: | ||
| 902 | vmw_dmabuf_unreference(&vmw_bo); | ||
| 903 | *vmw_bo_p = NULL; ||
| 904 | return ret; | ||
| 905 | } | ||
| 906 | |||
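vmw_translate_mob_ptr() only records where the MOB id must eventually be written (reloc->mob_loc) and which validation node the backing buffer landed in (reloc->index); the actual patching is deferred to vmw_apply_relocations(), which is not part of this hunk. One would expect the apply step to distinguish MOB and GMR relocations roughly as follows (a hedged sketch, not the driver's actual implementation):

	struct vmw_relocation *reloc = &sw_context->relocs[i];
	struct ttm_buffer_object *bo = sw_context->val_bufs[reloc->index].base.bo;

	if (reloc->mob_loc != NULL)
		*reloc->mob_loc = bo->mem.start;        /* MOB id taken from the validated placement */
	else
		reloc->location->gmrId = bo->mem.start; /* legacy SVGAGuestPtr path */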
| 907 | /** | ||
| 675 | * vmw_translate_guest_pointer - Prepare to translate a user-space buffer | 908 | * vmw_translate_guest_pointer - Prepare to translate a user-space buffer |
| 676 | * handle to a valid SVGAGuestPtr | 909 | * handle to a valid SVGAGuestPtr |
| 677 | * | 910 | * |
| @@ -701,7 +934,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | |||
| 701 | struct vmw_relocation *reloc; | 934 | struct vmw_relocation *reloc; |
| 702 | int ret; | 935 | int ret; |
| 703 | 936 | ||
| 704 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); | 937 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); |
| 705 | if (unlikely(ret != 0)) { | 938 | if (unlikely(ret != 0)) { |
| 706 | DRM_ERROR("Could not find or use GMR region.\n"); | 939 | DRM_ERROR("Could not find or use GMR region.\n"); |
| 707 | return -EINVAL; | 940 | return -EINVAL; |
| @@ -718,7 +951,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | |||
| 718 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; | 951 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; |
| 719 | reloc->location = ptr; | 952 | reloc->location = ptr; |
| 720 | 953 | ||
| 721 | ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index); | 954 | ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index); |
| 722 | if (unlikely(ret != 0)) | 955 | if (unlikely(ret != 0)) |
| 723 | goto out_no_reloc; | 956 | goto out_no_reloc; |
| 724 | 957 | ||
| @@ -732,6 +965,30 @@ out_no_reloc: | |||
| 732 | } | 965 | } |
| 733 | 966 | ||
| 734 | /** | 967 | /** |
| 968 | * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command. | ||
| 969 | * | ||
| 970 | * @dev_priv: Pointer to a device private struct. | ||
| 971 | * @sw_context: The software context used for this command submission. | ||
| 972 | * @header: Pointer to the command header in the command stream. | ||
| 973 | */ | ||
| 974 | static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv, | ||
| 975 | struct vmw_sw_context *sw_context, | ||
| 976 | SVGA3dCmdHeader *header) | ||
| 977 | { | ||
| 978 | struct vmw_begin_gb_query_cmd { | ||
| 979 | SVGA3dCmdHeader header; | ||
| 980 | SVGA3dCmdBeginGBQuery q; | ||
| 981 | } *cmd; | ||
| 982 | |||
| 983 | cmd = container_of(header, struct vmw_begin_gb_query_cmd, | ||
| 984 | header); | ||
| 985 | |||
| 986 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
| 987 | user_context_converter, &cmd->q.cid, | ||
| 988 | NULL); | ||
| 989 | } | ||
| 990 | |||
| 991 | /** | ||
| 735 | * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command. | 992 | * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command. |
| 736 | * | 993 | * |
| 737 | * @dev_priv: Pointer to a device private struct. | 994 | * @dev_priv: Pointer to a device private struct. |
| @@ -750,12 +1007,64 @@ static int vmw_cmd_begin_query(struct vmw_private *dev_priv, | |||
| 750 | cmd = container_of(header, struct vmw_begin_query_cmd, | 1007 | cmd = container_of(header, struct vmw_begin_query_cmd, |
| 751 | header); | 1008 | header); |
| 752 | 1009 | ||
| 1010 | if (unlikely(dev_priv->has_mob)) { | ||
| 1011 | struct { | ||
| 1012 | SVGA3dCmdHeader header; | ||
| 1013 | SVGA3dCmdBeginGBQuery q; | ||
| 1014 | } gb_cmd; | ||
| 1015 | |||
| 1016 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); | ||
| 1017 | |||
| 1018 | gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY; | ||
| 1019 | gb_cmd.header.size = cmd->header.size; | ||
| 1020 | gb_cmd.q.cid = cmd->q.cid; | ||
| 1021 | gb_cmd.q.type = cmd->q.type; | ||
| 1022 | |||
| 1023 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); | ||
| 1024 | return vmw_cmd_begin_gb_query(dev_priv, sw_context, header); | ||
| 1025 | } | ||
| 1026 | |||
| 753 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | 1027 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
| 754 | user_context_converter, &cmd->q.cid, | 1028 | user_context_converter, &cmd->q.cid, |
| 755 | NULL); | 1029 | NULL); |
| 756 | } | 1030 | } |
| 757 | 1031 | ||
| 758 | /** | 1032 | /** |
| 1033 | * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command. | ||
| 1034 | * | ||
| 1035 | * @dev_priv: Pointer to a device private struct. | ||
| 1036 | * @sw_context: The software context used for this command submission. | ||
| 1037 | * @header: Pointer to the command header in the command stream. | ||
| 1038 | */ | ||
| 1039 | static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, | ||
| 1040 | struct vmw_sw_context *sw_context, | ||
| 1041 | SVGA3dCmdHeader *header) | ||
| 1042 | { | ||
| 1043 | struct vmw_dma_buffer *vmw_bo; | ||
| 1044 | struct vmw_query_cmd { | ||
| 1045 | SVGA3dCmdHeader header; | ||
| 1046 | SVGA3dCmdEndGBQuery q; | ||
| 1047 | } *cmd; | ||
| 1048 | int ret; | ||
| 1049 | |||
| 1050 | cmd = container_of(header, struct vmw_query_cmd, header); | ||
| 1051 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | ||
| 1052 | if (unlikely(ret != 0)) | ||
| 1053 | return ret; | ||
| 1054 | |||
| 1055 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, | ||
| 1056 | &cmd->q.mobid, | ||
| 1057 | &vmw_bo); | ||
| 1058 | if (unlikely(ret != 0)) | ||
| 1059 | return ret; | ||
| 1060 | |||
| 1061 | ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); | ||
| 1062 | |||
| 1063 | vmw_dmabuf_unreference(&vmw_bo); | ||
| 1064 | return ret; | ||
| 1065 | } | ||
| 1066 | |||
| 1067 | /** | ||
| 759 | * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command. | 1068 | * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command. |
| 760 | * | 1069 | * |
| 761 | * @dev_priv: Pointer to a device private struct. | 1070 | * @dev_priv: Pointer to a device private struct. |
| @@ -774,6 +1083,25 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv, | |||
| 774 | int ret; | 1083 | int ret; |
| 775 | 1084 | ||
| 776 | cmd = container_of(header, struct vmw_query_cmd, header); | 1085 | cmd = container_of(header, struct vmw_query_cmd, header); |
| 1086 | if (dev_priv->has_mob) { | ||
| 1087 | struct { | ||
| 1088 | SVGA3dCmdHeader header; | ||
| 1089 | SVGA3dCmdEndGBQuery q; | ||
| 1090 | } gb_cmd; | ||
| 1091 | |||
| 1092 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); | ||
| 1093 | |||
| 1094 | gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY; | ||
| 1095 | gb_cmd.header.size = cmd->header.size; | ||
| 1096 | gb_cmd.q.cid = cmd->q.cid; | ||
| 1097 | gb_cmd.q.type = cmd->q.type; | ||
| 1098 | gb_cmd.q.mobid = cmd->q.guestResult.gmrId; | ||
| 1099 | gb_cmd.q.offset = cmd->q.guestResult.offset; | ||
| 1100 | |||
| 1101 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); | ||
| 1102 | return vmw_cmd_end_gb_query(dev_priv, sw_context, header); | ||
| 1103 | } | ||
| 1104 | |||
| 777 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 1105 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
| 778 | if (unlikely(ret != 0)) | 1106 | if (unlikely(ret != 0)) |
| 779 | return ret; | 1107 | return ret; |
| @@ -790,7 +1118,40 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv, | |||
| 790 | return ret; | 1118 | return ret; |
| 791 | } | 1119 | } |
| 792 | 1120 | ||
| 793 | /* | 1121 | /** |
| 1122 | * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command. | ||
| 1123 | * | ||
| 1124 | * @dev_priv: Pointer to a device private struct. | ||
| 1125 | * @sw_context: The software context used for this command submission. | ||
| 1126 | * @header: Pointer to the command header in the command stream. | ||
| 1127 | */ | ||
| 1128 | static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, | ||
| 1129 | struct vmw_sw_context *sw_context, | ||
| 1130 | SVGA3dCmdHeader *header) | ||
| 1131 | { | ||
| 1132 | struct vmw_dma_buffer *vmw_bo; | ||
| 1133 | struct vmw_query_cmd { | ||
| 1134 | SVGA3dCmdHeader header; | ||
| 1135 | SVGA3dCmdWaitForGBQuery q; | ||
| 1136 | } *cmd; | ||
| 1137 | int ret; | ||
| 1138 | |||
| 1139 | cmd = container_of(header, struct vmw_query_cmd, header); | ||
| 1140 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | ||
| 1141 | if (unlikely(ret != 0)) | ||
| 1142 | return ret; | ||
| 1143 | |||
| 1144 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, | ||
| 1145 | &cmd->q.mobid, | ||
| 1146 | &vmw_bo); | ||
| 1147 | if (unlikely(ret != 0)) | ||
| 1148 | return ret; | ||
| 1149 | |||
| 1150 | vmw_dmabuf_unreference(&vmw_bo); | ||
| 1151 | return 0; | ||
| 1152 | } | ||
| 1153 | |||
| 1154 | /** | ||
| 794 | * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command. | 1155 | * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command. |
| 795 | * | 1156 | * |
| 796 | * @dev_priv: Pointer to a device private struct. | 1157 | * @dev_priv: Pointer to a device private struct. |
| @@ -809,6 +1170,25 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, | |||
| 809 | int ret; | 1170 | int ret; |
| 810 | 1171 | ||
| 811 | cmd = container_of(header, struct vmw_query_cmd, header); | 1172 | cmd = container_of(header, struct vmw_query_cmd, header); |
| 1173 | if (dev_priv->has_mob) { | ||
| 1174 | struct { | ||
| 1175 | SVGA3dCmdHeader header; | ||
| 1176 | SVGA3dCmdWaitForGBQuery q; | ||
| 1177 | } gb_cmd; | ||
| 1178 | |||
| 1179 | BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); | ||
| 1180 | |||
| 1181 | gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY; | ||
| 1182 | gb_cmd.header.size = cmd->header.size; | ||
| 1183 | gb_cmd.q.cid = cmd->q.cid; | ||
| 1184 | gb_cmd.q.type = cmd->q.type; | ||
| 1185 | gb_cmd.q.mobid = cmd->q.guestResult.gmrId; | ||
| 1186 | gb_cmd.q.offset = cmd->q.guestResult.offset; | ||
| 1187 | |||
| 1188 | memcpy(cmd, &gb_cmd, sizeof(*cmd)); | ||
| 1189 | return vmw_cmd_wait_gb_query(dev_priv, sw_context, header); | ||
| 1190 | } | ||
| 1191 | |||
| 812 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 1192 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
| 813 | if (unlikely(ret != 0)) | 1193 | if (unlikely(ret != 0)) |
| 814 | return ret; | 1194 | return ret; |
| @@ -853,7 +1233,8 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
| 853 | 1233 | ||
| 854 | srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); | 1234 | srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); |
| 855 | 1235 | ||
| 856 | vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header); | 1236 | vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, |
| 1237 | header); | ||
| 857 | 1238 | ||
| 858 | out_no_surface: | 1239 | out_no_surface: |
| 859 | vmw_dmabuf_unreference(&vmw_bo); | 1240 | vmw_dmabuf_unreference(&vmw_bo); |
| @@ -921,15 +1302,22 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv, | |||
| 921 | struct vmw_tex_state_cmd { | 1302 | struct vmw_tex_state_cmd { |
| 922 | SVGA3dCmdHeader header; | 1303 | SVGA3dCmdHeader header; |
| 923 | SVGA3dCmdSetTextureState state; | 1304 | SVGA3dCmdSetTextureState state; |
| 924 | }; | 1305 | } *cmd; |
| 925 | 1306 | ||
| 926 | SVGA3dTextureState *last_state = (SVGA3dTextureState *) | 1307 | SVGA3dTextureState *last_state = (SVGA3dTextureState *) |
| 927 | ((unsigned long) header + header->size + sizeof(header)); | 1308 | ((unsigned long) header + header->size + sizeof(header)); |
| 928 | SVGA3dTextureState *cur_state = (SVGA3dTextureState *) | 1309 | SVGA3dTextureState *cur_state = (SVGA3dTextureState *) |
| 929 | ((unsigned long) header + sizeof(struct vmw_tex_state_cmd)); | 1310 | ((unsigned long) header + sizeof(struct vmw_tex_state_cmd)); |
| 1311 | struct vmw_resource_val_node *ctx_node; | ||
| 1312 | struct vmw_resource_val_node *res_node; | ||
| 930 | int ret; | 1313 | int ret; |
| 931 | 1314 | ||
| 932 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 1315 | cmd = container_of(header, struct vmw_tex_state_cmd, |
| 1316 | header); | ||
| 1317 | |||
| 1318 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
| 1319 | user_context_converter, &cmd->state.cid, | ||
| 1320 | &ctx_node); | ||
| 933 | if (unlikely(ret != 0)) | 1321 | if (unlikely(ret != 0)) |
| 934 | return ret; | 1322 | return ret; |
| 935 | 1323 | ||
| @@ -939,9 +1327,20 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv, | |||
| 939 | 1327 | ||
| 940 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 1328 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
| 941 | user_surface_converter, | 1329 | user_surface_converter, |
| 942 | &cur_state->value, NULL); | 1330 | &cur_state->value, &res_node); |
| 943 | if (unlikely(ret != 0)) | 1331 | if (unlikely(ret != 0)) |
| 944 | return ret; | 1332 | return ret; |
| 1333 | |||
| 1334 | if (dev_priv->has_mob) { | ||
| 1335 | struct vmw_ctx_bindinfo bi; | ||
| 1336 | |||
| 1337 | bi.ctx = ctx_node->res; | ||
| 1338 | bi.res = res_node ? res_node->res : NULL; | ||
| 1339 | bi.bt = vmw_ctx_binding_tex; | ||
| 1340 | bi.i1.texture_stage = cur_state->stage; | ||
| 1341 | vmw_context_binding_add(ctx_node->staged_bindings, | ||
| 1342 | &bi); | ||
| 1343 | } | ||
| 945 | } | 1344 | } |
| 946 | 1345 | ||
| 947 | return 0; | 1346 | return 0; |
| @@ -971,6 +1370,314 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, | |||
| 971 | } | 1370 | } |
| 972 | 1371 | ||
| 973 | /** | 1372 | /** |
| 1373 | * vmw_cmd_switch_backup - Utility function to handle backup buffer switching | ||
| 1374 | * | ||
| 1375 | * @dev_priv: Pointer to a device private struct. | ||
| 1376 | * @sw_context: The software context being used for this batch. | ||
| 1377 | * @res_type: The resource type. | ||
| 1378 | * @converter: Information about user-space binding for this resource type. | ||
| 1379 | * @res_id: Pointer to the user-space resource handle in the command stream. | ||
| 1380 | * @buf_id: Pointer to the user-space backup buffer handle in the command | ||
| 1381 | * stream. | ||
| 1382 | * @backup_offset: Offset of backup into MOB. | ||
| 1383 | * | ||
| 1384 | * This function prepares for registering a switch of backup buffers | ||
| 1385 | * in the resource metadata just prior to unreserving. | ||
| 1386 | */ | ||
| 1387 | static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, | ||
| 1388 | struct vmw_sw_context *sw_context, | ||
| 1389 | enum vmw_res_type res_type, | ||
| 1390 | const struct vmw_user_resource_conv | ||
| 1391 | *converter, | ||
| 1392 | uint32_t *res_id, | ||
| 1393 | uint32_t *buf_id, | ||
| 1394 | unsigned long backup_offset) | ||
| 1395 | { | ||
| 1396 | int ret; | ||
| 1397 | struct vmw_dma_buffer *dma_buf; | ||
| 1398 | struct vmw_resource_val_node *val_node; | ||
| 1399 | |||
| 1400 | ret = vmw_cmd_res_check(dev_priv, sw_context, res_type, | ||
| 1401 | converter, res_id, &val_node); | ||
| 1402 | if (unlikely(ret != 0)) | ||
| 1403 | return ret; | ||
| 1404 | |||
| 1405 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf); | ||
| 1406 | if (unlikely(ret != 0)) | ||
| 1407 | return ret; | ||
| 1408 | |||
| 1409 | if (val_node->first_usage) | ||
| 1410 | val_node->no_buffer_needed = true; | ||
| 1411 | |||
| 1412 | vmw_dmabuf_unreference(&val_node->new_backup); | ||
| 1413 | val_node->new_backup = dma_buf; | ||
| 1414 | val_node->new_backup_offset = backup_offset; | ||
| 1415 | |||
| 1416 | return 0; | ||
| 1417 | } | ||
| 1418 | |||
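Note that vmw_cmd_switch_backup never patches the command itself: vmw_translate_mob_ptr records a relocation for the mobid slot, and the new buffer is only attached to the resource when it is unreserved after a successful submission. A hedged sketch of that deferred step, with a hypothetical helper name standing in for the logic in vmw_resource_unreserve():

static void vmw_resource_apply_backup_switch(struct vmw_resource *res,
					     struct vmw_dma_buffer *new_backup,
					     unsigned long new_backup_offset)
{
	/* Called at unreserve time; until then the old backup buffer
	 * stays in place, so a failed submission changes nothing. */
	if (new_backup) {
		vmw_dmabuf_unreference(&res->backup);
		res->backup = vmw_dmabuf_reference(new_backup);
		res->backup_offset = new_backup_offset;
	}
}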
| 1419 | /** | ||
| 1420 | * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE | ||
| 1421 | * command | ||
| 1422 | * | ||
| 1423 | * @dev_priv: Pointer to a device private struct. | ||
| 1424 | * @sw_context: The software context being used for this batch. | ||
| 1425 | * @header: Pointer to the command header in the command stream. | ||
| 1426 | */ | ||
| 1427 | static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv, | ||
| 1428 | struct vmw_sw_context *sw_context, | ||
| 1429 | SVGA3dCmdHeader *header) | ||
| 1430 | { | ||
| 1431 | struct vmw_bind_gb_surface_cmd { | ||
| 1432 | SVGA3dCmdHeader header; | ||
| 1433 | SVGA3dCmdBindGBSurface body; | ||
| 1434 | } *cmd; | ||
| 1435 | |||
| 1436 | cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header); | ||
| 1437 | |||
| 1438 | return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface, | ||
| 1439 | user_surface_converter, | ||
| 1440 | &cmd->body.sid, &cmd->body.mobid, | ||
| 1441 | 0); | ||
| 1442 | } | ||
| 1443 | |||
| 1444 | /** | ||
| 1445 | * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE | ||
| 1446 | * command | ||
| 1447 | * | ||
| 1448 | * @dev_priv: Pointer to a device private struct. | ||
| 1449 | * @sw_context: The software context being used for this batch. | ||
| 1450 | * @header: Pointer to the command header in the command stream. | ||
| 1451 | */ | ||
| 1452 | static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv, | ||
| 1453 | struct vmw_sw_context *sw_context, | ||
| 1454 | SVGA3dCmdHeader *header) | ||
| 1455 | { | ||
| 1456 | struct vmw_gb_surface_cmd { | ||
| 1457 | SVGA3dCmdHeader header; | ||
| 1458 | SVGA3dCmdUpdateGBImage body; | ||
| 1459 | } *cmd; | ||
| 1460 | |||
| 1461 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
| 1462 | |||
| 1463 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
| 1464 | user_surface_converter, | ||
| 1465 | &cmd->body.image.sid, NULL); | ||
| 1466 | } | ||
| 1467 | |||
| 1468 | /** | ||
| 1469 | * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE | ||
| 1470 | * command | ||
| 1471 | * | ||
| 1472 | * @dev_priv: Pointer to a device private struct. | ||
| 1473 | * @sw_context: The software context being used for this batch. | ||
| 1474 | * @header: Pointer to the command header in the command stream. | ||
| 1475 | */ | ||
| 1476 | static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv, | ||
| 1477 | struct vmw_sw_context *sw_context, | ||
| 1478 | SVGA3dCmdHeader *header) | ||
| 1479 | { | ||
| 1480 | struct vmw_gb_surface_cmd { | ||
| 1481 | SVGA3dCmdHeader header; | ||
| 1482 | SVGA3dCmdUpdateGBSurface body; | ||
| 1483 | } *cmd; | ||
| 1484 | |||
| 1485 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
| 1486 | |||
| 1487 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
| 1488 | user_surface_converter, | ||
| 1489 | &cmd->body.sid, NULL); | ||
| 1490 | } | ||
| 1491 | |||
| 1492 | /** | ||
| 1493 | * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE | ||
| 1494 | * command | ||
| 1495 | * | ||
| 1496 | * @dev_priv: Pointer to a device private struct. | ||
| 1497 | * @sw_context: The software context being used for this batch. | ||
| 1498 | * @header: Pointer to the command header in the command stream. | ||
| 1499 | */ | ||
| 1500 | static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv, | ||
| 1501 | struct vmw_sw_context *sw_context, | ||
| 1502 | SVGA3dCmdHeader *header) | ||
| 1503 | { | ||
| 1504 | struct vmw_gb_surface_cmd { | ||
| 1505 | SVGA3dCmdHeader header; | ||
| 1506 | SVGA3dCmdReadbackGBImage body; | ||
| 1507 | } *cmd; | ||
| 1508 | |||
| 1509 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
| 1510 | |||
| 1511 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
| 1512 | user_surface_converter, | ||
| 1513 | &cmd->body.image.sid, NULL); | ||
| 1514 | } | ||
| 1515 | |||
| 1516 | /** | ||
| 1517 | * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE | ||
| 1518 | * command | ||
| 1519 | * | ||
| 1520 | * @dev_priv: Pointer to a device private struct. | ||
| 1521 | * @sw_context: The software context being used for this batch. | ||
| 1522 | * @header: Pointer to the command header in the command stream. | ||
| 1523 | */ | ||
| 1524 | static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv, | ||
| 1525 | struct vmw_sw_context *sw_context, | ||
| 1526 | SVGA3dCmdHeader *header) | ||
| 1527 | { | ||
| 1528 | struct vmw_gb_surface_cmd { | ||
| 1529 | SVGA3dCmdHeader header; | ||
| 1530 | SVGA3dCmdReadbackGBSurface body; | ||
| 1531 | } *cmd; | ||
| 1532 | |||
| 1533 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
| 1534 | |||
| 1535 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
| 1536 | user_surface_converter, | ||
| 1537 | &cmd->body.sid, NULL); | ||
| 1538 | } | ||
| 1539 | |||
| 1540 | /** | ||
| 1541 | * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE | ||
| 1542 | * command | ||
| 1543 | * | ||
| 1544 | * @dev_priv: Pointer to a device private struct. | ||
| 1545 | * @sw_context: The software context being used for this batch. | ||
| 1546 | * @header: Pointer to the command header in the command stream. | ||
| 1547 | */ | ||
| 1548 | static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv, | ||
| 1549 | struct vmw_sw_context *sw_context, | ||
| 1550 | SVGA3dCmdHeader *header) | ||
| 1551 | { | ||
| 1552 | struct vmw_gb_surface_cmd { | ||
| 1553 | SVGA3dCmdHeader header; | ||
| 1554 | SVGA3dCmdInvalidateGBImage body; | ||
| 1555 | } *cmd; | ||
| 1556 | |||
| 1557 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
| 1558 | |||
| 1559 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
| 1560 | user_surface_converter, | ||
| 1561 | &cmd->body.image.sid, NULL); | ||
| 1562 | } | ||
| 1563 | |||
| 1564 | /** | ||
| 1565 | * vmw_cmd_invalidate_gb_surface - Validate an | ||
| 1566 | * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command | ||
| 1567 | * | ||
| 1568 | * @dev_priv: Pointer to a device private struct. | ||
| 1569 | * @sw_context: The software context being used for this batch. | ||
| 1570 | * @header: Pointer to the command header in the command stream. | ||
| 1571 | */ | ||
| 1572 | static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv, | ||
| 1573 | struct vmw_sw_context *sw_context, | ||
| 1574 | SVGA3dCmdHeader *header) | ||
| 1575 | { | ||
| 1576 | struct vmw_gb_surface_cmd { | ||
| 1577 | SVGA3dCmdHeader header; | ||
| 1578 | SVGA3dCmdInvalidateGBSurface body; | ||
| 1579 | } *cmd; | ||
| 1580 | |||
| 1581 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
| 1582 | |||
| 1583 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
| 1584 | user_surface_converter, | ||
| 1585 | &cmd->body.sid, NULL); | ||
| 1586 | } | ||
| 1587 | |||
| 1588 | |||
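The six guest-backed image/surface validators above share one pattern: they only need to prove that the surface handle is valid, so the resource node out-parameter is NULL and no backup switching takes place. The *_GB_IMAGE variants address a single mip/face through a compound id, roughly as below (assumed from svga3d_reg.h, shown only to explain the body.image.sid dereference):

typedef struct {
	uint32 sid;	/* Surface id, the part validated above. */
	uint32 face;	/* Cube-map face. */
	uint32 mipmap;	/* Mip level within the face. */
} SVGA3dSurfaceImageId;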
| 1589 | /** | ||
| 1590 | * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE | ||
| 1591 | * command | ||
| 1592 | * | ||
| 1593 | * @dev_priv: Pointer to a device private struct. | ||
| 1594 | * @sw_context: The software context being used for this batch. | ||
| 1595 | * @header: Pointer to the command header in the command stream. | ||
| 1596 | */ | ||
| 1597 | static int vmw_cmd_shader_define(struct vmw_private *dev_priv, | ||
| 1598 | struct vmw_sw_context *sw_context, | ||
| 1599 | SVGA3dCmdHeader *header) | ||
| 1600 | { | ||
| 1601 | struct vmw_shader_define_cmd { | ||
| 1602 | SVGA3dCmdHeader header; | ||
| 1603 | SVGA3dCmdDefineShader body; | ||
| 1604 | } *cmd; | ||
| 1605 | int ret; | ||
| 1606 | size_t size; | ||
| 1607 | |||
| 1608 | cmd = container_of(header, struct vmw_shader_define_cmd, | ||
| 1609 | header); | ||
| 1610 | |||
| 1611 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
| 1612 | user_context_converter, &cmd->body.cid, | ||
| 1613 | NULL); | ||
| 1614 | if (unlikely(ret != 0)) | ||
| 1615 | return ret; | ||
| 1616 | |||
| 1617 | if (unlikely(!dev_priv->has_mob)) | ||
| 1618 | return 0; | ||
| 1619 | |||
| 1620 | size = cmd->header.size - sizeof(cmd->body); | ||
| 1621 | ret = vmw_compat_shader_add(sw_context->fp->shman, | ||
| 1622 | cmd->body.shid, cmd + 1, | ||
| 1623 | cmd->body.type, size, | ||
| 1624 | sw_context->fp->tfile, | ||
| 1625 | &sw_context->staged_shaders); | ||
| 1626 | if (unlikely(ret != 0)) | ||
| 1627 | return ret; | ||
| 1628 | |||
| 1629 | return vmw_resource_relocation_add(&sw_context->res_relocations, | ||
| 1630 | NULL, &cmd->header.id - | ||
| 1631 | sw_context->buf_start); | ||
| 1634 | } | ||
| 1635 | |||
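On guest-backed hardware the legacy SHADER_DEFINE payload is absorbed into a per-file compat shader (vmw_compat_shader_add) and the original command must not reach the device. That is what the NULL-resource relocation accomplishes: when relocations are applied, an entry without a resource overwrites the command id instead of patching in a resource id. A sketch of that substitution, as a hypothetical helper wrapping the behaviour assumed of vmw_resource_relocations_apply() earlier in this file:

static void vmw_resource_relocation_apply(uint32_t *cb,
					  const struct vmw_resource_relocation *rel)
{
	if (likely(rel->res != NULL))
		cb[rel->offset] = rel->res->id;	/* Patch in the device id. */
	else
		cb[rel->offset] = SVGA_3D_CMD_NOP; /* Neutralize the command. */
}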
| 1636 | /** | ||
| 1637 | * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY | ||
| 1638 | * command | ||
| 1639 | * | ||
| 1640 | * @dev_priv: Pointer to a device private struct. | ||
| 1641 | * @sw_context: The software context being used for this batch. | ||
| 1642 | * @header: Pointer to the command header in the command stream. | ||
| 1643 | */ | ||
| 1644 | static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, | ||
| 1645 | struct vmw_sw_context *sw_context, | ||
| 1646 | SVGA3dCmdHeader *header) | ||
| 1647 | { | ||
| 1648 | struct vmw_shader_destroy_cmd { | ||
| 1649 | SVGA3dCmdHeader header; | ||
| 1650 | SVGA3dCmdDestroyShader body; | ||
| 1651 | } *cmd; | ||
| 1652 | int ret; | ||
| 1653 | |||
| 1654 | cmd = container_of(header, struct vmw_shader_destroy_cmd, | ||
| 1655 | header); | ||
| 1656 | |||
| 1657 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
| 1658 | user_context_converter, &cmd->body.cid, | ||
| 1659 | NULL); | ||
| 1660 | if (unlikely(ret != 0)) | ||
| 1661 | return ret; | ||
| 1662 | |||
| 1663 | if (unlikely(!dev_priv->has_mob)) | ||
| 1664 | return 0; | ||
| 1665 | |||
| 1666 | ret = vmw_compat_shader_remove(sw_context->fp->shman, | ||
| 1667 | cmd->body.shid, | ||
| 1668 | cmd->body.type, | ||
| 1669 | &sw_context->staged_shaders); | ||
| 1670 | if (unlikely(ret != 0)) | ||
| 1671 | return ret; | ||
| 1672 | |||
| 1673 | return vmw_resource_relocation_add(&sw_context->res_relocations, | ||
| 1674 | NULL, &cmd->header.id - | ||
| 1675 | sw_context->buf_start); | ||
| 1678 | } | ||
| 1679 | |||
| 1680 | /** | ||
| 974 | * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER | 1681 | * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER |
| 975 | * command | 1682 | * command |
| 976 | * | 1683 | * |
| @@ -986,18 +1693,105 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, | |||
| 986 | SVGA3dCmdHeader header; | 1693 | SVGA3dCmdHeader header; |
| 987 | SVGA3dCmdSetShader body; | 1694 | SVGA3dCmdSetShader body; |
| 988 | } *cmd; | 1695 | } *cmd; |
| 1696 | struct vmw_resource_val_node *ctx_node; | ||
| 989 | int ret; | 1697 | int ret; |
| 990 | 1698 | ||
| 991 | cmd = container_of(header, struct vmw_set_shader_cmd, | 1699 | cmd = container_of(header, struct vmw_set_shader_cmd, |
| 992 | header); | 1700 | header); |
| 993 | 1701 | ||
| 994 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 1702 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
| 1703 | user_context_converter, &cmd->body.cid, | ||
| 1704 | &ctx_node); | ||
| 995 | if (unlikely(ret != 0)) | 1705 | if (unlikely(ret != 0)) |
| 996 | return ret; | 1706 | return ret; |
| 997 | 1707 | ||
| 1708 | if (dev_priv->has_mob) { | ||
| 1709 | struct vmw_ctx_bindinfo bi; | ||
| 1710 | struct vmw_resource_val_node *res_node; | ||
| 1711 | u32 shid = cmd->body.shid; | ||
| 1712 | |||
| 1713 | if (shid != SVGA3D_INVALID_ID) | ||
| 1714 | (void) vmw_compat_shader_lookup(sw_context->fp->shman, | ||
| 1715 | cmd->body.type, | ||
| 1716 | &shid); | ||
| 1717 | |||
| 1718 | ret = vmw_cmd_compat_res_check(dev_priv, sw_context, | ||
| 1719 | vmw_res_shader, | ||
| 1720 | user_shader_converter, | ||
| 1721 | shid, | ||
| 1722 | &cmd->body.shid, &res_node); | ||
| 1723 | if (unlikely(ret != 0)) | ||
| 1724 | return ret; | ||
| 1725 | |||
| 1726 | bi.ctx = ctx_node->res; | ||
| 1727 | bi.res = res_node ? res_node->res : NULL; | ||
| 1728 | bi.bt = vmw_ctx_binding_shader; | ||
| 1729 | bi.i1.shader_type = cmd->body.type; | ||
| 1730 | return vmw_context_binding_add(ctx_node->staged_bindings, &bi); | ||
| 1731 | } | ||
| 1732 | |||
| 998 | return 0; | 1733 | return 0; |
| 999 | } | 1734 | } |
| 1000 | 1735 | ||
| 1736 | /** | ||
| 1737 | * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST | ||
| 1738 | * command | ||
| 1739 | * | ||
| 1740 | * @dev_priv: Pointer to a device private struct. | ||
| 1741 | * @sw_context: The software context being used for this batch. | ||
| 1742 | * @header: Pointer to the command header in the command stream. | ||
| 1743 | */ | ||
| 1744 | static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv, | ||
| 1745 | struct vmw_sw_context *sw_context, | ||
| 1746 | SVGA3dCmdHeader *header) | ||
| 1747 | { | ||
| 1748 | struct vmw_set_shader_const_cmd { | ||
| 1749 | SVGA3dCmdHeader header; | ||
| 1750 | SVGA3dCmdSetShaderConst body; | ||
| 1751 | } *cmd; | ||
| 1752 | int ret; | ||
| 1753 | |||
| 1754 | cmd = container_of(header, struct vmw_set_shader_const_cmd, | ||
| 1755 | header); | ||
| 1756 | |||
| 1757 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
| 1758 | user_context_converter, &cmd->body.cid, | ||
| 1759 | NULL); | ||
| 1760 | if (unlikely(ret != 0)) | ||
| 1761 | return ret; | ||
| 1762 | |||
| 1763 | if (dev_priv->has_mob) | ||
| 1764 | header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE; | ||
| 1765 | |||
| 1766 | return 0; | ||
| 1767 | } | ||
| 1768 | |||
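SET_SHADER_CONST is kept for guest-backed contexts but its opcode is rewritten in the kernel copy of the command stream to the GB inline variant; this only works because the two commands carry the same body followed by raw constant data, so nothing but header->id has to change. The layout assumption, spelled out (field comments are an assumption based on svga3d_reg.h, not quoted from it):

struct vmw_shader_const_layout {
	SVGA3dCmdHeader header;		/* id rewritten, size unchanged. */
	SVGA3dCmdSetShaderConst body;	/* cid, reg, type, ctype. */
	/* Followed by (header.size - sizeof(body)) bytes of constant
	 * data, identical for both opcodes. */
};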
| 1769 | /** | ||
| 1770 | * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER | ||
| 1771 | * command | ||
| 1772 | * | ||
| 1773 | * @dev_priv: Pointer to a device private struct. | ||
| 1774 | * @sw_context: The software context being used for this batch. | ||
| 1775 | * @header: Pointer to the command header in the command stream. | ||
| 1776 | */ | ||
| 1777 | static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv, | ||
| 1778 | struct vmw_sw_context *sw_context, | ||
| 1779 | SVGA3dCmdHeader *header) | ||
| 1780 | { | ||
| 1781 | struct vmw_bind_gb_shader_cmd { | ||
| 1782 | SVGA3dCmdHeader header; | ||
| 1783 | SVGA3dCmdBindGBShader body; | ||
| 1784 | } *cmd; | ||
| 1785 | |||
| 1786 | cmd = container_of(header, struct vmw_bind_gb_shader_cmd, | ||
| 1787 | header); | ||
| 1788 | |||
| 1789 | return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader, | ||
| 1790 | user_shader_converter, | ||
| 1791 | &cmd->body.shid, &cmd->body.mobid, | ||
| 1792 | cmd->body.offsetInBytes); | ||
| 1793 | } | ||
| 1794 | |||
| 1001 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, | 1795 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, |
| 1002 | struct vmw_sw_context *sw_context, | 1796 | struct vmw_sw_context *sw_context, |
| 1003 | void *buf, uint32_t *size) | 1797 | void *buf, uint32_t *size) |
| @@ -1041,50 +1835,173 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, | |||
| 1041 | return 0; | 1835 | return 0; |
| 1042 | } | 1836 | } |
| 1043 | 1837 | ||
| 1044 | typedef int (*vmw_cmd_func) (struct vmw_private *, | 1838 | static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { |
| 1045 | struct vmw_sw_context *, | 1839 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid, |
| 1046 | SVGA3dCmdHeader *); | 1840 | false, false, false), |
| 1047 | 1841 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid, | |
| 1048 | #define VMW_CMD_DEF(cmd, func) \ | 1842 | false, false, false), |
| 1049 | [cmd - SVGA_3D_CMD_BASE] = func | 1843 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check, |
| 1050 | 1844 | true, false, false), | |
| 1051 | static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = { | 1845 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check, |
| 1052 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid), | 1846 | true, false, false), |
| 1053 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid), | 1847 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma, |
| 1054 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check), | 1848 | true, false, false), |
| 1055 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check), | 1849 | VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid, |
| 1056 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma), | 1850 | false, false, false), |
| 1057 | VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid), | 1851 | VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid, |
| 1058 | VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid), | 1852 | false, false, false), |
| 1059 | VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check), | 1853 | VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check, |
| 1060 | VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check), | 1854 | true, false, false), |
| 1061 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check), | 1855 | VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check, |
| 1856 | true, false, false), | ||
| 1857 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check, | ||
| 1858 | true, false, false), | ||
| 1062 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET, | 1859 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET, |
| 1063 | &vmw_cmd_set_render_target_check), | 1860 | &vmw_cmd_set_render_target_check, true, false, false), |
| 1064 | VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state), | 1861 | VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state, |
| 1065 | VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check), | 1862 | true, false, false), |
| 1066 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check), | 1863 | VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check, |
| 1067 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check), | 1864 | true, false, false), |
| 1068 | VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check), | 1865 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check, |
| 1069 | VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check), | 1866 | true, false, false), |
| 1070 | VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check), | 1867 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check, |
| 1071 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check), | 1868 | true, false, false), |
| 1072 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check), | 1869 | VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check, |
| 1073 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check), | 1870 | true, false, false), |
| 1074 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader), | 1871 | VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check, |
| 1075 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check), | 1872 | true, false, false), |
| 1076 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), | 1873 | VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check, |
| 1077 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), | 1874 | true, false, false), |
| 1078 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query), | 1875 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check, |
| 1079 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query), | 1876 | false, false, false), |
| 1080 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query), | 1877 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define, |
| 1081 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), | 1878 | true, false, false), |
| 1879 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy, | ||
| 1880 | true, false, false), | ||
| 1881 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader, | ||
| 1882 | true, false, false), | ||
| 1883 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const, | ||
| 1884 | true, false, false), | ||
| 1885 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw, | ||
| 1886 | true, false, false), | ||
| 1887 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check, | ||
| 1888 | true, false, false), | ||
| 1889 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query, | ||
| 1890 | true, false, false), | ||
| 1891 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query, | ||
| 1892 | true, false, false), | ||
| 1893 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query, | ||
| 1894 | true, false, false), | ||
| 1895 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok, | ||
| 1896 | true, false, false), | ||
| 1082 | VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, | 1897 | VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, |
| 1083 | &vmw_cmd_blt_surf_screen_check), | 1898 | &vmw_cmd_blt_surf_screen_check, false, false, false), |
| 1084 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid), | 1899 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid, |
| 1085 | VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid), | 1900 | false, false, false), |
| 1086 | VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid), | 1901 | VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid, |
| 1087 | VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid), | 1902 | false, false, false), |
| 1903 | VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid, | ||
| 1904 | false, false, false), | ||
| 1905 | VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid, | ||
| 1906 | false, false, false), | ||
| 1907 | VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid, | ||
| 1908 | false, false, false), | ||
| 1909 | VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid, | ||
| 1910 | false, false, false), | ||
| 1911 | VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid, | ||
| 1912 | false, false, false), | ||
| 1913 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid, | ||
| 1914 | false, false, false), | ||
| 1915 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid, | ||
| 1916 | false, false, false), | ||
| 1917 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid, | ||
| 1918 | false, false, false), | ||
| 1919 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid, | ||
| 1920 | false, false, false), | ||
| 1921 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid, | ||
| 1922 | false, false, false), | ||
| 1923 | VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid, | ||
| 1924 | false, false, false), | ||
| 1925 | VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid, | ||
| 1926 | false, false, true), | ||
| 1927 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid, | ||
| 1928 | false, false, true), | ||
| 1929 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid, | ||
| 1930 | false, false, true), | ||
| 1931 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid, | ||
| 1932 | false, false, true), | ||
| 1933 | VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid, | ||
| 1934 | false, false, true), | ||
| 1935 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid, | ||
| 1936 | false, false, true), | ||
| 1937 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid, | ||
| 1938 | false, false, true), | ||
| 1939 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid, | ||
| 1940 | false, false, true), | ||
| 1941 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface, | ||
| 1942 | true, false, true), | ||
| 1943 | VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid, | ||
| 1944 | false, false, true), | ||
| 1945 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image, | ||
| 1946 | true, false, true), | ||
| 1947 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE, | ||
| 1948 | &vmw_cmd_update_gb_surface, true, false, true), | ||
| 1949 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE, | ||
| 1950 | &vmw_cmd_readback_gb_image, true, false, true), | ||
| 1951 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE, | ||
| 1952 | &vmw_cmd_readback_gb_surface, true, false, true), | ||
| 1953 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE, | ||
| 1954 | &vmw_cmd_invalidate_gb_image, true, false, true), | ||
| 1955 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE, | ||
| 1956 | &vmw_cmd_invalidate_gb_surface, true, false, true), | ||
| 1957 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid, | ||
| 1958 | false, false, true), | ||
| 1959 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid, | ||
| 1960 | false, false, true), | ||
| 1961 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid, | ||
| 1962 | false, false, true), | ||
| 1963 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid, | ||
| 1964 | false, false, true), | ||
| 1965 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid, | ||
| 1966 | false, false, true), | ||
| 1967 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid, | ||
| 1968 | false, false, true), | ||
| 1969 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader, | ||
| 1970 | true, false, true), | ||
| 1971 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid, | ||
| 1972 | false, false, true), | ||
| 1973 | VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid, | ||
| 1974 | false, false, false), | ||
| 1975 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query, | ||
| 1976 | true, false, true), | ||
| 1977 | VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query, | ||
| 1978 | true, false, true), | ||
| 1979 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query, | ||
| 1980 | true, false, true), | ||
| 1981 | VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok, | ||
| 1982 | true, false, true), | ||
| 1983 | VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid, | ||
| 1984 | false, false, true), | ||
| 1985 | VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid, | ||
| 1986 | false, false, true), | ||
| 1987 | VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid, | ||
| 1988 | false, false, true), | ||
| 1989 | VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid, | ||
| 1990 | false, false, true), | ||
| 1991 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid, | ||
| 1992 | false, false, true), | ||
| 1993 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid, | ||
| 1994 | false, false, true), | ||
| 1995 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid, | ||
| 1996 | false, false, true), | ||
| 1997 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid, | ||
| 1998 | false, false, true), | ||
| 1999 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, | ||
| 2000 | false, false, true), | ||
| 2001 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, | ||
| 2002 | false, false, true), | ||
| 2003 | VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check, | ||
| 2004 | true, false, true) | ||
| 1088 | }; | 2005 | }; |
| 1089 | 2006 | ||
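The dispatch table now carries three flags per command. Read together with vmw_cmd_check() below they mean, in order: the command may come from an unprivileged submission, the command is disallowed once guest-backed objects are in use, and the command requires guest-backed objects. A sketch of the entry and macro this implies (the authoritative definitions sit earlier in vmwgfx_execbuf.c, outside this hunk):

typedef int (*vmw_cmd_func) (struct vmw_private *,
			     struct vmw_sw_context *,
			     SVGA3dCmdHeader *);

struct vmw_cmd_entry {
	vmw_cmd_func func;
	bool user_allow;	/* Allowed from user-space submissions. */
	bool gb_disable;	/* Rejected when guest-backed objects are active. */
	bool gb_enable;		/* Requires guest-backed objects. */
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),		\
				       (_gb_disable), (_gb_enable)}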
| 1090 | static int vmw_cmd_check(struct vmw_private *dev_priv, | 2007 | static int vmw_cmd_check(struct vmw_private *dev_priv, |
| @@ -1095,6 +2012,8 @@ static int vmw_cmd_check(struct vmw_private *dev_priv, | |||
| 1095 | uint32_t size_remaining = *size; | 2012 | uint32_t size_remaining = *size; |
| 1096 | SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; | 2013 | SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; |
| 1097 | int ret; | 2014 | int ret; |
| 2015 | const struct vmw_cmd_entry *entry; | ||
| 2016 | bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS; | ||
| 1098 | 2017 | ||
| 1099 | cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); | 2018 | cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); |
| 1100 | /* Handle any non-3D commands */ | 2019 | /* Handle any non-3D commands */ |
| @@ -1107,18 +2026,43 @@ static int vmw_cmd_check(struct vmw_private *dev_priv, | |||
| 1107 | 2026 | ||
| 1108 | cmd_id -= SVGA_3D_CMD_BASE; | 2027 | cmd_id -= SVGA_3D_CMD_BASE; |
| 1109 | if (unlikely(*size > size_remaining)) | 2028 | if (unlikely(*size > size_remaining)) |
| 1110 | goto out_err; | 2029 | goto out_invalid; |
| 1111 | 2030 | ||
| 1112 | if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)) | 2031 | if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)) |
| 1113 | goto out_err; | 2032 | goto out_invalid; |
| 2033 | |||
| 2034 | entry = &vmw_cmd_entries[cmd_id]; | ||
| 2035 | if (unlikely(!entry->func)) | ||
| 2036 | goto out_invalid; | ||
| 2037 | |||
| 2038 | if (unlikely(!entry->user_allow && !sw_context->kernel)) | ||
| 2039 | goto out_privileged; | ||
| 2040 | |||
| 2041 | if (unlikely(entry->gb_disable && gb)) | ||
| 2042 | goto out_old; | ||
| 1114 | 2043 | ||
| 1115 | ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header); | 2044 | if (unlikely(entry->gb_enable && !gb)) |
| 2045 | goto out_new; | ||
| 2046 | |||
| 2047 | ret = entry->func(dev_priv, sw_context, header); | ||
| 1116 | if (unlikely(ret != 0)) | 2048 | if (unlikely(ret != 0)) |
| 1117 | goto out_err; | 2049 | goto out_invalid; |
| 1118 | 2050 | ||
| 1119 | return 0; | 2051 | return 0; |
| 1120 | out_err: | 2052 | out_invalid: |
| 1121 | DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n", | 2053 | DRM_ERROR("Invalid SVGA3D command: %d\n", |
| 2054 | cmd_id + SVGA_3D_CMD_BASE); | ||
| 2055 | return -EINVAL; | ||
| 2056 | out_privileged: | ||
| 2057 | DRM_ERROR("Privileged SVGA3D command: %d\n", | ||
| 2058 | cmd_id + SVGA_3D_CMD_BASE); | ||
| 2059 | return -EPERM; | ||
| 2060 | out_old: | ||
| 2061 | DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n", | ||
| 2062 | cmd_id + SVGA_3D_CMD_BASE); | ||
| 2063 | return -EINVAL; | ||
| 2064 | out_new: | ||
| 2065 | DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n", | ||
| 1122 | cmd_id + SVGA_3D_CMD_BASE); | 2066 | cmd_id + SVGA_3D_CMD_BASE); |
| 1123 | return -EINVAL; | 2067 | return -EINVAL; |
| 1124 | } | 2068 | } |
| @@ -1174,6 +2118,9 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context) | |||
| 1174 | case VMW_PL_GMR: | 2118 | case VMW_PL_GMR: |
| 1175 | reloc->location->gmrId = bo->mem.start; | 2119 | reloc->location->gmrId = bo->mem.start; |
| 1176 | break; | 2120 | break; |
| 2121 | case VMW_PL_MOB: | ||
| 2122 | *reloc->mob_loc = bo->mem.start; | ||
| 2123 | break; | ||
| 1177 | default: | 2124 | default: |
| 1178 | BUG(); | 2125 | BUG(); |
| 1179 | } | 2126 | } |
| @@ -1198,6 +2145,8 @@ static void vmw_resource_list_unreference(struct list_head *list) | |||
| 1198 | list_for_each_entry_safe(val, val_next, list, head) { | 2145 | list_for_each_entry_safe(val, val_next, list, head) { |
| 1199 | list_del_init(&val->head); | 2146 | list_del_init(&val->head); |
| 1200 | vmw_resource_unreference(&val->res); | 2147 | vmw_resource_unreference(&val->res); |
| 2148 | if (unlikely(val->staged_bindings)) | ||
| 2149 | kfree(val->staged_bindings); | ||
| 1201 | kfree(val); | 2150 | kfree(val); |
| 1202 | } | 2151 | } |
| 1203 | } | 2152 | } |
| @@ -1224,7 +2173,8 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context) | |||
| 1224 | } | 2173 | } |
| 1225 | 2174 | ||
| 1226 | static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | 2175 | static int vmw_validate_single_buffer(struct vmw_private *dev_priv, |
| 1227 | struct ttm_buffer_object *bo) | 2176 | struct ttm_buffer_object *bo, |
| 2177 | bool validate_as_mob) | ||
| 1228 | { | 2178 | { |
| 1229 | int ret; | 2179 | int ret; |
| 1230 | 2180 | ||
| @@ -1238,6 +2188,9 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | |||
| 1238 | dev_priv->dummy_query_bo_pinned)) | 2188 | dev_priv->dummy_query_bo_pinned)) |
| 1239 | return 0; | 2189 | return 0; |
| 1240 | 2190 | ||
| 2191 | if (validate_as_mob) | ||
| 2192 | return ttm_bo_validate(bo, &vmw_mob_placement, true, false); | ||
| 2193 | |||
| 1241 | /** | 2194 | /** |
| 1242 | * Put BO in VRAM if there is space, otherwise as a GMR. | 2195 | * Put BO in VRAM if there is space, otherwise as a GMR. |
| 1243 | * If there is no space in VRAM and GMR ids are all used up, | 2196 | * If there is no space in VRAM and GMR ids are all used up, |
| @@ -1259,7 +2212,6 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | |||
| 1259 | return ret; | 2212 | return ret; |
| 1260 | } | 2213 | } |
| 1261 | 2214 | ||
| 1262 | |||
| 1263 | static int vmw_validate_buffers(struct vmw_private *dev_priv, | 2215 | static int vmw_validate_buffers(struct vmw_private *dev_priv, |
| 1264 | struct vmw_sw_context *sw_context) | 2216 | struct vmw_sw_context *sw_context) |
| 1265 | { | 2217 | { |
| @@ -1267,7 +2219,8 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv, | |||
| 1267 | int ret; | 2219 | int ret; |
| 1268 | 2220 | ||
| 1269 | list_for_each_entry(entry, &sw_context->validate_nodes, base.head) { | 2221 | list_for_each_entry(entry, &sw_context->validate_nodes, base.head) { |
| 1270 | ret = vmw_validate_single_buffer(dev_priv, entry->base.bo); | 2222 | ret = vmw_validate_single_buffer(dev_priv, entry->base.bo, |
| 2223 | entry->validate_as_mob); | ||
| 1271 | if (unlikely(ret != 0)) | 2224 | if (unlikely(ret != 0)) |
| 1272 | return ret; | 2225 | return ret; |
| 1273 | } | 2226 | } |
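Buffers backing MOB-bound resources are validated into the new VMW_PL_MOB pool instead of following the VRAM-then-GMR policy; the per-buffer flag travels on the validation node so ordinary buffers are unaffected. A sketch of the placement this path assumes (vmw_mob_placement itself is defined with the other placements in vmwgfx_buffer.c; the flags shown are an assumption in the style of the existing ones):

static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED;

struct ttm_placement vmw_mob_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};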
| @@ -1461,7 +2414,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
| 1461 | } else | 2414 | } else |
| 1462 | sw_context->kernel = true; | 2415 | sw_context->kernel = true; |
| 1463 | 2416 | ||
| 1464 | sw_context->tfile = vmw_fpriv(file_priv)->tfile; | 2417 | sw_context->fp = vmw_fpriv(file_priv); |
| 1465 | sw_context->cur_reloc = 0; | 2418 | sw_context->cur_reloc = 0; |
| 1466 | sw_context->cur_val_buf = 0; | 2419 | sw_context->cur_val_buf = 0; |
| 1467 | sw_context->fence_flags = 0; | 2420 | sw_context->fence_flags = 0; |
| @@ -1478,16 +2431,17 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
| 1478 | goto out_unlock; | 2431 | goto out_unlock; |
| 1479 | sw_context->res_ht_initialized = true; | 2432 | sw_context->res_ht_initialized = true; |
| 1480 | } | 2433 | } |
| 2434 | INIT_LIST_HEAD(&sw_context->staged_shaders); | ||
| 1481 | 2435 | ||
| 1482 | INIT_LIST_HEAD(&resource_list); | 2436 | INIT_LIST_HEAD(&resource_list); |
| 1483 | ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, | 2437 | ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, |
| 1484 | command_size); | 2438 | command_size); |
| 1485 | if (unlikely(ret != 0)) | 2439 | if (unlikely(ret != 0)) |
| 1486 | goto out_err; | 2440 | goto out_err_nores; |
| 1487 | 2441 | ||
| 1488 | ret = vmw_resources_reserve(sw_context); | 2442 | ret = vmw_resources_reserve(sw_context); |
| 1489 | if (unlikely(ret != 0)) | 2443 | if (unlikely(ret != 0)) |
| 1490 | goto out_err; | 2444 | goto out_err_nores; |
| 1491 | 2445 | ||
| 1492 | ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes); | 2446 | ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes); |
| 1493 | if (unlikely(ret != 0)) | 2447 | if (unlikely(ret != 0)) |
| @@ -1509,11 +2463,23 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
| 1509 | goto out_err; | 2463 | goto out_err; |
| 1510 | } | 2464 | } |
| 1511 | 2465 | ||
| 2466 | ret = mutex_lock_interruptible(&dev_priv->binding_mutex); | ||
| 2467 | if (unlikely(ret != 0)) { | ||
| 2468 | ret = -ERESTARTSYS; | ||
| 2469 | goto out_err; | ||
| 2470 | } | ||
| 2471 | |||
| 2472 | if (dev_priv->has_mob) { | ||
| 2473 | ret = vmw_rebind_contexts(sw_context); | ||
| 2474 | if (unlikely(ret != 0)) | ||
| 2475 | goto out_unlock_binding; | ||
| 2476 | } | ||
| 2477 | |||
| 1512 | cmd = vmw_fifo_reserve(dev_priv, command_size); | 2478 | cmd = vmw_fifo_reserve(dev_priv, command_size); |
| 1513 | if (unlikely(cmd == NULL)) { | 2479 | if (unlikely(cmd == NULL)) { |
| 1514 | DRM_ERROR("Failed reserving fifo space for commands.\n"); | 2480 | DRM_ERROR("Failed reserving fifo space for commands.\n"); |
| 1515 | ret = -ENOMEM; | 2481 | ret = -ENOMEM; |
| 1516 | goto out_err; | 2482 | goto out_unlock_binding; |
| 1517 | } | 2483 | } |
| 1518 | 2484 | ||
| 1519 | vmw_apply_relocations(sw_context); | 2485 | vmw_apply_relocations(sw_context); |
| @@ -1538,6 +2504,8 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
| 1538 | DRM_ERROR("Fence submission error. Syncing.\n"); | 2504 | DRM_ERROR("Fence submission error. Syncing.\n"); |
| 1539 | 2505 | ||
| 1540 | vmw_resource_list_unreserve(&sw_context->resource_list, false); | 2506 | vmw_resource_list_unreserve(&sw_context->resource_list, false); |
| 2507 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 2508 | |||
| 1541 | ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, | 2509 | ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, |
| 1542 | (void *) fence); | 2510 | (void *) fence); |
| 1543 | 2511 | ||
| @@ -1558,6 +2526,8 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
| 1558 | } | 2526 | } |
| 1559 | 2527 | ||
| 1560 | list_splice_init(&sw_context->resource_list, &resource_list); | 2528 | list_splice_init(&sw_context->resource_list, &resource_list); |
| 2529 | vmw_compat_shaders_commit(sw_context->fp->shman, | ||
| 2530 | &sw_context->staged_shaders); | ||
| 1561 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 2531 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
| 1562 | 2532 | ||
| 1563 | /* | 2533 | /* |
| @@ -1568,11 +2538,14 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
| 1568 | 2538 | ||
| 1569 | return 0; | 2539 | return 0; |
| 1570 | 2540 | ||
| 2541 | out_unlock_binding: | ||
| 2542 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 1571 | out_err: | 2543 | out_err: |
| 1572 | vmw_resource_relocations_free(&sw_context->res_relocations); | ||
| 1573 | vmw_free_relocations(sw_context); | ||
| 1574 | ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); | 2544 | ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); |
| 2545 | out_err_nores: | ||
| 1575 | vmw_resource_list_unreserve(&sw_context->resource_list, true); | 2546 | vmw_resource_list_unreserve(&sw_context->resource_list, true); |
| 2547 | vmw_resource_relocations_free(&sw_context->res_relocations); | ||
| 2548 | vmw_free_relocations(sw_context); | ||
| 1576 | vmw_clear_validations(sw_context); | 2549 | vmw_clear_validations(sw_context); |
| 1577 | if (unlikely(dev_priv->pinned_bo != NULL && | 2550 | if (unlikely(dev_priv->pinned_bo != NULL && |
| 1578 | !dev_priv->query_cid_valid)) | 2551 | !dev_priv->query_cid_valid)) |
| @@ -1581,6 +2554,8 @@ out_unlock: | |||
| 1581 | list_splice_init(&sw_context->resource_list, &resource_list); | 2554 | list_splice_init(&sw_context->resource_list, &resource_list); |
| 1582 | error_resource = sw_context->error_resource; | 2555 | error_resource = sw_context->error_resource; |
| 1583 | sw_context->error_resource = NULL; | 2556 | sw_context->error_resource = NULL; |
| 2557 | vmw_compat_shaders_revert(sw_context->fp->shman, | ||
| 2558 | &sw_context->staged_shaders); | ||
| 1584 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 2559 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
| 1585 | 2560 | ||
| 1586 | /* | 2561 | /* |
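Three new helpers anchor the reworked submission path above: contexts are rebound under binding_mutex before fifo space is reserved, and compat shaders staged during command checking are committed only when the submission succeeds. Their assumed contracts (prototypes shown for orientation; the implementations live in vmwgfx_context.c and vmwgfx_shader.c in this series):

/* Re-emit bindings for contexts whose backing MOBs were evicted since
 * the last submission; caller holds dev_priv->binding_mutex. */
int vmw_rebind_contexts(struct vmw_sw_context *sw_context);

/* Make shaders staged during command checking visible (success path). */
void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
			       struct list_head *list);

/* Undo staged shader creation/destruction when the submission fails. */
void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
			       struct list_head *list);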
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index c62d20e8a6f1..436b013b4231 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | |||
| @@ -271,7 +271,7 @@ void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p) | |||
| 271 | spin_unlock_irq(&fman->lock); | 271 | spin_unlock_irq(&fman->lock); |
| 272 | } | 272 | } |
| 273 | 273 | ||
| 274 | void vmw_fences_perform_actions(struct vmw_fence_manager *fman, | 274 | static void vmw_fences_perform_actions(struct vmw_fence_manager *fman, |
| 275 | struct list_head *list) | 275 | struct list_head *list) |
| 276 | { | 276 | { |
| 277 | struct vmw_fence_action *action, *next_action; | 277 | struct vmw_fence_action *action, *next_action; |
| @@ -897,7 +897,7 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action) | |||
| 897 | * Note that the action callbacks may be executed before this function | 897 | * Note that the action callbacks may be executed before this function |
| 898 | * returns. | 898 | * returns. |
| 899 | */ | 899 | */ |
| 900 | void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, | 900 | static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, |
| 901 | struct vmw_fence_action *action) | 901 | struct vmw_fence_action *action) |
| 902 | { | 902 | { |
| 903 | struct vmw_fence_manager *fman = fence->fman; | 903 | struct vmw_fence_manager *fman = fence->fman; |
| @@ -993,7 +993,7 @@ struct vmw_event_fence_pending { | |||
| 993 | struct drm_vmw_event_fence event; | 993 | struct drm_vmw_event_fence event; |
| 994 | }; | 994 | }; |
| 995 | 995 | ||
| 996 | int vmw_event_fence_action_create(struct drm_file *file_priv, | 996 | static int vmw_event_fence_action_create(struct drm_file *file_priv, |
| 997 | struct vmw_fence_obj *fence, | 997 | struct vmw_fence_obj *fence, |
| 998 | uint32_t flags, | 998 | uint32_t flags, |
| 999 | uint64_t user_data, | 999 | uint64_t user_data, |
| @@ -1080,7 +1080,8 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, | |||
| 1080 | */ | 1080 | */ |
| 1081 | if (arg->handle) { | 1081 | if (arg->handle) { |
| 1082 | struct ttm_base_object *base = | 1082 | struct ttm_base_object *base = |
| 1083 | ttm_base_object_lookup(vmw_fp->tfile, arg->handle); | 1083 | ttm_base_object_lookup_for_ref(dev_priv->tdev, |
| 1084 | arg->handle); | ||
| 1084 | 1085 | ||
| 1085 | if (unlikely(base == NULL)) { | 1086 | if (unlikely(base == NULL)) { |
| 1086 | DRM_ERROR("Fence event invalid fence object handle " | 1087 | DRM_ERROR("Fence event invalid fence object handle " |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 3eb148667d63..6ccd993e26bf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
| @@ -35,6 +35,23 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) | |||
| 35 | uint32_t fifo_min, hwversion; | 35 | uint32_t fifo_min, hwversion; |
| 36 | const struct vmw_fifo_state *fifo = &dev_priv->fifo; | 36 | const struct vmw_fifo_state *fifo = &dev_priv->fifo; |
| 37 | 37 | ||
| 38 | if (!(dev_priv->capabilities & SVGA_CAP_3D)) | ||
| 39 | return false; | ||
| 40 | |||
| 41 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { | ||
| 42 | uint32_t result; | ||
| 43 | |||
| 44 | if (!dev_priv->has_mob) | ||
| 45 | return false; | ||
| 46 | |||
| 47 | mutex_lock(&dev_priv->hw_mutex); | ||
| 48 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); | ||
| 49 | result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); | ||
| 50 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 51 | |||
| 52 | return (result != 0); | ||
| 53 | } | ||
| 54 | |||
| 38 | if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) | 55 | if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) |
| 39 | return false; | 56 | return false; |
| 40 | 57 | ||
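For guest-backed devices, 3D support is probed through the SVGA_REG_DEV_CAP register pair rather than the FIFO capability words: a capability index is written, the answer read back, and the whole exchange kept under hw_mutex so it cannot interleave with other register accesses. The same probe as a stand-alone helper (hypothetical name, equivalent to the inline sequence above):

static uint32_t vmw_devcap_read(struct vmw_private *dev_priv, uint32_t cap)
{
	uint32_t result;

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_DEV_CAP, cap);
	result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
	mutex_unlock(&dev_priv->hw_mutex);

	return result;
}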
| @@ -511,24 +528,16 @@ out_err: | |||
| 511 | } | 528 | } |
| 512 | 529 | ||
| 513 | /** | 530 | /** |
| 514 | * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo. | 531 | * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using |
| 532 | * legacy query commands. | ||
| 515 | * | 533 | * |
| 516 | * @dev_priv: The device private structure. | 534 | * @dev_priv: The device private structure. |
| 517 | * @cid: The hardware context id used for the query. | 535 | * @cid: The hardware context id used for the query. |
| 518 | * | 536 | * |
| 519 | * This function is used to emit a dummy occlusion query with | 537 | * See the vmw_fifo_emit_dummy_query documentation. |
| 520 | * no primitives rendered between query begin and query end. | ||
| 521 | * It's used to provide a query barrier, in order to know that when | ||
| 522 | * this query is finished, all preceding queries are also finished. | ||
| 523 | * | ||
| 524 | * A Query results structure should have been initialized at the start | ||
| 525 | * of the dev_priv->dummy_query_bo buffer object. And that buffer object | ||
| 526 | * must also be either reserved or pinned when this function is called. | ||
| 527 | * | ||
| 528 | * Returns -ENOMEM on failure to reserve fifo space. | ||
| 529 | */ | 538 | */ |
| 530 | int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, | 539 | static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv, |
| 531 | uint32_t cid) | 540 | uint32_t cid) |
| 532 | { | 541 | { |
| 533 | /* | 542 | /* |
| 534 | * A query wait without a preceding query end will | 543 | * A query wait without a preceding query end will |
| @@ -566,3 +575,75 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, | |||
| 566 | 575 | ||
| 567 | return 0; | 576 | return 0; |
| 568 | } | 577 | } |
| 578 | |||
| 579 | /** | ||
| 580 | * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using | ||
| 581 | * guest-backed resource query commands. | ||
| 582 | * | ||
| 583 | * @dev_priv: The device private structure. | ||
| 584 | * @cid: The hardware context id used for the query. | ||
| 585 | * | ||
| 586 | * See the vmw_fifo_emit_dummy_query documentation. | ||
| 587 | */ | ||
| 588 | static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv, | ||
| 589 | uint32_t cid) | ||
| 590 | { | ||
| 591 | /* | ||
| 592 | * A query wait without a preceding query end will | ||
| 593 | * actually finish all queries for this cid | ||
| 594 | * without writing to the query result structure. | ||
| 595 | */ | ||
| 596 | |||
| 597 | struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; | ||
| 598 | struct { | ||
| 599 | SVGA3dCmdHeader header; | ||
| 600 | SVGA3dCmdWaitForGBQuery body; | ||
| 601 | } *cmd; | ||
| 602 | |||
| 603 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 604 | |||
| 605 | if (unlikely(cmd == NULL)) { | ||
| 606 | DRM_ERROR("Out of fifo space for dummy query.\n"); | ||
| 607 | return -ENOMEM; | ||
| 608 | } | ||
| 609 | |||
| 610 | cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY; | ||
| 611 | cmd->header.size = sizeof(cmd->body); | ||
| 612 | cmd->body.cid = cid; | ||
| 613 | cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION; | ||
| 614 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
| 615 | cmd->body.mobid = bo->mem.start; | ||
| 616 | cmd->body.offset = 0; | ||
| 617 | |||
| 618 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 619 | |||
| 620 | return 0; | ||
| 621 | } | ||
| 622 | |||
| 623 | |||
| 624 | /** | ||
| 625 | * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using | ||
| 626 | * appropriate resource query commands. | ||
| 627 | * | ||
| 628 | * @dev_priv: The device private structure. | ||
| 629 | * @cid: The hardware context id used for the query. | ||
| 630 | * | ||
| 631 | * This function is used to emit a dummy occlusion query with | ||
| 632 | * no primitives rendered between query begin and query end. | ||
| 633 | * It's used to provide a query barrier, in order to know that when | ||
| 634 | * this query is finished, all preceding queries are also finished. | ||
| 635 | * | ||
| 636 | * A Query results structure should have been initialized at the start | ||
| 637 | * of the dev_priv->dummy_query_bo buffer object. And that buffer object | ||
| 638 | * must also be either reserved or pinned when this function is called. | ||
| 639 | * | ||
| 640 | * Returns -ENOMEM on failure to reserve fifo space. | ||
| 641 | */ | ||
| 642 | int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, | ||
| 643 | uint32_t cid) | ||
| 644 | { | ||
| 645 | if (dev_priv->has_mob) | ||
| 646 | return vmw_fifo_emit_dummy_gb_query(dev_priv, cid); | ||
| 647 | |||
| 648 | return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid); | ||
| 649 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c index 6ef0b035becb..61d8d803199f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | |||
| @@ -125,181 +125,27 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv, | |||
| 125 | } | 125 | } |
| 126 | 126 | ||
| 127 | 127 | ||
| 128 | static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma, | ||
| 129 | struct list_head *desc_pages) | ||
| 130 | { | ||
| 131 | struct page *page, *next; | ||
| 132 | struct svga_guest_mem_descriptor *page_virtual; | ||
| 133 | unsigned int desc_per_page = PAGE_SIZE / | ||
| 134 | sizeof(struct svga_guest_mem_descriptor) - 1; | ||
| 135 | |||
| 136 | if (list_empty(desc_pages)) | ||
| 137 | return; | ||
| 138 | |||
| 139 | list_for_each_entry_safe(page, next, desc_pages, lru) { | ||
| 140 | list_del_init(&page->lru); | ||
| 141 | |||
| 142 | if (likely(desc_dma != DMA_ADDR_INVALID)) { | ||
| 143 | dma_unmap_page(dev, desc_dma, PAGE_SIZE, | ||
| 144 | DMA_TO_DEVICE); | ||
| 145 | } | ||
| 146 | |||
| 147 | page_virtual = kmap_atomic(page); | ||
| 148 | desc_dma = (dma_addr_t) | ||
| 149 | le32_to_cpu(page_virtual[desc_per_page].ppn) << | ||
| 150 | PAGE_SHIFT; | ||
| 151 | kunmap_atomic(page_virtual); | ||
| 152 | |||
| 153 | __free_page(page); | ||
| 154 | } | ||
| 155 | } | ||
| 156 | |||
| 157 | /** | ||
| 158 | * FIXME: Adjust to the ttm lowmem / highmem storage to minimize | ||
| 159 | * the number of used descriptors. | ||
| 160 | * | ||
| 161 | */ | ||
| 162 | |||
| 163 | static int vmw_gmr_build_descriptors(struct device *dev, | ||
| 164 | struct list_head *desc_pages, | ||
| 165 | struct vmw_piter *iter, | ||
| 166 | unsigned long num_pages, | ||
| 167 | dma_addr_t *first_dma) | ||
| 168 | { | ||
| 169 | struct page *page; | ||
| 170 | struct svga_guest_mem_descriptor *page_virtual = NULL; | ||
| 171 | struct svga_guest_mem_descriptor *desc_virtual = NULL; | ||
| 172 | unsigned int desc_per_page; | ||
| 173 | unsigned long prev_pfn; | ||
| 174 | unsigned long pfn; | ||
| 175 | int ret; | ||
| 176 | dma_addr_t desc_dma; | ||
| 177 | |||
| 178 | desc_per_page = PAGE_SIZE / | ||
| 179 | sizeof(struct svga_guest_mem_descriptor) - 1; | ||
| 180 | |||
| 181 | while (likely(num_pages != 0)) { | ||
| 182 | page = alloc_page(__GFP_HIGHMEM); | ||
| 183 | if (unlikely(page == NULL)) { | ||
| 184 | ret = -ENOMEM; | ||
| 185 | goto out_err; | ||
| 186 | } | ||
| 187 | |||
| 188 | list_add_tail(&page->lru, desc_pages); | ||
| 189 | page_virtual = kmap_atomic(page); | ||
| 190 | desc_virtual = page_virtual - 1; | ||
| 191 | prev_pfn = ~(0UL); | ||
| 192 | |||
| 193 | while (likely(num_pages != 0)) { | ||
| 194 | pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT; | ||
| 195 | |||
| 196 | if (pfn != prev_pfn + 1) { | ||
| 197 | |||
| 198 | if (desc_virtual - page_virtual == | ||
| 199 | desc_per_page - 1) | ||
| 200 | break; | ||
| 201 | |||
| 202 | (++desc_virtual)->ppn = cpu_to_le32(pfn); | ||
| 203 | desc_virtual->num_pages = cpu_to_le32(1); | ||
| 204 | } else { | ||
| 205 | uint32_t tmp = | ||
| 206 | le32_to_cpu(desc_virtual->num_pages); | ||
| 207 | desc_virtual->num_pages = cpu_to_le32(tmp + 1); | ||
| 208 | } | ||
| 209 | prev_pfn = pfn; | ||
| 210 | --num_pages; | ||
| 211 | vmw_piter_next(iter); | ||
| 212 | } | ||
| 213 | |||
| 214 | (++desc_virtual)->ppn = DMA_PAGE_INVALID; | ||
| 215 | desc_virtual->num_pages = cpu_to_le32(0); | ||
| 216 | kunmap_atomic(page_virtual); | ||
| 217 | } | ||
| 218 | |||
| 219 | desc_dma = 0; | ||
| 220 | list_for_each_entry_reverse(page, desc_pages, lru) { | ||
| 221 | page_virtual = kmap_atomic(page); | ||
| 222 | page_virtual[desc_per_page].ppn = cpu_to_le32 | ||
| 223 | (desc_dma >> PAGE_SHIFT); | ||
| 224 | kunmap_atomic(page_virtual); | ||
| 225 | desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE, | ||
| 226 | DMA_TO_DEVICE); | ||
| 227 | |||
| 228 | if (unlikely(dma_mapping_error(dev, desc_dma))) | ||
| 229 | goto out_err; | ||
| 230 | } | ||
| 231 | *first_dma = desc_dma; | ||
| 232 | |||
| 233 | return 0; | ||
| 234 | out_err: | ||
| 235 | vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages); | ||
| 236 | return ret; | ||
| 237 | } | ||
| 238 | |||
| 239 | static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv, | ||
| 240 | int gmr_id, dma_addr_t desc_dma) | ||
| 241 | { | ||
| 242 | mutex_lock(&dev_priv->hw_mutex); | ||
| 243 | |||
| 244 | vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id); | ||
| 245 | wmb(); | ||
| 246 | vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT); | ||
| 247 | mb(); | ||
| 248 | |||
| 249 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 250 | |||
| 251 | } | ||
| 252 | |||
| 253 | int vmw_gmr_bind(struct vmw_private *dev_priv, | 128 | int vmw_gmr_bind(struct vmw_private *dev_priv, |
| 254 | const struct vmw_sg_table *vsgt, | 129 | const struct vmw_sg_table *vsgt, |
| 255 | unsigned long num_pages, | 130 | unsigned long num_pages, |
| 256 | int gmr_id) | 131 | int gmr_id) |
| 257 | { | 132 | { |
| 258 | struct list_head desc_pages; | ||
| 259 | dma_addr_t desc_dma = 0; | ||
| 260 | struct device *dev = dev_priv->dev->dev; | ||
| 261 | struct vmw_piter data_iter; | 133 | struct vmw_piter data_iter; |
| 262 | int ret; | ||
| 263 | 134 | ||
| 264 | vmw_piter_start(&data_iter, vsgt, 0); | 135 | vmw_piter_start(&data_iter, vsgt, 0); |
| 265 | 136 | ||
| 266 | if (unlikely(!vmw_piter_next(&data_iter))) | 137 | if (unlikely(!vmw_piter_next(&data_iter))) |
| 267 | return 0; | 138 | return 0; |
| 268 | 139 | ||
| 269 | if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) | 140 | if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2))) |
| 270 | return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id); | ||
| 271 | |||
| 272 | if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR))) | ||
| 273 | return -EINVAL; | ||
| 274 | |||
| 275 | if (vsgt->num_regions > dev_priv->max_gmr_descriptors) | ||
| 276 | return -EINVAL; | 141 | return -EINVAL; |
| 277 | 142 | ||
| 278 | INIT_LIST_HEAD(&desc_pages); | 143 | return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id); |
| 279 | |||
| 280 | ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter, | ||
| 281 | num_pages, &desc_dma); | ||
| 282 | if (unlikely(ret != 0)) | ||
| 283 | return ret; | ||
| 284 | |||
| 285 | vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma); | ||
| 286 | vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages); | ||
| 287 | |||
| 288 | return 0; | ||
| 289 | } | 144 | } |
| 290 | 145 | ||
| 291 | 146 | ||
| 292 | void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id) | 147 | void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id) |
| 293 | { | 148 | { |
| 294 | if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) { | 149 | if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) |
| 295 | vmw_gmr2_unbind(dev_priv, gmr_id); | 150 | vmw_gmr2_unbind(dev_priv, gmr_id); |
| 296 | return; | ||
| 297 | } | ||
| 298 | |||
| 299 | mutex_lock(&dev_priv->hw_mutex); | ||
| 300 | vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id); | ||
| 301 | wmb(); | ||
| 302 | vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0); | ||
| 303 | mb(); | ||
| 304 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 305 | } | 151 | } |
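With the descriptor-page builder removed, vmw_gmr_bind() supports SVGA_CAP_GMR2 hosts only and returns -EINVAL otherwise; the legacy SVGA_CAP_GMR register protocol is no longer spoken. The surviving calling convention, as a usage sketch (variable names are illustrative; the real call sites are in the TTM backend):

/* Bind the buffer's page list to the GMR id handed out by the
 * gmrid manager; fails cleanly on pre-GMR2 hosts. */
ret = vmw_gmr_bind(dev_priv, vsgt, num_pages, gmr_id);
if (unlikely(ret != 0))
	return ret;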
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index c5c054ae9056..b1273e8e9a69 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | |||
| @@ -125,10 +125,21 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man, | |||
| 125 | return -ENOMEM; | 125 | return -ENOMEM; |
| 126 | 126 | ||
| 127 | spin_lock_init(&gman->lock); | 127 | spin_lock_init(&gman->lock); |
| 128 | gman->max_gmr_pages = dev_priv->max_gmr_pages; | ||
| 129 | gman->used_gmr_pages = 0; | 128 | gman->used_gmr_pages = 0; |
| 130 | ida_init(&gman->gmr_ida); | 129 | ida_init(&gman->gmr_ida); |
| 131 | gman->max_gmr_ids = p_size; | 130 | |
| 131 | switch (p_size) { | ||
| 132 | case VMW_PL_GMR: | ||
| 133 | gman->max_gmr_ids = dev_priv->max_gmr_ids; | ||
| 134 | gman->max_gmr_pages = dev_priv->max_gmr_pages; | ||
| 135 | break; | ||
| 136 | case VMW_PL_MOB: | ||
| 137 | gman->max_gmr_ids = VMWGFX_NUM_MOB; | ||
| 138 | gman->max_gmr_pages = dev_priv->max_mob_pages; | ||
| 139 | break; | ||
| 140 | default: | ||
| 141 | BUG(); | ||
| 142 | } | ||
| 132 | man->priv = (void *) gman; | 143 | man->priv = (void *) gman; |
| 133 | return 0; | 144 | return 0; |
| 134 | } | 145 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 45d5b5ab6ca9..47b70949bf3a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | |||
| @@ -29,12 +29,18 @@ | |||
| 29 | #include <drm/vmwgfx_drm.h> | 29 | #include <drm/vmwgfx_drm.h> |
| 30 | #include "vmwgfx_kms.h" | 30 | #include "vmwgfx_kms.h" |
| 31 | 31 | ||
| 32 | struct svga_3d_compat_cap { | ||
| 33 | SVGA3dCapsRecordHeader header; | ||
| 34 | SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX]; | ||
| 35 | }; | ||
| 36 | |||
| 32 | int vmw_getparam_ioctl(struct drm_device *dev, void *data, | 37 | int vmw_getparam_ioctl(struct drm_device *dev, void *data, |
| 33 | struct drm_file *file_priv) | 38 | struct drm_file *file_priv) |
| 34 | { | 39 | { |
| 35 | struct vmw_private *dev_priv = vmw_priv(dev); | 40 | struct vmw_private *dev_priv = vmw_priv(dev); |
| 36 | struct drm_vmw_getparam_arg *param = | 41 | struct drm_vmw_getparam_arg *param = |
| 37 | (struct drm_vmw_getparam_arg *)data; | 42 | (struct drm_vmw_getparam_arg *)data; |
| 43 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); | ||
| 38 | 44 | ||
| 39 | switch (param->param) { | 45 | switch (param->param) { |
| 40 | case DRM_VMW_PARAM_NUM_STREAMS: | 46 | case DRM_VMW_PARAM_NUM_STREAMS: |
| @@ -53,13 +59,18 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
| 53 | param->value = dev_priv->fifo.capabilities; | 59 | param->value = dev_priv->fifo.capabilities; |
| 54 | break; | 60 | break; |
| 55 | case DRM_VMW_PARAM_MAX_FB_SIZE: | 61 | case DRM_VMW_PARAM_MAX_FB_SIZE: |
| 56 | param->value = dev_priv->vram_size; | 62 | param->value = dev_priv->prim_bb_mem; |
| 57 | break; | 63 | break; |
| 58 | case DRM_VMW_PARAM_FIFO_HW_VERSION: | 64 | case DRM_VMW_PARAM_FIFO_HW_VERSION: |
| 59 | { | 65 | { |
| 60 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | 66 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; |
| 61 | const struct vmw_fifo_state *fifo = &dev_priv->fifo; | 67 | const struct vmw_fifo_state *fifo = &dev_priv->fifo; |
| 62 | 68 | ||
| 69 | if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) { | ||
| 70 | param->value = SVGA3D_HWVERSION_WS8_B1; | ||
| 71 | break; | ||
| 72 | } | ||
| 73 | |||
| 63 | param->value = | 74 | param->value = |
| 64 | ioread32(fifo_mem + | 75 | ioread32(fifo_mem + |
| 65 | ((fifo->capabilities & | 76 | ((fifo->capabilities & |
| @@ -69,7 +80,30 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
| 69 | break; | 80 | break; |
| 70 | } | 81 | } |
| 71 | case DRM_VMW_PARAM_MAX_SURF_MEMORY: | 82 | case DRM_VMW_PARAM_MAX_SURF_MEMORY: |
| 72 | param->value = dev_priv->memory_size; | 83 | if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) && |
| 84 | !vmw_fp->gb_aware) | ||
| 85 | param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2; | ||
| 86 | else | ||
| 87 | param->value = dev_priv->memory_size; | ||
| 88 | break; | ||
| 89 | case DRM_VMW_PARAM_3D_CAPS_SIZE: | ||
| 90 | if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) && | ||
| 91 | vmw_fp->gb_aware) | ||
| 92 | param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t); | ||
| 93 | else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) | ||
| 94 | param->value = sizeof(struct svga_3d_compat_cap) + | ||
| 95 | sizeof(uint32_t); | ||
| 96 | else | ||
| 97 | param->value = (SVGA_FIFO_3D_CAPS_LAST - | ||
| 98 | SVGA_FIFO_3D_CAPS + 1) * | ||
| 99 | sizeof(uint32_t); | ||
| 100 | break; | ||
| 101 | case DRM_VMW_PARAM_MAX_MOB_MEMORY: | ||
| 102 | vmw_fp->gb_aware = true; | ||
| 103 | param->value = dev_priv->max_mob_pages * PAGE_SIZE; | ||
| 104 | break; | ||
| 105 | case DRM_VMW_PARAM_MAX_MOB_SIZE: | ||
| 106 | param->value = dev_priv->max_mob_size; | ||
| 73 | break; | 107 | break; |
| 74 | default: | 108 | default: |
| 75 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", | 109 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", |
| @@ -80,6 +114,38 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
| 80 | return 0; | 114 | return 0; |
| 81 | } | 115 | } |
| 82 | 116 | ||
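Editorial note: the size reported for DRM_VMW_PARAM_3D_CAPS_SIZE now depends on whether the device has guest-backed objects and whether the client has declared itself GB-aware (it does so implicitly by querying DRM_VMW_PARAM_MAX_MOB_MEMORY). A minimal stand-alone sketch of that selection, with illustrative constants standing in for SVGA3D_DEVCAP_MAX, the FIFO caps range and the record header size:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative values; the real ones come from svga3d_reg.h / svga_reg.h. */
#define DEVCAP_MAX          256   /* stands in for SVGA3D_DEVCAP_MAX */
#define FIFO_3D_CAPS_WORDS  1024  /* stands in for SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1 */
#define COMPAT_HEADER_WORDS 2     /* stands in for sizeof(SVGA3dCapsRecordHeader) / 4 */

static size_t caps_size(bool gb_objects, bool gb_aware)
{
	if (gb_objects && gb_aware)
		/* one 32-bit value per device capability index */
		return DEVCAP_MAX * sizeof(uint32_t);
	if (gb_objects)
		/* legacy-style record: header + (index, value) pairs + terminator */
		return (COMPAT_HEADER_WORDS + 2 * DEVCAP_MAX + 1) * sizeof(uint32_t);
	/* non-GB device: caps are copied straight out of the FIFO range */
	return FIFO_3D_CAPS_WORDS * sizeof(uint32_t);
}

int main(void)
{
	printf("gb-aware: %zu bytes\n", caps_size(true, true));
	printf("compat:   %zu bytes\n", caps_size(true, false));
	printf("legacy:   %zu bytes\n", caps_size(false, false));
	return 0;
}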
| 117 | static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce, | ||
| 118 | size_t size) | ||
| 119 | { | ||
| 120 | struct svga_3d_compat_cap *compat_cap = | ||
| 121 | (struct svga_3d_compat_cap *) bounce; | ||
| 122 | unsigned int i; | ||
| 123 | size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs); | ||
| 124 | unsigned int max_size; | ||
| 125 | |||
| 126 | if (size < pair_offset) | ||
| 127 | return -EINVAL; | ||
| 128 | |||
| 129 | max_size = (size - pair_offset) / sizeof(SVGA3dCapPair); | ||
| 130 | |||
| 131 | if (max_size > SVGA3D_DEVCAP_MAX) | ||
| 132 | max_size = SVGA3D_DEVCAP_MAX; | ||
| 133 | |||
| 134 | compat_cap->header.length = | ||
| 135 | (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32); | ||
| 136 | compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; | ||
| 137 | |||
| 138 | mutex_lock(&dev_priv->hw_mutex); | ||
| 139 | for (i = 0; i < max_size; ++i) { | ||
| 140 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); | ||
| 141 | compat_cap->pairs[i][0] = i; | ||
| 142 | compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP); | ||
| 143 | } | ||
| 144 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 145 | |||
| 146 | return 0; | ||
| 147 | } | ||
| 148 | |||
| 83 | 149 | ||
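Editorial note: for clients that are not GB-aware, vmw_fill_compat_cap above repackages per-index SVGA_REG_DEV_CAP register reads into the legacy record format (a header followed by (index, value) pairs). A hedged user-space illustration of that layout; the stand-in types and constant values below are assumptions, not the real SVGA3dCapsRecordHeader/SVGA3dCapPair definitions:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Simplified stand-ins for the SVGA header types; field layout is assumed. */
struct caps_record_header {
	uint32_t length;	/* record length in 32-bit words, header included */
	uint32_t type;		/* record type, e.g. SVGA3DCAPS_RECORD_DEVCAPS */
};

typedef uint32_t cap_pair[2];	/* pair[0] = capability index, pair[1] = value */

#define NUM_CAPS 4		/* illustrative; the driver uses SVGA3D_DEVCAP_MAX */

struct compat_caps {
	struct caps_record_header header;
	cap_pair pairs[NUM_CAPS];
};

int main(void)
{
	struct compat_caps caps = { { 0, 0 } };
	uint32_t fake_values[NUM_CAPS] = { 1, 0, 8, 16 };	/* made-up cap values */
	size_t pair_offset = offsetof(struct compat_caps, pairs);
	unsigned int i;

	caps.header.type = 0x100;	/* placeholder for SVGA3DCAPS_RECORD_DEVCAPS */
	caps.header.length =
		(pair_offset + NUM_CAPS * sizeof(cap_pair)) / sizeof(uint32_t);

	for (i = 0; i < NUM_CAPS; ++i) {
		caps.pairs[i][0] = i;		/* capability index */
		caps.pairs[i][1] = fake_values[i];
	}

	printf("record length: %u words\n", (unsigned int) caps.header.length);
	return 0;
}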
| 84 | int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, | 150 | int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, |
| 85 | struct drm_file *file_priv) | 151 | struct drm_file *file_priv) |
| @@ -92,29 +158,58 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, | |||
| 92 | void __user *buffer = (void __user *)((unsigned long)(arg->buffer)); | 158 | void __user *buffer = (void __user *)((unsigned long)(arg->buffer)); |
| 93 | void *bounce; | 159 | void *bounce; |
| 94 | int ret; | 160 | int ret; |
| 161 | bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS); | ||
| 162 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); | ||
| 95 | 163 | ||
| 96 | if (unlikely(arg->pad64 != 0)) { | 164 | if (unlikely(arg->pad64 != 0)) { |
| 97 | DRM_ERROR("Illegal GET_3D_CAP argument.\n"); | 165 | DRM_ERROR("Illegal GET_3D_CAP argument.\n"); |
| 98 | return -EINVAL; | 166 | return -EINVAL; |
| 99 | } | 167 | } |
| 100 | 168 | ||
| 101 | size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) << 2; | 169 | if (gb_objects && vmw_fp->gb_aware) |
| 170 | size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t); | ||
| 171 | else if (gb_objects) | ||
| 172 | size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t); | ||
| 173 | else | ||
| 174 | size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) * | ||
| 175 | sizeof(uint32_t); | ||
| 102 | 176 | ||
| 103 | if (arg->max_size < size) | 177 | if (arg->max_size < size) |
| 104 | size = arg->max_size; | 178 | size = arg->max_size; |
| 105 | 179 | ||
| 106 | bounce = vmalloc(size); | 180 | bounce = vzalloc(size); |
| 107 | if (unlikely(bounce == NULL)) { | 181 | if (unlikely(bounce == NULL)) { |
| 108 | DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n"); | 182 | DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n"); |
| 109 | return -ENOMEM; | 183 | return -ENOMEM; |
| 110 | } | 184 | } |
| 111 | 185 | ||
| 112 | fifo_mem = dev_priv->mmio_virt; | 186 | if (gb_objects && vmw_fp->gb_aware) { |
| 113 | memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); | 187 | int i, num; |
| 188 | uint32_t *bounce32 = (uint32_t *) bounce; | ||
| 189 | |||
| 190 | num = size / sizeof(uint32_t); | ||
| 191 | if (num > SVGA3D_DEVCAP_MAX) | ||
| 192 | num = SVGA3D_DEVCAP_MAX; | ||
| 193 | |||
| 194 | mutex_lock(&dev_priv->hw_mutex); | ||
| 195 | for (i = 0; i < num; ++i) { | ||
| 196 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); | ||
| 197 | *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); | ||
| 198 | } | ||
| 199 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 200 | } else if (gb_objects) { | ||
| 201 | ret = vmw_fill_compat_cap(dev_priv, bounce, size); | ||
| 202 | if (unlikely(ret != 0)) | ||
| 203 | goto out_err; | ||
| 204 | } else { | ||
| 205 | fifo_mem = dev_priv->mmio_virt; | ||
| 206 | memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); | ||
| 207 | } | ||
| 114 | 208 | ||
| 115 | ret = copy_to_user(buffer, bounce, size); | 209 | ret = copy_to_user(buffer, bounce, size); |
| 116 | if (ret) | 210 | if (ret) |
| 117 | ret = -EFAULT; | 211 | ret = -EFAULT; |
| 212 | out_err: | ||
| 118 | vfree(bounce); | 213 | vfree(bounce); |
| 119 | 214 | ||
| 120 | if (unlikely(ret != 0)) | 215 | if (unlikely(ret != 0)) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index 4640adbcaf91..0c423766c441 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | |||
| @@ -30,7 +30,7 @@ | |||
| 30 | 30 | ||
| 31 | #define VMW_FENCE_WRAP (1 << 24) | 31 | #define VMW_FENCE_WRAP (1 << 24) |
| 32 | 32 | ||
| 33 | irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS) | 33 | irqreturn_t vmw_irq_handler(int irq, void *arg) |
| 34 | { | 34 | { |
| 35 | struct drm_device *dev = (struct drm_device *)arg; | 35 | struct drm_device *dev = (struct drm_device *)arg; |
| 36 | struct vmw_private *dev_priv = vmw_priv(dev); | 36 | struct vmw_private *dev_priv = vmw_priv(dev); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 03f1c2038631..8a650413dea5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
| @@ -40,7 +40,7 @@ struct vmw_clip_rect { | |||
| 40 | * Clip @num_rects number of @rects against @clip storing the | 40 | * Clip @num_rects number of @rects against @clip storing the |
| 41 | * results in @out_rects and the number of passed rects in @out_num. | 41 | * results in @out_rects and the number of passed rects in @out_num. |
| 42 | */ | 42 | */ |
| 43 | void vmw_clip_cliprects(struct drm_clip_rect *rects, | 43 | static void vmw_clip_cliprects(struct drm_clip_rect *rects, |
| 44 | int num_rects, | 44 | int num_rects, |
| 45 | struct vmw_clip_rect clip, | 45 | struct vmw_clip_rect clip, |
| 46 | SVGASignedRect *out_rects, | 46 | SVGASignedRect *out_rects, |
| @@ -423,7 +423,7 @@ struct vmw_framebuffer_surface { | |||
| 423 | struct drm_master *master; | 423 | struct drm_master *master; |
| 424 | }; | 424 | }; |
| 425 | 425 | ||
| 426 | void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) | 426 | static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) |
| 427 | { | 427 | { |
| 428 | struct vmw_framebuffer_surface *vfbs = | 428 | struct vmw_framebuffer_surface *vfbs = |
| 429 | vmw_framebuffer_to_vfbs(framebuffer); | 429 | vmw_framebuffer_to_vfbs(framebuffer); |
| @@ -589,7 +589,7 @@ out_free_tmp: | |||
| 589 | return ret; | 589 | return ret; |
| 590 | } | 590 | } |
| 591 | 591 | ||
| 592 | int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | 592 | static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, |
| 593 | struct drm_file *file_priv, | 593 | struct drm_file *file_priv, |
| 594 | unsigned flags, unsigned color, | 594 | unsigned flags, unsigned color, |
| 595 | struct drm_clip_rect *clips, | 595 | struct drm_clip_rect *clips, |
| @@ -609,9 +609,13 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | |||
| 609 | if (!dev_priv->sou_priv) | 609 | if (!dev_priv->sou_priv) |
| 610 | return -EINVAL; | 610 | return -EINVAL; |
| 611 | 611 | ||
| 612 | drm_modeset_lock_all(dev_priv->dev); | ||
| 613 | |||
| 612 | ret = ttm_read_lock(&vmaster->lock, true); | 614 | ret = ttm_read_lock(&vmaster->lock, true); |
| 613 | if (unlikely(ret != 0)) | 615 | if (unlikely(ret != 0)) { |
| 616 | drm_modeset_unlock_all(dev_priv->dev); | ||
| 614 | return ret; | 617 | return ret; |
| 618 | } | ||
| 615 | 619 | ||
| 616 | if (!num_clips) { | 620 | if (!num_clips) { |
| 617 | num_clips = 1; | 621 | num_clips = 1; |
| @@ -629,6 +633,9 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | |||
| 629 | clips, num_clips, inc, NULL); | 633 | clips, num_clips, inc, NULL); |
| 630 | 634 | ||
| 631 | ttm_read_unlock(&vmaster->lock); | 635 | ttm_read_unlock(&vmaster->lock); |
| 636 | |||
| 637 | drm_modeset_unlock_all(dev_priv->dev); | ||
| 638 | |||
| 632 | return 0; | 639 | return 0; |
| 633 | } | 640 | } |
| 634 | 641 | ||
| @@ -665,9 +672,9 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, | |||
| 665 | 672 | ||
| 666 | if (unlikely(surface->mip_levels[0] != 1 || | 673 | if (unlikely(surface->mip_levels[0] != 1 || |
| 667 | surface->num_sizes != 1 || | 674 | surface->num_sizes != 1 || |
| 668 | surface->sizes[0].width < mode_cmd->width || | 675 | surface->base_size.width < mode_cmd->width || |
| 669 | surface->sizes[0].height < mode_cmd->height || | 676 | surface->base_size.height < mode_cmd->height || |
| 670 | surface->sizes[0].depth != 1)) { | 677 | surface->base_size.depth != 1)) { |
| 671 | DRM_ERROR("Incompatible surface dimensions " | 678 | DRM_ERROR("Incompatible surface dimensions " |
| 672 | "for requested mode.\n"); | 679 | "for requested mode.\n"); |
| 673 | return -EINVAL; | 680 | return -EINVAL; |
| @@ -754,7 +761,7 @@ struct vmw_framebuffer_dmabuf { | |||
| 754 | struct vmw_dma_buffer *buffer; | 761 | struct vmw_dma_buffer *buffer; |
| 755 | }; | 762 | }; |
| 756 | 763 | ||
| 757 | void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) | 764 | static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) |
| 758 | { | 765 | { |
| 759 | struct vmw_framebuffer_dmabuf *vfbd = | 766 | struct vmw_framebuffer_dmabuf *vfbd = |
| 760 | vmw_framebuffer_to_vfbd(framebuffer); | 767 | vmw_framebuffer_to_vfbd(framebuffer); |
| @@ -940,7 +947,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv, | |||
| 940 | return ret; | 947 | return ret; |
| 941 | } | 948 | } |
| 942 | 949 | ||
| 943 | int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | 950 | static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, |
| 944 | struct drm_file *file_priv, | 951 | struct drm_file *file_priv, |
| 945 | unsigned flags, unsigned color, | 952 | unsigned flags, unsigned color, |
| 946 | struct drm_clip_rect *clips, | 953 | struct drm_clip_rect *clips, |
| @@ -953,9 +960,13 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | |||
| 953 | struct drm_clip_rect norect; | 960 | struct drm_clip_rect norect; |
| 954 | int ret, increment = 1; | 961 | int ret, increment = 1; |
| 955 | 962 | ||
| 963 | drm_modeset_lock_all(dev_priv->dev); | ||
| 964 | |||
| 956 | ret = ttm_read_lock(&vmaster->lock, true); | 965 | ret = ttm_read_lock(&vmaster->lock, true); |
| 957 | if (unlikely(ret != 0)) | 966 | if (unlikely(ret != 0)) { |
| 967 | drm_modeset_unlock_all(dev_priv->dev); | ||
| 958 | return ret; | 968 | return ret; |
| 969 | } | ||
| 959 | 970 | ||
| 960 | if (!num_clips) { | 971 | if (!num_clips) { |
| 961 | num_clips = 1; | 972 | num_clips = 1; |
| @@ -979,6 +990,9 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | |||
| 979 | } | 990 | } |
| 980 | 991 | ||
| 981 | ttm_read_unlock(&vmaster->lock); | 992 | ttm_read_unlock(&vmaster->lock); |
| 993 | |||
| 994 | drm_modeset_unlock_all(dev_priv->dev); | ||
| 995 | |||
| 982 | return ret; | 996 | return ret; |
| 983 | } | 997 | } |
| 984 | 998 | ||
| @@ -1631,7 +1645,7 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, | |||
| 1631 | uint32_t pitch, | 1645 | uint32_t pitch, |
| 1632 | uint32_t height) | 1646 | uint32_t height) |
| 1633 | { | 1647 | { |
| 1634 | return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size; | 1648 | return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem; |
| 1635 | } | 1649 | } |
| 1636 | 1650 | ||
| 1637 | 1651 | ||
| @@ -1663,7 +1677,7 @@ void vmw_disable_vblank(struct drm_device *dev, int crtc) | |||
| 1663 | * Small shared kms functions. | 1677 | * Small shared kms functions. |
| 1664 | */ | 1678 | */ |
| 1665 | 1679 | ||
| 1666 | int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, | 1680 | static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, |
| 1667 | struct drm_vmw_rect *rects) | 1681 | struct drm_vmw_rect *rects) |
| 1668 | { | 1682 | { |
| 1669 | struct drm_device *dev = dev_priv->dev; | 1683 | struct drm_device *dev = dev_priv->dev; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c new file mode 100644 index 000000000000..04a64b8cd3cd --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | |||
| @@ -0,0 +1,656 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | |||
| 28 | #include "vmwgfx_drv.h" | ||
| 29 | |||
| 30 | /* | ||
| 31 | * If we set up the screen target otable, screen objects stop working. | ||
| 32 | */ | ||
| 33 | |||
| 34 | #define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1) | ||
| 35 | |||
| 36 | #ifdef CONFIG_64BIT | ||
| 37 | #define VMW_PPN_SIZE 8 | ||
| 38 | #define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0 | ||
| 39 | #define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1 | ||
| 40 | #define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2 | ||
| 41 | #else | ||
| 42 | #define VMW_PPN_SIZE 4 | ||
| 43 | #define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0 | ||
| 44 | #define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1 | ||
| 45 | #define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2 | ||
| 46 | #endif | ||
| 47 | |||
| 48 | /* | ||
| 49 | * struct vmw_mob - Structure containing page table and metadata for a | ||
| 50 | * Guest Memory OBject. | ||
| 51 | * | ||
| 52 | * @num_pages: Number of pages that make up the page table. | ||
| 53 | * @pt_level: The indirection level of the page table. 0-2. | ||
| 54 | * @pt_root_page: DMA address of the level 0 page of the page table. | ||
| 55 | */ | ||
| 56 | struct vmw_mob { | ||
| 57 | struct ttm_buffer_object *pt_bo; | ||
| 58 | unsigned long num_pages; | ||
| 59 | unsigned pt_level; | ||
| 60 | dma_addr_t pt_root_page; | ||
| 61 | uint32_t id; | ||
| 62 | }; | ||
| 63 | |||
| 64 | /* | ||
| 65 | * struct vmw_otable - Guest Memory OBject table metadata | ||
| 66 | * | ||
| 67 | * @size: Size of the table (page-aligned). | ||
| 68 | * @page_table: Pointer to a struct vmw_mob holding the page table. | ||
| 69 | */ | ||
| 70 | struct vmw_otable { | ||
| 71 | unsigned long size; | ||
| 72 | struct vmw_mob *page_table; | ||
| 73 | }; | ||
| 74 | |||
| 75 | static int vmw_mob_pt_populate(struct vmw_private *dev_priv, | ||
| 76 | struct vmw_mob *mob); | ||
| 77 | static void vmw_mob_pt_setup(struct vmw_mob *mob, | ||
| 78 | struct vmw_piter data_iter, | ||
| 79 | unsigned long num_data_pages); | ||
| 80 | |||
| 81 | /* | ||
| 82 | * vmw_setup_otable_base - Issue an object table base setup command to | ||
| 83 | * the device | ||
| 84 | * | ||
| 85 | * @dev_priv: Pointer to a device private structure | ||
| 86 | * @type: Type of object table base | ||
| 87 | * @offset: Start of table offset into dev_priv::otable_bo | ||
| 88 | * @otable: Pointer to otable metadata. | ||
| 89 | * | ||
| 90 | * This function returns -ENOMEM if it fails to reserve fifo space, | ||
| 91 | * and may block waiting for fifo space. | ||
| 92 | */ | ||
| 93 | static int vmw_setup_otable_base(struct vmw_private *dev_priv, | ||
| 94 | SVGAOTableType type, | ||
| 95 | unsigned long offset, | ||
| 96 | struct vmw_otable *otable) | ||
| 97 | { | ||
| 98 | struct { | ||
| 99 | SVGA3dCmdHeader header; | ||
| 100 | SVGA3dCmdSetOTableBase64 body; | ||
| 101 | } *cmd; | ||
| 102 | struct vmw_mob *mob; | ||
| 103 | const struct vmw_sg_table *vsgt; | ||
| 104 | struct vmw_piter iter; | ||
| 105 | int ret; | ||
| 106 | |||
| 107 | BUG_ON(otable->page_table != NULL); | ||
| 108 | |||
| 109 | vsgt = vmw_bo_sg_table(dev_priv->otable_bo); | ||
| 110 | vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT); | ||
| 111 | WARN_ON(!vmw_piter_next(&iter)); | ||
| 112 | |||
| 113 | mob = vmw_mob_create(otable->size >> PAGE_SHIFT); | ||
| 114 | if (unlikely(mob == NULL)) { | ||
| 115 | DRM_ERROR("Failed creating OTable page table.\n"); | ||
| 116 | return -ENOMEM; | ||
| 117 | } | ||
| 118 | |||
| 119 | if (otable->size <= PAGE_SIZE) { | ||
| 120 | mob->pt_level = VMW_MOBFMT_PTDEPTH_0; | ||
| 121 | mob->pt_root_page = vmw_piter_dma_addr(&iter); | ||
| 122 | } else if (vsgt->num_regions == 1) { | ||
| 123 | mob->pt_level = SVGA3D_MOBFMT_RANGE; | ||
| 124 | mob->pt_root_page = vmw_piter_dma_addr(&iter); | ||
| 125 | } else { | ||
| 126 | ret = vmw_mob_pt_populate(dev_priv, mob); | ||
| 127 | if (unlikely(ret != 0)) | ||
| 128 | goto out_no_populate; | ||
| 129 | |||
| 130 | vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT); | ||
| 131 | mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1; | ||
| 132 | } | ||
| 133 | |||
| 134 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 135 | if (unlikely(cmd == NULL)) { | ||
| 136 | DRM_ERROR("Failed reserving FIFO space for OTable setup.\n"); | ||
| 137 | ret = -ENOMEM; | ||
| 138 | goto out_no_fifo; | ||
| 139 | } | ||
| 140 | |||
| 141 | memset(cmd, 0, sizeof(*cmd)); | ||
| 142 | cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64; | ||
| 143 | cmd->header.size = sizeof(cmd->body); | ||
| 144 | cmd->body.type = type; | ||
| 145 | cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT); | ||
| 146 | cmd->body.sizeInBytes = otable->size; | ||
| 147 | cmd->body.validSizeInBytes = 0; | ||
| 148 | cmd->body.ptDepth = mob->pt_level; | ||
| 149 | |||
| 150 | /* | ||
| 151 | * The device doesn't support this, but the otable size is | ||
| 152 | * determined at compile-time, so this BUG shouldn't trigger | ||
| 153 | * randomly. | ||
| 154 | */ | ||
| 155 | BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2); | ||
| 156 | |||
| 157 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 158 | otable->page_table = mob; | ||
| 159 | |||
| 160 | return 0; | ||
| 161 | |||
| 162 | out_no_fifo: | ||
| 163 | out_no_populate: | ||
| 164 | vmw_mob_destroy(mob); | ||
| 165 | return ret; | ||
| 166 | } | ||
| 167 | |||
| 168 | /* | ||
| 169 | * vmw_takedown_otable_base - Issue an object table base takedown command | ||
| 170 | * to the device | ||
| 171 | * | ||
| 172 | * @dev_priv: Pointer to a device private structure | ||
| 173 | * @type: Type of object table base | ||
| 174 | * | ||
| 175 | */ | ||
| 176 | static void vmw_takedown_otable_base(struct vmw_private *dev_priv, | ||
| 177 | SVGAOTableType type, | ||
| 178 | struct vmw_otable *otable) | ||
| 179 | { | ||
| 180 | struct { | ||
| 181 | SVGA3dCmdHeader header; | ||
| 182 | SVGA3dCmdSetOTableBase body; | ||
| 183 | } *cmd; | ||
| 184 | struct ttm_buffer_object *bo; | ||
| 185 | |||
| 186 | if (otable->page_table == NULL) | ||
| 187 | return; | ||
| 188 | |||
| 189 | bo = otable->page_table->pt_bo; | ||
| 190 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 191 | if (unlikely(cmd == NULL)) { | ||
| 192 | DRM_ERROR("Failed reserving FIFO space for OTable " | ||
| 193 | "takedown.\n"); | ||
| 194 | } else { | ||
| 195 | memset(cmd, 0, sizeof(*cmd)); | ||
| 196 | cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; | ||
| 197 | cmd->header.size = sizeof(cmd->body); | ||
| 198 | cmd->body.type = type; | ||
| 199 | cmd->body.baseAddress = 0; | ||
| 200 | cmd->body.sizeInBytes = 0; | ||
| 201 | cmd->body.validSizeInBytes = 0; | ||
| 202 | cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID; | ||
| 203 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 204 | } | ||
| 205 | |||
| 206 | if (bo) { | ||
| 207 | int ret; | ||
| 208 | |||
| 209 | ret = ttm_bo_reserve(bo, false, true, false, NULL); | ||
| 210 | BUG_ON(ret != 0); | ||
| 211 | |||
| 212 | vmw_fence_single_bo(bo, NULL); | ||
| 213 | ttm_bo_unreserve(bo); | ||
| 214 | } | ||
| 215 | |||
| 216 | vmw_mob_destroy(otable->page_table); | ||
| 217 | otable->page_table = NULL; | ||
| 218 | } | ||
| 219 | |||
| 220 | /* | ||
| 221 | * vmw_otables_setup - Set up guest backed memory object tables | ||
| 222 | * | ||
| 223 | * @dev_priv: Pointer to a device private structure | ||
| 224 | * | ||
| 225 | * Takes care of the device guest backed surface | ||
| 226 | * initialization by setting up the guest backed memory object tables. | ||
| 227 | * Returns 0 on success and various error codes on failure. A successful return | ||
| 228 | * means the object tables can be taken down using the vmw_otables_takedown | ||
| 229 | * function. | ||
| 230 | */ | ||
| 231 | int vmw_otables_setup(struct vmw_private *dev_priv) | ||
| 232 | { | ||
| 233 | unsigned long offset; | ||
| 234 | unsigned long bo_size; | ||
| 235 | struct vmw_otable *otables; | ||
| 236 | SVGAOTableType i; | ||
| 237 | int ret; | ||
| 238 | |||
| 239 | otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables), | ||
| 240 | GFP_KERNEL); | ||
| 241 | if (unlikely(otables == NULL)) { | ||
| 242 | DRM_ERROR("Failed to allocate space for otable " | ||
| 243 | "metadata.\n"); | ||
| 244 | return -ENOMEM; | ||
| 245 | } | ||
| 246 | |||
| 247 | otables[SVGA_OTABLE_MOB].size = | ||
| 248 | VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE; | ||
| 249 | otables[SVGA_OTABLE_SURFACE].size = | ||
| 250 | VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE; | ||
| 251 | otables[SVGA_OTABLE_CONTEXT].size = | ||
| 252 | VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE; | ||
| 253 | otables[SVGA_OTABLE_SHADER].size = | ||
| 254 | VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE; | ||
| 255 | otables[SVGA_OTABLE_SCREEN_TARGET].size = | ||
| 256 | VMWGFX_NUM_GB_SCREEN_TARGET * | ||
| 257 | SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE; | ||
| 258 | |||
| 259 | bo_size = 0; | ||
| 260 | for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) { | ||
| 261 | otables[i].size = | ||
| 262 | (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK; | ||
| 263 | bo_size += otables[i].size; | ||
| 264 | } | ||
| 265 | |||
| 266 | ret = ttm_bo_create(&dev_priv->bdev, bo_size, | ||
| 267 | ttm_bo_type_device, | ||
| 268 | &vmw_sys_ne_placement, | ||
| 269 | 0, false, NULL, | ||
| 270 | &dev_priv->otable_bo); | ||
| 271 | |||
| 272 | if (unlikely(ret != 0)) | ||
| 273 | goto out_no_bo; | ||
| 274 | |||
| 275 | ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL); | ||
| 276 | BUG_ON(ret != 0); | ||
| 277 | ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm); | ||
| 278 | if (unlikely(ret != 0)) | ||
| 279 | goto out_unreserve; | ||
| 280 | ret = vmw_bo_map_dma(dev_priv->otable_bo); | ||
| 281 | if (unlikely(ret != 0)) | ||
| 282 | goto out_unreserve; | ||
| 283 | |||
| 284 | ttm_bo_unreserve(dev_priv->otable_bo); | ||
| 285 | |||
| 286 | offset = 0; | ||
| 287 | for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) { | ||
| 288 | ret = vmw_setup_otable_base(dev_priv, i, offset, | ||
| 289 | &otables[i]); | ||
| 290 | if (unlikely(ret != 0)) | ||
| 291 | goto out_no_setup; | ||
| 292 | offset += otables[i].size; | ||
| 293 | } | ||
| 294 | |||
| 295 | dev_priv->otables = otables; | ||
| 296 | return 0; | ||
| 297 | |||
| 298 | out_unreserve: | ||
| 299 | ttm_bo_unreserve(dev_priv->otable_bo); | ||
| 300 | out_no_setup: | ||
| 301 | for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) | ||
| 302 | vmw_takedown_otable_base(dev_priv, i, &otables[i]); | ||
| 303 | |||
| 304 | ttm_bo_unref(&dev_priv->otable_bo); | ||
| 305 | out_no_bo: | ||
| 306 | kfree(otables); | ||
| 307 | return ret; | ||
| 308 | } | ||
| 309 | |||
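Editorial note: all object tables live back-to-back in a single system-memory buffer object; each table size is rounded up to a whole page before the offsets are assigned. A small stand-alone sketch of that sizing pass, with made-up (entry count, entry size) pairs in place of the VMWGFX_NUM_* and SVGA3D_OTABLE_*_ENTRY_SIZE constants:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	/* Illustrative (num_entries, entry_size) pairs, not the real constants. */
	unsigned long tables[][2] = {
		{ 512 * 1024, 16 },	/* MOB otable */
		{  32 * 1024, 64 },	/* surface otable */
		{        256,  8 },	/* context otable */
		{      20000, 16 },	/* shader otable */
		{          8, 64 },	/* screen target otable */
	};
	unsigned long bo_size = 0, offset;
	unsigned int i;

	/* First pass: page-align each table and sum up the backing bo size. */
	for (i = 0; i < sizeof(tables) / sizeof(tables[0]); ++i)
		bo_size += PAGE_ALIGN(tables[i][0] * tables[i][1]);

	printf("total otable bo size: %lu bytes\n", bo_size);

	/* Second pass: each table gets a page-aligned offset into the shared bo. */
	offset = 0;
	for (i = 0; i < sizeof(tables) / sizeof(tables[0]); ++i) {
		printf("table %u at offset %lu\n", i, offset);
		offset += PAGE_ALIGN(tables[i][0] * tables[i][1]);
	}
	return 0;
}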
| 310 | |||
| 311 | /* | ||
| 312 | * vmw_otables_takedown - Take down guest backed memory object tables | ||
| 313 | * | ||
| 314 | * @dev_priv: Pointer to a device private structure | ||
| 315 | * | ||
| 316 | * Take down the Guest Memory Object tables. | ||
| 317 | */ | ||
| 318 | void vmw_otables_takedown(struct vmw_private *dev_priv) | ||
| 319 | { | ||
| 320 | SVGAOTableType i; | ||
| 321 | struct ttm_buffer_object *bo = dev_priv->otable_bo; | ||
| 322 | int ret; | ||
| 323 | |||
| 324 | for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) | ||
| 325 | vmw_takedown_otable_base(dev_priv, i, | ||
| 326 | &dev_priv->otables[i]); | ||
| 327 | |||
| 328 | ret = ttm_bo_reserve(bo, false, true, false, NULL); | ||
| 329 | BUG_ON(ret != 0); | ||
| 330 | |||
| 331 | vmw_fence_single_bo(bo, NULL); | ||
| 332 | ttm_bo_unreserve(bo); | ||
| 333 | |||
| 334 | ttm_bo_unref(&dev_priv->otable_bo); | ||
| 335 | kfree(dev_priv->otables); | ||
| 336 | dev_priv->otables = NULL; | ||
| 337 | } | ||
| 338 | |||
| 339 | |||
| 340 | /* | ||
| 341 | * vmw_mob_calculate_pt_pages - Calculate the number of page table pages | ||
| 342 | * needed for a guest backed memory object. | ||
| 343 | * | ||
| 344 | * @data_pages: Number of data pages in the memory object buffer. | ||
| 345 | */ | ||
| 346 | static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages) | ||
| 347 | { | ||
| 348 | unsigned long data_size = data_pages * PAGE_SIZE; | ||
| 349 | unsigned long tot_size = 0; | ||
| 350 | |||
| 351 | while (likely(data_size > PAGE_SIZE)) { | ||
| 352 | data_size = DIV_ROUND_UP(data_size, PAGE_SIZE); | ||
| 353 | data_size *= VMW_PPN_SIZE; | ||
| 354 | tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK; | ||
| 355 | } | ||
| 356 | |||
| 357 | return tot_size >> PAGE_SHIFT; | ||
| 358 | } | ||
| 359 | |||
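Editorial note: the loop above repeatedly divides the remaining size by the number of page-table entries that fit in one page, accumulating one page-aligned level per iteration. The same arithmetic as a stand-alone sketch; for a 1 GiB MOB with 8-byte page-table entries it yields 512 + 1 = 513 page-table pages.

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12
/* 8 bytes per page-table entry, as on CONFIG_64BIT builds (VMW_PPN_SIZE). */
#define PPN_SIZE   8UL

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned long mob_pt_pages(unsigned long data_pages)
{
	unsigned long data_size = data_pages * PAGE_SIZE;
	unsigned long tot_size = 0;

	while (data_size > PAGE_SIZE) {
		/* entries needed to point at the current level ... */
		data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
		/* ... expressed in bytes of page-table storage ... */
		data_size *= PPN_SIZE;
		/* ... rounded up to whole pages for this level. */
		tot_size += (data_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	}

	return tot_size >> PAGE_SHIFT;
}

int main(void)
{
	/* 1 GiB of data (262144 pages) -> 512 level-1 pages + 1 level-2 page. */
	printf("%lu\n", mob_pt_pages(262144));
	return 0;
}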
| 360 | /* | ||
| 361 | * vmw_mob_create - Create a mob, but don't populate it. | ||
| 362 | * | ||
| 363 | * @data_pages: Number of data pages of the underlying buffer object. | ||
| 364 | */ | ||
| 365 | struct vmw_mob *vmw_mob_create(unsigned long data_pages) | ||
| 366 | { | ||
| 367 | struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL); | ||
| 368 | |||
| 369 | if (unlikely(mob == NULL)) | ||
| 370 | return NULL; | ||
| 371 | |||
| 372 | mob->num_pages = vmw_mob_calculate_pt_pages(data_pages); | ||
| 373 | |||
| 374 | return mob; | ||
| 375 | } | ||
| 376 | |||
| 377 | /* | ||
| 378 | * vmw_mob_pt_populate - Populate the mob pagetable | ||
| 379 | * | ||
| 380 | * @mob: Pointer to the mob whose pagetable we want to | ||
| 381 | * populate. | ||
| 382 | * | ||
| 383 | * This function allocates memory to be used for the pagetable, and | ||
| 384 | * adjusts TTM memory accounting accordingly. Returns -ENOMEM if | ||
| 385 | * memory resources aren't sufficient. The allocation may cause TTM buffer | ||
| 386 | * objects to be swapped out through the TTM memory accounting functions. | ||
| 387 | */ | ||
| 388 | static int vmw_mob_pt_populate(struct vmw_private *dev_priv, | ||
| 389 | struct vmw_mob *mob) | ||
| 390 | { | ||
| 391 | int ret; | ||
| 392 | BUG_ON(mob->pt_bo != NULL); | ||
| 393 | |||
| 394 | ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE, | ||
| 395 | ttm_bo_type_device, | ||
| 396 | &vmw_sys_ne_placement, | ||
| 397 | 0, false, NULL, &mob->pt_bo); | ||
| 398 | if (unlikely(ret != 0)) | ||
| 399 | return ret; | ||
| 400 | |||
| 401 | ret = ttm_bo_reserve(mob->pt_bo, false, true, false, NULL); | ||
| 402 | |||
| 403 | BUG_ON(ret != 0); | ||
| 404 | ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm); | ||
| 405 | if (unlikely(ret != 0)) | ||
| 406 | goto out_unreserve; | ||
| 407 | ret = vmw_bo_map_dma(mob->pt_bo); | ||
| 408 | if (unlikely(ret != 0)) | ||
| 409 | goto out_unreserve; | ||
| 410 | |||
| 411 | ttm_bo_unreserve(mob->pt_bo); | ||
| 412 | |||
| 413 | return 0; | ||
| 414 | |||
| 415 | out_unreserve: | ||
| 416 | ttm_bo_unreserve(mob->pt_bo); | ||
| 417 | ttm_bo_unref(&mob->pt_bo); | ||
| 418 | |||
| 419 | return ret; | ||
| 420 | } | ||
| 421 | |||
| 422 | /** | ||
| 423 | * vmw_mob_assign_ppn - Assign a value to a page table entry | ||
| 424 | * | ||
| 425 | * @addr: Pointer to pointer to page table entry. | ||
| 426 | * @val: The page table entry | ||
| 427 | * | ||
| 428 | * Assigns a value to a page table entry pointed to by *@addr and increments | ||
| 429 | * *@addr according to the page table entry size. | ||
| 430 | */ | ||
| 431 | #if (VMW_PPN_SIZE == 8) | ||
| 432 | static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val) | ||
| 433 | { | ||
| 434 | *((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT); | ||
| 435 | *addr += 2; | ||
| 436 | } | ||
| 437 | #else | ||
| 438 | static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val) | ||
| 439 | { | ||
| 440 | *(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT); | ||
| 441 | } | ||
| 442 | #endif | ||
| 443 | |||
| 444 | /* | ||
| 445 | * vmw_mob_build_pt - Build a pagetable | ||
| 446 | * | ||
| 447 | * @data_iter: Page iterator over the underlying buffer | ||
| 448 | * object's data pages. | ||
| 449 | * @num_data_pages: Number of buffer object data pages. | ||
| 450 | * @pt_iter: Page iterator over the page table pages. | ||
| 451 | * | ||
| 452 | * Returns the number of page table pages actually used. | ||
| 453 | * Uses atomic kmaps of highmem pages to avoid TLB thrashing. | ||
| 454 | */ | ||
| 455 | static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter, | ||
| 456 | unsigned long num_data_pages, | ||
| 457 | struct vmw_piter *pt_iter) | ||
| 458 | { | ||
| 459 | unsigned long pt_size = num_data_pages * VMW_PPN_SIZE; | ||
| 460 | unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE); | ||
| 461 | unsigned long pt_page; | ||
| 462 | __le32 *addr, *save_addr; | ||
| 463 | unsigned long i; | ||
| 464 | struct page *page; | ||
| 465 | |||
| 466 | for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) { | ||
| 467 | page = vmw_piter_page(pt_iter); | ||
| 468 | |||
| 469 | save_addr = addr = kmap_atomic(page); | ||
| 470 | |||
| 471 | for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) { | ||
| 472 | vmw_mob_assign_ppn(&addr, | ||
| 473 | vmw_piter_dma_addr(data_iter)); | ||
| 474 | if (unlikely(--num_data_pages == 0)) | ||
| 475 | break; | ||
| 476 | WARN_ON(!vmw_piter_next(data_iter)); | ||
| 477 | } | ||
| 478 | kunmap_atomic(save_addr); | ||
| 479 | vmw_piter_next(pt_iter); | ||
| 480 | } | ||
| 481 | |||
| 482 | return num_pt_pages; | ||
| 483 | } | ||
| 484 | |||
| 485 | /* | ||
| 486 | * vmw_mob_pt_setup - Set up a multilevel mob pagetable | ||
| 487 | * | ||
| 488 | * @mob: Pointer to a mob whose page table needs setting up. | ||
| 489 | * @data_iter: Page iterator over the buffer object's data | ||
| 490 | * pages. | ||
| 491 | * @num_data_pages: Number of buffer object data pages. | ||
| 492 | * | ||
| 493 | * Uses tail recursion to set up a multilevel mob page table. | ||
| 494 | */ | ||
| 495 | static void vmw_mob_pt_setup(struct vmw_mob *mob, | ||
| 496 | struct vmw_piter data_iter, | ||
| 497 | unsigned long num_data_pages) | ||
| 498 | { | ||
| 499 | unsigned long num_pt_pages = 0; | ||
| 500 | struct ttm_buffer_object *bo = mob->pt_bo; | ||
| 501 | struct vmw_piter save_pt_iter; | ||
| 502 | struct vmw_piter pt_iter; | ||
| 503 | const struct vmw_sg_table *vsgt; | ||
| 504 | int ret; | ||
| 505 | |||
| 506 | ret = ttm_bo_reserve(bo, false, true, false, NULL); | ||
| 507 | BUG_ON(ret != 0); | ||
| 508 | |||
| 509 | vsgt = vmw_bo_sg_table(bo); | ||
| 510 | vmw_piter_start(&pt_iter, vsgt, 0); | ||
| 511 | BUG_ON(!vmw_piter_next(&pt_iter)); | ||
| 512 | mob->pt_level = 0; | ||
| 513 | while (likely(num_data_pages > 1)) { | ||
| 514 | ++mob->pt_level; | ||
| 515 | BUG_ON(mob->pt_level > 2); | ||
| 516 | save_pt_iter = pt_iter; | ||
| 517 | num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages, | ||
| 518 | &pt_iter); | ||
| 519 | data_iter = save_pt_iter; | ||
| 520 | num_data_pages = num_pt_pages; | ||
| 521 | } | ||
| 522 | |||
| 523 | mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter); | ||
| 524 | ttm_bo_unreserve(bo); | ||
| 525 | } | ||
| 526 | |||
| 527 | /* | ||
| 528 | * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary. | ||
| 529 | * | ||
| 530 | * @mob: Pointer to a mob to destroy. | ||
| 531 | */ | ||
| 532 | void vmw_mob_destroy(struct vmw_mob *mob) | ||
| 533 | { | ||
| 534 | if (mob->pt_bo) | ||
| 535 | ttm_bo_unref(&mob->pt_bo); | ||
| 536 | kfree(mob); | ||
| 537 | } | ||
| 538 | |||
| 539 | /* | ||
| 540 | * vmw_mob_unbind - Hide a mob from the device. | ||
| 541 | * | ||
| 542 | * @dev_priv: Pointer to a device private. | ||
| 543 | * @mob: Pointer to the mob to unbind. | ||
| 544 | */ | ||
| 545 | void vmw_mob_unbind(struct vmw_private *dev_priv, | ||
| 546 | struct vmw_mob *mob) | ||
| 547 | { | ||
| 548 | struct { | ||
| 549 | SVGA3dCmdHeader header; | ||
| 550 | SVGA3dCmdDestroyGBMob body; | ||
| 551 | } *cmd; | ||
| 552 | int ret; | ||
| 553 | struct ttm_buffer_object *bo = mob->pt_bo; | ||
| 554 | |||
| 555 | if (bo) { | ||
| 556 | ret = ttm_bo_reserve(bo, false, true, false, NULL); | ||
| 557 | /* | ||
| 558 | * No one else should be using this buffer. | ||
| 559 | */ | ||
| 560 | BUG_ON(ret != 0); | ||
| 561 | } | ||
| 562 | |||
| 563 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 564 | if (unlikely(cmd == NULL)) { | ||
| 565 | DRM_ERROR("Failed reserving FIFO space for Memory " | ||
| 566 | "Object unbinding.\n"); | ||
| 567 | } else { | ||
| 568 | cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB; | ||
| 569 | cmd->header.size = sizeof(cmd->body); | ||
| 570 | cmd->body.mobid = mob->id; | ||
| 571 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 572 | } | ||
| 573 | if (bo) { | ||
| 574 | vmw_fence_single_bo(bo, NULL); | ||
| 575 | ttm_bo_unreserve(bo); | ||
| 576 | } | ||
| 577 | vmw_3d_resource_dec(dev_priv, false); | ||
| 578 | } | ||
| 579 | |||
| 580 | /* | ||
| 581 | * vmw_mob_bind - Make a mob visible to the device after first | ||
| 582 | * populating it if necessary. | ||
| 583 | * | ||
| 584 | * @dev_priv: Pointer to a device private. | ||
| 585 | * @mob: Pointer to the mob we're making visible. | ||
| 586 | * @vsgt: Pointer to a struct vmw_sg_table describing the data pages of | ||
| 587 | * the underlying buffer object. | ||
| 588 | * @num_data_pages: Number of data pages of the underlying buffer | ||
| 589 | * object. | ||
| 590 | * @mob_id: Device id of the mob to bind | ||
| 591 | * | ||
| 592 | * This function is intended to be interfaced with the ttm_tt backend | ||
| 593 | * code. | ||
| 594 | */ | ||
| 595 | int vmw_mob_bind(struct vmw_private *dev_priv, | ||
| 596 | struct vmw_mob *mob, | ||
| 597 | const struct vmw_sg_table *vsgt, | ||
| 598 | unsigned long num_data_pages, | ||
| 599 | int32_t mob_id) | ||
| 600 | { | ||
| 601 | int ret; | ||
| 602 | bool pt_set_up = false; | ||
| 603 | struct vmw_piter data_iter; | ||
| 604 | struct { | ||
| 605 | SVGA3dCmdHeader header; | ||
| 606 | SVGA3dCmdDefineGBMob64 body; | ||
| 607 | } *cmd; | ||
| 608 | |||
| 609 | mob->id = mob_id; | ||
| 610 | vmw_piter_start(&data_iter, vsgt, 0); | ||
| 611 | if (unlikely(!vmw_piter_next(&data_iter))) | ||
| 612 | return 0; | ||
| 613 | |||
| 614 | if (likely(num_data_pages == 1)) { | ||
| 615 | mob->pt_level = VMW_MOBFMT_PTDEPTH_0; | ||
| 616 | mob->pt_root_page = vmw_piter_dma_addr(&data_iter); | ||
| 617 | } else if (vsgt->num_regions == 1) { | ||
| 618 | mob->pt_level = SVGA3D_MOBFMT_RANGE; | ||
| 619 | mob->pt_root_page = vmw_piter_dma_addr(&data_iter); | ||
| 620 | } else if (unlikely(mob->pt_bo == NULL)) { | ||
| 621 | ret = vmw_mob_pt_populate(dev_priv, mob); | ||
| 622 | if (unlikely(ret != 0)) | ||
| 623 | return ret; | ||
| 624 | |||
| 625 | vmw_mob_pt_setup(mob, data_iter, num_data_pages); | ||
| 626 | pt_set_up = true; | ||
| 627 | mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1; | ||
| 628 | } | ||
| 629 | |||
| 630 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
| 631 | |||
| 632 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 633 | if (unlikely(cmd == NULL)) { | ||
| 634 | DRM_ERROR("Failed reserving FIFO space for Memory " | ||
| 635 | "Object binding.\n"); | ||
| 636 | goto out_no_cmd_space; | ||
| 637 | } | ||
| 638 | |||
| 639 | cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64; | ||
| 640 | cmd->header.size = sizeof(cmd->body); | ||
| 641 | cmd->body.mobid = mob_id; | ||
| 642 | cmd->body.ptDepth = mob->pt_level; | ||
| 643 | cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT); | ||
| 644 | cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE; | ||
| 645 | |||
| 646 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 647 | |||
| 648 | return 0; | ||
| 649 | |||
| 650 | out_no_cmd_space: | ||
| 651 | vmw_3d_resource_dec(dev_priv, false); | ||
| 652 | if (pt_set_up) | ||
| 653 | ttm_bo_unref(&mob->pt_bo); | ||
| 654 | |||
| 655 | return -ENOMEM; | ||
| 656 | } | ||
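Editorial note: the format selection in vmw_mob_bind (and in vmw_setup_otable_base earlier in this file) follows a simple rule: a single data page is its own root (PTDEPTH_0), a physically contiguous region can be described without a table (MOBFMT_RANGE), and anything else gets a one- or two-level page table. A hedged stand-alone sketch of that decision and of the resulting depth, independent of the kernel types:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PPN_SIZE  8UL	/* 64-bit page-table entries, as on CONFIG_64BIT */

enum mob_format { FMT_PTDEPTH_0, FMT_RANGE, FMT_PTDEPTH_1, FMT_PTDEPTH_2 };

static enum mob_format pick_format(unsigned long num_pages,
				   unsigned long num_dma_regions)
{
	unsigned long entries_per_page = PAGE_SIZE / PPN_SIZE;	/* 512 here */

	if (num_pages == 1)
		return FMT_PTDEPTH_0;	/* the single data page is the root */
	if (num_dma_regions == 1)
		return FMT_RANGE;	/* one contiguous region: no table at all */
	if (num_pages <= entries_per_page)
		return FMT_PTDEPTH_1;	/* one page of entries is enough */
	return FMT_PTDEPTH_2;		/* two levels cover up to 512 * 512 pages here */
}

int main(void)
{
	printf("%d\n", pick_format(1, 1));		/* FMT_PTDEPTH_0 */
	printf("%d\n", pick_format(1000, 1));		/* FMT_RANGE */
	printf("%d\n", pick_format(300, 17));		/* FMT_PTDEPTH_1 */
	printf("%d\n", pick_format(100000, 4000));	/* FMT_PTDEPTH_2 */
	return 0;
}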
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 9b5ea2ac7ddf..9757b57f8388 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
| @@ -88,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res) | |||
| 88 | return res; | 88 | return res; |
| 89 | } | 89 | } |
| 90 | 90 | ||
| 91 | struct vmw_resource * | ||
| 92 | vmw_resource_reference_unless_doomed(struct vmw_resource *res) | ||
| 93 | { | ||
| 94 | return kref_get_unless_zero(&res->kref) ? res : NULL; | ||
| 95 | } | ||
| 91 | 96 | ||
| 92 | /** | 97 | /** |
| 93 | * vmw_resource_release_id - release a resource id to the id manager. | 98 | * vmw_resource_release_id - release a resource id to the id manager. |
| @@ -136,8 +141,12 @@ static void vmw_resource_release(struct kref *kref) | |||
| 136 | vmw_dmabuf_unreference(&res->backup); | 141 | vmw_dmabuf_unreference(&res->backup); |
| 137 | } | 142 | } |
| 138 | 143 | ||
| 139 | if (likely(res->hw_destroy != NULL)) | 144 | if (likely(res->hw_destroy != NULL)) { |
| 140 | res->hw_destroy(res); | 145 | res->hw_destroy(res); |
| 146 | mutex_lock(&dev_priv->binding_mutex); | ||
| 147 | vmw_context_binding_res_list_kill(&res->binding_head); | ||
| 148 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 149 | } | ||
| 141 | 150 | ||
| 142 | id = res->id; | 151 | id = res->id; |
| 143 | if (res->res_free != NULL) | 152 | if (res->res_free != NULL) |
| @@ -215,6 +224,7 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res, | |||
| 215 | res->func = func; | 224 | res->func = func; |
| 216 | INIT_LIST_HEAD(&res->lru_head); | 225 | INIT_LIST_HEAD(&res->lru_head); |
| 217 | INIT_LIST_HEAD(&res->mob_head); | 226 | INIT_LIST_HEAD(&res->mob_head); |
| 227 | INIT_LIST_HEAD(&res->binding_head); | ||
| 218 | res->id = -1; | 228 | res->id = -1; |
| 219 | res->backup = NULL; | 229 | res->backup = NULL; |
| 220 | res->backup_offset = 0; | 230 | res->backup_offset = 0; |
| @@ -417,8 +427,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv, | |||
| 417 | INIT_LIST_HEAD(&vmw_bo->res_list); | 427 | INIT_LIST_HEAD(&vmw_bo->res_list); |
| 418 | 428 | ||
| 419 | ret = ttm_bo_init(bdev, &vmw_bo->base, size, | 429 | ret = ttm_bo_init(bdev, &vmw_bo->base, size, |
| 420 | (user) ? ttm_bo_type_device : | 430 | ttm_bo_type_device, placement, |
| 421 | ttm_bo_type_kernel, placement, | ||
| 422 | 0, interruptible, | 431 | 0, interruptible, |
| 423 | NULL, acc_size, NULL, bo_free); | 432 | NULL, acc_size, NULL, bo_free); |
| 424 | return ret; | 433 | return ret; |
| @@ -441,6 +450,21 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) | |||
| 441 | ttm_bo_unref(&bo); | 450 | ttm_bo_unref(&bo); |
| 442 | } | 451 | } |
| 443 | 452 | ||
| 453 | static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base, | ||
| 454 | enum ttm_ref_type ref_type) | ||
| 455 | { | ||
| 456 | struct vmw_user_dma_buffer *user_bo; | ||
| 457 | user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base); | ||
| 458 | |||
| 459 | switch (ref_type) { | ||
| 460 | case TTM_REF_SYNCCPU_WRITE: | ||
| 461 | ttm_bo_synccpu_write_release(&user_bo->dma.base); | ||
| 462 | break; | ||
| 463 | default: | ||
| 464 | BUG(); | ||
| 465 | } | ||
| 466 | } | ||
| 467 | |||
| 444 | /** | 468 | /** |
| 445 | * vmw_user_dmabuf_alloc - Allocate a user dma buffer | 469 | * vmw_user_dmabuf_alloc - Allocate a user dma buffer |
| 446 | * | 470 | * |
| @@ -471,6 +495,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, | |||
| 471 | } | 495 | } |
| 472 | 496 | ||
| 473 | ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size, | 497 | ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size, |
| 498 | (dev_priv->has_mob) ? | ||
| 499 | &vmw_sys_placement : | ||
| 474 | &vmw_vram_sys_placement, true, | 500 | &vmw_vram_sys_placement, true, |
| 475 | &vmw_user_dmabuf_destroy); | 501 | &vmw_user_dmabuf_destroy); |
| 476 | if (unlikely(ret != 0)) | 502 | if (unlikely(ret != 0)) |
| @@ -482,7 +508,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, | |||
| 482 | &user_bo->prime, | 508 | &user_bo->prime, |
| 483 | shareable, | 509 | shareable, |
| 484 | ttm_buffer_type, | 510 | ttm_buffer_type, |
| 485 | &vmw_user_dmabuf_release, NULL); | 511 | &vmw_user_dmabuf_release, |
| 512 | &vmw_user_dmabuf_ref_obj_release); | ||
| 486 | if (unlikely(ret != 0)) { | 513 | if (unlikely(ret != 0)) { |
| 487 | ttm_bo_unref(&tmp); | 514 | ttm_bo_unref(&tmp); |
| 488 | goto out_no_base_object; | 515 | goto out_no_base_object; |
| @@ -515,6 +542,130 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, | |||
| 515 | vmw_user_bo->prime.base.shareable) ? 0 : -EPERM; | 542 | vmw_user_bo->prime.base.shareable) ? 0 : -EPERM; |
| 516 | } | 543 | } |
| 517 | 544 | ||
| 545 | /** | ||
| 546 | * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu | ||
| 547 | * access, idling previous GPU operations on the buffer and optionally | ||
| 548 | * blocking it for further command submissions. | ||
| 549 | * | ||
| 550 | * @user_bo: Pointer to the buffer object being grabbed for CPU access | ||
| 551 | * @tfile: Identifying the caller. | ||
| 552 | * @flags: Flags indicating how the grab should be performed. | ||
| 553 | * | ||
| 554 | * A blocking grab will be automatically released when @tfile is closed. | ||
| 555 | */ | ||
| 556 | static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo, | ||
| 557 | struct ttm_object_file *tfile, | ||
| 558 | uint32_t flags) | ||
| 559 | { | ||
| 560 | struct ttm_buffer_object *bo = &user_bo->dma.base; | ||
| 561 | bool existed; | ||
| 562 | int ret; | ||
| 563 | |||
| 564 | if (flags & drm_vmw_synccpu_allow_cs) { | ||
| 565 | struct ttm_bo_device *bdev = bo->bdev; | ||
| 566 | |||
| 567 | spin_lock(&bdev->fence_lock); | ||
| 568 | ret = ttm_bo_wait(bo, false, true, | ||
| 569 | !!(flags & drm_vmw_synccpu_dontblock)); | ||
| 570 | spin_unlock(&bdev->fence_lock); | ||
| 571 | return ret; | ||
| 572 | } | ||
| 573 | |||
| 574 | ret = ttm_bo_synccpu_write_grab | ||
| 575 | (bo, !!(flags & drm_vmw_synccpu_dontblock)); | ||
| 576 | if (unlikely(ret != 0)) | ||
| 577 | return ret; | ||
| 578 | |||
| 579 | ret = ttm_ref_object_add(tfile, &user_bo->prime.base, | ||
| 580 | TTM_REF_SYNCCPU_WRITE, &existed); | ||
| 581 | if (ret != 0 || existed) | ||
| 582 | ttm_bo_synccpu_write_release(&user_bo->dma.base); | ||
| 583 | |||
| 584 | return ret; | ||
| 585 | } | ||
| 586 | |||
| 587 | /** | ||
| 588 | * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access, | ||
| 589 | * and unblock command submission on the buffer if blocked. | ||
| 590 | * | ||
| 591 | * @handle: Handle identifying the buffer object. | ||
| 592 | * @tfile: Identifying the caller. | ||
| 593 | * @flags: Flags indicating the type of release. | ||
| 594 | */ | ||
| 595 | static int vmw_user_dmabuf_synccpu_release(uint32_t handle, | ||
| 596 | struct ttm_object_file *tfile, | ||
| 597 | uint32_t flags) | ||
| 598 | { | ||
| 599 | if (!(flags & drm_vmw_synccpu_allow_cs)) | ||
| 600 | return ttm_ref_object_base_unref(tfile, handle, | ||
| 601 | TTM_REF_SYNCCPU_WRITE); | ||
| 602 | |||
| 603 | return 0; | ||
| 604 | } | ||
| 605 | |||
| 606 | /** | ||
| 607 | * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu | ||
| 608 | * functionality. | ||
| 609 | * | ||
| 610 | * @dev: Identifies the drm device. | ||
| 611 | * @data: Pointer to the ioctl argument. | ||
| 612 | * @file_priv: Identifies the caller. | ||
| 613 | * | ||
| 614 | * This function checks the ioctl arguments for validity and calls the | ||
| 615 | * relevant synccpu functions. | ||
| 616 | */ | ||
| 617 | int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, | ||
| 618 | struct drm_file *file_priv) | ||
| 619 | { | ||
| 620 | struct drm_vmw_synccpu_arg *arg = | ||
| 621 | (struct drm_vmw_synccpu_arg *) data; | ||
| 622 | struct vmw_dma_buffer *dma_buf; | ||
| 623 | struct vmw_user_dma_buffer *user_bo; | ||
| 624 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
| 625 | int ret; | ||
| 626 | |||
| 627 | if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 | ||
| 628 | || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write | | ||
| 629 | drm_vmw_synccpu_dontblock | | ||
| 630 | drm_vmw_synccpu_allow_cs)) != 0) { | ||
| 631 | DRM_ERROR("Illegal synccpu flags.\n"); | ||
| 632 | return -EINVAL; | ||
| 633 | } | ||
| 634 | |||
| 635 | switch (arg->op) { | ||
| 636 | case drm_vmw_synccpu_grab: | ||
| 637 | ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf); | ||
| 638 | if (unlikely(ret != 0)) | ||
| 639 | return ret; | ||
| 640 | |||
| 641 | user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, | ||
| 642 | dma); | ||
| 643 | ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags); | ||
| 644 | vmw_dmabuf_unreference(&dma_buf); | ||
| 645 | if (unlikely(ret != 0 && ret != -ERESTARTSYS && | ||
| 646 | ret != -EBUSY)) { | ||
| 647 | DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", | ||
| 648 | (unsigned int) arg->handle); | ||
| 649 | return ret; | ||
| 650 | } | ||
| 651 | break; | ||
| 652 | case drm_vmw_synccpu_release: | ||
| 653 | ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile, | ||
| 654 | arg->flags); | ||
| 655 | if (unlikely(ret != 0)) { | ||
| 656 | DRM_ERROR("Failed synccpu release on handle 0x%08x.\n", | ||
| 657 | (unsigned int) arg->handle); | ||
| 658 | return ret; | ||
| 659 | } | ||
| 660 | break; | ||
| 661 | default: | ||
| 662 | DRM_ERROR("Invalid synccpu operation.\n"); | ||
| 663 | return -EINVAL; | ||
| 664 | } | ||
| 665 | |||
| 666 | return 0; | ||
| 667 | } | ||
| 668 | |||
| 518 | int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | 669 | int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, |
| 519 | struct drm_file *file_priv) | 670 | struct drm_file *file_priv) |
| 520 | { | 671 | { |
| @@ -591,7 +742,8 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, | |||
| 591 | } | 742 | } |
| 592 | 743 | ||
| 593 | int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, | 744 | int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, |
| 594 | struct vmw_dma_buffer *dma_buf) | 745 | struct vmw_dma_buffer *dma_buf, |
| 746 | uint32_t *handle) | ||
| 595 | { | 747 | { |
| 596 | struct vmw_user_dma_buffer *user_bo; | 748 | struct vmw_user_dma_buffer *user_bo; |
| 597 | 749 | ||
| @@ -599,6 +751,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, | |||
| 599 | return -EINVAL; | 751 | return -EINVAL; |
| 600 | 752 | ||
| 601 | user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); | 753 | user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); |
| 754 | |||
| 755 | *handle = user_bo->prime.base.hash.key; | ||
| 602 | return ttm_ref_object_add(tfile, &user_bo->prime.base, | 756 | return ttm_ref_object_add(tfile, &user_bo->prime.base, |
| 603 | TTM_REF_USAGE, NULL); | 757 | TTM_REF_USAGE, NULL); |
| 604 | } | 758 | } |
| @@ -1291,11 +1445,54 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo, | |||
| 1291 | * @mem: The struct ttm_mem_reg indicating to what memory | 1445 | * @mem: The struct ttm_mem_reg indicating to what memory |
| 1292 | * region the move is taking place. | 1446 | * region the move is taking place. |
| 1293 | * | 1447 | * |
| 1294 | * For now does nothing. | 1448 | * Evicts the Guest Backed hardware resource if the backup |
| 1449 | * buffer is being moved out of MOB memory. | ||
| 1450 | * Note that this function should not race with the resource | ||
| 1451 | * validation code as long as it accesses only members of struct | ||
| 1452 | * resource that remain static while bo::res is !NULL and | ||
| 1453 | * while we have @bo reserved. struct resource::backup is *not* a | ||
| 1454 | * static member. The resource validation code will take care | ||
| 1455 | * to set @bo::res to NULL, while having @bo reserved when the | ||
| 1456 | * buffer is no longer bound to the resource, so @bo:res can be | ||
| 1457 | * used to determine whether there is a need to unbind and whether | ||
| 1458 | * it is safe to unbind. | ||
| 1295 | */ | 1459 | */ |
| 1296 | void vmw_resource_move_notify(struct ttm_buffer_object *bo, | 1460 | void vmw_resource_move_notify(struct ttm_buffer_object *bo, |
| 1297 | struct ttm_mem_reg *mem) | 1461 | struct ttm_mem_reg *mem) |
| 1298 | { | 1462 | { |
| 1463 | struct vmw_dma_buffer *dma_buf; | ||
| 1464 | |||
| 1465 | if (mem == NULL) | ||
| 1466 | return; | ||
| 1467 | |||
| 1468 | if (bo->destroy != vmw_dmabuf_bo_free && | ||
| 1469 | bo->destroy != vmw_user_dmabuf_destroy) | ||
| 1470 | return; | ||
| 1471 | |||
| 1472 | dma_buf = container_of(bo, struct vmw_dma_buffer, base); | ||
| 1473 | |||
| 1474 | if (mem->mem_type != VMW_PL_MOB) { | ||
| 1475 | struct vmw_resource *res, *n; | ||
| 1476 | struct ttm_bo_device *bdev = bo->bdev; | ||
| 1477 | struct ttm_validate_buffer val_buf; | ||
| 1478 | |||
| 1479 | val_buf.bo = bo; | ||
| 1480 | |||
| 1481 | list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) { | ||
| 1482 | |||
| 1483 | if (unlikely(res->func->unbind == NULL)) | ||
| 1484 | continue; | ||
| 1485 | |||
| 1486 | (void) res->func->unbind(res, true, &val_buf); | ||
| 1487 | res->backup_dirty = true; | ||
| 1488 | res->res_dirty = false; | ||
| 1489 | list_del_init(&res->mob_head); | ||
| 1490 | } | ||
| 1491 | |||
| 1492 | spin_lock(&bdev->fence_lock); | ||
| 1493 | (void) ttm_bo_wait(bo, false, false, false); | ||
| 1494 | spin_unlock(&bdev->fence_lock); | ||
| 1495 | } | ||
| 1299 | } | 1496 | } |
| 1300 | 1497 | ||
| 1301 | /** | 1498 | /** |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c new file mode 100644 index 000000000000..ee3856578a12 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | |||
| @@ -0,0 +1,812 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | |||
| 28 | #include "vmwgfx_drv.h" | ||
| 29 | #include "vmwgfx_resource_priv.h" | ||
| 30 | #include "ttm/ttm_placement.h" | ||
| 31 | |||
| 32 | #define VMW_COMPAT_SHADER_HT_ORDER 12 | ||
| 33 | |||
| 34 | struct vmw_shader { | ||
| 35 | struct vmw_resource res; | ||
| 36 | SVGA3dShaderType type; | ||
| 37 | uint32_t size; | ||
| 38 | }; | ||
| 39 | |||
| 40 | struct vmw_user_shader { | ||
| 41 | struct ttm_base_object base; | ||
| 42 | struct vmw_shader shader; | ||
| 43 | }; | ||
| 44 | |||
| 45 | /** | ||
| 46 | * enum vmw_compat_shader_state - Staging state for compat shaders | ||
| 47 | */ | ||
| 48 | enum vmw_compat_shader_state { | ||
| 49 | VMW_COMPAT_COMMITED, | ||
| 50 | VMW_COMPAT_ADD, | ||
| 51 | VMW_COMPAT_DEL | ||
| 52 | }; | ||
| 53 | |||
| 54 | /** | ||
| 55 | * struct vmw_compat_shader - Metadata for compat shaders. | ||
| 56 | * | ||
| 57 | * @handle: The TTM handle of the guest backed shader. | ||
| 58 | * @tfile: The struct ttm_object_file the guest backed shader is registered | ||
| 59 | * with. | ||
| 60 | * @hash: Hash item for lookup. | ||
| 61 | * @head: List head for staging lists or the compat shader manager list. | ||
| 62 | * @state: Staging state. | ||
| 63 | * | ||
| 64 | * The structure is protected by the cmdbuf lock. | ||
| 65 | */ | ||
| 66 | struct vmw_compat_shader { | ||
| 67 | u32 handle; | ||
| 68 | struct ttm_object_file *tfile; | ||
| 69 | struct drm_hash_item hash; | ||
| 70 | struct list_head head; | ||
| 71 | enum vmw_compat_shader_state state; | ||
| 72 | }; | ||
| 73 | |||
| 74 | /** | ||
| 75 | * struct vmw_compat_shader_manager - Compat shader manager. | ||
| 76 | * | ||
| 77 | * @shaders: Hash table containing staged and committed compat shaders | ||
| 78 | * @list: List of committed shaders. | ||
| 79 | * @dev_priv: Pointer to a device private structure. | ||
| 80 | * | ||
| 81 | * @shaders and @list are protected by the cmdbuf mutex for now. | ||
| 82 | */ | ||
| 83 | struct vmw_compat_shader_manager { | ||
| 84 | struct drm_open_hash shaders; | ||
| 85 | struct list_head list; | ||
| 86 | struct vmw_private *dev_priv; | ||
| 87 | }; | ||
| 88 | |||
| 89 | static void vmw_user_shader_free(struct vmw_resource *res); | ||
| 90 | static struct vmw_resource * | ||
| 91 | vmw_user_shader_base_to_res(struct ttm_base_object *base); | ||
| 92 | |||
| 93 | static int vmw_gb_shader_create(struct vmw_resource *res); | ||
| 94 | static int vmw_gb_shader_bind(struct vmw_resource *res, | ||
| 95 | struct ttm_validate_buffer *val_buf); | ||
| 96 | static int vmw_gb_shader_unbind(struct vmw_resource *res, | ||
| 97 | bool readback, | ||
| 98 | struct ttm_validate_buffer *val_buf); | ||
| 99 | static int vmw_gb_shader_destroy(struct vmw_resource *res); | ||
| 100 | |||
| 101 | static uint64_t vmw_user_shader_size; | ||
| 102 | |||
| 103 | static const struct vmw_user_resource_conv user_shader_conv = { | ||
| 104 | .object_type = VMW_RES_SHADER, | ||
| 105 | .base_obj_to_res = vmw_user_shader_base_to_res, | ||
| 106 | .res_free = vmw_user_shader_free | ||
| 107 | }; | ||
| 108 | |||
| 109 | const struct vmw_user_resource_conv *user_shader_converter = | ||
| 110 | &user_shader_conv; | ||
| 111 | |||
| 112 | |||
| 113 | static const struct vmw_res_func vmw_gb_shader_func = { | ||
| 114 | .res_type = vmw_res_shader, | ||
| 115 | .needs_backup = true, | ||
| 116 | .may_evict = true, | ||
| 117 | .type_name = "guest backed shaders", | ||
| 118 | .backup_placement = &vmw_mob_placement, | ||
| 119 | .create = vmw_gb_shader_create, | ||
| 120 | .destroy = vmw_gb_shader_destroy, | ||
| 121 | .bind = vmw_gb_shader_bind, | ||
| 122 | .unbind = vmw_gb_shader_unbind | ||
| 123 | }; | ||
| 124 | |||
| 125 | /** | ||
| 126 | * Shader management: | ||
| 127 | */ | ||
| 128 | |||
| 129 | static inline struct vmw_shader * | ||
| 130 | vmw_res_to_shader(struct vmw_resource *res) | ||
| 131 | { | ||
| 132 | return container_of(res, struct vmw_shader, res); | ||
| 133 | } | ||
| 134 | |||
| 135 | static void vmw_hw_shader_destroy(struct vmw_resource *res) | ||
| 136 | { | ||
| 137 | (void) vmw_gb_shader_destroy(res); | ||
| 138 | } | ||
| 139 | |||
| 140 | static int vmw_gb_shader_init(struct vmw_private *dev_priv, | ||
| 141 | struct vmw_resource *res, | ||
| 142 | uint32_t size, | ||
| 143 | uint64_t offset, | ||
| 144 | SVGA3dShaderType type, | ||
| 145 | struct vmw_dma_buffer *byte_code, | ||
| 146 | void (*res_free) (struct vmw_resource *res)) | ||
| 147 | { | ||
| 148 | struct vmw_shader *shader = vmw_res_to_shader(res); | ||
| 149 | int ret; | ||
| 150 | |||
| 151 | ret = vmw_resource_init(dev_priv, res, true, | ||
| 152 | res_free, &vmw_gb_shader_func); | ||
| 153 | |||
| 154 | |||
| 155 | if (unlikely(ret != 0)) { | ||
| 156 | if (res_free) | ||
| 157 | res_free(res); | ||
| 158 | else | ||
| 159 | kfree(res); | ||
| 160 | return ret; | ||
| 161 | } | ||
| 162 | |||
| 163 | res->backup_size = size; | ||
| 164 | if (byte_code) { | ||
| 165 | res->backup = vmw_dmabuf_reference(byte_code); | ||
| 166 | res->backup_offset = offset; | ||
| 167 | } | ||
| 168 | shader->size = size; | ||
| 169 | shader->type = type; | ||
| 170 | |||
| 171 | vmw_resource_activate(res, vmw_hw_shader_destroy); | ||
| 172 | return 0; | ||
| 173 | } | ||
| 174 | |||
| 175 | static int vmw_gb_shader_create(struct vmw_resource *res) | ||
| 176 | { | ||
| 177 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 178 | struct vmw_shader *shader = vmw_res_to_shader(res); | ||
| 179 | int ret; | ||
| 180 | struct { | ||
| 181 | SVGA3dCmdHeader header; | ||
| 182 | SVGA3dCmdDefineGBShader body; | ||
| 183 | } *cmd; | ||
| 184 | |||
| 185 | if (likely(res->id != -1)) | ||
| 186 | return 0; | ||
| 187 | |||
| 188 | ret = vmw_resource_alloc_id(res); | ||
| 189 | if (unlikely(ret != 0)) { | ||
| 190 | DRM_ERROR("Failed to allocate a shader id.\n"); | ||
| 191 | goto out_no_id; | ||
| 192 | } | ||
| 193 | |||
| 194 | if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) { | ||
| 195 | ret = -EBUSY; | ||
| 196 | goto out_no_fifo; | ||
| 197 | } | ||
| 198 | |||
| 199 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 200 | if (unlikely(cmd == NULL)) { | ||
| 201 | DRM_ERROR("Failed reserving FIFO space for shader " | ||
| 202 | "creation.\n"); | ||
| 203 | ret = -ENOMEM; | ||
| 204 | goto out_no_fifo; | ||
| 205 | } | ||
| 206 | |||
| 207 | cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER; | ||
| 208 | cmd->header.size = sizeof(cmd->body); | ||
| 209 | cmd->body.shid = res->id; | ||
| 210 | cmd->body.type = shader->type; | ||
| 211 | cmd->body.sizeInBytes = shader->size; | ||
| 212 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 213 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
| 214 | |||
| 215 | return 0; | ||
| 216 | |||
| 217 | out_no_fifo: | ||
| 218 | vmw_resource_release_id(res); | ||
| 219 | out_no_id: | ||
| 220 | return ret; | ||
| 221 | } | ||
| 222 | |||
| 223 | static int vmw_gb_shader_bind(struct vmw_resource *res, | ||
| 224 | struct ttm_validate_buffer *val_buf) | ||
| 225 | { | ||
| 226 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 227 | struct { | ||
| 228 | SVGA3dCmdHeader header; | ||
| 229 | SVGA3dCmdBindGBShader body; | ||
| 230 | } *cmd; | ||
| 231 | struct ttm_buffer_object *bo = val_buf->bo; | ||
| 232 | |||
| 233 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
| 234 | |||
| 235 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 236 | if (unlikely(cmd == NULL)) { | ||
| 237 | DRM_ERROR("Failed reserving FIFO space for shader " | ||
| 238 | "binding.\n"); | ||
| 239 | return -ENOMEM; | ||
| 240 | } | ||
| 241 | |||
| 242 | cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER; | ||
| 243 | cmd->header.size = sizeof(cmd->body); | ||
| 244 | cmd->body.shid = res->id; | ||
| 245 | cmd->body.mobid = bo->mem.start; | ||
| 246 | cmd->body.offsetInBytes = 0; | ||
| 247 | res->backup_dirty = false; | ||
| 248 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 249 | |||
| 250 | return 0; | ||
| 251 | } | ||
| 252 | |||
| 253 | static int vmw_gb_shader_unbind(struct vmw_resource *res, | ||
| 254 | bool readback, | ||
| 255 | struct ttm_validate_buffer *val_buf) | ||
| 256 | { | ||
| 257 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 258 | struct { | ||
| 259 | SVGA3dCmdHeader header; | ||
| 260 | SVGA3dCmdBindGBShader body; | ||
| 261 | } *cmd; | ||
| 262 | struct vmw_fence_obj *fence; | ||
| 263 | |||
| 264 | BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB); | ||
| 265 | |||
| 266 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 267 | if (unlikely(cmd == NULL)) { | ||
| 268 | DRM_ERROR("Failed reserving FIFO space for shader " | ||
| 269 | "unbinding.\n"); | ||
| 270 | return -ENOMEM; | ||
| 271 | } | ||
| 272 | |||
| 273 | cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER; | ||
| 274 | cmd->header.size = sizeof(cmd->body); | ||
| 275 | cmd->body.shid = res->id; | ||
| 276 | cmd->body.mobid = SVGA3D_INVALID_ID; | ||
| 277 | cmd->body.offsetInBytes = 0; | ||
| 278 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 279 | |||
| 280 | /* | ||
| 281 | * Create a fence object and fence the backup buffer. | ||
| 282 | */ | ||
| 283 | |||
| 284 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, | ||
| 285 | &fence, NULL); | ||
| 286 | |||
| 287 | vmw_fence_single_bo(val_buf->bo, fence); | ||
| 288 | |||
| 289 | if (likely(fence != NULL)) | ||
| 290 | vmw_fence_obj_unreference(&fence); | ||
| 291 | |||
| 292 | return 0; | ||
| 293 | } | ||
| 294 | |||
| 295 | static int vmw_gb_shader_destroy(struct vmw_resource *res) | ||
| 296 | { | ||
| 297 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 298 | struct { | ||
| 299 | SVGA3dCmdHeader header; | ||
| 300 | SVGA3dCmdDestroyGBShader body; | ||
| 301 | } *cmd; | ||
| 302 | |||
| 303 | if (likely(res->id == -1)) | ||
| 304 | return 0; | ||
| 305 | |||
| 306 | mutex_lock(&dev_priv->binding_mutex); | ||
| 307 | vmw_context_binding_res_list_scrub(&res->binding_head); | ||
| 308 | |||
| 309 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 310 | if (unlikely(cmd == NULL)) { | ||
| 311 | DRM_ERROR("Failed reserving FIFO space for shader " | ||
| 312 | "destruction.\n"); | ||
| 313 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 314 | return -ENOMEM; | ||
| 315 | } | ||
| 316 | |||
| 317 | cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER; | ||
| 318 | cmd->header.size = sizeof(cmd->body); | ||
| 319 | cmd->body.shid = res->id; | ||
| 320 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 321 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 322 | vmw_resource_release_id(res); | ||
| 323 | vmw_3d_resource_dec(dev_priv, false); | ||
| 324 | |||
| 325 | return 0; | ||
| 326 | } | ||
| 327 | |||
| 328 | /** | ||
| 329 | * User-space shader management: | ||
| 330 | */ | ||
| 331 | |||
| 332 | static struct vmw_resource * | ||
| 333 | vmw_user_shader_base_to_res(struct ttm_base_object *base) | ||
| 334 | { | ||
| 335 | return &(container_of(base, struct vmw_user_shader, base)-> | ||
| 336 | shader.res); | ||
| 337 | } | ||
| 338 | |||
| 339 | static void vmw_user_shader_free(struct vmw_resource *res) | ||
| 340 | { | ||
| 341 | struct vmw_user_shader *ushader = | ||
| 342 | container_of(res, struct vmw_user_shader, shader.res); | ||
| 343 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 344 | |||
| 345 | ttm_base_object_kfree(ushader, base); | ||
| 346 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | ||
| 347 | vmw_user_shader_size); | ||
| 348 | } | ||
| 349 | |||
| 350 | /** | ||
| 351 | * This function is called when user space has no more references on the | ||
| 352 | * base object. It releases the base-object's reference on the resource object. | ||
| 353 | */ | ||
| 354 | |||
| 355 | static void vmw_user_shader_base_release(struct ttm_base_object **p_base) | ||
| 356 | { | ||
| 357 | struct ttm_base_object *base = *p_base; | ||
| 358 | struct vmw_resource *res = vmw_user_shader_base_to_res(base); | ||
| 359 | |||
| 360 | *p_base = NULL; | ||
| 361 | vmw_resource_unreference(&res); | ||
| 362 | } | ||
| 363 | |||
| 364 | int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, | ||
| 365 | struct drm_file *file_priv) | ||
| 366 | { | ||
| 367 | struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data; | ||
| 368 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
| 369 | |||
| 370 | return ttm_ref_object_base_unref(tfile, arg->handle, | ||
| 371 | TTM_REF_USAGE); | ||
| 372 | } | ||
| 373 | |||
| 374 | static int vmw_shader_alloc(struct vmw_private *dev_priv, | ||
| 375 | struct vmw_dma_buffer *buffer, | ||
| 376 | size_t shader_size, | ||
| 377 | size_t offset, | ||
| 378 | SVGA3dShaderType shader_type, | ||
| 379 | struct ttm_object_file *tfile, | ||
| 380 | u32 *handle) | ||
| 381 | { | ||
| 382 | struct vmw_user_shader *ushader; | ||
| 383 | struct vmw_resource *res, *tmp; | ||
| 384 | int ret; | ||
| 385 | |||
| 386 | /* | ||
| 387 | * Approximate idr memory usage with 128 bytes. It will be limited | ||
| 388 | * by the maximum number of shaders anyway. | ||
| 389 | */ | ||
| 390 | if (unlikely(vmw_user_shader_size == 0)) | ||
| 391 | vmw_user_shader_size = | ||
| 392 | ttm_round_pot(sizeof(struct vmw_user_shader)) + 128; | ||
| 393 | |||
| 394 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), | ||
| 395 | vmw_user_shader_size, | ||
| 396 | false, true); | ||
| 397 | if (unlikely(ret != 0)) { | ||
| 398 | if (ret != -ERESTARTSYS) | ||
| 399 | DRM_ERROR("Out of graphics memory for shader " | ||
| 400 | "creation.\n"); | ||
| 401 | goto out; | ||
| 402 | } | ||
| 403 | |||
| 404 | ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); | ||
| 405 | if (unlikely(ushader == NULL)) { | ||
| 406 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | ||
| 407 | vmw_user_shader_size); | ||
| 408 | ret = -ENOMEM; | ||
| 409 | goto out; | ||
| 410 | } | ||
| 411 | |||
| 412 | res = &ushader->shader.res; | ||
| 413 | ushader->base.shareable = false; | ||
| 414 | ushader->base.tfile = NULL; | ||
| 415 | |||
| 416 | /* | ||
| 417 | * From here on, the destructor takes over resource freeing. | ||
| 418 | */ | ||
| 419 | |||
| 420 | ret = vmw_gb_shader_init(dev_priv, res, shader_size, | ||
| 421 | offset, shader_type, buffer, | ||
| 422 | vmw_user_shader_free); | ||
| 423 | if (unlikely(ret != 0)) | ||
| 424 | goto out; | ||
| 425 | |||
| 426 | tmp = vmw_resource_reference(res); | ||
| 427 | ret = ttm_base_object_init(tfile, &ushader->base, false, | ||
| 428 | VMW_RES_SHADER, | ||
| 429 | &vmw_user_shader_base_release, NULL); | ||
| 430 | |||
| 431 | if (unlikely(ret != 0)) { | ||
| 432 | vmw_resource_unreference(&tmp); | ||
| 433 | goto out_err; | ||
| 434 | } | ||
| 435 | |||
| 436 | if (handle) | ||
| 437 | *handle = ushader->base.hash.key; | ||
| 438 | out_err: | ||
| 439 | vmw_resource_unreference(&res); | ||
| 440 | out: | ||
| 441 | return ret; | ||
| 442 | } | ||
| 443 | |||
| 444 | |||
| 445 | int vmw_shader_define_ioctl(struct drm_device *dev, void *data, | ||
| 446 | struct drm_file *file_priv) | ||
| 447 | { | ||
| 448 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 449 | struct drm_vmw_shader_create_arg *arg = | ||
| 450 | (struct drm_vmw_shader_create_arg *)data; | ||
| 451 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
| 452 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
| 453 | struct vmw_dma_buffer *buffer = NULL; | ||
| 454 | SVGA3dShaderType shader_type; | ||
| 455 | int ret; | ||
| 456 | |||
| 457 | if (arg->buffer_handle != SVGA3D_INVALID_ID) { | ||
| 458 | ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle, | ||
| 459 | &buffer); | ||
| 460 | if (unlikely(ret != 0)) { | ||
| 461 | DRM_ERROR("Could not find buffer for shader " | ||
| 462 | "creation.\n"); | ||
| 463 | return ret; | ||
| 464 | } | ||
| 465 | |||
| 466 | if ((u64)buffer->base.num_pages * PAGE_SIZE < | ||
| 467 | (u64)arg->size + (u64)arg->offset) { | ||
| 468 | DRM_ERROR("Illegal buffer- or shader size.\n"); | ||
| 469 | ret = -EINVAL; | ||
| 470 | goto out_bad_arg; | ||
| 471 | } | ||
| 472 | } | ||
| 473 | |||
| 474 | switch (arg->shader_type) { | ||
| 475 | case drm_vmw_shader_type_vs: | ||
| 476 | shader_type = SVGA3D_SHADERTYPE_VS; | ||
| 477 | break; | ||
| 478 | case drm_vmw_shader_type_ps: | ||
| 479 | shader_type = SVGA3D_SHADERTYPE_PS; | ||
| 480 | break; | ||
| 481 | case drm_vmw_shader_type_gs: | ||
| 482 | shader_type = SVGA3D_SHADERTYPE_GS; | ||
| 483 | break; | ||
| 484 | default: | ||
| 485 | DRM_ERROR("Illegal shader type.\n"); | ||
| 486 | ret = -EINVAL; | ||
| 487 | goto out_bad_arg; | ||
| 488 | } | ||
| 489 | |||
| 490 | ret = ttm_read_lock(&vmaster->lock, true); | ||
| 491 | if (unlikely(ret != 0)) | ||
| 492 | goto out_bad_arg; | ||
| 493 | |||
| 494 | ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset, | ||
| 495 | shader_type, tfile, &arg->shader_handle); | ||
| 496 | |||
| 497 | ttm_read_unlock(&vmaster->lock); | ||
| 498 | out_bad_arg: | ||
| 499 | vmw_dmabuf_unreference(&buffer); | ||
| 500 | return ret; | ||
| 501 | } | ||
| 502 | |||
| 503 | /** | ||
| 504 | * vmw_compat_shader_lookup - Look up a compat shader | ||
| 505 | * | ||
| 506 | * @man: Pointer to the compat shader manager. | ||
| 507 | * @shader_type: The shader type, which combined with the user_key identifies | ||
| 508 | * the shader. | ||
| 509 | * @user_key: On entry, a pointer to the user key identifying the shader. | ||
| 510 | * On successful exit, it will contain the guest-backed shader's TTM handle. | ||
| 511 | * | ||
| 512 | * Returns 0 on success. Non-zero on failure, in which case the value pointed | ||
| 513 | * to by @user_key is unmodified. | ||
| 514 | */ | ||
| 515 | int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man, | ||
| 516 | SVGA3dShaderType shader_type, | ||
| 517 | u32 *user_key) | ||
| 518 | { | ||
| 519 | struct drm_hash_item *hash; | ||
| 520 | int ret; | ||
| 521 | unsigned long key = *user_key | (shader_type << 24); | ||
| 522 | |||
| 523 | ret = drm_ht_find_item(&man->shaders, key, &hash); | ||
| 524 | if (unlikely(ret != 0)) | ||
| 525 | return ret; | ||
| 526 | |||
| 527 | *user_key = drm_hash_entry(hash, struct vmw_compat_shader, | ||
| 528 | hash)->handle; | ||
| 529 | |||
| 530 | return 0; | ||
| 531 | } | ||
| 532 | |||
| 533 | /** | ||
| 534 | * vmw_compat_shader_free - Free a compat shader. | ||
| 535 | * | ||
| 536 | * @man: Pointer to the compat shader manager. | ||
| 537 | * @entry: Pointer to a struct vmw_compat_shader. | ||
| 538 | * | ||
| 539 | * Frees a struct vmw_compat_shader entry and drops its reference to the | ||
| 540 | * guest backed shader. | ||
| 541 | */ | ||
| 542 | static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man, | ||
| 543 | struct vmw_compat_shader *entry) | ||
| 544 | { | ||
| 545 | list_del(&entry->head); | ||
| 546 | WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash)); | ||
| 547 | WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle, | ||
| 548 | TTM_REF_USAGE)); | ||
| 549 | kfree(entry); | ||
| 550 | } | ||
| 551 | |||
| 552 | /** | ||
| 553 | * vmw_compat_shaders_commit - Commit a list of compat shader actions. | ||
| 554 | * | ||
| 555 | * @man: Pointer to the compat shader manager. | ||
| 556 | * @list: Caller's list of compat shader actions. | ||
| 557 | * | ||
| 558 | * This function commits a list of compat shader additions or removals. | ||
| 559 | * It is typically called when the execbuf ioctl call triggering these | ||
| 560 | * actions has committed the fifo contents to the device. | ||
| 561 | */ | ||
| 562 | void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man, | ||
| 563 | struct list_head *list) | ||
| 564 | { | ||
| 565 | struct vmw_compat_shader *entry, *next; | ||
| 566 | |||
| 567 | list_for_each_entry_safe(entry, next, list, head) { | ||
| 568 | list_del(&entry->head); | ||
| 569 | switch (entry->state) { | ||
| 570 | case VMW_COMPAT_ADD: | ||
| 571 | entry->state = VMW_COMPAT_COMMITED; | ||
| 572 | list_add_tail(&entry->head, &man->list); | ||
| 573 | break; | ||
| 574 | case VMW_COMPAT_DEL: | ||
| 575 | ttm_ref_object_base_unref(entry->tfile, entry->handle, | ||
| 576 | TTM_REF_USAGE); | ||
| 577 | kfree(entry); | ||
| 578 | break; | ||
| 579 | default: | ||
| 580 | BUG(); | ||
| 581 | break; | ||
| 582 | } | ||
| 583 | } | ||
| 584 | } | ||
| 585 | |||
| 586 | /** | ||
| 587 | * vmw_compat_shaders_revert - Revert a list of compat shader actions | ||
| 588 | * | ||
| 589 | * @man: Pointer to the compat shader manager. | ||
| 590 | * @list: Caller's list of compat shader actions. | ||
| 591 | * | ||
| 592 | * This function reverts a list of compat shader additions or removals. | ||
| 593 | * It is typically called when the execbuf ioctl call triggering these | ||
| 594 | * actions failed for some reason, and the command stream was never | ||
| 595 | * submitted. | ||
| 596 | */ | ||
| 597 | void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man, | ||
| 598 | struct list_head *list) | ||
| 599 | { | ||
| 600 | struct vmw_compat_shader *entry, *next; | ||
| 601 | int ret; | ||
| 602 | |||
| 603 | list_for_each_entry_safe(entry, next, list, head) { | ||
| 604 | switch (entry->state) { | ||
| 605 | case VMW_COMPAT_ADD: | ||
| 606 | vmw_compat_shader_free(man, entry); | ||
| 607 | break; | ||
| 608 | case VMW_COMPAT_DEL: | ||
| 609 | ret = drm_ht_insert_item(&man->shaders, &entry->hash); | ||
| 610 | list_del(&entry->head); | ||
| 611 | list_add_tail(&entry->head, &man->list); | ||
| 612 | entry->state = VMW_COMPAT_COMMITED; | ||
| 613 | break; | ||
| 614 | default: | ||
| 615 | BUG(); | ||
| 616 | break; | ||
| 617 | } | ||
| 618 | } | ||
| 619 | } | ||
| 620 | |||
| 621 | /** | ||
| 622 | * vmw_compat_shader_remove - Stage a compat shader for removal. | ||
| 623 | * | ||
| 624 | * @man: Pointer to the compat shader manager | ||
| 625 | * @user_key: The key that is used to identify the shader. The key is | ||
| 626 | * unique to the shader type. | ||
| 627 | * @shader_type: Shader type. | ||
| 628 | * @list: Caller's list of staged shader actions. | ||
| 629 | * | ||
| 630 | * This function stages a compat shader for removal and removes the key from | ||
| 631 | * the shader manager's hash table. If the shader was previously only staged | ||
| 632 | * for addition, it is completely removed (but the execbuf code may keep a | ||
| 633 | * reference if it was bound to a context between addition and removal). If | ||
| 634 | * it was previously committed to the manager, it is staged for removal. | ||
| 635 | */ | ||
| 636 | int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man, | ||
| 637 | u32 user_key, SVGA3dShaderType shader_type, | ||
| 638 | struct list_head *list) | ||
| 639 | { | ||
| 640 | struct vmw_compat_shader *entry; | ||
| 641 | struct drm_hash_item *hash; | ||
| 642 | int ret; | ||
| 643 | |||
| 644 | ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24), | ||
| 645 | &hash); | ||
| 646 | if (likely(ret != 0)) | ||
| 647 | return -EINVAL; | ||
| 648 | |||
| 649 | entry = drm_hash_entry(hash, struct vmw_compat_shader, hash); | ||
| 650 | |||
| 651 | switch (entry->state) { | ||
| 652 | case VMW_COMPAT_ADD: | ||
| 653 | vmw_compat_shader_free(man, entry); | ||
| 654 | break; | ||
| 655 | case VMW_COMPAT_COMMITED: | ||
| 656 | (void) drm_ht_remove_item(&man->shaders, &entry->hash); | ||
| 657 | list_del(&entry->head); | ||
| 658 | entry->state = VMW_COMPAT_DEL; | ||
| 659 | list_add_tail(&entry->head, list); | ||
| 660 | break; | ||
| 661 | default: | ||
| 662 | BUG(); | ||
| 663 | break; | ||
| 664 | } | ||
| 665 | |||
| 666 | return 0; | ||
| 667 | } | ||
| 668 | |||
| 669 | /** | ||
| 670 | * vmw_compat_shader_add - Create a compat shader and add the | ||
| 671 | * key to the manager | ||
| 672 | * | ||
| 673 | * @man: Pointer to the compat shader manager | ||
| 674 | * @user_key: The key that is used to identify the shader. The key is | ||
| 675 | * unique to the shader type. | ||
| 676 | * @bytecode: Pointer to the bytecode of the shader. | ||
| 677 | * @shader_type: Shader type. | ||
| 678 | * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is | ||
| 679 | * to be created with. | ||
| 680 | * @list: Caller's list of staged shader actions. | ||
| 681 | * | ||
| 682 | * Note that only the key is added to the shader manager's hash table. | ||
| 683 | * The shader is not yet added to the shader manager's list of shaders. | ||
| 684 | */ | ||
| 685 | int vmw_compat_shader_add(struct vmw_compat_shader_manager *man, | ||
| 686 | u32 user_key, const void *bytecode, | ||
| 687 | SVGA3dShaderType shader_type, | ||
| 688 | size_t size, | ||
| 689 | struct ttm_object_file *tfile, | ||
| 690 | struct list_head *list) | ||
| 691 | { | ||
| 692 | struct vmw_dma_buffer *buf; | ||
| 693 | struct ttm_bo_kmap_obj map; | ||
| 694 | bool is_iomem; | ||
| 695 | struct vmw_compat_shader *compat; | ||
| 696 | u32 handle; | ||
| 697 | int ret; | ||
| 698 | |||
| 699 | if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16) | ||
| 700 | return -EINVAL; | ||
| 701 | |||
| 702 | /* Allocate and pin a DMA buffer */ | ||
| 703 | buf = kzalloc(sizeof(*buf), GFP_KERNEL); | ||
| 704 | if (unlikely(buf == NULL)) | ||
| 705 | return -ENOMEM; | ||
| 706 | |||
| 707 | ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement, | ||
| 708 | true, vmw_dmabuf_bo_free); | ||
| 709 | if (unlikely(ret != 0)) | ||
| 710 | goto out; | ||
| 711 | |||
| 712 | ret = ttm_bo_reserve(&buf->base, false, true, false, NULL); | ||
| 713 | if (unlikely(ret != 0)) | ||
| 714 | goto no_reserve; | ||
| 715 | |||
| 716 | /* Map and copy shader bytecode. */ | ||
| 717 | ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT, | ||
| 718 | &map); | ||
| 719 | if (unlikely(ret != 0)) { | ||
| 720 | ttm_bo_unreserve(&buf->base); | ||
| 721 | goto no_reserve; | ||
| 722 | } | ||
| 723 | |||
| 724 | memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size); | ||
| 725 | WARN_ON(is_iomem); | ||
| 726 | |||
| 727 | ttm_bo_kunmap(&map); | ||
| 728 | ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true); | ||
| 729 | WARN_ON(ret != 0); | ||
| 730 | ttm_bo_unreserve(&buf->base); | ||
| 731 | |||
| 732 | /* Create a guest-backed shader container backed by the dma buffer */ | ||
| 733 | ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type, | ||
| 734 | tfile, &handle); | ||
| 735 | vmw_dmabuf_unreference(&buf); | ||
| 736 | if (unlikely(ret != 0)) | ||
| 737 | goto no_reserve; | ||
| 738 | /* | ||
| 739 | * Create a compat shader structure and stage it for insertion | ||
| 740 | * in the manager | ||
| 741 | */ | ||
| 742 | compat = kzalloc(sizeof(*compat), GFP_KERNEL); | ||
| 743 | if (compat == NULL) { | ||
| 744 | ret = -ENOMEM; | ||
| 745 | goto no_compat; | ||
| 746 | } | ||
| 745 | |||
| 746 | compat->hash.key = user_key | (shader_type << 24); | ||
| 747 | ret = drm_ht_insert_item(&man->shaders, &compat->hash); | ||
| 748 | if (unlikely(ret != 0)) | ||
| 749 | goto out_invalid_key; | ||
| 750 | |||
| 751 | compat->state = VMW_COMPAT_ADD; | ||
| 752 | compat->handle = handle; | ||
| 753 | compat->tfile = tfile; | ||
| 754 | list_add_tail(&compat->head, list); | ||
| 755 | |||
| 756 | return 0; | ||
| 757 | |||
| 758 | out_invalid_key: | ||
| 759 | kfree(compat); | ||
| 760 | no_compat: | ||
| 761 | ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); | ||
| 762 | no_reserve: | ||
| 763 | out: | ||
| 764 | return ret; | ||
| 765 | } | ||
| 766 | |||
| 767 | /** | ||
| 768 | * vmw_compat_shader_man_create - Create a compat shader manager | ||
| 769 | * | ||
| 770 | * @dev_priv: Pointer to a device private structure. | ||
| 771 | * | ||
| 772 | * Typically done at file open time. If successful returns a pointer to a | ||
| 773 | * compat shader manager. Otherwise returns an error pointer. | ||
| 774 | */ | ||
| 775 | struct vmw_compat_shader_manager * | ||
| 776 | vmw_compat_shader_man_create(struct vmw_private *dev_priv) | ||
| 777 | { | ||
| 778 | struct vmw_compat_shader_manager *man; | ||
| 779 | int ret; | ||
| 780 | |||
| 781 | man = kzalloc(sizeof(*man), GFP_KERNEL); | ||
| 782 | if (man == NULL) | ||
| 783 | return ERR_PTR(-ENOMEM); | ||
| 784 | |||
| 785 | man->dev_priv = dev_priv; | ||
| 786 | INIT_LIST_HEAD(&man->list); | ||
| 787 | ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER); | ||
| 788 | if (ret == 0) | ||
| 789 | return man; | ||
| 790 | |||
| 791 | kfree(man); | ||
| 792 | return ERR_PTR(ret); | ||
| 793 | } | ||
| 794 | |||
| 795 | /** | ||
| 796 | * vmw_compat_shader_man_destroy - Destroy a compat shader manager | ||
| 797 | * | ||
| 798 | * @man: Pointer to the shader manager to destroy. | ||
| 799 | * | ||
| 800 | * Typically done at file close time. | ||
| 801 | */ | ||
| 802 | void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man) | ||
| 803 | { | ||
| 804 | struct vmw_compat_shader *entry, *next; | ||
| 805 | |||
| 806 | mutex_lock(&man->dev_priv->cmdbuf_mutex); | ||
| 807 | list_for_each_entry_safe(entry, next, &man->list, head) | ||
| 808 | vmw_compat_shader_free(man, entry); | ||
| 809 | |||
| 810 | mutex_unlock(&man->dev_priv->cmdbuf_mutex); | ||
| 811 | kfree(man); | ||
| 812 | } | ||
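
The compat shader entry points above are meant to be driven from the command submission path: additions and removals are first staged on a caller-owned list and only committed once the corresponding command stream has reached the device, or reverted if submission fails. A minimal sketch of that calling pattern, using only functions defined in this file; the surrounding execbuf context (where the manager, tfile and bytecode come from, and the fact that the cmdbuf_mutex is held) is assumed:

#include "vmwgfx_drv.h"	/* assumed to declare the compat shader API */

static int example_stage_compat_shader(struct vmw_compat_shader_manager *man,
				       struct ttm_object_file *tfile,
				       const void *bytecode, size_t size,
				       bool submission_succeeded)
{
	struct list_head staged;
	u32 key = 0;	/* user key; lookup overwrites it with the TTM handle */
	int ret;

	INIT_LIST_HEAD(&staged);

	/* Stage creation of a guest-backed vertex shader under key 0. */
	ret = vmw_compat_shader_add(man, key, bytecode, SVGA3D_SHADERTYPE_VS,
				    size, tfile, &staged);
	if (ret)
		return ret;

	/* ... build and submit the command stream referencing the shader ... */

	if (!submission_succeeded) {
		/* Submission failed: undo all staged additions/removals. */
		vmw_compat_shaders_revert(man, &staged);
		return -EINVAL;
	}

	/* Submission succeeded: move staged entries onto the manager's list. */
	vmw_compat_shaders_commit(man, &staged);

	/* The guest-backed shader's TTM handle can now be looked up by key. */
	return vmw_compat_shader_lookup(man, SVGA3D_SHADERTYPE_VS, &key);
}
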
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 7de2ea8bd553..e7af580ab977 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | |||
| @@ -41,7 +41,6 @@ struct vmw_user_surface { | |||
| 41 | struct ttm_prime_object prime; | 41 | struct ttm_prime_object prime; |
| 42 | struct vmw_surface srf; | 42 | struct vmw_surface srf; |
| 43 | uint32_t size; | 43 | uint32_t size; |
| 44 | uint32_t backup_handle; | ||
| 45 | }; | 44 | }; |
| 46 | 45 | ||
| 47 | /** | 46 | /** |
| @@ -68,6 +67,14 @@ static int vmw_legacy_srf_unbind(struct vmw_resource *res, | |||
| 68 | struct ttm_validate_buffer *val_buf); | 67 | struct ttm_validate_buffer *val_buf); |
| 69 | static int vmw_legacy_srf_create(struct vmw_resource *res); | 68 | static int vmw_legacy_srf_create(struct vmw_resource *res); |
| 70 | static int vmw_legacy_srf_destroy(struct vmw_resource *res); | 69 | static int vmw_legacy_srf_destroy(struct vmw_resource *res); |
| 70 | static int vmw_gb_surface_create(struct vmw_resource *res); | ||
| 71 | static int vmw_gb_surface_bind(struct vmw_resource *res, | ||
| 72 | struct ttm_validate_buffer *val_buf); | ||
| 73 | static int vmw_gb_surface_unbind(struct vmw_resource *res, | ||
| 74 | bool readback, | ||
| 75 | struct ttm_validate_buffer *val_buf); | ||
| 76 | static int vmw_gb_surface_destroy(struct vmw_resource *res); | ||
| 77 | |||
| 71 | 78 | ||
| 72 | static const struct vmw_user_resource_conv user_surface_conv = { | 79 | static const struct vmw_user_resource_conv user_surface_conv = { |
| 73 | .object_type = VMW_RES_SURFACE, | 80 | .object_type = VMW_RES_SURFACE, |
| @@ -93,6 +100,18 @@ static const struct vmw_res_func vmw_legacy_surface_func = { | |||
| 93 | .unbind = &vmw_legacy_srf_unbind | 100 | .unbind = &vmw_legacy_srf_unbind |
| 94 | }; | 101 | }; |
| 95 | 102 | ||
| 103 | static const struct vmw_res_func vmw_gb_surface_func = { | ||
| 104 | .res_type = vmw_res_surface, | ||
| 105 | .needs_backup = true, | ||
| 106 | .may_evict = true, | ||
| 107 | .type_name = "guest backed surfaces", | ||
| 108 | .backup_placement = &vmw_mob_placement, | ||
| 109 | .create = vmw_gb_surface_create, | ||
| 110 | .destroy = vmw_gb_surface_destroy, | ||
| 111 | .bind = vmw_gb_surface_bind, | ||
| 112 | .unbind = vmw_gb_surface_unbind | ||
| 113 | }; | ||
| 114 | |||
| 96 | /** | 115 | /** |
| 97 | * struct vmw_surface_dma - SVGA3D DMA command | 116 | * struct vmw_surface_dma - SVGA3D DMA command |
| 98 | */ | 117 | */ |
| @@ -291,6 +310,11 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) | |||
| 291 | struct vmw_surface *srf; | 310 | struct vmw_surface *srf; |
| 292 | void *cmd; | 311 | void *cmd; |
| 293 | 312 | ||
| 313 | if (res->func->destroy == vmw_gb_surface_destroy) { | ||
| 314 | (void) vmw_gb_surface_destroy(res); | ||
| 315 | return; | ||
| 316 | } | ||
| 317 | |||
| 294 | if (res->id != -1) { | 318 | if (res->id != -1) { |
| 295 | 319 | ||
| 296 | cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); | 320 | cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); |
| @@ -549,12 +573,15 @@ static int vmw_surface_init(struct vmw_private *dev_priv, | |||
| 549 | struct vmw_resource *res = &srf->res; | 573 | struct vmw_resource *res = &srf->res; |
| 550 | 574 | ||
| 551 | BUG_ON(res_free == NULL); | 575 | BUG_ON(res_free == NULL); |
| 552 | (void) vmw_3d_resource_inc(dev_priv, false); | 576 | if (!dev_priv->has_mob) |
| 577 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
| 553 | ret = vmw_resource_init(dev_priv, res, true, res_free, | 578 | ret = vmw_resource_init(dev_priv, res, true, res_free, |
| 579 | (dev_priv->has_mob) ? &vmw_gb_surface_func : | ||
| 554 | &vmw_legacy_surface_func); | 580 | &vmw_legacy_surface_func); |
| 555 | 581 | ||
| 556 | if (unlikely(ret != 0)) { | 582 | if (unlikely(ret != 0)) { |
| 557 | vmw_3d_resource_dec(dev_priv, false); | 583 | if (!dev_priv->has_mob) |
| 584 | vmw_3d_resource_dec(dev_priv, false); | ||
| 558 | res_free(res); | 585 | res_free(res); |
| 559 | return ret; | 586 | return ret; |
| 560 | } | 587 | } |
| @@ -750,7 +777,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
| 750 | 777 | ||
| 751 | srf->base_size = *srf->sizes; | 778 | srf->base_size = *srf->sizes; |
| 752 | srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; | 779 | srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; |
| 753 | srf->multisample_count = 1; | 780 | srf->multisample_count = 0; |
| 754 | 781 | ||
| 755 | cur_bo_offset = 0; | 782 | cur_bo_offset = 0; |
| 756 | cur_offset = srf->offsets; | 783 | cur_offset = srf->offsets; |
| @@ -803,6 +830,24 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
| 803 | if (unlikely(ret != 0)) | 830 | if (unlikely(ret != 0)) |
| 804 | goto out_unlock; | 831 | goto out_unlock; |
| 805 | 832 | ||
| 833 | /* | ||
| 834 | * A gb-aware client referencing a shared surface will | ||
| 835 | * expect a backup buffer to be present. | ||
| 836 | */ | ||
| 837 | if (dev_priv->has_mob && req->shareable) { | ||
| 838 | uint32_t backup_handle; | ||
| 839 | |||
| 840 | ret = vmw_user_dmabuf_alloc(dev_priv, tfile, | ||
| 841 | res->backup_size, | ||
| 842 | true, | ||
| 843 | &backup_handle, | ||
| 844 | &res->backup); | ||
| 845 | if (unlikely(ret != 0)) { | ||
| 846 | vmw_resource_unreference(&res); | ||
| 847 | goto out_unlock; | ||
| 848 | } | ||
| 849 | } | ||
| 850 | |||
| 806 | tmp = vmw_resource_reference(&srf->res); | 851 | tmp = vmw_resource_reference(&srf->res); |
| 807 | ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, | 852 | ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, |
| 808 | req->shareable, VMW_RES_SURFACE, | 853 | req->shareable, VMW_RES_SURFACE, |
| @@ -843,6 +888,7 @@ out_unlock: | |||
| 843 | int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | 888 | int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, |
| 844 | struct drm_file *file_priv) | 889 | struct drm_file *file_priv) |
| 845 | { | 890 | { |
| 891 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 846 | union drm_vmw_surface_reference_arg *arg = | 892 | union drm_vmw_surface_reference_arg *arg = |
| 847 | (union drm_vmw_surface_reference_arg *)data; | 893 | (union drm_vmw_surface_reference_arg *)data; |
| 848 | struct drm_vmw_surface_arg *req = &arg->req; | 894 | struct drm_vmw_surface_arg *req = &arg->req; |
| @@ -854,7 +900,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | |||
| 854 | struct ttm_base_object *base; | 900 | struct ttm_base_object *base; |
| 855 | int ret = -EINVAL; | 901 | int ret = -EINVAL; |
| 856 | 902 | ||
| 857 | base = ttm_base_object_lookup(tfile, req->sid); | 903 | base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid); |
| 858 | if (unlikely(base == NULL)) { | 904 | if (unlikely(base == NULL)) { |
| 859 | DRM_ERROR("Could not find surface to reference.\n"); | 905 | DRM_ERROR("Could not find surface to reference.\n"); |
| 860 | return -EINVAL; | 906 | return -EINVAL; |
| @@ -880,8 +926,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | |||
| 880 | rep->size_addr; | 926 | rep->size_addr; |
| 881 | 927 | ||
| 882 | if (user_sizes) | 928 | if (user_sizes) |
| 883 | ret = copy_to_user(user_sizes, srf->sizes, | 929 | ret = copy_to_user(user_sizes, &srf->base_size, |
| 884 | srf->num_sizes * sizeof(*srf->sizes)); | 930 | sizeof(srf->base_size)); |
| 885 | if (unlikely(ret != 0)) { | 931 | if (unlikely(ret != 0)) { |
| 886 | DRM_ERROR("copy_to_user failed %p %u\n", | 932 | DRM_ERROR("copy_to_user failed %p %u\n", |
| 887 | user_sizes, srf->num_sizes); | 933 | user_sizes, srf->num_sizes); |
| @@ -893,3 +939,436 @@ out_no_reference: | |||
| 893 | 939 | ||
| 894 | return ret; | 940 | return ret; |
| 895 | } | 941 | } |
| 942 | |||
| 943 | /** | ||
| 944 | * vmw_gb_surface_create - Create a guest-backed surface by submitting a | ||
| 945 | * DefineGBSurface command to the device. | ||
| 946 | * | ||
| 947 | * @res: Pointer to the struct vmw_resource embedded in a struct vmw_surface. | ||
| 948 | */ | ||
| 949 | static int vmw_gb_surface_create(struct vmw_resource *res) | ||
| 950 | { | ||
| 951 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 952 | struct vmw_surface *srf = vmw_res_to_srf(res); | ||
| 953 | uint32_t cmd_len, submit_len; | ||
| 954 | int ret; | ||
| 955 | struct { | ||
| 956 | SVGA3dCmdHeader header; | ||
| 957 | SVGA3dCmdDefineGBSurface body; | ||
| 958 | } *cmd; | ||
| 959 | |||
| 960 | if (likely(res->id != -1)) | ||
| 961 | return 0; | ||
| 962 | |||
| 963 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
| 964 | ret = vmw_resource_alloc_id(res); | ||
| 965 | if (unlikely(ret != 0)) { | ||
| 966 | DRM_ERROR("Failed to allocate a surface id.\n"); | ||
| 967 | goto out_no_id; | ||
| 968 | } | ||
| 969 | |||
| 970 | if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) { | ||
| 971 | ret = -EBUSY; | ||
| 972 | goto out_no_fifo; | ||
| 973 | } | ||
| 974 | |||
| 975 | cmd_len = sizeof(cmd->body); | ||
| 976 | submit_len = sizeof(*cmd); | ||
| 977 | cmd = vmw_fifo_reserve(dev_priv, submit_len); | ||
| 978 | if (unlikely(cmd == NULL)) { | ||
| 979 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
| 980 | "creation.\n"); | ||
| 981 | ret = -ENOMEM; | ||
| 982 | goto out_no_fifo; | ||
| 983 | } | ||
| 984 | |||
| 985 | cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE; | ||
| 986 | cmd->header.size = cmd_len; | ||
| 987 | cmd->body.sid = srf->res.id; | ||
| 988 | cmd->body.surfaceFlags = srf->flags; | ||
| 989 | cmd->body.format = cpu_to_le32(srf->format); | ||
| 990 | cmd->body.numMipLevels = srf->mip_levels[0]; | ||
| 991 | cmd->body.multisampleCount = srf->multisample_count; | ||
| 992 | cmd->body.autogenFilter = srf->autogen_filter; | ||
| 993 | cmd->body.size.width = srf->base_size.width; | ||
| 994 | cmd->body.size.height = srf->base_size.height; | ||
| 995 | cmd->body.size.depth = srf->base_size.depth; | ||
| 996 | vmw_fifo_commit(dev_priv, submit_len); | ||
| 997 | |||
| 998 | return 0; | ||
| 999 | |||
| 1000 | out_no_fifo: | ||
| 1001 | vmw_resource_release_id(res); | ||
| 1002 | out_no_id: | ||
| 1003 | vmw_3d_resource_dec(dev_priv, false); | ||
| 1004 | return ret; | ||
| 1005 | } | ||
| 1006 | |||
| 1007 | |||
| 1008 | static int vmw_gb_surface_bind(struct vmw_resource *res, | ||
| 1009 | struct ttm_validate_buffer *val_buf) | ||
| 1010 | { | ||
| 1011 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 1012 | struct { | ||
| 1013 | SVGA3dCmdHeader header; | ||
| 1014 | SVGA3dCmdBindGBSurface body; | ||
| 1015 | } *cmd1; | ||
| 1016 | struct { | ||
| 1017 | SVGA3dCmdHeader header; | ||
| 1018 | SVGA3dCmdUpdateGBSurface body; | ||
| 1019 | } *cmd2; | ||
| 1020 | uint32_t submit_size; | ||
| 1021 | struct ttm_buffer_object *bo = val_buf->bo; | ||
| 1022 | |||
| 1023 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
| 1024 | |||
| 1025 | submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0); | ||
| 1026 | |||
| 1027 | cmd1 = vmw_fifo_reserve(dev_priv, submit_size); | ||
| 1028 | if (unlikely(cmd1 == NULL)) { | ||
| 1029 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
| 1030 | "binding.\n"); | ||
| 1031 | return -ENOMEM; | ||
| 1032 | } | ||
| 1033 | |||
| 1034 | cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE; | ||
| 1035 | cmd1->header.size = sizeof(cmd1->body); | ||
| 1036 | cmd1->body.sid = res->id; | ||
| 1037 | cmd1->body.mobid = bo->mem.start; | ||
| 1038 | if (res->backup_dirty) { | ||
| 1039 | cmd2 = (void *) &cmd1[1]; | ||
| 1040 | cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE; | ||
| 1041 | cmd2->header.size = sizeof(cmd2->body); | ||
| 1042 | cmd2->body.sid = res->id; | ||
| 1043 | res->backup_dirty = false; | ||
| 1044 | } | ||
| 1045 | vmw_fifo_commit(dev_priv, submit_size); | ||
| 1046 | |||
| 1047 | return 0; | ||
| 1048 | } | ||
| 1049 | |||
| 1050 | static int vmw_gb_surface_unbind(struct vmw_resource *res, | ||
| 1051 | bool readback, | ||
| 1052 | struct ttm_validate_buffer *val_buf) | ||
| 1053 | { | ||
| 1054 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 1055 | struct ttm_buffer_object *bo = val_buf->bo; | ||
| 1056 | struct vmw_fence_obj *fence; | ||
| 1057 | |||
| 1058 | struct { | ||
| 1059 | SVGA3dCmdHeader header; | ||
| 1060 | SVGA3dCmdReadbackGBSurface body; | ||
| 1061 | } *cmd1; | ||
| 1062 | struct { | ||
| 1063 | SVGA3dCmdHeader header; | ||
| 1064 | SVGA3dCmdInvalidateGBSurface body; | ||
| 1065 | } *cmd2; | ||
| 1066 | struct { | ||
| 1067 | SVGA3dCmdHeader header; | ||
| 1068 | SVGA3dCmdBindGBSurface body; | ||
| 1069 | } *cmd3; | ||
| 1070 | uint32_t submit_size; | ||
| 1071 | uint8_t *cmd; | ||
| 1072 | |||
| 1073 | |||
| 1074 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
| 1075 | |||
| 1076 | submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2)); | ||
| 1077 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | ||
| 1078 | if (unlikely(cmd == NULL)) { | ||
| 1079 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
| 1080 | "unbinding.\n"); | ||
| 1081 | return -ENOMEM; | ||
| 1082 | } | ||
| 1083 | |||
| 1084 | if (readback) { | ||
| 1085 | cmd1 = (void *) cmd; | ||
| 1086 | cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE; | ||
| 1087 | cmd1->header.size = sizeof(cmd1->body); | ||
| 1088 | cmd1->body.sid = res->id; | ||
| 1089 | cmd3 = (void *) &cmd1[1]; | ||
| 1090 | } else { | ||
| 1091 | cmd2 = (void *) cmd; | ||
| 1092 | cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE; | ||
| 1093 | cmd2->header.size = sizeof(cmd2->body); | ||
| 1094 | cmd2->body.sid = res->id; | ||
| 1095 | cmd3 = (void *) &cmd2[1]; | ||
| 1096 | } | ||
| 1097 | |||
| 1098 | cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE; | ||
| 1099 | cmd3->header.size = sizeof(cmd3->body); | ||
| 1100 | cmd3->body.sid = res->id; | ||
| 1101 | cmd3->body.mobid = SVGA3D_INVALID_ID; | ||
| 1102 | |||
| 1103 | vmw_fifo_commit(dev_priv, submit_size); | ||
| 1104 | |||
| 1105 | /* | ||
| 1106 | * Create a fence object and fence the backup buffer. | ||
| 1107 | */ | ||
| 1108 | |||
| 1109 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, | ||
| 1110 | &fence, NULL); | ||
| 1111 | |||
| 1112 | vmw_fence_single_bo(val_buf->bo, fence); | ||
| 1113 | |||
| 1114 | if (likely(fence != NULL)) | ||
| 1115 | vmw_fence_obj_unreference(&fence); | ||
| 1116 | |||
| 1117 | return 0; | ||
| 1118 | } | ||
| 1119 | |||
| 1120 | static int vmw_gb_surface_destroy(struct vmw_resource *res) | ||
| 1121 | { | ||
| 1122 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 1123 | struct { | ||
| 1124 | SVGA3dCmdHeader header; | ||
| 1125 | SVGA3dCmdDestroyGBSurface body; | ||
| 1126 | } *cmd; | ||
| 1127 | |||
| 1128 | if (likely(res->id == -1)) | ||
| 1129 | return 0; | ||
| 1130 | |||
| 1131 | mutex_lock(&dev_priv->binding_mutex); | ||
| 1132 | vmw_context_binding_res_list_scrub(&res->binding_head); | ||
| 1133 | |||
| 1134 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 1135 | if (unlikely(cmd == NULL)) { | ||
| 1136 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
| 1137 | "destruction.\n"); | ||
| 1138 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 1139 | return -ENOMEM; | ||
| 1140 | } | ||
| 1141 | |||
| 1142 | cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE; | ||
| 1143 | cmd->header.size = sizeof(cmd->body); | ||
| 1144 | cmd->body.sid = res->id; | ||
| 1145 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 1146 | mutex_unlock(&dev_priv->binding_mutex); | ||
| 1147 | vmw_resource_release_id(res); | ||
| 1148 | vmw_3d_resource_dec(dev_priv, false); | ||
| 1149 | |||
| 1150 | return 0; | ||
| 1151 | } | ||
| 1152 | |||
| 1153 | /** | ||
| 1154 | * vmw_gb_surface_define_ioctl - Ioctl function implementing | ||
| 1155 | * the user surface define functionality. | ||
| 1156 | * | ||
| 1157 | * @dev: Pointer to a struct drm_device. | ||
| 1158 | * @data: Pointer to data copied from / to user-space. | ||
| 1159 | * @file_priv: Pointer to a drm file private structure. | ||
| 1160 | */ | ||
| 1161 | int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | ||
| 1162 | struct drm_file *file_priv) | ||
| 1163 | { | ||
| 1164 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 1165 | struct vmw_user_surface *user_srf; | ||
| 1166 | struct vmw_surface *srf; | ||
| 1167 | struct vmw_resource *res; | ||
| 1168 | struct vmw_resource *tmp; | ||
| 1169 | union drm_vmw_gb_surface_create_arg *arg = | ||
| 1170 | (union drm_vmw_gb_surface_create_arg *)data; | ||
| 1171 | struct drm_vmw_gb_surface_create_req *req = &arg->req; | ||
| 1172 | struct drm_vmw_gb_surface_create_rep *rep = &arg->rep; | ||
| 1173 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
| 1174 | int ret; | ||
| 1175 | uint32_t size; | ||
| 1176 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
| 1177 | const struct svga3d_surface_desc *desc; | ||
| 1178 | uint32_t backup_handle; | ||
| 1179 | |||
| 1180 | if (unlikely(vmw_user_surface_size == 0)) | ||
| 1181 | vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + | ||
| 1182 | 128; | ||
| 1183 | |||
| 1184 | size = vmw_user_surface_size + 128; | ||
| 1185 | |||
| 1186 | desc = svga3dsurface_get_desc(req->format); | ||
| 1187 | if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) { | ||
| 1188 | DRM_ERROR("Invalid surface format for surface creation.\n"); | ||
| 1189 | return -EINVAL; | ||
| 1190 | } | ||
| 1191 | |||
| 1192 | ret = ttm_read_lock(&vmaster->lock, true); | ||
| 1193 | if (unlikely(ret != 0)) | ||
| 1194 | return ret; | ||
| 1195 | |||
| 1196 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), | ||
| 1197 | size, false, true); | ||
| 1198 | if (unlikely(ret != 0)) { | ||
| 1199 | if (ret != -ERESTARTSYS) | ||
| 1200 | DRM_ERROR("Out of graphics memory for surface" | ||
| 1201 | " creation.\n"); | ||
| 1202 | goto out_unlock; | ||
| 1203 | } | ||
| 1204 | |||
| 1205 | user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); | ||
| 1206 | if (unlikely(user_srf == NULL)) { | ||
| 1207 | ret = -ENOMEM; | ||
| 1208 | goto out_no_user_srf; | ||
| 1209 | } | ||
| 1210 | |||
| 1211 | srf = &user_srf->srf; | ||
| 1212 | res = &srf->res; | ||
| 1213 | |||
| 1214 | srf->flags = req->svga3d_flags; | ||
| 1215 | srf->format = req->format; | ||
| 1216 | srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout; | ||
| 1217 | srf->mip_levels[0] = req->mip_levels; | ||
| 1218 | srf->num_sizes = 1; | ||
| 1219 | srf->sizes = NULL; | ||
| 1220 | srf->offsets = NULL; | ||
| 1221 | user_srf->size = size; | ||
| 1222 | srf->base_size = req->base_size; | ||
| 1223 | srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; | ||
| 1224 | srf->multisample_count = req->multisample_count; | ||
| 1225 | res->backup_size = svga3dsurface_get_serialized_size | ||
| 1226 | (srf->format, srf->base_size, srf->mip_levels[0], | ||
| 1227 | srf->flags & SVGA3D_SURFACE_CUBEMAP); | ||
| 1228 | |||
| 1229 | user_srf->prime.base.shareable = false; | ||
| 1230 | user_srf->prime.base.tfile = NULL; | ||
| 1231 | |||
| 1232 | /** | ||
| 1233 | * From this point, the generic resource management functions | ||
| 1234 | * destroy the object on failure. | ||
| 1235 | */ | ||
| 1236 | |||
| 1237 | ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); | ||
| 1238 | if (unlikely(ret != 0)) | ||
| 1239 | goto out_unlock; | ||
| 1240 | |||
| 1241 | if (req->buffer_handle != SVGA3D_INVALID_ID) { | ||
| 1242 | ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, | ||
| 1243 | &res->backup); | ||
| 1244 | } else if (req->drm_surface_flags & | ||
| 1245 | drm_vmw_surface_flag_create_buffer) | ||
| 1246 | ret = vmw_user_dmabuf_alloc(dev_priv, tfile, | ||
| 1247 | res->backup_size, | ||
| 1248 | req->drm_surface_flags & | ||
| 1249 | drm_vmw_surface_flag_shareable, | ||
| 1250 | &backup_handle, | ||
| 1251 | &res->backup); | ||
| 1252 | |||
| 1253 | if (unlikely(ret != 0)) { | ||
| 1254 | vmw_resource_unreference(&res); | ||
| 1255 | goto out_unlock; | ||
| 1256 | } | ||
| 1257 | |||
| 1258 | tmp = vmw_resource_reference(&srf->res); | ||
| 1259 | ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, | ||
| 1260 | req->drm_surface_flags & | ||
| 1261 | drm_vmw_surface_flag_shareable, | ||
| 1262 | VMW_RES_SURFACE, | ||
| 1263 | &vmw_user_surface_base_release, NULL); | ||
| 1264 | |||
| 1265 | if (unlikely(ret != 0)) { | ||
| 1266 | vmw_resource_unreference(&tmp); | ||
| 1267 | vmw_resource_unreference(&res); | ||
| 1268 | goto out_unlock; | ||
| 1269 | } | ||
| 1270 | |||
| 1271 | rep->handle = user_srf->prime.base.hash.key; | ||
| 1272 | rep->backup_size = res->backup_size; | ||
| 1273 | if (res->backup) { | ||
| 1274 | rep->buffer_map_handle = | ||
| 1275 | drm_vma_node_offset_addr(&res->backup->base.vma_node); | ||
| 1276 | rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE; | ||
| 1277 | rep->buffer_handle = backup_handle; | ||
| 1278 | } else { | ||
| 1279 | rep->buffer_map_handle = 0; | ||
| 1280 | rep->buffer_size = 0; | ||
| 1281 | rep->buffer_handle = SVGA3D_INVALID_ID; | ||
| 1282 | } | ||
| 1283 | |||
| 1284 | vmw_resource_unreference(&res); | ||
| 1285 | |||
| 1286 | ttm_read_unlock(&vmaster->lock); | ||
| 1287 | return 0; | ||
| 1288 | out_no_user_srf: | ||
| 1289 | ttm_mem_global_free(vmw_mem_glob(dev_priv), size); | ||
| 1290 | out_unlock: | ||
| 1291 | ttm_read_unlock(&vmaster->lock); | ||
| 1292 | return ret; | ||
| 1293 | } | ||
| 1294 | |||
| 1295 | /** | ||
| 1296 | * vmw_gb_surface_reference_ioctl - Ioctl function implementing | ||
| 1297 | * the user surface reference functionality. | ||
| 1298 | * | ||
| 1299 | * @dev: Pointer to a struct drm_device. | ||
| 1300 | * @data: Pointer to data copied from / to user-space. | ||
| 1301 | * @file_priv: Pointer to a drm file private structure. | ||
| 1302 | */ | ||
| 1303 | int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, | ||
| 1304 | struct drm_file *file_priv) | ||
| 1305 | { | ||
| 1306 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 1307 | union drm_vmw_gb_surface_reference_arg *arg = | ||
| 1308 | (union drm_vmw_gb_surface_reference_arg *)data; | ||
| 1309 | struct drm_vmw_surface_arg *req = &arg->req; | ||
| 1310 | struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep; | ||
| 1311 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
| 1312 | struct vmw_surface *srf; | ||
| 1313 | struct vmw_user_surface *user_srf; | ||
| 1314 | struct ttm_base_object *base; | ||
| 1315 | uint32_t backup_handle; | ||
| 1316 | int ret = -EINVAL; | ||
| 1317 | |||
| 1318 | base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid); | ||
| 1319 | if (unlikely(base == NULL)) { | ||
| 1320 | DRM_ERROR("Could not find surface to reference.\n"); | ||
| 1321 | return -EINVAL; | ||
| 1322 | } | ||
| 1323 | |||
| 1324 | if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) | ||
| 1325 | goto out_bad_resource; | ||
| 1326 | |||
| 1327 | user_srf = container_of(base, struct vmw_user_surface, prime.base); | ||
| 1328 | srf = &user_srf->srf; | ||
| 1329 | if (srf->res.backup == NULL) { | ||
| 1330 | DRM_ERROR("Shared GB surface is missing a backup buffer.\n"); | ||
| 1331 | goto out_bad_resource; | ||
| 1332 | } | ||
| 1333 | |||
| 1334 | ret = ttm_ref_object_add(tfile, &user_srf->prime.base, | ||
| 1335 | TTM_REF_USAGE, NULL); | ||
| 1336 | if (unlikely(ret != 0)) { | ||
| 1337 | DRM_ERROR("Could not add a reference to a GB surface.\n"); | ||
| 1338 | goto out_bad_resource; | ||
| 1339 | } | ||
| 1340 | |||
| 1341 | mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ | ||
| 1342 | ret = vmw_user_dmabuf_reference(tfile, srf->res.backup, | ||
| 1343 | &backup_handle); | ||
| 1344 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
| 1345 | |||
| 1346 | if (unlikely(ret != 0)) { | ||
| 1347 | DRM_ERROR("Could not add a reference to a GB surface " | ||
| 1348 | "backup buffer.\n"); | ||
| 1349 | (void) ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, | ||
| 1350 | req->sid, | ||
| 1351 | TTM_REF_USAGE); | ||
| 1352 | goto out_bad_resource; | ||
| 1353 | } | ||
| 1354 | |||
| 1355 | rep->creq.svga3d_flags = srf->flags; | ||
| 1356 | rep->creq.format = srf->format; | ||
| 1357 | rep->creq.mip_levels = srf->mip_levels[0]; | ||
| 1358 | rep->creq.drm_surface_flags = 0; | ||
| 1359 | rep->creq.multisample_count = srf->multisample_count; | ||
| 1360 | rep->creq.autogen_filter = srf->autogen_filter; | ||
| 1361 | rep->creq.buffer_handle = backup_handle; | ||
| 1362 | rep->creq.base_size = srf->base_size; | ||
| 1363 | rep->crep.handle = user_srf->prime.base.hash.key; | ||
| 1364 | rep->crep.backup_size = srf->res.backup_size; | ||
| 1365 | rep->crep.buffer_handle = backup_handle; | ||
| 1366 | rep->crep.buffer_map_handle = | ||
| 1367 | drm_vma_node_offset_addr(&srf->res.backup->base.vma_node); | ||
| 1368 | rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE; | ||
| 1369 | |||
| 1370 | out_bad_resource: | ||
| 1371 | ttm_base_object_unref(&base); | ||
| 1372 | |||
| 1373 | return ret; | ||
| 1374 | } | ||
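
For reference, a hypothetical user-space sketch of driving the new GB surface define ioctl through libdrm. The DRM_VMW_GB_SURFACE_CREATE command number, drmCommandWriteRead(), the uapi header path and the numeric SVGA3D constants are assumptions taken from vmwgfx_drm.h and libdrm conventions rather than from this patch; only the kernel-side handler is shown above:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>	/* header path assumed; ships with libdrm */

static int gb_surface_create_example(int fd, uint32_t *sid)
{
	union drm_vmw_gb_surface_create_arg arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.req.svga3d_flags = 0;
	arg.req.format = 1;			/* SVGA3D_X8R8G8B8 */
	arg.req.mip_levels = 1;
	arg.req.multisample_count = 0;
	arg.req.autogen_filter = 0;		/* SVGA3D_TEX_FILTER_NONE */
	arg.req.buffer_handle = (uint32_t)-1;	/* SVGA3D_INVALID_ID: no existing buffer */
	arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;
	arg.req.base_size.width = 64;
	arg.req.base_size.height = 64;
	arg.req.base_size.depth = 1;

	ret = drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE,
				  &arg, sizeof(arg));
	if (ret)
		return ret;

	*sid = arg.rep.handle;	/* surface handle usable in 3D commands */
	/* arg.rep.buffer_handle / buffer_map_handle describe the backup MOB. */
	return 0;
}
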
