diff options
author | Thomas Hellstrom <thellstrom@vmware.com> | 2012-11-21 05:45:13 -0500 |
---|---|---|
committer | Thomas Hellstrom <thellstrom@vmware.com> | 2014-01-17 01:52:26 -0500 |
commit | a97e21923b421993258e8487f2a5700c1ba3897f (patch) | |
tree | 7c5f9513eacf947564f46c06e1df205b2f95d151 /drivers/gpu/drm/vmwgfx | |
parent | 58a0c5f036464bd891880b30bde196320e904b81 (diff) |
drm/vmwgfx: Hook up guest-backed surfaces
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Reviewed-by: Zack Rusin <zackr@vmware.com>
Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 12 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 13 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 231 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 5 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 451 |
5 files changed, 708 insertions, 4 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index eaffb0524092..84f8f4c4ad0c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -112,6 +112,12 @@ | |||
112 | #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ | 112 | #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ |
113 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ | 113 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ |
114 | struct drm_vmw_update_layout_arg) | 114 | struct drm_vmw_update_layout_arg) |
115 | #define DRM_IOCTL_VMW_GB_SURFACE_CREATE \ | ||
116 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \ | ||
117 | union drm_vmw_gb_surface_create_arg) | ||
118 | #define DRM_IOCTL_VMW_GB_SURFACE_REF \ | ||
119 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \ | ||
120 | union drm_vmw_gb_surface_reference_arg) | ||
115 | 121 | ||
116 | /** | 122 | /** |
117 | * The core DRM version of this macro doesn't account for | 123 | * The core DRM version of this macro doesn't account for |
@@ -177,6 +183,12 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { | |||
177 | VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, | 183 | VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, |
178 | vmw_kms_update_layout_ioctl, | 184 | vmw_kms_update_layout_ioctl, |
179 | DRM_MASTER | DRM_UNLOCKED), | 185 | DRM_MASTER | DRM_UNLOCKED), |
186 | VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE, | ||
187 | vmw_gb_surface_define_ioctl, | ||
188 | DRM_AUTH | DRM_UNLOCKED), | ||
189 | VMW_IOCTL_DEF(VMW_GB_SURFACE_REF, | ||
190 | vmw_gb_surface_reference_ioctl, | ||
191 | DRM_AUTH | DRM_UNLOCKED), | ||
180 | }; | 192 | }; |
181 | 193 | ||
182 | static struct pci_device_id vmw_pci_id_list[] = { | 194 | static struct pci_device_id vmw_pci_id_list[] = { |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 3d672adf0ea4..71da388a8081 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -528,6 +528,10 @@ extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
528 | struct drm_file *file_priv); | 528 | struct drm_file *file_priv); |
529 | extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | 529 | extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, |
530 | struct drm_file *file_priv); | 530 | struct drm_file *file_priv); |
531 | extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | ||
532 | struct drm_file *file_priv); | ||
533 | extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, | ||
534 | struct drm_file *file_priv); | ||
531 | extern int vmw_surface_check(struct vmw_private *dev_priv, | 535 | extern int vmw_surface_check(struct vmw_private *dev_priv, |
532 | struct ttm_object_file *tfile, | 536 | struct ttm_object_file *tfile, |
533 | uint32_t handle, int *id); | 537 | uint32_t handle, int *id); |
@@ -541,6 +545,15 @@ extern int vmw_dmabuf_init(struct vmw_private *dev_priv, | |||
541 | void (*bo_free) (struct ttm_buffer_object *bo)); | 545 | void (*bo_free) (struct ttm_buffer_object *bo)); |
542 | extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, | 546 | extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, |
543 | struct ttm_object_file *tfile); | 547 | struct ttm_object_file *tfile); |
548 | extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, | ||
549 | struct ttm_object_file *tfile, | ||
550 | uint32_t size, | ||
551 | bool shareable, | ||
552 | uint32_t *handle, | ||
553 | struct vmw_dma_buffer **p_dma_buf); | ||
554 | extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, | ||
555 | struct vmw_dma_buffer *dma_buf, | ||
556 | uint32_t *handle); | ||
544 | extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | 557 | extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, |
545 | struct drm_file *file_priv); | 558 | struct drm_file *file_priv); |
546 | extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, | 559 | extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index c2a6e4832e74..4d51ad0a2f51 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -1186,6 +1186,222 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, | |||
1186 | } | 1186 | } |
1187 | 1187 | ||
1188 | /** | 1188 | /** |
1189 | * vmw_cmd_switch_backup - Utility function to handle backup buffer switching | ||
1190 | * | ||
1191 | * @dev_priv: Pointer to a device private struct. | ||
1192 | * @sw_context: The software context being used for this batch. | ||
1193 | * @res_type: The resource type. | ||
1194 | * @converter: Information about user-space binding for this resource type. | ||
1195 | * @res_id: Pointer to the user-space resource handle in the command stream. | ||
1196 | * @buf_id: Pointer to the user-space backup buffer handle in the command | ||
1197 | * stream. | ||
1198 | * @backup_offset: Offset of backup into MOB. | ||
1199 | * | ||
1200 | * This function prepares for registering a switch of backup buffers | ||
1201 | * in the resource metadata just prior to unreserving. | ||
1202 | */ | ||
1203 | static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, | ||
1204 | struct vmw_sw_context *sw_context, | ||
1205 | enum vmw_res_type res_type, | ||
1206 | const struct vmw_user_resource_conv | ||
1207 | *converter, | ||
1208 | uint32_t *res_id, | ||
1209 | uint32_t *buf_id, | ||
1210 | unsigned long backup_offset) | ||
1211 | { | ||
1212 | int ret; | ||
1213 | struct vmw_dma_buffer *dma_buf; | ||
1214 | struct vmw_resource_val_node *val_node; | ||
1215 | |||
1216 | ret = vmw_cmd_res_check(dev_priv, sw_context, res_type, | ||
1217 | converter, res_id, &val_node); | ||
1218 | if (unlikely(ret != 0)) | ||
1219 | return ret; | ||
1220 | |||
1221 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf); | ||
1222 | if (unlikely(ret != 0)) | ||
1223 | return ret; | ||
1224 | |||
1225 | if (val_node->first_usage) | ||
1226 | val_node->no_buffer_needed = true; | ||
1227 | |||
1228 | vmw_dmabuf_unreference(&val_node->new_backup); | ||
1229 | val_node->new_backup = dma_buf; | ||
1230 | val_node->new_backup_offset = backup_offset; | ||
1231 | |||
1232 | return 0; | ||
1233 | } | ||
1234 | |||
1235 | /** | ||
1236 | * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE | ||
1237 | * command | ||
1238 | * | ||
1239 | * @dev_priv: Pointer to a device private struct. | ||
1240 | * @sw_context: The software context being used for this batch. | ||
1241 | * @header: Pointer to the command header in the command stream. | ||
1242 | */ | ||
1243 | static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv, | ||
1244 | struct vmw_sw_context *sw_context, | ||
1245 | SVGA3dCmdHeader *header) | ||
1246 | { | ||
1247 | struct vmw_bind_gb_surface_cmd { | ||
1248 | SVGA3dCmdHeader header; | ||
1249 | SVGA3dCmdBindGBSurface body; | ||
1250 | } *cmd; | ||
1251 | |||
1252 | cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header); | ||
1253 | |||
1254 | return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface, | ||
1255 | user_surface_converter, | ||
1256 | &cmd->body.sid, &cmd->body.mobid, | ||
1257 | 0); | ||
1258 | } | ||
1259 | |||
1260 | /** | ||
1261 | * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE | ||
1262 | * command | ||
1263 | * | ||
1264 | * @dev_priv: Pointer to a device private struct. | ||
1265 | * @sw_context: The software context being used for this batch. | ||
1266 | * @header: Pointer to the command header in the command stream. | ||
1267 | */ | ||
1268 | static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv, | ||
1269 | struct vmw_sw_context *sw_context, | ||
1270 | SVGA3dCmdHeader *header) | ||
1271 | { | ||
1272 | struct vmw_gb_surface_cmd { | ||
1273 | SVGA3dCmdHeader header; | ||
1274 | SVGA3dCmdUpdateGBImage body; | ||
1275 | } *cmd; | ||
1276 | |||
1277 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
1278 | |||
1279 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
1280 | user_surface_converter, | ||
1281 | &cmd->body.image.sid, NULL); | ||
1282 | } | ||
1283 | |||
1284 | /** | ||
1285 | * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE | ||
1286 | * command | ||
1287 | * | ||
1288 | * @dev_priv: Pointer to a device private struct. | ||
1289 | * @sw_context: The software context being used for this batch. | ||
1290 | * @header: Pointer to the command header in the command stream. | ||
1291 | */ | ||
1292 | static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv, | ||
1293 | struct vmw_sw_context *sw_context, | ||
1294 | SVGA3dCmdHeader *header) | ||
1295 | { | ||
1296 | struct vmw_gb_surface_cmd { | ||
1297 | SVGA3dCmdHeader header; | ||
1298 | SVGA3dCmdUpdateGBSurface body; | ||
1299 | } *cmd; | ||
1300 | |||
1301 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
1302 | |||
1303 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
1304 | user_surface_converter, | ||
1305 | &cmd->body.sid, NULL); | ||
1306 | } | ||
1307 | |||
1308 | /** | ||
1309 | * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE | ||
1310 | * command | ||
1311 | * | ||
1312 | * @dev_priv: Pointer to a device private struct. | ||
1313 | * @sw_context: The software context being used for this batch. | ||
1314 | * @header: Pointer to the command header in the command stream. | ||
1315 | */ | ||
1316 | static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv, | ||
1317 | struct vmw_sw_context *sw_context, | ||
1318 | SVGA3dCmdHeader *header) | ||
1319 | { | ||
1320 | struct vmw_gb_surface_cmd { | ||
1321 | SVGA3dCmdHeader header; | ||
1322 | SVGA3dCmdReadbackGBImage body; | ||
1323 | } *cmd; | ||
1324 | |||
1325 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
1326 | |||
1327 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
1328 | user_surface_converter, | ||
1329 | &cmd->body.image.sid, NULL); | ||
1330 | } | ||
1331 | |||
1332 | /** | ||
1333 | * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE | ||
1334 | * command | ||
1335 | * | ||
1336 | * @dev_priv: Pointer to a device private struct. | ||
1337 | * @sw_context: The software context being used for this batch. | ||
1338 | * @header: Pointer to the command header in the command stream. | ||
1339 | */ | ||
1340 | static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv, | ||
1341 | struct vmw_sw_context *sw_context, | ||
1342 | SVGA3dCmdHeader *header) | ||
1343 | { | ||
1344 | struct vmw_gb_surface_cmd { | ||
1345 | SVGA3dCmdHeader header; | ||
1346 | SVGA3dCmdReadbackGBSurface body; | ||
1347 | } *cmd; | ||
1348 | |||
1349 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
1350 | |||
1351 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
1352 | user_surface_converter, | ||
1353 | &cmd->body.sid, NULL); | ||
1354 | } | ||
1355 | |||
1356 | /** | ||
1357 | * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE | ||
1358 | * command | ||
1359 | * | ||
1360 | * @dev_priv: Pointer to a device private struct. | ||
1361 | * @sw_context: The software context being used for this batch. | ||
1362 | * @header: Pointer to the command header in the command stream. | ||
1363 | */ | ||
1364 | static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv, | ||
1365 | struct vmw_sw_context *sw_context, | ||
1366 | SVGA3dCmdHeader *header) | ||
1367 | { | ||
1368 | struct vmw_gb_surface_cmd { | ||
1369 | SVGA3dCmdHeader header; | ||
1370 | SVGA3dCmdInvalidateGBImage body; | ||
1371 | } *cmd; | ||
1372 | |||
1373 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
1374 | |||
1375 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
1376 | user_surface_converter, | ||
1377 | &cmd->body.image.sid, NULL); | ||
1378 | } | ||
1379 | |||
1380 | /** | ||
1381 | * vmw_cmd_invalidate_gb_surface - Validate an | ||
1382 | * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command | ||
1383 | * | ||
1384 | * @dev_priv: Pointer to a device private struct. | ||
1385 | * @sw_context: The software context being used for this batch. | ||
1386 | * @header: Pointer to the command header in the command stream. | ||
1387 | */ | ||
1388 | static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv, | ||
1389 | struct vmw_sw_context *sw_context, | ||
1390 | SVGA3dCmdHeader *header) | ||
1391 | { | ||
1392 | struct vmw_gb_surface_cmd { | ||
1393 | SVGA3dCmdHeader header; | ||
1394 | SVGA3dCmdInvalidateGBSurface body; | ||
1395 | } *cmd; | ||
1396 | |||
1397 | cmd = container_of(header, struct vmw_gb_surface_cmd, header); | ||
1398 | |||
1399 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
1400 | user_surface_converter, | ||
1401 | &cmd->body.sid, NULL); | ||
1402 | } | ||
1403 | |||
1404 | /** | ||
1189 | * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER | 1405 | * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER |
1190 | * command | 1406 | * command |
1191 | * | 1407 | * |
@@ -1300,6 +1516,21 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = { | |||
1300 | VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid), | 1516 | VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid), |
1301 | VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid), | 1517 | VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid), |
1302 | VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid), | 1518 | VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid), |
1519 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid), | ||
1520 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid), | ||
1521 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface), | ||
1522 | VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid), | ||
1523 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image), | ||
1524 | VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE, | ||
1525 | &vmw_cmd_update_gb_surface), | ||
1526 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE, | ||
1527 | &vmw_cmd_readback_gb_image), | ||
1528 | VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE, | ||
1529 | &vmw_cmd_readback_gb_surface), | ||
1530 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE, | ||
1531 | &vmw_cmd_invalidate_gb_image), | ||
1532 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE, | ||
1533 | &vmw_cmd_invalidate_gb_surface), | ||
1303 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid), | 1534 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid), |
1304 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid), | 1535 | VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid), |
1305 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid), | 1536 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid), |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 6cd1560c1547..b40978f0ca96 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -593,7 +593,8 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, | |||
593 | } | 593 | } |
594 | 594 | ||
595 | int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, | 595 | int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, |
596 | struct vmw_dma_buffer *dma_buf) | 596 | struct vmw_dma_buffer *dma_buf, |
597 | uint32_t *handle) | ||
597 | { | 598 | { |
598 | struct vmw_user_dma_buffer *user_bo; | 599 | struct vmw_user_dma_buffer *user_bo; |
599 | 600 | ||
@@ -601,6 +602,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, | |||
601 | return -EINVAL; | 602 | return -EINVAL; |
602 | 603 | ||
603 | user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); | 604 | user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); |
605 | |||
606 | *handle = user_bo->prime.base.hash.key; | ||
604 | return ttm_ref_object_add(tfile, &user_bo->prime.base, | 607 | return ttm_ref_object_add(tfile, &user_bo->prime.base, |
605 | TTM_REF_USAGE, NULL); | 608 | TTM_REF_USAGE, NULL); |
606 | } | 609 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 0fc93398bba2..c3b53e1bafb8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | |||
@@ -41,7 +41,6 @@ struct vmw_user_surface { | |||
41 | struct ttm_prime_object prime; | 41 | struct ttm_prime_object prime; |
42 | struct vmw_surface srf; | 42 | struct vmw_surface srf; |
43 | uint32_t size; | 43 | uint32_t size; |
44 | uint32_t backup_handle; | ||
45 | }; | 44 | }; |
46 | 45 | ||
47 | /** | 46 | /** |
@@ -68,6 +67,14 @@ static int vmw_legacy_srf_unbind(struct vmw_resource *res, | |||
68 | struct ttm_validate_buffer *val_buf); | 67 | struct ttm_validate_buffer *val_buf); |
69 | static int vmw_legacy_srf_create(struct vmw_resource *res); | 68 | static int vmw_legacy_srf_create(struct vmw_resource *res); |
70 | static int vmw_legacy_srf_destroy(struct vmw_resource *res); | 69 | static int vmw_legacy_srf_destroy(struct vmw_resource *res); |
70 | static int vmw_gb_surface_create(struct vmw_resource *res); | ||
71 | static int vmw_gb_surface_bind(struct vmw_resource *res, | ||
72 | struct ttm_validate_buffer *val_buf); | ||
73 | static int vmw_gb_surface_unbind(struct vmw_resource *res, | ||
74 | bool readback, | ||
75 | struct ttm_validate_buffer *val_buf); | ||
76 | static int vmw_gb_surface_destroy(struct vmw_resource *res); | ||
77 | |||
71 | 78 | ||
72 | static const struct vmw_user_resource_conv user_surface_conv = { | 79 | static const struct vmw_user_resource_conv user_surface_conv = { |
73 | .object_type = VMW_RES_SURFACE, | 80 | .object_type = VMW_RES_SURFACE, |
@@ -93,6 +100,18 @@ static const struct vmw_res_func vmw_legacy_surface_func = { | |||
93 | .unbind = &vmw_legacy_srf_unbind | 100 | .unbind = &vmw_legacy_srf_unbind |
94 | }; | 101 | }; |
95 | 102 | ||
103 | static const struct vmw_res_func vmw_gb_surface_func = { | ||
104 | .res_type = vmw_res_surface, | ||
105 | .needs_backup = true, | ||
106 | .may_evict = true, | ||
107 | .type_name = "guest backed surfaces", | ||
108 | .backup_placement = &vmw_mob_placement, | ||
109 | .create = vmw_gb_surface_create, | ||
110 | .destroy = vmw_gb_surface_destroy, | ||
111 | .bind = vmw_gb_surface_bind, | ||
112 | .unbind = vmw_gb_surface_unbind | ||
113 | }; | ||
114 | |||
96 | /** | 115 | /** |
97 | * struct vmw_surface_dma - SVGA3D DMA command | 116 | * struct vmw_surface_dma - SVGA3D DMA command |
98 | */ | 117 | */ |
@@ -291,6 +310,11 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) | |||
291 | struct vmw_surface *srf; | 310 | struct vmw_surface *srf; |
292 | void *cmd; | 311 | void *cmd; |
293 | 312 | ||
313 | if (res->func->destroy == vmw_gb_surface_destroy) { | ||
314 | (void) vmw_gb_surface_destroy(res); | ||
315 | return; | ||
316 | } | ||
317 | |||
294 | if (res->id != -1) { | 318 | if (res->id != -1) { |
295 | 319 | ||
296 | cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); | 320 | cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); |
@@ -549,12 +573,15 @@ static int vmw_surface_init(struct vmw_private *dev_priv, | |||
549 | struct vmw_resource *res = &srf->res; | 573 | struct vmw_resource *res = &srf->res; |
550 | 574 | ||
551 | BUG_ON(res_free == NULL); | 575 | BUG_ON(res_free == NULL); |
552 | (void) vmw_3d_resource_inc(dev_priv, false); | 576 | if (!dev_priv->has_mob) |
577 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
553 | ret = vmw_resource_init(dev_priv, res, true, res_free, | 578 | ret = vmw_resource_init(dev_priv, res, true, res_free, |
579 | (dev_priv->has_mob) ? &vmw_gb_surface_func : | ||
554 | &vmw_legacy_surface_func); | 580 | &vmw_legacy_surface_func); |
555 | 581 | ||
556 | if (unlikely(ret != 0)) { | 582 | if (unlikely(ret != 0)) { |
557 | vmw_3d_resource_dec(dev_priv, false); | 583 | if (!dev_priv->has_mob) |
584 | vmw_3d_resource_dec(dev_priv, false); | ||
558 | res_free(res); | 585 | res_free(res); |
559 | return ret; | 586 | return ret; |
560 | } | 587 | } |
@@ -894,3 +921,421 @@ out_no_reference: | |||
894 | 921 | ||
895 | return ret; | 922 | return ret; |
896 | } | 923 | } |
924 | |||
925 | /** | ||
926 | * vmw_surface_define_encode - Encode a surface_define command. | ||
927 | * | ||
928 | * @srf: Pointer to a struct vmw_surface object. | ||
929 | * @cmd_space: Pointer to memory area in which the commands should be encoded. | ||
930 | */ | ||
931 | static int vmw_gb_surface_create(struct vmw_resource *res) | ||
932 | { | ||
933 | struct vmw_private *dev_priv = res->dev_priv; | ||
934 | struct vmw_surface *srf = vmw_res_to_srf(res); | ||
935 | uint32_t cmd_len, submit_len; | ||
936 | int ret; | ||
937 | struct { | ||
938 | SVGA3dCmdHeader header; | ||
939 | SVGA3dCmdDefineGBSurface body; | ||
940 | } *cmd; | ||
941 | |||
942 | if (likely(res->id != -1)) | ||
943 | return 0; | ||
944 | |||
945 | (void) vmw_3d_resource_inc(dev_priv, false); | ||
946 | ret = vmw_resource_alloc_id(res); | ||
947 | if (unlikely(ret != 0)) { | ||
948 | DRM_ERROR("Failed to allocate a surface id.\n"); | ||
949 | goto out_no_id; | ||
950 | } | ||
951 | |||
952 | if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) { | ||
953 | ret = -EBUSY; | ||
954 | goto out_no_fifo; | ||
955 | } | ||
956 | |||
957 | cmd_len = sizeof(cmd->body); | ||
958 | submit_len = sizeof(*cmd); | ||
959 | cmd = vmw_fifo_reserve(dev_priv, submit_len); | ||
960 | if (unlikely(cmd == NULL)) { | ||
961 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
962 | "creation.\n"); | ||
963 | ret = -ENOMEM; | ||
964 | goto out_no_fifo; | ||
965 | } | ||
966 | |||
967 | cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE; | ||
968 | cmd->header.size = cmd_len; | ||
969 | cmd->body.sid = srf->res.id; | ||
970 | cmd->body.surfaceFlags = srf->flags; | ||
971 | cmd->body.format = cpu_to_le32(srf->format); | ||
972 | cmd->body.numMipLevels = srf->mip_levels[0]; | ||
973 | cmd->body.multisampleCount = srf->multisample_count; | ||
974 | cmd->body.autogenFilter = srf->autogen_filter; | ||
975 | cmd->body.size.width = srf->base_size.width; | ||
976 | cmd->body.size.height = srf->base_size.height; | ||
977 | cmd->body.size.depth = srf->base_size.depth; | ||
978 | vmw_fifo_commit(dev_priv, submit_len); | ||
979 | |||
980 | return 0; | ||
981 | |||
982 | out_no_fifo: | ||
983 | vmw_resource_release_id(res); | ||
984 | out_no_id: | ||
985 | vmw_3d_resource_dec(dev_priv, false); | ||
986 | return ret; | ||
987 | } | ||
988 | |||
989 | |||
990 | static int vmw_gb_surface_bind(struct vmw_resource *res, | ||
991 | struct ttm_validate_buffer *val_buf) | ||
992 | { | ||
993 | struct vmw_private *dev_priv = res->dev_priv; | ||
994 | struct { | ||
995 | SVGA3dCmdHeader header; | ||
996 | SVGA3dCmdBindGBSurface body; | ||
997 | } *cmd1; | ||
998 | struct { | ||
999 | SVGA3dCmdHeader header; | ||
1000 | SVGA3dCmdUpdateGBSurface body; | ||
1001 | } *cmd2; | ||
1002 | uint32_t submit_size; | ||
1003 | struct ttm_buffer_object *bo = val_buf->bo; | ||
1004 | |||
1005 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
1006 | |||
1007 | submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0); | ||
1008 | |||
1009 | cmd1 = vmw_fifo_reserve(dev_priv, submit_size); | ||
1010 | if (unlikely(cmd1 == NULL)) { | ||
1011 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
1012 | "binding.\n"); | ||
1013 | return -ENOMEM; | ||
1014 | } | ||
1015 | |||
1016 | cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE; | ||
1017 | cmd1->header.size = sizeof(cmd1->body); | ||
1018 | cmd1->body.sid = res->id; | ||
1019 | cmd1->body.mobid = bo->mem.start; | ||
1020 | if (res->backup_dirty) { | ||
1021 | cmd2 = (void *) &cmd1[1]; | ||
1022 | cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE; | ||
1023 | cmd2->header.size = sizeof(cmd2->body); | ||
1024 | cmd2->body.sid = res->id; | ||
1025 | res->backup_dirty = false; | ||
1026 | } | ||
1027 | vmw_fifo_commit(dev_priv, submit_size); | ||
1028 | |||
1029 | return 0; | ||
1030 | } | ||
1031 | |||
1032 | static int vmw_gb_surface_unbind(struct vmw_resource *res, | ||
1033 | bool readback, | ||
1034 | struct ttm_validate_buffer *val_buf) | ||
1035 | { | ||
1036 | struct vmw_private *dev_priv = res->dev_priv; | ||
1037 | struct ttm_buffer_object *bo = val_buf->bo; | ||
1038 | struct vmw_fence_obj *fence; | ||
1039 | |||
1040 | struct { | ||
1041 | SVGA3dCmdHeader header; | ||
1042 | SVGA3dCmdReadbackGBSurface body; | ||
1043 | } *cmd1; | ||
1044 | struct { | ||
1045 | SVGA3dCmdHeader header; | ||
1046 | SVGA3dCmdBindGBSurface body; | ||
1047 | } *cmd2; | ||
1048 | uint32_t submit_size; | ||
1049 | uint8_t *cmd; | ||
1050 | |||
1051 | |||
1052 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
1053 | |||
1054 | submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); | ||
1055 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | ||
1056 | if (unlikely(cmd == NULL)) { | ||
1057 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
1058 | "unbinding.\n"); | ||
1059 | return -ENOMEM; | ||
1060 | } | ||
1061 | |||
1062 | cmd2 = (void *) cmd; | ||
1063 | if (readback) { | ||
1064 | cmd1 = (void *) cmd; | ||
1065 | cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE; | ||
1066 | cmd1->header.size = sizeof(cmd1->body); | ||
1067 | cmd1->body.sid = res->id; | ||
1068 | cmd2 = (void *) &cmd1[1]; | ||
1069 | } | ||
1070 | cmd2->header.id = SVGA_3D_CMD_BIND_GB_SURFACE; | ||
1071 | cmd2->header.size = sizeof(cmd2->body); | ||
1072 | cmd2->body.sid = res->id; | ||
1073 | cmd2->body.mobid = SVGA3D_INVALID_ID; | ||
1074 | |||
1075 | vmw_fifo_commit(dev_priv, submit_size); | ||
1076 | |||
1077 | /* | ||
1078 | * Create a fence object and fence the backup buffer. | ||
1079 | */ | ||
1080 | |||
1081 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, | ||
1082 | &fence, NULL); | ||
1083 | |||
1084 | vmw_fence_single_bo(val_buf->bo, fence); | ||
1085 | |||
1086 | if (likely(fence != NULL)) | ||
1087 | vmw_fence_obj_unreference(&fence); | ||
1088 | |||
1089 | return 0; | ||
1090 | } | ||
1091 | |||
1092 | static int vmw_gb_surface_destroy(struct vmw_resource *res) | ||
1093 | { | ||
1094 | struct vmw_private *dev_priv = res->dev_priv; | ||
1095 | struct { | ||
1096 | SVGA3dCmdHeader header; | ||
1097 | SVGA3dCmdDestroyGBSurface body; | ||
1098 | } *cmd; | ||
1099 | |||
1100 | if (likely(res->id == -1)) | ||
1101 | return 0; | ||
1102 | |||
1103 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
1104 | if (unlikely(cmd == NULL)) { | ||
1105 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
1106 | "destruction.\n"); | ||
1107 | return -ENOMEM; | ||
1108 | } | ||
1109 | |||
1110 | cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE; | ||
1111 | cmd->header.size = sizeof(cmd->body); | ||
1112 | cmd->body.sid = res->id; | ||
1113 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
1114 | vmw_resource_release_id(res); | ||
1115 | vmw_3d_resource_dec(dev_priv, false); | ||
1116 | |||
1117 | return 0; | ||
1118 | } | ||
1119 | |||
1120 | /** | ||
1121 | * vmw_gb_surface_define_ioctl - Ioctl function implementing | ||
1122 | * the user surface define functionality. | ||
1123 | * | ||
1124 | * @dev: Pointer to a struct drm_device. | ||
1125 | * @data: Pointer to data copied from / to user-space. | ||
1126 | * @file_priv: Pointer to a drm file private structure. | ||
1127 | */ | ||
1128 | int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | ||
1129 | struct drm_file *file_priv) | ||
1130 | { | ||
1131 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
1132 | struct vmw_user_surface *user_srf; | ||
1133 | struct vmw_surface *srf; | ||
1134 | struct vmw_resource *res; | ||
1135 | struct vmw_resource *tmp; | ||
1136 | union drm_vmw_gb_surface_create_arg *arg = | ||
1137 | (union drm_vmw_gb_surface_create_arg *)data; | ||
1138 | struct drm_vmw_gb_surface_create_req *req = &arg->req; | ||
1139 | struct drm_vmw_gb_surface_create_rep *rep = &arg->rep; | ||
1140 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
1141 | int ret; | ||
1142 | uint32_t size; | ||
1143 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
1144 | const struct svga3d_surface_desc *desc; | ||
1145 | uint32_t backup_handle; | ||
1146 | |||
1147 | if (unlikely(vmw_user_surface_size == 0)) | ||
1148 | vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + | ||
1149 | 128; | ||
1150 | |||
1151 | size = vmw_user_surface_size + 128; | ||
1152 | |||
1153 | desc = svga3dsurface_get_desc(req->format); | ||
1154 | if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) { | ||
1155 | DRM_ERROR("Invalid surface format for surface creation.\n"); | ||
1156 | return -EINVAL; | ||
1157 | } | ||
1158 | |||
1159 | ret = ttm_read_lock(&vmaster->lock, true); | ||
1160 | if (unlikely(ret != 0)) | ||
1161 | return ret; | ||
1162 | |||
1163 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), | ||
1164 | size, false, true); | ||
1165 | if (unlikely(ret != 0)) { | ||
1166 | if (ret != -ERESTARTSYS) | ||
1167 | DRM_ERROR("Out of graphics memory for surface" | ||
1168 | " creation.\n"); | ||
1169 | goto out_unlock; | ||
1170 | } | ||
1171 | |||
1172 | user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); | ||
1173 | if (unlikely(user_srf == NULL)) { | ||
1174 | ret = -ENOMEM; | ||
1175 | goto out_no_user_srf; | ||
1176 | } | ||
1177 | |||
1178 | srf = &user_srf->srf; | ||
1179 | res = &srf->res; | ||
1180 | |||
1181 | srf->flags = req->svga3d_flags; | ||
1182 | srf->format = req->format; | ||
1183 | srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout; | ||
1184 | srf->mip_levels[0] = req->mip_levels; | ||
1185 | srf->num_sizes = 1; | ||
1186 | srf->sizes = NULL; | ||
1187 | srf->offsets = NULL; | ||
1188 | user_srf->size = size; | ||
1189 | srf->base_size = req->base_size; | ||
1190 | srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; | ||
1191 | srf->multisample_count = req->multisample_count; | ||
1192 | res->backup_size = svga3dsurface_get_serialized_size | ||
1193 | (srf->format, srf->base_size, srf->mip_levels[0], | ||
1194 | srf->flags & SVGA3D_SURFACE_CUBEMAP); | ||
1195 | |||
1196 | user_srf->prime.base.shareable = false; | ||
1197 | user_srf->prime.base.tfile = NULL; | ||
1198 | |||
1199 | /** | ||
1200 | * From this point, the generic resource management functions | ||
1201 | * destroy the object on failure. | ||
1202 | */ | ||
1203 | |||
1204 | ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); | ||
1205 | if (unlikely(ret != 0)) | ||
1206 | goto out_unlock; | ||
1207 | |||
1208 | if (req->buffer_handle != SVGA3D_INVALID_ID) { | ||
1209 | ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, | ||
1210 | &res->backup); | ||
1211 | } else if (req->drm_surface_flags & | ||
1212 | drm_vmw_surface_flag_create_buffer) | ||
1213 | ret = vmw_user_dmabuf_alloc(dev_priv, tfile, | ||
1214 | res->backup_size, | ||
1215 | req->drm_surface_flags & | ||
1216 | drm_vmw_surface_flag_shareable, | ||
1217 | &backup_handle, | ||
1218 | &res->backup); | ||
1219 | |||
1220 | if (unlikely(ret != 0)) { | ||
1221 | vmw_resource_unreference(&res); | ||
1222 | goto out_unlock; | ||
1223 | } | ||
1224 | |||
1225 | tmp = vmw_resource_reference(&srf->res); | ||
1226 | ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, | ||
1227 | req->drm_surface_flags & | ||
1228 | drm_vmw_surface_flag_shareable, | ||
1229 | VMW_RES_SURFACE, | ||
1230 | &vmw_user_surface_base_release, NULL); | ||
1231 | |||
1232 | if (unlikely(ret != 0)) { | ||
1233 | vmw_resource_unreference(&tmp); | ||
1234 | vmw_resource_unreference(&res); | ||
1235 | goto out_unlock; | ||
1236 | } | ||
1237 | |||
1238 | rep->handle = user_srf->prime.base.hash.key; | ||
1239 | rep->backup_size = res->backup_size; | ||
1240 | if (res->backup) { | ||
1241 | rep->buffer_map_handle = | ||
1242 | drm_vma_node_offset_addr(&res->backup->base.vma_node); | ||
1243 | rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE; | ||
1244 | rep->buffer_handle = backup_handle; | ||
1245 | } else { | ||
1246 | rep->buffer_map_handle = 0; | ||
1247 | rep->buffer_size = 0; | ||
1248 | rep->buffer_handle = SVGA3D_INVALID_ID; | ||
1249 | } | ||
1250 | |||
1251 | vmw_resource_unreference(&res); | ||
1252 | |||
1253 | ttm_read_unlock(&vmaster->lock); | ||
1254 | return 0; | ||
1255 | out_no_user_srf: | ||
1256 | ttm_mem_global_free(vmw_mem_glob(dev_priv), size); | ||
1257 | out_unlock: | ||
1258 | ttm_read_unlock(&vmaster->lock); | ||
1259 | return ret; | ||
1260 | } | ||
1261 | |||
/**
 * vmw_gb_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * Looks up a guest-backed surface by its surface id, adds a TTM_REF_USAGE
 * reference on it for the calling file, adds a user-space handle reference
 * on its backup buffer, and copies back to user-space both the parameters
 * needed to re-create an identical surface (rep->creq) and the handles /
 * map offset of the existing one (rep->crep).
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 *
 * Return: 0 on success, -EINVAL if the surface can't be found, isn't a
 * surface object or has no backup buffer, or a negative error code
 * propagated from the reference-adding helpers.
 */
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_gb_surface_reference_arg *arg =
	    (union drm_vmw_gb_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	uint32_t backup_handle;
	int ret = -EINVAL;

	/*
	 * Takes a reference on the base object; it is released at
	 * out_bad_resource on every exit path, success included.
	 */
	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	/* The looked-up object must actually be a surface. */
	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;
	/*
	 * A GB surface without a backup buffer can't be shared this way:
	 * the reply below hands the backup's handle and map offset back
	 * to user-space.
	 */
	if (srf->res.backup == NULL) {
		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
		goto out_bad_resource;
	}

	/* Give the calling file a usage reference on the surface itself. */
	ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
				 TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a GB surface.\n");
		goto out_bad_resource;
	}

	mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
	ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
					&backup_handle);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a GB surface "
			  "backup buffer.\n");
		/*
		 * Roll back the usage reference added above so the
		 * caller's ref count is unchanged on failure.
		 */
		(void) ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
						 req->sid,
						 TTM_REF_USAGE);
		goto out_bad_resource;
	}

	/*
	 * creq mirrors the create-ioctl request so user-space can clone
	 * the surface. drm_surface_flags is reported as 0 — shareable /
	 * scanout state of the original is not echoed back.
	 */
	rep->creq.svga3d_flags = srf->flags;
	rep->creq.format = srf->format;
	rep->creq.mip_levels = srf->mip_levels[0];
	rep->creq.drm_surface_flags = 0;
	rep->creq.multisample_count = srf->multisample_count;
	rep->creq.autogen_filter = srf->autogen_filter;
	rep->creq.buffer_handle = backup_handle;
	rep->creq.base_size = srf->base_size;
	/* crep: handles and mmap info for the existing surface/backup. */
	rep->crep.handle = user_srf->prime.base.hash.key;
	rep->crep.backup_size = srf->res.backup_size;
	rep->crep.buffer_handle = backup_handle;
	rep->crep.buffer_map_handle =
		drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
	rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}