Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_surface.c')

 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 493 ++++++++++++++++++++++++++++++-
 1 file changed, 486 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7de2ea8bd553..e7af580ab977 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -41,7 +41,6 @@ struct vmw_user_surface {
         struct ttm_prime_object prime;
         struct vmw_surface srf;
         uint32_t size;
-        uint32_t backup_handle;
 };
 
 /**
@@ -68,6 +67,14 @@ static int vmw_legacy_srf_unbind(struct vmw_resource *res,
                                  struct ttm_validate_buffer *val_buf);
 static int vmw_legacy_srf_create(struct vmw_resource *res);
 static int vmw_legacy_srf_destroy(struct vmw_resource *res);
+static int vmw_gb_surface_create(struct vmw_resource *res);
+static int vmw_gb_surface_bind(struct vmw_resource *res,
+                               struct ttm_validate_buffer *val_buf);
+static int vmw_gb_surface_unbind(struct vmw_resource *res,
+                                 bool readback,
+                                 struct ttm_validate_buffer *val_buf);
+static int vmw_gb_surface_destroy(struct vmw_resource *res);
+
 
 static const struct vmw_user_resource_conv user_surface_conv = {
         .object_type = VMW_RES_SURFACE,
@@ -93,6 +100,18 @@ static const struct vmw_res_func vmw_legacy_surface_func = {
         .unbind = &vmw_legacy_srf_unbind
 };
 
+static const struct vmw_res_func vmw_gb_surface_func = {
+        .res_type = vmw_res_surface,
+        .needs_backup = true,
+        .may_evict = true,
+        .type_name = "guest backed surfaces",
+        .backup_placement = &vmw_mob_placement,
+        .create = vmw_gb_surface_create,
+        .destroy = vmw_gb_surface_destroy,
+        .bind = vmw_gb_surface_bind,
+        .unbind = vmw_gb_surface_unbind
+};
+
 /**
  * struct vmw_surface_dma - SVGA3D DMA command
  */
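
Editor's note: the vmw_gb_surface_func table added above is the whole integration point for guest-backed surfaces. The generic vmwgfx resource code calls create/bind/unbind/destroy only through a struct vmw_res_func, so a new surface flavour amounts to supplying a new table. A minimal sketch of that dispatch pattern, using hypothetical names rather than the driver's real types:

/*
 * Editorial sketch, not part of the patch: the function-table dispatch that
 * vmw_res_func implements, with hypothetical names.  The core code never
 * cares whether a resource is legacy or guest-backed; it only calls through
 * the table it was given at init time.
 */
struct res;

struct res_ops {
        int (*create)(struct res *r);               /* allocate a device id   */
        int (*bind)(struct res *r);                 /* attach backing store   */
        int (*unbind)(struct res *r, int readback); /* detach backing store   */
        int (*destroy)(struct res *r);              /* free the device id     */
};

struct res {
        const struct res_ops *ops;
        int id;
};

static int res_validate(struct res *r)
{
        int ret = r->ops->create(r);

        return ret ? ret : r->ops->bind(r);
}
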
@@ -291,6 +310,11 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
         struct vmw_surface *srf;
         void *cmd;
 
+        if (res->func->destroy == vmw_gb_surface_destroy) {
+                (void) vmw_gb_surface_destroy(res);
+                return;
+        }
+
         if (res->id != -1) {
 
                 cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
@@ -549,12 +573,15 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
         struct vmw_resource *res = &srf->res;
 
         BUG_ON(res_free == NULL);
-        (void) vmw_3d_resource_inc(dev_priv, false);
+        if (!dev_priv->has_mob)
+                (void) vmw_3d_resource_inc(dev_priv, false);
         ret = vmw_resource_init(dev_priv, res, true, res_free,
+                                (dev_priv->has_mob) ? &vmw_gb_surface_func :
                                 &vmw_legacy_surface_func);
 
         if (unlikely(ret != 0)) {
-                vmw_3d_resource_dec(dev_priv, false);
+                if (!dev_priv->has_mob)
+                        vmw_3d_resource_dec(dev_priv, false);
                 res_free(res);
                 return ret;
         }
@@ -750,7 +777,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 
         srf->base_size = *srf->sizes;
         srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
-        srf->multisample_count = 1;
+        srf->multisample_count = 0;
 
         cur_bo_offset = 0;
         cur_offset = srf->offsets;
@@ -803,6 +830,24 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
         if (unlikely(ret != 0))
                 goto out_unlock;
 
+        /*
+         * A gb-aware client referencing a shared surface will
+         * expect a backup buffer to be present.
+         */
+        if (dev_priv->has_mob && req->shareable) {
+                uint32_t backup_handle;
+
+                ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
+                                            res->backup_size,
+                                            true,
+                                            &backup_handle,
+                                            &res->backup);
+                if (unlikely(ret != 0)) {
+                        vmw_resource_unreference(&res);
+                        goto out_unlock;
+                }
+        }
+
         tmp = vmw_resource_reference(&srf->res);
         ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
                                     req->shareable, VMW_RES_SURFACE,
@@ -843,6 +888,7 @@ out_unlock:
 int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
 {
+        struct vmw_private *dev_priv = vmw_priv(dev);
         union drm_vmw_surface_reference_arg *arg =
             (union drm_vmw_surface_reference_arg *)data;
         struct drm_vmw_surface_arg *req = &arg->req;
@@ -854,7 +900,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
         struct ttm_base_object *base;
         int ret = -EINVAL;
 
-        base = ttm_base_object_lookup(tfile, req->sid);
+        base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
         if (unlikely(base == NULL)) {
                 DRM_ERROR("Could not find surface to reference.\n");
                 return -EINVAL;
@@ -880,8 +926,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
             rep->size_addr;
 
         if (user_sizes)
-                ret = copy_to_user(user_sizes, srf->sizes,
-                                   srf->num_sizes * sizeof(*srf->sizes));
+                ret = copy_to_user(user_sizes, &srf->base_size,
+                                   sizeof(srf->base_size));
         if (unlikely(ret != 0)) {
                 DRM_ERROR("copy_to_user failed %p %u\n",
                           user_sizes, srf->num_sizes);
@@ -893,3 +939,436 @@ out_no_reference:
 
         return ret;
 }
+
+/**
+ * vmw_gb_surface_create - Encode a surface define command and submit it
+ * to the device, allocating a device surface id in the process.
+ *
+ * @res: Pointer to the struct vmw_resource embedded in a struct vmw_surface.
+ */
+static int vmw_gb_surface_create(struct vmw_resource *res)
+{
+        struct vmw_private *dev_priv = res->dev_priv;
+        struct vmw_surface *srf = vmw_res_to_srf(res);
+        uint32_t cmd_len, submit_len;
+        int ret;
+        struct {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdDefineGBSurface body;
+        } *cmd;
+
+        if (likely(res->id != -1))
+                return 0;
+
+        (void) vmw_3d_resource_inc(dev_priv, false);
+        ret = vmw_resource_alloc_id(res);
+        if (unlikely(ret != 0)) {
+                DRM_ERROR("Failed to allocate a surface id.\n");
+                goto out_no_id;
+        }
+
+        if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
+                ret = -EBUSY;
+                goto out_no_fifo;
+        }
+
+        cmd_len = sizeof(cmd->body);
+        submit_len = sizeof(*cmd);
+        cmd = vmw_fifo_reserve(dev_priv, submit_len);
+        if (unlikely(cmd == NULL)) {
+                DRM_ERROR("Failed reserving FIFO space for surface "
+                          "creation.\n");
+                ret = -ENOMEM;
+                goto out_no_fifo;
+        }
+
+        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
+        cmd->header.size = cmd_len;
+        cmd->body.sid = srf->res.id;
+        cmd->body.surfaceFlags = srf->flags;
+        cmd->body.format = cpu_to_le32(srf->format);
+        cmd->body.numMipLevels = srf->mip_levels[0];
+        cmd->body.multisampleCount = srf->multisample_count;
+        cmd->body.autogenFilter = srf->autogen_filter;
+        cmd->body.size.width = srf->base_size.width;
+        cmd->body.size.height = srf->base_size.height;
+        cmd->body.size.depth = srf->base_size.depth;
+        vmw_fifo_commit(dev_priv, submit_len);
+
+        return 0;
+
+out_no_fifo:
+        vmw_resource_release_id(res);
+out_no_id:
+        vmw_3d_resource_dec(dev_priv, false);
+        return ret;
+}
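
Editor's note: vmw_gb_surface_create above and the bind/unbind/destroy functions below all follow the same SVGA command-submission pattern: reserve space in the command FIFO, fill in a fixed-layout header/body pair (header.size counts the body only), then commit exactly the reserved size. A schematic of that pattern with hypothetical names; in the driver the helpers are vmw_fifo_reserve()/vmw_fifo_commit() and the opcodes are the SVGA_3D_CMD_*_GB_SURFACE commands used in this patch:

#include <errno.h>
#include <stdint.h>
#include <string.h>

struct device_ctx;                                            /* stand-in for struct vmw_private */
void *fifo_reserve(struct device_ctx *ctx, uint32_t bytes);   /* stand-in for vmw_fifo_reserve() */
void fifo_commit(struct device_ctx *ctx, uint32_t bytes);     /* stand-in for vmw_fifo_commit()  */

struct cmd_header {              /* mirrors SVGA3dCmdHeader            */
        uint32_t id;             /* command opcode                     */
        uint32_t size;           /* size of the command body only      */
};

static int submit_simple_cmd(struct device_ctx *ctx, uint32_t opcode,
                             const void *body, uint32_t body_size)
{
        struct cmd_header *cmd = fifo_reserve(ctx, sizeof(*cmd) + body_size);

        if (cmd == NULL)
                return -ENOMEM;                  /* FIFO full; caller unwinds */

        cmd->id = opcode;
        cmd->size = body_size;
        memcpy(cmd + 1, body, body_size);        /* body follows the header   */
        fifo_commit(ctx, sizeof(*cmd) + body_size);
        return 0;
}
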
+
+
+static int vmw_gb_surface_bind(struct vmw_resource *res,
+                               struct ttm_validate_buffer *val_buf)
+{
+        struct vmw_private *dev_priv = res->dev_priv;
+        struct {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdBindGBSurface body;
+        } *cmd1;
+        struct {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdUpdateGBSurface body;
+        } *cmd2;
+        uint32_t submit_size;
+        struct ttm_buffer_object *bo = val_buf->bo;
+
+        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+        submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
+
+        cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
+        if (unlikely(cmd1 == NULL)) {
+                DRM_ERROR("Failed reserving FIFO space for surface "
+                          "binding.\n");
+                return -ENOMEM;
+        }
+
+        cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
+        cmd1->header.size = sizeof(cmd1->body);
+        cmd1->body.sid = res->id;
+        cmd1->body.mobid = bo->mem.start;
+        if (res->backup_dirty) {
+                cmd2 = (void *) &cmd1[1];
+                cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
+                cmd2->header.size = sizeof(cmd2->body);
+                cmd2->body.sid = res->id;
+                res->backup_dirty = false;
+        }
+        vmw_fifo_commit(dev_priv, submit_size);
+
+        return 0;
+}
+
+static int vmw_gb_surface_unbind(struct vmw_resource *res,
+                                 bool readback,
+                                 struct ttm_validate_buffer *val_buf)
+{
+        struct vmw_private *dev_priv = res->dev_priv;
+        struct ttm_buffer_object *bo = val_buf->bo;
+        struct vmw_fence_obj *fence;
+
+        struct {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdReadbackGBSurface body;
+        } *cmd1;
+        struct {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdInvalidateGBSurface body;
+        } *cmd2;
+        struct {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdBindGBSurface body;
+        } *cmd3;
+        uint32_t submit_size;
+        uint8_t *cmd;
+
+
+        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+        submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
+        cmd = vmw_fifo_reserve(dev_priv, submit_size);
+        if (unlikely(cmd == NULL)) {
+                DRM_ERROR("Failed reserving FIFO space for surface "
+                          "unbinding.\n");
+                return -ENOMEM;
+        }
+
+        if (readback) {
+                cmd1 = (void *) cmd;
+                cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
+                cmd1->header.size = sizeof(cmd1->body);
+                cmd1->body.sid = res->id;
+                cmd3 = (void *) &cmd1[1];
+        } else {
+                cmd2 = (void *) cmd;
+                cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
+                cmd2->header.size = sizeof(cmd2->body);
+                cmd2->body.sid = res->id;
+                cmd3 = (void *) &cmd2[1];
+        }
+
+        cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
+        cmd3->header.size = sizeof(cmd3->body);
+        cmd3->body.sid = res->id;
+        cmd3->body.mobid = SVGA3D_INVALID_ID;
+
+        vmw_fifo_commit(dev_priv, submit_size);
+
+        /*
+         * Create a fence object and fence the backup buffer.
+         */
+
+        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+                                          &fence, NULL);
+
+        vmw_fence_single_bo(val_buf->bo, fence);
+
+        if (likely(fence != NULL))
+                vmw_fence_obj_unreference(&fence);
+
+        return 0;
+}
+
+static int vmw_gb_surface_destroy(struct vmw_resource *res)
+{
+        struct vmw_private *dev_priv = res->dev_priv;
+        struct {
+                SVGA3dCmdHeader header;
+                SVGA3dCmdDestroyGBSurface body;
+        } *cmd;
+
+        if (likely(res->id == -1))
+                return 0;
+
+        mutex_lock(&dev_priv->binding_mutex);
+        vmw_context_binding_res_list_scrub(&res->binding_head);
+
+        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+        if (unlikely(cmd == NULL)) {
+                DRM_ERROR("Failed reserving FIFO space for surface "
+                          "destruction.\n");
+                mutex_unlock(&dev_priv->binding_mutex);
+                return -ENOMEM;
+        }
+
+        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
+        cmd->header.size = sizeof(cmd->body);
+        cmd->body.sid = res->id;
+        vmw_fifo_commit(dev_priv, sizeof(*cmd));
+        mutex_unlock(&dev_priv->binding_mutex);
+        vmw_resource_release_id(res);
+        vmw_3d_resource_dec(dev_priv, false);
+
+        return 0;
+}
+
+/**
+ * vmw_gb_surface_define_ioctl - Ioctl function implementing
+ * the user surface define functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
+                                struct drm_file *file_priv)
+{
+        struct vmw_private *dev_priv = vmw_priv(dev);
+        struct vmw_user_surface *user_srf;
+        struct vmw_surface *srf;
+        struct vmw_resource *res;
+        struct vmw_resource *tmp;
+        union drm_vmw_gb_surface_create_arg *arg =
+            (union drm_vmw_gb_surface_create_arg *)data;
+        struct drm_vmw_gb_surface_create_req *req = &arg->req;
+        struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
+        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+        int ret;
+        uint32_t size;
+        struct vmw_master *vmaster = vmw_master(file_priv->master);
+        const struct svga3d_surface_desc *desc;
+        uint32_t backup_handle;
+
+        if (unlikely(vmw_user_surface_size == 0))
+                vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+                        128;
+
+        size = vmw_user_surface_size + 128;
+
+        desc = svga3dsurface_get_desc(req->format);
+        if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+                DRM_ERROR("Invalid surface format for surface creation.\n");
+                return -EINVAL;
+        }
+
+        ret = ttm_read_lock(&vmaster->lock, true);
+        if (unlikely(ret != 0))
+                return ret;
+
+        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+                                   size, false, true);
+        if (unlikely(ret != 0)) {
+                if (ret != -ERESTARTSYS)
+                        DRM_ERROR("Out of graphics memory for surface"
+                                  " creation.\n");
+                goto out_unlock;
+        }
+
+        user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+        if (unlikely(user_srf == NULL)) {
+                ret = -ENOMEM;
+                goto out_no_user_srf;
+        }
+
+        srf = &user_srf->srf;
+        res = &srf->res;
+
+        srf->flags = req->svga3d_flags;
+        srf->format = req->format;
+        srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout;
+        srf->mip_levels[0] = req->mip_levels;
+        srf->num_sizes = 1;
+        srf->sizes = NULL;
+        srf->offsets = NULL;
+        user_srf->size = size;
+        srf->base_size = req->base_size;
+        srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+        srf->multisample_count = req->multisample_count;
+        res->backup_size = svga3dsurface_get_serialized_size
+            (srf->format, srf->base_size, srf->mip_levels[0],
+             srf->flags & SVGA3D_SURFACE_CUBEMAP);
+
+        user_srf->prime.base.shareable = false;
+        user_srf->prime.base.tfile = NULL;
+
+        /**
+         * From this point, the generic resource management functions
+         * destroy the object on failure.
+         */
+
+        ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+        if (unlikely(ret != 0))
+                goto out_unlock;
+
+        if (req->buffer_handle != SVGA3D_INVALID_ID) {
+                ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
+                                             &res->backup);
+        } else if (req->drm_surface_flags &
+                   drm_vmw_surface_flag_create_buffer)
+                ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
+                                            res->backup_size,
+                                            req->drm_surface_flags &
+                                            drm_vmw_surface_flag_shareable,
+                                            &backup_handle,
+                                            &res->backup);
+
+        if (unlikely(ret != 0)) {
+                vmw_resource_unreference(&res);
+                goto out_unlock;
+        }
+
+        tmp = vmw_resource_reference(&srf->res);
+        ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+                                    req->drm_surface_flags &
+                                    drm_vmw_surface_flag_shareable,
+                                    VMW_RES_SURFACE,
+                                    &vmw_user_surface_base_release, NULL);
+
+        if (unlikely(ret != 0)) {
+                vmw_resource_unreference(&tmp);
+                vmw_resource_unreference(&res);
+                goto out_unlock;
+        }
+
+        rep->handle = user_srf->prime.base.hash.key;
+        rep->backup_size = res->backup_size;
+        if (res->backup) {
+                rep->buffer_map_handle =
+                    drm_vma_node_offset_addr(&res->backup->base.vma_node);
+                rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
+                rep->buffer_handle = backup_handle;
+        } else {
+                rep->buffer_map_handle = 0;
+                rep->buffer_size = 0;
+                rep->buffer_handle = SVGA3D_INVALID_ID;
+        }
+
+        vmw_resource_unreference(&res);
+
+        ttm_read_unlock(&vmaster->lock);
+        return 0;
+out_no_user_srf:
+        ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+out_unlock:
+        ttm_read_unlock(&vmaster->lock);
+        return ret;
+}
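
Editor's note: a rough user-space sketch of how a gb-aware client might drive this ioctl through libdrm. It assumes the union drm_vmw_gb_surface_create_arg and the DRM_VMW_GB_SURFACE_CREATE command index from the vmwgfx UAPI header, drmCommandWriteRead() from libdrm, and an already opened vmwgfx DRM fd; the header path, the format value, and the surface dimensions are illustrative assumptions, not part of the patch.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>            /* drmCommandWriteRead()                      */
#include "vmwgfx_drm.h"         /* vmwgfx UAPI structs and command indices    */

/* Create a 256x256 guest-backed surface and ask the kernel to allocate a
 * shareable backup buffer for it.  Returns the surface handle, or -1. */
static int create_gb_surface(int fd, uint32_t *buffer_handle)
{
        union drm_vmw_gb_surface_create_arg arg;

        memset(&arg, 0, sizeof(arg));
        arg.req.svga3d_flags = 0;
        arg.req.format = 2;                   /* e.g. SVGA3D_A8R8G8B8 (assumed value) */
        arg.req.mip_levels = 1;
        arg.req.multisample_count = 0;
        arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer |
                                    drm_vmw_surface_flag_shareable;
        arg.req.buffer_handle = (uint32_t) ~0U; /* SVGA3D_INVALID_ID: no buffer given */
        arg.req.base_size.width = 256;
        arg.req.base_size.height = 256;
        arg.req.base_size.depth = 1;

        if (drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE,
                                &arg, sizeof(arg)) != 0)
                return -1;

        *buffer_handle = arg.rep.buffer_handle; /* handle of the backing buffer */
        return (int) arg.rep.handle;            /* surface handle               */
}
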
+
+/**
+ * vmw_gb_surface_reference_ioctl - Ioctl function implementing
+ * the user surface reference functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
+                                   struct drm_file *file_priv)
+{
+        struct vmw_private *dev_priv = vmw_priv(dev);
+        union drm_vmw_gb_surface_reference_arg *arg =
+            (union drm_vmw_gb_surface_reference_arg *)data;
+        struct drm_vmw_surface_arg *req = &arg->req;
+        struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
+        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+        struct vmw_surface *srf;
+        struct vmw_user_surface *user_srf;
+        struct ttm_base_object *base;
+        uint32_t backup_handle;
+        int ret = -EINVAL;
+
+        base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
+        if (unlikely(base == NULL)) {
+                DRM_ERROR("Could not find surface to reference.\n");
+                return -EINVAL;
+        }
+
+        if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
+                goto out_bad_resource;
+
+        user_srf = container_of(base, struct vmw_user_surface, prime.base);
+        srf = &user_srf->srf;
+        if (srf->res.backup == NULL) {
+                DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
+                goto out_bad_resource;
+        }
+
+        ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
+                                 TTM_REF_USAGE, NULL);
+        if (unlikely(ret != 0)) {
+                DRM_ERROR("Could not add a reference to a GB surface.\n");
+                goto out_bad_resource;
+        }
+
+        mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
+        ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
+                                        &backup_handle);
+        mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+        if (unlikely(ret != 0)) {
+                DRM_ERROR("Could not add a reference to a GB surface "
+                          "backup buffer.\n");
+                (void) ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+                                                 req->sid,
+                                                 TTM_REF_USAGE);
+                goto out_bad_resource;
+        }
+
+        rep->creq.svga3d_flags = srf->flags;
+        rep->creq.format = srf->format;
+        rep->creq.mip_levels = srf->mip_levels[0];
+        rep->creq.drm_surface_flags = 0;
+        rep->creq.multisample_count = srf->multisample_count;
+        rep->creq.autogen_filter = srf->autogen_filter;
+        rep->creq.buffer_handle = backup_handle;
+        rep->creq.base_size = srf->base_size;
+        rep->crep.handle = user_srf->prime.base.hash.key;
+        rep->crep.backup_size = srf->res.backup_size;
+        rep->crep.buffer_handle = backup_handle;
+        rep->crep.buffer_map_handle =
+            drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
+        rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
+
+out_bad_resource:
+        ttm_base_object_unref(&base);
+
+        return ret;
+}
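
Editor's note: the reference ioctl is the other half of the sharing story: given the surface id of a shared surface, it returns a creq block mirroring the original creation parameters plus a crep with the backup-buffer handle and map offset, so a second gb-aware client can reconstruct and map the surface. A rough user-space sketch with the same caveats as the creation example above (UAPI names taken from vmwgfx_drm.h; the DRM_VMW_GB_SURFACE_REF command index and header path are assumptions):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

/* Reference a surface shared by another client and fetch the handle of its
 * backup buffer so it can be mapped or used for DMA. Returns the surface
 * handle, or -1 on error. */
static int reference_gb_surface(int fd, uint32_t sid, uint32_t *buffer_handle)
{
        union drm_vmw_gb_surface_reference_arg arg;

        memset(&arg, 0, sizeof(arg));
        arg.req.sid = sid;

        if (drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_REF,
                                &arg, sizeof(arg)) != 0)
                return -1;

        /* arg.rep.creq echoes the parameters the surface was defined with. */
        *buffer_handle = arg.rep.crep.buffer_handle;
        return (int) arg.rep.crep.handle;
}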