Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c')
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 898 |
1 file changed, 650 insertions, 248 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index e5775a0db495..534c96703c3f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -30,6 +30,181 @@ | |||
30 | #include <drm/ttm/ttm_bo_api.h> | 30 | #include <drm/ttm/ttm_bo_api.h> |
31 | #include <drm/ttm/ttm_placement.h> | 31 | #include <drm/ttm/ttm_placement.h> |
32 | 32 | ||
33 | #define VMW_RES_HT_ORDER 12 | ||
34 | |||
35 | /** | ||
36 | * struct vmw_resource_relocation - Relocation info for resources | ||
37 | * | ||
38 | * @head: List head for the software context's relocation list. | ||
39 | * @res: Non-ref-counted pointer to the resource. | ||
40 | * @offset: Offset into the command buffer, in units of 4-byte entries, | ||
41 | * where the id that needs fixup is located. | ||
42 | */ | ||
43 | struct vmw_resource_relocation { | ||
44 | struct list_head head; | ||
45 | const struct vmw_resource *res; | ||
46 | unsigned long offset; | ||
47 | }; | ||
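Since @offset counts 4-byte entries, applying a relocation later is a single indexed store. A minimal sketch of the record-then-patch pair, using only names that appear in the hunks below (res, id and sw_context->buf_start come from vmw_cmd_res_check; cb and rel from vmw_resource_relocations_apply):

	/* While parsing: remember where the id lives, counted in 32-bit
	 * words from the start of the submitted command buffer. */
	ret = vmw_resource_relocation_add(&sw_context->res_relocations, res,
					  id - sw_context->buf_start);

	/* After validation, once the resource has a device id: patch it. */
	cb[rel->offset] = rel->res->id;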
48 | |||
49 | /** | ||
50 | * struct vmw_resource_val_node - Validation info for resources | ||
51 | * | ||
52 | * @head: List head for the software context's resource list. | ||
53 | * @hash: Hash entry for quick resource to val_node lookup. | ||
54 | * @res: Ref-counted pointer to the resource. | ||
55 | * @switch_backup: Boolean whether to switch backup buffer on unreserve. | ||
56 | * @new_backup: Refcounted pointer to the new backup buffer. | ||
57 | * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL. | ||
58 | * @first_usage: Set to true the first time the resource is referenced in | ||
59 | * the command stream. | ||
60 | * @no_buffer_needed: The resource does not need a backup buffer allocated | ||
61 | * on reservation; the command stream will provide one. | ||
62 | */ | ||
63 | struct vmw_resource_val_node { | ||
64 | struct list_head head; | ||
65 | struct drm_hash_item hash; | ||
66 | struct vmw_resource *res; | ||
67 | struct vmw_dma_buffer *new_backup; | ||
68 | unsigned long new_backup_offset; | ||
69 | bool first_usage; | ||
70 | bool no_buffer_needed; | ||
71 | }; | ||
72 | |||
73 | /** | ||
74 | * vmw_resource_list_unreserve - unreserve resources previously reserved for | ||
75 | * command submission. | ||
76 | * | ||
77 | * @list: list of resources to unreserve. | ||
78 | * @backoff: Whether command submission failed. | ||
79 | */ | ||
80 | static void vmw_resource_list_unreserve(struct list_head *list, | ||
81 | bool backoff) | ||
82 | { | ||
83 | struct vmw_resource_val_node *val; | ||
84 | |||
85 | list_for_each_entry(val, list, head) { | ||
86 | struct vmw_resource *res = val->res; | ||
87 | struct vmw_dma_buffer *new_backup = | ||
88 | backoff ? NULL : val->new_backup; | ||
89 | |||
90 | vmw_resource_unreserve(res, new_backup, | ||
91 | val->new_backup_offset); | ||
92 | vmw_dmabuf_unreference(&val->new_backup); | ||
93 | } | ||
94 | } | ||
95 | |||
96 | |||
97 | /** | ||
98 | * vmw_resource_val_add - Add a resource to the software context's | ||
99 | * resource list if it's not already on it. | ||
100 | * | ||
101 | * @sw_context: Pointer to the software context. | ||
102 | * @res: Pointer to the resource. | ||
103 | * @p_node: On successful return points to a valid pointer to a | ||
104 | * struct vmw_resource_val_node, if non-NULL on entry. | ||
105 | */ | ||
106 | static int vmw_resource_val_add(struct vmw_sw_context *sw_context, | ||
107 | struct vmw_resource *res, | ||
108 | struct vmw_resource_val_node **p_node) | ||
109 | { | ||
110 | struct vmw_resource_val_node *node; | ||
111 | struct drm_hash_item *hash; | ||
112 | int ret; | ||
113 | |||
114 | if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res, | ||
115 | &hash) == 0)) { | ||
116 | node = container_of(hash, struct vmw_resource_val_node, hash); | ||
117 | node->first_usage = false; | ||
118 | if (unlikely(p_node != NULL)) | ||
119 | *p_node = node; | ||
120 | return 0; | ||
121 | } | ||
122 | |||
123 | node = kzalloc(sizeof(*node), GFP_KERNEL); | ||
124 | if (unlikely(node == NULL)) { | ||
125 | DRM_ERROR("Failed to allocate a resource validation " | ||
126 | "entry.\n"); | ||
127 | return -ENOMEM; | ||
128 | } | ||
129 | |||
130 | node->hash.key = (unsigned long) res; | ||
131 | ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash); | ||
132 | if (unlikely(ret != 0)) { | ||
133 | DRM_ERROR("Failed to initialize a resource validation " | ||
134 | "entry.\n"); | ||
135 | kfree(node); | ||
136 | return ret; | ||
137 | } | ||
138 | list_add_tail(&node->head, &sw_context->resource_list); | ||
139 | node->res = vmw_resource_reference(res); | ||
140 | node->first_usage = true; | ||
141 | |||
142 | if (unlikely(p_node != NULL)) | ||
143 | *p_node = node; | ||
144 | |||
145 | return 0; | ||
146 | } | ||
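A hash lookup keyed on the resource pointer makes repeated references cheap: only the first call per resource allocates a node and takes a reference. An illustrative sketch, assuming res is a resource already looked up elsewhere:

	struct vmw_resource_val_node *node;

	/* First reference: allocates a node, takes a reference and sets
	 * node->first_usage = true. */
	ret = vmw_resource_val_add(sw_context, res, &node);

	/* A repeated reference hits the hash table: no allocation, and
	 * node->first_usage becomes false. */
	ret = vmw_resource_val_add(sw_context, res, &node);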
147 | |||
148 | /** | ||
149 | * vmw_resource_relocation_add - Add a relocation to the relocation list | ||
150 | * | ||
151 | * @list: Pointer to head of relocation list. | ||
152 | * @res: The resource. | ||
153 | * @offset: Offset into the command buffer currently being parsed where the | ||
154 | * id that needs fixup is located. Granularity is 4 bytes. | ||
155 | */ | ||
156 | static int vmw_resource_relocation_add(struct list_head *list, | ||
157 | const struct vmw_resource *res, | ||
158 | unsigned long offset) | ||
159 | { | ||
160 | struct vmw_resource_relocation *rel; | ||
161 | |||
162 | rel = kmalloc(sizeof(*rel), GFP_KERNEL); | ||
163 | if (unlikely(rel == NULL)) { | ||
164 | DRM_ERROR("Failed to allocate a resource relocation.\n"); | ||
165 | return -ENOMEM; | ||
166 | } | ||
167 | |||
168 | rel->res = res; | ||
169 | rel->offset = offset; | ||
170 | list_add_tail(&rel->head, list); | ||
171 | |||
172 | return 0; | ||
173 | } | ||
174 | |||
175 | /** | ||
176 | * vmw_resource_relocations_free - Free all relocations on a list | ||
177 | * | ||
178 | * @list: Pointer to the head of the relocation list. | ||
179 | */ | ||
180 | static void vmw_resource_relocations_free(struct list_head *list) | ||
181 | { | ||
182 | struct vmw_resource_relocation *rel, *n; | ||
183 | |||
184 | list_for_each_entry_safe(rel, n, list, head) { | ||
185 | list_del(&rel->head); | ||
186 | kfree(rel); | ||
187 | } | ||
188 | } | ||
189 | |||
190 | /** | ||
191 | * vmw_resource_relocations_apply - Apply all relocations on a list | ||
192 | * | ||
193 | * @cb: Pointer to the start of the command buffer being patched. This need | ||
194 | * not be the same buffer as the one being parsed when the relocation | ||
195 | * list was built, but the contents must be the same modulo the | ||
196 | * resource ids. | ||
197 | * @list: Pointer to the head of the relocation list. | ||
198 | */ | ||
199 | static void vmw_resource_relocations_apply(uint32_t *cb, | ||
200 | struct list_head *list) | ||
201 | { | ||
202 | struct vmw_resource_relocation *rel; | ||
203 | |||
204 | list_for_each_entry(rel, list, head) | ||
205 | cb[rel->offset] = rel->res->id; | ||
206 | } | ||
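This decoupling is what lets the parser run on the caller's copy of the commands while the fixups land in the FIFO copy: as long as both buffers agree except for the resource ids, the recorded word offsets stay valid. The submission path later in this patch does exactly that; condensed:

	cmd = vmw_fifo_reserve(dev_priv, command_size);
	memcpy(cmd, kernel_commands, command_size);

	/* Patch device ids into the FIFO copy, then drop the list. */
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);

	vmw_fifo_commit(dev_priv, command_size);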
207 | |||
33 | static int vmw_cmd_invalid(struct vmw_private *dev_priv, | 208 | static int vmw_cmd_invalid(struct vmw_private *dev_priv, |
34 | struct vmw_sw_context *sw_context, | 209 | struct vmw_sw_context *sw_context, |
35 | SVGA3dCmdHeader *header) | 210 | SVGA3dCmdHeader *header) |
@@ -44,25 +219,11 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv, | |||
44 | return 0; | 219 | return 0; |
45 | } | 220 | } |
46 | 221 | ||
47 | static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context, | ||
48 | struct vmw_resource **p_res) | ||
49 | { | ||
50 | struct vmw_resource *res = *p_res; | ||
51 | |||
52 | if (list_empty(&res->validate_head)) { | ||
53 | list_add_tail(&res->validate_head, &sw_context->resource_list); | ||
54 | *p_res = NULL; | ||
55 | } else | ||
56 | vmw_resource_unreference(p_res); | ||
57 | } | ||
58 | |||
59 | /** | 222 | /** |
60 | * vmw_bo_to_validate_list - add a bo to a validate list | 223 | * vmw_bo_to_validate_list - add a bo to a validate list |
61 | * | 224 | * |
62 | * @sw_context: The software context used for this command submission batch. | 225 | * @sw_context: The software context used for this command submission batch. |
63 | * @bo: The buffer object to add. | 226 | * @bo: The buffer object to add. |
64 | * @fence_flags: Fence flags to be or'ed with any other fence flags for | ||
65 | * this buffer on this submission batch. | ||
66 | * @p_val_node: If non-NULL, will be updated with the validate node number | 227 | * @p_val_node: If non-NULL, will be updated with the validate node number |
67 | * on return. | 228 | * on return. |
68 | * | 229 | * |
@@ -74,21 +235,37 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, | |||
74 | uint32_t *p_val_node) | 235 | uint32_t *p_val_node) |
75 | { | 236 | { |
76 | uint32_t val_node; | 237 | uint32_t val_node; |
238 | struct vmw_validate_buffer *vval_buf; | ||
77 | struct ttm_validate_buffer *val_buf; | 239 | struct ttm_validate_buffer *val_buf; |
240 | struct drm_hash_item *hash; | ||
241 | int ret; | ||
78 | 242 | ||
79 | val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); | 243 | if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo, |
80 | 244 | &hash) == 0)) { | |
81 | if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) { | 245 | vval_buf = container_of(hash, struct vmw_validate_buffer, |
82 | DRM_ERROR("Max number of DMA buffers per submission" | 246 | hash); |
83 | " exceeded.\n"); | 247 | val_buf = &vval_buf->base; |
84 | return -EINVAL; | 248 | val_node = vval_buf - sw_context->val_bufs; |
85 | } | 249 | } else { |
86 | 250 | val_node = sw_context->cur_val_buf; | |
87 | val_buf = &sw_context->val_bufs[val_node]; | 251 | if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) { |
88 | if (unlikely(val_node == sw_context->cur_val_buf)) { | 252 | DRM_ERROR("Max number of DMA buffers per submission " |
253 | "exceeded.\n"); | ||
254 | return -EINVAL; | ||
255 | } | ||
256 | vval_buf = &sw_context->val_bufs[val_node]; | ||
257 | vval_buf->hash.key = (unsigned long) bo; | ||
258 | ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash); | ||
259 | if (unlikely(ret != 0)) { | ||
260 | DRM_ERROR("Failed to initialize a buffer validation " | ||
261 | "entry.\n"); | ||
262 | return ret; | ||
263 | } | ||
264 | ++sw_context->cur_val_buf; | ||
265 | val_buf = &vval_buf->base; | ||
89 | val_buf->bo = ttm_bo_reference(bo); | 266 | val_buf->bo = ttm_bo_reference(bo); |
267 | val_buf->reserved = false; | ||
90 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); | 268 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); |
91 | ++sw_context->cur_val_buf; | ||
92 | } | 269 | } |
93 | 270 | ||
94 | sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC; | 271 | sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC; |
@@ -99,85 +276,174 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, | |||
99 | return 0; | 276 | return 0; |
100 | } | 277 | } |
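Buffer objects are now deduplicated through the same hash table as resources, keyed on the buffer object pointer, so a buffer referenced many times in one batch consumes a single validation slot. A hypothetical caller (the index output parameter may be NULL):

	uint32_t index;

	/* The first call adds the bo to the validate list; a second call
	 * with the same bo returns the cached node index instead. */
	ret = vmw_bo_to_validate_list(sw_context, bo, &index);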
101 | 278 | ||
102 | static int vmw_cmd_cid_check(struct vmw_private *dev_priv, | 279 | /** |
103 | struct vmw_sw_context *sw_context, | 280 | * vmw_resources_reserve - Reserve all resources on the sw_context's |
104 | SVGA3dCmdHeader *header) | 281 | * resource list. |
282 | * | ||
283 | * @sw_context: Pointer to the software context. | ||
284 | * | ||
285 | * Note that since vmware's command submission currently is protected by | ||
286 | * the cmdbuf mutex, no fancy deadlock avoidance is required for resources, | ||
287 | * since only a single thread at once will attempt this. | ||
288 | */ | ||
289 | static int vmw_resources_reserve(struct vmw_sw_context *sw_context) | ||
105 | { | 290 | { |
106 | struct vmw_resource *ctx; | 291 | struct vmw_resource_val_node *val; |
107 | |||
108 | struct vmw_cid_cmd { | ||
109 | SVGA3dCmdHeader header; | ||
110 | __le32 cid; | ||
111 | } *cmd; | ||
112 | int ret; | 292 | int ret; |
113 | 293 | ||
114 | cmd = container_of(header, struct vmw_cid_cmd, header); | 294 | list_for_each_entry(val, &sw_context->resource_list, head) { |
115 | if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid)) | 295 | struct vmw_resource *res = val->res; |
116 | return 0; | ||
117 | 296 | ||
118 | ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid, | 297 | ret = vmw_resource_reserve(res, val->no_buffer_needed); |
119 | &ctx); | 298 | if (unlikely(ret != 0)) |
120 | if (unlikely(ret != 0)) { | 299 | return ret; |
121 | DRM_ERROR("Could not find or use context %u\n", | 300 | |
122 | (unsigned) cmd->cid); | 301 | if (res->backup) { |
123 | return ret; | 302 | struct ttm_buffer_object *bo = &res->backup->base; |
303 | |||
304 | ret = vmw_bo_to_validate_list | ||
305 | (sw_context, bo, NULL); | ||
306 | |||
307 | if (unlikely(ret != 0)) | ||
308 | return ret; | ||
309 | } | ||
124 | } | 310 | } |
311 | return 0; | ||
312 | } | ||
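Reserving a resource may expose a backup buffer that must be validated with the rest of the batch, which dictates the ordering used by the execbuf path further down: reserve resources first so their backup buffers land on the validate list, then reserve and validate the buffers, then validate resource contents. Condensed, with error handling omitted:

	ret = vmw_resources_reserve(sw_context);	  /* queues backup bos */
	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
	ret = vmw_validate_buffers(dev_priv, sw_context); /* backup storage */
	ret = vmw_resources_validate(sw_context);	  /* resource state */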
125 | 313 | ||
126 | sw_context->last_cid = cmd->cid; | 314 | /** |
127 | sw_context->cid_valid = true; | 315 | * vmw_resources_validate - Validate all resources on the sw_context's |
128 | sw_context->cur_ctx = ctx; | 316 | * resource list. |
129 | vmw_resource_to_validate_list(sw_context, &ctx); | 317 | * |
318 | * @sw_context: Pointer to the software context. | ||
319 | * | ||
320 | * Before this function is called, all resource backup buffers must have | ||
321 | * been validated. | ||
322 | */ | ||
323 | static int vmw_resources_validate(struct vmw_sw_context *sw_context) | ||
324 | { | ||
325 | struct vmw_resource_val_node *val; | ||
326 | int ret; | ||
327 | |||
328 | list_for_each_entry(val, &sw_context->resource_list, head) { | ||
329 | struct vmw_resource *res = val->res; | ||
130 | 330 | ||
331 | ret = vmw_resource_validate(res); | ||
332 | if (unlikely(ret != 0)) { | ||
333 | if (ret != -ERESTARTSYS) | ||
334 | DRM_ERROR("Failed to validate resource.\n"); | ||
335 | return ret; | ||
336 | } | ||
337 | } | ||
131 | return 0; | 338 | return 0; |
132 | } | 339 | } |
133 | 340 | ||
134 | static int vmw_cmd_sid_check(struct vmw_private *dev_priv, | 341 | /** |
342 | * vmw_cmd_res_check - Check that a resource is present and if so, put it | ||
343 | * on the resource validate list unless it's already there. | ||
344 | * | ||
345 | * @dev_priv: Pointer to a device private structure. | ||
346 | * @sw_context: Pointer to the software context. | ||
347 | * @res_type: Resource type. | ||
348 | * @converter: User-space visible type-specific information. | ||
349 | * @id: Pointer to the location in the command buffer currently being | ||
350 | * parsed from where the user-space resource id handle is located. | ||
351 | */ | ||
352 | static int vmw_cmd_res_check(struct vmw_private *dev_priv, | ||
135 | struct vmw_sw_context *sw_context, | 353 | struct vmw_sw_context *sw_context, |
136 | uint32_t *sid) | 354 | enum vmw_res_type res_type, |
355 | const struct vmw_user_resource_conv *converter, | ||
356 | uint32_t *id, | ||
357 | struct vmw_resource_val_node **p_val) | ||
137 | { | 358 | { |
138 | struct vmw_surface *srf; | 359 | struct vmw_res_cache_entry *rcache = |
139 | int ret; | 360 | &sw_context->res_cache[res_type]; |
140 | struct vmw_resource *res; | 361 | struct vmw_resource *res; |
362 | struct vmw_resource_val_node *node; | ||
363 | int ret; | ||
141 | 364 | ||
142 | if (*sid == SVGA3D_INVALID_ID) | 365 | if (*id == SVGA3D_INVALID_ID) |
143 | return 0; | 366 | return 0; |
144 | 367 | ||
145 | if (likely((sw_context->sid_valid && | 368 | /* |
146 | *sid == sw_context->last_sid))) { | 369 | * Fastpath in case of repeated commands referencing the same |
147 | *sid = sw_context->sid_translation; | 370 | * resource |
148 | return 0; | 371 | */ |
149 | } | ||
150 | 372 | ||
151 | ret = vmw_user_surface_lookup_handle(dev_priv, | 373 | if (likely(rcache->valid && *id == rcache->handle)) { |
152 | sw_context->tfile, | 374 | const struct vmw_resource *res = rcache->res; |
153 | *sid, &srf); | 375 | |
154 | if (unlikely(ret != 0)) { | 376 | rcache->node->first_usage = false; |
155 | DRM_ERROR("Could ot find or use surface 0x%08x " | 377 | if (p_val) |
156 | "address 0x%08lx\n", | 378 | *p_val = rcache->node; |
157 | (unsigned int) *sid, | 379 | |
158 | (unsigned long) sid); | 380 | return vmw_resource_relocation_add |
159 | return ret; | 381 | (&sw_context->res_relocations, res, |
382 | id - sw_context->buf_start); | ||
160 | } | 383 | } |
161 | 384 | ||
162 | ret = vmw_surface_validate(dev_priv, srf); | 385 | ret = vmw_user_resource_lookup_handle(dev_priv, |
386 | sw_context->tfile, | ||
387 | *id, | ||
388 | converter, | ||
389 | &res); | ||
163 | if (unlikely(ret != 0)) { | 390 | if (unlikely(ret != 0)) { |
164 | if (ret != -ERESTARTSYS) | 391 | DRM_ERROR("Could not find or use resource 0x%08x.\n", |
165 | DRM_ERROR("Could not validate surface.\n"); | 392 | (unsigned) *id); |
166 | vmw_surface_unreference(&srf); | 393 | dump_stack(); |
167 | return ret; | 394 | return ret; |
168 | } | 395 | } |
169 | 396 | ||
170 | sw_context->last_sid = *sid; | 397 | rcache->valid = true; |
171 | sw_context->sid_valid = true; | 398 | rcache->res = res; |
172 | sw_context->sid_translation = srf->res.id; | 399 | rcache->handle = *id; |
173 | *sid = sw_context->sid_translation; | ||
174 | 400 | ||
175 | res = &srf->res; | 401 | ret = vmw_resource_relocation_add(&sw_context->res_relocations, |
176 | vmw_resource_to_validate_list(sw_context, &res); | 402 | res, |
403 | id - sw_context->buf_start); | ||
404 | if (unlikely(ret != 0)) | ||
405 | goto out_no_reloc; | ||
406 | |||
407 | ret = vmw_resource_val_add(sw_context, res, &node); | ||
408 | if (unlikely(ret != 0)) | ||
409 | goto out_no_reloc; | ||
177 | 410 | ||
411 | rcache->node = node; | ||
412 | if (p_val) | ||
413 | *p_val = node; | ||
414 | vmw_resource_unreference(&res); | ||
178 | return 0; | 415 | return 0; |
416 | |||
417 | out_no_reloc: | ||
418 | BUG_ON(sw_context->error_resource != NULL); | ||
419 | sw_context->error_resource = res; | ||
420 | |||
421 | return ret; | ||
179 | } | 422 | } |
180 | 423 | ||
424 | /** | ||
425 | * vmw_cmd_cid_check - Check a command header for valid context information. | ||
426 | * | ||
427 | * @dev_priv: Pointer to a device private structure. | ||
428 | * @sw_context: Pointer to the software context. | ||
429 | * @header: A command header with an embedded user-space context handle. | ||
430 | * | ||
431 | * Convenience function: Call vmw_cmd_res_check with the user-space context | ||
432 | * handle embedded in @header. | ||
433 | */ | ||
434 | static int vmw_cmd_cid_check(struct vmw_private *dev_priv, | ||
435 | struct vmw_sw_context *sw_context, | ||
436 | SVGA3dCmdHeader *header) | ||
437 | { | ||
438 | struct vmw_cid_cmd { | ||
439 | SVGA3dCmdHeader header; | ||
440 | __le32 cid; | ||
441 | } *cmd; | ||
442 | |||
443 | cmd = container_of(header, struct vmw_cid_cmd, header); | ||
444 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
445 | user_context_converter, &cmd->cid, NULL); | ||
446 | } | ||
181 | 447 | ||
182 | static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, | 448 | static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, |
183 | struct vmw_sw_context *sw_context, | 449 | struct vmw_sw_context *sw_context, |
@@ -194,7 +460,9 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, | |||
194 | return ret; | 460 | return ret; |
195 | 461 | ||
196 | cmd = container_of(header, struct vmw_sid_cmd, header); | 462 | cmd = container_of(header, struct vmw_sid_cmd, header); |
197 | ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid); | 463 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
464 | user_surface_converter, | ||
465 | &cmd->body.target.sid, NULL); | ||
198 | return ret; | 466 | return ret; |
199 | } | 467 | } |
200 | 468 | ||
@@ -209,10 +477,14 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, | |||
209 | int ret; | 477 | int ret; |
210 | 478 | ||
211 | cmd = container_of(header, struct vmw_sid_cmd, header); | 479 | cmd = container_of(header, struct vmw_sid_cmd, header); |
212 | ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid); | 480 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
481 | user_surface_converter, | ||
482 | &cmd->body.src.sid, NULL); | ||
213 | if (unlikely(ret != 0)) | 483 | if (unlikely(ret != 0)) |
214 | return ret; | 484 | return ret; |
215 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid); | 485 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
486 | user_surface_converter, | ||
487 | &cmd->body.dest.sid, NULL); | ||
216 | } | 488 | } |
217 | 489 | ||
218 | static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, | 490 | static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, |
@@ -226,10 +498,14 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, | |||
226 | int ret; | 498 | int ret; |
227 | 499 | ||
228 | cmd = container_of(header, struct vmw_sid_cmd, header); | 500 | cmd = container_of(header, struct vmw_sid_cmd, header); |
229 | ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid); | 501 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
502 | user_surface_converter, | ||
503 | &cmd->body.src.sid, NULL); | ||
230 | if (unlikely(ret != 0)) | 504 | if (unlikely(ret != 0)) |
231 | return ret; | 505 | return ret; |
232 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid); | 506 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
507 | user_surface_converter, | ||
508 | &cmd->body.dest.sid, NULL); | ||
233 | } | 509 | } |
234 | 510 | ||
235 | static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, | 511 | static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, |
@@ -248,7 +524,9 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, | |||
248 | return -EPERM; | 524 | return -EPERM; |
249 | } | 525 | } |
250 | 526 | ||
251 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid); | 527 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
528 | user_surface_converter, | ||
529 | &cmd->body.srcImage.sid, NULL); | ||
252 | } | 530 | } |
253 | 531 | ||
254 | static int vmw_cmd_present_check(struct vmw_private *dev_priv, | 532 | static int vmw_cmd_present_check(struct vmw_private *dev_priv, |
@@ -268,14 +546,15 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv, | |||
268 | return -EPERM; | 546 | return -EPERM; |
269 | } | 547 | } |
270 | 548 | ||
271 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); | 549 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
550 | user_surface_converter, &cmd->body.sid, | ||
551 | NULL); | ||
272 | } | 552 | } |
273 | 553 | ||
274 | /** | 554 | /** |
275 | * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries. | 555 | * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries. |
276 | * | 556 | * |
277 | * @dev_priv: The device private structure. | 557 | * @dev_priv: The device private structure. |
278 | * @cid: The hardware context for the next query. | ||
279 | * @new_query_bo: The new buffer holding query results. | 558 | * @new_query_bo: The new buffer holding query results. |
280 | * @sw_context: The software context used for this command submission. | 559 | * @sw_context: The software context used for this command submission. |
281 | * | 560 | * |
@@ -283,18 +562,18 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv, | |||
283 | * query results, and if another buffer currently is pinned for query | 562 | * query results, and if another buffer currently is pinned for query |
284 | * results. If so, the function prepares the state of @sw_context for | 563 | * results. If so, the function prepares the state of @sw_context for |
285 | * switching pinned buffers after successful submission of the current | 564 | * switching pinned buffers after successful submission of the current |
286 | * command batch. It also checks whether we're using a new query context. | 565 | * command batch. |
287 | * In that case, it makes sure we emit a query barrier for the old | ||
288 | * context before the current query buffer is fenced. | ||
289 | */ | 566 | */ |
290 | static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, | 567 | static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, |
291 | uint32_t cid, | ||
292 | struct ttm_buffer_object *new_query_bo, | 568 | struct ttm_buffer_object *new_query_bo, |
293 | struct vmw_sw_context *sw_context) | 569 | struct vmw_sw_context *sw_context) |
294 | { | 570 | { |
571 | struct vmw_res_cache_entry *ctx_entry = | ||
572 | &sw_context->res_cache[vmw_res_context]; | ||
295 | int ret; | 573 | int ret; |
296 | bool add_cid = false; | 574 | |
297 | uint32_t cid_to_add; | 575 | BUG_ON(!ctx_entry->valid); |
576 | sw_context->last_query_ctx = ctx_entry->res; | ||
298 | 577 | ||
299 | if (unlikely(new_query_bo != sw_context->cur_query_bo)) { | 578 | if (unlikely(new_query_bo != sw_context->cur_query_bo)) { |
300 | 579 | ||
@@ -304,9 +583,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, | |||
304 | } | 583 | } |
305 | 584 | ||
306 | if (unlikely(sw_context->cur_query_bo != NULL)) { | 585 | if (unlikely(sw_context->cur_query_bo != NULL)) { |
307 | BUG_ON(!sw_context->query_cid_valid); | 586 | sw_context->needs_post_query_barrier = true; |
308 | add_cid = true; | ||
309 | cid_to_add = sw_context->cur_query_cid; | ||
310 | ret = vmw_bo_to_validate_list(sw_context, | 587 | ret = vmw_bo_to_validate_list(sw_context, |
311 | sw_context->cur_query_bo, | 588 | sw_context->cur_query_bo, |
312 | NULL); | 589 | NULL); |
@@ -323,27 +600,6 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, | |||
323 | 600 | ||
324 | } | 601 | } |
325 | 602 | ||
326 | if (unlikely(cid != sw_context->cur_query_cid && | ||
327 | sw_context->query_cid_valid)) { | ||
328 | add_cid = true; | ||
329 | cid_to_add = sw_context->cur_query_cid; | ||
330 | } | ||
331 | |||
332 | sw_context->cur_query_cid = cid; | ||
333 | sw_context->query_cid_valid = true; | ||
334 | |||
335 | if (add_cid) { | ||
336 | struct vmw_resource *ctx = sw_context->cur_ctx; | ||
337 | |||
338 | if (list_empty(&ctx->query_head)) | ||
339 | list_add_tail(&ctx->query_head, | ||
340 | &sw_context->query_list); | ||
341 | ret = vmw_bo_to_validate_list(sw_context, | ||
342 | dev_priv->dummy_query_bo, | ||
343 | NULL); | ||
344 | if (unlikely(ret != 0)) | ||
345 | return ret; | ||
346 | } | ||
347 | return 0; | 603 | return 0; |
348 | } | 604 | } |
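The context for the pending query is no longer passed in as an explicit cid; it is read from the per-type resource cache, so the caller must already have run vmw_cmd_res_check() on a context handle in the same command (the BUG_ON above enforces this). Roughly how the query handlers below use it:

	/* Validates the context and fills the vmw_res_context cache slot. */
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;
	...
	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);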
349 | 605 | ||
@@ -355,10 +611,9 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, | |||
355 | * @sw_context: The software context used for this command submission batch. | 611 | * @sw_context: The software context used for this command submission batch. |
356 | * | 612 | * |
357 | * This function will check if we're switching query buffers, and will then, | 613 | * This function will check if we're switching query buffers, and will then, |
358 | * if no other query waits are issued this command submission batch, | ||
359 | * issue a dummy occlusion query wait used as a query barrier. When the fence | 614 | * issue a dummy occlusion query wait used as a query barrier. When the fence |
360 | * object following that query wait has signaled, we are sure that all | 615 | * object following that query wait has signaled, we are sure that all |
361 | * preseding queries have finished, and the old query buffer can be unpinned. | 616 | * preceding queries have finished, and the old query buffer can be unpinned. |
362 | * However, since both the new query buffer and the old one are fenced with | 617 | * However, since both the new query buffer and the old one are fenced with |
363 | * that fence, we can do an asynchronous unpin now, and be sure that the | 618 | * that fence, we can do an asynchronous unpin now, and be sure that the |
364 | * old query buffer won't be moved until the fence has signaled. | 619 | * old query buffer won't be moved until the fence has signaled. |
@@ -369,20 +624,19 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, | |||
369 | static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, | 624 | static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, |
370 | struct vmw_sw_context *sw_context) | 625 | struct vmw_sw_context *sw_context) |
371 | { | 626 | { |
372 | |||
373 | struct vmw_resource *ctx, *next_ctx; | ||
374 | int ret; | ||
375 | |||
376 | /* | 627 | /* |
377 | * The validate list should still hold references to all | 628 | * The validate list should still hold references to all |
378 | * contexts here. | 629 | * contexts here. |
379 | */ | 630 | */ |
380 | 631 | ||
381 | list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list, | 632 | if (sw_context->needs_post_query_barrier) { |
382 | query_head) { | 633 | struct vmw_res_cache_entry *ctx_entry = |
383 | list_del_init(&ctx->query_head); | 634 | &sw_context->res_cache[vmw_res_context]; |
635 | struct vmw_resource *ctx; | ||
636 | int ret; | ||
384 | 637 | ||
385 | BUG_ON(list_empty(&ctx->validate_head)); | 638 | BUG_ON(!ctx_entry->valid); |
639 | ctx = ctx_entry->res; | ||
386 | 640 | ||
387 | ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id); | 641 | ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id); |
388 | 642 | ||
@@ -396,40 +650,46 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, | |||
396 | ttm_bo_unref(&dev_priv->pinned_bo); | 650 | ttm_bo_unref(&dev_priv->pinned_bo); |
397 | } | 651 | } |
398 | 652 | ||
399 | vmw_bo_pin(sw_context->cur_query_bo, true); | 653 | if (!sw_context->needs_post_query_barrier) { |
654 | vmw_bo_pin(sw_context->cur_query_bo, true); | ||
400 | 655 | ||
401 | /* | 656 | /* |
402 | * We pin also the dummy_query_bo buffer so that we | 657 | * We pin also the dummy_query_bo buffer so that we |
403 | * don't need to validate it when emitting | 658 | * don't need to validate it when emitting |
404 | * dummy queries in context destroy paths. | 659 | * dummy queries in context destroy paths. |
405 | */ | 660 | */ |
406 | 661 | ||
407 | vmw_bo_pin(dev_priv->dummy_query_bo, true); | 662 | vmw_bo_pin(dev_priv->dummy_query_bo, true); |
408 | dev_priv->dummy_query_bo_pinned = true; | 663 | dev_priv->dummy_query_bo_pinned = true; |
409 | 664 | ||
410 | dev_priv->query_cid = sw_context->cur_query_cid; | 665 | BUG_ON(sw_context->last_query_ctx == NULL); |
411 | dev_priv->pinned_bo = | 666 | dev_priv->query_cid = sw_context->last_query_ctx->id; |
412 | ttm_bo_reference(sw_context->cur_query_bo); | 667 | dev_priv->query_cid_valid = true; |
668 | dev_priv->pinned_bo = | ||
669 | ttm_bo_reference(sw_context->cur_query_bo); | ||
670 | } | ||
413 | } | 671 | } |
414 | } | 672 | } |
415 | 673 | ||
416 | /** | 674 | /** |
417 | * vmw_query_switch_backoff - clear query barrier list | 675 | * vmw_translate_guest_pointer - Prepare to translate a user-space buffer |
418 | * @sw_context: The sw context used for this submission batch. | 676 | * handle to a valid SVGAGuestPtr |
419 | * | 677 | * |
420 | * This function is used as part of an error path, where a previously | 678 | * @dev_priv: Pointer to a device private structure. |
421 | * set up list of query barriers needs to be cleared. | 679 | * @sw_context: The software context used for this command batch validation. |
680 | * @ptr: Pointer to the user-space handle to be translated. | ||
681 | * @vmw_bo_p: Points to a location that, on successful return will carry | ||
682 | * a reference-counted pointer to the DMA buffer identified by the | ||
683 | * user-space handle in @id. | ||
422 | * | 684 | * |
685 | * This function saves information needed to translate a user-space buffer | ||
686 | * handle to a valid SVGAGuestPtr. The translation does not take place | ||
687 | * immediately, but during a call to vmw_apply_relocations(). | ||
688 | * This function builds a relocation list and a list of buffers to validate. | ||
689 | * The former needs to be freed using either vmw_apply_relocations() or | ||
690 | * vmw_free_relocations(). The latter needs to be freed using | ||
691 | * vmw_clear_validations(). | ||
423 | */ | 692 | */ |
424 | static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context) | ||
425 | { | ||
426 | struct list_head *list, *next; | ||
427 | |||
428 | list_for_each_safe(list, next, &sw_context->query_list) { | ||
429 | list_del_init(list); | ||
430 | } | ||
431 | } | ||
432 | |||
433 | static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | 693 | static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, |
434 | struct vmw_sw_context *sw_context, | 694 | struct vmw_sw_context *sw_context, |
435 | SVGAGuestPtr *ptr, | 695 | SVGAGuestPtr *ptr, |
@@ -471,6 +731,37 @@ out_no_reloc: | |||
471 | return ret; | 731 | return ret; |
472 | } | 732 | } |
473 | 733 | ||
734 | /** | ||
735 | * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command. | ||
736 | * | ||
737 | * @dev_priv: Pointer to a device private struct. | ||
738 | * @sw_context: The software context used for this command submission. | ||
739 | * @header: Pointer to the command header in the command stream. | ||
740 | */ | ||
741 | static int vmw_cmd_begin_query(struct vmw_private *dev_priv, | ||
742 | struct vmw_sw_context *sw_context, | ||
743 | SVGA3dCmdHeader *header) | ||
744 | { | ||
745 | struct vmw_begin_query_cmd { | ||
746 | SVGA3dCmdHeader header; | ||
747 | SVGA3dCmdBeginQuery q; | ||
748 | } *cmd; | ||
749 | |||
750 | cmd = container_of(header, struct vmw_begin_query_cmd, | ||
751 | header); | ||
752 | |||
753 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
754 | user_context_converter, &cmd->q.cid, | ||
755 | NULL); | ||
756 | } | ||
757 | |||
758 | /** | ||
759 | * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command. | ||
760 | * | ||
761 | * @dev_priv: Pointer to a device private struct. | ||
762 | * @sw_context: The software context used for this command submission. | ||
763 | * @header: Pointer to the command header in the command stream. | ||
764 | */ | ||
474 | static int vmw_cmd_end_query(struct vmw_private *dev_priv, | 765 | static int vmw_cmd_end_query(struct vmw_private *dev_priv, |
475 | struct vmw_sw_context *sw_context, | 766 | struct vmw_sw_context *sw_context, |
476 | SVGA3dCmdHeader *header) | 767 | SVGA3dCmdHeader *header) |
@@ -493,13 +784,19 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv, | |||
493 | if (unlikely(ret != 0)) | 784 | if (unlikely(ret != 0)) |
494 | return ret; | 785 | return ret; |
495 | 786 | ||
496 | ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid, | 787 | ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); |
497 | &vmw_bo->base, sw_context); | ||
498 | 788 | ||
499 | vmw_dmabuf_unreference(&vmw_bo); | 789 | vmw_dmabuf_unreference(&vmw_bo); |
500 | return ret; | 790 | return ret; |
501 | } | 791 | } |
502 | 792 | ||
793 | /** | ||
794 | * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command. | ||
795 | * | ||
796 | * @dev_priv: Pointer to a device private struct. | ||
797 | * @sw_context: The software context used for this command submission. | ||
798 | * @header: Pointer to the command header in the command stream. | ||
799 | */ | ||
503 | static int vmw_cmd_wait_query(struct vmw_private *dev_priv, | 800 | static int vmw_cmd_wait_query(struct vmw_private *dev_priv, |
504 | struct vmw_sw_context *sw_context, | 801 | struct vmw_sw_context *sw_context, |
505 | SVGA3dCmdHeader *header) | 802 | SVGA3dCmdHeader *header) |
@@ -510,7 +807,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, | |||
510 | SVGA3dCmdWaitForQuery q; | 807 | SVGA3dCmdWaitForQuery q; |
511 | } *cmd; | 808 | } *cmd; |
512 | int ret; | 809 | int ret; |
513 | struct vmw_resource *ctx; | ||
514 | 810 | ||
515 | cmd = container_of(header, struct vmw_query_cmd, header); | 811 | cmd = container_of(header, struct vmw_query_cmd, header); |
516 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | 812 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); |
@@ -524,16 +820,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, | |||
524 | return ret; | 820 | return ret; |
525 | 821 | ||
526 | vmw_dmabuf_unreference(&vmw_bo); | 822 | vmw_dmabuf_unreference(&vmw_bo); |
527 | |||
528 | /* | ||
529 | * This wait will act as a barrier for previous waits for this | ||
530 | * context. | ||
531 | */ | ||
532 | |||
533 | ctx = sw_context->cur_ctx; | ||
534 | if (!list_empty(&ctx->query_head)) | ||
535 | list_del_init(&ctx->query_head); | ||
536 | |||
537 | return 0; | 823 | return 0; |
538 | } | 824 | } |
539 | 825 | ||
@@ -542,14 +828,12 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
542 | SVGA3dCmdHeader *header) | 828 | SVGA3dCmdHeader *header) |
543 | { | 829 | { |
544 | struct vmw_dma_buffer *vmw_bo = NULL; | 830 | struct vmw_dma_buffer *vmw_bo = NULL; |
545 | struct ttm_buffer_object *bo; | ||
546 | struct vmw_surface *srf = NULL; | 831 | struct vmw_surface *srf = NULL; |
547 | struct vmw_dma_cmd { | 832 | struct vmw_dma_cmd { |
548 | SVGA3dCmdHeader header; | 833 | SVGA3dCmdHeader header; |
549 | SVGA3dCmdSurfaceDMA dma; | 834 | SVGA3dCmdSurfaceDMA dma; |
550 | } *cmd; | 835 | } *cmd; |
551 | int ret; | 836 | int ret; |
552 | struct vmw_resource *res; | ||
553 | 837 | ||
554 | cmd = container_of(header, struct vmw_dma_cmd, header); | 838 | cmd = container_of(header, struct vmw_dma_cmd, header); |
555 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, | 839 | ret = vmw_translate_guest_ptr(dev_priv, sw_context, |
@@ -558,37 +842,20 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
558 | if (unlikely(ret != 0)) | 842 | if (unlikely(ret != 0)) |
559 | return ret; | 843 | return ret; |
560 | 844 | ||
561 | bo = &vmw_bo->base; | 845 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
562 | ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile, | 846 | user_surface_converter, &cmd->dma.host.sid, |
563 | cmd->dma.host.sid, &srf); | 847 | NULL); |
564 | if (ret) { | ||
565 | DRM_ERROR("could not find surface\n"); | ||
566 | goto out_no_reloc; | ||
567 | } | ||
568 | |||
569 | ret = vmw_surface_validate(dev_priv, srf); | ||
570 | if (unlikely(ret != 0)) { | 848 | if (unlikely(ret != 0)) { |
571 | if (ret != -ERESTARTSYS) | 849 | if (unlikely(ret != -ERESTARTSYS)) |
572 | DRM_ERROR("Culd not validate surface.\n"); | 850 | DRM_ERROR("could not find surface for DMA.\n"); |
573 | goto out_no_validate; | 851 | goto out_no_surface; |
574 | } | 852 | } |
575 | 853 | ||
576 | /* | 854 | srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); |
577 | * Patch command stream with device SID. | ||
578 | */ | ||
579 | cmd->dma.host.sid = srf->res.id; | ||
580 | vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header); | ||
581 | |||
582 | vmw_dmabuf_unreference(&vmw_bo); | ||
583 | |||
584 | res = &srf->res; | ||
585 | vmw_resource_to_validate_list(sw_context, &res); | ||
586 | 855 | ||
587 | return 0; | 856 | vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header); |
588 | 857 | ||
589 | out_no_validate: | 858 | out_no_surface: |
590 | vmw_surface_unreference(&srf); | ||
591 | out_no_reloc: | ||
592 | vmw_dmabuf_unreference(&vmw_bo); | 859 | vmw_dmabuf_unreference(&vmw_bo); |
593 | return ret; | 860 | return ret; |
594 | } | 861 | } |
@@ -621,8 +888,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv, | |||
621 | } | 888 | } |
622 | 889 | ||
623 | for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) { | 890 | for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) { |
624 | ret = vmw_cmd_sid_check(dev_priv, sw_context, | 891 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
625 | &decl->array.surfaceId); | 892 | user_surface_converter, |
893 | &decl->array.surfaceId, NULL); | ||
626 | if (unlikely(ret != 0)) | 894 | if (unlikely(ret != 0)) |
627 | return ret; | 895 | return ret; |
628 | } | 896 | } |
@@ -636,8 +904,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv, | |||
636 | 904 | ||
637 | range = (SVGA3dPrimitiveRange *) decl; | 905 | range = (SVGA3dPrimitiveRange *) decl; |
638 | for (i = 0; i < cmd->body.numRanges; ++i, ++range) { | 906 | for (i = 0; i < cmd->body.numRanges; ++i, ++range) { |
639 | ret = vmw_cmd_sid_check(dev_priv, sw_context, | 907 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
640 | &range->indexArray.surfaceId); | 908 | user_surface_converter, |
909 | &range->indexArray.surfaceId, NULL); | ||
641 | if (unlikely(ret != 0)) | 910 | if (unlikely(ret != 0)) |
642 | return ret; | 911 | return ret; |
643 | } | 912 | } |
@@ -668,8 +937,9 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv, | |||
668 | if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) | 937 | if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) |
669 | continue; | 938 | continue; |
670 | 939 | ||
671 | ret = vmw_cmd_sid_check(dev_priv, sw_context, | 940 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
672 | &cur_state->value); | 941 | user_surface_converter, |
942 | &cur_state->value, NULL); | ||
673 | if (unlikely(ret != 0)) | 943 | if (unlikely(ret != 0)) |
674 | return ret; | 944 | return ret; |
675 | } | 945 | } |
@@ -700,6 +970,34 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, | |||
700 | return ret; | 970 | return ret; |
701 | } | 971 | } |
702 | 972 | ||
973 | /** | ||
974 | * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER | ||
975 | * command | ||
976 | * | ||
977 | * @dev_priv: Pointer to a device private struct. | ||
978 | * @sw_context: The software context being used for this batch. | ||
979 | * @header: Pointer to the command header in the command stream. | ||
980 | */ | ||
981 | static int vmw_cmd_set_shader(struct vmw_private *dev_priv, | ||
982 | struct vmw_sw_context *sw_context, | ||
983 | SVGA3dCmdHeader *header) | ||
984 | { | ||
985 | struct vmw_set_shader_cmd { | ||
986 | SVGA3dCmdHeader header; | ||
987 | SVGA3dCmdSetShader body; | ||
988 | } *cmd; | ||
989 | int ret; | ||
990 | |||
991 | cmd = container_of(header, struct vmw_set_shader_cmd, | ||
992 | header); | ||
993 | |||
994 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | ||
995 | if (unlikely(ret != 0)) | ||
996 | return ret; | ||
997 | |||
998 | return 0; | ||
999 | } | ||
1000 | |||
703 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, | 1001 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, |
704 | struct vmw_sw_context *sw_context, | 1002 | struct vmw_sw_context *sw_context, |
705 | void *buf, uint32_t *size) | 1003 | void *buf, uint32_t *size) |
@@ -773,16 +1071,20 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = { | |||
773 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check), | 1071 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check), |
774 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check), | 1072 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check), |
775 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check), | 1073 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check), |
776 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check), | 1074 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader), |
777 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check), | 1075 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check), |
778 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), | 1076 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), |
779 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), | 1077 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), |
780 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), | 1078 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query), |
781 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query), | 1079 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query), |
782 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query), | 1080 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query), |
783 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), | 1081 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), |
784 | VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, | 1082 | VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, |
785 | &vmw_cmd_blt_surf_screen_check) | 1083 | &vmw_cmd_blt_surf_screen_check), |
1084 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid), | ||
1085 | VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid), | ||
1086 | VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid), | ||
1087 | VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid), | ||
786 | }; | 1088 | }; |
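vmw_cmd_check() (not changed by this hunk) dispatches through this table on the SVGA3D command id. A rough sketch of that lookup, assuming the table is indexed from SVGA_3D_CMD_BASE as the VMW_CMD_DEF entries suggest:

	cmd_id = le32_to_cpu(header->id) - SVGA_3D_CMD_BASE;
	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		return -EINVAL;
	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);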
787 | 1089 | ||
788 | static int vmw_cmd_check(struct vmw_private *dev_priv, | 1090 | static int vmw_cmd_check(struct vmw_private *dev_priv, |
@@ -829,6 +1131,8 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv, | |||
829 | int32_t cur_size = size; | 1131 | int32_t cur_size = size; |
830 | int ret; | 1132 | int ret; |
831 | 1133 | ||
1134 | sw_context->buf_start = buf; | ||
1135 | |||
832 | while (cur_size > 0) { | 1136 | while (cur_size > 0) { |
833 | size = cur_size; | 1137 | size = cur_size; |
834 | ret = vmw_cmd_check(dev_priv, sw_context, buf, &size); | 1138 | ret = vmw_cmd_check(dev_priv, sw_context, buf, &size); |
@@ -860,43 +1164,63 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context) | |||
860 | 1164 | ||
861 | for (i = 0; i < sw_context->cur_reloc; ++i) { | 1165 | for (i = 0; i < sw_context->cur_reloc; ++i) { |
862 | reloc = &sw_context->relocs[i]; | 1166 | reloc = &sw_context->relocs[i]; |
863 | validate = &sw_context->val_bufs[reloc->index]; | 1167 | validate = &sw_context->val_bufs[reloc->index].base; |
864 | bo = validate->bo; | 1168 | bo = validate->bo; |
865 | if (bo->mem.mem_type == TTM_PL_VRAM) { | 1169 | switch (bo->mem.mem_type) { |
1170 | case TTM_PL_VRAM: | ||
866 | reloc->location->offset += bo->offset; | 1171 | reloc->location->offset += bo->offset; |
867 | reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER; | 1172 | reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER; |
868 | } else | 1173 | break; |
1174 | case VMW_PL_GMR: | ||
869 | reloc->location->gmrId = bo->mem.start; | 1175 | reloc->location->gmrId = bo->mem.start; |
1176 | break; | ||
1177 | default: | ||
1178 | BUG(); | ||
1179 | } | ||
870 | } | 1180 | } |
871 | vmw_free_relocations(sw_context); | 1181 | vmw_free_relocations(sw_context); |
872 | } | 1182 | } |
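Two relocation mechanisms now coexist: the buffer relocations above patch SVGAGuestPtr locations according to where TTM finally placed the buffer, while the resource relocations introduced by this patch fix up device ids chosen at resource validation. Side by side:

	/* Buffer relocation: placement-dependent, resolved here. */
	reloc->location->offset += bo->offset;	/* TTM_PL_VRAM */
	reloc->location->gmrId = bo->mem.start;	/* VMW_PL_GMR  */

	/* Resource relocation: id-dependent, resolved in
	 * vmw_resource_relocations_apply(). */
	cb[rel->offset] = rel->res->id;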
873 | 1183 | ||
1184 | /** | ||
1185 | * vmw_resource_list_unreference - Free up a resource list and unreference | ||
1186 | * all resources referenced by it. | ||
1187 | * | ||
1188 | * @list: The resource list. | ||
1189 | */ | ||
1190 | static void vmw_resource_list_unreference(struct list_head *list) | ||
1191 | { | ||
1192 | struct vmw_resource_val_node *val, *val_next; | ||
1193 | |||
1194 | /* | ||
1195 | * Drop references to resources held during command submission. | ||
1196 | */ | ||
1197 | |||
1198 | list_for_each_entry_safe(val, val_next, list, head) { | ||
1199 | list_del_init(&val->head); | ||
1200 | vmw_resource_unreference(&val->res); | ||
1201 | kfree(val); | ||
1202 | } | ||
1203 | } | ||
1204 | |||
874 | static void vmw_clear_validations(struct vmw_sw_context *sw_context) | 1205 | static void vmw_clear_validations(struct vmw_sw_context *sw_context) |
875 | { | 1206 | { |
876 | struct ttm_validate_buffer *entry, *next; | 1207 | struct vmw_validate_buffer *entry, *next; |
877 | struct vmw_resource *res, *res_next; | 1208 | struct vmw_resource_val_node *val; |
878 | 1209 | ||
879 | /* | 1210 | /* |
880 | * Drop references to DMA buffers held during command submission. | 1211 | * Drop references to DMA buffers held during command submission. |
881 | */ | 1212 | */ |
882 | list_for_each_entry_safe(entry, next, &sw_context->validate_nodes, | 1213 | list_for_each_entry_safe(entry, next, &sw_context->validate_nodes, |
883 | head) { | 1214 | base.head) { |
884 | list_del(&entry->head); | 1215 | list_del(&entry->base.head); |
885 | vmw_dmabuf_validate_clear(entry->bo); | 1216 | ttm_bo_unref(&entry->base.bo); |
886 | ttm_bo_unref(&entry->bo); | 1217 | (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash); |
887 | sw_context->cur_val_buf--; | 1218 | sw_context->cur_val_buf--; |
888 | } | 1219 | } |
889 | BUG_ON(sw_context->cur_val_buf != 0); | 1220 | BUG_ON(sw_context->cur_val_buf != 0); |
890 | 1221 | ||
891 | /* | 1222 | list_for_each_entry(val, &sw_context->resource_list, head) |
892 | * Drop references to resources held during command submission. | 1223 | (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash); |
893 | */ | ||
894 | vmw_resource_unreserve(&sw_context->resource_list); | ||
895 | list_for_each_entry_safe(res, res_next, &sw_context->resource_list, | ||
896 | validate_head) { | ||
897 | list_del_init(&res->validate_head); | ||
898 | vmw_resource_unreference(&res); | ||
899 | } | ||
900 | } | 1224 | } |
901 | 1225 | ||
902 | static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | 1226 | static int vmw_validate_single_buffer(struct vmw_private *dev_priv, |
@@ -939,11 +1263,11 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | |||
939 | static int vmw_validate_buffers(struct vmw_private *dev_priv, | 1263 | static int vmw_validate_buffers(struct vmw_private *dev_priv, |
940 | struct vmw_sw_context *sw_context) | 1264 | struct vmw_sw_context *sw_context) |
941 | { | 1265 | { |
942 | struct ttm_validate_buffer *entry; | 1266 | struct vmw_validate_buffer *entry; |
943 | int ret; | 1267 | int ret; |
944 | 1268 | ||
945 | list_for_each_entry(entry, &sw_context->validate_nodes, head) { | 1269 | list_for_each_entry(entry, &sw_context->validate_nodes, base.head) { |
946 | ret = vmw_validate_single_buffer(dev_priv, entry->bo); | 1270 | ret = vmw_validate_single_buffer(dev_priv, entry->base.bo); |
947 | if (unlikely(ret != 0)) | 1271 | if (unlikely(ret != 0)) |
948 | return ret; | 1272 | return ret; |
949 | } | 1273 | } |
@@ -1106,6 +1430,8 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
1106 | { | 1430 | { |
1107 | struct vmw_sw_context *sw_context = &dev_priv->ctx; | 1431 | struct vmw_sw_context *sw_context = &dev_priv->ctx; |
1108 | struct vmw_fence_obj *fence = NULL; | 1432 | struct vmw_fence_obj *fence = NULL; |
1433 | struct vmw_resource *error_resource; | ||
1434 | struct list_head resource_list; | ||
1109 | uint32_t handle; | 1435 | uint32_t handle; |
1110 | void *cmd; | 1436 | void *cmd; |
1111 | int ret; | 1437 | int ret; |
@@ -1135,24 +1461,33 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
1135 | sw_context->kernel = true; | 1461 | sw_context->kernel = true; |
1136 | 1462 | ||
1137 | sw_context->tfile = vmw_fpriv(file_priv)->tfile; | 1463 | sw_context->tfile = vmw_fpriv(file_priv)->tfile; |
1138 | sw_context->cid_valid = false; | ||
1139 | sw_context->sid_valid = false; | ||
1140 | sw_context->cur_reloc = 0; | 1464 | sw_context->cur_reloc = 0; |
1141 | sw_context->cur_val_buf = 0; | 1465 | sw_context->cur_val_buf = 0; |
1142 | sw_context->fence_flags = 0; | 1466 | sw_context->fence_flags = 0; |
1143 | INIT_LIST_HEAD(&sw_context->query_list); | ||
1144 | INIT_LIST_HEAD(&sw_context->resource_list); | 1467 | INIT_LIST_HEAD(&sw_context->resource_list); |
1145 | sw_context->cur_query_bo = dev_priv->pinned_bo; | 1468 | sw_context->cur_query_bo = dev_priv->pinned_bo; |
1146 | sw_context->cur_query_cid = dev_priv->query_cid; | 1469 | sw_context->last_query_ctx = NULL; |
1147 | sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL); | 1470 | sw_context->needs_post_query_barrier = false; |
1148 | 1471 | memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache)); | |
1149 | INIT_LIST_HEAD(&sw_context->validate_nodes); | 1472 | INIT_LIST_HEAD(&sw_context->validate_nodes); |
1473 | INIT_LIST_HEAD(&sw_context->res_relocations); | ||
1474 | if (!sw_context->res_ht_initialized) { | ||
1475 | ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER); | ||
1476 | if (unlikely(ret != 0)) | ||
1477 | goto out_unlock; | ||
1478 | sw_context->res_ht_initialized = true; | ||
1479 | } | ||
1150 | 1480 | ||
1481 | INIT_LIST_HEAD(&resource_list); | ||
1151 | ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, | 1482 | ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, |
1152 | command_size); | 1483 | command_size); |
1153 | if (unlikely(ret != 0)) | 1484 | if (unlikely(ret != 0)) |
1154 | goto out_err; | 1485 | goto out_err; |
1155 | 1486 | ||
1487 | ret = vmw_resources_reserve(sw_context); | ||
1488 | if (unlikely(ret != 0)) | ||
1489 | goto out_err; | ||
1490 | |||
1156 | ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes); | 1491 | ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes); |
1157 | if (unlikely(ret != 0)) | 1492 | if (unlikely(ret != 0)) |
1158 | goto out_err; | 1493 | goto out_err; |
@@ -1161,24 +1496,31 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
1161 | if (unlikely(ret != 0)) | 1496 | if (unlikely(ret != 0)) |
1162 | goto out_err; | 1497 | goto out_err; |
1163 | 1498 | ||
1164 | vmw_apply_relocations(sw_context); | 1499 | ret = vmw_resources_validate(sw_context); |
1500 | if (unlikely(ret != 0)) | ||
1501 | goto out_err; | ||
1165 | 1502 | ||
1166 | if (throttle_us) { | 1503 | if (throttle_us) { |
1167 | ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, | 1504 | ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, |
1168 | throttle_us); | 1505 | throttle_us); |
1169 | 1506 | ||
1170 | if (unlikely(ret != 0)) | 1507 | if (unlikely(ret != 0)) |
1171 | goto out_throttle; | 1508 | goto out_err; |
1172 | } | 1509 | } |
1173 | 1510 | ||
1174 | cmd = vmw_fifo_reserve(dev_priv, command_size); | 1511 | cmd = vmw_fifo_reserve(dev_priv, command_size); |
1175 | if (unlikely(cmd == NULL)) { | 1512 | if (unlikely(cmd == NULL)) { |
1176 | DRM_ERROR("Failed reserving fifo space for commands.\n"); | 1513 | DRM_ERROR("Failed reserving fifo space for commands.\n"); |
1177 | ret = -ENOMEM; | 1514 | ret = -ENOMEM; |
1178 | goto out_throttle; | 1515 | goto out_err; |
1179 | } | 1516 | } |
1180 | 1517 | ||
1518 | vmw_apply_relocations(sw_context); | ||
1181 | memcpy(cmd, kernel_commands, command_size); | 1519 | memcpy(cmd, kernel_commands, command_size); |
1520 | |||
1521 | vmw_resource_relocations_apply(cmd, &sw_context->res_relocations); | ||
1522 | vmw_resource_relocations_free(&sw_context->res_relocations); | ||
1523 | |||
1182 | vmw_fifo_commit(dev_priv, command_size); | 1524 | vmw_fifo_commit(dev_priv, command_size); |
1183 | 1525 | ||
1184 | vmw_query_bo_switch_commit(dev_priv, sw_context); | 1526 | vmw_query_bo_switch_commit(dev_priv, sw_context); |
@@ -1194,9 +1536,14 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
1194 | if (ret != 0) | 1536 | if (ret != 0) |
1195 | DRM_ERROR("Fence submission error. Syncing.\n"); | 1537 | DRM_ERROR("Fence submission error. Syncing.\n"); |
1196 | 1538 | ||
1539 | vmw_resource_list_unreserve(&sw_context->resource_list, false); | ||
1197 | ttm_eu_fence_buffer_objects(&sw_context->validate_nodes, | 1540 | ttm_eu_fence_buffer_objects(&sw_context->validate_nodes, |
1198 | (void *) fence); | 1541 | (void *) fence); |
1199 | 1542 | ||
1543 | if (unlikely(dev_priv->pinned_bo != NULL && | ||
1544 | !dev_priv->query_cid_valid)) | ||
1545 | __vmw_execbuf_release_pinned_bo(dev_priv, fence); | ||
1546 | |||
1200 | vmw_clear_validations(sw_context); | 1547 | vmw_clear_validations(sw_context); |
1201 | vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, | 1548 | vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, |
1202 | user_fence_rep, fence, handle); | 1549 | user_fence_rep, fence, handle); |
@@ -1209,17 +1556,40 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
1209 | vmw_fence_obj_unreference(&fence); | 1556 | vmw_fence_obj_unreference(&fence); |
1210 | } | 1557 | } |
1211 | 1558 | ||
1559 | list_splice_init(&sw_context->resource_list, &resource_list); | ||
1212 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 1560 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
1561 | |||
1562 | /* | ||
1563 | * Unreference resources outside of the cmdbuf_mutex to | ||
1564 | * avoid deadlocks in resource destruction paths. | ||
1565 | */ | ||
1566 | vmw_resource_list_unreference(&resource_list); | ||
1567 | |||
1213 | return 0; | 1568 | return 0; |
1214 | 1569 | ||
1215 | out_err: | 1570 | out_err: |
1571 | vmw_resource_relocations_free(&sw_context->res_relocations); | ||
1216 | vmw_free_relocations(sw_context); | 1572 | vmw_free_relocations(sw_context); |
1217 | out_throttle: | ||
1218 | vmw_query_switch_backoff(sw_context); | ||
1219 | ttm_eu_backoff_reservation(&sw_context->validate_nodes); | 1573 | ttm_eu_backoff_reservation(&sw_context->validate_nodes); |
1574 | vmw_resource_list_unreserve(&sw_context->resource_list, true); | ||
1220 | vmw_clear_validations(sw_context); | 1575 | vmw_clear_validations(sw_context); |
1576 | if (unlikely(dev_priv->pinned_bo != NULL && | ||
1577 | !dev_priv->query_cid_valid)) | ||
1578 | __vmw_execbuf_release_pinned_bo(dev_priv, NULL); | ||
1221 | out_unlock: | 1579 | out_unlock: |
1580 | list_splice_init(&sw_context->resource_list, &resource_list); | ||
1581 | error_resource = sw_context->error_resource; | ||
1582 | sw_context->error_resource = NULL; | ||
1222 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 1583 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
1584 | |||
1585 | /* | ||
1586 | * Unreference resources outside of the cmdbuf_mutex to | ||
1587 | * avoid deadlocks in resource destruction paths. | ||
1588 | */ | ||
1589 | vmw_resource_list_unreference(&resource_list); | ||
1590 | if (unlikely(error_resource != NULL)) | ||
1591 | vmw_resource_unreference(&error_resource); | ||
1592 | |||
1223 | return ret; | 1593 | return ret; |
1224 | } | 1594 | } |
1225 | 1595 | ||
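Both the success path and the error paths above splice sw_context->resource_list onto a local list while cmdbuf_mutex is still held, and only drop the references after the unlock, because a resource destructor may need locks that must not nest inside cmdbuf_mutex. A self-contained sketch of that splice-then-release pattern, using pthreads and a hypothetical node_put() destructor:

    #include <pthread.h>
    #include <stddef.h>

    struct node {
            struct node *next;
    };

    static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *ctx_list;

    /* Hypothetical destructor; may sleep or take locks that must not
     * nest inside ctx_lock. */
    extern void node_put(struct node *n);

    static void ctx_release_all(void)
    {
            struct node *local, *n;

            /* Splice the whole list to a private head under the lock... */
            pthread_mutex_lock(&ctx_lock);
            local = ctx_list;
            ctx_list = NULL;
            pthread_mutex_unlock(&ctx_lock);

            /* ...and run the destructors only after the unlock. */
            while ((n = local) != NULL) {
                    local = n->next;
                    node_put(n);
            }
    }

Stealing the whole list under the lock keeps the critical section short, and the unlocked walk is safe because no other thread can still reach the spliced nodes.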
@@ -1244,13 +1614,13 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv) | |||
1244 | 1614 | ||
1245 | 1615 | ||
1246 | /** | 1616 | /** |
1247 | * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned | 1617 | * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned |
1248 | * query bo. | 1618 | * query bo. |
1249 | * | 1619 | * |
1250 | * @dev_priv: The device private structure. | 1620 | * @dev_priv: The device private structure. |
1251 | * @only_on_cid_match: Only flush and unpin if the current active query cid | 1621 | * @fence: If non-NULL, should point to a struct vmw_fence_obj issued |
1252 | * matches @cid. | 1622 | * _after_ a query barrier that flushes all queries touching the current |
1253 | * @cid: Optional context id to match. | 1623 | * buffer pointed to by @dev_priv->pinned_bo. |
1254 | * | 1624 | * |
1255 | * This function should be used to unpin the pinned query bo, or | 1625 | * This function should be used to unpin the pinned query bo, or |
1256 | * as a query barrier when we need to make sure that all queries have | 1626 | * as a query barrier when we need to make sure that all queries have |
@@ -1263,23 +1633,21 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv) | |||
1263 | * | 1633 | * |
1264 | * The function will synchronize on the previous query barrier, and will | 1634 | * The function will synchronize on the previous query barrier, and will |
1265 | * thus not finish until that barrier has executed. | 1635 | * thus not finish until that barrier has executed. |
1636 | * | ||
1637 | * The @dev_priv->cmdbuf_mutex needs to be held by the current thread | ||
1638 | * before calling this function. | ||
1266 | */ | 1639 | */ |
1267 | void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, | 1640 | void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, |
1268 | bool only_on_cid_match, uint32_t cid) | 1641 | struct vmw_fence_obj *fence) |
1269 | { | 1642 | { |
1270 | int ret = 0; | 1643 | int ret = 0; |
1271 | struct list_head validate_list; | 1644 | struct list_head validate_list; |
1272 | struct ttm_validate_buffer pinned_val, query_val; | 1645 | struct ttm_validate_buffer pinned_val, query_val; |
1273 | struct vmw_fence_obj *fence; | 1646 | struct vmw_fence_obj *lfence = NULL; |
1274 | |||
1275 | mutex_lock(&dev_priv->cmdbuf_mutex); | ||
1276 | 1647 | ||
1277 | if (dev_priv->pinned_bo == NULL) | 1648 | if (dev_priv->pinned_bo == NULL) |
1278 | goto out_unlock; | 1649 | goto out_unlock; |
1279 | 1650 | ||
1280 | if (only_on_cid_match && cid != dev_priv->query_cid) | ||
1281 | goto out_unlock; | ||
1282 | |||
1283 | INIT_LIST_HEAD(&validate_list); | 1651 | INIT_LIST_HEAD(&validate_list); |
1284 | 1652 | ||
1285 | pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo); | 1653 | pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo); |
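With the mutex_lock()/mutex_unlock() calls removed, holding dev_priv->cmdbuf_mutex becomes a documented precondition of the double-underscore variant rather than something it acquires itself. The patch relies on the comment alone; a common way to make such a contract checkable on debug kernels (an assumption here, not something this diff adds) is a lockdep annotation at the top of the function:

    /*
     * Hypothetical hardening, not part of this patch: with
     * CONFIG_LOCKDEP enabled this turns the documented "caller must
     * hold cmdbuf_mutex" rule into a runtime check.
     */
    lockdep_assert_held(&dev_priv->cmdbuf_mutex);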
@@ -1297,25 +1665,34 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, | |||
1297 | goto out_no_reserve; | 1665 | goto out_no_reserve; |
1298 | } | 1666 | } |
1299 | 1667 | ||
1300 | ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); | 1668 | if (dev_priv->query_cid_valid) { |
1301 | if (unlikely(ret != 0)) { | 1669 | BUG_ON(fence != NULL); |
1302 | vmw_execbuf_unpin_panic(dev_priv); | 1670 | ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); |
1303 | goto out_no_emit; | 1671 | if (unlikely(ret != 0)) { |
1672 | vmw_execbuf_unpin_panic(dev_priv); | ||
1673 | goto out_no_emit; | ||
1674 | } | ||
1675 | dev_priv->query_cid_valid = false; | ||
1304 | } | 1676 | } |
1305 | 1677 | ||
1306 | vmw_bo_pin(dev_priv->pinned_bo, false); | 1678 | vmw_bo_pin(dev_priv->pinned_bo, false); |
1307 | vmw_bo_pin(dev_priv->dummy_query_bo, false); | 1679 | vmw_bo_pin(dev_priv->dummy_query_bo, false); |
1308 | dev_priv->dummy_query_bo_pinned = false; | 1680 | dev_priv->dummy_query_bo_pinned = false; |
1309 | 1681 | ||
1310 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); | 1682 | if (fence == NULL) { |
1683 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence, | ||
1684 | NULL); | ||
1685 | fence = lfence; | ||
1686 | } | ||
1311 | ttm_eu_fence_buffer_objects(&validate_list, (void *) fence); | 1687 | ttm_eu_fence_buffer_objects(&validate_list, (void *) fence); |
1688 | if (lfence != NULL) | ||
1689 | vmw_fence_obj_unreference(&lfence); | ||
1312 | 1690 | ||
1313 | ttm_bo_unref(&query_val.bo); | 1691 | ttm_bo_unref(&query_val.bo); |
1314 | ttm_bo_unref(&pinned_val.bo); | 1692 | ttm_bo_unref(&pinned_val.bo); |
1315 | ttm_bo_unref(&dev_priv->pinned_bo); | 1693 | ttm_bo_unref(&dev_priv->pinned_bo); |
1316 | 1694 | ||
1317 | out_unlock: | 1695 | out_unlock: |
1318 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
1319 | return; | 1696 | return; |
1320 | 1697 | ||
1321 | out_no_emit: | 1698 | out_no_emit: |
@@ -1324,6 +1701,31 @@ out_no_reserve: | |||
1324 | ttm_bo_unref(&query_val.bo); | 1701 | ttm_bo_unref(&query_val.bo); |
1325 | ttm_bo_unref(&pinned_val.bo); | 1702 | ttm_bo_unref(&pinned_val.bo); |
1326 | ttm_bo_unref(&dev_priv->pinned_bo); | 1703 | ttm_bo_unref(&dev_priv->pinned_bo); |
1704 | } | ||
1705 | |||
1706 | /** | ||
1707 | * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned | ||
1708 | * query bo. | ||
1709 | * | ||
1710 | * @dev_priv: The device private structure. | ||
1711 | * | ||
1712 | * This function should be used to unpin the pinned query bo, or | ||
1713 | * as a query barrier when we need to make sure that all queries have | ||
1714 | * finished before the next fifo command. (For example on hardware | ||
1715 | * context destructions where the hardware may otherwise leak unfinished | ||
1716 | * queries). | ||
1717 | * | ||
1718 | * This function does not return any failure codes, but makes attempts | ||
1719 | * to do safe unpinning in case of errors. | ||
1720 | * | ||
1721 | * The function will synchronize on the previous query barrier, and will | ||
1722 | * thus not finish until that barrier has executed. | ||
1723 | */ | ||
1724 | void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv) | ||
1725 | { | ||
1726 | mutex_lock(&dev_priv->cmdbuf_mutex); | ||
1727 | if (dev_priv->query_cid_valid) | ||
1728 | __vmw_execbuf_release_pinned_bo(dev_priv, NULL); | ||
1327 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 1729 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
1328 | } | 1730 | } |
1329 | 1731 | ||
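The new public vmw_execbuf_release_pinned_bo() is reduced to a self-locking wrapper around the double-underscore worker, following the usual kernel convention that __foo() assumes the lock is held while foo() takes it. A tiny sketch of that pairing with hypothetical names (pthreads standing in for struct mutex):

    #include <pthread.h>

    struct ctx {
            pthread_mutex_t lock;
            int pending;
    };

    /* Worker: the double underscore signals that the caller already
     * holds c->lock. */
    static void __ctx_flush(struct ctx *c)
    {
            c->pending = 0;
    }

    /* Public entry point: the self-locking convenience wrapper. */
    static void ctx_flush(struct ctx *c)
    {
            pthread_mutex_lock(&c->lock);
            __ctx_flush(c);
            pthread_mutex_unlock(&c->lock);
    }

Splitting the functions this way lets paths that already hold the lock, such as the error handling in vmw_execbuf_process(), call the worker directly without deadlocking.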