author     Thomas Hellstrom <thellstrom@vmware.com>   2018-09-26 09:28:55 -0400
committer  Thomas Hellstrom <thellstrom@vmware.com>   2018-09-27 09:21:36 -0400
commit     9c079b8ce8bf8e0394149eb39c78b04285644bcc
tree       688bfedd151ab3dc2bde0d9df094b6b80a1ca8d3
parent     84e1bf06bc457f8e00e2e679d48365aeba919673
drm/vmwgfx: Adapt execbuf to the new validation api
Strip the old execbuf validation functionality and use the new API instead.
Also switch the kms code over to the new API, replacing the now-removed
execbuf helper it used to call.
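
For reviewers, the shape of the conversion in one place: bookkeeping that
used to live in open-coded arrays inside struct vmw_sw_context now goes
through the shared validation module. A minimal before/after sketch; the
trailing bool arguments of vmw_validation_add_bo() are copied from the call
sites in this patch, and the comments on them are our reading rather than
authoritative documentation:

    /* Before: fixed per-submission arrays inside struct vmw_sw_context. */
    ret = vmw_bo_to_validate_list(sw_context, vbo,
                                  true /* validate_as_mob */, NULL);

    /* After: the validation context owns duplicate detection and lists. */
    ret = vmw_validation_add_bo(sw_context->ctx, vbo,
                                true /* as mob */, false);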
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h     |  65
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 885
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c     |   4
 3 files changed, 360 insertions(+), 594 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index ca46e4075a26..3b5598967e5c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -28,6 +28,7 @@
 #ifndef _VMWGFX_DRV_H_
 #define _VMWGFX_DRV_H_
 
+#include "vmwgfx_validation.h"
 #include "vmwgfx_reg.h"
 #include <drm/drmP.h>
 #include <drm/vmwgfx_drm.h>
@@ -207,26 +208,27 @@ struct vmw_fifo_state {
 struct vmw_relocation {
 	SVGAMobId *mob_loc;
 	SVGAGuestPtr *location;
-	uint32_t index;
+	struct vmw_buffer_object *vbo;
 };
 
 /**
  * struct vmw_res_cache_entry - resource information cache entry
- *
+ * @handle: User-space handle of a resource.
+ * @res: Non-ref-counted pointer to the resource.
+ * @valid_handle: Whether the @handle member is valid.
  * @valid: Whether the entry is valid, which also implies that the execbuf
  * code holds a reference to the resource, and it's placed on the
  * validation list.
- * @handle: User-space handle of a resource.
- * @res: Non-ref-counted pointer to the resource.
  *
  * Used to avoid frequent repeated user-space handle lookups of the
  * same resource.
  */
 struct vmw_res_cache_entry {
-	bool valid;
 	uint32_t handle;
 	struct vmw_resource *res;
-	struct vmw_resource_val_node *node;
+	void *private;
+	unsigned short valid_handle;
+	unsigned short valid;
 };
 
 /**
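
A note on the reworked cache entry above: the old single `valid` flag did
double duty. It is now split so that `valid` alone means `res` (and
`private`) describe the most recently added resource of this type, while
`valid_handle` additionally promises that `handle` may be compared against
an incoming user-space id, exactly as the lookup fast path in
vmwgfx_execbuf.c further down does:

    struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];

    if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
            /* Cache hit: skip the user-space handle lookup entirely. */
            res = rcache->res;
    }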
@@ -291,21 +293,52 @@ enum vmw_display_unit_type {
 	vmw_du_screen_target
 };
 
+struct vmw_validation_context;
+struct vmw_ctx_validation_info;
 
+/**
+ * struct vmw_sw_context - Command submission context
+ * @res_ht: Pointer hash table used to find validation duplicates
+ * @kernel: Whether the command buffer originates from kernel code rather
+ * than from user-space
+ * @fp: If @kernel is false, points to the file of the client. Otherwise
+ * NULL
+ * @relocs: Array of buffer object relocations
+ * @cur_reloc: Cursor pointing to the current relocation
+ * @cmd_bounce: Command bounce buffer used for command validation before
+ * copying to fifo space
+ * @cmd_bounce_size: Current command bounce buffer size
+ * @cur_query_bo: Current buffer object used as query result buffer
+ * @res_relocations: List of resource relocations
+ * @buf_start: Pointer to start of memory where command validation takes
+ * place
+ * @res_cache: Cache of recently looked up resources
+ * @last_query_ctx: Last context that submitted a query
+ * @needs_post_query_barrier: Whether a query barrier is needed after
+ * command submission
+ * @error_resource: Pointer to hold a reference to the resource causing
+ * an error
+ * @staged_bindings: Cached per-context binding tracker
+ * @staged_bindings_inuse: Whether the cached per-context binding tracker
+ * is in use
+ * @staged_cmd_res: List of staged command buffer managed resources in this
+ * command buffer
+ * @ctx_list: List of context resources referenced in this command buffer
+ * @dx_ctx_node: Validation metadata of the current DX context
+ * @dx_query_mob: The MOB used for DX queries
+ * @dx_query_ctx: The DX context used for the last DX query
+ * @man: Pointer to the command buffer managed resource manager
+ * @ctx: The validation context
+ */
 struct vmw_sw_context{
 	struct drm_open_hash res_ht;
 	bool res_ht_initialized;
-	bool kernel; /**< is the called made from the kernel */
+	bool kernel;
 	struct vmw_fpriv *fp;
-	struct list_head validate_nodes;
 	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
 	uint32_t cur_reloc;
-	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
-	uint32_t cur_val_buf;
 	uint32_t *cmd_bounce;
 	uint32_t cmd_bounce_size;
-	struct list_head resource_list;
-	struct list_head ctx_resource_list; /* For contexts and cotables */
 	struct vmw_buffer_object *cur_query_bo;
 	struct list_head res_relocations;
 	uint32_t *buf_start;
@@ -316,10 +349,12 @@ struct vmw_sw_context{
 	struct vmw_ctx_binding_state *staged_bindings;
 	bool staged_bindings_inuse;
 	struct list_head staged_cmd_res;
-	struct vmw_resource_val_node *dx_ctx_node;
+	struct list_head ctx_list;
+	struct vmw_ctx_validation_info *dx_ctx_node;
 	struct vmw_buffer_object *dx_query_mob;
 	struct vmw_resource *dx_query_ctx;
 	struct vmw_cmdbuf_res_manager *man;
+	struct vmw_validation_context *ctx;
 };
 
 struct vmw_legacy_display;
@@ -864,10 +899,6 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 					uint32_t fence_handle,
 					int32_t out_fence_fd,
 					struct sync_file *sync_file);
-extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
-				      struct ttm_buffer_object *bo,
-				      bool interruptible,
-				      bool validate_as_mob);
 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
 
 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index cfc87313a431..85821a5b227c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -69,35 +69,18 @@ struct vmw_resource_relocation {
 	enum vmw_resource_relocation_type rel_type:3;
 };
 
-/**
- * struct vmw_resource_val_node - Validation info for resources
- *
- * @head: List head for the software context's resource list.
- * @hash: Hash entry for quick resouce to val_node lookup.
- * @res: Ref-counted pointer to the resource.
- * @switch_backup: Boolean whether to switch backup buffer on unreserve.
- * @new_backup: Refcounted pointer to the new backup buffer.
- * @staged_bindings: If @res is a context, tracks bindings set up during
- * the command batch. Otherwise NULL.
- * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll.
- * @first_usage: Set to true the first time the resource is referenced in
- * the command stream.
- * @switching_backup: The command stream provides a new backup buffer for a
- * resource.
- * @no_buffer_needed: This means @switching_backup is true on first buffer
- * reference. So resource reservation does not need to allocate a backup
- * buffer for the resource.
+/*
+ * struct vmw_ctx_validation_info - Extra validation metadata for contexts
+ * @head: List head of context list
+ * @ctx: The context resource
+ * @cur: The context's persistent binding state
+ * @staged: The binding state changes of this command buffer
  */
-struct vmw_resource_val_node {
+struct vmw_ctx_validation_info {
 	struct list_head head;
-	struct drm_hash_item hash;
-	struct vmw_resource *res;
-	struct vmw_buffer_object *new_backup;
-	struct vmw_ctx_binding_state *staged_bindings;
-	unsigned long new_backup_offset;
-	u32 first_usage : 1;
-	u32 switching_backup : 1;
-	u32 no_buffer_needed : 1;
+	struct vmw_resource *ctx;
+	struct vmw_ctx_binding_state *cur;
+	struct vmw_ctx_binding_state *staged;
 };
 
 /**
@@ -127,10 +110,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 				 struct vmw_sw_context *sw_context,
 				 SVGAMobId *id,
 				 struct vmw_buffer_object **vmw_bo_p);
-static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-				   struct vmw_buffer_object *vbo,
-				   bool validate_as_mob,
-				   uint32_t *p_val_node);
 /**
  * vmw_ptr_diff - Compute the offset from a to b in bytes
  *
@@ -145,65 +124,55 @@ static size_t vmw_ptr_diff(void *a, void *b)
 }
 
 /**
- * vmw_resources_unreserve - unreserve resources previously reserved for
- * command submission.
- *
- * @sw_context: pointer to the software context
- * @backoff: Whether command submission failed.
+ * vmw_execbuf_bindings_commit - Commit modified binding state
+ * @sw_context: The command submission context
+ * @backoff: Whether this is part of the error path and binding state
+ * changes should be ignored
  */
-static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
-				    bool backoff)
+static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
+					bool backoff)
 {
-	struct vmw_resource_val_node *val;
-	struct list_head *list = &sw_context->resource_list;
+	struct vmw_ctx_validation_info *entry, *next;
 
-	if (sw_context->dx_query_mob && !backoff)
-		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
-					  sw_context->dx_query_mob);
+	list_for_each_entry_safe(entry, next, &sw_context->ctx_list, head) {
+		list_del(&entry->head);
 
-	list_for_each_entry(val, list, head) {
-		struct vmw_resource *res = val->res;
-		bool switch_backup =
-			(backoff) ? false : val->switching_backup;
-
-		/*
-		 * Transfer staged context bindings to the
-		 * persistent context binding tracker.
-		 */
-		if (unlikely(val->staged_bindings)) {
-			if (!backoff) {
-				vmw_binding_state_commit
-					(vmw_context_binding_state(val->res),
-					 val->staged_bindings);
-			}
-
-			if (val->staged_bindings != sw_context->staged_bindings)
-				vmw_binding_state_free(val->staged_bindings);
-			else
-				sw_context->staged_bindings_inuse = false;
-			val->staged_bindings = NULL;
-		}
-		vmw_resource_unreserve(res, switch_backup, val->new_backup,
-				       val->new_backup_offset);
-		vmw_bo_unreference(&val->new_backup);
+		if (!backoff)
+			vmw_binding_state_commit(entry->cur, entry->staged);
+		if (entry->staged != sw_context->staged_bindings)
+			vmw_binding_state_free(entry->staged);
+		else
+			sw_context->staged_bindings_inuse = false;
 	}
 }
 
 /**
+ * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
+ * @sw_context: The command submission context
+ */
+static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
+{
+	if (sw_context->dx_query_mob)
+		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
+					  sw_context->dx_query_mob);
+}
+
+/**
  * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
  * added to the validate list.
  *
  * @dev_priv: Pointer to the device private:
- * @sw_context: The validation context:
- * @node: The validation node holding this context.
+ * @sw_context: The command submission context
+ * @node: The validation node holding the context resource metadata
  */
 static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
 				   struct vmw_sw_context *sw_context,
-				   struct vmw_resource_val_node *node)
+				   struct vmw_resource *res,
+				   struct vmw_ctx_validation_info *node)
 {
 	int ret;
 
-	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
+	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
 	if (unlikely(ret != 0))
 		goto out_err;
 
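
The old vmw_resources_unreserve() did three jobs: commit staged bindings,
rebind the DX query MOB, and unreserve every resource. The first two
survive above as vmw_execbuf_bindings_commit() and vmw_bind_dx_query_mob();
unreserving has moved into the validation module. A sketch of how the end
of a submission is expected to look after this patch (the actual call
sites are wired up in hunks not shown here, so treat the ordering as
illustrative):

    /* Success path at the end of command submission: */
    vmw_execbuf_bindings_commit(sw_context, false);
    vmw_bind_dx_query_mob(sw_context);

    /* Error path: throw the staged binding changes away instead. */
    vmw_execbuf_bindings_commit(sw_context, true);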
@@ -220,19 +189,23 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
 	}
 
 	if (sw_context->staged_bindings_inuse) {
-		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
-		if (IS_ERR(node->staged_bindings)) {
+		node->staged = vmw_binding_state_alloc(dev_priv);
+		if (IS_ERR(node->staged)) {
 			DRM_ERROR("Failed to allocate context binding "
 				  "information.\n");
-			ret = PTR_ERR(node->staged_bindings);
-			node->staged_bindings = NULL;
+			ret = PTR_ERR(node->staged);
+			node->staged = NULL;
 			goto out_err;
 		}
 	} else {
-		node->staged_bindings = sw_context->staged_bindings;
+		node->staged = sw_context->staged_bindings;
 		sw_context->staged_bindings_inuse = true;
 	}
 
+	node->ctx = res;
+	node->cur = vmw_context_binding_state(res);
+	list_add_tail(&node->head, &sw_context->ctx_list);
+
 	return 0;
 out_err:
 	return ret;
@@ -248,61 +221,42 @@ out_err:
  * struct vmw_resource_val_node, if non-NULL on entry.
  */
 static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
-				struct vmw_resource *res,
-				struct vmw_resource_val_node **p_node)
+				struct vmw_resource *res)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
-	struct vmw_resource_val_node *node;
-	struct drm_hash_item *hash;
 	int ret;
+	enum vmw_res_type res_type = vmw_res_type(res);
+	struct vmw_res_cache_entry *rcache;
+	struct vmw_ctx_validation_info *ctx_info;
+	bool first_usage;
+	size_t priv_size;
 
-	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
-				    &hash) == 0)) {
-		node = container_of(hash, struct vmw_resource_val_node, hash);
-		node->first_usage = false;
-		if (unlikely(p_node != NULL))
-			*p_node = node;
-		return 0;
-	}
-
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (unlikely(!node)) {
-		DRM_ERROR("Failed to allocate a resource validation "
-			  "entry.\n");
-		return -ENOMEM;
-	}
+	/*
+	 * If the resource is a context, set up structures to track
+	 * context bindings.
+	 */
+	priv_size = (res_type == vmw_res_dx_context ||
+		     (res_type == vmw_res_context && dev_priv->has_mob)) ?
+		sizeof(*ctx_info) : 0;
 
-	node->hash.key = (unsigned long) res;
-	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed to initialize a resource validation "
-			  "entry.\n");
-		kfree(node);
+	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
+					  (void **)&ctx_info, &first_usage);
+	if (ret)
 		return ret;
-	}
-	node->res = vmw_resource_reference(res);
-	node->first_usage = true;
-	if (unlikely(p_node != NULL))
-		*p_node = node;
 
-	if (!dev_priv->has_mob) {
-		list_add_tail(&node->head, &sw_context->resource_list);
-		return 0;
+	if (priv_size && first_usage) {
+		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
+					      ctx_info);
+		if (ret)
+			return ret;
 	}
 
-	switch (vmw_res_type(res)) {
-	case vmw_res_context:
-	case vmw_res_dx_context:
-		list_add(&node->head, &sw_context->ctx_resource_list);
-		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
-		break;
-	case vmw_res_cotable:
-		list_add_tail(&node->head, &sw_context->ctx_resource_list);
-		break;
-	default:
-		list_add_tail(&node->head, &sw_context->resource_list);
-		break;
-	}
+	/* Cache info about the last added resource */
+	rcache = &sw_context->res_cache[res_type];
+	rcache->res = res;
+	rcache->private = ctx_info;
+	rcache->valid = 1;
+	rcache->valid_handle = 0;
 
 	return ret;
 }
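
Worth noting in the rewritten vmw_resource_val_add(): the priv_size
argument lets the validation module co-allocate the struct
vmw_ctx_validation_info with its own node, so contexts no longer need a
separate kzalloc()/hash insertion, and a repeated add of the same resource
simply finds the existing node and reports first_usage == false. Restated
as a sketch (parameter meanings inferred from the call above;
needs_binding_tracking is a stand-in for the res_type test):

    /* 0 for ordinary resources; contexts reserve binding metadata inline. */
    priv_size = needs_binding_tracking ? sizeof(*ctx_info) : 0;

    ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
                                      (void **)&ctx_info, &first_usage);
    if (!ret && priv_size && first_usage)
            ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
                                          ctx_info);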
@@ -325,11 +279,11 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
 	 * First add the resource the view is pointing to, otherwise
 	 * it may be swapped out when the view is validated.
 	 */
-	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
+	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view));
 	if (ret)
 		return ret;
 
-	return vmw_resource_val_add(sw_context, view, NULL);
+	return vmw_resource_val_add(sw_context, view);
 }
 
 /**
@@ -347,7 +301,7 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
 static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
 			       enum vmw_view_type view_type, u32 id)
 {
-	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
 	struct vmw_resource *view;
 	int ret;
 
@@ -394,7 +348,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 		if (IS_ERR(res))
 			continue;
 
-		ret = vmw_resource_val_add(sw_context, res, NULL);
+		ret = vmw_resource_val_add(sw_context, res);
 		vmw_resource_unreference(&res);
 		if (unlikely(ret != 0))
 			return ret;
@@ -415,8 +369,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 		if (vmw_res_type(entry->res) == vmw_res_view)
 			ret = vmw_view_res_val_add(sw_context, entry->res);
 		else
-			ret = vmw_resource_val_add(sw_context, entry->res,
-						   NULL);
+			ret = vmw_resource_val_add(sw_context, entry->res);
 		vmw_resource_unreference(&res);
 		if (unlikely(ret != 0))
 			break;
@@ -427,9 +380,8 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 
 		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
 		if (dx_query_mob)
-			ret = vmw_bo_to_validate_list(sw_context,
-						      dx_query_mob,
-						      true, NULL);
+			ret = vmw_validation_add_bo(sw_context->ctx,
+						    dx_query_mob, true, false);
 	}
 
 	mutex_unlock(&dev_priv->binding_mutex);
@@ -532,68 +484,6 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
 }
 
 /**
- * vmw_bo_to_validate_list - add a bo to a validate list
- *
- * @sw_context: The software context used for this command submission batch.
- * @bo: The buffer object to add.
- * @validate_as_mob: Validate this buffer as a MOB.
- * @p_val_node: If non-NULL Will be updated with the validate node number
- * on return.
- *
- * Returns -EINVAL if the limit of number of buffer objects per command
- * submission is reached.
- */
-static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-				   struct vmw_buffer_object *vbo,
-				   bool validate_as_mob,
-				   uint32_t *p_val_node)
-{
-	uint32_t val_node;
-	struct vmw_validate_buffer *vval_buf;
-	struct ttm_validate_buffer *val_buf;
-	struct drm_hash_item *hash;
-	int ret;
-
-	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
-				    &hash) == 0)) {
-		vval_buf = container_of(hash, struct vmw_validate_buffer,
-					hash);
-		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
-			DRM_ERROR("Inconsistent buffer usage.\n");
-			return -EINVAL;
-		}
-		val_buf = &vval_buf->base;
-		val_node = vval_buf - sw_context->val_bufs;
-	} else {
-		val_node = sw_context->cur_val_buf;
-		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
-			DRM_ERROR("Max number of DMA buffers per submission "
-				  "exceeded.\n");
-			return -EINVAL;
-		}
-		vval_buf = &sw_context->val_bufs[val_node];
-		vval_buf->hash.key = (unsigned long) vbo;
-		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
-		if (unlikely(ret != 0)) {
-			DRM_ERROR("Failed to initialize a buffer validation "
-				  "entry.\n");
-			return ret;
-		}
-		++sw_context->cur_val_buf;
-		val_buf = &vval_buf->base;
-		val_buf->bo = ttm_bo_reference(&vbo->base);
-		val_buf->shared = false;
-		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
-		vval_buf->validate_as_mob = validate_as_mob;
-	}
-
-	if (p_val_node)
-		*p_val_node = val_node;
-
-	return 0;
-}
-
-/**
  * vmw_resources_reserve - Reserve all resources on the sw_context's
  * resource list.
  *
@@ -605,27 +495,11 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
  */
 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 {
-	struct vmw_resource_val_node *val;
-	int ret = 0;
-
-	list_for_each_entry(val, &sw_context->resource_list, head) {
-		struct vmw_resource *res = val->res;
-
-		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
-		if (unlikely(ret != 0))
-			return ret;
-
-		if (res->backup) {
-			struct vmw_buffer_object *vbo = res->backup;
-
-			ret = vmw_bo_to_validate_list
-				(sw_context, vbo,
-				 vmw_resource_needs_backup(res), NULL);
+	int ret;
 
-			if (unlikely(ret != 0))
-				return ret;
-		}
-	}
+	ret = vmw_validation_res_reserve(sw_context->ctx, true);
+	if (ret)
+		return ret;
 
 	if (sw_context->dx_query_mob) {
 		struct vmw_buffer_object *expected_dx_query_mob;
@@ -642,67 +516,22 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 }
 
 /**
- * vmw_resources_validate - Validate all resources on the sw_context's
- * resource list.
- *
- * @sw_context: Pointer to the software context.
- *
- * Before this function is called, all resource backup buffers must have
- * been validated.
- */
-static int vmw_resources_validate(struct vmw_sw_context *sw_context)
-{
-	struct vmw_resource_val_node *val;
-	int ret;
-
-	list_for_each_entry(val, &sw_context->resource_list, head) {
-		struct vmw_resource *res = val->res;
-		struct vmw_buffer_object *backup = res->backup;
-
-		ret = vmw_resource_validate(res, true);
-		if (unlikely(ret != 0)) {
-			if (ret != -ERESTARTSYS)
-				DRM_ERROR("Failed to validate resource.\n");
-			return ret;
-		}
-
-		/* Check if the resource switched backup buffer */
-		if (backup && res->backup && (backup != res->backup)) {
-			struct vmw_buffer_object *vbo = res->backup;
-
-			ret = vmw_bo_to_validate_list
-				(sw_context, vbo,
-				 vmw_resource_needs_backup(res), NULL);
-			if (ret) {
-				ttm_bo_unreserve(&vbo->base);
-				return ret;
-			}
-		}
-	}
-	return 0;
-}
-
-/**
  * vmw_cmd_res_reloc_add - Add a resource to a software context's
  * relocation- and validation lists.
- *
  * @dev_priv: Pointer to a struct vmw_private identifying the device.
  * @sw_context: Pointer to the software context.
  * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
- * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
- * used for this resource is returned here.
+ *
+ * Return: Zero on success, negative error code on error
 */
 static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
 				 struct vmw_sw_context *sw_context,
 				 uint32_t *id_loc,
-				 struct vmw_resource *res,
-				 struct vmw_resource_val_node **p_val)
+				 struct vmw_resource *res)
 {
 	int ret;
-	struct vmw_resource_val_node *node;
 
-	*p_val = NULL;
 	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
 					  res,
 					  vmw_ptr_diff(sw_context->buf_start,
@@ -711,13 +540,10 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = vmw_resource_val_add(sw_context, res, &node);
+	ret = vmw_resource_val_add(sw_context, res);
 	if (unlikely(ret != 0))
 		return ret;
 
-	if (p_val)
-		*p_val = node;
-
 	return 0;
 }
 
@@ -741,17 +567,17 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
 		  enum vmw_res_type res_type,
 		  const struct vmw_user_resource_conv *converter,
 		  uint32_t *id_loc,
-		  struct vmw_resource_val_node **p_val)
+		  struct vmw_resource **p_res)
 {
 	struct vmw_res_cache_entry *rcache =
 		&sw_context->res_cache[res_type];
 	struct vmw_resource *res;
-	struct vmw_resource_val_node *node;
 	int ret;
 
+	if (p_res)
+		*p_res = NULL;
+
 	if (*id_loc == SVGA3D_INVALID_ID) {
-		if (p_val)
-			*p_val = NULL;
 		if (res_type == vmw_res_context) {
 			DRM_ERROR("Illegal context invalid id.\n");
 			return -EINVAL;
@@ -764,12 +590,11 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
 	 * resource
 	 */
 
-	if (likely(rcache->valid && *id_loc == rcache->handle)) {
-		const struct vmw_resource *res = rcache->res;
+	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
+		struct vmw_resource *res = rcache->res;
 
-		rcache->node->first_usage = false;
-		if (p_val)
-			*p_val = rcache->node;
+		if (p_res)
+			*p_res = res;
 
 		return vmw_resource_relocation_add
 			(&sw_context->res_relocations, res,
@@ -789,18 +614,19 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
 		return ret;
 	}
 
-	rcache->valid = true;
-	rcache->res = res;
-	rcache->handle = *id_loc;
-
 	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
-				    res, &node);
+				    res);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
 
-	rcache->node = node;
-	if (p_val)
-		*p_val = node;
+	if (p_res)
+		*p_res = res;
+
+	if (rcache->valid && rcache->res == res) {
+		rcache->valid_handle = true;
+		rcache->handle = *id_loc;
+	}
+
 	vmw_resource_unreference(&res);
 	return 0;
 
@@ -861,22 +687,18 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
  */
 static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
 {
-	struct vmw_resource_val_node *val;
+	struct vmw_ctx_validation_info *val;
 	int ret;
 
-	list_for_each_entry(val, &sw_context->resource_list, head) {
-		if (unlikely(!val->staged_bindings))
-			break;
-
-		ret = vmw_binding_rebind_all
-			(vmw_context_binding_state(val->res));
+	list_for_each_entry(val, &sw_context->ctx_list, head) {
+		ret = vmw_binding_rebind_all(val->cur);
 		if (unlikely(ret != 0)) {
 			if (ret != -ERESTARTSYS)
 				DRM_ERROR("Failed to rebind context.\n");
 			return ret;
 		}
 
-		ret = vmw_rebind_all_dx_query(val->res);
+		ret = vmw_rebind_all_dx_query(val->ctx);
 		if (ret != 0)
 			return ret;
 	}
@@ -903,7 +725,7 @@ static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
 				 uint32 view_ids[], u32 num_views,
 				 u32 first_slot)
 {
-	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
 	struct vmw_cmdbuf_res_manager *man;
 	u32 i;
 	int ret;
@@ -933,12 +755,12 @@ static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
 				return ret;
 			}
 		}
-		binding.bi.ctx = ctx_node->res;
+		binding.bi.ctx = ctx_node->ctx;
 		binding.bi.res = view;
 		binding.bi.bt = binding_type;
 		binding.shader_slot = shader_slot;
 		binding.slot = first_slot + i;
-		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+		vmw_binding_add(ctx_node->staged, &binding.bi,
 				shader_slot, binding.slot);
 		if (view)
 			vmw_resource_unreference(&view);
@@ -971,6 +793,34 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 				 user_context_converter, &cmd->cid, NULL);
 }
 
+/**
+ * vmw_execbuf_info_from_res - Get the private validation metadata for a
+ * recently validated resource
+ * @sw_context: Pointer to the command submission context
+ * @res: The resource
+ *
+ * The resource pointed to by @res needs to be present in the command submission
+ * context's resource cache and hence the last resource of that type to be
+ * processed by the validation code.
+ *
+ * Return: a pointer to the private metadata of the resource, or NULL
+ * if it wasn't found
+ */
+static struct vmw_ctx_validation_info *
+vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
+			  struct vmw_resource *res)
+{
+	struct vmw_res_cache_entry *rcache =
+		&sw_context->res_cache[vmw_res_type(res)];
+
+	if (rcache->valid && rcache->res == res)
+		return rcache->private;
+
+	WARN_ON_ONCE(true);
+	return NULL;
+}
+
+
 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 					   struct vmw_sw_context *sw_context,
 					   SVGA3dCmdHeader *header)
@@ -979,8 +829,8 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSetRenderTarget body;
 	} *cmd;
-	struct vmw_resource_val_node *ctx_node;
-	struct vmw_resource_val_node *res_node;
+	struct vmw_resource *ctx;
+	struct vmw_resource *res;
 	int ret;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
@@ -993,25 +843,29 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				user_context_converter, &cmd->body.cid,
-				&ctx_node);
+				&ctx);
 	if (unlikely(ret != 0))
 		return ret;
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				user_surface_converter,
-				&cmd->body.target.sid, &res_node);
+				&cmd->body.target.sid, &res);
 	if (unlikely(ret != 0))
 		return ret;
 
 	if (dev_priv->has_mob) {
 		struct vmw_ctx_bindinfo_view binding;
+		struct vmw_ctx_validation_info *node;
 
-		binding.bi.ctx = ctx_node->res;
-		binding.bi.res = res_node ? res_node->res : NULL;
+		node = vmw_execbuf_info_from_res(sw_context, ctx);
+		if (!node)
+			return -EINVAL;
+
+		binding.bi.ctx = ctx;
+		binding.bi.res = res;
 		binding.bi.bt = vmw_ctx_binding_rt;
 		binding.slot = cmd->body.type;
-		vmw_binding_add(ctx_node->staged_bindings,
-				&binding.bi, 0, binding.slot);
+		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
 	}
 
 	return 0;
@@ -1171,17 +1025,17 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 
 	if (unlikely(sw_context->cur_query_bo != NULL)) {
 		sw_context->needs_post_query_barrier = true;
-		ret = vmw_bo_to_validate_list(sw_context,
-					      sw_context->cur_query_bo,
-					      dev_priv->has_mob, NULL);
+		ret = vmw_validation_add_bo(sw_context->ctx,
+					    sw_context->cur_query_bo,
+					    dev_priv->has_mob, false);
 		if (unlikely(ret != 0))
 			return ret;
 	}
 	sw_context->cur_query_bo = new_query_bo;
 
-	ret = vmw_bo_to_validate_list(sw_context,
-				      dev_priv->dummy_query_bo,
-				      dev_priv->has_mob, NULL);
+	ret = vmw_validation_add_bo(sw_context->ctx,
+				    dev_priv->dummy_query_bo,
+				    dev_priv->has_mob, false);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1306,8 +1160,9 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
 	reloc->mob_loc = id;
 	reloc->location = NULL;
+	reloc->vbo = vmw_bo;
 
-	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
+	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
 
@@ -1365,8 +1220,9 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 
 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
 	reloc->location = ptr;
+	reloc->vbo = vmw_bo;
 
-	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
+	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
 
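
With the index member gone, struct vmw_relocation now carries the buffer
object pointer itself, so the final relocation pass can read the validated
placement straight off reloc->vbo instead of indexing the deleted val_bufs
array. A simplified, hedged sketch of that pass (the real
vmw_apply_relocations() is outside the hunks shown here and also handles
VRAM placements; only the mob_loc/location/vbo members are from this
patch):

    for (i = 0; i < sw_context->cur_reloc; ++i) {
            struct vmw_relocation *reloc = &sw_context->relocs[i];
            struct ttm_buffer_object *bo = &reloc->vbo->base;

            if (reloc->mob_loc)                /* MOB binding */
                    *reloc->mob_loc = bo->mem.start;
            else                               /* GMR guest pointer */
                    reloc->location->gmrId = bo->mem.start;
    }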
@@ -1400,7 +1256,7 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
 	} *cmd;
 
 	int ret;
-	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
 	struct vmw_resource *cotable_res;
 
 
@@ -1415,7 +1271,7 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
 	    cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
 		return -EINVAL;
 
-	cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
+	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
 	ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
 	vmw_resource_unreference(&cotable_res);
 
@@ -1462,7 +1318,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
 		return ret;
 
 	sw_context->dx_query_mob = vmw_bo;
-	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
+	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
 
 	vmw_bo_unreference(&vmw_bo);
 
@@ -1837,8 +1693,8 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 		((unsigned long) header + header->size + sizeof(header));
 	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
 		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
-	struct vmw_resource_val_node *ctx_node;
-	struct vmw_resource_val_node *res_node;
+	struct vmw_resource *ctx;
+	struct vmw_resource *res;
 	int ret;
 
 	cmd = container_of(header, struct vmw_tex_state_cmd,
@@ -1846,7 +1702,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				user_context_converter, &cmd->state.cid,
-				&ctx_node);
+				&ctx);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1862,19 +1718,24 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 
 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 					user_surface_converter,
-					&cur_state->value, &res_node);
+					&cur_state->value, &res);
 		if (unlikely(ret != 0))
 			return ret;
 
 		if (dev_priv->has_mob) {
 			struct vmw_ctx_bindinfo_tex binding;
+			struct vmw_ctx_validation_info *node;
 
-			binding.bi.ctx = ctx_node->res;
-			binding.bi.res = res_node ? res_node->res : NULL;
+			node = vmw_execbuf_info_from_res(sw_context, ctx);
+			if (!node)
+				return -EINVAL;
+
+			binding.bi.ctx = ctx;
+			binding.bi.res = res;
 			binding.bi.bt = vmw_ctx_binding_tex;
 			binding.texture_stage = cur_state->stage;
-			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
-					0, binding.texture_stage);
+			vmw_binding_add(node->staged, &binding.bi, 0,
+					binding.texture_stage);
 		}
 	}
 
@@ -1922,24 +1783,25 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
  */
 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
 				     struct vmw_sw_context *sw_context,
-				     struct vmw_resource_val_node *val_node,
+				     struct vmw_resource *res,
 				     uint32_t *buf_id,
 				     unsigned long backup_offset)
 {
-	struct vmw_buffer_object *dma_buf;
+	struct vmw_buffer_object *vbo;
+	void *info;
 	int ret;
 
-	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+	info = vmw_execbuf_info_from_res(sw_context, res);
+	if (!info)
+		return -EINVAL;
+
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
 	if (ret)
 		return ret;
 
-	val_node->switching_backup = true;
-	if (val_node->first_usage)
-		val_node->no_buffer_needed = true;
-
-	vmw_bo_unreference(&val_node->new_backup);
-	val_node->new_backup = dma_buf;
-	val_node->new_backup_offset = backup_offset;
+	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
+					 backup_offset);
+	vmw_bo_unreference(&vbo);
 
 	return 0;
 }
@@ -1970,15 +1832,15 @@ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
 				 uint32_t *buf_id,
 				 unsigned long backup_offset)
 {
-	struct vmw_resource_val_node *val_node;
+	struct vmw_resource *res;
 	int ret;
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
-				converter, res_id, &val_node);
+				converter, res_id, &res);
 	if (ret)
 		return ret;
 
-	return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
+	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
 					 buf_id, backup_offset);
 }
 
@@ -2170,14 +2032,14 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
 	} *cmd;
 	int ret;
 	size_t size;
-	struct vmw_resource_val_node *val;
+	struct vmw_resource *ctx;
 
 	cmd = container_of(header, struct vmw_shader_define_cmd,
 			   header);
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				user_context_converter, &cmd->body.cid,
-				&val);
+				&ctx);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -2186,7 +2048,7 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
 
 	size = cmd->header.size - sizeof(cmd->body);
 	ret = vmw_compat_shader_add(dev_priv,
-				    vmw_context_res_man(val->res),
+				    vmw_context_res_man(ctx),
 				    cmd->body.shid, cmd + 1,
 				    cmd->body.type, size,
 				    &sw_context->staged_cmd_res);
@@ -2217,21 +2079,21 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
 		SVGA3dCmdDestroyShader body;
 	} *cmd;
 	int ret;
-	struct vmw_resource_val_node *val;
+	struct vmw_resource *ctx;
 
 	cmd = container_of(header, struct vmw_shader_destroy_cmd,
 			   header);
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				user_context_converter, &cmd->body.cid,
-				&val);
+				&ctx);
 	if (unlikely(ret != 0))
 		return ret;
 
 	if (unlikely(!dev_priv->has_mob))
 		return 0;
 
-	ret = vmw_shader_remove(vmw_context_res_man(val->res),
+	ret = vmw_shader_remove(vmw_context_res_man(ctx),
 				cmd->body.shid,
 				cmd->body.type,
 				&sw_context->staged_cmd_res);
@@ -2261,9 +2123,9 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSetShader body;
 	} *cmd;
-	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
 	struct vmw_ctx_bindinfo_shader binding;
-	struct vmw_resource *res = NULL;
+	struct vmw_resource *ctx, *res = NULL;
+	struct vmw_ctx_validation_info *ctx_info;
 	int ret;
 
 	cmd = container_of(header, struct vmw_set_shader_cmd,
@@ -2277,7 +2139,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				user_context_converter, &cmd->body.cid,
-				&ctx_node);
+				&ctx);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -2285,34 +2147,39 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, | |||
2285 | return 0; | 2147 | return 0; |
2286 | 2148 | ||
2287 | if (cmd->body.shid != SVGA3D_INVALID_ID) { | 2149 | if (cmd->body.shid != SVGA3D_INVALID_ID) { |
2288 | res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res), | 2150 | res = vmw_shader_lookup(vmw_context_res_man(ctx), |
2289 | cmd->body.shid, | 2151 | cmd->body.shid, |
2290 | cmd->body.type); | 2152 | cmd->body.type); |
2291 | 2153 | ||
2292 | if (!IS_ERR(res)) { | 2154 | if (!IS_ERR(res)) { |
2155 | struct vmw_resource *tmp_res = res; | ||
2156 | |||
2293 | ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, | 2157 | ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, |
2294 | &cmd->body.shid, res, | 2158 | &cmd->body.shid, res); |
2295 | &res_node); | 2159 | vmw_resource_unreference(&tmp_res); |
2296 | vmw_resource_unreference(&res); | ||
2297 | if (unlikely(ret != 0)) | 2160 | if (unlikely(ret != 0)) |
2298 | return ret; | 2161 | return ret; |
2299 | } | 2162 | } |
2300 | } | 2163 | } |
2301 | 2164 | ||
2302 | if (!res_node) { | 2165 | if (IS_ERR_OR_NULL(res)) { |
2303 | ret = vmw_cmd_res_check(dev_priv, sw_context, | 2166 | ret = vmw_cmd_res_check(dev_priv, sw_context, |
2304 | vmw_res_shader, | 2167 | vmw_res_shader, |
2305 | user_shader_converter, | 2168 | user_shader_converter, |
2306 | &cmd->body.shid, &res_node); | 2169 | &cmd->body.shid, &res); |
2307 | if (unlikely(ret != 0)) | 2170 | if (unlikely(ret != 0)) |
2308 | return ret; | 2171 | return ret; |
2309 | } | 2172 | } |
2310 | 2173 | ||
2311 | binding.bi.ctx = ctx_node->res; | 2174 | ctx_info = vmw_execbuf_info_from_res(sw_context, ctx); |
2312 | binding.bi.res = res_node ? res_node->res : NULL; | 2175 | if (!ctx_info) |
2176 | return -EINVAL; | ||
2177 | |||
2178 | binding.bi.ctx = ctx; | ||
2179 | binding.bi.res = res; | ||
2313 | binding.bi.bt = vmw_ctx_binding_shader; | 2180 | binding.bi.bt = vmw_ctx_binding_shader; |
2314 | binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; | 2181 | binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; |
2315 | vmw_binding_add(ctx_node->staged_bindings, &binding.bi, | 2182 | vmw_binding_add(ctx_info->staged, &binding.bi, |
2316 | binding.shader_slot, 0); | 2183 | binding.shader_slot, 0); |
2317 | return 0; | 2184 | return 0; |
2318 | } | 2185 | } |
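The hunk above is the template for most of the conversion: vmw_cmd_res_check() now hands back a plain struct vmw_resource * instead of a validation node, and per-context binding state is fetched on demand with vmw_execbuf_info_from_res(). A minimal sketch of the resulting pattern, using only names visible in this diff (the shader lookup and most error paths are elided, and "slot" stands in for the command-specific shader slot):

    struct vmw_resource *ctx, *res = NULL;
    struct vmw_ctx_validation_info *ctx_info;
    struct vmw_ctx_bindinfo_shader binding;

    /* The handle check yields the resource directly now. */
    ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                            user_context_converter, &cmd->body.cid, &ctx);
    if (ret)
            return ret;

    /* Per-context validation info replaces the old ctx_node. */
    ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
    if (!ctx_info)
            return -EINVAL;

    binding.bi.ctx = ctx;
    binding.bi.res = res;
    binding.bi.bt = vmw_ctx_binding_shader;
    vmw_binding_add(ctx_info->staged, &binding.bi, slot, 0);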
@@ -2393,8 +2260,8 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, | |||
2393 | SVGA3dCmdHeader header; | 2260 | SVGA3dCmdHeader header; |
2394 | SVGA3dCmdDXSetSingleConstantBuffer body; | 2261 | SVGA3dCmdDXSetSingleConstantBuffer body; |
2395 | } *cmd; | 2262 | } *cmd; |
2396 | struct vmw_resource_val_node *res_node = NULL; | 2263 | struct vmw_resource *res = NULL; |
2397 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | 2264 | struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; |
2398 | struct vmw_ctx_bindinfo_cb binding; | 2265 | struct vmw_ctx_bindinfo_cb binding; |
2399 | int ret; | 2266 | int ret; |
2400 | 2267 | ||
@@ -2406,12 +2273,12 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, | |||
2406 | cmd = container_of(header, typeof(*cmd), header); | 2273 | cmd = container_of(header, typeof(*cmd), header); |
2407 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 2274 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
2408 | user_surface_converter, | 2275 | user_surface_converter, |
2409 | &cmd->body.sid, &res_node); | 2276 | &cmd->body.sid, &res); |
2410 | if (unlikely(ret != 0)) | 2277 | if (unlikely(ret != 0)) |
2411 | return ret; | 2278 | return ret; |
2412 | 2279 | ||
2413 | binding.bi.ctx = ctx_node->res; | 2280 | binding.bi.ctx = ctx_node->ctx; |
2414 | binding.bi.res = res_node ? res_node->res : NULL; | 2281 | binding.bi.res = res; |
2415 | binding.bi.bt = vmw_ctx_binding_cb; | 2282 | binding.bi.bt = vmw_ctx_binding_cb; |
2416 | binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; | 2283 | binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; |
2417 | binding.offset = cmd->body.offsetInBytes; | 2284 | binding.offset = cmd->body.offsetInBytes; |
@@ -2426,7 +2293,7 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, | |||
2426 | return -EINVAL; | 2293 | return -EINVAL; |
2427 | } | 2294 | } |
2428 | 2295 | ||
2429 | vmw_binding_add(ctx_node->staged_bindings, &binding.bi, | 2296 | vmw_binding_add(ctx_node->staged, &binding.bi, |
2430 | binding.shader_slot, binding.slot); | 2297 | binding.shader_slot, binding.slot); |
2431 | 2298 | ||
2432 | return 0; | 2299 | return 0; |
@@ -2482,7 +2349,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, | |||
2482 | SVGA3dCmdDXSetShader body; | 2349 | SVGA3dCmdDXSetShader body; |
2483 | } *cmd; | 2350 | } *cmd; |
2484 | struct vmw_resource *res = NULL; | 2351 | struct vmw_resource *res = NULL; |
2485 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | 2352 | struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; |
2486 | struct vmw_ctx_bindinfo_shader binding; | 2353 | struct vmw_ctx_bindinfo_shader binding; |
2487 | int ret = 0; | 2354 | int ret = 0; |
2488 | 2355 | ||
@@ -2506,17 +2373,17 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, | |||
2506 | return PTR_ERR(res); | 2373 | return PTR_ERR(res); |
2507 | } | 2374 | } |
2508 | 2375 | ||
2509 | ret = vmw_resource_val_add(sw_context, res, NULL); | 2376 | ret = vmw_resource_val_add(sw_context, res); |
2510 | if (ret) | 2377 | if (ret) |
2511 | goto out_unref; | 2378 | goto out_unref; |
2512 | } | 2379 | } |
2513 | 2380 | ||
2514 | binding.bi.ctx = ctx_node->res; | 2381 | binding.bi.ctx = ctx_node->ctx; |
2515 | binding.bi.res = res; | 2382 | binding.bi.res = res; |
2516 | binding.bi.bt = vmw_ctx_binding_dx_shader; | 2383 | binding.bi.bt = vmw_ctx_binding_dx_shader; |
2517 | binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; | 2384 | binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; |
2518 | 2385 | ||
2519 | vmw_binding_add(ctx_node->staged_bindings, &binding.bi, | 2386 | vmw_binding_add(ctx_node->staged, &binding.bi, |
2520 | binding.shader_slot, 0); | 2387 | binding.shader_slot, 0); |
2521 | out_unref: | 2388 | out_unref: |
2522 | if (res) | 2389 | if (res) |
@@ -2537,9 +2404,9 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv, | |||
2537 | struct vmw_sw_context *sw_context, | 2404 | struct vmw_sw_context *sw_context, |
2538 | SVGA3dCmdHeader *header) | 2405 | SVGA3dCmdHeader *header) |
2539 | { | 2406 | { |
2540 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | 2407 | struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; |
2541 | struct vmw_ctx_bindinfo_vb binding; | 2408 | struct vmw_ctx_bindinfo_vb binding; |
2542 | struct vmw_resource_val_node *res_node; | 2409 | struct vmw_resource *res; |
2543 | struct { | 2410 | struct { |
2544 | SVGA3dCmdHeader header; | 2411 | SVGA3dCmdHeader header; |
2545 | SVGA3dCmdDXSetVertexBuffers body; | 2412 | SVGA3dCmdDXSetVertexBuffers body; |
@@ -2564,18 +2431,18 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv, | |||
2564 | for (i = 0; i < num; i++) { | 2431 | for (i = 0; i < num; i++) { |
2565 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 2432 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
2566 | user_surface_converter, | 2433 | user_surface_converter, |
2567 | &cmd->buf[i].sid, &res_node); | 2434 | &cmd->buf[i].sid, &res); |
2568 | if (unlikely(ret != 0)) | 2435 | if (unlikely(ret != 0)) |
2569 | return ret; | 2436 | return ret; |
2570 | 2437 | ||
2571 | binding.bi.ctx = ctx_node->res; | 2438 | binding.bi.ctx = ctx_node->ctx; |
2572 | binding.bi.bt = vmw_ctx_binding_vb; | 2439 | binding.bi.bt = vmw_ctx_binding_vb; |
2573 | binding.bi.res = ((res_node) ? res_node->res : NULL); | 2440 | binding.bi.res = res; |
2574 | binding.offset = cmd->buf[i].offset; | 2441 | binding.offset = cmd->buf[i].offset; |
2575 | binding.stride = cmd->buf[i].stride; | 2442 | binding.stride = cmd->buf[i].stride; |
2576 | binding.slot = i + cmd->body.startBuffer; | 2443 | binding.slot = i + cmd->body.startBuffer; |
2577 | 2444 | ||
2578 | vmw_binding_add(ctx_node->staged_bindings, &binding.bi, | 2445 | vmw_binding_add(ctx_node->staged, &binding.bi, |
2579 | 0, binding.slot); | 2446 | 0, binding.slot); |
2580 | } | 2447 | } |
2581 | 2448 | ||
@@ -2594,9 +2461,9 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv, | |||
2594 | struct vmw_sw_context *sw_context, | 2461 | struct vmw_sw_context *sw_context, |
2595 | SVGA3dCmdHeader *header) | 2462 | SVGA3dCmdHeader *header) |
2596 | { | 2463 | { |
2597 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | 2464 | struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; |
2598 | struct vmw_ctx_bindinfo_ib binding; | 2465 | struct vmw_ctx_bindinfo_ib binding; |
2599 | struct vmw_resource_val_node *res_node; | 2466 | struct vmw_resource *res; |
2600 | struct { | 2467 | struct { |
2601 | SVGA3dCmdHeader header; | 2468 | SVGA3dCmdHeader header; |
2602 | SVGA3dCmdDXSetIndexBuffer body; | 2469 | SVGA3dCmdDXSetIndexBuffer body; |
@@ -2611,17 +2478,17 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv, | |||
2611 | cmd = container_of(header, typeof(*cmd), header); | 2478 | cmd = container_of(header, typeof(*cmd), header); |
2612 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 2479 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
2613 | user_surface_converter, | 2480 | user_surface_converter, |
2614 | &cmd->body.sid, &res_node); | 2481 | &cmd->body.sid, &res); |
2615 | if (unlikely(ret != 0)) | 2482 | if (unlikely(ret != 0)) |
2616 | return ret; | 2483 | return ret; |
2617 | 2484 | ||
2618 | binding.bi.ctx = ctx_node->res; | 2485 | binding.bi.ctx = ctx_node->ctx; |
2619 | binding.bi.res = ((res_node) ? res_node->res : NULL); | 2486 | binding.bi.res = res; |
2620 | binding.bi.bt = vmw_ctx_binding_ib; | 2487 | binding.bi.bt = vmw_ctx_binding_ib; |
2621 | binding.offset = cmd->body.offset; | 2488 | binding.offset = cmd->body.offset; |
2622 | binding.format = cmd->body.format; | 2489 | binding.format = cmd->body.format; |
2623 | 2490 | ||
2624 | vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0); | 2491 | vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0); |
2625 | 2492 | ||
2626 | return 0; | 2493 | return 0; |
2627 | } | 2494 | } |
@@ -2708,8 +2575,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, | |||
2708 | struct vmw_sw_context *sw_context, | 2575 | struct vmw_sw_context *sw_context, |
2709 | SVGA3dCmdHeader *header) | 2576 | SVGA3dCmdHeader *header) |
2710 | { | 2577 | { |
2711 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | 2578 | struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; |
2712 | struct vmw_resource_val_node *srf_node; | 2579 | struct vmw_resource *srf; |
2713 | struct vmw_resource *res; | 2580 | struct vmw_resource *res; |
2714 | enum vmw_view_type view_type; | 2581 | enum vmw_view_type view_type; |
2715 | int ret; | 2582 | int ret; |
@@ -2734,19 +2601,19 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, | |||
2734 | cmd = container_of(header, typeof(*cmd), header); | 2601 | cmd = container_of(header, typeof(*cmd), header); |
2735 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 2602 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
2736 | user_surface_converter, | 2603 | user_surface_converter, |
2737 | &cmd->sid, &srf_node); | 2604 | &cmd->sid, &srf); |
2738 | if (unlikely(ret != 0)) | 2605 | if (unlikely(ret != 0)) |
2739 | return ret; | 2606 | return ret; |
2740 | 2607 | ||
2741 | res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]); | 2608 | res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]); |
2742 | ret = vmw_cotable_notify(res, cmd->defined_id); | 2609 | ret = vmw_cotable_notify(res, cmd->defined_id); |
2743 | vmw_resource_unreference(&res); | 2610 | vmw_resource_unreference(&res); |
2744 | if (unlikely(ret != 0)) | 2611 | if (unlikely(ret != 0)) |
2745 | return ret; | 2612 | return ret; |
2746 | 2613 | ||
2747 | return vmw_view_add(sw_context->man, | 2614 | return vmw_view_add(sw_context->man, |
2748 | ctx_node->res, | 2615 | ctx_node->ctx, |
2749 | srf_node->res, | 2616 | srf, |
2750 | view_type, | 2617 | view_type, |
2751 | cmd->defined_id, | 2618 | cmd->defined_id, |
2752 | header, | 2619 | header, |
@@ -2766,9 +2633,9 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv, | |||
2766 | struct vmw_sw_context *sw_context, | 2633 | struct vmw_sw_context *sw_context, |
2767 | SVGA3dCmdHeader *header) | 2634 | SVGA3dCmdHeader *header) |
2768 | { | 2635 | { |
2769 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | 2636 | struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; |
2770 | struct vmw_ctx_bindinfo_so binding; | 2637 | struct vmw_ctx_bindinfo_so binding; |
2771 | struct vmw_resource_val_node *res_node; | 2638 | struct vmw_resource *res; |
2772 | struct { | 2639 | struct { |
2773 | SVGA3dCmdHeader header; | 2640 | SVGA3dCmdHeader header; |
2774 | SVGA3dCmdDXSetSOTargets body; | 2641 | SVGA3dCmdDXSetSOTargets body; |
@@ -2793,18 +2660,18 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv, | |||
2793 | for (i = 0; i < num; i++) { | 2660 | for (i = 0; i < num; i++) { |
2794 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 2661 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
2795 | user_surface_converter, | 2662 | user_surface_converter, |
2796 | &cmd->targets[i].sid, &res_node); | 2663 | &cmd->targets[i].sid, &res); |
2797 | if (unlikely(ret != 0)) | 2664 | if (unlikely(ret != 0)) |
2798 | return ret; | 2665 | return ret; |
2799 | 2666 | ||
2800 | binding.bi.ctx = ctx_node->res; | 2667 | binding.bi.ctx = ctx_node->ctx; |
2801 | binding.bi.res = ((res_node) ? res_node->res : NULL); | 2668 | binding.bi.res = res; |
2802 | binding.bi.bt = vmw_ctx_binding_so; | 2669 | binding.bi.bt = vmw_ctx_binding_so; |
2803 | binding.offset = cmd->targets[i].offset; | 2670 | binding.offset = cmd->targets[i].offset; |
2804 | binding.size = cmd->targets[i].sizeInBytes; | 2671 | binding.size = cmd->targets[i].sizeInBytes; |
2805 | binding.slot = i; | 2672 | binding.slot = i; |
2806 | 2673 | ||
2807 | vmw_binding_add(ctx_node->staged_bindings, &binding.bi, | 2674 | vmw_binding_add(ctx_node->staged, &binding.bi, |
2808 | 0, binding.slot); | 2675 | 0, binding.slot); |
2809 | } | 2676 | } |
2810 | 2677 | ||
@@ -2815,7 +2682,7 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv, | |||
2815 | struct vmw_sw_context *sw_context, | 2682 | struct vmw_sw_context *sw_context, |
2816 | SVGA3dCmdHeader *header) | 2683 | SVGA3dCmdHeader *header) |
2817 | { | 2684 | { |
2818 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | 2685 | struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; |
2819 | struct vmw_resource *res; | 2686 | struct vmw_resource *res; |
2820 | /* | 2687 | /* |
2821 | * This is based on the fact that all affected define commands have | 2688 | * This is based on the fact that all affected define commands have |
@@ -2834,7 +2701,7 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv, | |||
2834 | } | 2701 | } |
2835 | 2702 | ||
2836 | so_type = vmw_so_cmd_to_type(header->id); | 2703 | so_type = vmw_so_cmd_to_type(header->id); |
2837 | res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]); | 2704 | res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]); |
2838 | cmd = container_of(header, typeof(*cmd), header); | 2705 | cmd = container_of(header, typeof(*cmd), header); |
2839 | ret = vmw_cotable_notify(res, cmd->defined_id); | 2706 | ret = vmw_cotable_notify(res, cmd->defined_id); |
2840 | vmw_resource_unreference(&res); | 2707 | vmw_resource_unreference(&res); |
@@ -2882,7 +2749,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv, | |||
2882 | struct vmw_sw_context *sw_context, | 2749 | struct vmw_sw_context *sw_context, |
2883 | SVGA3dCmdHeader *header) | 2750 | SVGA3dCmdHeader *header) |
2884 | { | 2751 | { |
2885 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | 2752 | struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; |
2886 | 2753 | ||
2887 | if (unlikely(ctx_node == NULL)) { | 2754 | if (unlikely(ctx_node == NULL)) { |
2888 | DRM_ERROR("DX Context not set.\n"); | 2755 | DRM_ERROR("DX Context not set.\n"); |
@@ -2907,7 +2774,7 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, | |||
2907 | struct vmw_sw_context *sw_context, | 2774 | struct vmw_sw_context *sw_context, |
2908 | SVGA3dCmdHeader *header) | 2775 | SVGA3dCmdHeader *header) |
2909 | { | 2776 | { |
2910 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | 2777 | struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; |
2911 | struct { | 2778 | struct { |
2912 | SVGA3dCmdHeader header; | 2779 | SVGA3dCmdHeader header; |
2913 | union vmw_view_destroy body; | 2780 | union vmw_view_destroy body; |
@@ -2953,7 +2820,7 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv, | |||
2953 | struct vmw_sw_context *sw_context, | 2820 | struct vmw_sw_context *sw_context, |
2954 | SVGA3dCmdHeader *header) | 2821 | SVGA3dCmdHeader *header) |
2955 | { | 2822 | { |
2956 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | 2823 | struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; |
2957 | struct vmw_resource *res; | 2824 | struct vmw_resource *res; |
2958 | struct { | 2825 | struct { |
2959 | SVGA3dCmdHeader header; | 2826 | SVGA3dCmdHeader header; |
@@ -2966,13 +2833,13 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv, | |||
2966 | return -EINVAL; | 2833 | return -EINVAL; |
2967 | } | 2834 | } |
2968 | 2835 | ||
2969 | res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER); | 2836 | res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER); |
2970 | ret = vmw_cotable_notify(res, cmd->body.shaderId); | 2837 | ret = vmw_cotable_notify(res, cmd->body.shaderId); |
2971 | vmw_resource_unreference(&res); | 2838 | vmw_resource_unreference(&res); |
2972 | if (ret) | 2839 | if (ret) |
2973 | return ret; | 2840 | return ret; |
2974 | 2841 | ||
2975 | return vmw_dx_shader_add(sw_context->man, ctx_node->res, | 2842 | return vmw_dx_shader_add(sw_context->man, ctx_node->ctx, |
2976 | cmd->body.shaderId, cmd->body.type, | 2843 | cmd->body.shaderId, cmd->body.type, |
2977 | &sw_context->staged_cmd_res); | 2844 | &sw_context->staged_cmd_res); |
2978 | } | 2845 | } |
@@ -2989,7 +2856,7 @@ static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv, | |||
2989 | struct vmw_sw_context *sw_context, | 2856 | struct vmw_sw_context *sw_context, |
2990 | SVGA3dCmdHeader *header) | 2857 | SVGA3dCmdHeader *header) |
2991 | { | 2858 | { |
2992 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | 2859 | struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; |
2993 | struct { | 2860 | struct { |
2994 | SVGA3dCmdHeader header; | 2861 | SVGA3dCmdHeader header; |
2995 | SVGA3dCmdDXDestroyShader body; | 2862 | SVGA3dCmdDXDestroyShader body; |
@@ -3021,8 +2888,7 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv, | |||
3021 | struct vmw_sw_context *sw_context, | 2888 | struct vmw_sw_context *sw_context, |
3022 | SVGA3dCmdHeader *header) | 2889 | SVGA3dCmdHeader *header) |
3023 | { | 2890 | { |
3024 | struct vmw_resource_val_node *ctx_node; | 2891 | struct vmw_resource *ctx; |
3025 | struct vmw_resource_val_node *res_node; | ||
3026 | struct vmw_resource *res; | 2892 | struct vmw_resource *res; |
3027 | struct { | 2893 | struct { |
3028 | SVGA3dCmdHeader header; | 2894 | SVGA3dCmdHeader header; |
@@ -3033,32 +2899,32 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv, | |||
3033 | if (cmd->body.cid != SVGA3D_INVALID_ID) { | 2899 | if (cmd->body.cid != SVGA3D_INVALID_ID) { |
3034 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | 2900 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
3035 | user_context_converter, | 2901 | user_context_converter, |
3036 | &cmd->body.cid, &ctx_node); | 2902 | &cmd->body.cid, &ctx); |
3037 | if (ret) | 2903 | if (ret) |
3038 | return ret; | 2904 | return ret; |
3039 | } else { | 2905 | } else { |
3040 | ctx_node = sw_context->dx_ctx_node; | 2906 | if (!sw_context->dx_ctx_node) { |
3041 | if (!ctx_node) { | ||
3042 | DRM_ERROR("DX Context not set.\n"); | 2907 | DRM_ERROR("DX Context not set.\n"); |
3043 | return -EINVAL; | 2908 | return -EINVAL; |
3044 | } | 2909 | } |
2910 | ctx = sw_context->dx_ctx_node->ctx; | ||
3045 | } | 2911 | } |
3046 | 2912 | ||
3047 | res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res), | 2913 | res = vmw_shader_lookup(vmw_context_res_man(ctx), |
3048 | cmd->body.shid, 0); | 2914 | cmd->body.shid, 0); |
3049 | if (IS_ERR(res)) { | 2915 | if (IS_ERR(res)) { |
3050 | DRM_ERROR("Could not find shader to bind.\n"); | 2916 | DRM_ERROR("Could not find shader to bind.\n"); |
3051 | return PTR_ERR(res); | 2917 | return PTR_ERR(res); |
3052 | } | 2918 | } |
3053 | 2919 | ||
3054 | ret = vmw_resource_val_add(sw_context, res, &res_node); | 2920 | ret = vmw_resource_val_add(sw_context, res); |
3055 | if (ret) { | 2921 | if (ret) { |
3056 | DRM_ERROR("Error creating resource validation node.\n"); | 2922 | DRM_ERROR("Error creating resource validation node.\n"); |
3057 | goto out_unref; | 2923 | goto out_unref; |
3058 | } | 2924 | } |
3059 | 2925 | ||
3060 | 2926 | ||
3061 | ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node, | 2927 | ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res, |
3062 | &cmd->body.mobid, | 2928 | &cmd->body.mobid, |
3063 | cmd->body.offsetInBytes); | 2929 | cmd->body.offsetInBytes); |
3064 | out_unref: | 2930 | out_unref: |
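Worth flagging for reviewers: vmw_resource_val_add() drops its third argument, so callers no longer receive a validation node back, and downstream helpers such as vmw_cmd_res_switch_backup() now take the resource itself. A hedged sketch of the new calling convention, lifted straight from the hunk above:

    ret = vmw_resource_val_add(sw_context, res); /* was (sw_context, res, &res_node) */
    if (ret) {
            DRM_ERROR("Error creating resource validation node.\n");
            goto out_unref;
    }

    /* Backup switching operates on the resource directly. */
    ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
                                    &cmd->body.mobid,
                                    cmd->body.offsetInBytes);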
@@ -3645,13 +3511,11 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context) | |||
3645 | { | 3511 | { |
3646 | uint32_t i; | 3512 | uint32_t i; |
3647 | struct vmw_relocation *reloc; | 3513 | struct vmw_relocation *reloc; |
3648 | struct ttm_validate_buffer *validate; | ||
3649 | struct ttm_buffer_object *bo; | 3514 | struct ttm_buffer_object *bo; |
3650 | 3515 | ||
3651 | for (i = 0; i < sw_context->cur_reloc; ++i) { | 3516 | for (i = 0; i < sw_context->cur_reloc; ++i) { |
3652 | reloc = &sw_context->relocs[i]; | 3517 | reloc = &sw_context->relocs[i]; |
3653 | validate = &sw_context->val_bufs[reloc->index].base; | 3518 | bo = &reloc->vbo->base; |
3654 | bo = validate->bo; | ||
3655 | switch (bo->mem.mem_type) { | 3519 | switch (bo->mem.mem_type) { |
3656 | case TTM_PL_VRAM: | 3520 | case TTM_PL_VRAM: |
3657 | reloc->location->offset += bo->offset; | 3521 | reloc->location->offset += bo->offset; |
@@ -3670,110 +3534,6 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context) | |||
3670 | vmw_free_relocations(sw_context); | 3534 | vmw_free_relocations(sw_context); |
3671 | } | 3535 | } |
3672 | 3536 | ||
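Relocation patching becomes correspondingly simpler: a struct vmw_relocation now carries its vmw_buffer_object directly, so the old detour through val_bufs[reloc->index] disappears. A condensed sketch of the per-relocation step, keeping only what this hunk shows (the remaining mem_type cases are unchanged):

    reloc = &sw_context->relocs[i];
    bo = &reloc->vbo->base;          /* direct pointer, no val_bufs index */

    switch (bo->mem.mem_type) {
    case TTM_PL_VRAM:
            reloc->location->offset += bo->offset;
            break;
    /* ... other placements handled as before ... */
    }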
3673 | /** | ||
3674 | * vmw_resource_list_unreference - Free up a resource list and unreference | ||
3675 | * all resources referenced by it. | ||
3676 | * | ||
3677 | * @list: The resource list. | ||
3678 | */ | ||
3679 | static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context, | ||
3680 | struct list_head *list) | ||
3681 | { | ||
3682 | struct vmw_resource_val_node *val, *val_next; | ||
3683 | |||
3684 | /* | ||
3685 | * Drop references to resources held during command submission. | ||
3686 | */ | ||
3687 | |||
3688 | list_for_each_entry_safe(val, val_next, list, head) { | ||
3689 | list_del_init(&val->head); | ||
3690 | vmw_resource_unreference(&val->res); | ||
3691 | |||
3692 | if (val->staged_bindings) { | ||
3693 | if (val->staged_bindings != sw_context->staged_bindings) | ||
3694 | vmw_binding_state_free(val->staged_bindings); | ||
3695 | else | ||
3696 | sw_context->staged_bindings_inuse = false; | ||
3697 | val->staged_bindings = NULL; | ||
3698 | } | ||
3699 | |||
3700 | kfree(val); | ||
3701 | } | ||
3702 | } | ||
3703 | |||
3704 | static void vmw_clear_validations(struct vmw_sw_context *sw_context) | ||
3705 | { | ||
3706 | struct vmw_validate_buffer *entry, *next; | ||
3707 | struct vmw_resource_val_node *val; | ||
3708 | |||
3709 | /* | ||
3710 | * Drop references to DMA buffers held during command submission. | ||
3711 | */ | ||
3712 | list_for_each_entry_safe(entry, next, &sw_context->validate_nodes, | ||
3713 | base.head) { | ||
3714 | list_del(&entry->base.head); | ||
3715 | ttm_bo_unref(&entry->base.bo); | ||
3716 | (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash); | ||
3717 | sw_context->cur_val_buf--; | ||
3718 | } | ||
3719 | BUG_ON(sw_context->cur_val_buf != 0); | ||
3720 | |||
3721 | list_for_each_entry(val, &sw_context->resource_list, head) | ||
3722 | (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash); | ||
3723 | } | ||
3724 | |||
3725 | int vmw_validate_single_buffer(struct vmw_private *dev_priv, | ||
3726 | struct ttm_buffer_object *bo, | ||
3727 | bool interruptible, | ||
3728 | bool validate_as_mob) | ||
3729 | { | ||
3730 | struct vmw_buffer_object *vbo = | ||
3731 | container_of(bo, struct vmw_buffer_object, base); | ||
3732 | struct ttm_operation_ctx ctx = { interruptible, true }; | ||
3733 | int ret; | ||
3734 | |||
3735 | if (vbo->pin_count > 0) | ||
3736 | return 0; | ||
3737 | |||
3738 | if (validate_as_mob) | ||
3739 | return ttm_bo_validate(bo, &vmw_mob_placement, &ctx); | ||
3740 | |||
3741 | /** | ||
3742 | * Put BO in VRAM if there is space, otherwise as a GMR. | ||
3743 | * If there is no space in VRAM and GMR ids are all used up, | ||
3744 | * start evicting GMRs to make room. If the DMA buffer can't be | ||
3745 | * used as a GMR, this will return -ENOMEM. | ||
3746 | */ | ||
3747 | |||
3748 | ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx); | ||
3749 | if (likely(ret == 0 || ret == -ERESTARTSYS)) | ||
3750 | return ret; | ||
3751 | |||
3752 | /** | ||
3753 | * If that failed, try VRAM again, this time evicting | ||
3754 | * previous contents. | ||
3755 | */ | ||
3756 | |||
3757 | ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx); | ||
3758 | return ret; | ||
3759 | } | ||
3760 | |||
3761 | static int vmw_validate_buffers(struct vmw_private *dev_priv, | ||
3762 | struct vmw_sw_context *sw_context) | ||
3763 | { | ||
3764 | struct vmw_validate_buffer *entry; | ||
3765 | int ret; | ||
3766 | |||
3767 | list_for_each_entry(entry, &sw_context->validate_nodes, base.head) { | ||
3768 | ret = vmw_validate_single_buffer(dev_priv, entry->base.bo, | ||
3769 | true, | ||
3770 | entry->validate_as_mob); | ||
3771 | if (unlikely(ret != 0)) | ||
3772 | return ret; | ||
3773 | } | ||
3774 | return 0; | ||
3775 | } | ||
3776 | |||
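The removed helper above encodes the buffer-placement policy: pinned buffers and MOB validations short-circuit, otherwise try VRAM-or-GMR first and only on failure fall back to VRAM with eviction. Per the vmwgfx_kms.c hunk at the end of this diff, that policy survives under the name vmw_validation_bo_validate_single(). A condensed sketch of the policy itself, keeping only the calls visible in the removed code:

    struct ttm_operation_ctx ttm_ctx = { interruptible, true };

    if (vbo->pin_count > 0)
            return 0;               /* already pinned; nothing to do */

    if (validate_as_mob)
            return ttm_bo_validate(bo, &vmw_mob_placement, &ttm_ctx);

    /* Prefer VRAM, falling back to a GMR id... */
    ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ttm_ctx);
    if (ret == 0 || ret == -ERESTARTSYS)
            return ret;

    /* ...and as a last resort evict other contents to fit in VRAM. */
    return ttm_bo_validate(bo, &vmw_vram_placement, &ttm_ctx);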
3777 | static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context, | 3537 | static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context, |
3778 | uint32_t size) | 3538 | uint32_t size) |
3779 | { | 3539 | { |
@@ -3946,7 +3706,7 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv, | |||
3946 | 3706 | ||
3947 | if (sw_context->dx_ctx_node) | 3707 | if (sw_context->dx_ctx_node) |
3948 | cmd = vmw_fifo_reserve_dx(dev_priv, command_size, | 3708 | cmd = vmw_fifo_reserve_dx(dev_priv, command_size, |
3949 | sw_context->dx_ctx_node->res->id); | 3709 | sw_context->dx_ctx_node->ctx->id); |
3950 | else | 3710 | else |
3951 | cmd = vmw_fifo_reserve(dev_priv, command_size); | 3711 | cmd = vmw_fifo_reserve(dev_priv, command_size); |
3952 | if (!cmd) { | 3712 | if (!cmd) { |
@@ -3980,7 +3740,7 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv, | |||
3980 | u32 command_size, | 3740 | u32 command_size, |
3981 | struct vmw_sw_context *sw_context) | 3741 | struct vmw_sw_context *sw_context) |
3982 | { | 3742 | { |
3983 | u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id : | 3743 | u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id : |
3984 | SVGA3D_INVALID_ID); | 3744 | SVGA3D_INVALID_ID); |
3985 | void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, | 3745 | void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, |
3986 | id, false, header); | 3746 | id, false, header); |
@@ -4057,7 +3817,6 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv, | |||
4057 | struct vmw_sw_context *sw_context, | 3817 | struct vmw_sw_context *sw_context, |
4058 | uint32_t handle) | 3818 | uint32_t handle) |
4059 | { | 3819 | { |
4060 | struct vmw_resource_val_node *ctx_node; | ||
4061 | struct vmw_resource *res; | 3820 | struct vmw_resource *res; |
4062 | int ret; | 3821 | int ret; |
4063 | 3822 | ||
@@ -4073,11 +3832,11 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv, | |||
4073 | return ret; | 3832 | return ret; |
4074 | } | 3833 | } |
4075 | 3834 | ||
4076 | ret = vmw_resource_val_add(sw_context, res, &ctx_node); | 3835 | ret = vmw_resource_val_add(sw_context, res); |
4077 | if (unlikely(ret != 0)) | 3836 | if (unlikely(ret != 0)) |
4078 | goto out_err; | 3837 | goto out_err; |
4079 | 3838 | ||
4080 | sw_context->dx_ctx_node = ctx_node; | 3839 | sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res); |
4081 | sw_context->man = vmw_context_res_man(res); | 3840 | sw_context->man = vmw_context_res_man(res); |
4082 | out_err: | 3841 | out_err: |
4083 | vmw_resource_unreference(&res); | 3842 | vmw_resource_unreference(&res); |
@@ -4098,14 +3857,12 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
4098 | struct vmw_sw_context *sw_context = &dev_priv->ctx; | 3857 | struct vmw_sw_context *sw_context = &dev_priv->ctx; |
4099 | struct vmw_fence_obj *fence = NULL; | 3858 | struct vmw_fence_obj *fence = NULL; |
4100 | struct vmw_resource *error_resource; | 3859 | struct vmw_resource *error_resource; |
4101 | struct list_head resource_list; | ||
4102 | struct vmw_cmdbuf_header *header; | 3860 | struct vmw_cmdbuf_header *header; |
4103 | struct ww_acquire_ctx ticket; | ||
4104 | uint32_t handle; | 3861 | uint32_t handle; |
4105 | int ret; | 3862 | int ret; |
4106 | int32_t out_fence_fd = -1; | 3863 | int32_t out_fence_fd = -1; |
4107 | struct sync_file *sync_file = NULL; | 3864 | struct sync_file *sync_file = NULL; |
4108 | 3865 | DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1); | |
4109 | 3866 | ||
4110 | if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { | 3867 | if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { |
4111 | out_fence_fd = get_unused_fd_flags(O_CLOEXEC); | 3868 | out_fence_fd = get_unused_fd_flags(O_CLOEXEC); |
@@ -4158,9 +3915,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
4158 | 3915 | ||
4159 | sw_context->fp = vmw_fpriv(file_priv); | 3916 | sw_context->fp = vmw_fpriv(file_priv); |
4160 | sw_context->cur_reloc = 0; | 3917 | sw_context->cur_reloc = 0; |
4161 | sw_context->cur_val_buf = 0; | 3918 | INIT_LIST_HEAD(&sw_context->ctx_list); |
4162 | INIT_LIST_HEAD(&sw_context->resource_list); | ||
4163 | INIT_LIST_HEAD(&sw_context->ctx_resource_list); | ||
4164 | sw_context->cur_query_bo = dev_priv->pinned_bo; | 3919 | sw_context->cur_query_bo = dev_priv->pinned_bo; |
4165 | sw_context->last_query_ctx = NULL; | 3920 | sw_context->last_query_ctx = NULL; |
4166 | sw_context->needs_post_query_barrier = false; | 3921 | sw_context->needs_post_query_barrier = false; |
@@ -4168,7 +3923,6 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
4168 | sw_context->dx_query_mob = NULL; | 3923 | sw_context->dx_query_mob = NULL; |
4169 | sw_context->dx_query_ctx = NULL; | 3924 | sw_context->dx_query_ctx = NULL; |
4170 | memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache)); | 3925 | memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache)); |
4171 | INIT_LIST_HEAD(&sw_context->validate_nodes); | ||
4172 | INIT_LIST_HEAD(&sw_context->res_relocations); | 3926 | INIT_LIST_HEAD(&sw_context->res_relocations); |
4173 | if (sw_context->staged_bindings) | 3927 | if (sw_context->staged_bindings) |
4174 | vmw_binding_state_reset(sw_context->staged_bindings); | 3928 | vmw_binding_state_reset(sw_context->staged_bindings); |
@@ -4180,24 +3934,13 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
4180 | sw_context->res_ht_initialized = true; | 3934 | sw_context->res_ht_initialized = true; |
4181 | } | 3935 | } |
4182 | INIT_LIST_HEAD(&sw_context->staged_cmd_res); | 3936 | INIT_LIST_HEAD(&sw_context->staged_cmd_res); |
4183 | INIT_LIST_HEAD(&resource_list); | 3937 | sw_context->ctx = &val_ctx; |
4184 | ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle); | 3938 | ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle); |
4185 | if (unlikely(ret != 0)) { | 3939 | if (unlikely(ret != 0)) |
4186 | list_splice_init(&sw_context->ctx_resource_list, | ||
4187 | &sw_context->resource_list); | ||
4188 | goto out_err_nores; | 3940 | goto out_err_nores; |
4189 | } | ||
4190 | 3941 | ||
4191 | ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, | 3942 | ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, |
4192 | command_size); | 3943 | command_size); |
4193 | /* | ||
4194 | * Merge the resource lists before checking the return status | ||
4195 | * from vmw_cmd_check_all so that all the open hash tables will | ||
4196 | * be handled properly even if vmw_cmd_check_all fails. | ||
4197 | */ | ||
4198 | list_splice_init(&sw_context->ctx_resource_list, | ||
4199 | &sw_context->resource_list); | ||
4200 | |||
4201 | if (unlikely(ret != 0)) | 3944 | if (unlikely(ret != 0)) |
4202 | goto out_err_nores; | 3945 | goto out_err_nores; |
4203 | 3946 | ||
@@ -4205,18 +3948,18 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
4205 | if (unlikely(ret != 0)) | 3948 | if (unlikely(ret != 0)) |
4206 | goto out_err_nores; | 3949 | goto out_err_nores; |
4207 | 3950 | ||
4208 | ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, | 3951 | ret = vmw_validation_bo_reserve(&val_ctx, true); |
4209 | true, NULL); | ||
4210 | if (unlikely(ret != 0)) | 3952 | if (unlikely(ret != 0)) |
4211 | goto out_err_nores; | 3953 | goto out_err_nores; |
4212 | 3954 | ||
4213 | ret = vmw_validate_buffers(dev_priv, sw_context); | 3955 | ret = vmw_validation_bo_validate(&val_ctx, true); |
4214 | if (unlikely(ret != 0)) | 3956 | if (unlikely(ret != 0)) |
4215 | goto out_err; | 3957 | goto out_err; |
4216 | 3958 | ||
4217 | ret = vmw_resources_validate(sw_context); | 3959 | ret = vmw_validation_res_validate(&val_ctx, true); |
4218 | if (unlikely(ret != 0)) | 3960 | if (unlikely(ret != 0)) |
4219 | goto out_err; | 3961 | goto out_err; |
3962 | vmw_validation_drop_ht(&val_ctx); | ||
4220 | 3963 | ||
4221 | ret = mutex_lock_interruptible(&dev_priv->binding_mutex); | 3964 | ret = mutex_lock_interruptible(&dev_priv->binding_mutex); |
4222 | if (unlikely(ret != 0)) { | 3965 | if (unlikely(ret != 0)) { |
@@ -4255,17 +3998,16 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
4255 | if (ret != 0) | 3998 | if (ret != 0) |
4256 | DRM_ERROR("Fence submission error. Syncing.\n"); | 3999 | DRM_ERROR("Fence submission error. Syncing.\n"); |
4257 | 4000 | ||
4258 | vmw_resources_unreserve(sw_context, false); | 4001 | vmw_execbuf_bindings_commit(sw_context, false); |
4002 | vmw_bind_dx_query_mob(sw_context); | ||
4003 | vmw_validation_res_unreserve(&val_ctx, false); | ||
4259 | 4004 | ||
4260 | ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, | 4005 | vmw_validation_bo_fence(sw_context->ctx, fence); |
4261 | (void *) fence); | ||
4262 | 4006 | ||
4263 | if (unlikely(dev_priv->pinned_bo != NULL && | 4007 | if (unlikely(dev_priv->pinned_bo != NULL && |
4264 | !dev_priv->query_cid_valid)) | 4008 | !dev_priv->query_cid_valid)) |
4265 | __vmw_execbuf_release_pinned_bo(dev_priv, fence); | 4009 | __vmw_execbuf_release_pinned_bo(dev_priv, fence); |
4266 | 4010 | ||
4267 | vmw_clear_validations(sw_context); | ||
4268 | |||
4269 | /* | 4011 | /* |
4270 | * If anything fails here, give up trying to export the fence | 4012 | * If anything fails here, give up trying to export the fence |
4271 | * and do a sync since the user mode will not be able to sync | 4013 | * and do a sync since the user mode will not be able to sync |
@@ -4300,7 +4042,6 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
4300 | vmw_fence_obj_unreference(&fence); | 4042 | vmw_fence_obj_unreference(&fence); |
4301 | } | 4043 | } |
4302 | 4044 | ||
4303 | list_splice_init(&sw_context->resource_list, &resource_list); | ||
4304 | vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res); | 4045 | vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res); |
4305 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 4046 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
4306 | 4047 | ||
@@ -4308,34 +4049,35 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
4308 | * Unreference resources outside of the cmdbuf_mutex to | 4049 | * Unreference resources outside of the cmdbuf_mutex to |
4309 | * avoid deadlocks in resource destruction paths. | 4050 | * avoid deadlocks in resource destruction paths. |
4310 | */ | 4051 | */ |
4311 | vmw_resource_list_unreference(sw_context, &resource_list); | 4052 | vmw_validation_unref_lists(&val_ctx); |
4312 | 4053 | ||
4313 | return 0; | 4054 | return 0; |
4314 | 4055 | ||
4315 | out_unlock_binding: | 4056 | out_unlock_binding: |
4316 | mutex_unlock(&dev_priv->binding_mutex); | 4057 | mutex_unlock(&dev_priv->binding_mutex); |
4317 | out_err: | 4058 | out_err: |
4318 | ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); | 4059 | vmw_validation_bo_backoff(&val_ctx); |
4319 | out_err_nores: | 4060 | out_err_nores: |
4320 | vmw_resources_unreserve(sw_context, true); | 4061 | vmw_execbuf_bindings_commit(sw_context, true); |
4062 | vmw_validation_res_unreserve(&val_ctx, true); | ||
4321 | vmw_resource_relocations_free(&sw_context->res_relocations); | 4063 | vmw_resource_relocations_free(&sw_context->res_relocations); |
4322 | vmw_free_relocations(sw_context); | 4064 | vmw_free_relocations(sw_context); |
4323 | vmw_clear_validations(sw_context); | ||
4324 | if (unlikely(dev_priv->pinned_bo != NULL && | 4065 | if (unlikely(dev_priv->pinned_bo != NULL && |
4325 | !dev_priv->query_cid_valid)) | 4066 | !dev_priv->query_cid_valid)) |
4326 | __vmw_execbuf_release_pinned_bo(dev_priv, NULL); | 4067 | __vmw_execbuf_release_pinned_bo(dev_priv, NULL); |
4327 | out_unlock: | 4068 | out_unlock: |
4328 | list_splice_init(&sw_context->resource_list, &resource_list); | ||
4329 | error_resource = sw_context->error_resource; | 4069 | error_resource = sw_context->error_resource; |
4330 | sw_context->error_resource = NULL; | 4070 | sw_context->error_resource = NULL; |
4331 | vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res); | 4071 | vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res); |
4072 | vmw_validation_drop_ht(&val_ctx); | ||
4073 | WARN_ON(!list_empty(&sw_context->ctx_list)); | ||
4332 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 4074 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
4333 | 4075 | ||
4334 | /* | 4076 | /* |
4335 | * Unreference resources outside of the cmdbuf_mutex to | 4077 | * Unreference resources outside of the cmdbuf_mutex to |
4336 | * avoid deadlocks in resource destruction paths. | 4078 | * avoid deadlocks in resource destruction paths. |
4337 | */ | 4079 | */ |
4338 | vmw_resource_list_unreference(sw_context, &resource_list); | 4080 | vmw_validation_unref_lists(&val_ctx); |
4339 | if (unlikely(error_resource != NULL)) | 4081 | if (unlikely(error_resource != NULL)) |
4340 | vmw_resource_unreference(&error_resource); | 4082 | vmw_resource_unreference(&error_resource); |
4341 | out_free_header: | 4083 | out_free_header: |
@@ -4398,38 +4140,31 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, | |||
4398 | struct vmw_fence_obj *fence) | 4140 | struct vmw_fence_obj *fence) |
4399 | { | 4141 | { |
4400 | int ret = 0; | 4142 | int ret = 0; |
4401 | struct list_head validate_list; | ||
4402 | struct ttm_validate_buffer pinned_val, query_val; | ||
4403 | struct vmw_fence_obj *lfence = NULL; | 4143 | struct vmw_fence_obj *lfence = NULL; |
4404 | struct ww_acquire_ctx ticket; | 4144 | DECLARE_VAL_CONTEXT(val_ctx, NULL, 0); |
4405 | 4145 | ||
4406 | if (dev_priv->pinned_bo == NULL) | 4146 | if (dev_priv->pinned_bo == NULL) |
4407 | goto out_unlock; | 4147 | goto out_unlock; |
4408 | 4148 | ||
4409 | INIT_LIST_HEAD(&validate_list); | 4149 | ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false, |
4410 | 4150 | false); | |
4411 | pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base); | 4151 | if (ret) |
4412 | pinned_val.shared = false; | 4152 | goto out_no_reserve; |
4413 | list_add_tail(&pinned_val.head, &validate_list); | ||
4414 | 4153 | ||
4415 | query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base); | 4154 | ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false, |
4416 | query_val.shared = false; | 4155 | false); |
4417 | list_add_tail(&query_val.head, &validate_list); | 4156 | if (ret) |
4157 | goto out_no_reserve; | ||
4418 | 4158 | ||
4419 | ret = ttm_eu_reserve_buffers(&ticket, &validate_list, | 4159 | ret = vmw_validation_bo_reserve(&val_ctx, false); |
4420 | false, NULL); | 4160 | if (ret) |
4421 | if (unlikely(ret != 0)) { | ||
4422 | vmw_execbuf_unpin_panic(dev_priv); | ||
4423 | goto out_no_reserve; | 4161 | goto out_no_reserve; |
4424 | } | ||
4425 | 4162 | ||
4426 | if (dev_priv->query_cid_valid) { | 4163 | if (dev_priv->query_cid_valid) { |
4427 | BUG_ON(fence != NULL); | 4164 | BUG_ON(fence != NULL); |
4428 | ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); | 4165 | ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); |
4429 | if (unlikely(ret != 0)) { | 4166 | if (ret) |
4430 | vmw_execbuf_unpin_panic(dev_priv); | ||
4431 | goto out_no_emit; | 4167 | goto out_no_emit; |
4432 | } | ||
4433 | dev_priv->query_cid_valid = false; | 4168 | dev_priv->query_cid_valid = false; |
4434 | } | 4169 | } |
4435 | 4170 | ||
@@ -4443,22 +4178,22 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, | |||
4443 | NULL); | 4178 | NULL); |
4444 | fence = lfence; | 4179 | fence = lfence; |
4445 | } | 4180 | } |
4446 | ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence); | 4181 | vmw_validation_bo_fence(&val_ctx, fence); |
4447 | if (lfence != NULL) | 4182 | if (lfence != NULL) |
4448 | vmw_fence_obj_unreference(&lfence); | 4183 | vmw_fence_obj_unreference(&lfence); |
4449 | 4184 | ||
4450 | ttm_bo_unref(&query_val.bo); | 4185 | vmw_validation_unref_lists(&val_ctx); |
4451 | ttm_bo_unref(&pinned_val.bo); | ||
4452 | vmw_bo_unreference(&dev_priv->pinned_bo); | 4186 | vmw_bo_unreference(&dev_priv->pinned_bo); |
4453 | out_unlock: | 4187 | out_unlock: |
4454 | return; | 4188 | return; |
4455 | 4189 | ||
4456 | out_no_emit: | 4190 | out_no_emit: |
4457 | ttm_eu_backoff_reservation(&ticket, &validate_list); | 4191 | vmw_validation_bo_backoff(&val_ctx); |
4458 | out_no_reserve: | 4192 | out_no_reserve: |
4459 | ttm_bo_unref(&query_val.bo); | 4193 | vmw_validation_unref_lists(&val_ctx); |
4460 | ttm_bo_unref(&pinned_val.bo); | 4194 | vmw_execbuf_unpin_panic(dev_priv); |
4461 | vmw_bo_unreference(&dev_priv->pinned_bo); | 4195 | vmw_bo_unreference(&dev_priv->pinned_bo); |
4196 | |||
4462 | } | 4197 | } |
4463 | 4198 | ||
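The rewritten pinned-bo release is the clearest end-to-end view of the new API: declare a context, add buffers, reserve, do the work, fence, and drop the lists. A minimal sketch of that lifecycle, assuming only the calls that appear in this function, and assuming vmw_validation_bo_fence() takes over the fence-and-unreserve role that ttm_eu_fence_buffer_objects() used to play on the success path:

    DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);  /* no resource hash table needed */

    ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false, false);
    if (ret)
            goto out_no_reserve;

    ret = vmw_validation_bo_reserve(&val_ctx, false);
    if (ret)
            goto out_no_reserve;

    /* ... emit FIFO commands, obtain a fence ... */

    vmw_validation_bo_fence(&val_ctx, fence);  /* fences and unreserves */
    vmw_validation_unref_lists(&val_ctx);      /* drop refs outside locks */
    return;

    out_no_reserve:
    vmw_validation_unref_lists(&val_ctx);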
4464 | /** | 4199 | /** |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index e5659bf28ee1..ab424358b8cb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -2586,8 +2586,8 @@ int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv, | |||
2586 | if (for_cpu_blit) | 2586 | if (for_cpu_blit) |
2587 | ret = ttm_bo_validate(bo, &vmw_nonfixed_placement, &ctx); | 2587 | ret = ttm_bo_validate(bo, &vmw_nonfixed_placement, &ctx); |
2588 | else | 2588 | else |
2589 | ret = vmw_validate_single_buffer(dev_priv, bo, interruptible, | 2589 | ret = vmw_validation_bo_validate_single(bo, interruptible, |
2590 | validate_as_mob); | 2590 | validate_as_mob); |
2591 | if (ret) | 2591 | if (ret) |
2592 | ttm_bo_unreserve(bo); | 2592 | ttm_bo_unreserve(bo); |
2593 | 2593 | ||
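Finally, the KMS side only needs the rename: one-off buffer validation outside an execbuf now routes through the validation module's single-buffer helper, with the same argument list minus the now-unused dev_priv:

    ret = vmw_validation_bo_validate_single(bo, interruptible,
                                            validate_as_mob);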