author	Thomas Hellstrom <thellstrom@vmware.com>	2015-08-10 13:39:35 -0400
committer	Thomas Hellstrom <thellstrom@vmware.com>	2015-08-12 13:06:32 -0400
commit	d80efd5cb3dec16a8d1aea9b8a4a7921972dba65 (patch)
tree	7330bd6473aff84e61ebf2f89f629abab3acd3a6
parent	8ce75f8ab9044fe11caaaf2b2c82471023212f9f (diff)
drm/vmwgfx: Initial DX support
Initial DX support.
Co-authored with Sinclair Yeh, Charmaine Lee and Jakob Bornecrantz.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Sinclair Yeh <syeh@vmware.com>
Signed-off-by: Charmaine Lee <charmainel@vmware.com>
22 files changed, 5351 insertions(+), 779 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 484093986d5a..d281575bbe11 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -8,5 +8,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
 	vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
 	vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
+	vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
new file mode 100644
index 000000000000..9c42e96da510
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -0,0 +1,1294 @@
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | /* | ||
28 | * This file implements the vmwgfx context binding manager. | ||
29 | * The sole reason for having to use this code is that VMware guest- | ||
30 | * backed contexts can be swapped out to their backing mobs by the device | ||
31 | * at any time, and swapped back in at any time. At swapin time, the device | ||
32 | * validates the context bindings to make sure they point to valid resources. | ||
33 | * It's this outside-of-drawcall validation (which can happen at any time) | ||
34 | * that makes this code necessary. | ||
35 | * | ||
36 | * We therefore need to kill any context bindings pointing to a resource | ||
37 | * when the resource is swapped out. Furthermore, if the vmwgfx driver has | ||
38 | * swapped out the context, we can't swap it back in just to kill bindings, | ||
39 | * because of backing mob reservation lockdep violations. So, as part of | ||
40 | * context swapout, we also kill all bindings of the context, so that they | ||
41 | * are already killed if a resource to which a binding points | ||
42 | * needs to be swapped out. | ||
43 | * | ||
44 | * Note that a resource can be pointed to by bindings from multiple contexts. | ||
45 | * Therefore we can't easily protect this data with a per-context mutex | ||
46 | * (unless we use deadlock-safe WW mutexes). So we use a global binding_mutex | ||
47 | * to protect all binding manager data. | ||
48 | * | ||
49 | * Finally, any association between a context and a global resource | ||
50 | * (surface, shader or even DX query) is conceptually a context binding that | ||
51 | * needs to be tracked by this code. | ||
52 | */ | ||
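To make the flow described above concrete, the sketch below shows how a caller is expected to drive the binding manager: execbuf records bindings into a staged tracker with vmw_binding_add(), folds them into the context's persistent tracker with vmw_binding_state_commit() once the command stream has been handed to the device, and kills all bindings of a resource with vmw_binding_res_list_kill() before the resource goes away. This is a minimal, hypothetical sketch; the helper name and placeholder steps are not part of the patch, and real callers live elsewhere in the driver (execbuf and context code).

```c
/* Hypothetical usage sketch (not part of the patch). */
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"

static void example_track_dx_render_target(struct vmw_ctx_binding_state *staged,
					   struct vmw_resource *ctx,
					   struct vmw_resource *view)
{
	struct vmw_ctx_bindinfo_view binding = {
		.bi = {
			.ctx = ctx,   /* owning context, not refcounted */
			.res = view,  /* bound resource, not refcounted */
			.bt = vmw_ctx_binding_dx_rt,
		},
		.shader_slot = 0,     /* unused for render targets */
		.slot = 0,            /* render-target slot 0 */
	};

	/* Record the binding in the staged (execbuf) tracker. */
	vmw_binding_add(staged, &binding.bi, 0, binding.slot);

	/*
	 * Once the command stream has been submitted:
	 *	vmw_binding_state_commit(persistent, staged);
	 * When @view is about to be destroyed or evicted:
	 *	vmw_binding_res_list_kill(&view->binding_head);
	 */
}
```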
53 | |||
54 | #include "vmwgfx_drv.h" | ||
55 | #include "vmwgfx_binding.h" | ||
56 | #include "device_include/svga3d_reg.h" | ||
57 | |||
58 | #define VMW_BINDING_RT_BIT 0 | ||
59 | #define VMW_BINDING_PS_BIT 1 | ||
60 | #define VMW_BINDING_SO_BIT 2 | ||
61 | #define VMW_BINDING_VB_BIT 3 | ||
62 | #define VMW_BINDING_NUM_BITS 4 | ||
63 | |||
64 | #define VMW_BINDING_PS_SR_BIT 0 | ||
65 | |||
66 | /** | ||
67 | * struct vmw_ctx_binding_state - per context binding state | ||
68 | * | ||
69 | * @dev_priv: Pointer to device private structure. | ||
70 | * @list: linked list of individual active bindings. | ||
71 | * @render_targets: Render target bindings. | ||
72 | * @texture_units: Texture units bindings. | ||
73 | * @ds_view: Depth-stencil view binding. | ||
74 | * @so_targets: StreamOutput target bindings. | ||
75 | * @vertex_buffers: Vertex buffer bindings. | ||
76 | * @index_buffer: Index buffer binding. | ||
77 | * @per_shader: Per shader-type bindings. | ||
78 | * @dirty: Bitmap tracking per binding-type changes that have not yet | ||
79 | * been emitted to the device. | ||
80 | * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that | ||
81 | * have not yet been emitted to the device. | ||
82 | * @bind_cmd_buffer: Scratch space used to construct binding commands. | ||
83 | * @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer | ||
84 | * @bind_first_slot: Used together with @bind_cmd_buffer to indicate the | ||
85 | * device binding slot of the first command data entry in @bind_cmd_buffer. | ||
86 | * | ||
87 | * Note that this structure also provides storage space for the individual | ||
88 | * struct vmw_ctx_binding objects, so that no dynamic allocation is needed | ||
89 | * for individual bindings. | ||
90 | * | ||
91 | */ | ||
92 | struct vmw_ctx_binding_state { | ||
93 | struct vmw_private *dev_priv; | ||
94 | struct list_head list; | ||
95 | struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX]; | ||
96 | struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS]; | ||
97 | struct vmw_ctx_bindinfo_view ds_view; | ||
98 | struct vmw_ctx_bindinfo_so so_targets[SVGA3D_DX_MAX_SOTARGETS]; | ||
99 | struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS]; | ||
100 | struct vmw_ctx_bindinfo_ib index_buffer; | ||
101 | struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE_DX10]; | ||
102 | |||
103 | unsigned long dirty; | ||
104 | DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS); | ||
105 | |||
106 | u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS]; | ||
107 | u32 bind_cmd_count; | ||
108 | u32 bind_first_slot; | ||
109 | }; | ||
110 | |||
111 | static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind); | ||
112 | static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi, | ||
113 | bool rebind); | ||
114 | static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind); | ||
115 | static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind); | ||
116 | static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind); | ||
117 | static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind); | ||
118 | static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind); | ||
119 | static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs); | ||
120 | static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, | ||
121 | bool rebind); | ||
122 | static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind); | ||
123 | static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind); | ||
124 | static void vmw_binding_build_asserts(void) __attribute__ ((unused)); | ||
125 | |||
126 | typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool); | ||
127 | |||
128 | /** | ||
129 | * struct vmw_binding_info - Per binding type information for the binding | ||
130 | * manager | ||
131 | * | ||
132 | * @size: The size of the binding struct derived from struct vmw_ctx_bindinfo. | ||
133 | * @offsets: array[shader_slot] of offsets to the array[slot] | ||
134 | * of struct bindings for the binding type. | ||
135 | * @scrub_func: Pointer to the scrub function for this binding type. | ||
136 | * | ||
137 | * Holds static information to help optimize the binding manager and avoid | ||
138 | * an excessive amount of switch statements. | ||
139 | */ | ||
140 | struct vmw_binding_info { | ||
141 | size_t size; | ||
142 | const size_t *offsets; | ||
143 | vmw_scrub_func scrub_func; | ||
144 | }; | ||
145 | |||
146 | /* | ||
147 | * A number of static variables that help determine the scrub func and the | ||
148 | * location of the struct vmw_ctx_bindinfo slots for each binding type. | ||
149 | */ | ||
150 | static const size_t vmw_binding_shader_offsets[] = { | ||
151 | offsetof(struct vmw_ctx_binding_state, per_shader[0].shader), | ||
152 | offsetof(struct vmw_ctx_binding_state, per_shader[1].shader), | ||
153 | offsetof(struct vmw_ctx_binding_state, per_shader[2].shader), | ||
154 | }; | ||
155 | static const size_t vmw_binding_rt_offsets[] = { | ||
156 | offsetof(struct vmw_ctx_binding_state, render_targets), | ||
157 | }; | ||
158 | static const size_t vmw_binding_tex_offsets[] = { | ||
159 | offsetof(struct vmw_ctx_binding_state, texture_units), | ||
160 | }; | ||
161 | static const size_t vmw_binding_cb_offsets[] = { | ||
162 | offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers), | ||
163 | offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers), | ||
164 | offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers), | ||
165 | }; | ||
166 | static const size_t vmw_binding_dx_ds_offsets[] = { | ||
167 | offsetof(struct vmw_ctx_binding_state, ds_view), | ||
168 | }; | ||
169 | static const size_t vmw_binding_sr_offsets[] = { | ||
170 | offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res), | ||
171 | offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res), | ||
172 | offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res), | ||
173 | }; | ||
174 | static const size_t vmw_binding_so_offsets[] = { | ||
175 | offsetof(struct vmw_ctx_binding_state, so_targets), | ||
176 | }; | ||
177 | static const size_t vmw_binding_vb_offsets[] = { | ||
178 | offsetof(struct vmw_ctx_binding_state, vertex_buffers), | ||
179 | }; | ||
180 | static const size_t vmw_binding_ib_offsets[] = { | ||
181 | offsetof(struct vmw_ctx_binding_state, index_buffer), | ||
182 | }; | ||
183 | |||
184 | static const struct vmw_binding_info vmw_binding_infos[] = { | ||
185 | [vmw_ctx_binding_shader] = { | ||
186 | .size = sizeof(struct vmw_ctx_bindinfo_shader), | ||
187 | .offsets = vmw_binding_shader_offsets, | ||
188 | .scrub_func = vmw_binding_scrub_shader}, | ||
189 | [vmw_ctx_binding_rt] = { | ||
190 | .size = sizeof(struct vmw_ctx_bindinfo_view), | ||
191 | .offsets = vmw_binding_rt_offsets, | ||
192 | .scrub_func = vmw_binding_scrub_render_target}, | ||
193 | [vmw_ctx_binding_tex] = { | ||
194 | .size = sizeof(struct vmw_ctx_bindinfo_tex), | ||
195 | .offsets = vmw_binding_tex_offsets, | ||
196 | .scrub_func = vmw_binding_scrub_texture}, | ||
197 | [vmw_ctx_binding_cb] = { | ||
198 | .size = sizeof(struct vmw_ctx_bindinfo_cb), | ||
199 | .offsets = vmw_binding_cb_offsets, | ||
200 | .scrub_func = vmw_binding_scrub_cb}, | ||
201 | [vmw_ctx_binding_dx_shader] = { | ||
202 | .size = sizeof(struct vmw_ctx_bindinfo_shader), | ||
203 | .offsets = vmw_binding_shader_offsets, | ||
204 | .scrub_func = vmw_binding_scrub_dx_shader}, | ||
205 | [vmw_ctx_binding_dx_rt] = { | ||
206 | .size = sizeof(struct vmw_ctx_bindinfo_view), | ||
207 | .offsets = vmw_binding_rt_offsets, | ||
208 | .scrub_func = vmw_binding_scrub_dx_rt}, | ||
209 | [vmw_ctx_binding_sr] = { | ||
210 | .size = sizeof(struct vmw_ctx_bindinfo_view), | ||
211 | .offsets = vmw_binding_sr_offsets, | ||
212 | .scrub_func = vmw_binding_scrub_sr}, | ||
213 | [vmw_ctx_binding_ds] = { | ||
214 | .size = sizeof(struct vmw_ctx_bindinfo_view), | ||
215 | .offsets = vmw_binding_dx_ds_offsets, | ||
216 | .scrub_func = vmw_binding_scrub_dx_rt}, | ||
217 | [vmw_ctx_binding_so] = { | ||
218 | .size = sizeof(struct vmw_ctx_bindinfo_so), | ||
219 | .offsets = vmw_binding_so_offsets, | ||
220 | .scrub_func = vmw_binding_scrub_so}, | ||
221 | [vmw_ctx_binding_vb] = { | ||
222 | .size = sizeof(struct vmw_ctx_bindinfo_vb), | ||
223 | .offsets = vmw_binding_vb_offsets, | ||
224 | .scrub_func = vmw_binding_scrub_vb}, | ||
225 | [vmw_ctx_binding_ib] = { | ||
226 | .size = sizeof(struct vmw_ctx_bindinfo_ib), | ||
227 | .offsets = vmw_binding_ib_offsets, | ||
228 | .scrub_func = vmw_binding_scrub_ib}, | ||
229 | }; | ||
230 | |||
231 | /** | ||
232 | * vmw_cbs_context - Return a pointer to the context resource of a | ||
233 | * context binding state tracker. | ||
234 | * | ||
235 | * @cbs: The context binding state tracker. | ||
236 | * | ||
237 | * Provided there are any active bindings, this function will return an | ||
238 | * unreferenced pointer to the context resource that owns the context | ||
239 | * binding state tracker. If there are no active bindings, this function | ||
240 | * will return NULL. Note that the caller must somehow ensure that a reference | ||
241 | * is held on the context resource prior to calling this function. | ||
242 | */ | ||
243 | static const struct vmw_resource * | ||
244 | vmw_cbs_context(const struct vmw_ctx_binding_state *cbs) | ||
245 | { | ||
246 | if (list_empty(&cbs->list)) | ||
247 | return NULL; | ||
248 | |||
249 | return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo, | ||
250 | ctx_list)->ctx; | ||
251 | } | ||
252 | |||
253 | /** | ||
254 | * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location. | ||
255 | * | ||
256 | * @cbs: Pointer to a struct vmw_ctx_binding state which holds the slot. | ||
257 | * @bt: The binding type. | ||
258 | * @shader_slot: The shader slot of the binding. If none, then set to 0. | ||
259 | * @slot: The slot of the binding. | ||
260 | */ | ||
261 | static struct vmw_ctx_bindinfo * | ||
262 | vmw_binding_loc(struct vmw_ctx_binding_state *cbs, | ||
263 | enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot) | ||
264 | { | ||
265 | const struct vmw_binding_info *b = &vmw_binding_infos[bt]; | ||
266 | size_t offset = b->offsets[shader_slot] + b->size*slot; | ||
267 | |||
268 | return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset); | ||
269 | } | ||
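As a worked example of the table-driven lookup above (illustrative only, using the offset arrays and sizes defined earlier in this file), the function below open-codes what vmw_binding_loc() resolves to for a constant-buffer binding on shader slot 1, binding slot 3:

```c
/*
 * Illustrative only: the open-coded equivalent of
 * vmw_binding_loc(cbs, vmw_ctx_binding_cb, 1, 3).
 */
static struct vmw_ctx_bindinfo *
example_cb_slot(struct vmw_ctx_binding_state *cbs)
{
	/*
	 * offset = vmw_binding_cb_offsets[1]	(per_shader[1].const_buffers)
	 *	  + sizeof(struct vmw_ctx_bindinfo_cb) * 3;
	 * loc	  = (struct vmw_ctx_bindinfo *)((u8 *)cbs + offset);
	 *
	 * which is the same address as:
	 */
	return &cbs->per_shader[1].const_buffers[3].bi;
}
```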
270 | |||
271 | /** | ||
272 | * vmw_binding_drop - Stop tracking a context binding | ||
273 | * | ||
274 | * @bi: Pointer to binding tracker storage. | ||
275 | * | ||
276 | * Stops tracking a context binding, and re-initializes its storage. | ||
277 | * Typically used when the context binding is replaced with a binding to | ||
278 | * another (or the same, for that matter) resource. | ||
279 | */ | ||
280 | static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi) | ||
281 | { | ||
282 | list_del(&bi->ctx_list); | ||
283 | if (!list_empty(&bi->res_list)) | ||
284 | list_del(&bi->res_list); | ||
285 | bi->ctx = NULL; | ||
286 | } | ||
287 | |||
288 | /** | ||
289 | * vmw_binding_add - Start tracking a context binding | ||
290 | * | ||
291 | * @cbs: Pointer to the context binding state tracker. | ||
292 | * @bi: Information about the binding to track. | ||
293 | * | ||
294 | * Starts tracking the binding in the context binding | ||
295 | * state structure @cbs. | ||
296 | */ | ||
297 | void vmw_binding_add(struct vmw_ctx_binding_state *cbs, | ||
298 | const struct vmw_ctx_bindinfo *bi, | ||
299 | u32 shader_slot, u32 slot) | ||
300 | { | ||
301 | struct vmw_ctx_bindinfo *loc = | ||
302 | vmw_binding_loc(cbs, bi->bt, shader_slot, slot); | ||
303 | const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt]; | ||
304 | |||
305 | if (loc->ctx != NULL) | ||
306 | vmw_binding_drop(loc); | ||
307 | |||
308 | memcpy(loc, bi, b->size); | ||
309 | loc->scrubbed = false; | ||
310 | list_add(&loc->ctx_list, &cbs->list); | ||
311 | INIT_LIST_HEAD(&loc->res_list); | ||
312 | } | ||
313 | |||
314 | /** | ||
315 | * vmw_binding_transfer - Transfer a context binding tracking entry. | ||
316 | * | ||
317 | * @cbs: Pointer to the persistent context binding state tracker. | ||
318 | * @from: Staged binding state built during execbuf. @bi: Binding to transfer. | ||
319 | * | ||
320 | */ | ||
321 | static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs, | ||
322 | const struct vmw_ctx_binding_state *from, | ||
323 | const struct vmw_ctx_bindinfo *bi) | ||
324 | { | ||
325 | size_t offset = (unsigned long)bi - (unsigned long)from; | ||
326 | struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *) | ||
327 | ((unsigned long) cbs + offset); | ||
328 | |||
329 | if (loc->ctx != NULL) { | ||
330 | WARN_ON(bi->scrubbed); | ||
331 | |||
332 | vmw_binding_drop(loc); | ||
333 | } | ||
334 | |||
335 | if (bi->res != NULL) { | ||
336 | memcpy(loc, bi, vmw_binding_infos[bi->bt].size); | ||
337 | list_add_tail(&loc->ctx_list, &cbs->list); | ||
338 | list_add_tail(&loc->res_list, &loc->res->binding_head); | ||
339 | } | ||
340 | } | ||
341 | |||
342 | /** | ||
343 | * vmw_binding_state_kill - Kill all bindings associated with a | ||
344 | * struct vmw_ctx_binding state structure, and re-initialize the structure. | ||
345 | * | ||
346 | * @cbs: Pointer to the context binding state tracker. | ||
347 | * | ||
348 | * Emits commands to scrub all bindings associated with the | ||
349 | * context binding state tracker. Then re-initializes the whole structure. | ||
350 | */ | ||
351 | void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs) | ||
352 | { | ||
353 | struct vmw_ctx_bindinfo *entry, *next; | ||
354 | |||
355 | vmw_binding_state_scrub(cbs); | ||
356 | list_for_each_entry_safe(entry, next, &cbs->list, ctx_list) | ||
357 | vmw_binding_drop(entry); | ||
358 | } | ||
359 | |||
360 | /** | ||
361 | * vmw_binding_state_scrub - Scrub all bindings associated with a | ||
362 | * struct vmw_ctx_binding state structure. | ||
363 | * | ||
364 | * @cbs: Pointer to the context binding state tracker. | ||
365 | * | ||
366 | * Emits commands to scrub all bindings associated with the | ||
367 | * context binding state tracker. | ||
368 | */ | ||
369 | void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs) | ||
370 | { | ||
371 | struct vmw_ctx_bindinfo *entry; | ||
372 | |||
373 | list_for_each_entry(entry, &cbs->list, ctx_list) { | ||
374 | if (!entry->scrubbed) { | ||
375 | (void) vmw_binding_infos[entry->bt].scrub_func | ||
376 | (entry, false); | ||
377 | entry->scrubbed = true; | ||
378 | } | ||
379 | } | ||
380 | |||
381 | (void) vmw_binding_emit_dirty(cbs); | ||
382 | } | ||
383 | |||
384 | /** | ||
385 | * vmw_binding_res_list_kill - Kill all bindings on a | ||
386 | * resource binding list | ||
387 | * | ||
388 | * @head: list head of resource binding list | ||
389 | * | ||
390 | * Kills all bindings associated with a specific resource. Typically | ||
391 | * called before the resource is destroyed. | ||
392 | */ | ||
393 | void vmw_binding_res_list_kill(struct list_head *head) | ||
394 | { | ||
395 | struct vmw_ctx_bindinfo *entry, *next; | ||
396 | |||
397 | vmw_binding_res_list_scrub(head); | ||
398 | list_for_each_entry_safe(entry, next, head, res_list) | ||
399 | vmw_binding_drop(entry); | ||
400 | } | ||
401 | |||
402 | /** | ||
403 | * vmw_binding_res_list_scrub - Scrub all bindings on a | ||
404 | * resource binding list | ||
405 | * | ||
406 | * @head: list head of resource binding list | ||
407 | * | ||
408 | * Scrub all bindings associated with a specific resource. Typically | ||
409 | * called before the resource is evicted. | ||
410 | */ | ||
411 | void vmw_binding_res_list_scrub(struct list_head *head) | ||
412 | { | ||
413 | struct vmw_ctx_bindinfo *entry; | ||
414 | |||
415 | list_for_each_entry(entry, head, res_list) { | ||
416 | if (!entry->scrubbed) { | ||
417 | (void) vmw_binding_infos[entry->bt].scrub_func | ||
418 | (entry, false); | ||
419 | entry->scrubbed = true; | ||
420 | } | ||
421 | } | ||
422 | |||
423 | list_for_each_entry(entry, head, res_list) { | ||
424 | struct vmw_ctx_binding_state *cbs = | ||
425 | vmw_context_binding_state(entry->ctx); | ||
426 | |||
427 | (void) vmw_binding_emit_dirty(cbs); | ||
428 | } | ||
429 | } | ||
430 | |||
431 | |||
432 | /** | ||
433 | * vmw_binding_state_commit - Commit staged binding info | ||
434 | * | ||
435 | * @to: Pointer to the persistent context binding state tracker to | ||
436 | * commit the staged binding info to. | ||
437 | * @from: Staged binding info built during execbuf. | ||
438 | * | ||
439 | * Transfers binding info from a temporary structure | ||
440 | * (typically used by execbuf) to the persistent | ||
441 | * structure in the context. This can be done once commands have been | ||
442 | * submitted to hardware. | ||
443 | */ | ||
444 | void vmw_binding_state_commit(struct vmw_ctx_binding_state *to, | ||
445 | struct vmw_ctx_binding_state *from) | ||
446 | { | ||
447 | struct vmw_ctx_bindinfo *entry, *next; | ||
448 | |||
449 | list_for_each_entry_safe(entry, next, &from->list, ctx_list) { | ||
450 | vmw_binding_transfer(to, from, entry); | ||
451 | vmw_binding_drop(entry); | ||
452 | } | ||
453 | } | ||
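A minimal sketch of how the commit step is meant to be sequenced by a caller (hypothetical helper and placeholder submission step; real callers handle locking, validation and errors):

```c
/* Hypothetical sketch of the execbuf-side hand-off (not part of the patch). */
static void example_submit_and_commit(struct vmw_ctx_binding_state *persistent,
				      struct vmw_ctx_binding_state *staged)
{
	/*
	 * 1. Bindings referenced by the command stream were recorded into
	 *    @staged with vmw_binding_add() while the commands were parsed.
	 *
	 * 2. The command stream is handed to the device (placeholder step).
	 */

	/* 3. Fold the staged bindings into the context's persistent state. */
	vmw_binding_state_commit(persistent, staged);

	/*
	 * 4. vmw_binding_state_commit() drops every staged entry, so
	 *    @staged is empty afterwards and can be reused directly.
	 */
}
```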
454 | |||
455 | /** | ||
456 | * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context | ||
457 | * | ||
458 | * @cbs: Pointer to the context binding state tracker. | ||
459 | * | ||
460 | * Walks through the context binding list and rebinds all scrubbed | ||
461 | * resources. | ||
462 | */ | ||
463 | int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs) | ||
464 | { | ||
465 | struct vmw_ctx_bindinfo *entry; | ||
466 | int ret; | ||
467 | |||
468 | list_for_each_entry(entry, &cbs->list, ctx_list) { | ||
469 | if (likely(!entry->scrubbed)) | ||
470 | continue; | ||
471 | |||
472 | if ((entry->res == NULL || entry->res->id == | ||
473 | SVGA3D_INVALID_ID)) | ||
474 | continue; | ||
475 | |||
476 | ret = vmw_binding_infos[entry->bt].scrub_func(entry, true); | ||
477 | if (unlikely(ret != 0)) | ||
478 | return ret; | ||
479 | |||
480 | entry->scrubbed = false; | ||
481 | } | ||
482 | |||
483 | return vmw_binding_emit_dirty(cbs); | ||
484 | } | ||
485 | |||
486 | /** | ||
487 | * vmw_binding_scrub_shader - scrub a shader binding from a context. | ||
488 | * | ||
489 | * @bi: single binding information. | ||
490 | * @rebind: Whether to issue a bind instead of scrub command. | ||
491 | */ | ||
492 | static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind) | ||
493 | { | ||
494 | struct vmw_ctx_bindinfo_shader *binding = | ||
495 | container_of(bi, typeof(*binding), bi); | ||
496 | struct vmw_private *dev_priv = bi->ctx->dev_priv; | ||
497 | struct { | ||
498 | SVGA3dCmdHeader header; | ||
499 | SVGA3dCmdSetShader body; | ||
500 | } *cmd; | ||
501 | |||
502 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
503 | if (unlikely(cmd == NULL)) { | ||
504 | DRM_ERROR("Failed reserving FIFO space for shader " | ||
505 | "unbinding.\n"); | ||
506 | return -ENOMEM; | ||
507 | } | ||
508 | |||
509 | cmd->header.id = SVGA_3D_CMD_SET_SHADER; | ||
510 | cmd->header.size = sizeof(cmd->body); | ||
511 | cmd->body.cid = bi->ctx->id; | ||
512 | cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN; | ||
513 | cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); | ||
514 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
515 | |||
516 | return 0; | ||
517 | } | ||
518 | |||
519 | /** | ||
520 | * vmw_binding_scrub_render_target - scrub a render target binding | ||
521 | * from a context. | ||
522 | * | ||
523 | * @bi: single binding information. | ||
524 | * @rebind: Whether to issue a bind instead of scrub command. | ||
525 | */ | ||
526 | static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi, | ||
527 | bool rebind) | ||
528 | { | ||
529 | struct vmw_ctx_bindinfo_view *binding = | ||
530 | container_of(bi, typeof(*binding), bi); | ||
531 | struct vmw_private *dev_priv = bi->ctx->dev_priv; | ||
532 | struct { | ||
533 | SVGA3dCmdHeader header; | ||
534 | SVGA3dCmdSetRenderTarget body; | ||
535 | } *cmd; | ||
536 | |||
537 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
538 | if (unlikely(cmd == NULL)) { | ||
539 | DRM_ERROR("Failed reserving FIFO space for render target " | ||
540 | "unbinding.\n"); | ||
541 | return -ENOMEM; | ||
542 | } | ||
543 | |||
544 | cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET; | ||
545 | cmd->header.size = sizeof(cmd->body); | ||
546 | cmd->body.cid = bi->ctx->id; | ||
547 | cmd->body.type = binding->slot; | ||
548 | cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); | ||
549 | cmd->body.target.face = 0; | ||
550 | cmd->body.target.mipmap = 0; | ||
551 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
552 | |||
553 | return 0; | ||
554 | } | ||
555 | |||
556 | /** | ||
557 | * vmw_binding_scrub_texture - scrub a texture binding from a context. | ||
558 | * | ||
559 | * @bi: single binding information. | ||
560 | * @rebind: Whether to issue a bind instead of scrub command. | ||
561 | * | ||
562 | * TODO: Possibly complement this function with a function that takes | ||
563 | * a list of texture bindings and combines them to a single command. | ||
564 | */ | ||
565 | static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, | ||
566 | bool rebind) | ||
567 | { | ||
568 | struct vmw_ctx_bindinfo_tex *binding = | ||
569 | container_of(bi, typeof(*binding), bi); | ||
570 | struct vmw_private *dev_priv = bi->ctx->dev_priv; | ||
571 | struct { | ||
572 | SVGA3dCmdHeader header; | ||
573 | struct { | ||
574 | SVGA3dCmdSetTextureState c; | ||
575 | SVGA3dTextureState s1; | ||
576 | } body; | ||
577 | } *cmd; | ||
578 | |||
579 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
580 | if (unlikely(cmd == NULL)) { | ||
581 | DRM_ERROR("Failed reserving FIFO space for texture " | ||
582 | "unbinding.\n"); | ||
583 | return -ENOMEM; | ||
584 | } | ||
585 | |||
586 | cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE; | ||
587 | cmd->header.size = sizeof(cmd->body); | ||
588 | cmd->body.c.cid = bi->ctx->id; | ||
589 | cmd->body.s1.stage = binding->texture_stage; | ||
590 | cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; | ||
591 | cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); | ||
592 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
593 | |||
594 | return 0; | ||
595 | } | ||
596 | |||
597 | /** | ||
598 | * vmw_binding_scrub_dx_shader - scrub a dx shader binding from a context. | ||
599 | * | ||
600 | * @bi: single binding information. | ||
601 | * @rebind: Whether to issue a bind instead of scrub command. | ||
602 | */ | ||
603 | static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind) | ||
604 | { | ||
605 | struct vmw_ctx_bindinfo_shader *binding = | ||
606 | container_of(bi, typeof(*binding), bi); | ||
607 | struct vmw_private *dev_priv = bi->ctx->dev_priv; | ||
608 | struct { | ||
609 | SVGA3dCmdHeader header; | ||
610 | SVGA3dCmdDXSetShader body; | ||
611 | } *cmd; | ||
612 | |||
613 | cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id); | ||
614 | if (unlikely(cmd == NULL)) { | ||
615 | DRM_ERROR("Failed reserving FIFO space for DX shader " | ||
616 | "unbinding.\n"); | ||
617 | return -ENOMEM; | ||
618 | } | ||
619 | cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER; | ||
620 | cmd->header.size = sizeof(cmd->body); | ||
621 | cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN; | ||
622 | cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); | ||
623 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
624 | |||
625 | return 0; | ||
626 | } | ||
627 | |||
628 | /** | ||
629 | * vmw_binding_scrub_cb - scrub a constant buffer binding from a context. | ||
630 | * | ||
631 | * @bi: single binding information. | ||
632 | * @rebind: Whether to issue a bind instead of scrub command. | ||
633 | */ | ||
634 | static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind) | ||
635 | { | ||
636 | struct vmw_ctx_bindinfo_cb *binding = | ||
637 | container_of(bi, typeof(*binding), bi); | ||
638 | struct vmw_private *dev_priv = bi->ctx->dev_priv; | ||
639 | struct { | ||
640 | SVGA3dCmdHeader header; | ||
641 | SVGA3dCmdDXSetSingleConstantBuffer body; | ||
642 | } *cmd; | ||
643 | |||
644 | cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id); | ||
645 | if (unlikely(cmd == NULL)) { | ||
646 | DRM_ERROR("Failed reserving FIFO space for DX shader " | ||
647 | "unbinding.\n"); | ||
648 | return -ENOMEM; | ||
649 | } | ||
650 | |||
651 | cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER; | ||
652 | cmd->header.size = sizeof(cmd->body); | ||
653 | cmd->body.slot = binding->slot; | ||
654 | cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN; | ||
655 | if (rebind) { | ||
656 | cmd->body.offsetInBytes = binding->offset; | ||
657 | cmd->body.sizeInBytes = binding->size; | ||
658 | cmd->body.sid = bi->res->id; | ||
659 | } else { | ||
660 | cmd->body.offsetInBytes = 0; | ||
661 | cmd->body.sizeInBytes = 0; | ||
662 | cmd->body.sid = SVGA3D_INVALID_ID; | ||
663 | } | ||
664 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
665 | |||
666 | return 0; | ||
667 | } | ||
668 | |||
669 | /** | ||
670 | * vmw_collect_view_ids - Build view id data for a view binding command | ||
671 | * without checking which bindings actually need to be emitted | ||
672 | * | ||
673 | * @cbs: Pointer to the context's struct vmw_ctx_binding_state | ||
674 | * @bi: Pointer to where the binding info array is stored in @cbs | ||
675 | * @max_num: Maximum number of entries in the @bi array. | ||
676 | * | ||
677 | * Scans the @bi array for bindings and builds a buffer of view id data. | ||
678 | * Stops at the first non-existing binding in the @bi array. | ||
679 | * On output, @cbs->bind_cmd_count contains the number of bindings to be | ||
680 | * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer | ||
681 | * contains the command data. | ||
682 | */ | ||
683 | static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs, | ||
684 | const struct vmw_ctx_bindinfo *bi, | ||
685 | u32 max_num) | ||
686 | { | ||
687 | const struct vmw_ctx_bindinfo_view *biv = | ||
688 | container_of(bi, struct vmw_ctx_bindinfo_view, bi); | ||
689 | unsigned long i; | ||
690 | |||
691 | cbs->bind_cmd_count = 0; | ||
692 | cbs->bind_first_slot = 0; | ||
693 | |||
694 | for (i = 0; i < max_num; ++i, ++biv) { | ||
695 | if (!biv->bi.ctx) | ||
696 | break; | ||
697 | |||
698 | cbs->bind_cmd_buffer[cbs->bind_cmd_count++] = | ||
699 | ((biv->bi.scrubbed) ? | ||
700 | SVGA3D_INVALID_ID : biv->bi.res->id); | ||
701 | } | ||
702 | } | ||
703 | |||
704 | /** | ||
705 | * vmw_collect_dirty_view_ids - Build view id data for a view binding command | ||
706 | * | ||
707 | * @cbs: Pointer to the context's struct vmw_ctx_binding_state | ||
708 | * @bi: Pointer to where the binding info array is stored in @cbs | ||
709 | * @dirty: Bitmap indicating which bindings need to be emitted. | ||
710 | * @max_num: Maximum number of entries in the @bi array. | ||
711 | * | ||
712 | * Scans the @bi array for bindings that need to be emitted and | ||
713 | * builds a buffer of view id data. | ||
714 | * On output, @cbs->bind_cmd_count contains the number of bindings to be | ||
715 | * emitted, @cbs->bind_first_slot indicates the index of the first emitted | ||
716 | * binding, and @cbs->bind_cmd_buffer contains the command data. | ||
717 | */ | ||
718 | static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs, | ||
719 | const struct vmw_ctx_bindinfo *bi, | ||
720 | unsigned long *dirty, | ||
721 | u32 max_num) | ||
722 | { | ||
723 | const struct vmw_ctx_bindinfo_view *biv = | ||
724 | container_of(bi, struct vmw_ctx_bindinfo_view, bi); | ||
725 | unsigned long i, next_bit; | ||
726 | |||
727 | cbs->bind_cmd_count = 0; | ||
728 | i = find_first_bit(dirty, max_num); | ||
729 | next_bit = i; | ||
730 | cbs->bind_first_slot = i; | ||
731 | |||
732 | biv += i; | ||
733 | for (; i < max_num; ++i, ++biv) { | ||
734 | cbs->bind_cmd_buffer[cbs->bind_cmd_count++] = | ||
735 | ((!biv->bi.ctx || biv->bi.scrubbed) ? | ||
736 | SVGA3D_INVALID_ID : biv->bi.res->id); | ||
737 | |||
738 | if (next_bit == i) { | ||
739 | next_bit = find_next_bit(dirty, max_num, i + 1); | ||
740 | if (next_bit >= max_num) | ||
741 | break; | ||
742 | } | ||
743 | } | ||
744 | } | ||
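The loop above emits one contiguous range that starts at the first dirty slot and ends at the last dirty slot, re-sending any clean slots in between so that a single start-slot based device command can cover them. A short worked example (illustrative only):

```c
/*
 * Illustrative example: dirty bits set for slots 2 and 5 only.
 *
 *   i = find_first_bit(dirty, max_num) -> 2, so bind_first_slot = 2
 *   slot 2 emitted; next dirty bit found at 5
 *   slots 3 and 4 emitted as well (their current, possibly unchanged, ids)
 *   slot 5 emitted; no further dirty bit, so the loop breaks
 *
 * Result: bind_cmd_count = 4, covering the contiguous range of slots 2-5.
 */
```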
745 | |||
746 | /** | ||
747 | * vmw_emit_set_sr - Issue delayed DX shader resource binding commands | ||
748 | * | ||
749 | * @cbs: Pointer to the context's struct vmw_ctx_binding_state | ||
750 | */ | ||
751 | static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs, | ||
752 | int shader_slot) | ||
753 | { | ||
754 | const struct vmw_ctx_bindinfo *loc = | ||
755 | &cbs->per_shader[shader_slot].shader_res[0].bi; | ||
756 | struct { | ||
757 | SVGA3dCmdHeader header; | ||
758 | SVGA3dCmdDXSetShaderResources body; | ||
759 | } *cmd; | ||
760 | size_t cmd_size, view_id_size; | ||
761 | const struct vmw_resource *ctx = vmw_cbs_context(cbs); | ||
762 | |||
763 | vmw_collect_dirty_view_ids(cbs, loc, | ||
764 | cbs->per_shader[shader_slot].dirty_sr, | ||
765 | SVGA3D_DX_MAX_SRVIEWS); | ||
766 | if (cbs->bind_cmd_count == 0) | ||
767 | return 0; | ||
768 | |||
769 | view_id_size = cbs->bind_cmd_count*sizeof(uint32); | ||
770 | cmd_size = sizeof(*cmd) + view_id_size; | ||
771 | cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id); | ||
772 | if (unlikely(cmd == NULL)) { | ||
773 | DRM_ERROR("Failed reserving FIFO space for DX shader" | ||
774 | " resource binding.\n"); | ||
775 | return -ENOMEM; | ||
776 | } | ||
777 | |||
778 | cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES; | ||
779 | cmd->header.size = sizeof(cmd->body) + view_id_size; | ||
780 | cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN; | ||
781 | cmd->body.startView = cbs->bind_first_slot; | ||
782 | |||
783 | memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size); | ||
784 | |||
785 | vmw_fifo_commit(ctx->dev_priv, cmd_size); | ||
786 | bitmap_clear(cbs->per_shader[shader_slot].dirty_sr, | ||
787 | cbs->bind_first_slot, cbs->bind_cmd_count); | ||
788 | |||
789 | return 0; | ||
790 | } | ||
791 | |||
792 | /** | ||
793 | * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands | ||
794 | * | ||
795 | * @cbs: Pointer to the context's struct vmw_ctx_binding_state | ||
796 | */ | ||
797 | static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs) | ||
798 | { | ||
799 | const struct vmw_ctx_bindinfo *loc = &cbs->render_targets[0].bi; | ||
800 | struct { | ||
801 | SVGA3dCmdHeader header; | ||
802 | SVGA3dCmdDXSetRenderTargets body; | ||
803 | } *cmd; | ||
804 | size_t cmd_size, view_id_size; | ||
805 | const struct vmw_resource *ctx = vmw_cbs_context(cbs); | ||
806 | |||
807 | vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS); | ||
808 | view_id_size = cbs->bind_cmd_count*sizeof(uint32); | ||
809 | cmd_size = sizeof(*cmd) + view_id_size; | ||
810 | cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id); | ||
811 | if (unlikely(cmd == NULL)) { | ||
812 | DRM_ERROR("Failed reserving FIFO space for DX render-target" | ||
813 | " binding.\n"); | ||
814 | return -ENOMEM; | ||
815 | } | ||
816 | |||
817 | cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS; | ||
818 | cmd->header.size = sizeof(cmd->body) + view_id_size; | ||
819 | |||
820 | if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed) | ||
821 | cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id; | ||
822 | else | ||
823 | cmd->body.depthStencilViewId = SVGA3D_INVALID_ID; | ||
824 | |||
825 | memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size); | ||
826 | |||
827 | vmw_fifo_commit(ctx->dev_priv, cmd_size); | ||
828 | |||
829 | return 0; | ||
830 | |||
831 | } | ||
832 | |||
833 | /** | ||
834 | * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command | ||
835 | * without checking which bindings actually need to be emitted | ||
836 | * | ||
837 | * @cbs: Pointer to the context's struct vmw_ctx_binding_state | ||
838 | * @bi: Pointer to where the binding info array is stored in @cbs | ||
839 | * @max_num: Maximum number of entries in the @bi array. | ||
840 | * | ||
841 | * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data. | ||
842 | * Stops at the first non-existing binding in the @bi array. | ||
843 | * On output, @cbs->bind_cmd_count contains the number of bindings to be | ||
844 | * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer | ||
845 | * contains the command data. | ||
846 | */ | ||
847 | static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs, | ||
848 | const struct vmw_ctx_bindinfo *bi, | ||
849 | u32 max_num) | ||
850 | { | ||
851 | const struct vmw_ctx_bindinfo_so *biso = | ||
852 | container_of(bi, struct vmw_ctx_bindinfo_so, bi); | ||
853 | unsigned long i; | ||
854 | SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer; | ||
855 | |||
856 | cbs->bind_cmd_count = 0; | ||
857 | cbs->bind_first_slot = 0; | ||
858 | |||
859 | for (i = 0; i < max_num; ++i, ++biso, ++so_buffer, | ||
860 | ++cbs->bind_cmd_count) { | ||
861 | if (!biso->bi.ctx) | ||
862 | break; | ||
863 | |||
864 | if (!biso->bi.scrubbed) { | ||
865 | so_buffer->sid = biso->bi.res->id; | ||
866 | so_buffer->offset = biso->offset; | ||
867 | so_buffer->sizeInBytes = biso->size; | ||
868 | } else { | ||
869 | so_buffer->sid = SVGA3D_INVALID_ID; | ||
870 | so_buffer->offset = 0; | ||
871 | so_buffer->sizeInBytes = 0; | ||
872 | } | ||
873 | } | ||
874 | } | ||
875 | |||
876 | /** | ||
877 | * vmw_emit_set_so - Issue delayed streamout binding commands | ||
878 | * | ||
879 | * @cbs: Pointer to the context's struct vmw_ctx_binding_state | ||
880 | */ | ||
881 | static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs) | ||
882 | { | ||
883 | const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi; | ||
884 | struct { | ||
885 | SVGA3dCmdHeader header; | ||
886 | SVGA3dCmdDXSetSOTargets body; | ||
887 | } *cmd; | ||
888 | size_t cmd_size, so_target_size; | ||
889 | const struct vmw_resource *ctx = vmw_cbs_context(cbs); | ||
890 | |||
891 | vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS); | ||
892 | if (cbs->bind_cmd_count == 0) | ||
893 | return 0; | ||
894 | |||
895 | so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget); | ||
896 | cmd_size = sizeof(*cmd) + so_target_size; | ||
897 | cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id); | ||
898 | if (unlikely(cmd == NULL)) { | ||
899 | DRM_ERROR("Failed reserving FIFO space for DX SO target" | ||
900 | " binding.\n"); | ||
901 | return -ENOMEM; | ||
902 | } | ||
903 | |||
904 | cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS; | ||
905 | cmd->header.size = sizeof(cmd->body) + so_target_size; | ||
906 | memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size); | ||
907 | |||
908 | vmw_fifo_commit(ctx->dev_priv, cmd_size); | ||
909 | |||
910 | return 0; | ||
911 | |||
912 | } | ||
913 | |||
914 | /** | ||
915 | * vmw_binding_emit_dirty_ps - Issue delayed per shader binding commands | ||
916 | * | ||
917 | * @cbs: Pointer to the context's struct vmw_ctx_binding_state | ||
918 | * | ||
919 | */ | ||
920 | static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs) | ||
921 | { | ||
922 | struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0]; | ||
923 | u32 i; | ||
924 | int ret; | ||
925 | |||
926 | for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) { | ||
927 | if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty)) | ||
928 | continue; | ||
929 | |||
930 | ret = vmw_emit_set_sr(cbs, i); | ||
931 | if (ret) | ||
932 | break; | ||
933 | |||
934 | __clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty); | ||
935 | } | ||
936 | |||
937 | return 0; | ||
938 | } | ||
939 | |||
940 | /** | ||
941 | * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a | ||
942 | * SVGA3dCmdDXSetVertexBuffers command | ||
943 | * | ||
944 | * @cbs: Pointer to the context's struct vmw_ctx_binding_state | ||
945 | * @bi: Pointer to where the binding info array is stored in @cbs | ||
946 | * @dirty: Bitmap indicating which bindings need to be emitted. | ||
947 | * @max_num: Maximum number of entries in the @bi array. | ||
948 | * | ||
949 | * Scans the @bi array for bindings that need to be emitted and | ||
950 | * builds a buffer of SVGA3dVertexBuffer data. | ||
951 | * On output, @cbs->bind_cmd_count contains the number of bindings to be | ||
952 | * emitted, @cbs->bind_first_slot indicates the index of the first emitted | ||
953 | * binding, and @cbs->bind_cmd_buffer contains the command data. | ||
954 | */ | ||
955 | static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs, | ||
956 | const struct vmw_ctx_bindinfo *bi, | ||
957 | unsigned long *dirty, | ||
958 | u32 max_num) | ||
959 | { | ||
960 | const struct vmw_ctx_bindinfo_vb *biv = | ||
961 | container_of(bi, struct vmw_ctx_bindinfo_vb, bi); | ||
962 | unsigned long i, next_bit; | ||
963 | SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) &cbs->bind_cmd_buffer; | ||
964 | |||
965 | cbs->bind_cmd_count = 0; | ||
966 | i = find_first_bit(dirty, max_num); | ||
967 | next_bit = i; | ||
968 | cbs->bind_first_slot = i; | ||
969 | |||
970 | biv += i; | ||
971 | for (; i < max_num; ++i, ++biv, ++vbs) { | ||
972 | if (!biv->bi.ctx || biv->bi.scrubbed) { | ||
973 | vbs->sid = SVGA3D_INVALID_ID; | ||
974 | vbs->stride = 0; | ||
975 | vbs->offset = 0; | ||
976 | } else { | ||
977 | vbs->sid = biv->bi.res->id; | ||
978 | vbs->stride = biv->stride; | ||
979 | vbs->offset = biv->offset; | ||
980 | } | ||
981 | cbs->bind_cmd_count++; | ||
982 | if (next_bit == i) { | ||
983 | next_bit = find_next_bit(dirty, max_num, i + 1); | ||
984 | if (next_bit >= max_num) | ||
985 | break; | ||
986 | } | ||
987 | } | ||
988 | } | ||
989 | |||
990 | /** | ||
991 | * vmw_emit_set_vb - Issue delayed vertex buffer binding commands | ||
992 | * | ||
993 | * @cbs: Pointer to the context's struct vmw_ctx_binding_state | ||
994 | * | ||
995 | */ | ||
996 | static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs) | ||
997 | { | ||
998 | const struct vmw_ctx_bindinfo *loc = | ||
999 | &cbs->vertex_buffers[0].bi; | ||
1000 | struct { | ||
1001 | SVGA3dCmdHeader header; | ||
1002 | SVGA3dCmdDXSetVertexBuffers body; | ||
1003 | } *cmd; | ||
1004 | size_t cmd_size, set_vb_size; | ||
1005 | const struct vmw_resource *ctx = vmw_cbs_context(cbs); | ||
1006 | |||
1007 | vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb, | ||
1008 | SVGA3D_DX_MAX_VERTEXBUFFERS); | ||
1009 | if (cbs->bind_cmd_count == 0) | ||
1010 | return 0; | ||
1011 | |||
1012 | set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer); | ||
1013 | cmd_size = sizeof(*cmd) + set_vb_size; | ||
1014 | cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id); | ||
1015 | if (unlikely(cmd == NULL)) { | ||
1016 | DRM_ERROR("Failed reserving FIFO space for DX vertex buffer" | ||
1017 | " binding.\n"); | ||
1018 | return -ENOMEM; | ||
1019 | } | ||
1020 | |||
1021 | cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS; | ||
1022 | cmd->header.size = sizeof(cmd->body) + set_vb_size; | ||
1023 | cmd->body.startBuffer = cbs->bind_first_slot; | ||
1024 | |||
1025 | memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size); | ||
1026 | |||
1027 | vmw_fifo_commit(ctx->dev_priv, cmd_size); | ||
1028 | bitmap_clear(cbs->dirty_vb, | ||
1029 | cbs->bind_first_slot, cbs->bind_cmd_count); | ||
1030 | |||
1031 | return 0; | ||
1032 | } | ||
1033 | |||
1034 | /** | ||
1035 | * vmw_binding_emit_dirty - Issue delayed binding commands | ||
1036 | * | ||
1037 | * @cbs: Pointer to the context's struct vmw_ctx_binding_state | ||
1038 | * | ||
1039 | * This function issues the delayed binding commands that arise from | ||
1040 | * previous scrub / unscrub calls. These binding commands are typically | ||
1041 | * commands that batch a number of bindings and therefore it makes sense | ||
1042 | * to delay them. | ||
1043 | */ | ||
1044 | static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs) | ||
1045 | { | ||
1046 | int ret = 0; | ||
1047 | unsigned long hit = 0; | ||
1048 | |||
1049 | while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit)) | ||
1050 | < VMW_BINDING_NUM_BITS) { | ||
1051 | |||
1052 | switch (hit) { | ||
1053 | case VMW_BINDING_RT_BIT: | ||
1054 | ret = vmw_emit_set_rt(cbs); | ||
1055 | break; | ||
1056 | case VMW_BINDING_PS_BIT: | ||
1057 | ret = vmw_binding_emit_dirty_ps(cbs); | ||
1058 | break; | ||
1059 | case VMW_BINDING_SO_BIT: | ||
1060 | ret = vmw_emit_set_so(cbs); | ||
1061 | break; | ||
1062 | case VMW_BINDING_VB_BIT: | ||
1063 | ret = vmw_emit_set_vb(cbs); | ||
1064 | break; | ||
1065 | default: | ||
1066 | BUG(); | ||
1067 | } | ||
1068 | if (ret) | ||
1069 | return ret; | ||
1070 | |||
1071 | __clear_bit(hit, &cbs->dirty); | ||
1072 | hit++; | ||
1073 | } | ||
1074 | |||
1075 | return 0; | ||
1076 | } | ||
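Taken together, the DX scrub callbacks below never emit device commands directly; they only set dirty bits, and vmw_binding_emit_dirty() later batches the actual commands. A sketch of the round trip for a vertex-buffer binding (illustrative only):

```c
/*
 * Illustrative trace of the deferred-emission round trip:
 *
 *   vmw_binding_scrub_vb(bi, false)
 *       __set_bit(bivb->slot, cbs->dirty_vb);
 *       __set_bit(VMW_BINDING_VB_BIT, &cbs->dirty);
 *
 *   vmw_binding_emit_dirty(cbs)
 *       finds VMW_BINDING_VB_BIT set and calls vmw_emit_set_vb(cbs), which
 *       collects the dirty slots into bind_cmd_buffer, emits a single
 *       SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command for that range and clears
 *       the corresponding dirty_vb bits; the VB dirty bit itself is then
 *       cleared.
 */
```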
1077 | |||
1078 | /** | ||
1079 | * vmw_binding_scrub_sr - Schedule a DX shader resource binding | ||
1080 | * scrub from a context | ||
1081 | * | ||
1082 | * @bi: single binding information. | ||
1083 | * @rebind: Whether to issue a bind instead of scrub command. | ||
1084 | */ | ||
1085 | static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind) | ||
1086 | { | ||
1087 | struct vmw_ctx_bindinfo_view *biv = | ||
1088 | container_of(bi, struct vmw_ctx_bindinfo_view, bi); | ||
1089 | struct vmw_ctx_binding_state *cbs = | ||
1090 | vmw_context_binding_state(bi->ctx); | ||
1091 | |||
1092 | __set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr); | ||
1093 | __set_bit(VMW_BINDING_PS_SR_BIT, | ||
1094 | &cbs->per_shader[biv->shader_slot].dirty); | ||
1095 | __set_bit(VMW_BINDING_PS_BIT, &cbs->dirty); | ||
1096 | |||
1097 | return 0; | ||
1098 | } | ||
1099 | |||
1100 | /** | ||
1101 | * vmw_binding_scrub_dx_rt - Schedule a dx rendertarget binding | ||
1102 | * scrub from a context | ||
1103 | * | ||
1104 | * @bi: single binding information. | ||
1105 | * @rebind: Whether to issue a bind instead of scrub command. | ||
1106 | */ | ||
1107 | static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind) | ||
1108 | { | ||
1109 | struct vmw_ctx_binding_state *cbs = | ||
1110 | vmw_context_binding_state(bi->ctx); | ||
1111 | |||
1112 | __set_bit(VMW_BINDING_RT_BIT, &cbs->dirty); | ||
1113 | |||
1114 | return 0; | ||
1115 | } | ||
1116 | |||
1117 | /** | ||
1118 | * vmw_binding_scrub_so - Schedule a dx streamoutput buffer binding | ||
1119 | * scrub from a context | ||
1120 | * | ||
1121 | * @bi: single binding information. | ||
1122 | * @rebind: Whether to issue a bind instead of scrub command. | ||
1123 | */ | ||
1124 | static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind) | ||
1125 | { | ||
1126 | struct vmw_ctx_binding_state *cbs = | ||
1127 | vmw_context_binding_state(bi->ctx); | ||
1128 | |||
1129 | __set_bit(VMW_BINDING_SO_BIT, &cbs->dirty); | ||
1130 | |||
1131 | return 0; | ||
1132 | } | ||
1133 | |||
1134 | /** | ||
1135 | * vmw_binding_scrub_vb - Schedule a dx vertex buffer binding | ||
1136 | * scrub from a context | ||
1137 | * | ||
1138 | * @bi: single binding information. | ||
1139 | * @rebind: Whether to issue a bind instead of scrub command. | ||
1140 | */ | ||
1141 | static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind) | ||
1142 | { | ||
1143 | struct vmw_ctx_bindinfo_vb *bivb = | ||
1144 | container_of(bi, struct vmw_ctx_bindinfo_vb, bi); | ||
1145 | struct vmw_ctx_binding_state *cbs = | ||
1146 | vmw_context_binding_state(bi->ctx); | ||
1147 | |||
1148 | __set_bit(bivb->slot, cbs->dirty_vb); | ||
1149 | __set_bit(VMW_BINDING_VB_BIT, &cbs->dirty); | ||
1150 | |||
1151 | return 0; | ||
1152 | } | ||
1153 | |||
1154 | /** | ||
1155 | * vmw_binding_scrub_ib - scrub a dx index buffer binding from a context | ||
1156 | * | ||
1157 | * @bi: single binding information. | ||
1158 | * @rebind: Whether to issue a bind instead of scrub command. | ||
1159 | */ | ||
1160 | static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind) | ||
1161 | { | ||
1162 | struct vmw_ctx_bindinfo_ib *binding = | ||
1163 | container_of(bi, typeof(*binding), bi); | ||
1164 | struct vmw_private *dev_priv = bi->ctx->dev_priv; | ||
1165 | struct { | ||
1166 | SVGA3dCmdHeader header; | ||
1167 | SVGA3dCmdDXSetIndexBuffer body; | ||
1168 | } *cmd; | ||
1169 | |||
1170 | cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id); | ||
1171 | if (unlikely(cmd == NULL)) { | ||
1172 | DRM_ERROR("Failed reserving FIFO space for DX index buffer " | ||
1173 | "binding.\n"); | ||
1174 | return -ENOMEM; | ||
1175 | } | ||
1176 | cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER; | ||
1177 | cmd->header.size = sizeof(cmd->body); | ||
1178 | if (rebind) { | ||
1179 | cmd->body.sid = bi->res->id; | ||
1180 | cmd->body.format = binding->format; | ||
1181 | cmd->body.offset = binding->offset; | ||
1182 | } else { | ||
1183 | cmd->body.sid = SVGA3D_INVALID_ID; | ||
1184 | cmd->body.format = 0; | ||
1185 | cmd->body.offset = 0; | ||
1186 | } | ||
1187 | |||
1188 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
1189 | |||
1190 | return 0; | ||
1191 | } | ||
1192 | |||
1193 | /** | ||
1194 | * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with | ||
1195 | * memory accounting. | ||
1196 | * | ||
1197 | * @dev_priv: Pointer to a device private structure. | ||
1198 | * | ||
1199 | * Returns a pointer to a newly allocated struct or an error pointer on error. | ||
1200 | */ | ||
1201 | struct vmw_ctx_binding_state * | ||
1202 | vmw_binding_state_alloc(struct vmw_private *dev_priv) | ||
1203 | { | ||
1204 | struct vmw_ctx_binding_state *cbs; | ||
1205 | int ret; | ||
1206 | |||
1207 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs), | ||
1208 | false, false); | ||
1209 | if (ret) | ||
1210 | return ERR_PTR(ret); | ||
1211 | |||
1212 | cbs = vzalloc(sizeof(*cbs)); | ||
1213 | if (!cbs) { | ||
1214 | ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs)); | ||
1215 | return ERR_PTR(-ENOMEM); | ||
1216 | } | ||
1217 | |||
1218 | cbs->dev_priv = dev_priv; | ||
1219 | INIT_LIST_HEAD(&cbs->list); | ||
1220 | |||
1221 | return cbs; | ||
1222 | } | ||
1223 | |||
1224 | /** | ||
1225 | * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its | ||
1226 | * memory accounting info. | ||
1227 | * | ||
1228 | * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed. | ||
1229 | */ | ||
1230 | void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs) | ||
1231 | { | ||
1232 | struct vmw_private *dev_priv = cbs->dev_priv; | ||
1233 | |||
1234 | vfree(cbs); | ||
1235 | ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs)); | ||
1236 | } | ||
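A short usage sketch for the allocator and destructor above (hypothetical caller; error handling follows the ERR_PTR convention used by vmw_binding_state_alloc()):

```c
/* Hypothetical caller, for illustration only. */
static int example_with_staged_bindings(struct vmw_private *dev_priv)
{
	struct vmw_ctx_binding_state *cbs;

	cbs = vmw_binding_state_alloc(dev_priv);
	if (IS_ERR(cbs))
		return PTR_ERR(cbs);	/* accounting or vzalloc failure */

	/* ... record bindings with vmw_binding_add(), commit them, ... */

	vmw_binding_state_free(cbs);	/* releases memory and accounting */
	return 0;
}
```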
1237 | |||
1238 | /** | ||
1239 | * vmw_binding_state_list - Get the binding list of a | ||
1240 | * struct vmw_ctx_binding_state | ||
1241 | * | ||
1242 | * @cbs: Pointer to the struct vmw_ctx_binding_state | ||
1243 | * | ||
1244 | * Returns the binding list which can be used to traverse through the bindings | ||
1245 | * and access the resource information of all bindings. | ||
1246 | */ | ||
1247 | struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs) | ||
1248 | { | ||
1249 | return &cbs->list; | ||
1250 | } | ||
1251 | |||
1252 | /** | ||
1253 | * vmw_binding_state_reset - Clear a struct vmw_ctx_binding_state | ||
1254 | * | ||
1255 | * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared | ||
1256 | * | ||
1257 | * Drops all bindings registered in @cbs. No device binding actions are | ||
1258 | * performed. | ||
1259 | */ | ||
1260 | void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs) | ||
1261 | { | ||
1262 | struct vmw_ctx_bindinfo *entry, *next; | ||
1263 | |||
1264 | list_for_each_entry_safe(entry, next, &cbs->list, ctx_list) | ||
1265 | vmw_binding_drop(entry); | ||
1266 | } | ||
1267 | |||
1268 | /* | ||
1269 | * This function is unused at run-time, and only used to hold various build | ||
1270 | * asserts important for code optimization assumptions. | ||
1271 | */ | ||
1272 | static void vmw_binding_build_asserts(void) | ||
1273 | { | ||
1274 | BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3); | ||
1275 | BUILD_BUG_ON(SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS > SVGA3D_RT_MAX); | ||
1276 | BUILD_BUG_ON(sizeof(uint32) != sizeof(u32)); | ||
1277 | |||
1278 | /* | ||
1279 | * struct vmw_ctx_binding_state::bind_cmd_buffer is used for various | ||
1280 | * view id arrays. | ||
1281 | */ | ||
1282 | BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX); | ||
1283 | BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS); | ||
1284 | BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS); | ||
1285 | |||
1286 | /* | ||
1287 | * struct vmw_ctx_binding_state::bind_cmd_buffer is used for | ||
1288 | * u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers | ||
1289 | */ | ||
1290 | BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) > | ||
1291 | VMW_MAX_VIEW_BINDINGS*sizeof(u32)); | ||
1292 | BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) > | ||
1293 | VMW_MAX_VIEW_BINDINGS*sizeof(u32)); | ||
1294 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
new file mode 100644
index 000000000000..bf2e77ad5a20
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -0,0 +1,209 @@
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | #ifndef _VMWGFX_BINDING_H_ | ||
28 | #define _VMWGFX_BINDING_H_ | ||
29 | |||
30 | #include "device_include/svga3d_reg.h" | ||
31 | #include <linux/list.h> | ||
32 | |||
33 | #define VMW_MAX_VIEW_BINDINGS 128 | ||
34 | |||
35 | struct vmw_private; | ||
36 | struct vmw_ctx_binding_state; | ||
37 | |||
38 | /* | ||
39 | * enum vmw_ctx_binding_type - abstract resource to context binding types | ||
40 | */ | ||
41 | enum vmw_ctx_binding_type { | ||
42 | vmw_ctx_binding_shader, | ||
43 | vmw_ctx_binding_rt, | ||
44 | vmw_ctx_binding_tex, | ||
45 | vmw_ctx_binding_cb, | ||
46 | vmw_ctx_binding_dx_shader, | ||
47 | vmw_ctx_binding_dx_rt, | ||
48 | vmw_ctx_binding_sr, | ||
49 | vmw_ctx_binding_ds, | ||
50 | vmw_ctx_binding_so, | ||
51 | vmw_ctx_binding_vb, | ||
52 | vmw_ctx_binding_ib, | ||
53 | vmw_ctx_binding_max | ||
54 | }; | ||
55 | |||
56 | /** | ||
57 | * struct vmw_ctx_bindinfo - single binding metadata | ||
58 | * | ||
59 | * @ctx_list: List head for the context's list of bindings. | ||
60 | * @res_list: List head for a resource's list of bindings. | ||
61 | * @ctx: Non-refcounted pointer to the context that owns the binding. NULL | ||
62 | * indicates no binding present. | ||
63 | * @res: Non-refcounted pointer to the resource the binding points to. This | ||
64 | * is typically a surface or a view. | ||
65 | * @bt: Binding type. | ||
66 | * @scrubbed: Whether the binding has been scrubbed from the context. | ||
67 | */ | ||
68 | struct vmw_ctx_bindinfo { | ||
69 | struct list_head ctx_list; | ||
70 | struct list_head res_list; | ||
71 | struct vmw_resource *ctx; | ||
72 | struct vmw_resource *res; | ||
73 | enum vmw_ctx_binding_type bt; | ||
74 | bool scrubbed; | ||
75 | }; | ||
76 | |||
77 | /** | ||
78 | * struct vmw_ctx_bindinfo_tex - texture stage binding metadata | ||
79 | * | ||
80 | * @bi: struct vmw_ctx_bindinfo we derive from. | ||
81 | * @texture_stage: Device data used to reconstruct binding command. | ||
82 | */ | ||
83 | struct vmw_ctx_bindinfo_tex { | ||
84 | struct vmw_ctx_bindinfo bi; | ||
85 | uint32 texture_stage; | ||
86 | }; | ||
87 | |||
88 | /** | ||
89 | * struct vmw_ctx_bindinfo_shader - Shader binding metadata | ||
90 | * | ||
91 | * @bi: struct vmw_ctx_bindinfo we derive from. | ||
92 | * @shader_slot: Device data used to reconstruct binding command. | ||
93 | */ | ||
94 | struct vmw_ctx_bindinfo_shader { | ||
95 | struct vmw_ctx_bindinfo bi; | ||
96 | SVGA3dShaderType shader_slot; | ||
97 | }; | ||
98 | |||
99 | /** | ||
100 | * struct vmw_ctx_bindinfo_cb - Constant buffer binding metadata | ||
101 | * | ||
102 | * @bi: struct vmw_ctx_bindinfo we derive from. | ||
103 | * @shader_slot: Device data used to reconstruct binding command. | ||
104 | * @offset: Device data used to reconstruct binding command. | ||
105 | * @size: Device data used to reconstruct binding command. | ||
106 | * @slot: Device data used to reconstruct binding command. | ||
107 | */ | ||
108 | struct vmw_ctx_bindinfo_cb { | ||
109 | struct vmw_ctx_bindinfo bi; | ||
110 | SVGA3dShaderType shader_slot; | ||
111 | uint32 offset; | ||
112 | uint32 size; | ||
113 | uint32 slot; | ||
114 | }; | ||
115 | |||
116 | /** | ||
117 | * struct vmw_ctx_bindinfo_view - View binding metadata | ||
118 | * | ||
119 | * @bi: struct vmw_ctx_bindinfo we derive from. | ||
120 | * @shader_slot: Device data used to reconstruct binding command. | ||
121 | * @slot: Device data used to reconstruct binding command. | ||
122 | */ | ||
123 | struct vmw_ctx_bindinfo_view { | ||
124 | struct vmw_ctx_bindinfo bi; | ||
125 | SVGA3dShaderType shader_slot; | ||
126 | uint32 slot; | ||
127 | }; | ||
128 | |||
129 | /** | ||
130 | * struct vmw_ctx_bindinfo_so - StreamOutput binding metadata | ||
131 | * | ||
132 | * @bi: struct vmw_ctx_bindinfo we derive from. | ||
133 | * @offset: Device data used to reconstruct binding command. | ||
134 | * @size: Device data used to reconstruct binding command. | ||
135 | * @slot: Device data used to reconstruct binding command. | ||
136 | */ | ||
137 | struct vmw_ctx_bindinfo_so { | ||
138 | struct vmw_ctx_bindinfo bi; | ||
139 | uint32 offset; | ||
140 | uint32 size; | ||
141 | uint32 slot; | ||
142 | }; | ||
143 | |||
144 | /** | ||
145 | * struct vmw_ctx_bindinfo_vb - Vertex buffer binding metadata | ||
146 | * | ||
147 | * @bi: struct vmw_ctx_bindinfo we derive from. | ||
148 | * @offset: Device data used to reconstruct binding command. | ||
149 | * @stride: Device data used to reconstruct binding command. | ||
150 | * @slot: Device data used to reconstruct binding command. | ||
151 | */ | ||
152 | struct vmw_ctx_bindinfo_vb { | ||
153 | struct vmw_ctx_bindinfo bi; | ||
154 | uint32 offset; | ||
155 | uint32 stride; | ||
156 | uint32 slot; | ||
157 | }; | ||
158 | |||
159 | /** | ||
160 | * struct vmw_ctx_bindinfo_ib - Index buffer binding metadata | ||
161 | * | ||
162 | * @bi: struct vmw_ctx_bindinfo we derive from. | ||
163 | * @offset: Device data used to reconstruct binding command. | ||
164 | * @format: Device data used to reconstruct binding command. | ||
165 | */ | ||
166 | struct vmw_ctx_bindinfo_ib { | ||
167 | struct vmw_ctx_bindinfo bi; | ||
168 | uint32 offset; | ||
169 | uint32 format; | ||
170 | }; | ||
171 | |||
172 | /** | ||
173 | * struct vmw_dx_shader_bindings - per shader type context binding state | ||
174 | * | ||
175 | * @shader: The shader binding for this shader type | ||
176 | * @const_buffers: Const buffer bindings for this shader type. | ||
177 | * @shader_res: Shader resource view bindings for this shader type. | ||
178 | * @dirty_sr: Bitmap tracking individual shader resource binding changes | ||
179 | * that have not yet been emitted to the device. | ||
180 | * @dirty: Bitmap tracking per-binding type binding changes that have not | ||
181 | * yet been emitted to the device. | ||
182 | */ | ||
183 | struct vmw_dx_shader_bindings { | ||
184 | struct vmw_ctx_bindinfo_shader shader; | ||
185 | struct vmw_ctx_bindinfo_cb const_buffers[SVGA3D_DX_MAX_CONSTBUFFERS]; | ||
186 | struct vmw_ctx_bindinfo_view shader_res[SVGA3D_DX_MAX_SRVIEWS]; | ||
187 | DECLARE_BITMAP(dirty_sr, SVGA3D_DX_MAX_SRVIEWS); | ||
188 | unsigned long dirty; | ||
189 | }; | ||
190 | |||
191 | extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs, | ||
192 | const struct vmw_ctx_bindinfo *ci, | ||
193 | u32 shader_slot, u32 slot); | ||
194 | extern void | ||
195 | vmw_binding_state_commit(struct vmw_ctx_binding_state *to, | ||
196 | struct vmw_ctx_binding_state *from); | ||
197 | extern void vmw_binding_res_list_kill(struct list_head *head); | ||
198 | extern void vmw_binding_res_list_scrub(struct list_head *head); | ||
199 | extern int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs); | ||
200 | extern void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs); | ||
201 | extern void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs); | ||
202 | extern struct vmw_ctx_binding_state * | ||
203 | vmw_binding_state_alloc(struct vmw_private *dev_priv); | ||
204 | extern void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs); | ||
205 | extern struct list_head * | ||
206 | vmw_binding_state_list(struct vmw_ctx_binding_state *cbs); | ||
207 | extern void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs); | ||
208 | |||
209 | #endif | ||
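Illustrative usage sketch of the binding-tracker API declared above (not part of the patch; ctx_res, shader_res and dev_priv are hypothetical placeholders, the usual vmwgfx headers are assumed, and dev_priv->binding_mutex locking plus full error handling are omitted):

	struct vmw_ctx_binding_state *staged;
	struct vmw_ctx_bindinfo_shader binding = {};

	/* Build a per-submission staging tracker. */
	staged = vmw_binding_state_alloc(dev_priv);
	if (IS_ERR(staged))
		return PTR_ERR(staged);

	/* Describe one DX shader binding; both pointers are non-refcounted. */
	binding.bi.ctx = ctx_res;
	binding.bi.res = shader_res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = 0;	/* shader-type slot; 0 chosen for this sketch */

	/* Stage the binding; the last argument (@slot) is 0 here. */
	vmw_binding_add(staged, &binding.bi, binding.shader_slot, 0);

	/* After the commands are submitted successfully, fold the staged
	 * bindings into the context's persistent tracker and release the
	 * staging state. */
	vmw_binding_state_commit(vmw_context_binding_state(ctx_res), staged);
	vmw_binding_state_free(staged);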
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 04fa8526b55e..5ae8f921da2a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | |||
@@ -916,9 +916,8 @@ static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man, | |||
916 | 916 | ||
917 | cur = man->cur; | 917 | cur = man->cur; |
918 | if (cur && (size + man->cur_pos > cur->size || | 918 | if (cur && (size + man->cur_pos > cur->size || |
919 | (ctx_id != SVGA3D_INVALID_ID && | 919 | ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) && |
920 | (cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) && | 920 | ctx_id != cur->cb_header->dxContext))) |
921 | ctx_id != cur->cb_header->dxContext))) | ||
922 | __vmw_cmdbuf_cur_flush(man); | 921 | __vmw_cmdbuf_cur_flush(man); |
923 | 922 | ||
924 | if (!man->cur) { | 923 | if (!man->cur) { |
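The hunk above changes when the pending command buffer is flushed before a new reservation. Restated as a helper for clarity (illustrative only; vmw_cmdbuf_cur_needs_flush is a made-up name, and the struct vmw_cmdbuf_header layout is assumed from the surrounding driver code):

	static bool vmw_cmdbuf_cur_needs_flush(const struct vmw_cmdbuf_header *cur,
					       size_t cur_pos, size_t size,
					       u32 ctx_id)
	{
		/* Flush if the new reservation would overflow the buffer... */
		if (size + cur_pos > cur->size)
			return true;

		/*
		 * ...or if the buffer was reserved for a DX context different
		 * from the one requested now. Note that with the new code a
		 * reservation without a DX context (ctx_id == SVGA3D_INVALID_ID)
		 * also flushes a buffer that carries a DX context, which the
		 * old check skipped.
		 */
		return (cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
		       ctx_id != cur->cb_header->dxContext;
	}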
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index 21e9b7f8dad0..59d965f8b530 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c | |||
@@ -26,15 +26,10 @@ | |||
26 | **************************************************************************/ | 26 | **************************************************************************/ |
27 | 27 | ||
28 | #include "vmwgfx_drv.h" | 28 | #include "vmwgfx_drv.h" |
29 | #include "vmwgfx_resource_priv.h" | ||
29 | 30 | ||
30 | #define VMW_CMDBUF_RES_MAN_HT_ORDER 12 | 31 | #define VMW_CMDBUF_RES_MAN_HT_ORDER 12 |
31 | 32 | ||
32 | enum vmw_cmdbuf_res_state { | ||
33 | VMW_CMDBUF_RES_COMMITED, | ||
34 | VMW_CMDBUF_RES_ADD, | ||
35 | VMW_CMDBUF_RES_DEL | ||
36 | }; | ||
37 | |||
38 | /** | 33 | /** |
39 | * struct vmw_cmdbuf_res - Command buffer managed resource entry. | 34 | * struct vmw_cmdbuf_res - Command buffer managed resource entry. |
40 | * | 35 | * |
@@ -132,9 +127,12 @@ void vmw_cmdbuf_res_commit(struct list_head *list) | |||
132 | 127 | ||
133 | list_for_each_entry_safe(entry, next, list, head) { | 128 | list_for_each_entry_safe(entry, next, list, head) { |
134 | list_del(&entry->head); | 129 | list_del(&entry->head); |
130 | if (entry->res->func->commit_notify) | ||
131 | entry->res->func->commit_notify(entry->res, | ||
132 | entry->state); | ||
135 | switch (entry->state) { | 133 | switch (entry->state) { |
136 | case VMW_CMDBUF_RES_ADD: | 134 | case VMW_CMDBUF_RES_ADD: |
137 | entry->state = VMW_CMDBUF_RES_COMMITED; | 135 | entry->state = VMW_CMDBUF_RES_COMMITTED; |
138 | list_add_tail(&entry->head, &entry->man->list); | 136 | list_add_tail(&entry->head, &entry->man->list); |
139 | break; | 137 | break; |
140 | case VMW_CMDBUF_RES_DEL: | 138 | case VMW_CMDBUF_RES_DEL: |
@@ -175,7 +173,7 @@ void vmw_cmdbuf_res_revert(struct list_head *list) | |||
175 | &entry->hash); | 173 | &entry->hash); |
176 | list_del(&entry->head); | 174 | list_del(&entry->head); |
177 | list_add_tail(&entry->head, &entry->man->list); | 175 | list_add_tail(&entry->head, &entry->man->list); |
178 | entry->state = VMW_CMDBUF_RES_COMMITED; | 176 | entry->state = VMW_CMDBUF_RES_COMMITTED; |
179 | break; | 177 | break; |
180 | default: | 178 | default: |
181 | BUG(); | 179 | BUG(); |
@@ -231,6 +229,9 @@ out_invalid_key: | |||
231 | * @res_type: The resource type. | 229 | * @res_type: The resource type. |
232 | * @user_key: The user-space id of the resource. | 230 | * @user_key: The user-space id of the resource. |
233 | * @list: The staging list. | 231 | * @list: The staging list. |
232 | * @res_p: If the resource is in an already committed state, points to the | ||
233 | * struct vmw_resource on successful return. The pointer will be | ||
234 | * non-refcounted. | ||
234 | * | 235 | * |
235 | * This function looks up the struct vmw_cmdbuf_res entry from the manager | 236 | * This function looks up the struct vmw_cmdbuf_res entry from the manager |
236 | * hash table and, if it exists, removes it. Depending on its current staging | 237 | * hash table and, if it exists, removes it. Depending on its current staging |
@@ -240,7 +241,8 @@ out_invalid_key: | |||
240 | int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, | 241 | int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, |
241 | enum vmw_cmdbuf_res_type res_type, | 242 | enum vmw_cmdbuf_res_type res_type, |
242 | u32 user_key, | 243 | u32 user_key, |
243 | struct list_head *list) | 244 | struct list_head *list, |
245 | struct vmw_resource **res_p) | ||
244 | { | 246 | { |
245 | struct vmw_cmdbuf_res *entry; | 247 | struct vmw_cmdbuf_res *entry; |
246 | struct drm_hash_item *hash; | 248 | struct drm_hash_item *hash; |
@@ -256,12 +258,14 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, | |||
256 | switch (entry->state) { | 258 | switch (entry->state) { |
257 | case VMW_CMDBUF_RES_ADD: | 259 | case VMW_CMDBUF_RES_ADD: |
258 | vmw_cmdbuf_res_free(man, entry); | 260 | vmw_cmdbuf_res_free(man, entry); |
261 | *res_p = NULL; | ||
259 | break; | 262 | break; |
260 | case VMW_CMDBUF_RES_COMMITED: | 263 | case VMW_CMDBUF_RES_COMMITTED: |
261 | (void) drm_ht_remove_item(&man->resources, &entry->hash); | 264 | (void) drm_ht_remove_item(&man->resources, &entry->hash); |
262 | list_del(&entry->head); | 265 | list_del(&entry->head); |
263 | entry->state = VMW_CMDBUF_RES_DEL; | 266 | entry->state = VMW_CMDBUF_RES_DEL; |
264 | list_add_tail(&entry->head, list); | 267 | list_add_tail(&entry->head, list); |
268 | *res_p = entry->res; | ||
265 | break; | 269 | break; |
266 | default: | 270 | default: |
267 | BUG(); | 271 | BUG(); |
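A short sketch of how a caller consumes the extended vmw_cmdbuf_res_remove() interface above (illustrative only; man, res_type, user_key and staging_list are hypothetical placeholders):

	struct vmw_resource *res;
	int ret;

	ret = vmw_cmdbuf_res_remove(man, res_type, user_key,
				    &staging_list, &res);
	if (unlikely(ret != 0))
		return ret;

	/*
	 * res is NULL when the entry was only staged for addition and has
	 * simply been freed. Otherwise it is a non-refcounted pointer to the
	 * committed resource, now staged for deletion, which remains usable
	 * (for example to emit a destroy command) until the staging list is
	 * committed or reverted.
	 */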
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index 15f954423e7c..abfe67c893c7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c | |||
@@ -27,19 +27,18 @@ | |||
27 | 27 | ||
28 | #include "vmwgfx_drv.h" | 28 | #include "vmwgfx_drv.h" |
29 | #include "vmwgfx_resource_priv.h" | 29 | #include "vmwgfx_resource_priv.h" |
30 | #include "vmwgfx_binding.h" | ||
30 | #include "ttm/ttm_placement.h" | 31 | #include "ttm/ttm_placement.h" |
31 | 32 | ||
32 | struct vmw_user_context { | 33 | struct vmw_user_context { |
33 | struct ttm_base_object base; | 34 | struct ttm_base_object base; |
34 | struct vmw_resource res; | 35 | struct vmw_resource res; |
35 | struct vmw_ctx_binding_state cbs; | 36 | struct vmw_ctx_binding_state *cbs; |
36 | struct vmw_cmdbuf_res_manager *man; | 37 | struct vmw_cmdbuf_res_manager *man; |
38 | struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX]; | ||
39 | spinlock_t cotable_lock; | ||
37 | }; | 40 | }; |
38 | 41 | ||
39 | |||
40 | |||
41 | typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool); | ||
42 | |||
43 | static void vmw_user_context_free(struct vmw_resource *res); | 42 | static void vmw_user_context_free(struct vmw_resource *res); |
44 | static struct vmw_resource * | 43 | static struct vmw_resource * |
45 | vmw_user_context_base_to_res(struct ttm_base_object *base); | 44 | vmw_user_context_base_to_res(struct ttm_base_object *base); |
@@ -51,12 +50,14 @@ static int vmw_gb_context_unbind(struct vmw_resource *res, | |||
51 | bool readback, | 50 | bool readback, |
52 | struct ttm_validate_buffer *val_buf); | 51 | struct ttm_validate_buffer *val_buf); |
53 | static int vmw_gb_context_destroy(struct vmw_resource *res); | 52 | static int vmw_gb_context_destroy(struct vmw_resource *res); |
54 | static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind); | 53 | static int vmw_dx_context_create(struct vmw_resource *res); |
55 | static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi, | 54 | static int vmw_dx_context_bind(struct vmw_resource *res, |
56 | bool rebind); | 55 | struct ttm_validate_buffer *val_buf); |
57 | static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind); | 56 | static int vmw_dx_context_unbind(struct vmw_resource *res, |
58 | static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs); | 57 | bool readback, |
59 | static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs); | 58 | struct ttm_validate_buffer *val_buf); |
59 | static int vmw_dx_context_destroy(struct vmw_resource *res); | ||
60 | |||
60 | static uint64_t vmw_user_context_size; | 61 | static uint64_t vmw_user_context_size; |
61 | 62 | ||
62 | static const struct vmw_user_resource_conv user_context_conv = { | 63 | static const struct vmw_user_resource_conv user_context_conv = { |
@@ -93,15 +94,36 @@ static const struct vmw_res_func vmw_gb_context_func = { | |||
93 | .unbind = vmw_gb_context_unbind | 94 | .unbind = vmw_gb_context_unbind |
94 | }; | 95 | }; |
95 | 96 | ||
96 | static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = { | 97 | static const struct vmw_res_func vmw_dx_context_func = { |
97 | [vmw_ctx_binding_shader] = vmw_context_scrub_shader, | 98 | .res_type = vmw_res_dx_context, |
98 | [vmw_ctx_binding_rt] = vmw_context_scrub_render_target, | 99 | .needs_backup = true, |
99 | [vmw_ctx_binding_tex] = vmw_context_scrub_texture }; | 100 | .may_evict = true, |
101 | .type_name = "dx contexts", | ||
102 | .backup_placement = &vmw_mob_placement, | ||
103 | .create = vmw_dx_context_create, | ||
104 | .destroy = vmw_dx_context_destroy, | ||
105 | .bind = vmw_dx_context_bind, | ||
106 | .unbind = vmw_dx_context_unbind | ||
107 | }; | ||
100 | 108 | ||
101 | /** | 109 | /** |
102 | * Context management: | 110 | * Context management: |
103 | */ | 111 | */ |
104 | 112 | ||
113 | static void vmw_context_cotables_unref(struct vmw_user_context *uctx) | ||
114 | { | ||
115 | struct vmw_resource *res; | ||
116 | int i; | ||
117 | |||
118 | for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { | ||
119 | spin_lock(&uctx->cotable_lock); | ||
120 | res = uctx->cotables[i]; | ||
121 | uctx->cotables[i] = NULL; | ||
122 | spin_unlock(&uctx->cotable_lock); | ||
123 | vmw_resource_unreference(&res); | ||
124 | } | ||
125 | } | ||
126 | |||
105 | static void vmw_hw_context_destroy(struct vmw_resource *res) | 127 | static void vmw_hw_context_destroy(struct vmw_resource *res) |
106 | { | 128 | { |
107 | struct vmw_user_context *uctx = | 129 | struct vmw_user_context *uctx = |
@@ -113,17 +135,19 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) | |||
113 | } *cmd; | 135 | } *cmd; |
114 | 136 | ||
115 | 137 | ||
116 | if (res->func->destroy == vmw_gb_context_destroy) { | 138 | if (res->func->destroy == vmw_gb_context_destroy || |
139 | res->func->destroy == vmw_dx_context_destroy) { | ||
117 | mutex_lock(&dev_priv->cmdbuf_mutex); | 140 | mutex_lock(&dev_priv->cmdbuf_mutex); |
118 | vmw_cmdbuf_res_man_destroy(uctx->man); | 141 | vmw_cmdbuf_res_man_destroy(uctx->man); |
119 | mutex_lock(&dev_priv->binding_mutex); | 142 | mutex_lock(&dev_priv->binding_mutex); |
120 | (void) vmw_context_binding_state_kill(&uctx->cbs); | 143 | vmw_binding_state_kill(uctx->cbs); |
121 | (void) vmw_gb_context_destroy(res); | 144 | (void) res->func->destroy(res); |
122 | mutex_unlock(&dev_priv->binding_mutex); | 145 | mutex_unlock(&dev_priv->binding_mutex); |
123 | if (dev_priv->pinned_bo != NULL && | 146 | if (dev_priv->pinned_bo != NULL && |
124 | !dev_priv->query_cid_valid) | 147 | !dev_priv->query_cid_valid) |
125 | __vmw_execbuf_release_pinned_bo(dev_priv, NULL); | 148 | __vmw_execbuf_release_pinned_bo(dev_priv, NULL); |
126 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 149 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
150 | vmw_context_cotables_unref(uctx); | ||
127 | return; | 151 | return; |
128 | } | 152 | } |
129 | 153 | ||
@@ -144,16 +168,20 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) | |||
144 | } | 168 | } |
145 | 169 | ||
146 | static int vmw_gb_context_init(struct vmw_private *dev_priv, | 170 | static int vmw_gb_context_init(struct vmw_private *dev_priv, |
171 | bool dx, | ||
147 | struct vmw_resource *res, | 172 | struct vmw_resource *res, |
148 | void (*res_free) (struct vmw_resource *res)) | 173 | void (*res_free)(struct vmw_resource *res)) |
149 | { | 174 | { |
150 | int ret; | 175 | int ret, i; |
151 | struct vmw_user_context *uctx = | 176 | struct vmw_user_context *uctx = |
152 | container_of(res, struct vmw_user_context, res); | 177 | container_of(res, struct vmw_user_context, res); |
153 | 178 | ||
179 | res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) : | ||
180 | SVGA3D_CONTEXT_DATA_SIZE); | ||
154 | ret = vmw_resource_init(dev_priv, res, true, | 181 | ret = vmw_resource_init(dev_priv, res, true, |
155 | res_free, &vmw_gb_context_func); | 182 | res_free, |
156 | res->backup_size = SVGA3D_CONTEXT_DATA_SIZE; | 183 | dx ? &vmw_dx_context_func : |
184 | &vmw_gb_context_func); | ||
157 | if (unlikely(ret != 0)) | 185 | if (unlikely(ret != 0)) |
158 | goto out_err; | 186 | goto out_err; |
159 | 187 | ||
@@ -166,12 +194,32 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv, | |||
166 | } | 194 | } |
167 | } | 195 | } |
168 | 196 | ||
169 | memset(&uctx->cbs, 0, sizeof(uctx->cbs)); | 197 | uctx->cbs = vmw_binding_state_alloc(dev_priv); |
170 | INIT_LIST_HEAD(&uctx->cbs.list); | 198 | if (IS_ERR(uctx->cbs)) { |
199 | ret = PTR_ERR(uctx->cbs); | ||
200 | goto out_err; | ||
201 | } | ||
202 | |||
203 | spin_lock_init(&uctx->cotable_lock); | ||
204 | |||
205 | if (dx) { | ||
206 | for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { | ||
207 | uctx->cotables[i] = vmw_cotable_alloc(dev_priv, | ||
208 | &uctx->res, i); | ||
209 | if (unlikely(uctx->cotables[i] == NULL)) { | ||
210 | ret = -ENOMEM; | ||
211 | goto out_cotables; | ||
212 | } | ||
213 | } | ||
214 | } | ||
215 | |||
216 | |||
171 | 217 | ||
172 | vmw_resource_activate(res, vmw_hw_context_destroy); | 218 | vmw_resource_activate(res, vmw_hw_context_destroy); |
173 | return 0; | 219 | return 0; |
174 | 220 | ||
221 | out_cotables: | ||
222 | vmw_context_cotables_unref(uctx); | ||
175 | out_err: | 223 | out_err: |
176 | if (res_free) | 224 | if (res_free) |
177 | res_free(res); | 225 | res_free(res); |
@@ -182,7 +230,8 @@ out_err: | |||
182 | 230 | ||
183 | static int vmw_context_init(struct vmw_private *dev_priv, | 231 | static int vmw_context_init(struct vmw_private *dev_priv, |
184 | struct vmw_resource *res, | 232 | struct vmw_resource *res, |
185 | void (*res_free) (struct vmw_resource *res)) | 233 | void (*res_free)(struct vmw_resource *res), |
234 | bool dx) | ||
186 | { | 235 | { |
187 | int ret; | 236 | int ret; |
188 | 237 | ||
@@ -192,7 +241,7 @@ static int vmw_context_init(struct vmw_private *dev_priv, | |||
192 | } *cmd; | 241 | } *cmd; |
193 | 242 | ||
194 | if (dev_priv->has_mob) | 243 | if (dev_priv->has_mob) |
195 | return vmw_gb_context_init(dev_priv, res, res_free); | 244 | return vmw_gb_context_init(dev_priv, dx, res, res_free); |
196 | 245 | ||
197 | ret = vmw_resource_init(dev_priv, res, false, | 246 | ret = vmw_resource_init(dev_priv, res, false, |
198 | res_free, &vmw_legacy_context_func); | 247 | res_free, &vmw_legacy_context_func); |
@@ -232,19 +281,10 @@ out_early: | |||
232 | return ret; | 281 | return ret; |
233 | } | 282 | } |
234 | 283 | ||
235 | struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv) | ||
236 | { | ||
237 | struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL); | ||
238 | int ret; | ||
239 | |||
240 | if (unlikely(res == NULL)) | ||
241 | return NULL; | ||
242 | |||
243 | ret = vmw_context_init(dev_priv, res, NULL); | ||
244 | |||
245 | return (ret == 0) ? res : NULL; | ||
246 | } | ||
247 | 284 | ||
285 | /* | ||
286 | * GB context. | ||
287 | */ | ||
248 | 288 | ||
249 | static int vmw_gb_context_create(struct vmw_resource *res) | 289 | static int vmw_gb_context_create(struct vmw_resource *res) |
250 | { | 290 | { |
@@ -309,7 +349,6 @@ static int vmw_gb_context_bind(struct vmw_resource *res, | |||
309 | "binding.\n"); | 349 | "binding.\n"); |
310 | return -ENOMEM; | 350 | return -ENOMEM; |
311 | } | 351 | } |
312 | |||
313 | cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; | 352 | cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; |
314 | cmd->header.size = sizeof(cmd->body); | 353 | cmd->header.size = sizeof(cmd->body); |
315 | cmd->body.cid = res->id; | 354 | cmd->body.cid = res->id; |
@@ -346,7 +385,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res, | |||
346 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | 385 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); |
347 | 386 | ||
348 | mutex_lock(&dev_priv->binding_mutex); | 387 | mutex_lock(&dev_priv->binding_mutex); |
349 | vmw_context_binding_state_scrub(&uctx->cbs); | 388 | vmw_binding_state_scrub(uctx->cbs); |
350 | 389 | ||
351 | submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); | 390 | submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); |
352 | 391 | ||
@@ -419,6 +458,221 @@ static int vmw_gb_context_destroy(struct vmw_resource *res) | |||
419 | return 0; | 458 | return 0; |
420 | } | 459 | } |
421 | 460 | ||
461 | /* | ||
462 | * DX context. | ||
463 | */ | ||
464 | |||
465 | static int vmw_dx_context_create(struct vmw_resource *res) | ||
466 | { | ||
467 | struct vmw_private *dev_priv = res->dev_priv; | ||
468 | int ret; | ||
469 | struct { | ||
470 | SVGA3dCmdHeader header; | ||
471 | SVGA3dCmdDXDefineContext body; | ||
472 | } *cmd; | ||
473 | |||
474 | if (likely(res->id != -1)) | ||
475 | return 0; | ||
476 | |||
477 | ret = vmw_resource_alloc_id(res); | ||
478 | if (unlikely(ret != 0)) { | ||
479 | DRM_ERROR("Failed to allocate a context id.\n"); | ||
480 | goto out_no_id; | ||
481 | } | ||
482 | |||
483 | if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) { | ||
484 | ret = -EBUSY; | ||
485 | goto out_no_fifo; | ||
486 | } | ||
487 | |||
488 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
489 | if (unlikely(cmd == NULL)) { | ||
490 | DRM_ERROR("Failed reserving FIFO space for context " | ||
491 | "creation.\n"); | ||
492 | ret = -ENOMEM; | ||
493 | goto out_no_fifo; | ||
494 | } | ||
495 | |||
496 | cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT; | ||
497 | cmd->header.size = sizeof(cmd->body); | ||
498 | cmd->body.cid = res->id; | ||
499 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
500 | vmw_fifo_resource_inc(dev_priv); | ||
501 | |||
502 | return 0; | ||
503 | |||
504 | out_no_fifo: | ||
505 | vmw_resource_release_id(res); | ||
506 | out_no_id: | ||
507 | return ret; | ||
508 | } | ||
509 | |||
510 | static int vmw_dx_context_bind(struct vmw_resource *res, | ||
511 | struct ttm_validate_buffer *val_buf) | ||
512 | { | ||
513 | struct vmw_private *dev_priv = res->dev_priv; | ||
514 | struct { | ||
515 | SVGA3dCmdHeader header; | ||
516 | SVGA3dCmdDXBindContext body; | ||
517 | } *cmd; | ||
518 | struct ttm_buffer_object *bo = val_buf->bo; | ||
519 | |||
520 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
521 | |||
522 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
523 | if (unlikely(cmd == NULL)) { | ||
524 | DRM_ERROR("Failed reserving FIFO space for context " | ||
525 | "binding.\n"); | ||
526 | return -ENOMEM; | ||
527 | } | ||
528 | |||
529 | cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT; | ||
530 | cmd->header.size = sizeof(cmd->body); | ||
531 | cmd->body.cid = res->id; | ||
532 | cmd->body.mobid = bo->mem.start; | ||
533 | cmd->body.validContents = res->backup_dirty; | ||
534 | res->backup_dirty = false; | ||
535 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
536 | |||
537 | |||
538 | return 0; | ||
539 | } | ||
540 | |||
541 | /** | ||
542 | * vmw_dx_context_scrub_cotables - Scrub all bindings and | ||
543 | * cotables from a context | ||
544 | * | ||
545 | * @ctx: Pointer to the context resource | ||
546 | * @readback: Whether to save the cotable contents on scrubbing. | ||
547 | * | ||
548 | * COtables must be unbound before their context, but unbinding requires | ||
549 | * the backup buffer being reserved, whereas scrubbing does not. | ||
550 | * This function scrubs all cotables of a context, potentially reading back | ||
551 | * the contents into their backup buffers. However, scrubbing cotables | ||
552 | * also makes the device context invalid, so all bindings are scrubbed | ||
553 | * first to avoid having to scrub them later against an invalid context. | ||
554 | */ | ||
555 | void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx, | ||
556 | bool readback) | ||
557 | { | ||
558 | struct vmw_user_context *uctx = | ||
559 | container_of(ctx, struct vmw_user_context, res); | ||
560 | int i; | ||
561 | |||
562 | vmw_binding_state_scrub(uctx->cbs); | ||
563 | for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { | ||
564 | struct vmw_resource *res; | ||
565 | |||
566 | /* Avoid racing with ongoing cotable destruction. */ | ||
567 | spin_lock(&uctx->cotable_lock); | ||
568 | res = uctx->cotables[vmw_cotable_scrub_order[i]]; | ||
569 | if (res) | ||
570 | res = vmw_resource_reference_unless_doomed(res); | ||
571 | spin_unlock(&uctx->cotable_lock); | ||
572 | if (!res) | ||
573 | continue; | ||
574 | |||
575 | WARN_ON(vmw_cotable_scrub(res, readback)); | ||
576 | vmw_resource_unreference(&res); | ||
577 | } | ||
578 | } | ||
579 | |||
580 | static int vmw_dx_context_unbind(struct vmw_resource *res, | ||
581 | bool readback, | ||
582 | struct ttm_validate_buffer *val_buf) | ||
583 | { | ||
584 | struct vmw_private *dev_priv = res->dev_priv; | ||
585 | struct ttm_buffer_object *bo = val_buf->bo; | ||
586 | struct vmw_fence_obj *fence; | ||
587 | |||
588 | struct { | ||
589 | SVGA3dCmdHeader header; | ||
590 | SVGA3dCmdDXReadbackContext body; | ||
591 | } *cmd1; | ||
592 | struct { | ||
593 | SVGA3dCmdHeader header; | ||
594 | SVGA3dCmdDXBindContext body; | ||
595 | } *cmd2; | ||
596 | uint32_t submit_size; | ||
597 | uint8_t *cmd; | ||
598 | |||
599 | |||
600 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
601 | |||
602 | mutex_lock(&dev_priv->binding_mutex); | ||
603 | vmw_dx_context_scrub_cotables(res, readback); | ||
604 | |||
605 | submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); | ||
606 | |||
607 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | ||
608 | if (unlikely(cmd == NULL)) { | ||
609 | DRM_ERROR("Failed reserving FIFO space for context " | ||
610 | "unbinding.\n"); | ||
611 | mutex_unlock(&dev_priv->binding_mutex); | ||
612 | return -ENOMEM; | ||
613 | } | ||
614 | |||
615 | cmd2 = (void *) cmd; | ||
616 | if (readback) { | ||
617 | cmd1 = (void *) cmd; | ||
618 | cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT; | ||
619 | cmd1->header.size = sizeof(cmd1->body); | ||
620 | cmd1->body.cid = res->id; | ||
621 | cmd2 = (void *) (&cmd1[1]); | ||
622 | } | ||
623 | cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT; | ||
624 | cmd2->header.size = sizeof(cmd2->body); | ||
625 | cmd2->body.cid = res->id; | ||
626 | cmd2->body.mobid = SVGA3D_INVALID_ID; | ||
627 | |||
628 | vmw_fifo_commit(dev_priv, submit_size); | ||
629 | mutex_unlock(&dev_priv->binding_mutex); | ||
630 | |||
631 | /* | ||
632 | * Create a fence object and fence the backup buffer. | ||
633 | */ | ||
634 | |||
635 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, | ||
636 | &fence, NULL); | ||
637 | |||
638 | vmw_fence_single_bo(bo, fence); | ||
639 | |||
640 | if (likely(fence != NULL)) | ||
641 | vmw_fence_obj_unreference(&fence); | ||
642 | |||
643 | return 0; | ||
644 | } | ||
645 | |||
646 | static int vmw_dx_context_destroy(struct vmw_resource *res) | ||
647 | { | ||
648 | struct vmw_private *dev_priv = res->dev_priv; | ||
649 | struct { | ||
650 | SVGA3dCmdHeader header; | ||
651 | SVGA3dCmdDXDestroyContext body; | ||
652 | } *cmd; | ||
653 | |||
654 | if (likely(res->id == -1)) | ||
655 | return 0; | ||
656 | |||
657 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
658 | if (unlikely(cmd == NULL)) { | ||
659 | DRM_ERROR("Failed reserving FIFO space for context " | ||
660 | "destruction.\n"); | ||
661 | return -ENOMEM; | ||
662 | } | ||
663 | |||
664 | cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT; | ||
665 | cmd->header.size = sizeof(cmd->body); | ||
666 | cmd->body.cid = res->id; | ||
667 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
668 | if (dev_priv->query_cid == res->id) | ||
669 | dev_priv->query_cid_valid = false; | ||
670 | vmw_resource_release_id(res); | ||
671 | vmw_fifo_resource_dec(dev_priv); | ||
672 | |||
673 | return 0; | ||
674 | } | ||
675 | |||
422 | /** | 676 | /** |
423 | * User-space context management: | 677 | * User-space context management: |
424 | */ | 678 | */ |
@@ -435,6 +689,8 @@ static void vmw_user_context_free(struct vmw_resource *res) | |||
435 | container_of(res, struct vmw_user_context, res); | 689 | container_of(res, struct vmw_user_context, res); |
436 | struct vmw_private *dev_priv = res->dev_priv; | 690 | struct vmw_private *dev_priv = res->dev_priv; |
437 | 691 | ||
692 | if (ctx->cbs) | ||
693 | vmw_binding_state_free(ctx->cbs); | ||
438 | ttm_base_object_kfree(ctx, base); | 694 | ttm_base_object_kfree(ctx, base); |
439 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | 695 | ttm_mem_global_free(vmw_mem_glob(dev_priv), |
440 | vmw_user_context_size); | 696 | vmw_user_context_size); |
@@ -465,8 +721,8 @@ int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, | |||
465 | return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE); | 721 | return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE); |
466 | } | 722 | } |
467 | 723 | ||
468 | int vmw_context_define_ioctl(struct drm_device *dev, void *data, | 724 | static int vmw_context_define(struct drm_device *dev, void *data, |
469 | struct drm_file *file_priv) | 725 | struct drm_file *file_priv, bool dx) |
470 | { | 726 | { |
471 | struct vmw_private *dev_priv = vmw_priv(dev); | 727 | struct vmw_private *dev_priv = vmw_priv(dev); |
472 | struct vmw_user_context *ctx; | 728 | struct vmw_user_context *ctx; |
@@ -476,6 +732,10 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data, | |||
476 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 732 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
477 | int ret; | 733 | int ret; |
478 | 734 | ||
735 | if (!dev_priv->has_dx && dx) { | ||
736 | DRM_ERROR("DX contexts not supported by device.\n"); | ||
737 | return -EINVAL; | ||
738 | } | ||
479 | 739 | ||
480 | /* | 740 | /* |
481 | * Approximate idr memory usage with 128 bytes. It will be limited | 741 | * Approximate idr memory usage with 128 bytes. It will be limited |
@@ -516,7 +776,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data, | |||
516 | * From here on, the destructor takes over resource freeing. | 776 | * From here on, the destructor takes over resource freeing. |
517 | */ | 777 | */ |
518 | 778 | ||
519 | ret = vmw_context_init(dev_priv, res, vmw_user_context_free); | 779 | ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx); |
520 | if (unlikely(ret != 0)) | 780 | if (unlikely(ret != 0)) |
521 | goto out_unlock; | 781 | goto out_unlock; |
522 | 782 | ||
@@ -535,387 +795,74 @@ out_err: | |||
535 | out_unlock: | 795 | out_unlock: |
536 | ttm_read_unlock(&dev_priv->reservation_sem); | 796 | ttm_read_unlock(&dev_priv->reservation_sem); |
537 | return ret; | 797 | return ret; |
538 | |||
539 | } | ||
540 | |||
541 | /** | ||
542 | * vmw_context_scrub_shader - scrub a shader binding from a context. | ||
543 | * | ||
544 | * @bi: single binding information. | ||
545 | * @rebind: Whether to issue a bind instead of scrub command. | ||
546 | */ | ||
547 | static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind) | ||
548 | { | ||
549 | struct vmw_private *dev_priv = bi->ctx->dev_priv; | ||
550 | struct { | ||
551 | SVGA3dCmdHeader header; | ||
552 | SVGA3dCmdSetShader body; | ||
553 | } *cmd; | ||
554 | |||
555 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
556 | if (unlikely(cmd == NULL)) { | ||
557 | DRM_ERROR("Failed reserving FIFO space for shader " | ||
558 | "unbinding.\n"); | ||
559 | return -ENOMEM; | ||
560 | } | ||
561 | |||
562 | cmd->header.id = SVGA_3D_CMD_SET_SHADER; | ||
563 | cmd->header.size = sizeof(cmd->body); | ||
564 | cmd->body.cid = bi->ctx->id; | ||
565 | cmd->body.type = bi->i1.shader_type; | ||
566 | cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); | ||
567 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
568 | |||
569 | return 0; | ||
570 | } | ||
571 | |||
572 | /** | ||
573 | * vmw_context_scrub_render_target - scrub a render target binding | ||
574 | * from a context. | ||
575 | * | ||
576 | * @bi: single binding information. | ||
577 | * @rebind: Whether to issue a bind instead of scrub command. | ||
578 | */ | ||
579 | static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi, | ||
580 | bool rebind) | ||
581 | { | ||
582 | struct vmw_private *dev_priv = bi->ctx->dev_priv; | ||
583 | struct { | ||
584 | SVGA3dCmdHeader header; | ||
585 | SVGA3dCmdSetRenderTarget body; | ||
586 | } *cmd; | ||
587 | |||
588 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
589 | if (unlikely(cmd == NULL)) { | ||
590 | DRM_ERROR("Failed reserving FIFO space for render target " | ||
591 | "unbinding.\n"); | ||
592 | return -ENOMEM; | ||
593 | } | ||
594 | |||
595 | cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET; | ||
596 | cmd->header.size = sizeof(cmd->body); | ||
597 | cmd->body.cid = bi->ctx->id; | ||
598 | cmd->body.type = bi->i1.rt_type; | ||
599 | cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); | ||
600 | cmd->body.target.face = 0; | ||
601 | cmd->body.target.mipmap = 0; | ||
602 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
603 | |||
604 | return 0; | ||
605 | } | ||
606 | |||
607 | /** | ||
608 | * vmw_context_scrub_texture - scrub a texture binding from a context. | ||
609 | * | ||
610 | * @bi: single binding information. | ||
611 | * @rebind: Whether to issue a bind instead of scrub command. | ||
612 | * | ||
613 | * TODO: Possibly complement this function with a function that takes | ||
614 | * a list of texture bindings and combines them to a single command. | ||
615 | */ | ||
616 | static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, | ||
617 | bool rebind) | ||
618 | { | ||
619 | struct vmw_private *dev_priv = bi->ctx->dev_priv; | ||
620 | struct { | ||
621 | SVGA3dCmdHeader header; | ||
622 | struct { | ||
623 | SVGA3dCmdSetTextureState c; | ||
624 | SVGA3dTextureState s1; | ||
625 | } body; | ||
626 | } *cmd; | ||
627 | |||
628 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
629 | if (unlikely(cmd == NULL)) { | ||
630 | DRM_ERROR("Failed reserving FIFO space for texture " | ||
631 | "unbinding.\n"); | ||
632 | return -ENOMEM; | ||
633 | } | ||
634 | |||
635 | |||
636 | cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE; | ||
637 | cmd->header.size = sizeof(cmd->body); | ||
638 | cmd->body.c.cid = bi->ctx->id; | ||
639 | cmd->body.s1.stage = bi->i1.texture_stage; | ||
640 | cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; | ||
641 | cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); | ||
642 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
643 | |||
644 | return 0; | ||
645 | } | 798 | } |
646 | 799 | ||
647 | /** | 800 | int vmw_context_define_ioctl(struct drm_device *dev, void *data, |
648 | * vmw_context_binding_drop: Stop tracking a context binding | 801 | struct drm_file *file_priv) |
649 | * | ||
650 | * @cb: Pointer to binding tracker storage. | ||
651 | * | ||
652 | * Stops tracking a context binding, and re-initializes its storage. | ||
653 | * Typically used when the context binding is replaced with a binding to | ||
654 | * another (or the same, for that matter) resource. | ||
655 | */ | ||
656 | static void vmw_context_binding_drop(struct vmw_ctx_binding *cb) | ||
657 | { | 802 | { |
658 | list_del(&cb->ctx_list); | 803 | return vmw_context_define(dev, data, file_priv, false); |
659 | if (!list_empty(&cb->res_list)) | ||
660 | list_del(&cb->res_list); | ||
661 | cb->bi.ctx = NULL; | ||
662 | } | 804 | } |
663 | 805 | ||
664 | /** | 806 | int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data, |
665 | * vmw_context_binding_add: Start tracking a context binding | 807 | struct drm_file *file_priv) |
666 | * | ||
667 | * @cbs: Pointer to the context binding state tracker. | ||
668 | * @bi: Information about the binding to track. | ||
669 | * | ||
670 | * Performs basic checks on the binding to make sure arguments are within | ||
671 | * bounds and then starts tracking the binding in the context binding | ||
672 | * state structure @cbs. | ||
673 | */ | ||
674 | int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, | ||
675 | const struct vmw_ctx_bindinfo *bi) | ||
676 | { | 808 | { |
677 | struct vmw_ctx_binding *loc; | 809 | union drm_vmw_extended_context_arg *arg = (typeof(arg)) data; |
678 | 810 | struct drm_vmw_context_arg *rep = &arg->rep; | |
679 | switch (bi->bt) { | 811 | |
680 | case vmw_ctx_binding_rt: | 812 | switch (arg->req) { |
681 | if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) { | 813 | case drm_vmw_context_legacy: |
682 | DRM_ERROR("Illegal render target type %u.\n", | 814 | return vmw_context_define(dev, rep, file_priv, false); |
683 | (unsigned) bi->i1.rt_type); | 815 | case drm_vmw_context_dx: |
684 | return -EINVAL; | 816 | return vmw_context_define(dev, rep, file_priv, true); |
685 | } | ||
686 | loc = &cbs->render_targets[bi->i1.rt_type]; | ||
687 | break; | ||
688 | case vmw_ctx_binding_tex: | ||
689 | if (unlikely((unsigned)bi->i1.texture_stage >= | ||
690 | SVGA3D_NUM_TEXTURE_UNITS)) { | ||
691 | DRM_ERROR("Illegal texture/sampler unit %u.\n", | ||
692 | (unsigned) bi->i1.texture_stage); | ||
693 | return -EINVAL; | ||
694 | } | ||
695 | loc = &cbs->texture_units[bi->i1.texture_stage]; | ||
696 | break; | ||
697 | case vmw_ctx_binding_shader: | ||
698 | if (unlikely((unsigned)bi->i1.shader_type >= | ||
699 | SVGA3D_SHADERTYPE_PREDX_MAX)) { | ||
700 | DRM_ERROR("Illegal shader type %u.\n", | ||
701 | (unsigned) bi->i1.shader_type); | ||
702 | return -EINVAL; | ||
703 | } | ||
704 | loc = &cbs->shaders[bi->i1.shader_type]; | ||
705 | break; | ||
706 | default: | 817 | default: |
707 | BUG(); | ||
708 | } | ||
709 | |||
710 | if (loc->bi.ctx != NULL) | ||
711 | vmw_context_binding_drop(loc); | ||
712 | |||
713 | loc->bi = *bi; | ||
714 | loc->bi.scrubbed = false; | ||
715 | list_add_tail(&loc->ctx_list, &cbs->list); | ||
716 | INIT_LIST_HEAD(&loc->res_list); | ||
717 | |||
718 | return 0; | ||
719 | } | ||
720 | |||
721 | /** | ||
722 | * vmw_context_binding_transfer: Transfer a context binding tracking entry. | ||
723 | * | ||
724 | * @cbs: Pointer to the persistent context binding state tracker. | ||
725 | * @bi: Information about the binding to track. | ||
726 | * | ||
727 | */ | ||
728 | static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs, | ||
729 | const struct vmw_ctx_bindinfo *bi) | ||
730 | { | ||
731 | struct vmw_ctx_binding *loc; | ||
732 | |||
733 | switch (bi->bt) { | ||
734 | case vmw_ctx_binding_rt: | ||
735 | loc = &cbs->render_targets[bi->i1.rt_type]; | ||
736 | break; | ||
737 | case vmw_ctx_binding_tex: | ||
738 | loc = &cbs->texture_units[bi->i1.texture_stage]; | ||
739 | break; | ||
740 | case vmw_ctx_binding_shader: | ||
741 | loc = &cbs->shaders[bi->i1.shader_type]; | ||
742 | break; | 818 | break; |
743 | default: | ||
744 | BUG(); | ||
745 | } | ||
746 | |||
747 | if (loc->bi.ctx != NULL) | ||
748 | vmw_context_binding_drop(loc); | ||
749 | |||
750 | if (bi->res != NULL) { | ||
751 | loc->bi = *bi; | ||
752 | list_add_tail(&loc->ctx_list, &cbs->list); | ||
753 | list_add_tail(&loc->res_list, &bi->res->binding_head); | ||
754 | } | ||
755 | } | ||
756 | |||
757 | /** | ||
758 | * vmw_context_binding_kill - Kill a binding on the device | ||
759 | * and stop tracking it. | ||
760 | * | ||
761 | * @cb: Pointer to binding tracker storage. | ||
762 | * | ||
763 | * Emits FIFO commands to scrub a binding represented by @cb. | ||
764 | * Then stops tracking the binding and re-initializes its storage. | ||
765 | */ | ||
766 | static void vmw_context_binding_kill(struct vmw_ctx_binding *cb) | ||
767 | { | ||
768 | if (!cb->bi.scrubbed) { | ||
769 | (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false); | ||
770 | cb->bi.scrubbed = true; | ||
771 | } | ||
772 | vmw_context_binding_drop(cb); | ||
773 | } | ||
774 | |||
775 | /** | ||
776 | * vmw_context_binding_state_kill - Kill all bindings associated with a | ||
777 | * struct vmw_ctx_binding state structure, and re-initialize the structure. | ||
778 | * | ||
779 | * @cbs: Pointer to the context binding state tracker. | ||
780 | * | ||
781 | * Emits commands to scrub all bindings associated with the | ||
782 | * context binding state tracker. Then re-initializes the whole structure. | ||
783 | */ | ||
784 | static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs) | ||
785 | { | ||
786 | struct vmw_ctx_binding *entry, *next; | ||
787 | |||
788 | list_for_each_entry_safe(entry, next, &cbs->list, ctx_list) | ||
789 | vmw_context_binding_kill(entry); | ||
790 | } | ||
791 | |||
792 | /** | ||
793 | * vmw_context_binding_state_scrub - Scrub all bindings associated with a | ||
794 | * struct vmw_ctx_binding state structure. | ||
795 | * | ||
796 | * @cbs: Pointer to the context binding state tracker. | ||
797 | * | ||
798 | * Emits commands to scrub all bindings associated with the | ||
799 | * context binding state tracker. | ||
800 | */ | ||
801 | static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs) | ||
802 | { | ||
803 | struct vmw_ctx_binding *entry; | ||
804 | |||
805 | list_for_each_entry(entry, &cbs->list, ctx_list) { | ||
806 | if (!entry->bi.scrubbed) { | ||
807 | (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false); | ||
808 | entry->bi.scrubbed = true; | ||
809 | } | ||
810 | } | ||
811 | } | ||
812 | |||
813 | /** | ||
814 | * vmw_context_binding_res_list_kill - Kill all bindings on a | ||
815 | * resource binding list | ||
816 | * | ||
817 | * @head: list head of resource binding list | ||
818 | * | ||
819 | * Kills all bindings associated with a specific resource. Typically | ||
820 | * called before the resource is destroyed. | ||
821 | */ | ||
822 | void vmw_context_binding_res_list_kill(struct list_head *head) | ||
823 | { | ||
824 | struct vmw_ctx_binding *entry, *next; | ||
825 | |||
826 | list_for_each_entry_safe(entry, next, head, res_list) | ||
827 | vmw_context_binding_kill(entry); | ||
828 | } | ||
829 | |||
830 | /** | ||
831 | * vmw_context_binding_res_list_scrub - Scrub all bindings on a | ||
832 | * resource binding list | ||
833 | * | ||
834 | * @head: list head of resource binding list | ||
835 | * | ||
836 | * Scrub all bindings associated with a specific resource. Typically | ||
837 | * called before the resource is evicted. | ||
838 | */ | ||
839 | void vmw_context_binding_res_list_scrub(struct list_head *head) | ||
840 | { | ||
841 | struct vmw_ctx_binding *entry; | ||
842 | |||
843 | list_for_each_entry(entry, head, res_list) { | ||
844 | if (!entry->bi.scrubbed) { | ||
845 | (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false); | ||
846 | entry->bi.scrubbed = true; | ||
847 | } | ||
848 | } | 819 | } |
820 | return -EINVAL; | ||
849 | } | 821 | } |
850 | 822 | ||
851 | /** | 823 | /** |
852 | * vmw_context_binding_state_transfer - Commit staged binding info | 824 | * vmw_context_binding_list - Return a list of context bindings |
853 | * | 825 | * |
854 | * @ctx: Pointer to context to commit the staged binding info to. | 826 | * @ctx: The context resource |
855 | * @from: Staged binding info built during execbuf. | ||
856 | * | 827 | * |
857 | * Transfers binding info from a temporary structure to the persistent | 828 | * Returns the current list of bindings of the given context. Note that |
858 | * structure in the context. This can be done once commands | 829 | * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked. |
859 | */ | 830 | */ |
860 | void vmw_context_binding_state_transfer(struct vmw_resource *ctx, | 831 | struct list_head *vmw_context_binding_list(struct vmw_resource *ctx) |
861 | struct vmw_ctx_binding_state *from) | ||
862 | { | 832 | { |
863 | struct vmw_user_context *uctx = | 833 | struct vmw_user_context *uctx = |
864 | container_of(ctx, struct vmw_user_context, res); | 834 | container_of(ctx, struct vmw_user_context, res); |
865 | struct vmw_ctx_binding *entry, *next; | ||
866 | 835 | ||
867 | list_for_each_entry_safe(entry, next, &from->list, ctx_list) | 836 | return vmw_binding_state_list(uctx->cbs); |
868 | vmw_context_binding_transfer(&uctx->cbs, &entry->bi); | ||
869 | } | 837 | } |
870 | 838 | ||
871 | /** | 839 | struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx) |
872 | * vmw_context_rebind_all - Rebind all scrubbed bindings of a context | ||
873 | * | ||
874 | * @ctx: The context resource | ||
875 | * | ||
876 | * Walks through the context binding list and rebinds all scrubbed | ||
877 | * resources. | ||
878 | */ | ||
879 | int vmw_context_rebind_all(struct vmw_resource *ctx) | ||
880 | { | 840 | { |
881 | struct vmw_ctx_binding *entry; | 841 | return container_of(ctx, struct vmw_user_context, res)->man; |
882 | struct vmw_user_context *uctx = | 842 | } |
883 | container_of(ctx, struct vmw_user_context, res); | ||
884 | struct vmw_ctx_binding_state *cbs = &uctx->cbs; | ||
885 | int ret; | ||
886 | |||
887 | list_for_each_entry(entry, &cbs->list, ctx_list) { | ||
888 | if (likely(!entry->bi.scrubbed)) | ||
889 | continue; | ||
890 | |||
891 | if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id == | ||
892 | SVGA3D_INVALID_ID)) | ||
893 | continue; | ||
894 | |||
895 | ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true); | ||
896 | if (unlikely(ret != 0)) | ||
897 | return ret; | ||
898 | 843 | ||
899 | entry->bi.scrubbed = false; | 844 | struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx, |
900 | } | 845 | SVGACOTableType cotable_type) |
846 | { | ||
847 | if (cotable_type >= SVGA_COTABLE_DX10_MAX) | ||
848 | return ERR_PTR(-EINVAL); | ||
901 | 849 | ||
902 | return 0; | 850 | return vmw_resource_reference |
851 | (container_of(ctx, struct vmw_user_context, res)-> | ||
852 | cotables[cotable_type]); | ||
903 | } | 853 | } |
904 | 854 | ||
905 | /** | 855 | /** |
906 | * vmw_context_binding_list - Return a list of context bindings | 856 | * vmw_context_binding_state - |
857 | * Return a pointer to a context binding state structure | ||
907 | * | 858 | * |
908 | * @ctx: The context resource | 859 | * @ctx: The context resource |
909 | * | 860 | * |
910 | * Returns the current list of bindings of the given context. Note that | 861 | * Returns the current state of bindings of the given context. Note that |
911 | * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked. | 862 | * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked. |
912 | */ | 863 | */ |
913 | struct list_head *vmw_context_binding_list(struct vmw_resource *ctx) | 864 | struct vmw_ctx_binding_state * |
914 | { | 865 | vmw_context_binding_state(struct vmw_resource *ctx) |
915 | return &(container_of(ctx, struct vmw_user_context, res)->cbs.list); | ||
916 | } | ||
917 | |||
918 | struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx) | ||
919 | { | 866 | { |
920 | return container_of(ctx, struct vmw_user_context, res)->man; | 867 | return container_of(ctx, struct vmw_user_context, res)->cbs; |
921 | } | 868 | } |
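A short usage sketch of the new cotable accessor defined above (illustrative only; ctx_res is a hypothetical, already created DX context resource). vmw_context_cotable() returns a reference that the caller must drop:

	struct vmw_resource *cotable;

	cotable = vmw_context_cotable(ctx_res, SVGA_COTABLE_RTVIEW);
	if (IS_ERR(cotable))
		return PTR_ERR(cotable);

	/* ... reserve / validate the cotable as the caller requires ... */

	vmw_resource_unreference(&cotable);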
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c new file mode 100644 index 000000000000..22bb04ffec78 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | |||
@@ -0,0 +1,662 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | /* | ||
28 | * Treat context OTables as resources to make use of the resource | ||
29 | * backing MOB eviction mechanism, which is used to read back the COTable | ||
30 | * whenever the backing MOB is evicted. | ||
31 | */ | ||
32 | |||
33 | #include "vmwgfx_drv.h" | ||
34 | #include "vmwgfx_resource_priv.h" | ||
35 | #include <ttm/ttm_placement.h> | ||
36 | #include "vmwgfx_so.h" | ||
37 | |||
38 | /** | ||
39 | * struct vmw_cotable - Context Object Table resource | ||
40 | * | ||
41 | * @res: struct vmw_resource we are deriving from. | ||
42 | * @ctx: non-refcounted pointer to the owning context. | ||
43 | * @size_read_back: Size of data read back during eviction. | ||
44 | * @seen_entries: Seen entries in command stream for this cotable. | ||
45 | * @type: The cotable type. | ||
46 | * @scrubbed: Whether the cotable has been scrubbed. | ||
47 | * @resource_list: List of resources in the cotable. | ||
48 | */ | ||
49 | struct vmw_cotable { | ||
50 | struct vmw_resource res; | ||
51 | struct vmw_resource *ctx; | ||
52 | size_t size_read_back; | ||
53 | int seen_entries; | ||
54 | u32 type; | ||
55 | bool scrubbed; | ||
56 | struct list_head resource_list; | ||
57 | }; | ||
58 | |||
59 | /** | ||
60 | * struct vmw_cotable_info - Static info about cotable types | ||
61 | * | ||
62 | * @min_initial_entries: Min number of initial entries at cotable allocation | ||
63 | * for this cotable type. | ||
64 | * @size: Size of each entry. | ||
65 | */ | ||
66 | struct vmw_cotable_info { | ||
67 | u32 min_initial_entries; | ||
68 | u32 size; | ||
69 | void (*unbind_func)(struct vmw_private *, struct list_head *, | ||
70 | bool); | ||
71 | }; | ||
72 | |||
73 | static const struct vmw_cotable_info co_info[] = { | ||
74 | {1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy}, | ||
75 | {1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy}, | ||
76 | {1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy}, | ||
77 | {1, sizeof(SVGACOTableDXElementLayoutEntry), NULL}, | ||
78 | {1, sizeof(SVGACOTableDXBlendStateEntry), NULL}, | ||
79 | {1, sizeof(SVGACOTableDXDepthStencilEntry), NULL}, | ||
80 | {1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL}, | ||
81 | {1, sizeof(SVGACOTableDXSamplerEntry), NULL}, | ||
82 | {1, sizeof(SVGACOTableDXStreamOutputEntry), NULL}, | ||
83 | {1, sizeof(SVGACOTableDXQueryEntry), NULL}, | ||
84 | {1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub} | ||
85 | }; | ||
86 | |||
87 | /* | ||
88 | * Cotables with bindings that we remove must be scrubbed first; | ||
89 | * otherwise, the device will swap in an invalid context when we remove | ||
90 | * bindings before scrubbing a cotable. | ||
91 | */ | ||
92 | const SVGACOTableType vmw_cotable_scrub_order[] = { | ||
93 | SVGA_COTABLE_RTVIEW, | ||
94 | SVGA_COTABLE_DSVIEW, | ||
95 | SVGA_COTABLE_SRVIEW, | ||
96 | SVGA_COTABLE_DXSHADER, | ||
97 | SVGA_COTABLE_ELEMENTLAYOUT, | ||
98 | SVGA_COTABLE_BLENDSTATE, | ||
99 | SVGA_COTABLE_DEPTHSTENCIL, | ||
100 | SVGA_COTABLE_RASTERIZERSTATE, | ||
101 | SVGA_COTABLE_SAMPLER, | ||
102 | SVGA_COTABLE_STREAMOUTPUT, | ||
103 | SVGA_COTABLE_DXQUERY, | ||
104 | }; | ||
105 | |||
106 | static int vmw_cotable_bind(struct vmw_resource *res, | ||
107 | struct ttm_validate_buffer *val_buf); | ||
108 | static int vmw_cotable_unbind(struct vmw_resource *res, | ||
109 | bool readback, | ||
110 | struct ttm_validate_buffer *val_buf); | ||
111 | static int vmw_cotable_create(struct vmw_resource *res); | ||
112 | static int vmw_cotable_destroy(struct vmw_resource *res); | ||
113 | |||
114 | static const struct vmw_res_func vmw_cotable_func = { | ||
115 | .res_type = vmw_res_cotable, | ||
116 | .needs_backup = true, | ||
117 | .may_evict = true, | ||
118 | .type_name = "context guest backed object tables", | ||
119 | .backup_placement = &vmw_mob_placement, | ||
120 | .create = vmw_cotable_create, | ||
121 | .destroy = vmw_cotable_destroy, | ||
122 | .bind = vmw_cotable_bind, | ||
123 | .unbind = vmw_cotable_unbind, | ||
124 | }; | ||
125 | |||
126 | /** | ||
127 | * vmw_cotable - Convert a struct vmw_resource pointer to a struct | ||
128 | * vmw_cotable pointer | ||
129 | * | ||
130 | * @res: Pointer to the resource. | ||
131 | */ | ||
132 | static struct vmw_cotable *vmw_cotable(struct vmw_resource *res) | ||
133 | { | ||
134 | return container_of(res, struct vmw_cotable, res); | ||
135 | } | ||
136 | |||
137 | /** | ||
138 | * vmw_cotable_destroy - Cotable resource destroy callback | ||
139 | * | ||
140 | * @res: Pointer to the cotable resource. | ||
141 | * | ||
142 | * There is no device cotable destroy command, so this function only | ||
143 | * makes sure that the resource id is set to invalid. | ||
144 | */ | ||
145 | static int vmw_cotable_destroy(struct vmw_resource *res) | ||
146 | { | ||
147 | res->id = -1; | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | /** | ||
152 | * vmw_cotable_unscrub - Undo a cotable scrub operation | ||
153 | * | ||
154 | * @res: Pointer to the cotable resource | ||
155 | * | ||
156 | * This function issues commands to (re)bind the cotable to | ||
157 | * its backing mob, which needs to be validated and reserved at this point. | ||
158 | * This is identical to bind(), except that the function interface differs. | ||
159 | */ | ||
160 | static int vmw_cotable_unscrub(struct vmw_resource *res) | ||
161 | { | ||
162 | struct vmw_cotable *vcotbl = vmw_cotable(res); | ||
163 | struct vmw_private *dev_priv = res->dev_priv; | ||
164 | struct ttm_buffer_object *bo = &res->backup->base; | ||
165 | struct { | ||
166 | SVGA3dCmdHeader header; | ||
167 | SVGA3dCmdDXSetCOTable body; | ||
168 | } *cmd; | ||
169 | |||
170 | WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB); | ||
171 | lockdep_assert_held(&bo->resv->lock.base); | ||
172 | |||
173 | cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), SVGA3D_INVALID_ID); | ||
174 | if (!cmd) { | ||
175 | DRM_ERROR("Failed reserving FIFO space for cotable " | ||
176 | "binding.\n"); | ||
177 | return -ENOMEM; | ||
178 | } | ||
179 | |||
180 | WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID); | ||
181 | WARN_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
182 | cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE; | ||
183 | cmd->header.size = sizeof(cmd->body); | ||
184 | cmd->body.cid = vcotbl->ctx->id; | ||
185 | cmd->body.type = vcotbl->type; | ||
186 | cmd->body.mobid = bo->mem.start; | ||
187 | cmd->body.validSizeInBytes = vcotbl->size_read_back; | ||
188 | |||
189 | vmw_fifo_commit_flush(dev_priv, sizeof(*cmd)); | ||
190 | vcotbl->scrubbed = false; | ||
191 | |||
192 | return 0; | ||
193 | } | ||
194 | |||
195 | /** | ||
196 | * vmw_cotable_bind - Cotable resource bind callback | ||
197 | * | ||
198 | * @res: Pointer to the cotable resource | ||
199 | * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller | ||
200 | * for convenience / fencing. | ||
201 | * | ||
202 | * This function issues commands to (re)bind the cotable to | ||
203 | * its backing mob, which needs to be validated and reserved at this point. | ||
204 | */ | ||
205 | static int vmw_cotable_bind(struct vmw_resource *res, | ||
206 | struct ttm_validate_buffer *val_buf) | ||
207 | { | ||
208 | /* | ||
209 | * The create() callback may have changed @res->backup without | ||
210 | * the caller noticing, and with val_buf->bo still pointing to | ||
211 | * the old backup buffer. Although hackish, and not used currently, | ||
212 | * take the opportunity to correct the value here so that it's not | ||
213 | * misused in the future. | ||
214 | */ | ||
215 | val_buf->bo = &res->backup->base; | ||
216 | |||
217 | return vmw_cotable_unscrub(res); | ||
218 | } | ||
219 | |||
220 | /** | ||
221 | * vmw_cotable_scrub - Scrub the cotable from the device. | ||
222 | * | ||
223 | * @res: Pointer to the cotable resource. | ||
224 | * @readback: Whether to initiate a readback of the cotable data to the backup | ||
225 | * buffer. | ||
226 | * | ||
227 | * In some situations (context swapouts) it might be desirable to make the | ||
228 | * device forget about the cotable without performing a full unbind. A full | ||
229 | * unbind requires reserved backup buffers and it might not be possible to | ||
230 | * reserve them due to locking order violation issues. The vmw_cotable_scrub | ||
231 | * function implements a partial unbind() without that requirement but with the | ||
232 | * following restrictions. | ||
233 | * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must | ||
234 | * be called. | ||
235 | * 2) Before the cotable backing buffer is used by the CPU, or during the | ||
236 | * resource destruction, vmw_cotable_unbind() must be called. | ||
237 | */ | ||
238 | int vmw_cotable_scrub(struct vmw_resource *res, bool readback) | ||
239 | { | ||
240 | struct vmw_cotable *vcotbl = vmw_cotable(res); | ||
241 | struct vmw_private *dev_priv = res->dev_priv; | ||
242 | size_t submit_size; | ||
243 | |||
244 | struct { | ||
245 | SVGA3dCmdHeader header; | ||
246 | SVGA3dCmdDXReadbackCOTable body; | ||
247 | } *cmd0; | ||
248 | struct { | ||
249 | SVGA3dCmdHeader header; | ||
250 | SVGA3dCmdDXSetCOTable body; | ||
251 | } *cmd1; | ||
252 | |||
253 | if (vcotbl->scrubbed) | ||
254 | return 0; | ||
255 | |||
256 | if (co_info[vcotbl->type].unbind_func) | ||
257 | co_info[vcotbl->type].unbind_func(dev_priv, | ||
258 | &vcotbl->resource_list, | ||
259 | readback); | ||
260 | submit_size = sizeof(*cmd1); | ||
261 | if (readback) | ||
262 | submit_size += sizeof(*cmd0); | ||
263 | |||
264 | cmd1 = vmw_fifo_reserve_dx(dev_priv, submit_size, SVGA3D_INVALID_ID); | ||
265 | if (!cmd1) { | ||
266 | DRM_ERROR("Failed reserving FIFO space for cotable " | ||
267 | "unbinding.\n"); | ||
268 | return -ENOMEM; | ||
269 | } | ||
270 | |||
271 | vcotbl->size_read_back = 0; | ||
272 | if (readback) { | ||
273 | cmd0 = (void *) cmd1; | ||
274 | cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE; | ||
275 | cmd0->header.size = sizeof(cmd0->body); | ||
276 | cmd0->body.cid = vcotbl->ctx->id; | ||
277 | cmd0->body.type = vcotbl->type; | ||
278 | cmd1 = (void *) &cmd0[1]; | ||
279 | vcotbl->size_read_back = res->backup_size; | ||
280 | } | ||
281 | cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE; | ||
282 | cmd1->header.size = sizeof(cmd1->body); | ||
283 | cmd1->body.cid = vcotbl->ctx->id; | ||
284 | cmd1->body.type = vcotbl->type; | ||
285 | cmd1->body.mobid = SVGA3D_INVALID_ID; | ||
286 | cmd1->body.validSizeInBytes = 0; | ||
287 | vmw_fifo_commit_flush(dev_priv, submit_size); | ||
288 | vcotbl->scrubbed = true; | ||
289 | |||
290 | /* Trigger a create() on next validate. */ | ||
291 | res->id = -1; | ||
292 | |||
293 | return 0; | ||
294 | } | ||
295 | |||
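An aside on the command packing in vmw_cotable_scrub() above: when readback is requested, two header-plus-body commands are written back to back into a single reserved area, with the second command pointer advanced past the first. The stand-alone sketch below (plain user-space C; the struct layouts and opcode values are made up for illustration, this is not driver code) shows the same pointer arithmetic.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct hdr { uint32_t id; uint32_t size; };
struct readback_cmd { struct hdr header; uint32_t cid; uint32_t type; };
struct set_cmd { struct hdr header; uint32_t cid; uint32_t type; uint32_t mobid; uint32_t valid_bytes; };

int main(void)
{
	bool readback = true;
	size_t submit_size = sizeof(struct set_cmd) +
			     (readback ? sizeof(struct readback_cmd) : 0);
	void *reservation = calloc(1, submit_size);	/* stands in for the reserved FIFO space */
	struct set_cmd *cmd1 = reservation;

	if (!reservation)
		return 1;
	if (readback) {
		struct readback_cmd *cmd0 = reservation;

		cmd0->header.id = 0x100;		/* hypothetical "readback" opcode */
		cmd0->header.size = sizeof(*cmd0) - sizeof(cmd0->header);
		cmd1 = (struct set_cmd *)&cmd0[1];	/* second command follows the first */
	}
	cmd1->header.id = 0x101;			/* hypothetical "set cotable" opcode */
	cmd1->header.size = sizeof(*cmd1) - sizeof(cmd1->header);
	cmd1->mobid = UINT32_MAX;			/* "no mob": detach the table */

	printf("committing %zu bytes\n", submit_size);
	free(reservation);
	return 0;
}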
296 | /** | ||
297 | * vmw_cotable_unbind - Cotable resource unbind callback | ||
298 | * | ||
299 | * @res: Pointer to the cotable resource. | ||
300 | * @readback: Whether to read back cotable data to the backup buffer. | ||
301 | * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller | ||
302 | * for convenience / fencing. | ||
303 | * | ||
304 | * Unbinds the cotable from the device and fences the backup buffer. | ||
305 | */ | ||
306 | static int vmw_cotable_unbind(struct vmw_resource *res, | ||
307 | bool readback, | ||
308 | struct ttm_validate_buffer *val_buf) | ||
309 | { | ||
310 | struct vmw_cotable *vcotbl = vmw_cotable(res); | ||
311 | struct vmw_private *dev_priv = res->dev_priv; | ||
312 | struct ttm_buffer_object *bo = val_buf->bo; | ||
313 | struct vmw_fence_obj *fence; | ||
314 | int ret; | ||
315 | |||
316 | if (list_empty(&res->mob_head)) | ||
317 | return 0; | ||
318 | |||
319 | WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB); | ||
320 | lockdep_assert_held(&bo->resv->lock.base); | ||
321 | |||
322 | mutex_lock(&dev_priv->binding_mutex); | ||
323 | if (!vcotbl->scrubbed) | ||
324 | vmw_dx_context_scrub_cotables(vcotbl->ctx, readback); | ||
325 | mutex_unlock(&dev_priv->binding_mutex); | ||
326 | ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); | ||
327 | vmw_fence_single_bo(bo, fence); | ||
328 | if (likely(fence != NULL)) | ||
329 | vmw_fence_obj_unreference(&fence); | ||
330 | |||
331 | return ret; | ||
332 | } | ||
333 | |||
334 | /** | ||
335 | * vmw_cotable_readback - Read back a cotable without unbinding. | ||
336 | * | ||
337 | * @res: The cotable resource. | ||
338 | * | ||
339 | * Reads back a cotable to its backing mob without unbinding the MOB from | ||
340 | * the cotable. The MOB is fenced for subsequent CPU access. | ||
341 | */ | ||
342 | static int vmw_cotable_readback(struct vmw_resource *res) | ||
343 | { | ||
344 | struct vmw_cotable *vcotbl = vmw_cotable(res); | ||
345 | struct vmw_private *dev_priv = res->dev_priv; | ||
346 | |||
347 | struct { | ||
348 | SVGA3dCmdHeader header; | ||
349 | SVGA3dCmdDXReadbackCOTable body; | ||
350 | } *cmd; | ||
351 | struct vmw_fence_obj *fence; | ||
352 | |||
353 | if (!vcotbl->scrubbed) { | ||
354 | cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), | ||
355 | SVGA3D_INVALID_ID); | ||
356 | if (!cmd) { | ||
357 | DRM_ERROR("Failed reserving FIFO space for cotable " | ||
358 | "readback.\n"); | ||
359 | return -ENOMEM; | ||
360 | } | ||
361 | cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE; | ||
362 | cmd->header.size = sizeof(cmd->body); | ||
363 | cmd->body.cid = vcotbl->ctx->id; | ||
364 | cmd->body.type = vcotbl->type; | ||
365 | vcotbl->size_read_back = res->backup_size; | ||
366 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
367 | } | ||
368 | |||
369 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); | ||
370 | vmw_fence_single_bo(&res->backup->base, fence); | ||
371 | vmw_fence_obj_unreference(&fence); | ||
372 | |||
373 | return 0; | ||
374 | } | ||
375 | |||
376 | /** | ||
377 | * vmw_cotable_resize - Resize a cotable. | ||
378 | * | ||
379 | * @res: The cotable resource. | ||
380 | * @new_size: The new size. | ||
381 | * | ||
382 | * Resizes a cotable and binds the new backup buffer. | ||
383 | * On failure the cotable is left intact. | ||
384 | * Important! This function may not fail once the MOB switch has been | ||
385 | * committed to hardware. That would put the device context in an | ||
386 | * invalid state which we can't currently recover from. | ||
387 | */ | ||
388 | static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) | ||
389 | { | ||
390 | struct vmw_private *dev_priv = res->dev_priv; | ||
391 | struct vmw_cotable *vcotbl = vmw_cotable(res); | ||
392 | struct vmw_dma_buffer *buf, *old_buf = res->backup; | ||
393 | struct ttm_buffer_object *bo, *old_bo = &res->backup->base; | ||
394 | size_t old_size = res->backup_size; | ||
395 | size_t old_size_read_back = vcotbl->size_read_back; | ||
396 | size_t cur_size_read_back; | ||
397 | struct ttm_bo_kmap_obj old_map, new_map; | ||
398 | int ret; | ||
399 | size_t i; | ||
400 | |||
401 | ret = vmw_cotable_readback(res); | ||
402 | if (ret) | ||
403 | return ret; | ||
404 | |||
405 | cur_size_read_back = vcotbl->size_read_back; | ||
406 | vcotbl->size_read_back = old_size_read_back; | ||
407 | |||
408 | /* | ||
409 | * While the device is processing, allocate and reserve a buffer object | ||
410 | * for the new COTable. Initially pin the buffer object to make sure | ||
411 | * we can use tryreserve without failure. | ||
412 | */ | ||
413 | buf = kzalloc(sizeof(*buf), GFP_KERNEL); | ||
414 | if (!buf) | ||
415 | return -ENOMEM; | ||
416 | |||
417 | ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement, | ||
418 | true, vmw_dmabuf_bo_free); | ||
419 | if (ret) { | ||
420 | DRM_ERROR("Failed initializing new cotable MOB.\n"); | ||
421 | return ret; | ||
422 | } | ||
423 | |||
424 | bo = &buf->base; | ||
425 | WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, false, NULL)); | ||
426 | |||
427 | ret = ttm_bo_wait(old_bo, false, false, false); | ||
428 | if (unlikely(ret != 0)) { | ||
429 | DRM_ERROR("Failed waiting for cotable unbind.\n"); | ||
430 | goto out_wait; | ||
431 | } | ||
432 | |||
433 | /* | ||
434 | * Do a page-by-page copy of the COTable. This eliminates slow vmap()s. | ||
435 | * This should really be a TTM utility. | ||
436 | */ | ||
437 | for (i = 0; i < old_bo->num_pages; ++i) { | ||
438 | bool dummy; | ||
439 | |||
440 | ret = ttm_bo_kmap(old_bo, i, 1, &old_map); | ||
441 | if (unlikely(ret != 0)) { | ||
442 | DRM_ERROR("Failed mapping old COTable on resize.\n"); | ||
443 | goto out_wait; | ||
444 | } | ||
445 | ret = ttm_bo_kmap(bo, i, 1, &new_map); | ||
446 | if (unlikely(ret != 0)) { | ||
447 | DRM_ERROR("Failed mapping new COTable on resize.\n"); | ||
448 | goto out_map_new; | ||
449 | } | ||
450 | memcpy(ttm_kmap_obj_virtual(&new_map, &dummy), | ||
451 | ttm_kmap_obj_virtual(&old_map, &dummy), | ||
452 | PAGE_SIZE); | ||
453 | ttm_bo_kunmap(&new_map); | ||
454 | ttm_bo_kunmap(&old_map); | ||
455 | } | ||
456 | |||
457 | /* Unpin new buffer, and switch backup buffers. */ | ||
458 | ret = ttm_bo_validate(bo, &vmw_mob_placement, false, false); | ||
459 | if (unlikely(ret != 0)) { | ||
460 | DRM_ERROR("Failed validating new COTable backup buffer.\n"); | ||
461 | goto out_wait; | ||
462 | } | ||
463 | |||
464 | res->backup = buf; | ||
465 | res->backup_size = new_size; | ||
466 | vcotbl->size_read_back = cur_size_read_back; | ||
467 | |||
468 | /* | ||
469 | * Now tell the device to switch. If this fails, then we need to | ||
470 | * revert the full resize. | ||
471 | */ | ||
472 | ret = vmw_cotable_unscrub(res); | ||
473 | if (ret) { | ||
474 | DRM_ERROR("Failed switching COTable backup buffer.\n"); | ||
475 | res->backup = old_buf; | ||
476 | res->backup_size = old_size; | ||
477 | vcotbl->size_read_back = old_size_read_back; | ||
478 | goto out_wait; | ||
479 | } | ||
480 | |||
481 | /* Let go of the old mob. */ | ||
482 | list_del(&res->mob_head); | ||
483 | list_add_tail(&res->mob_head, &buf->res_list); | ||
484 | vmw_dmabuf_unreference(&old_buf); | ||
485 | res->id = vcotbl->type; | ||
486 | |||
487 | return 0; | ||
488 | |||
489 | out_map_new: | ||
490 | ttm_bo_kunmap(&old_map); | ||
491 | out_wait: | ||
492 | ttm_bo_unreserve(bo); | ||
493 | vmw_dmabuf_unreference(&buf); | ||
494 | |||
495 | return ret; | ||
496 | } | ||
497 | |||
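The resize path above copies the table one page at a time with ttm_bo_kmap() instead of vmapping the whole buffers. As a rough illustration of that pattern only (plain user-space C with malloc'ed buffers standing in for the kmapped MOB pages; not the TTM code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	size_t old_pages = 4, new_pages = 8;
	unsigned char *old_buf = malloc(old_pages * PAGE_SIZE);
	unsigned char *new_buf = calloc(new_pages, PAGE_SIZE);
	size_t i;

	if (!old_buf || !new_buf)
		return 1;
	memset(old_buf, 0xab, old_pages * PAGE_SIZE);

	/* Copy only the pages the old buffer actually has; the rest stays zeroed. */
	for (i = 0; i < old_pages; ++i)
		memcpy(new_buf + i * PAGE_SIZE, old_buf + i * PAGE_SIZE, PAGE_SIZE);

	printf("copied %zu of %zu pages\n", old_pages, new_pages);
	free(old_buf);
	free(new_buf);
	return 0;
}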
498 | /** | ||
499 | * vmw_cotable_create - Cotable resource create callback | ||
500 | * | ||
501 | * @res: Pointer to a cotable resource. | ||
502 | * | ||
503 | * There is no separate create command for cotables, so this callback, which | ||
504 | * is called before bind() in the validation sequence, is instead used for two | ||
505 | * things. | ||
506 | * 1) Unscrub the cotable if it is scrubbed and still attached to a backup | ||
507 | * buffer, that is, if @res->mob_head is non-empty. | ||
508 | * 2) Resize the cotable if needed. | ||
509 | */ | ||
510 | static int vmw_cotable_create(struct vmw_resource *res) | ||
511 | { | ||
512 | struct vmw_cotable *vcotbl = vmw_cotable(res); | ||
513 | size_t new_size = res->backup_size; | ||
514 | size_t needed_size; | ||
515 | int ret; | ||
516 | |||
517 | /* Check whether we need to resize the cotable */ | ||
518 | needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size; | ||
519 | while (needed_size > new_size) | ||
520 | new_size *= 2; | ||
521 | |||
522 | if (likely(new_size <= res->backup_size)) { | ||
523 | if (vcotbl->scrubbed && !list_empty(&res->mob_head)) { | ||
524 | ret = vmw_cotable_unscrub(res); | ||
525 | if (ret) | ||
526 | return ret; | ||
527 | } | ||
528 | res->id = vcotbl->type; | ||
529 | return 0; | ||
530 | } | ||
531 | |||
532 | return vmw_cotable_resize(res, new_size); | ||
533 | } | ||
534 | |||
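For reference, the grow-by-doubling check in create() can be modelled in isolation; the sketch below uses arbitrary example numbers and plain C types rather than the driver's structures, and simply doubles the current backing size until the highest entry seen fits.

#include <stdio.h>

int main(void)
{
	size_t entry_size = 64;		/* bytes per cotable entry (example value) */
	size_t backup_size = 4096;	/* current backing buffer size */
	int seen_entries = 200;		/* highest entry id seen so far */

	size_t needed_size = (size_t)(seen_entries + 1) * entry_size;
	size_t new_size = backup_size;

	while (needed_size > new_size)
		new_size *= 2;

	if (new_size > backup_size)
		printf("resize needed: %zu -> %zu bytes\n", backup_size, new_size);
	else
		printf("current %zu bytes is enough\n", backup_size);
	return 0;
}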
535 | /** | ||
536 | * vmw_hw_cotable_destroy - Cotable hw_destroy callback | ||
537 | * | ||
538 | * @res: Pointer to a cotable resource. | ||
539 | * | ||
540 | * The final (part of resource destruction) destroy callback. | ||
541 | */ | ||
542 | static void vmw_hw_cotable_destroy(struct vmw_resource *res) | ||
543 | { | ||
544 | (void) vmw_cotable_destroy(res); | ||
545 | } | ||
546 | |||
547 | static size_t cotable_acc_size; | ||
548 | |||
549 | /** | ||
550 | * vmw_cotable_free - Cotable resource destructor | ||
551 | * | ||
552 | * @res: Pointer to a cotable resource. | ||
553 | */ | ||
554 | static void vmw_cotable_free(struct vmw_resource *res) | ||
555 | { | ||
556 | struct vmw_private *dev_priv = res->dev_priv; | ||
557 | |||
558 | kfree(res); | ||
559 | ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size); | ||
560 | } | ||
561 | |||
562 | /** | ||
563 | * vmw_cotable_alloc - Create a cotable resource | ||
564 | * | ||
565 | * @dev_priv: Pointer to a device private struct. | ||
566 | * @ctx: Pointer to the context resource. | ||
567 | * The cotable resource will not take a reference on this context. | ||
568 | * @type: The cotable type. | ||
569 | */ | ||
570 | struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, | ||
571 | struct vmw_resource *ctx, | ||
572 | u32 type) | ||
573 | { | ||
574 | struct vmw_cotable *vcotbl; | ||
575 | int ret; | ||
576 | u32 num_entries; | ||
577 | |||
578 | if (unlikely(cotable_acc_size == 0)) | ||
579 | cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable)); | ||
580 | |||
581 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), | ||
582 | cotable_acc_size, false, true); | ||
583 | if (unlikely(ret)) | ||
584 | return ERR_PTR(ret); | ||
585 | |||
586 | vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL); | ||
587 | if (unlikely(vcotbl == NULL)) { | ||
588 | ret = -ENOMEM; | ||
589 | goto out_no_alloc; | ||
590 | } | ||
591 | |||
592 | ret = vmw_resource_init(dev_priv, &vcotbl->res, true, | ||
593 | vmw_cotable_free, &vmw_cotable_func); | ||
594 | if (unlikely(ret != 0)) | ||
595 | goto out_no_init; | ||
596 | |||
597 | INIT_LIST_HEAD(&vcotbl->resource_list); | ||
598 | vcotbl->res.id = type; | ||
599 | vcotbl->res.backup_size = PAGE_SIZE; | ||
600 | num_entries = PAGE_SIZE / co_info[type].size; | ||
601 | if (num_entries < co_info[type].min_initial_entries) { | ||
602 | vcotbl->res.backup_size = co_info[type].min_initial_entries * | ||
603 | co_info[type].size; | ||
604 | vcotbl->res.backup_size = | ||
605 | (vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK; | ||
606 | } | ||
607 | |||
608 | vcotbl->scrubbed = true; | ||
609 | vcotbl->seen_entries = -1; | ||
610 | vcotbl->type = type; | ||
611 | vcotbl->ctx = ctx; | ||
612 | |||
613 | vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy); | ||
614 | |||
615 | return &vcotbl->res; | ||
616 | |||
617 | out_no_init: | ||
618 | kfree(vcotbl); | ||
619 | out_no_alloc: | ||
620 | ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size); | ||
621 | return ERR_PTR(ret); | ||
622 | } | ||
623 | |||
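The initial backing size chosen above starts at one page and, only if that page holds fewer than min_initial_entries entries, is bumped to the minimum entry count rounded up to whole pages. A small stand-alone model of that computation (PAGE_SIZE and the example arguments are assumptions for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static unsigned long initial_backup_size(unsigned long entry_size,
					 unsigned long min_entries)
{
	unsigned long size = PAGE_SIZE;

	/* One page by default; otherwise the minimum size, page aligned. */
	if (PAGE_SIZE / entry_size < min_entries)
		size = (min_entries * entry_size + PAGE_SIZE - 1) & PAGE_MASK;

	return size;
}

int main(void)
{
	printf("%lu\n", initial_backup_size(64, 1));	/* one page is plenty */
	printf("%lu\n", initial_backup_size(1024, 16));	/* rounds up to 16384 */
	return 0;
}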
624 | /** | ||
625 | * vmw_cotable_notify - Notify the cotable about an item creation | ||
626 | * | ||
627 | * @res: Pointer to a cotable resource. | ||
628 | * @id: Item id. | ||
629 | */ | ||
630 | int vmw_cotable_notify(struct vmw_resource *res, int id) | ||
631 | { | ||
632 | struct vmw_cotable *vcotbl = vmw_cotable(res); | ||
633 | |||
634 | if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) { | ||
635 | DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n", | ||
636 | (unsigned) vcotbl->type, id); | ||
637 | return -EINVAL; | ||
638 | } | ||
639 | |||
640 | if (vcotbl->seen_entries < id) { | ||
641 | /* Trigger a call to create() on next validate */ | ||
642 | res->id = -1; | ||
643 | vcotbl->seen_entries = id; | ||
644 | } | ||
645 | |||
646 | return 0; | ||
647 | } | ||
648 | |||
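The notify path is essentially a high-water mark: remember the largest item id seen and invalidate the resource id so the next validation runs create() and, if necessary, a resize. A minimal stand-alone model of that bookkeeping follows (made-up struct and id limit, no locking; not the driver's types):

#include <stdio.h>

#define MAX_IDS 1024	/* stand-in for the device's per-table id limit */

struct cotable_state {
	int seen_entries;	/* highest id seen, -1 when empty */
	int needs_create;	/* set when the next validate must call create() */
};

static int cotable_notify(struct cotable_state *c, int id)
{
	if (id < 0 || id >= MAX_IDS)
		return -1;

	if (c->seen_entries < id) {
		c->needs_create = 1;	/* grow/rebind lazily on next validate */
		c->seen_entries = id;
	}
	return 0;
}

int main(void)
{
	struct cotable_state c = { .seen_entries = -1, .needs_create = 0 };

	cotable_notify(&c, 10);
	cotable_notify(&c, 3);		/* lower id: nothing to do */
	printf("seen=%d needs_create=%d\n", c.seen_entries, c.needs_create);
	return 0;
}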
649 | /** | ||
650 | * vmw_cotable_add_resource - Add a resource to the cotable's active list. | ||
651 | * | ||
652 | * @res: Pointer to the struct vmw_resource representing the cotable. | ||
653 | * @head: Pointer to the struct list_head member of the resource, dedicated | ||
654 | * to the cotable active resource list. | ||
655 | */ | ||
656 | void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head) | ||
657 | { | ||
658 | struct vmw_cotable *vcotbl = | ||
659 | container_of(res, struct vmw_cotable, res); | ||
660 | |||
661 | list_add_tail(head, &vcotbl->resource_list); | ||
662 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index b83adea43f3a..fd0cb8c67d05 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -28,6 +28,7 @@ | |||
28 | 28 | ||
29 | #include <drm/drmP.h> | 29 | #include <drm/drmP.h> |
30 | #include "vmwgfx_drv.h" | 30 | #include "vmwgfx_drv.h" |
31 | #include "vmwgfx_binding.h" | ||
31 | #include <drm/ttm/ttm_placement.h> | 32 | #include <drm/ttm/ttm_placement.h> |
32 | #include <drm/ttm/ttm_bo_driver.h> | 33 | #include <drm/ttm/ttm_bo_driver.h> |
33 | #include <drm/ttm/ttm_object.h> | 34 | #include <drm/ttm/ttm_object.h> |
@@ -127,6 +128,9 @@ | |||
127 | #define DRM_IOCTL_VMW_SYNCCPU \ | 128 | #define DRM_IOCTL_VMW_SYNCCPU \ |
128 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ | 129 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ |
129 | struct drm_vmw_synccpu_arg) | 130 | struct drm_vmw_synccpu_arg) |
131 | #define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \ | ||
132 | DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \ | ||
133 | struct drm_vmw_context_arg) | ||
130 | 134 | ||
131 | /** | 135 | /** |
132 | * The core DRM version of this macro doesn't account for | 136 | * The core DRM version of this macro doesn't account for |
@@ -168,8 +172,8 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { | |||
168 | DRM_UNLOCKED | DRM_RENDER_ALLOW), | 172 | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
169 | VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, | 173 | VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, |
170 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), | 174 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
171 | VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, | 175 | VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | DRM_UNLOCKED | |
172 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), | 176 | DRM_RENDER_ALLOW), |
173 | VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, | 177 | VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, |
174 | DRM_UNLOCKED | DRM_RENDER_ALLOW), | 178 | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
175 | VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, | 179 | VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, |
@@ -206,6 +210,9 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { | |||
206 | VMW_IOCTL_DEF(VMW_SYNCCPU, | 210 | VMW_IOCTL_DEF(VMW_SYNCCPU, |
207 | vmw_user_dmabuf_synccpu_ioctl, | 211 | vmw_user_dmabuf_synccpu_ioctl, |
208 | DRM_UNLOCKED | DRM_RENDER_ALLOW), | 212 | DRM_UNLOCKED | DRM_RENDER_ALLOW), |
213 | VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT, | ||
214 | vmw_extended_context_define_ioctl, | ||
215 | DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), | ||
209 | }; | 216 | }; |
210 | 217 | ||
211 | static struct pci_device_id vmw_pci_id_list[] = { | 218 | static struct pci_device_id vmw_pci_id_list[] = { |
@@ -390,8 +397,10 @@ static int vmw_request_device(struct vmw_private *dev_priv) | |||
390 | } | 397 | } |
391 | vmw_fence_fifo_up(dev_priv->fman); | 398 | vmw_fence_fifo_up(dev_priv->fman); |
392 | dev_priv->cman = vmw_cmdbuf_man_create(dev_priv); | 399 | dev_priv->cman = vmw_cmdbuf_man_create(dev_priv); |
393 | if (IS_ERR(dev_priv->cman)) | 400 | if (IS_ERR(dev_priv->cman)) { |
394 | dev_priv->cman = NULL; | 401 | dev_priv->cman = NULL; |
402 | dev_priv->has_dx = false; | ||
403 | } | ||
395 | 404 | ||
396 | ret = vmw_request_device_late(dev_priv); | 405 | ret = vmw_request_device_late(dev_priv); |
397 | if (ret) | 406 | if (ret) |
@@ -848,6 +857,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
848 | } | 857 | } |
849 | } | 858 | } |
850 | 859 | ||
860 | if (dev_priv->has_mob) { | ||
861 | spin_lock(&dev_priv->cap_lock); | ||
862 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX); | ||
863 | dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP); | ||
864 | spin_unlock(&dev_priv->cap_lock); | ||
865 | } | ||
866 | |||
867 | |||
851 | ret = vmw_kms_init(dev_priv); | 868 | ret = vmw_kms_init(dev_priv); |
852 | if (unlikely(ret != 0)) | 869 | if (unlikely(ret != 0)) |
853 | goto out_no_kms; | 870 | goto out_no_kms; |
@@ -857,6 +874,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
857 | if (ret) | 874 | if (ret) |
858 | goto out_no_fifo; | 875 | goto out_no_fifo; |
859 | 876 | ||
877 | DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no."); | ||
878 | |||
860 | if (dev_priv->enable_fb) { | 879 | if (dev_priv->enable_fb) { |
861 | vmw_fifo_resource_inc(dev_priv); | 880 | vmw_fifo_resource_inc(dev_priv); |
862 | vmw_svga_enable(dev_priv); | 881 | vmw_svga_enable(dev_priv); |
@@ -900,6 +919,8 @@ out_err0: | |||
900 | for (i = vmw_res_context; i < vmw_res_max; ++i) | 919 | for (i = vmw_res_context; i < vmw_res_max; ++i) |
901 | idr_destroy(&dev_priv->res_idr[i]); | 920 | idr_destroy(&dev_priv->res_idr[i]); |
902 | 921 | ||
922 | if (dev_priv->ctx.staged_bindings) | ||
923 | vmw_binding_state_free(dev_priv->ctx.staged_bindings); | ||
903 | kfree(dev_priv); | 924 | kfree(dev_priv); |
904 | return ret; | 925 | return ret; |
905 | } | 926 | } |
@@ -945,6 +966,8 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
945 | iounmap(dev_priv->mmio_virt); | 966 | iounmap(dev_priv->mmio_virt); |
946 | arch_phys_wc_del(dev_priv->mmio_mtrr); | 967 | arch_phys_wc_del(dev_priv->mmio_mtrr); |
947 | (void)ttm_bo_device_release(&dev_priv->bdev); | 968 | (void)ttm_bo_device_release(&dev_priv->bdev); |
969 | if (dev_priv->ctx.staged_bindings) | ||
970 | vmw_binding_state_free(dev_priv->ctx.staged_bindings); | ||
948 | vmw_ttm_global_release(dev_priv); | 971 | vmw_ttm_global_release(dev_priv); |
949 | 972 | ||
950 | for (i = vmw_res_context; i < vmw_res_max; ++i) | 973 | for (i = vmw_res_context; i < vmw_res_max; ++i) |
@@ -1082,11 +1105,21 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, | |||
1082 | const struct drm_ioctl_desc *ioctl = | 1105 | const struct drm_ioctl_desc *ioctl = |
1083 | &vmw_ioctls[nr - DRM_COMMAND_BASE]; | 1106 | &vmw_ioctls[nr - DRM_COMMAND_BASE]; |
1084 | 1107 | ||
1085 | if (unlikely(ioctl->cmd != cmd)) { | 1108 | if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) { |
1086 | DRM_ERROR("Invalid command format, ioctl %d\n", | 1109 | ret = (long) drm_ioctl_permit(ioctl->flags, file_priv); |
1087 | nr - DRM_COMMAND_BASE); | 1110 | if (unlikely(ret != 0)) |
1088 | return -EINVAL; | 1111 | return ret; |
1112 | |||
1113 | if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN)) | ||
1114 | goto out_io_encoding; | ||
1115 | |||
1116 | return (long) vmw_execbuf_ioctl(dev, arg, file_priv, | ||
1117 | _IOC_SIZE(cmd)); | ||
1089 | } | 1118 | } |
1119 | |||
1120 | if (unlikely(ioctl->cmd != cmd)) | ||
1121 | goto out_io_encoding; | ||
1122 | |||
1090 | flags = ioctl->flags; | 1123 | flags = ioctl->flags; |
1091 | } else if (!drm_ioctl_flags(nr, &flags)) | 1124 | } else if (!drm_ioctl_flags(nr, &flags)) |
1092 | return -EINVAL; | 1125 | return -EINVAL; |
@@ -1106,6 +1139,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, | |||
1106 | ttm_read_unlock(&vmaster->lock); | 1139 | ttm_read_unlock(&vmaster->lock); |
1107 | 1140 | ||
1108 | return ret; | 1141 | return ret; |
1142 | |||
1143 | out_io_encoding: | ||
1144 | DRM_ERROR("Invalid command format, ioctl %d\n", | ||
1145 | nr - DRM_COMMAND_BASE); | ||
1146 | |||
1147 | return -EINVAL; | ||
1109 | } | 1148 | } |
1110 | 1149 | ||
1111 | static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, | 1150 | static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, |
@@ -1156,7 +1195,6 @@ static void vmw_master_destroy(struct drm_device *dev, | |||
1156 | kfree(vmaster); | 1195 | kfree(vmaster); |
1157 | } | 1196 | } |
1158 | 1197 | ||
1159 | |||
1160 | static int vmw_master_set(struct drm_device *dev, | 1198 | static int vmw_master_set(struct drm_device *dev, |
1161 | struct drm_file *file_priv, | 1199 | struct drm_file *file_priv, |
1162 | bool from_open) | 1200 | bool from_open) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index f513e444125d..b88ea50b7d95 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -59,6 +59,8 @@ | |||
59 | #define VMWGFX_NUM_GB_SHADER 20000 | 59 | #define VMWGFX_NUM_GB_SHADER 20000 |
60 | #define VMWGFX_NUM_GB_SURFACE 32768 | 60 | #define VMWGFX_NUM_GB_SURFACE 32768 |
61 | #define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS | 61 | #define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS |
62 | #define VMWGFX_NUM_DXCONTEXT 256 | ||
63 | #define VMWGFX_NUM_DXQUERY 512 | ||
62 | #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\ | 64 | #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\ |
63 | VMWGFX_NUM_GB_SHADER +\ | 65 | VMWGFX_NUM_GB_SHADER +\ |
64 | VMWGFX_NUM_GB_SURFACE +\ | 66 | VMWGFX_NUM_GB_SURFACE +\ |
@@ -132,6 +134,9 @@ enum vmw_res_type { | |||
132 | vmw_res_surface, | 134 | vmw_res_surface, |
133 | vmw_res_stream, | 135 | vmw_res_stream, |
134 | vmw_res_shader, | 136 | vmw_res_shader, |
137 | vmw_res_dx_context, | ||
138 | vmw_res_cotable, | ||
139 | vmw_res_view, | ||
135 | vmw_res_max | 140 | vmw_res_max |
136 | }; | 141 | }; |
137 | 142 | ||
@@ -139,7 +144,8 @@ enum vmw_res_type { | |||
139 | * Resources that are managed using command streams. | 144 | * Resources that are managed using command streams. |
140 | */ | 145 | */ |
141 | enum vmw_cmdbuf_res_type { | 146 | enum vmw_cmdbuf_res_type { |
142 | vmw_cmdbuf_res_compat_shader | 147 | vmw_cmdbuf_res_shader, |
148 | vmw_cmdbuf_res_view | ||
143 | }; | 149 | }; |
144 | 150 | ||
145 | struct vmw_cmdbuf_res_manager; | 151 | struct vmw_cmdbuf_res_manager; |
@@ -162,11 +168,13 @@ struct vmw_surface { | |||
162 | struct drm_vmw_size *sizes; | 168 | struct drm_vmw_size *sizes; |
163 | uint32_t num_sizes; | 169 | uint32_t num_sizes; |
164 | bool scanout; | 170 | bool scanout; |
171 | uint32_t array_size; | ||
165 | /* TODO so far just a extra pointer */ | 172 | /* TODO so far just a extra pointer */ |
166 | struct vmw_cursor_snooper snooper; | 173 | struct vmw_cursor_snooper snooper; |
167 | struct vmw_surface_offset *offsets; | 174 | struct vmw_surface_offset *offsets; |
168 | SVGA3dTextureFilter autogen_filter; | 175 | SVGA3dTextureFilter autogen_filter; |
169 | uint32_t multisample_count; | 176 | uint32_t multisample_count; |
177 | struct list_head view_list; | ||
170 | }; | 178 | }; |
171 | 179 | ||
172 | struct vmw_marker_queue { | 180 | struct vmw_marker_queue { |
@@ -186,6 +194,7 @@ struct vmw_fifo_state { | |||
186 | struct mutex fifo_mutex; | 194 | struct mutex fifo_mutex; |
187 | struct rw_semaphore rwsem; | 195 | struct rw_semaphore rwsem; |
188 | struct vmw_marker_queue marker_queue; | 196 | struct vmw_marker_queue marker_queue; |
197 | bool dx; | ||
189 | }; | 198 | }; |
190 | 199 | ||
191 | struct vmw_relocation { | 200 | struct vmw_relocation { |
@@ -266,73 +275,6 @@ struct vmw_piter { | |||
266 | }; | 275 | }; |
267 | 276 | ||
268 | /* | 277 | /* |
269 | * enum vmw_ctx_binding_type - abstract resource to context binding types | ||
270 | */ | ||
271 | enum vmw_ctx_binding_type { | ||
272 | vmw_ctx_binding_shader, | ||
273 | vmw_ctx_binding_rt, | ||
274 | vmw_ctx_binding_tex, | ||
275 | vmw_ctx_binding_max | ||
276 | }; | ||
277 | |||
278 | /** | ||
279 | * struct vmw_ctx_bindinfo - structure representing a single context binding | ||
280 | * | ||
281 | * @ctx: Pointer to the context structure. NULL means the binding is not | ||
282 | * active. | ||
283 | * @res: Non ref-counted pointer to the bound resource. | ||
284 | * @bt: The binding type. | ||
285 | * @i1: Union of information needed to unbind. | ||
286 | */ | ||
287 | struct vmw_ctx_bindinfo { | ||
288 | struct vmw_resource *ctx; | ||
289 | struct vmw_resource *res; | ||
290 | enum vmw_ctx_binding_type bt; | ||
291 | bool scrubbed; | ||
292 | union { | ||
293 | SVGA3dShaderType shader_type; | ||
294 | SVGA3dRenderTargetType rt_type; | ||
295 | uint32 texture_stage; | ||
296 | } i1; | ||
297 | }; | ||
298 | |||
299 | /** | ||
300 | * struct vmw_ctx_binding - structure representing a single context binding | ||
301 | * - suitable for tracking in a context | ||
302 | * | ||
303 | * @ctx_list: List head for context. | ||
304 | * @res_list: List head for bound resource. | ||
305 | * @bi: Binding info | ||
306 | */ | ||
307 | struct vmw_ctx_binding { | ||
308 | struct list_head ctx_list; | ||
309 | struct list_head res_list; | ||
310 | struct vmw_ctx_bindinfo bi; | ||
311 | }; | ||
312 | |||
313 | |||
314 | /** | ||
315 | * struct vmw_ctx_binding_state - context binding state | ||
316 | * | ||
317 | * @list: linked list of individual bindings. | ||
318 | * @render_targets: Render target bindings. | ||
319 | * @texture_units: Texture units/samplers bindings. | ||
320 | * @shaders: Shader bindings. | ||
321 | * | ||
322 | * Note that this structure also provides storage space for the individual | ||
323 | * struct vmw_ctx_binding objects, so that no dynamic allocation is needed | ||
324 | * for individual bindings. | ||
325 | * | ||
326 | */ | ||
327 | struct vmw_ctx_binding_state { | ||
328 | struct list_head list; | ||
329 | struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX]; | ||
330 | struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS]; | ||
331 | struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_PREDX_MAX]; | ||
332 | }; | ||
333 | |||
334 | |||
335 | /* | ||
336 | * enum vmw_display_unit_type - Describes the display unit | 278 | * enum vmw_display_unit_type - Describes the display unit |
337 | */ | 279 | */ |
338 | enum vmw_display_unit_type { | 280 | enum vmw_display_unit_type { |
@@ -356,6 +298,7 @@ struct vmw_sw_context{ | |||
356 | uint32_t *cmd_bounce; | 298 | uint32_t *cmd_bounce; |
357 | uint32_t cmd_bounce_size; | 299 | uint32_t cmd_bounce_size; |
358 | struct list_head resource_list; | 300 | struct list_head resource_list; |
301 | struct list_head ctx_resource_list; /* For contexts and cotables */ | ||
359 | struct vmw_dma_buffer *cur_query_bo; | 302 | struct vmw_dma_buffer *cur_query_bo; |
360 | struct list_head res_relocations; | 303 | struct list_head res_relocations; |
361 | uint32_t *buf_start; | 304 | uint32_t *buf_start; |
@@ -363,8 +306,13 @@ struct vmw_sw_context{ | |||
363 | struct vmw_resource *last_query_ctx; | 306 | struct vmw_resource *last_query_ctx; |
364 | bool needs_post_query_barrier; | 307 | bool needs_post_query_barrier; |
365 | struct vmw_resource *error_resource; | 308 | struct vmw_resource *error_resource; |
366 | struct vmw_ctx_binding_state staged_bindings; | 309 | struct vmw_ctx_binding_state *staged_bindings; |
310 | bool staged_bindings_inuse; | ||
367 | struct list_head staged_cmd_res; | 311 | struct list_head staged_cmd_res; |
312 | struct vmw_resource_val_node *dx_ctx_node; | ||
313 | struct vmw_dma_buffer *dx_query_mob; | ||
314 | struct vmw_resource *dx_query_ctx; | ||
315 | struct vmw_cmdbuf_res_manager *man; | ||
368 | }; | 316 | }; |
369 | 317 | ||
370 | struct vmw_legacy_display; | 318 | struct vmw_legacy_display; |
@@ -382,6 +330,26 @@ struct vmw_vga_topology_state { | |||
382 | uint32_t pos_y; | 330 | uint32_t pos_y; |
383 | }; | 331 | }; |
384 | 332 | ||
333 | |||
334 | /* | ||
335 | * struct vmw_otable - Guest Memory OBject table metadata | ||
336 | * | ||
337 | * @size: Size of the table (page-aligned). | ||
338 | * @page_table: Pointer to a struct vmw_mob holding the page table. | ||
339 | */ | ||
340 | struct vmw_otable { | ||
341 | unsigned long size; | ||
342 | struct vmw_mob *page_table; | ||
343 | bool enabled; | ||
344 | }; | ||
345 | |||
346 | struct vmw_otable_batch { | ||
347 | unsigned num_otables; | ||
348 | struct vmw_otable *otables; | ||
349 | struct vmw_resource *context; | ||
350 | struct ttm_buffer_object *otable_bo; | ||
351 | }; | ||
352 | |||
385 | struct vmw_private { | 353 | struct vmw_private { |
386 | struct ttm_bo_device bdev; | 354 | struct ttm_bo_device bdev; |
387 | struct ttm_bo_global_ref bo_global_ref; | 355 | struct ttm_bo_global_ref bo_global_ref; |
@@ -417,6 +385,7 @@ struct vmw_private { | |||
417 | bool has_mob; | 385 | bool has_mob; |
418 | spinlock_t hw_lock; | 386 | spinlock_t hw_lock; |
419 | spinlock_t cap_lock; | 387 | spinlock_t cap_lock; |
388 | bool has_dx; | ||
420 | 389 | ||
421 | /* | 390 | /* |
422 | * VGA registers. | 391 | * VGA registers. |
@@ -552,8 +521,7 @@ struct vmw_private { | |||
552 | /* | 521 | /* |
553 | * Guest Backed stuff | 522 | * Guest Backed stuff |
554 | */ | 523 | */ |
555 | struct ttm_buffer_object *otable_bo; | 524 | struct vmw_otable_batch otable_batch; |
556 | struct vmw_otable *otables; | ||
557 | 525 | ||
558 | struct vmw_cmdbuf_man *cman; | 526 | struct vmw_cmdbuf_man *cman; |
559 | }; | 527 | }; |
@@ -685,6 +653,7 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv, | |||
685 | uint32_t *inout_id, | 653 | uint32_t *inout_id, |
686 | struct vmw_resource **out); | 654 | struct vmw_resource **out); |
687 | extern void vmw_resource_unreserve(struct vmw_resource *res, | 655 | extern void vmw_resource_unreserve(struct vmw_resource *res, |
656 | bool switch_backup, | ||
688 | struct vmw_dma_buffer *new_backup, | 657 | struct vmw_dma_buffer *new_backup, |
689 | unsigned long new_backup_offset); | 658 | unsigned long new_backup_offset); |
690 | extern void vmw_resource_move_notify(struct ttm_buffer_object *bo, | 659 | extern void vmw_resource_move_notify(struct ttm_buffer_object *bo, |
@@ -742,7 +711,10 @@ extern int vmw_fifo_init(struct vmw_private *dev_priv, | |||
742 | extern void vmw_fifo_release(struct vmw_private *dev_priv, | 711 | extern void vmw_fifo_release(struct vmw_private *dev_priv, |
743 | struct vmw_fifo_state *fifo); | 712 | struct vmw_fifo_state *fifo); |
744 | extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes); | 713 | extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes); |
714 | extern void * | ||
715 | vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id); | ||
745 | extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes); | 716 | extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes); |
717 | extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes); | ||
746 | extern int vmw_fifo_send_fence(struct vmw_private *dev_priv, | 718 | extern int vmw_fifo_send_fence(struct vmw_private *dev_priv, |
747 | uint32_t *seqno); | 719 | uint32_t *seqno); |
748 | extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason); | 720 | extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason); |
@@ -828,14 +800,15 @@ static inline struct page *vmw_piter_page(struct vmw_piter *viter) | |||
828 | * Command submission - vmwgfx_execbuf.c | 800 | * Command submission - vmwgfx_execbuf.c |
829 | */ | 801 | */ |
830 | 802 | ||
831 | extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | 803 | extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data, |
832 | struct drm_file *file_priv); | 804 | struct drm_file *file_priv, size_t size); |
833 | extern int vmw_execbuf_process(struct drm_file *file_priv, | 805 | extern int vmw_execbuf_process(struct drm_file *file_priv, |
834 | struct vmw_private *dev_priv, | 806 | struct vmw_private *dev_priv, |
835 | void __user *user_commands, | 807 | void __user *user_commands, |
836 | void *kernel_commands, | 808 | void *kernel_commands, |
837 | uint32_t command_size, | 809 | uint32_t command_size, |
838 | uint64_t throttle_us, | 810 | uint64_t throttle_us, |
811 | uint32_t dx_context_handle, | ||
839 | struct drm_vmw_fence_rep __user | 812 | struct drm_vmw_fence_rep __user |
840 | *user_fence_rep, | 813 | *user_fence_rep, |
841 | struct vmw_fence_obj **out_fence); | 814 | struct vmw_fence_obj **out_fence); |
@@ -960,6 +933,7 @@ int vmw_dumb_destroy(struct drm_file *file_priv, | |||
960 | uint32_t handle); | 933 | uint32_t handle); |
961 | extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible); | 934 | extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible); |
962 | extern void vmw_resource_unpin(struct vmw_resource *res); | 935 | extern void vmw_resource_unpin(struct vmw_resource *res); |
936 | extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res); | ||
963 | 937 | ||
964 | /** | 938 | /** |
965 | * Overlay control - vmwgfx_overlay.c | 939 | * Overlay control - vmwgfx_overlay.c |
@@ -1016,27 +990,28 @@ extern void vmw_otables_takedown(struct vmw_private *dev_priv); | |||
1016 | 990 | ||
1017 | extern const struct vmw_user_resource_conv *user_context_converter; | 991 | extern const struct vmw_user_resource_conv *user_context_converter; |
1018 | 992 | ||
1019 | extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); | ||
1020 | |||
1021 | extern int vmw_context_check(struct vmw_private *dev_priv, | 993 | extern int vmw_context_check(struct vmw_private *dev_priv, |
1022 | struct ttm_object_file *tfile, | 994 | struct ttm_object_file *tfile, |
1023 | int id, | 995 | int id, |
1024 | struct vmw_resource **p_res); | 996 | struct vmw_resource **p_res); |
1025 | extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, | 997 | extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, |
1026 | struct drm_file *file_priv); | 998 | struct drm_file *file_priv); |
999 | extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data, | ||
1000 | struct drm_file *file_priv); | ||
1027 | extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, | 1001 | extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, |
1028 | struct drm_file *file_priv); | 1002 | struct drm_file *file_priv); |
1029 | extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, | ||
1030 | const struct vmw_ctx_bindinfo *ci); | ||
1031 | extern void | ||
1032 | vmw_context_binding_state_transfer(struct vmw_resource *res, | ||
1033 | struct vmw_ctx_binding_state *cbs); | ||
1034 | extern void vmw_context_binding_res_list_kill(struct list_head *head); | ||
1035 | extern void vmw_context_binding_res_list_scrub(struct list_head *head); | ||
1036 | extern int vmw_context_rebind_all(struct vmw_resource *ctx); | ||
1037 | extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx); | 1003 | extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx); |
1038 | extern struct vmw_cmdbuf_res_manager * | 1004 | extern struct vmw_cmdbuf_res_manager * |
1039 | vmw_context_res_man(struct vmw_resource *ctx); | 1005 | vmw_context_res_man(struct vmw_resource *ctx); |
1006 | extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx, | ||
1007 | SVGACOTableType cotable_type); | ||
1008 | extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx); | ||
1009 | struct vmw_ctx_binding_state; | ||
1010 | extern struct vmw_ctx_binding_state * | ||
1011 | vmw_context_binding_state(struct vmw_resource *ctx); | ||
1012 | extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx, | ||
1013 | bool readback); | ||
1014 | |||
1040 | /* | 1015 | /* |
1041 | * Surface management - vmwgfx_surface.c | 1016 | * Surface management - vmwgfx_surface.c |
1042 | */ | 1017 | */ |
@@ -1066,6 +1041,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, | |||
1066 | bool for_scanout, | 1041 | bool for_scanout, |
1067 | uint32_t num_mip_levels, | 1042 | uint32_t num_mip_levels, |
1068 | uint32_t multisample_count, | 1043 | uint32_t multisample_count, |
1044 | uint32_t array_size, | ||
1069 | struct drm_vmw_size size, | 1045 | struct drm_vmw_size size, |
1070 | struct vmw_surface **srf_out); | 1046 | struct vmw_surface **srf_out); |
1071 | 1047 | ||
@@ -1085,12 +1061,21 @@ extern int vmw_compat_shader_add(struct vmw_private *dev_priv, | |||
1085 | SVGA3dShaderType shader_type, | 1061 | SVGA3dShaderType shader_type, |
1086 | size_t size, | 1062 | size_t size, |
1087 | struct list_head *list); | 1063 | struct list_head *list); |
1088 | extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man, | 1064 | extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man, |
1089 | u32 user_key, SVGA3dShaderType shader_type, | 1065 | u32 user_key, SVGA3dShaderType shader_type, |
1090 | struct list_head *list); | 1066 | struct list_head *list); |
1067 | extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man, | ||
1068 | struct vmw_resource *ctx, | ||
1069 | u32 user_key, | ||
1070 | SVGA3dShaderType shader_type, | ||
1071 | struct list_head *list); | ||
1072 | extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv, | ||
1073 | struct list_head *list, | ||
1074 | bool readback); | ||
1075 | |||
1091 | extern struct vmw_resource * | 1076 | extern struct vmw_resource * |
1092 | vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man, | 1077 | vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man, |
1093 | u32 user_key, SVGA3dShaderType shader_type); | 1078 | u32 user_key, SVGA3dShaderType shader_type); |
1094 | 1079 | ||
1095 | /* | 1080 | /* |
1096 | * Command buffer managed resources - vmwgfx_cmdbuf_res.c | 1081 | * Command buffer managed resources - vmwgfx_cmdbuf_res.c |
@@ -1114,8 +1099,20 @@ extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man, | |||
1114 | extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, | 1099 | extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, |
1115 | enum vmw_cmdbuf_res_type res_type, | 1100 | enum vmw_cmdbuf_res_type res_type, |
1116 | u32 user_key, | 1101 | u32 user_key, |
1117 | struct list_head *list); | 1102 | struct list_head *list, |
1103 | struct vmw_resource **res); | ||
1118 | 1104 | ||
1105 | /* | ||
1106 | * COTable management - vmwgfx_cotable.c | ||
1107 | */ | ||
1108 | extern const SVGACOTableType vmw_cotable_scrub_order[]; | ||
1109 | extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, | ||
1110 | struct vmw_resource *ctx, | ||
1111 | u32 type); | ||
1112 | extern int vmw_cotable_notify(struct vmw_resource *res, int id); | ||
1113 | extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback); | ||
1114 | extern void vmw_cotable_add_resource(struct vmw_resource *ctx, | ||
1115 | struct list_head *head); | ||
1119 | 1116 | ||
1120 | /* | 1117 | /* |
1121 | * Command buffer management vmwgfx_cmdbuf.c | 1118 | * Command buffer management vmwgfx_cmdbuf.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 847264f8a33a..401305bbb810 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -29,6 +29,8 @@ | |||
29 | #include "vmwgfx_reg.h" | 29 | #include "vmwgfx_reg.h" |
30 | #include <drm/ttm/ttm_bo_api.h> | 30 | #include <drm/ttm/ttm_bo_api.h> |
31 | #include <drm/ttm/ttm_placement.h> | 31 | #include <drm/ttm/ttm_placement.h> |
32 | #include "vmwgfx_so.h" | ||
33 | #include "vmwgfx_binding.h" | ||
32 | 34 | ||
33 | #define VMW_RES_HT_ORDER 12 | 35 | #define VMW_RES_HT_ORDER 12 |
34 | 36 | ||
@@ -59,8 +61,11 @@ struct vmw_resource_relocation { | |||
59 | * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL. | 61 | * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
60 | * @first_usage: Set to true the first time the resource is referenced in | 62 | * @first_usage: Set to true the first time the resource is referenced in |
61 | * the command stream. | 63 | * the command stream. |
62 | * @no_buffer_needed: Resources do not need to allocate buffer backup on | 64 | * @switching_backup: The command stream provides a new backup buffer for a |
63 | * reservation. The command stream will provide one. | 65 | * resource. |
66 | * @no_buffer_needed: This means @switching_backup is true on first buffer | ||
67 | * reference. So resource reservation does not need to allocate a backup | ||
68 | * buffer for the resource. | ||
64 | */ | 69 | */ |
65 | struct vmw_resource_val_node { | 70 | struct vmw_resource_val_node { |
66 | struct list_head head; | 71 | struct list_head head; |
@@ -69,8 +74,9 @@ struct vmw_resource_val_node { | |||
69 | struct vmw_dma_buffer *new_backup; | 74 | struct vmw_dma_buffer *new_backup; |
70 | struct vmw_ctx_binding_state *staged_bindings; | 75 | struct vmw_ctx_binding_state *staged_bindings; |
71 | unsigned long new_backup_offset; | 76 | unsigned long new_backup_offset; |
72 | bool first_usage; | 77 | u32 first_usage : 1; |
73 | bool no_buffer_needed; | 78 | u32 switching_backup : 1; |
79 | u32 no_buffer_needed : 1; | ||
74 | }; | 80 | }; |
75 | 81 | ||
76 | /** | 82 | /** |
@@ -92,6 +98,10 @@ struct vmw_cmd_entry { | |||
92 | [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\ | 98 | [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\ |
93 | (_gb_disable), (_gb_enable)} | 99 | (_gb_disable), (_gb_enable)} |
94 | 100 | ||
101 | static int vmw_resource_context_res_add(struct vmw_private *dev_priv, | ||
102 | struct vmw_sw_context *sw_context, | ||
103 | struct vmw_resource *ctx); | ||
104 | |||
95 | /** | 105 | /** |
96 | * vmw_resource_unreserve - unreserve resources previously reserved for | 106 | * vmw_resource_unreserve - unreserve resources previously reserved for |
97 | * command submission. | 107 | * command submission. |
@@ -99,15 +109,16 @@ struct vmw_cmd_entry { | |||
99 | * @list_head: list of resources to unreserve. | 109 | * @list_head: list of resources to unreserve. |
100 | * @backoff: Whether command submission failed. | 110 | * @backoff: Whether command submission failed. |
101 | */ | 111 | */ |
102 | static void vmw_resource_list_unreserve(struct list_head *list, | 112 | static void vmw_resource_list_unreserve(struct vmw_sw_context *sw_context, |
113 | struct list_head *list, | ||
103 | bool backoff) | 114 | bool backoff) |
104 | { | 115 | { |
105 | struct vmw_resource_val_node *val; | 116 | struct vmw_resource_val_node *val; |
106 | 117 | ||
107 | list_for_each_entry(val, list, head) { | 118 | list_for_each_entry(val, list, head) { |
108 | struct vmw_resource *res = val->res; | 119 | struct vmw_resource *res = val->res; |
109 | struct vmw_dma_buffer *new_backup = | 120 | bool switch_backup = |
110 | backoff ? NULL : val->new_backup; | 121 | (backoff) ? false : val->switching_backup; |
111 | 122 | ||
112 | /* | 123 | /* |
113 | * Transfer staged context bindings to the | 124 | * Transfer staged context bindings to the |
@@ -115,18 +126,71 @@ static void vmw_resource_list_unreserve(struct list_head *list, | |||
115 | */ | 126 | */ |
116 | if (unlikely(val->staged_bindings)) { | 127 | if (unlikely(val->staged_bindings)) { |
117 | if (!backoff) { | 128 | if (!backoff) { |
118 | vmw_context_binding_state_transfer | 129 | vmw_binding_state_commit |
119 | (val->res, val->staged_bindings); | 130 | (vmw_context_binding_state(val->res), |
131 | val->staged_bindings); | ||
120 | } | 132 | } |
121 | kfree(val->staged_bindings); | 133 | |
134 | if (val->staged_bindings != sw_context->staged_bindings) | ||
135 | vmw_binding_state_free(val->staged_bindings); | ||
136 | else | ||
137 | sw_context->staged_bindings_inuse = false; | ||
122 | val->staged_bindings = NULL; | 138 | val->staged_bindings = NULL; |
123 | } | 139 | } |
124 | vmw_resource_unreserve(res, new_backup, | 140 | vmw_resource_unreserve(res, switch_backup, val->new_backup, |
125 | val->new_backup_offset); | 141 | val->new_backup_offset); |
126 | vmw_dmabuf_unreference(&val->new_backup); | 142 | vmw_dmabuf_unreference(&val->new_backup); |
127 | } | 143 | } |
128 | } | 144 | } |
129 | 145 | ||
146 | /** | ||
147 | * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is | ||
148 | * added to the validate list. | ||
149 | * | ||
150 | * @dev_priv: Pointer to the device private: | ||
151 | * @sw_context: The validation context: | ||
152 | * @node: The validation node holding this context. | ||
153 | */ | ||
154 | static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv, | ||
155 | struct vmw_sw_context *sw_context, | ||
156 | struct vmw_resource_val_node *node) | ||
157 | { | ||
158 | int ret; | ||
159 | |||
160 | ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res); | ||
161 | if (unlikely(ret != 0)) | ||
162 | goto out_err; | ||
163 | |||
164 | if (!sw_context->staged_bindings) { | ||
165 | sw_context->staged_bindings = | ||
166 | vmw_binding_state_alloc(dev_priv); | ||
167 | if (IS_ERR(sw_context->staged_bindings)) { | ||
168 | DRM_ERROR("Failed to allocate context binding " | ||
169 | "information.\n"); | ||
170 | ret = PTR_ERR(sw_context->staged_bindings); | ||
171 | sw_context->staged_bindings = NULL; | ||
172 | goto out_err; | ||
173 | } | ||
174 | } | ||
175 | |||
176 | if (sw_context->staged_bindings_inuse) { | ||
177 | node->staged_bindings = vmw_binding_state_alloc(dev_priv); | ||
178 | if (IS_ERR(node->staged_bindings)) { | ||
179 | DRM_ERROR("Failed to allocate context binding " | ||
180 | "information.\n"); | ||
181 | ret = PTR_ERR(node->staged_bindings); | ||
182 | node->staged_bindings = NULL; | ||
183 | goto out_err; | ||
184 | } | ||
185 | } else { | ||
186 | node->staged_bindings = sw_context->staged_bindings; | ||
187 | sw_context->staged_bindings_inuse = true; | ||
188 | } | ||
189 | |||
190 | return 0; | ||
191 | out_err: | ||
192 | return ret; | ||
193 | } | ||
130 | 194 | ||
131 | /** | 195 | /** |
132 | * vmw_resource_val_add - Add a resource to the software context's | 196 | * vmw_resource_val_add - Add a resource to the software context's |
@@ -141,6 +205,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context, | |||
141 | struct vmw_resource *res, | 205 | struct vmw_resource *res, |
142 | struct vmw_resource_val_node **p_node) | 206 | struct vmw_resource_val_node **p_node) |
143 | { | 207 | { |
208 | struct vmw_private *dev_priv = res->dev_priv; | ||
144 | struct vmw_resource_val_node *node; | 209 | struct vmw_resource_val_node *node; |
145 | struct drm_hash_item *hash; | 210 | struct drm_hash_item *hash; |
146 | int ret; | 211 | int ret; |
@@ -169,14 +234,90 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context, | |||
169 | kfree(node); | 234 | kfree(node); |
170 | return ret; | 235 | return ret; |
171 | } | 236 | } |
172 | list_add_tail(&node->head, &sw_context->resource_list); | ||
173 | node->res = vmw_resource_reference(res); | 237 | node->res = vmw_resource_reference(res); |
174 | node->first_usage = true; | 238 | node->first_usage = true; |
175 | |||
176 | if (unlikely(p_node != NULL)) | 239 | if (unlikely(p_node != NULL)) |
177 | *p_node = node; | 240 | *p_node = node; |
178 | 241 | ||
179 | return 0; | 242 | if (!dev_priv->has_mob) { |
243 | list_add_tail(&node->head, &sw_context->resource_list); | ||
244 | return 0; | ||
245 | } | ||
246 | |||
247 | switch (vmw_res_type(res)) { | ||
248 | case vmw_res_context: | ||
249 | case vmw_res_dx_context: | ||
250 | list_add(&node->head, &sw_context->ctx_resource_list); | ||
251 | ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node); | ||
252 | break; | ||
253 | case vmw_res_cotable: | ||
254 | list_add_tail(&node->head, &sw_context->ctx_resource_list); | ||
255 | break; | ||
256 | default: | ||
257 | list_add_tail(&node->head, &sw_context->resource_list); | ||
258 | break; | ||
259 | } | ||
260 | |||
261 | return ret; | ||
262 | } | ||
263 | |||
264 | /** | ||
265 | * vmw_view_res_val_add - Add a view and the surface it's pointing to | ||
266 | * to the validation list | ||
267 | * | ||
268 | * @sw_context: The software context holding the validation list. | ||
269 | * @view: Pointer to the view resource. | ||
270 | * | ||
271 | * Returns 0 if success, negative error code otherwise. | ||
272 | */ | ||
273 | static int vmw_view_res_val_add(struct vmw_sw_context *sw_context, | ||
274 | struct vmw_resource *view) | ||
275 | { | ||
276 | int ret; | ||
277 | |||
278 | /* | ||
279 | * First add the resource the view is pointing to, otherwise | ||
280 | * it may be swapped out when the view is validated. | ||
281 | */ | ||
282 | ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL); | ||
283 | if (ret) | ||
284 | return ret; | ||
285 | |||
286 | return vmw_resource_val_add(sw_context, view, NULL); | ||
287 | } | ||
288 | |||
289 | /** | ||
290 | * vmw_view_id_val_add - Look up a view and add it and the surface it's | ||
291 | * pointing to to the validation list. | ||
292 | * | ||
293 | * @sw_context: The software context holding the validation list. | ||
294 | * @view_type: The view type to look up. | ||
295 | * @id: view id of the view. | ||
296 | * | ||
297 | * The view is represented by a view id and the DX context it's created on, | ||
298 | * or scheduled for creation on. If there is no DX context set, the function | ||
299 | * will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure. | ||
300 | */ | ||
301 | static int vmw_view_id_val_add(struct vmw_sw_context *sw_context, | ||
302 | enum vmw_view_type view_type, u32 id) | ||
303 | { | ||
304 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | ||
305 | struct vmw_resource *view; | ||
306 | int ret; | ||
307 | |||
308 | if (!ctx_node) { | ||
309 | DRM_ERROR("DX Context not set.\n"); | ||
310 | return -EINVAL; | ||
311 | } | ||
312 | |||
313 | view = vmw_view_lookup(sw_context->man, view_type, id); | ||
314 | if (IS_ERR(view)) | ||
315 | return PTR_ERR(view); | ||
316 | |||
317 | ret = vmw_view_res_val_add(sw_context, view); | ||
318 | vmw_resource_unreference(&view); | ||
319 | |||
320 | return ret; | ||
180 | } | 321 | } |
181 | 322 | ||
182 | /** | 323 | /** |
@@ -195,19 +336,41 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, | |||
195 | struct vmw_resource *ctx) | 336 | struct vmw_resource *ctx) |
196 | { | 337 | { |
197 | struct list_head *binding_list; | 338 | struct list_head *binding_list; |
198 | struct vmw_ctx_binding *entry; | 339 | struct vmw_ctx_bindinfo *entry; |
199 | int ret = 0; | 340 | int ret = 0; |
200 | struct vmw_resource *res; | 341 | struct vmw_resource *res; |
342 | u32 i; | ||
343 | |||
344 | /* Add all cotables to the validation list. */ | ||
345 | if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) { | ||
346 | for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { | ||
347 | res = vmw_context_cotable(ctx, i); | ||
348 | if (IS_ERR(res)) | ||
349 | continue; | ||
350 | |||
351 | ret = vmw_resource_val_add(sw_context, res, NULL); | ||
352 | vmw_resource_unreference(&res); | ||
353 | if (unlikely(ret != 0)) | ||
354 | return ret; | ||
355 | } | ||
356 | } | ||
357 | |||
201 | 358 | ||
359 | /* Add all resources bound to the context to the validation list */ | ||
202 | mutex_lock(&dev_priv->binding_mutex); | 360 | mutex_lock(&dev_priv->binding_mutex); |
203 | binding_list = vmw_context_binding_list(ctx); | 361 | binding_list = vmw_context_binding_list(ctx); |
204 | 362 | ||
205 | list_for_each_entry(entry, binding_list, ctx_list) { | 363 | list_for_each_entry(entry, binding_list, ctx_list) { |
206 | res = vmw_resource_reference_unless_doomed(entry->bi.res); | 364 | /* entry->res is not refcounted */ |
365 | res = vmw_resource_reference_unless_doomed(entry->res); | ||
207 | if (unlikely(res == NULL)) | 366 | if (unlikely(res == NULL)) |
208 | continue; | 367 | continue; |
209 | 368 | ||
210 | ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL); | 369 | if (vmw_res_type(entry->res) == vmw_res_view) |
370 | ret = vmw_view_res_val_add(sw_context, entry->res); | ||
371 | else | ||
372 | ret = vmw_resource_val_add(sw_context, entry->res, | ||
373 | NULL); | ||
211 | vmw_resource_unreference(&res); | 374 | vmw_resource_unreference(&res); |
212 | if (unlikely(ret != 0)) | 375 | if (unlikely(ret != 0)) |
213 | break; | 376 | break; |
@@ -409,6 +572,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context) | |||
409 | 572 | ||
410 | list_for_each_entry(val, &sw_context->resource_list, head) { | 573 | list_for_each_entry(val, &sw_context->resource_list, head) { |
411 | struct vmw_resource *res = val->res; | 574 | struct vmw_resource *res = val->res; |
575 | struct vmw_dma_buffer *backup = res->backup; | ||
412 | 576 | ||
413 | ret = vmw_resource_validate(res); | 577 | ret = vmw_resource_validate(res); |
414 | if (unlikely(ret != 0)) { | 578 | if (unlikely(ret != 0)) { |
@@ -416,18 +580,29 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context) | |||
416 | DRM_ERROR("Failed to validate resource.\n"); | 580 | DRM_ERROR("Failed to validate resource.\n"); |
417 | return ret; | 581 | return ret; |
418 | } | 582 | } |
583 | |||
584 | /* Check if the resource switched backup buffer */ | ||
585 | if (backup && res->backup && (backup != res->backup)) { | ||
586 | struct vmw_dma_buffer *vbo = res->backup; | ||
587 | |||
588 | ret = vmw_bo_to_validate_list | ||
589 | (sw_context, vbo, | ||
590 | vmw_resource_needs_backup(res), NULL); | ||
591 | if (ret) { | ||
592 | ttm_bo_unreserve(&vbo->base); | ||
593 | return ret; | ||
594 | } | ||
595 | } | ||
419 | } | 596 | } |
420 | return 0; | 597 | return 0; |
421 | } | 598 | } |
422 | 599 | ||
423 | |||
424 | /** | 600 | /** |
425 | * vmw_cmd_res_reloc_add - Add a resource to a software context's | 601 | * vmw_cmd_res_reloc_add - Add a resource to a software context's |
426 | * relocation- and validation lists. | 602 | * relocation- and validation lists. |
427 | * | 603 | * |
428 | * @dev_priv: Pointer to a struct vmw_private identifying the device. | 604 | * @dev_priv: Pointer to a struct vmw_private identifying the device. |
429 | * @sw_context: Pointer to the software context. | 605 | * @sw_context: Pointer to the software context. |
430 | * @res_type: Resource type. | ||
431 | * @id_loc: Pointer to where the id that needs translation is located. | 606 | * @id_loc: Pointer to where the id that needs translation is located. |
432 | * @res: Valid pointer to a struct vmw_resource. | 607 | * @res: Valid pointer to a struct vmw_resource. |
433 | * @p_val: If non null, a pointer to the struct vmw_resource_validate_node | 608 | * @p_val: If non null, a pointer to the struct vmw_resource_validate_node |
@@ -435,7 +610,6 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context) | |||
435 | */ | 610 | */ |
436 | static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv, | 611 | static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv, |
437 | struct vmw_sw_context *sw_context, | 612 | struct vmw_sw_context *sw_context, |
438 | enum vmw_res_type res_type, | ||
439 | uint32_t *id_loc, | 613 | uint32_t *id_loc, |
440 | struct vmw_resource *res, | 614 | struct vmw_resource *res, |
441 | struct vmw_resource_val_node **p_val) | 615 | struct vmw_resource_val_node **p_val) |
@@ -454,29 +628,6 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv, | |||
454 | if (unlikely(ret != 0)) | 628 | if (unlikely(ret != 0)) |
455 | return ret; | 629 | return ret; |
456 | 630 | ||
457 | if (res_type == vmw_res_context && dev_priv->has_mob && | ||
458 | node->first_usage) { | ||
459 | |||
460 | /* | ||
461 | * Put contexts first on the list to be able to exit | ||
462 | * list traversal for contexts early. | ||
463 | */ | ||
464 | list_del(&node->head); | ||
465 | list_add(&node->head, &sw_context->resource_list); | ||
466 | |||
467 | ret = vmw_resource_context_res_add(dev_priv, sw_context, res); | ||
468 | if (unlikely(ret != 0)) | ||
469 | return ret; | ||
470 | node->staged_bindings = | ||
471 | kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); | ||
472 | if (node->staged_bindings == NULL) { | ||
473 | DRM_ERROR("Failed to allocate context binding " | ||
474 | "information.\n"); | ||
475 | return -ENOMEM; | ||
476 | } | ||
477 | INIT_LIST_HEAD(&node->staged_bindings->list); | ||
478 | } | ||
479 | |||
480 | if (p_val) | 631 | if (p_val) |
481 | *p_val = node; | 632 | *p_val = node; |
482 | 633 | ||
@@ -554,7 +705,7 @@ vmw_cmd_res_check(struct vmw_private *dev_priv, | |||
554 | rcache->res = res; | 705 | rcache->res = res; |
555 | rcache->handle = *id_loc; | 706 | rcache->handle = *id_loc; |
556 | 707 | ||
557 | ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc, | 708 | ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc, |
558 | res, &node); | 709 | res, &node); |
559 | if (unlikely(ret != 0)) | 710 | if (unlikely(ret != 0)) |
560 | goto out_no_reloc; | 711 | goto out_no_reloc; |
@@ -589,7 +740,8 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context) | |||
589 | if (unlikely(!val->staged_bindings)) | 740 | if (unlikely(!val->staged_bindings)) |
590 | break; | 741 | break; |
591 | 742 | ||
592 | ret = vmw_context_rebind_all(val->res); | 743 | ret = vmw_binding_rebind_all |
744 | (vmw_context_binding_state(val->res)); | ||
593 | if (unlikely(ret != 0)) { | 745 | if (unlikely(ret != 0)) { |
594 | if (ret != -ERESTARTSYS) | 746 | if (ret != -ERESTARTSYS) |
595 | DRM_ERROR("Failed to rebind context.\n"); | 747 | DRM_ERROR("Failed to rebind context.\n"); |
@@ -601,6 +753,69 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context) | |||
601 | } | 753 | } |
602 | 754 | ||
603 | /** | 755 | /** |
756 | * vmw_view_bindings_add - Add an array of view bindings to a context | ||
757 | * binding state tracker. | ||
758 | * | ||
759 | * @sw_context: The execbuf state used for this command. | ||
760 | * @view_type: View type for the bindings. | ||
761 | * @binding_type: Binding type for the bindings. | ||
762 | * @shader_slot: The shader slot to use for the bindings. | ||
763 | * @view_ids: Array of view ids to be bound. | ||
764 | * @num_views: Number of view ids in @view_ids. | ||
765 | * @first_slot: The binding slot to be used for the first view id in @view_ids. | ||
766 | */ | ||
767 | static int vmw_view_bindings_add(struct vmw_sw_context *sw_context, | ||
768 | enum vmw_view_type view_type, | ||
769 | enum vmw_ctx_binding_type binding_type, | ||
770 | uint32 shader_slot, | ||
771 | uint32 view_ids[], u32 num_views, | ||
772 | u32 first_slot) | ||
773 | { | ||
774 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | ||
775 | struct vmw_cmdbuf_res_manager *man; | ||
776 | u32 i; | ||
777 | int ret; | ||
778 | |||
779 | if (!ctx_node) { | ||
780 | DRM_ERROR("DX Context not set.\n"); | ||
781 | return -EINVAL; | ||
782 | } | ||
783 | |||
784 | man = sw_context->man; | ||
785 | for (i = 0; i < num_views; ++i) { | ||
786 | struct vmw_ctx_bindinfo_view binding; | ||
787 | struct vmw_resource *view = NULL; | ||
788 | |||
789 | if (view_ids[i] != SVGA3D_INVALID_ID) { | ||
790 | view = vmw_view_lookup(man, view_type, view_ids[i]); | ||
791 | if (IS_ERR(view)) { | ||
792 | DRM_ERROR("View not found.\n"); | ||
793 | return PTR_ERR(view); | ||
794 | } | ||
795 | |||
796 | ret = vmw_view_res_val_add(sw_context, view); | ||
797 | if (ret) { | ||
798 | DRM_ERROR("Could not add view to " | ||
799 | "validation list.\n"); | ||
800 | vmw_resource_unreference(&view); | ||
801 | return ret; | ||
802 | } | ||
803 | } | ||
804 | binding.bi.ctx = ctx_node->res; | ||
805 | binding.bi.res = view; | ||
806 | binding.bi.bt = binding_type; | ||
807 | binding.shader_slot = shader_slot; | ||
808 | binding.slot = first_slot + i; | ||
809 | vmw_binding_add(ctx_node->staged_bindings, &binding.bi, | ||
810 | shader_slot, binding.slot); | ||
811 | if (view) | ||
812 | vmw_resource_unreference(&view); | ||
813 | } | ||
814 | |||
815 | return 0; | ||
816 | } | ||
817 | |||
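Callers of vmw_view_bindings_add() hand it the array of view ids that trails the fixed-size command body in the command stream; the count is derived from the header size. A condensed sketch of that calling convention, taken from the shape the DX validators below use (the shader-resource case shown here):

	u32 num_views = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	/* The view ids follow the body directly in the command stream,
	 * hence the &cmd[1] pointer arithmetic. */
	ret = vmw_view_bindings_add(sw_context, vmw_view_sr,
				    vmw_ctx_binding_sr,
				    cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				    (void *) &cmd[1], num_views,
				    cmd->body.startView);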
818 | /** | ||
604 | * vmw_cmd_cid_check - Check a command header for valid context information. | 819 | * vmw_cmd_cid_check - Check a command header for valid context information. |
605 | * | 820 | * |
606 | * @dev_priv: Pointer to a device private structure. | 821 | * @dev_priv: Pointer to a device private structure. |
@@ -638,6 +853,12 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, | |||
638 | 853 | ||
639 | cmd = container_of(header, struct vmw_sid_cmd, header); | 854 | cmd = container_of(header, struct vmw_sid_cmd, header); |
640 | 855 | ||
856 | if (cmd->body.type >= SVGA3D_RT_MAX) { | ||
857 | DRM_ERROR("Illegal render target type %u.\n", | ||
858 | (unsigned) cmd->body.type); | ||
859 | return -EINVAL; | ||
860 | } | ||
861 | |||
641 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | 862 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
642 | user_context_converter, &cmd->body.cid, | 863 | user_context_converter, &cmd->body.cid, |
643 | &ctx_node); | 864 | &ctx_node); |
@@ -651,13 +872,14 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, | |||
651 | return ret; | 872 | return ret; |
652 | 873 | ||
653 | if (dev_priv->has_mob) { | 874 | if (dev_priv->has_mob) { |
654 | struct vmw_ctx_bindinfo bi; | 875 | struct vmw_ctx_bindinfo_view binding; |
655 | 876 | ||
656 | bi.ctx = ctx_node->res; | 877 | binding.bi.ctx = ctx_node->res; |
657 | bi.res = res_node ? res_node->res : NULL; | 878 | binding.bi.res = res_node ? res_node->res : NULL; |
658 | bi.bt = vmw_ctx_binding_rt; | 879 | binding.bi.bt = vmw_ctx_binding_rt; |
659 | bi.i1.rt_type = cmd->body.type; | 880 | binding.slot = cmd->body.type; |
660 | return vmw_context_binding_add(ctx_node->staged_bindings, &bi); | 881 | vmw_binding_add(ctx_node->staged_bindings, |
882 | &binding.bi, 0, binding.slot); | ||
661 | } | 883 | } |
662 | 884 | ||
663 | return 0; | 885 | return 0; |
@@ -1364,6 +1586,12 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv, | |||
1364 | if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) | 1586 | if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) |
1365 | continue; | 1587 | continue; |
1366 | 1588 | ||
1589 | if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) { | ||
1590 | DRM_ERROR("Illegal texture/sampler unit %u.\n", | ||
1591 | (unsigned) cur_state->stage); | ||
1592 | return -EINVAL; | ||
1593 | } | ||
1594 | |||
1367 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 1595 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
1368 | user_surface_converter, | 1596 | user_surface_converter, |
1369 | &cur_state->value, &res_node); | 1597 | &cur_state->value, &res_node); |
@@ -1371,14 +1599,14 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv, | |||
1371 | return ret; | 1599 | return ret; |
1372 | 1600 | ||
1373 | if (dev_priv->has_mob) { | 1601 | if (dev_priv->has_mob) { |
1374 | struct vmw_ctx_bindinfo bi; | 1602 | struct vmw_ctx_bindinfo_tex binding; |
1375 | 1603 | ||
1376 | bi.ctx = ctx_node->res; | 1604 | binding.bi.ctx = ctx_node->res; |
1377 | bi.res = res_node ? res_node->res : NULL; | 1605 | binding.bi.res = res_node ? res_node->res : NULL; |
1378 | bi.bt = vmw_ctx_binding_tex; | 1606 | binding.bi.bt = vmw_ctx_binding_tex; |
1379 | bi.i1.texture_stage = cur_state->stage; | 1607 | binding.texture_stage = cur_state->stage; |
1380 | vmw_context_binding_add(ctx_node->staged_bindings, | 1608 | vmw_binding_add(ctx_node->staged_bindings, &binding.bi, |
1381 | &bi); | 1609 | 0, binding.texture_stage); |
1382 | } | 1610 | } |
1383 | } | 1611 | } |
1384 | 1612 | ||
@@ -1408,6 +1636,47 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, | |||
1408 | return ret; | 1636 | return ret; |
1409 | } | 1637 | } |
1410 | 1638 | ||
1639 | |||
1640 | /** | ||
1641 | * vmw_cmd_res_switch_backup - Utility function to handle backup buffer | ||
1642 | * switching | ||
1643 | * | ||
1644 | * @dev_priv: Pointer to a device private struct. | ||
1645 | * @sw_context: The software context being used for this batch. | ||
1646 | * @val_node: The validation node representing the resource. | ||
1647 | * @buf_id: Pointer to the user-space backup buffer handle in the command | ||
1648 | * stream. | ||
1649 | * @backup_offset: Offset of backup into MOB. | ||
1650 | * | ||
1651 | * This function prepares for registering a switch of backup buffers | ||
1652 | * in the resource metadata just prior to unreserving. It is the variant used | ||
1653 | * when the caller already holds the resource's validation node. | ||
1654 | */ | ||
1655 | static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv, | ||
1656 | struct vmw_sw_context *sw_context, | ||
1657 | struct vmw_resource_val_node *val_node, | ||
1658 | uint32_t *buf_id, | ||
1659 | unsigned long backup_offset) | ||
1660 | { | ||
1661 | struct vmw_dma_buffer *dma_buf; | ||
1662 | int ret; | ||
1663 | |||
1664 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf); | ||
1665 | if (ret) | ||
1666 | return ret; | ||
1667 | |||
1668 | val_node->switching_backup = true; | ||
1669 | if (val_node->first_usage) | ||
1670 | val_node->no_buffer_needed = true; | ||
1671 | |||
1672 | vmw_dmabuf_unreference(&val_node->new_backup); | ||
1673 | val_node->new_backup = dma_buf; | ||
1674 | val_node->new_backup_offset = backup_offset; | ||
1675 | |||
1676 | return 0; | ||
1677 | } | ||
1678 | |||
1679 | |||
1411 | /** | 1680 | /** |
1412 | * vmw_cmd_switch_backup - Utility function to handle backup buffer switching | 1681 | * vmw_cmd_switch_backup - Utility function to handle backup buffer switching |
1413 | * | 1682 | * |
@@ -1421,7 +1690,8 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, | |||
1421 | * @backup_offset: Offset of backup into MOB. | 1690 | * @backup_offset: Offset of backup into MOB. |
1422 | * | 1691 | * |
1423 | * This function prepares for registering a switch of backup buffers | 1692 | * This function prepares for registering a switch of backup buffers |
1424 | * in the resource metadata just prior to unreserving. | 1693 | * in the resource metadata just prior to unreserving. It's basically a wrapper |
1694 | * around vmw_cmd_res_switch_backup with a different interface. | ||
1425 | */ | 1695 | */ |
1426 | static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, | 1696 | static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, |
1427 | struct vmw_sw_context *sw_context, | 1697 | struct vmw_sw_context *sw_context, |
@@ -1432,27 +1702,16 @@ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, | |||
1432 | uint32_t *buf_id, | 1702 | uint32_t *buf_id, |
1433 | unsigned long backup_offset) | 1703 | unsigned long backup_offset) |
1434 | { | 1704 | { |
1435 | int ret; | ||
1436 | struct vmw_dma_buffer *dma_buf; | ||
1437 | struct vmw_resource_val_node *val_node; | 1705 | struct vmw_resource_val_node *val_node; |
1706 | int ret; | ||
1438 | 1707 | ||
1439 | ret = vmw_cmd_res_check(dev_priv, sw_context, res_type, | 1708 | ret = vmw_cmd_res_check(dev_priv, sw_context, res_type, |
1440 | converter, res_id, &val_node); | 1709 | converter, res_id, &val_node); |
1441 | if (unlikely(ret != 0)) | 1710 | if (ret) |
1442 | return ret; | ||
1443 | |||
1444 | ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf); | ||
1445 | if (unlikely(ret != 0)) | ||
1446 | return ret; | 1711 | return ret; |
1447 | 1712 | ||
1448 | if (val_node->first_usage) | 1713 | return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node, |
1449 | val_node->no_buffer_needed = true; | 1714 | buf_id, backup_offset); |
1450 | |||
1451 | vmw_dmabuf_unreference(&val_node->new_backup); | ||
1452 | val_node->new_backup = dma_buf; | ||
1453 | val_node->new_backup_offset = backup_offset; | ||
1454 | |||
1455 | return 0; | ||
1456 | } | 1715 | } |
1457 | 1716 | ||
1458 | /** | 1717 | /** |
@@ -1704,10 +1963,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, | |||
1704 | if (unlikely(!dev_priv->has_mob)) | 1963 | if (unlikely(!dev_priv->has_mob)) |
1705 | return 0; | 1964 | return 0; |
1706 | 1965 | ||
1707 | ret = vmw_compat_shader_remove(vmw_context_res_man(val->res), | 1966 | ret = vmw_shader_remove(vmw_context_res_man(val->res), |
1708 | cmd->body.shid, | 1967 | cmd->body.shid, |
1709 | cmd->body.type, | 1968 | cmd->body.type, |
1710 | &sw_context->staged_cmd_res); | 1969 | &sw_context->staged_cmd_res); |
1711 | if (unlikely(ret != 0)) | 1970 | if (unlikely(ret != 0)) |
1712 | return ret; | 1971 | return ret; |
1713 | 1972 | ||
@@ -1735,13 +1994,19 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, | |||
1735 | SVGA3dCmdSetShader body; | 1994 | SVGA3dCmdSetShader body; |
1736 | } *cmd; | 1995 | } *cmd; |
1737 | struct vmw_resource_val_node *ctx_node, *res_node = NULL; | 1996 | struct vmw_resource_val_node *ctx_node, *res_node = NULL; |
1738 | struct vmw_ctx_bindinfo bi; | 1997 | struct vmw_ctx_bindinfo_shader binding; |
1739 | struct vmw_resource *res = NULL; | 1998 | struct vmw_resource *res = NULL; |
1740 | int ret; | 1999 | int ret; |
1741 | 2000 | ||
1742 | cmd = container_of(header, struct vmw_set_shader_cmd, | 2001 | cmd = container_of(header, struct vmw_set_shader_cmd, |
1743 | header); | 2002 | header); |
1744 | 2003 | ||
2004 | if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) { | ||
2005 | DRM_ERROR("Illegal shader type %u.\n", | ||
2006 | (unsigned) cmd->body.type); | ||
2007 | return -EINVAL; | ||
2008 | } | ||
2009 | |||
1745 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | 2010 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, |
1746 | user_context_converter, &cmd->body.cid, | 2011 | user_context_converter, &cmd->body.cid, |
1747 | &ctx_node); | 2012 | &ctx_node); |
@@ -1752,14 +2017,12 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, | |||
1752 | return 0; | 2017 | return 0; |
1753 | 2018 | ||
1754 | if (cmd->body.shid != SVGA3D_INVALID_ID) { | 2019 | if (cmd->body.shid != SVGA3D_INVALID_ID) { |
1755 | res = vmw_compat_shader_lookup | 2020 | res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res), |
1756 | (vmw_context_res_man(ctx_node->res), | 2021 | cmd->body.shid, |
1757 | cmd->body.shid, | 2022 | cmd->body.type); |
1758 | cmd->body.type); | ||
1759 | 2023 | ||
1760 | if (!IS_ERR(res)) { | 2024 | if (!IS_ERR(res)) { |
1761 | ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, | 2025 | ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, |
1762 | vmw_res_shader, | ||
1763 | &cmd->body.shid, res, | 2026 | &cmd->body.shid, res, |
1764 | &res_node); | 2027 | &res_node); |
1765 | vmw_resource_unreference(&res); | 2028 | vmw_resource_unreference(&res); |
@@ -1777,11 +2040,13 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, | |||
1777 | return ret; | 2040 | return ret; |
1778 | } | 2041 | } |
1779 | 2042 | ||
1780 | bi.ctx = ctx_node->res; | 2043 | binding.bi.ctx = ctx_node->res; |
1781 | bi.res = res_node ? res_node->res : NULL; | 2044 | binding.bi.res = res_node ? res_node->res : NULL; |
1782 | bi.bt = vmw_ctx_binding_shader; | 2045 | binding.bi.bt = vmw_ctx_binding_shader; |
1783 | bi.i1.shader_type = cmd->body.type; | 2046 | binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; |
1784 | return vmw_context_binding_add(ctx_node->staged_bindings, &bi); | 2047 | vmw_binding_add(ctx_node->staged_bindings, &binding.bi, |
2048 | binding.shader_slot, 0); | ||
2049 | return 0; | ||
1785 | } | 2050 | } |
1786 | 2051 | ||
1787 | /** | 2052 | /** |
@@ -1843,6 +2108,633 @@ static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv, | |||
1843 | cmd->body.offsetInBytes); | 2108 | cmd->body.offsetInBytes); |
1844 | } | 2109 | } |
1845 | 2110 | ||
2111 | /** | ||
2112 | * vmw_cmd_dx_set_single_constant_buffer - Validate an | ||
2113 | * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command. | ||
2114 | * | ||
2115 | * @dev_priv: Pointer to a device private struct. | ||
2116 | * @sw_context: The software context being used for this batch. | ||
2117 | * @header: Pointer to the command header in the command stream. | ||
2118 | */ | ||
2119 | static int | ||
2120 | vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, | ||
2121 | struct vmw_sw_context *sw_context, | ||
2122 | SVGA3dCmdHeader *header) | ||
2123 | { | ||
2124 | struct { | ||
2125 | SVGA3dCmdHeader header; | ||
2126 | SVGA3dCmdDXSetSingleConstantBuffer body; | ||
2127 | } *cmd; | ||
2128 | struct vmw_resource_val_node *res_node = NULL; | ||
2129 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | ||
2130 | struct vmw_ctx_bindinfo_cb binding; | ||
2131 | int ret; | ||
2132 | |||
2133 | if (unlikely(ctx_node == NULL)) { | ||
2134 | DRM_ERROR("DX Context not set.\n"); | ||
2135 | return -EINVAL; | ||
2136 | } | ||
2137 | |||
2138 | cmd = container_of(header, typeof(*cmd), header); | ||
2139 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
2140 | user_surface_converter, | ||
2141 | &cmd->body.sid, &res_node); | ||
2142 | if (unlikely(ret != 0)) | ||
2143 | return ret; | ||
2144 | |||
2145 | binding.bi.ctx = ctx_node->res; | ||
2146 | binding.bi.res = res_node ? res_node->res : NULL; | ||
2147 | binding.bi.bt = vmw_ctx_binding_cb; | ||
2148 | binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; | ||
2149 | binding.offset = cmd->body.offsetInBytes; | ||
2150 | binding.size = cmd->body.sizeInBytes; | ||
2151 | binding.slot = cmd->body.slot; | ||
2152 | |||
2153 | if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 || | ||
2154 | binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) { | ||
2155 | DRM_ERROR("Illegal const buffer shader %u slot %u.\n", | ||
2156 | (unsigned) cmd->body.type, | ||
2157 | (unsigned) binding.slot); | ||
2158 | return -EINVAL; | ||
2159 | } | ||
2160 | |||
2161 | vmw_binding_add(ctx_node->staged_bindings, &binding.bi, | ||
2162 | binding.shader_slot, binding.slot); | ||
2163 | |||
2164 | return 0; | ||
2165 | } | ||
2166 | |||
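The range check above folds the error cases into one pair of unsigned comparisons: a constant-buffer slot beyond SVGA3D_DX_MAX_CONSTBUFFERS and a shader type outside the DX10 range. Assuming shader_slot is an unsigned field (which the >= comparison suggests), a body.type below SVGA3D_SHADERTYPE_MIN wraps around after the subtraction and is rejected by the same test; a sketch of that reasoning:

	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	/* Rejects type beyond the DX10 range, slot overflow, and (via
	 * unsigned wrap-around) type below SVGA3D_SHADERTYPE_MIN. */
	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS)
		return -EINVAL;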
2167 | /** | ||
2168 | * vmw_cmd_dx_set_shader_res - Validate an | ||
2169 | * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command | ||
2170 | * | ||
2171 | * @dev_priv: Pointer to a device private struct. | ||
2172 | * @sw_context: The software context being used for this batch. | ||
2173 | * @header: Pointer to the command header in the command stream. | ||
2174 | */ | ||
2175 | static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv, | ||
2176 | struct vmw_sw_context *sw_context, | ||
2177 | SVGA3dCmdHeader *header) | ||
2178 | { | ||
2179 | struct { | ||
2180 | SVGA3dCmdHeader header; | ||
2181 | SVGA3dCmdDXSetShaderResources body; | ||
2182 | } *cmd = container_of(header, typeof(*cmd), header); | ||
2183 | u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) / | ||
2184 | sizeof(SVGA3dShaderResourceViewId); | ||
2185 | |||
2186 | if ((u64) cmd->body.startView + (u64) num_sr_view > | ||
2187 | (u64) SVGA3D_DX_MAX_SRVIEWS || | ||
2188 | cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) { | ||
2189 | DRM_ERROR("Invalid shader binding.\n"); | ||
2190 | return -EINVAL; | ||
2191 | } | ||
2192 | |||
2193 | return vmw_view_bindings_add(sw_context, vmw_view_sr, | ||
2194 | vmw_ctx_binding_sr, | ||
2195 | cmd->body.type - SVGA3D_SHADERTYPE_MIN, | ||
2196 | (void *) &cmd[1], num_sr_view, | ||
2197 | cmd->body.startView); | ||
2198 | } | ||
2199 | |||
2200 | /** | ||
2201 | * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER | ||
2202 | * command | ||
2203 | * | ||
2204 | * @dev_priv: Pointer to a device private struct. | ||
2205 | * @sw_context: The software context being used for this batch. | ||
2206 | * @header: Pointer to the command header in the command stream. | ||
2207 | */ | ||
2208 | static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, | ||
2209 | struct vmw_sw_context *sw_context, | ||
2210 | SVGA3dCmdHeader *header) | ||
2211 | { | ||
2212 | struct { | ||
2213 | SVGA3dCmdHeader header; | ||
2214 | SVGA3dCmdDXSetShader body; | ||
2215 | } *cmd; | ||
2216 | struct vmw_resource *res = NULL; | ||
2217 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | ||
2218 | struct vmw_ctx_bindinfo_shader binding; | ||
2219 | int ret = 0; | ||
2220 | |||
2221 | if (unlikely(ctx_node == NULL)) { | ||
2222 | DRM_ERROR("DX Context not set.\n"); | ||
2223 | return -EINVAL; | ||
2224 | } | ||
2225 | |||
2226 | cmd = container_of(header, typeof(*cmd), header); | ||
2227 | |||
2228 | if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) { | ||
2229 | DRM_ERROR("Illegal shader type %u.\n", | ||
2230 | (unsigned) cmd->body.type); | ||
2231 | return -EINVAL; | ||
2232 | } | ||
2233 | |||
2234 | if (cmd->body.shaderId != SVGA3D_INVALID_ID) { | ||
2235 | res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0); | ||
2236 | if (IS_ERR(res)) { | ||
2237 | DRM_ERROR("Could not find shader for binding.\n"); | ||
2238 | return PTR_ERR(res); | ||
2239 | } | ||
2240 | |||
2241 | ret = vmw_resource_val_add(sw_context, res, NULL); | ||
2242 | if (ret) | ||
2243 | goto out_unref; | ||
2244 | } | ||
2245 | |||
2246 | binding.bi.ctx = ctx_node->res; | ||
2247 | binding.bi.res = res; | ||
2248 | binding.bi.bt = vmw_ctx_binding_dx_shader; | ||
2249 | binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; | ||
2250 | |||
2251 | vmw_binding_add(ctx_node->staged_bindings, &binding.bi, | ||
2252 | binding.shader_slot, 0); | ||
2253 | out_unref: | ||
2254 | if (res) | ||
2255 | vmw_resource_unreference(&res); | ||
2256 | |||
2257 | return ret; | ||
2258 | } | ||
2259 | |||
2260 | /** | ||
2261 | * vmw_cmd_dx_set_vertex_buffers - Validate an | ||
2262 | * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command | ||
2263 | * | ||
2264 | * @dev_priv: Pointer to a device private struct. | ||
2265 | * @sw_context: The software context being used for this batch. | ||
2266 | * @header: Pointer to the command header in the command stream. | ||
2267 | */ | ||
2268 | static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv, | ||
2269 | struct vmw_sw_context *sw_context, | ||
2270 | SVGA3dCmdHeader *header) | ||
2271 | { | ||
2272 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | ||
2273 | struct vmw_ctx_bindinfo_vb binding; | ||
2274 | struct vmw_resource_val_node *res_node; | ||
2275 | struct { | ||
2276 | SVGA3dCmdHeader header; | ||
2277 | SVGA3dCmdDXSetVertexBuffers body; | ||
2278 | SVGA3dVertexBuffer buf[]; | ||
2279 | } *cmd; | ||
2280 | int i, ret, num; | ||
2281 | |||
2282 | if (unlikely(ctx_node == NULL)) { | ||
2283 | DRM_ERROR("DX Context not set.\n"); | ||
2284 | return -EINVAL; | ||
2285 | } | ||
2286 | |||
2287 | cmd = container_of(header, typeof(*cmd), header); | ||
2288 | num = (cmd->header.size - sizeof(cmd->body)) / | ||
2289 | sizeof(SVGA3dVertexBuffer); | ||
2290 | if ((u64)num + (u64)cmd->body.startBuffer > | ||
2291 | (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) { | ||
2292 | DRM_ERROR("Invalid number of vertex buffers.\n"); | ||
2293 | return -EINVAL; | ||
2294 | } | ||
2295 | |||
2296 | for (i = 0; i < num; i++) { | ||
2297 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
2298 | user_surface_converter, | ||
2299 | &cmd->buf[i].sid, &res_node); | ||
2300 | if (unlikely(ret != 0)) | ||
2301 | return ret; | ||
2302 | |||
2303 | binding.bi.ctx = ctx_node->res; | ||
2304 | binding.bi.bt = vmw_ctx_binding_vb; | ||
2305 | binding.bi.res = ((res_node) ? res_node->res : NULL); | ||
2306 | binding.offset = cmd->buf[i].offset; | ||
2307 | binding.stride = cmd->buf[i].stride; | ||
2308 | binding.slot = i + cmd->body.startBuffer; | ||
2309 | |||
2310 | vmw_binding_add(ctx_node->staged_bindings, &binding.bi, | ||
2311 | 0, binding.slot); | ||
2312 | } | ||
2313 | |||
2314 | return 0; | ||
2315 | } | ||
2316 | |||
2317 | /** | ||
2318 | * vmw_cmd_dx_set_index_buffer - Validate an | ||
2319 | * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command. | ||
2320 | * | ||
2321 | * @dev_priv: Pointer to a device private struct. | ||
2322 | * @sw_context: The software context being used for this batch. | ||
2323 | * @header: Pointer to the command header in the command stream. | ||
2324 | */ | ||
2325 | static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv, | ||
2326 | struct vmw_sw_context *sw_context, | ||
2327 | SVGA3dCmdHeader *header) | ||
2328 | { | ||
2329 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | ||
2330 | struct vmw_ctx_bindinfo_ib binding; | ||
2331 | struct vmw_resource_val_node *res_node; | ||
2332 | struct { | ||
2333 | SVGA3dCmdHeader header; | ||
2334 | SVGA3dCmdDXSetIndexBuffer body; | ||
2335 | } *cmd; | ||
2336 | int ret; | ||
2337 | |||
2338 | if (unlikely(ctx_node == NULL)) { | ||
2339 | DRM_ERROR("DX Context not set.\n"); | ||
2340 | return -EINVAL; | ||
2341 | } | ||
2342 | |||
2343 | cmd = container_of(header, typeof(*cmd), header); | ||
2344 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
2345 | user_surface_converter, | ||
2346 | &cmd->body.sid, &res_node); | ||
2347 | if (unlikely(ret != 0)) | ||
2348 | return ret; | ||
2349 | |||
2350 | binding.bi.ctx = ctx_node->res; | ||
2351 | binding.bi.res = ((res_node) ? res_node->res : NULL); | ||
2352 | binding.bi.bt = vmw_ctx_binding_ib; | ||
2353 | binding.offset = cmd->body.offset; | ||
2354 | binding.format = cmd->body.format; | ||
2355 | |||
2356 | vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0); | ||
2357 | |||
2358 | return 0; | ||
2359 | } | ||
2360 | |||
2361 | /** | ||
2362 | * vmw_cmd_dx_set_rendertargets - Validate an | ||
2363 | * SVGA_3D_CMD_DX_SET_RENDERTARGETS command | ||
2364 | * | ||
2365 | * @dev_priv: Pointer to a device private struct. | ||
2366 | * @sw_context: The software context being used for this batch. | ||
2367 | * @header: Pointer to the command header in the command stream. | ||
2368 | */ | ||
2369 | static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv, | ||
2370 | struct vmw_sw_context *sw_context, | ||
2371 | SVGA3dCmdHeader *header) | ||
2372 | { | ||
2373 | struct { | ||
2374 | SVGA3dCmdHeader header; | ||
2375 | SVGA3dCmdDXSetRenderTargets body; | ||
2376 | } *cmd = container_of(header, typeof(*cmd), header); | ||
2377 | int ret; | ||
2378 | u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) / | ||
2379 | sizeof(SVGA3dRenderTargetViewId); | ||
2380 | |||
2381 | if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) { | ||
2382 | DRM_ERROR("Invalid DX Rendertarget binding.\n"); | ||
2383 | return -EINVAL; | ||
2384 | } | ||
2385 | |||
2386 | ret = vmw_view_bindings_add(sw_context, vmw_view_ds, | ||
2387 | vmw_ctx_binding_ds, 0, | ||
2388 | &cmd->body.depthStencilViewId, 1, 0); | ||
2389 | if (ret) | ||
2390 | return ret; | ||
2391 | |||
2392 | return vmw_view_bindings_add(sw_context, vmw_view_rt, | ||
2393 | vmw_ctx_binding_dx_rt, 0, | ||
2394 | (void *)&cmd[1], num_rt_view, 0); | ||
2395 | } | ||
2396 | |||
2397 | /** | ||
2398 | * vmw_cmd_dx_clear_rendertarget_view - Validate an | ||
2399 | * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command | ||
2400 | * | ||
2401 | * @dev_priv: Pointer to a device private struct. | ||
2402 | * @sw_context: The software context being used for this batch. | ||
2403 | * @header: Pointer to the command header in the command stream. | ||
2404 | */ | ||
2405 | static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv, | ||
2406 | struct vmw_sw_context *sw_context, | ||
2407 | SVGA3dCmdHeader *header) | ||
2408 | { | ||
2409 | struct { | ||
2410 | SVGA3dCmdHeader header; | ||
2411 | SVGA3dCmdDXClearRenderTargetView body; | ||
2412 | } *cmd = container_of(header, typeof(*cmd), header); | ||
2413 | |||
2414 | return vmw_view_id_val_add(sw_context, vmw_view_rt, | ||
2415 | cmd->body.renderTargetViewId); | ||
2416 | } | ||
2417 | |||
2418 | /** | ||
2419 | * vmw_cmd_dx_clear_depthstencil_view - Validate an | ||
2420 | * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command | ||
2421 | * | ||
2422 | * @dev_priv: Pointer to a device private struct. | ||
2423 | * @sw_context: The software context being used for this batch. | ||
2424 | * @header: Pointer to the command header in the command stream. | ||
2425 | */ | ||
2426 | static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv, | ||
2427 | struct vmw_sw_context *sw_context, | ||
2428 | SVGA3dCmdHeader *header) | ||
2429 | { | ||
2430 | struct { | ||
2431 | SVGA3dCmdHeader header; | ||
2432 | SVGA3dCmdDXClearDepthStencilView body; | ||
2433 | } *cmd = container_of(header, typeof(*cmd), header); | ||
2434 | |||
2435 | return vmw_view_id_val_add(sw_context, vmw_view_ds, | ||
2436 | cmd->body.depthStencilViewId); | ||
2437 | } | ||
2438 | |||
2439 | static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, | ||
2440 | struct vmw_sw_context *sw_context, | ||
2441 | SVGA3dCmdHeader *header) | ||
2442 | { | ||
2443 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | ||
2444 | struct vmw_resource_val_node *srf_node; | ||
2445 | struct vmw_resource *res; | ||
2446 | enum vmw_view_type view_type; | ||
2447 | int ret; | ||
2448 | /* | ||
2449 | * This is based on the fact that all affected define commands have | ||
2450 | * the same initial command body layout. | ||
2451 | */ | ||
2452 | struct { | ||
2453 | SVGA3dCmdHeader header; | ||
2454 | uint32 defined_id; | ||
2455 | uint32 sid; | ||
2456 | } *cmd; | ||
2457 | |||
2458 | if (unlikely(ctx_node == NULL)) { | ||
2459 | DRM_ERROR("DX Context not set.\n"); | ||
2460 | return -EINVAL; | ||
2461 | } | ||
2462 | |||
2463 | view_type = vmw_view_cmd_to_type(header->id); | ||
2464 | cmd = container_of(header, typeof(*cmd), header); | ||
2465 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
2466 | user_surface_converter, | ||
2467 | &cmd->sid, &srf_node); | ||
2468 | if (unlikely(ret != 0)) | ||
2469 | return ret; | ||
2470 | |||
2471 | res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]); | ||
2472 | ret = vmw_cotable_notify(res, cmd->defined_id); | ||
2473 | vmw_resource_unreference(&res); | ||
2474 | if (unlikely(ret != 0)) | ||
2475 | return ret; | ||
2476 | |||
2477 | return vmw_view_add(sw_context->man, | ||
2478 | ctx_node->res, | ||
2479 | srf_node->res, | ||
2480 | view_type, | ||
2481 | cmd->defined_id, | ||
2482 | header, | ||
2483 | header->size + sizeof(*header), | ||
2484 | &sw_context->staged_cmd_res); | ||
2485 | } | ||
2486 | |||
2487 | static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv, | ||
2488 | struct vmw_sw_context *sw_context, | ||
2489 | SVGA3dCmdHeader *header) | ||
2490 | { | ||
2491 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | ||
2492 | struct vmw_resource *res; | ||
2493 | /* | ||
2494 | * This is based on the fact that all affected define commands have | ||
2495 | * the same initial command body layout. | ||
2496 | */ | ||
2497 | struct { | ||
2498 | SVGA3dCmdHeader header; | ||
2499 | uint32 defined_id; | ||
2500 | } *cmd; | ||
2501 | enum vmw_so_type so_type; | ||
2502 | int ret; | ||
2503 | |||
2504 | if (unlikely(ctx_node == NULL)) { | ||
2505 | DRM_ERROR("DX Context not set.\n"); | ||
2506 | return -EINVAL; | ||
2507 | } | ||
2508 | |||
2509 | so_type = vmw_so_cmd_to_type(header->id); | ||
2510 | res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]); | ||
2511 | cmd = container_of(header, typeof(*cmd), header); | ||
2512 | ret = vmw_cotable_notify(res, cmd->defined_id); | ||
2513 | vmw_resource_unreference(&res); | ||
2514 | |||
2515 | return ret; | ||
2516 | } | ||
2517 | |||
2518 | /** | ||
2519 | * vmw_cmd_dx_check_subresource - Validate an | ||
2520 | * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command | ||
2521 | * | ||
2522 | * @dev_priv: Pointer to a device private struct. | ||
2523 | * @sw_context: The software context being used for this batch. | ||
2524 | * @header: Pointer to the command header in the command stream. | ||
2525 | */ | ||
2526 | static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv, | ||
2527 | struct vmw_sw_context *sw_context, | ||
2528 | SVGA3dCmdHeader *header) | ||
2529 | { | ||
2530 | struct { | ||
2531 | SVGA3dCmdHeader header; | ||
2532 | union { | ||
2533 | SVGA3dCmdDXReadbackSubResource r_body; | ||
2534 | SVGA3dCmdDXInvalidateSubResource i_body; | ||
2535 | SVGA3dCmdDXUpdateSubResource u_body; | ||
2536 | SVGA3dSurfaceId sid; | ||
2537 | }; | ||
2538 | } *cmd; | ||
2539 | |||
2540 | BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) != | ||
2541 | offsetof(typeof(*cmd), sid)); | ||
2542 | BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) != | ||
2543 | offsetof(typeof(*cmd), sid)); | ||
2544 | BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) != | ||
2545 | offsetof(typeof(*cmd), sid)); | ||
2546 | |||
2547 | cmd = container_of(header, typeof(*cmd), header); | ||
2548 | |||
2549 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | ||
2550 | user_surface_converter, | ||
2551 | &cmd->sid, NULL); | ||
2552 | } | ||
2553 | |||
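The union/BUILD_BUG_ON combination above is what lets a single validator serve readback, invalidate and update of a subresource: all three body layouts begin with the surface id, so the check can read it through cmd->sid, and any future layout change breaks the build instead of silently mis-parsing. A minimal standalone illustration of the idiom, using hypothetical structs:

	/* Inside a function body (BUILD_BUG_ON expands to a statement): */
	union example_cmd {
		struct { u32 sid; u32 mipLevel; } readback;	/* hypothetical */
		struct { u32 sid; u32 subResource; } update;	/* hypothetical */
		u32 sid;	/* aliases the first field of every variant */
	};

	BUILD_BUG_ON(offsetof(union example_cmd, readback.sid) !=
		     offsetof(union example_cmd, sid));
	BUILD_BUG_ON(offsetof(union example_cmd, update.sid) !=
		     offsetof(union example_cmd, sid));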
2554 | static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv, | ||
2555 | struct vmw_sw_context *sw_context, | ||
2556 | SVGA3dCmdHeader *header) | ||
2557 | { | ||
2558 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | ||
2559 | |||
2560 | if (unlikely(ctx_node == NULL)) { | ||
2561 | DRM_ERROR("DX Context not set.\n"); | ||
2562 | return -EINVAL; | ||
2563 | } | ||
2564 | |||
2565 | return 0; | ||
2566 | } | ||
2567 | |||
2568 | /** | ||
2569 | * vmw_cmd_dx_view_remove - validate a view remove command and | ||
2570 | * schedule the view resource for removal. | ||
2571 | * | ||
2572 | * @dev_priv: Pointer to a device private struct. | ||
2573 | * @sw_context: The software context being used for this batch. | ||
2574 | * @header: Pointer to the command header in the command stream. | ||
2575 | * | ||
2576 | * Check that the view exists, and if it was not created using this | ||
2577 | * command batch, make sure it's validated (present in the device) so that | ||
2578 | * the remove command will not confuse the device. | ||
2579 | */ | ||
2580 | static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, | ||
2581 | struct vmw_sw_context *sw_context, | ||
2582 | SVGA3dCmdHeader *header) | ||
2583 | { | ||
2584 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | ||
2585 | struct { | ||
2586 | SVGA3dCmdHeader header; | ||
2587 | union vmw_view_destroy body; | ||
2588 | } *cmd = container_of(header, typeof(*cmd), header); | ||
2589 | enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id); | ||
2590 | struct vmw_resource *view; | ||
2591 | int ret; | ||
2592 | |||
2593 | if (!ctx_node) { | ||
2594 | DRM_ERROR("DX Context not set.\n"); | ||
2595 | return -EINVAL; | ||
2596 | } | ||
2597 | |||
2598 | ret = vmw_view_remove(sw_context->man, | ||
2599 | cmd->body.view_id, view_type, | ||
2600 | &sw_context->staged_cmd_res, | ||
2601 | &view); | ||
2602 | if (ret || !view) | ||
2603 | return ret; | ||
2604 | |||
2605 | /* | ||
2606 | * Add view to the validate list iff it was not created using this | ||
2607 | * command batch. | ||
2608 | */ | ||
2609 | return vmw_view_res_val_add(sw_context, view); | ||
2610 | } | ||
2611 | |||
2612 | /** | ||
2613 | * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER | ||
2614 | * command | ||
2615 | * | ||
2616 | * @dev_priv: Pointer to a device private struct. | ||
2617 | * @sw_context: The software context being used for this batch. | ||
2618 | * @header: Pointer to the command header in the command stream. | ||
2619 | */ | ||
2620 | static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv, | ||
2621 | struct vmw_sw_context *sw_context, | ||
2622 | SVGA3dCmdHeader *header) | ||
2623 | { | ||
2624 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | ||
2625 | struct vmw_resource *res; | ||
2626 | struct { | ||
2627 | SVGA3dCmdHeader header; | ||
2628 | SVGA3dCmdDXDefineShader body; | ||
2629 | } *cmd = container_of(header, typeof(*cmd), header); | ||
2630 | int ret; | ||
2631 | |||
2632 | if (!ctx_node) { | ||
2633 | DRM_ERROR("DX Context not set.\n"); | ||
2634 | return -EINVAL; | ||
2635 | } | ||
2636 | |||
2637 | res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER); | ||
2638 | ret = vmw_cotable_notify(res, cmd->body.shaderId); | ||
2639 | vmw_resource_unreference(&res); | ||
2640 | if (ret) | ||
2641 | return ret; | ||
2642 | |||
2643 | return vmw_dx_shader_add(sw_context->man, ctx_node->res, | ||
2644 | cmd->body.shaderId, cmd->body.type, | ||
2645 | &sw_context->staged_cmd_res); | ||
2646 | } | ||
2647 | |||
2648 | /** | ||
2649 | * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER | ||
2650 | * command | ||
2651 | * | ||
2652 | * @dev_priv: Pointer to a device private struct. | ||
2653 | * @sw_context: The software context being used for this batch. | ||
2654 | * @header: Pointer to the command header in the command stream. | ||
2655 | */ | ||
2656 | static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv, | ||
2657 | struct vmw_sw_context *sw_context, | ||
2658 | SVGA3dCmdHeader *header) | ||
2659 | { | ||
2660 | struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; | ||
2661 | struct { | ||
2662 | SVGA3dCmdHeader header; | ||
2663 | SVGA3dCmdDXDestroyShader body; | ||
2664 | } *cmd = container_of(header, typeof(*cmd), header); | ||
2665 | int ret; | ||
2666 | |||
2667 | if (!ctx_node) { | ||
2668 | DRM_ERROR("DX Context not set.\n"); | ||
2669 | return -EINVAL; | ||
2670 | } | ||
2671 | |||
2672 | ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0, | ||
2673 | &sw_context->staged_cmd_res); | ||
2674 | if (ret) | ||
2675 | DRM_ERROR("Could not find shader to remove.\n"); | ||
2676 | |||
2677 | return ret; | ||
2678 | } | ||
2679 | |||
2680 | /** | ||
2681 | * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER | ||
2682 | * command | ||
2683 | * | ||
2684 | * @dev_priv: Pointer to a device private struct. | ||
2685 | * @sw_context: The software context being used for this batch. | ||
2686 | * @header: Pointer to the command header in the command stream. | ||
2687 | */ | ||
2688 | static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv, | ||
2689 | struct vmw_sw_context *sw_context, | ||
2690 | SVGA3dCmdHeader *header) | ||
2691 | { | ||
2692 | struct vmw_resource_val_node *ctx_node; | ||
2693 | struct vmw_resource_val_node *res_node; | ||
2694 | struct vmw_resource *res; | ||
2695 | struct { | ||
2696 | SVGA3dCmdHeader header; | ||
2697 | SVGA3dCmdDXBindShader body; | ||
2698 | } *cmd = container_of(header, typeof(*cmd), header); | ||
2699 | int ret; | ||
2700 | |||
2701 | if (cmd->body.cid != SVGA3D_INVALID_ID) { | ||
2702 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, | ||
2703 | user_context_converter, | ||
2704 | &cmd->body.cid, &ctx_node); | ||
2705 | if (ret) | ||
2706 | return ret; | ||
2707 | } else { | ||
2708 | ctx_node = sw_context->dx_ctx_node; | ||
2709 | if (!ctx_node) { | ||
2710 | DRM_ERROR("DX Context not set.\n"); | ||
2711 | return -EINVAL; | ||
2712 | } | ||
2713 | } | ||
2714 | |||
2715 | res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res), | ||
2716 | cmd->body.shid, 0); | ||
2717 | if (IS_ERR(res)) { | ||
2718 | DRM_ERROR("Could not find shader to bind.\n"); | ||
2719 | return PTR_ERR(res); | ||
2720 | } | ||
2721 | |||
2722 | ret = vmw_resource_val_add(sw_context, res, &res_node); | ||
2723 | if (ret) { | ||
2724 | DRM_ERROR("Error creating resource validation node.\n"); | ||
2725 | goto out_unref; | ||
2726 | } | ||
2727 | |||
2728 | |||
2729 | ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node, | ||
2730 | &cmd->body.mobid, | ||
2731 | cmd->body.offsetInBytes); | ||
2732 | out_unref: | ||
2733 | vmw_resource_unreference(&res); | ||
2734 | |||
2735 | return ret; | ||
2736 | } | ||
2737 | |||
1846 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, | 2738 | static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, |
1847 | struct vmw_sw_context *sw_context, | 2739 | struct vmw_sw_context *sw_context, |
1848 | void *buf, uint32_t *size) | 2740 | void *buf, uint32_t *size) |
@@ -2050,7 +2942,136 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { | |||
2050 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, | 2942 | VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, |
2051 | false, false, true), | 2943 | false, false, true), |
2052 | VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check, | 2944 | VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check, |
2053 | true, false, true) | 2945 | true, false, true), |
2946 | VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid, | ||
2947 | false, false, true), | ||
2948 | VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid, | ||
2949 | false, false, true), | ||
2950 | VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid, | ||
2951 | false, false, true), | ||
2952 | VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid, | ||
2953 | false, false, true), | ||
2954 | |||
2955 | /* | ||
2956 | * DX commands | ||
2957 | */ | ||
2958 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid, | ||
2959 | false, false, true), | ||
2960 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid, | ||
2961 | false, false, true), | ||
2962 | VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid, | ||
2963 | false, false, true), | ||
2964 | VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid, | ||
2965 | false, false, true), | ||
2966 | VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid, | ||
2967 | false, false, true), | ||
2968 | VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER, | ||
2969 | &vmw_cmd_dx_set_single_constant_buffer, true, false, true), | ||
2970 | VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES, | ||
2971 | &vmw_cmd_dx_set_shader_res, true, false, true), | ||
2972 | VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader, | ||
2973 | true, false, true), | ||
2974 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_invalid, | ||
2975 | true, false, true), | ||
2976 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED, &vmw_cmd_invalid, | ||
2977 | true, false, true), | ||
2978 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_invalid, | ||
2979 | true, false, true), | ||
2980 | VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS, | ||
2981 | &vmw_cmd_dx_set_vertex_buffers, true, false, true), | ||
2982 | VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER, | ||
2983 | &vmw_cmd_dx_set_index_buffer, true, false, true), | ||
2984 | VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS, | ||
2985 | &vmw_cmd_dx_set_rendertargets, true, false, true), | ||
2986 | VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check, | ||
2987 | true, false, true), | ||
2988 | VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE, &vmw_cmd_dx_cid_check, | ||
2989 | true, false, true), | ||
2990 | VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE, | ||
2991 | &vmw_cmd_dx_cid_check, | ||
2992 | true, false, true), | ||
2993 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_invalid, | ||
2994 | true, false, true), | ||
2995 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_invalid, | ||
2996 | true, false, true), | ||
2997 | VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_invalid, | ||
2998 | true, false, true), | ||
2999 | VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_invalid, | ||
3000 | true, false, true), | ||
3001 | VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_invalid, | ||
3002 | true, false, true), | ||
3003 | VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid, | ||
3004 | true, false, true), | ||
3005 | VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid, | ||
3006 | true, false, true), | ||
3007 | VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check, | ||
3008 | true, false, true), | ||
3009 | VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check, | ||
3010 | true, false, true), | ||
3011 | VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW, | ||
3012 | &vmw_cmd_dx_clear_rendertarget_view, true, false, true), | ||
3013 | VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW, | ||
3014 | &vmw_cmd_dx_clear_depthstencil_view, true, false, true), | ||
3015 | VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION, &vmw_cmd_invalid, | ||
3016 | true, false, true), | ||
3017 | VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid, | ||
3018 | true, false, true), | ||
3019 | VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid, | ||
3020 | true, false, true), | ||
3021 | VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE, | ||
3022 | &vmw_cmd_dx_check_subresource, true, false, true), | ||
3023 | VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE, | ||
3024 | &vmw_cmd_dx_check_subresource, true, false, true), | ||
3025 | VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE, | ||
3026 | &vmw_cmd_dx_check_subresource, true, false, true), | ||
3027 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW, | ||
3028 | &vmw_cmd_dx_view_define, true, false, true), | ||
3029 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW, | ||
3030 | &vmw_cmd_dx_view_remove, true, false, true), | ||
3031 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW, | ||
3032 | &vmw_cmd_dx_view_define, true, false, true), | ||
3033 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW, | ||
3034 | &vmw_cmd_dx_view_remove, true, false, true), | ||
3035 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW, | ||
3036 | &vmw_cmd_dx_view_define, true, false, true), | ||
3037 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW, | ||
3038 | &vmw_cmd_dx_view_remove, true, false, true), | ||
3039 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT, | ||
3040 | &vmw_cmd_dx_so_define, true, false, true), | ||
3041 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT, | ||
3042 | &vmw_cmd_dx_cid_check, true, false, true), | ||
3043 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE, | ||
3044 | &vmw_cmd_dx_so_define, true, false, true), | ||
3045 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE, | ||
3046 | &vmw_cmd_dx_cid_check, true, false, true), | ||
3047 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE, | ||
3048 | &vmw_cmd_dx_so_define, true, false, true), | ||
3049 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE, | ||
3050 | &vmw_cmd_dx_cid_check, true, false, true), | ||
3051 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE, | ||
3052 | &vmw_cmd_dx_so_define, true, false, true), | ||
3053 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE, | ||
3054 | &vmw_cmd_dx_cid_check, true, false, true), | ||
3055 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE, | ||
3056 | &vmw_cmd_dx_so_define, true, false, true), | ||
3057 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE, | ||
3058 | &vmw_cmd_dx_cid_check, true, false, true), | ||
3059 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER, | ||
3060 | &vmw_cmd_dx_define_shader, true, false, true), | ||
3061 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER, | ||
3062 | &vmw_cmd_dx_destroy_shader, true, false, true), | ||
3063 | VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER, | ||
3064 | &vmw_cmd_dx_bind_shader, true, false, true), | ||
3065 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT, | ||
3066 | &vmw_cmd_dx_so_define, true, false, true), | ||
3067 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT, | ||
3068 | &vmw_cmd_dx_cid_check, true, false, true), | ||
3069 | VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_invalid, | ||
3070 | true, false, true), | ||
3071 | VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT, | ||
3072 | &vmw_cmd_dx_cid_check, true, false, true), | ||
3073 | VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY, | ||
3074 | &vmw_cmd_dx_cid_check, true, false, true), | ||
2054 | }; | 3075 | }; |
2055 | 3076 | ||
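The table rows added above all follow the same pattern: each VMW_CMD_DEF entry names an SVGA-3D command id, its validator callback, and three flags governing when the command is accepted (user versus kernel submission and guest-backed-object state; the exact flag semantics live in the VMW_CMD_DEF macro, which is not part of this hunk). vmw_cmd_check(), which follows, presumably indexes the table by command id and invokes the callback, so every validator in this file shares one signature:

	/* Sketch of the common validator shape, mirroring the DX handlers
	 * defined earlier in this patch: */
	typedef int (*vmw_cmd_validator_t)(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header);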
2056 | static int vmw_cmd_check(struct vmw_private *dev_priv, | 3077 | static int vmw_cmd_check(struct vmw_private *dev_priv, |
@@ -2183,7 +3204,8 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context) | |||
2183 | * | 3204 | * |
2184 | * @list: The resource list. | 3205 | * @list: The resource list. |
2185 | */ | 3206 | */ |
2186 | static void vmw_resource_list_unreference(struct list_head *list) | 3207 | static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context, |
3208 | struct list_head *list) | ||
2187 | { | 3209 | { |
2188 | struct vmw_resource_val_node *val, *val_next; | 3210 | struct vmw_resource_val_node *val, *val_next; |
2189 | 3211 | ||
@@ -2194,8 +3216,15 @@ static void vmw_resource_list_unreference(struct list_head *list) | |||
2194 | list_for_each_entry_safe(val, val_next, list, head) { | 3216 | list_for_each_entry_safe(val, val_next, list, head) { |
2195 | list_del_init(&val->head); | 3217 | list_del_init(&val->head); |
2196 | vmw_resource_unreference(&val->res); | 3218 | vmw_resource_unreference(&val->res); |
2197 | if (unlikely(val->staged_bindings)) | 3219 | |
2198 | kfree(val->staged_bindings); | 3220 | if (val->staged_bindings) { |
3221 | if (val->staged_bindings != sw_context->staged_bindings) | ||
3222 | vmw_binding_state_free(val->staged_bindings); | ||
3223 | else | ||
3224 | sw_context->staged_bindings_inuse = false; | ||
3225 | val->staged_bindings = NULL; | ||
3226 | } | ||
3227 | |||
2199 | kfree(val); | 3228 | kfree(val); |
2200 | } | 3229 | } |
2201 | } | 3230 | } |
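The free path above distinguishes between the binding-state tracker owned by the sw_context (merely marked available again via staged_bindings_inuse) and per-node trackers allocated for additional contexts in the same submission (released with vmw_binding_state_free()). The allocation-side counterpart is not part of this hunk; presumably it looks roughly like the sketch below, where vmw_binding_state_alloc() is assumed to be the constructor provided by the new vmwgfx_binding.c:

	if (!sw_context->staged_bindings_inuse) {
		/* Fast path: reuse the tracker cached on the sw_context. */
		node->staged_bindings = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	} else {
		/* More than one context in this submission: allocate one. */
		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged_bindings)) {
			ret = PTR_ERR(node->staged_bindings);
			node->staged_bindings = NULL;
			return ret;
		}
	}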
@@ -2431,8 +3460,13 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv, | |||
2431 | u32 command_size, | 3460 | u32 command_size, |
2432 | struct vmw_sw_context *sw_context) | 3461 | struct vmw_sw_context *sw_context) |
2433 | { | 3462 | { |
2434 | void *cmd = vmw_fifo_reserve(dev_priv, command_size); | 3463 | void *cmd; |
2435 | 3464 | ||
3465 | if (sw_context->dx_ctx_node) | ||
3466 | cmd = vmw_fifo_reserve_dx(dev_priv, command_size, | ||
3467 | sw_context->dx_ctx_node->res->id); | ||
3468 | else | ||
3469 | cmd = vmw_fifo_reserve(dev_priv, command_size); | ||
2436 | if (!cmd) { | 3470 | if (!cmd) { |
2437 | DRM_ERROR("Failed reserving fifo space for commands.\n"); | 3471 | DRM_ERROR("Failed reserving fifo space for commands.\n"); |
2438 | return -ENOMEM; | 3472 | return -ENOMEM; |
@@ -2464,8 +3498,10 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv, | |||
2464 | u32 command_size, | 3498 | u32 command_size, |
2465 | struct vmw_sw_context *sw_context) | 3499 | struct vmw_sw_context *sw_context) |
2466 | { | 3500 | { |
3501 | u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id : | ||
3502 | SVGA3D_INVALID_ID); | ||
2467 | void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, | 3503 | void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, |
2468 | SVGA3D_INVALID_ID, false, header); | 3504 | id, false, header); |
2469 | 3505 | ||
2470 | vmw_apply_relocations(sw_context); | 3506 | vmw_apply_relocations(sw_context); |
2471 | vmw_resource_relocations_apply(cmd, &sw_context->res_relocations); | 3507 | vmw_resource_relocations_apply(cmd, &sw_context->res_relocations); |
@@ -2535,12 +3571,44 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv, | |||
2535 | return kernel_commands; | 3571 | return kernel_commands; |
2536 | } | 3572 | } |
2537 | 3573 | ||
3574 | static int vmw_execbuf_tie_context(struct vmw_private *dev_priv, | ||
3575 | struct vmw_sw_context *sw_context, | ||
3576 | uint32_t handle) | ||
3577 | { | ||
3578 | struct vmw_resource_val_node *ctx_node; | ||
3579 | struct vmw_resource *res; | ||
3580 | int ret; | ||
3581 | |||
3582 | if (handle == SVGA3D_INVALID_ID) | ||
3583 | return 0; | ||
3584 | |||
3585 | ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile, | ||
3586 | handle, user_context_converter, | ||
3587 | &res); | ||
3588 | if (unlikely(ret != 0)) { | ||
3589 | DRM_ERROR("Could not find or use DX context 0x%08x.\n", | ||
3590 | (unsigned) handle); | ||
3591 | return ret; | ||
3592 | } | ||
3593 | |||
3594 | ret = vmw_resource_val_add(sw_context, res, &ctx_node); | ||
3595 | if (unlikely(ret != 0)) | ||
3596 | goto out_err; | ||
3597 | |||
3598 | sw_context->dx_ctx_node = ctx_node; | ||
3599 | sw_context->man = vmw_context_res_man(res); | ||
3600 | out_err: | ||
3601 | vmw_resource_unreference(&res); | ||
3602 | return ret; | ||
3603 | } | ||
3604 | |||
2538 | int vmw_execbuf_process(struct drm_file *file_priv, | 3605 | int vmw_execbuf_process(struct drm_file *file_priv, |
2539 | struct vmw_private *dev_priv, | 3606 | struct vmw_private *dev_priv, |
2540 | void __user *user_commands, | 3607 | void __user *user_commands, |
2541 | void *kernel_commands, | 3608 | void *kernel_commands, |
2542 | uint32_t command_size, | 3609 | uint32_t command_size, |
2543 | uint64_t throttle_us, | 3610 | uint64_t throttle_us, |
3611 | uint32_t dx_context_handle, | ||
2544 | struct drm_vmw_fence_rep __user *user_fence_rep, | 3612 | struct drm_vmw_fence_rep __user *user_fence_rep, |
2545 | struct vmw_fence_obj **out_fence) | 3613 | struct vmw_fence_obj **out_fence) |
2546 | { | 3614 | { |
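vmw_execbuf_tie_context above looks up the optional DX context handle, puts the context on the validation list, records the context node plus its command-buffer resource manager, and then drops the lookup reference. A compact sketch of that lookup/record/unreference flow, using stand-in helpers rather than the real vmwgfx API:

        #include <stdint.h>
        #include <stdlib.h>
        #include <errno.h>

        #define INVALID_HANDLE 0xffffffffu

        struct res { int refcount; };

        struct exec_state {
                struct res *dx_ctx;          /* remembered for command submission */
        };

        /* Illustrative stand-ins for handle lookup and reference counting. */
        static int lookup_handle(uint32_t handle, struct res **out)
        {
                struct res *r = calloc(1, sizeof(*r));

                if (!r)
                        return -ENOMEM;
                r->refcount = 1;             /* lookup hands back a reference */
                *out = r;
                return 0;
        }

        static void res_put(struct res **r)
        {
                if (--(*r)->refcount == 0)
                        free(*r);
                *r = NULL;
        }

        static int add_to_validation_list(struct exec_state *st, struct res *r)
        {
                r->refcount++;               /* the list keeps its own reference */
                return 0;
        }

        static int tie_context(struct exec_state *st, uint32_t handle)
        {
                struct res *r;
                int ret;

                if (handle == INVALID_HANDLE)
                        return 0;            /* no DX context requested: legacy path */

                ret = lookup_handle(handle, &r);
                if (ret)
                        return ret;

                ret = add_to_validation_list(st, r);
                if (ret == 0)
                        st->dx_ctx = r;      /* node stays valid via the list reference */

                res_put(&r);                 /* drop the lookup reference either way */
                return ret;
        }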
@@ -2596,12 +3664,17 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
2596 | sw_context->cur_reloc = 0; | 3664 | sw_context->cur_reloc = 0; |
2597 | sw_context->cur_val_buf = 0; | 3665 | sw_context->cur_val_buf = 0; |
2598 | INIT_LIST_HEAD(&sw_context->resource_list); | 3666 | INIT_LIST_HEAD(&sw_context->resource_list); |
3667 | INIT_LIST_HEAD(&sw_context->ctx_resource_list); | ||
2599 | sw_context->cur_query_bo = dev_priv->pinned_bo; | 3668 | sw_context->cur_query_bo = dev_priv->pinned_bo; |
2600 | sw_context->last_query_ctx = NULL; | 3669 | sw_context->last_query_ctx = NULL; |
2601 | sw_context->needs_post_query_barrier = false; | 3670 | sw_context->needs_post_query_barrier = false; |
3671 | sw_context->dx_ctx_node = NULL; | ||
2602 | memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache)); | 3672 | memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache)); |
2603 | INIT_LIST_HEAD(&sw_context->validate_nodes); | 3673 | INIT_LIST_HEAD(&sw_context->validate_nodes); |
2604 | INIT_LIST_HEAD(&sw_context->res_relocations); | 3674 | INIT_LIST_HEAD(&sw_context->res_relocations); |
3675 | if (sw_context->staged_bindings) | ||
3676 | vmw_binding_state_reset(sw_context->staged_bindings); | ||
3677 | |||
2605 | if (!sw_context->res_ht_initialized) { | 3678 | if (!sw_context->res_ht_initialized) { |
2606 | ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER); | 3679 | ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER); |
2607 | if (unlikely(ret != 0)) | 3680 | if (unlikely(ret != 0)) |
@@ -2610,11 +3683,20 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
2610 | } | 3683 | } |
2611 | INIT_LIST_HEAD(&sw_context->staged_cmd_res); | 3684 | INIT_LIST_HEAD(&sw_context->staged_cmd_res); |
2612 | INIT_LIST_HEAD(&resource_list); | 3685 | INIT_LIST_HEAD(&resource_list); |
3686 | ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle); | ||
3687 | if (unlikely(ret != 0)) { | ||
3688 | list_splice_init(&sw_context->ctx_resource_list, | ||
3689 | &sw_context->resource_list); | ||
3690 | goto out_err_nores; | ||
3691 | } | ||
3692 | |||
2613 | ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, | 3693 | ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, |
2614 | command_size); | 3694 | command_size); |
2615 | if (unlikely(ret != 0)) | 3695 | if (unlikely(ret != 0)) |
2616 | goto out_err_nores; | 3696 | goto out_err_nores; |
2617 | 3697 | ||
3698 | list_splice_init(&sw_context->ctx_resource_list, | ||
3699 | &sw_context->resource_list); | ||
2618 | ret = vmw_resources_reserve(sw_context); | 3700 | ret = vmw_resources_reserve(sw_context); |
2619 | if (unlikely(ret != 0)) | 3701 | if (unlikely(ret != 0)) |
2620 | goto out_err_nores; | 3702 | goto out_err_nores; |
@@ -2622,7 +3704,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
2622 | ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, | 3704 | ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, |
2623 | true, NULL); | 3705 | true, NULL); |
2624 | if (unlikely(ret != 0)) | 3706 | if (unlikely(ret != 0)) |
2625 | goto out_err; | 3707 | goto out_err_nores; |
2626 | 3708 | ||
2627 | ret = vmw_validate_buffers(dev_priv, sw_context); | 3709 | ret = vmw_validate_buffers(dev_priv, sw_context); |
2628 | if (unlikely(ret != 0)) | 3710 | if (unlikely(ret != 0)) |
@@ -2652,8 +3734,9 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
2652 | sw_context); | 3734 | sw_context); |
2653 | header = NULL; | 3735 | header = NULL; |
2654 | } | 3736 | } |
3737 | mutex_unlock(&dev_priv->binding_mutex); | ||
2655 | if (ret) | 3738 | if (ret) |
2656 | goto out_unlock_binding; | 3739 | goto out_err; |
2657 | 3740 | ||
2658 | vmw_query_bo_switch_commit(dev_priv, sw_context); | 3741 | vmw_query_bo_switch_commit(dev_priv, sw_context); |
2659 | ret = vmw_execbuf_fence_commands(file_priv, dev_priv, | 3742 | ret = vmw_execbuf_fence_commands(file_priv, dev_priv, |
@@ -2668,8 +3751,8 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
2668 | if (ret != 0) | 3751 | if (ret != 0) |
2669 | DRM_ERROR("Fence submission error. Syncing.\n"); | 3752 | DRM_ERROR("Fence submission error. Syncing.\n"); |
2670 | 3753 | ||
2671 | vmw_resource_list_unreserve(&sw_context->resource_list, false); | 3754 | vmw_resource_list_unreserve(sw_context, &sw_context->resource_list, |
2672 | mutex_unlock(&dev_priv->binding_mutex); | 3755 | false); |
2673 | 3756 | ||
2674 | ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, | 3757 | ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, |
2675 | (void *) fence); | 3758 | (void *) fence); |
@@ -2698,7 +3781,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
2698 | * Unreference resources outside of the cmdbuf_mutex to | 3781 | * Unreference resources outside of the cmdbuf_mutex to |
2699 | * avoid deadlocks in resource destruction paths. | 3782 | * avoid deadlocks in resource destruction paths. |
2700 | */ | 3783 | */ |
2701 | vmw_resource_list_unreference(&resource_list); | 3784 | vmw_resource_list_unreference(sw_context, &resource_list); |
2702 | 3785 | ||
2703 | return 0; | 3786 | return 0; |
2704 | 3787 | ||
@@ -2707,7 +3790,8 @@ out_unlock_binding: | |||
2707 | out_err: | 3790 | out_err: |
2708 | ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); | 3791 | ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); |
2709 | out_err_nores: | 3792 | out_err_nores: |
2710 | vmw_resource_list_unreserve(&sw_context->resource_list, true); | 3793 | vmw_resource_list_unreserve(sw_context, &sw_context->resource_list, |
3794 | true); | ||
2711 | vmw_resource_relocations_free(&sw_context->res_relocations); | 3795 | vmw_resource_relocations_free(&sw_context->res_relocations); |
2712 | vmw_free_relocations(sw_context); | 3796 | vmw_free_relocations(sw_context); |
2713 | vmw_clear_validations(sw_context); | 3797 | vmw_clear_validations(sw_context); |
@@ -2725,7 +3809,7 @@ out_unlock: | |||
2725 | * Unreference resources outside of the cmdbuf_mutex to | 3809 | * Unreference resources outside of the cmdbuf_mutex to |
2726 | * avoid deadlocks in resource destruction paths. | 3810 | * avoid deadlocks in resource destruction paths. |
2727 | */ | 3811 | */ |
2728 | vmw_resource_list_unreference(&resource_list); | 3812 | vmw_resource_list_unreference(sw_context, &resource_list); |
2729 | if (unlikely(error_resource != NULL)) | 3813 | if (unlikely(error_resource != NULL)) |
2730 | vmw_resource_unreference(&error_resource); | 3814 | vmw_resource_unreference(&error_resource); |
2731 | out_free_header: | 3815 | out_free_header: |
@@ -2877,36 +3961,68 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv) | |||
2877 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 3961 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
2878 | } | 3962 | } |
2879 | 3963 | ||
2880 | 3964 | int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data, | |
2881 | int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | 3965 | struct drm_file *file_priv, size_t size) |
2882 | struct drm_file *file_priv) | ||
2883 | { | 3966 | { |
2884 | struct vmw_private *dev_priv = vmw_priv(dev); | 3967 | struct vmw_private *dev_priv = vmw_priv(dev); |
2885 | struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data; | 3968 | struct drm_vmw_execbuf_arg arg; |
2886 | int ret; | 3969 | int ret; |
3970 | static const size_t copy_offset[] = { | ||
3971 | offsetof(struct drm_vmw_execbuf_arg, context_handle), | ||
3972 | sizeof(struct drm_vmw_execbuf_arg)}; | ||
3973 | |||
3974 | if (unlikely(size < copy_offset[0])) { | ||
3975 | DRM_ERROR("Invalid command size, ioctl %d\n", | ||
3976 | DRM_VMW_EXECBUF); | ||
3977 | return -EINVAL; | ||
3978 | } | ||
3979 | |||
3980 | if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0) | ||
3981 | return -EFAULT; | ||
2887 | 3982 | ||
2888 | /* | 3983 | /* |
2889 | * This will allow us to extend the ioctl argument while | 3984 | * Extend the ioctl argument while |
2890 | * maintaining backwards compatibility: | 3985 | * maintaining backwards compatibility: |
2891 | * We take different code paths depending on the value of | 3986 | * We take different code paths depending on the value of |
2892 | * arg->version. | 3987 | * arg.version. |
2893 | */ | 3988 | */ |
2894 | 3989 | ||
2895 | if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) { | 3990 | if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION || |
3991 | arg.version == 0)) { | ||
2896 | DRM_ERROR("Incorrect execbuf version.\n"); | 3992 | DRM_ERROR("Incorrect execbuf version.\n"); |
2897 | DRM_ERROR("You're running outdated experimental " | ||
2898 | "vmwgfx user-space drivers."); | ||
2899 | return -EINVAL; | 3993 | return -EINVAL; |
2900 | } | 3994 | } |
2901 | 3995 | ||
3996 | if (arg.version > 1 && | ||
3997 | copy_from_user(&arg.context_handle, | ||
3998 | (void __user *) (data + copy_offset[0]), | ||
3999 | copy_offset[arg.version - 1] - | ||
4000 | copy_offset[0]) != 0) | ||
4001 | return -EFAULT; | ||
4002 | |||
4003 | switch (arg.version) { | ||
4004 | case 1: | ||
4005 | arg.context_handle = (uint32_t) -1; | ||
4006 | break; | ||
4007 | case 2: | ||
4008 | if (arg.pad64 != 0) { | ||
4009 | DRM_ERROR("Unused IOCTL data not set to zero.\n"); | ||
4010 | return -EINVAL; | ||
4011 | } | ||
4012 | break; | ||
4013 | default: | ||
4014 | break; | ||
4015 | } | ||
4016 | |||
2902 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); | 4017 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); |
2903 | if (unlikely(ret != 0)) | 4018 | if (unlikely(ret != 0)) |
2904 | return ret; | 4019 | return ret; |
2905 | 4020 | ||
2906 | ret = vmw_execbuf_process(file_priv, dev_priv, | 4021 | ret = vmw_execbuf_process(file_priv, dev_priv, |
2907 | (void __user *)(unsigned long)arg->commands, | 4022 | (void __user *)(unsigned long)arg.commands, |
2908 | NULL, arg->command_size, arg->throttle_us, | 4023 | NULL, arg.command_size, arg.throttle_us, |
2909 | (void __user *)(unsigned long)arg->fence_rep, | 4024 | arg.context_handle, |
4025 | (void __user *)(unsigned long)arg.fence_rep, | ||
2910 | NULL); | 4026 | NULL); |
2911 | ttm_read_unlock(&dev_priv->reservation_sem); | 4027 | ttm_read_unlock(&dev_priv->reservation_sem); |
2912 | if (unlikely(ret != 0)) | 4028 | if (unlikely(ret != 0)) |
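The reworked vmw_execbuf_ioctl copies the argument in two stages so that version-1 userspace keeps working: first the version-1 prefix, then, once arg.version is known, the newer tail up to the size that version defines. A stand-alone sketch of that copy scheme; the struct layout is simplified, memcpy stands in for copy_from_user, and the pad64 check is omitted:

        #include <stdint.h>
        #include <stddef.h>
        #include <string.h>
        #include <errno.h>

        struct execbuf_arg {                 /* simplified version-2 layout */
                uint64_t commands;
                uint32_t command_size;
                uint32_t version;            /* last field of the version-1 layout */
                uint32_t context_handle;     /* new in version 2 */
                uint32_t pad64;
        };

        #define MAX_VERSION 2

        static int parse_execbuf_arg(const void *data, size_t size,
                                     struct execbuf_arg *arg)
        {
                static const size_t copy_offset[] = {
                        offsetof(struct execbuf_arg, context_handle), /* end of v1 */
                        sizeof(struct execbuf_arg),                   /* end of v2 */
                };

                if (size < copy_offset[0])
                        return -EINVAL;              /* not even a v1 argument */

                memset(arg, 0, sizeof(*arg));
                memcpy(arg, data, copy_offset[0]);   /* stage 1: the v1 prefix */

                if (arg->version == 0 || arg->version > MAX_VERSION)
                        return -EINVAL;

                if (arg->version > 1)                /* stage 2: the newer tail */
                        memcpy((char *)arg + copy_offset[0],
                               (const char *)data + copy_offset[0],
                               copy_offset[arg->version - 1] - copy_offset[0]);
                else
                        arg->context_handle = (uint32_t)-1; /* v1: no DX context */

                return 0;
        }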
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index cb24936a18c1..3c876d4826c0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
@@ -29,6 +29,11 @@ | |||
29 | #include <drm/drmP.h> | 29 | #include <drm/drmP.h> |
30 | #include <drm/ttm/ttm_placement.h> | 30 | #include <drm/ttm/ttm_placement.h> |
31 | 31 | ||
32 | struct vmw_temp_set_context { | ||
33 | SVGA3dCmdHeader header; | ||
34 | SVGA3dCmdDXTempSetContext body; | ||
35 | }; | ||
36 | |||
32 | bool vmw_fifo_have_3d(struct vmw_private *dev_priv) | 37 | bool vmw_fifo_have_3d(struct vmw_private *dev_priv) |
33 | { | 38 | { |
34 | u32 __iomem *fifo_mem = dev_priv->mmio_virt; | 39 | u32 __iomem *fifo_mem = dev_priv->mmio_virt; |
@@ -99,6 +104,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
99 | uint32_t max; | 104 | uint32_t max; |
100 | uint32_t min; | 105 | uint32_t min; |
101 | 106 | ||
107 | fifo->dx = false; | ||
102 | fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE; | 108 | fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE; |
103 | fifo->static_buffer = vmalloc(fifo->static_buffer_size); | 109 | fifo->static_buffer = vmalloc(fifo->static_buffer_size); |
104 | if (unlikely(fifo->static_buffer == NULL)) | 110 | if (unlikely(fifo->static_buffer == NULL)) |
@@ -396,15 +402,20 @@ out_err: | |||
396 | return NULL; | 402 | return NULL; |
397 | } | 403 | } |
398 | 404 | ||
399 | void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) | 405 | void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, |
406 | int ctx_id) | ||
400 | { | 407 | { |
401 | void *ret; | 408 | void *ret; |
402 | 409 | ||
403 | if (dev_priv->cman) | 410 | if (dev_priv->cman) |
404 | ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes, | 411 | ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes, |
405 | SVGA3D_INVALID_ID, false, NULL); | 412 | ctx_id, false, NULL); |
406 | else | 413 | else if (ctx_id == SVGA3D_INVALID_ID) |
407 | ret = vmw_local_fifo_reserve(dev_priv, bytes); | 414 | ret = vmw_local_fifo_reserve(dev_priv, bytes); |
415 | else { | ||
416 | WARN_ON("Command buffer has not been allocated.\n"); | ||
417 | ret = NULL; | ||
418 | } | ||
408 | if (IS_ERR_OR_NULL(ret)) { | 419 | if (IS_ERR_OR_NULL(ret)) { |
409 | DRM_ERROR("Fifo reserve failure of %u bytes.\n", | 420 | DRM_ERROR("Fifo reserve failure of %u bytes.\n", |
410 | (unsigned) bytes); | 421 | (unsigned) bytes); |
@@ -466,6 +477,10 @@ static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) | |||
466 | uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN); | 477 | uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN); |
467 | bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; | 478 | bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; |
468 | 479 | ||
480 | if (fifo_state->dx) | ||
481 | bytes += sizeof(struct vmw_temp_set_context); | ||
482 | |||
483 | fifo_state->dx = false; | ||
469 | BUG_ON((bytes & 3) != 0); | 484 | BUG_ON((bytes & 3) != 0); |
470 | BUG_ON(bytes > fifo_state->reserved_size); | 485 | BUG_ON(bytes > fifo_state->reserved_size); |
471 | 486 | ||
@@ -518,7 +533,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) | |||
518 | * @dev_priv: Pointer to device private structure. | 533 | * @dev_priv: Pointer to device private structure. |
519 | * @bytes: Number of bytes to commit. | 534 | * @bytes: Number of bytes to commit. |
520 | */ | 535 | */ |
521 | static void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes) | 536 | void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes) |
522 | { | 537 | { |
523 | if (dev_priv->cman) | 538 | if (dev_priv->cman) |
524 | vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true); | 539 | vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true); |
@@ -706,3 +721,8 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, | |||
706 | 721 | ||
707 | return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid); | 722 | return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid); |
708 | } | 723 | } |
724 | |||
725 | void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) | ||
726 | { | ||
727 | return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID); | ||
728 | } | ||
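With vmw_fifo_reserve_dx, a reservation that names a DX context is only valid through the command-buffer manager; the legacy FIFO path is kept solely for SVGA3D_INVALID_ID. A small sketch of that dispatch, using stand-in reservation helpers rather than the vmwgfx ones:

        #include <stdio.h>
        #include <stdlib.h>

        #define INVALID_CTX_ID (-1)

        /* Stand-ins for the two reservation back ends. */
        static void *cmdbuf_reserve(size_t bytes, int ctx_id)
        {
                (void)ctx_id;                /* the real manager tags the context id */
                return malloc(bytes);
        }

        static void *local_fifo_reserve(size_t bytes)
        {
                return malloc(bytes);
        }

        static void *fifo_reserve_dx(int have_cman, size_t bytes, int ctx_id)
        {
                if (have_cman)
                        return cmdbuf_reserve(bytes, ctx_id);
                if (ctx_id == INVALID_CTX_ID)
                        return local_fifo_reserve(bytes);   /* legacy FIFO path */

                /* DX commands need the command-buffer manager; there is no fallback. */
                fprintf(stderr, "no command buffer manager for DX reservation\n");
                return NULL;
        }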
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 91efe9cdd822..dca7f7f41aab 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | |||
@@ -110,6 +110,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
110 | param->value = | 110 | param->value = |
111 | (dev_priv->active_display_unit == vmw_du_screen_target); | 111 | (dev_priv->active_display_unit == vmw_du_screen_target); |
112 | break; | 112 | break; |
113 | case DRM_VMW_PARAM_DX: | ||
114 | param->value = dev_priv->has_dx; | ||
115 | break; | ||
113 | default: | 116 | default: |
114 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", | 117 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", |
115 | param->param); | 118 | param->param); |
@@ -193,8 +196,8 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, | |||
193 | uint32_t *bounce32 = (uint32_t *) bounce; | 196 | uint32_t *bounce32 = (uint32_t *) bounce; |
194 | 197 | ||
195 | num = size / sizeof(uint32_t); | 198 | num = size / sizeof(uint32_t); |
196 | if (num > SVGA3D_DEVCAP_MAX) | 199 | if (num > SVGA3D_DEVCAP_DX) |
197 | num = SVGA3D_DEVCAP_MAX; | 200 | num = SVGA3D_DEVCAP_DX; |
198 | 201 | ||
199 | spin_lock(&dev_priv->cap_lock); | 202 | spin_lock(&dev_priv->cap_lock); |
200 | for (i = 0; i < num; ++i) { | 203 | for (i = 0; i < num; ++i) { |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 34d04bf17dfa..f961bb98cdaa 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -528,7 +528,11 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, | |||
528 | return -EINVAL; | 528 | return -EINVAL; |
529 | } | 529 | } |
530 | 530 | ||
531 | if (unlikely(format != surface->format)) { | 531 | /* |
532 | * For DX, surface format validation is done when surface->scanout | ||
533 | * is set. | ||
534 | */ | ||
535 | if (!dev_priv->has_dx && format != surface->format) { | ||
532 | DRM_ERROR("Invalid surface format for requested mode.\n"); | 536 | DRM_ERROR("Invalid surface format for requested mode.\n"); |
533 | return -EINVAL; | 537 | return -EINVAL; |
534 | } | 538 | } |
@@ -754,6 +758,7 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev, | |||
754 | true, /* can be a scanout buffer */ | 758 | true, /* can be a scanout buffer */ |
755 | 1, /* num of mip levels */ | 759 | 1, /* num of mip levels */ |
756 | 0, | 760 | 0, |
761 | 0, | ||
757 | content_base_size, | 762 | content_base_size, |
758 | srf_out); | 763 | srf_out); |
759 | if (ret) { | 764 | if (ret) { |
@@ -769,7 +774,7 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev, | |||
769 | vmw_dmabuf_unreference(&res->backup); | 774 | vmw_dmabuf_unreference(&res->backup); |
770 | res->backup = vmw_dmabuf_reference(dmabuf_mob); | 775 | res->backup = vmw_dmabuf_reference(dmabuf_mob); |
771 | res->backup_offset = 0; | 776 | res->backup_offset = 0; |
772 | vmw_resource_unreserve(res, NULL, 0); | 777 | vmw_resource_unreserve(res, false, NULL, 0); |
773 | mutex_unlock(&res->dev_priv->cmdbuf_mutex); | 778 | mutex_unlock(&res->dev_priv->cmdbuf_mutex); |
774 | 779 | ||
775 | return 0; | 780 | return 0; |
@@ -1869,7 +1874,7 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, | |||
1869 | void vmw_kms_helper_resource_revert(struct vmw_resource *res) | 1874 | void vmw_kms_helper_resource_revert(struct vmw_resource *res) |
1870 | { | 1875 | { |
1871 | vmw_kms_helper_buffer_revert(res->backup); | 1876 | vmw_kms_helper_buffer_revert(res->backup); |
1872 | vmw_resource_unreserve(res, NULL, 0); | 1877 | vmw_resource_unreserve(res, false, NULL, 0); |
1873 | mutex_unlock(&res->dev_priv->cmdbuf_mutex); | 1878 | mutex_unlock(&res->dev_priv->cmdbuf_mutex); |
1874 | } | 1879 | } |
1875 | 1880 | ||
@@ -1916,7 +1921,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, | |||
1916 | out_revert: | 1921 | out_revert: |
1917 | vmw_kms_helper_buffer_revert(res->backup); | 1922 | vmw_kms_helper_buffer_revert(res->backup); |
1918 | out_unreserve: | 1923 | out_unreserve: |
1919 | vmw_resource_unreserve(res, NULL, 0); | 1924 | vmw_resource_unreserve(res, false, NULL, 0); |
1920 | out_unlock: | 1925 | out_unlock: |
1921 | mutex_unlock(&res->dev_priv->cmdbuf_mutex); | 1926 | mutex_unlock(&res->dev_priv->cmdbuf_mutex); |
1922 | return ret; | 1927 | return ret; |
@@ -1937,7 +1942,7 @@ void vmw_kms_helper_resource_finish(struct vmw_resource *res, | |||
1937 | vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup, | 1942 | vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup, |
1938 | out_fence, NULL); | 1943 | out_fence, NULL); |
1939 | 1944 | ||
1940 | vmw_resource_unreserve(res, NULL, 0); | 1945 | vmw_resource_unreserve(res, false, NULL, 0); |
1941 | mutex_unlock(&res->dev_priv->cmdbuf_mutex); | 1946 | mutex_unlock(&res->dev_priv->cmdbuf_mutex); |
1942 | } | 1947 | } |
1943 | 1948 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index 5b0287eba30d..a8203a9e1050 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | |||
@@ -67,9 +67,23 @@ struct vmw_mob { | |||
67 | * @size: Size of the table (page-aligned). | 67 | * @size: Size of the table (page-aligned). |
68 | * @page_table: Pointer to a struct vmw_mob holding the page table. | 68 | * @page_table: Pointer to a struct vmw_mob holding the page table. |
69 | */ | 69 | */ |
70 | struct vmw_otable { | 70 | static const struct vmw_otable pre_dx_tables[] = { |
71 | unsigned long size; | 71 | {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true}, |
72 | struct vmw_mob *page_table; | 72 | {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true}, |
73 | {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true}, | ||
74 | {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true}, | ||
75 | {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE, | ||
76 | NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE} | ||
77 | }; | ||
78 | |||
79 | static const struct vmw_otable dx_tables[] = { | ||
80 | {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true}, | ||
81 | {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true}, | ||
82 | {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true}, | ||
83 | {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true}, | ||
84 | {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE, | ||
85 | NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}, | ||
86 | {VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true}, | ||
73 | }; | 87 | }; |
74 | 88 | ||
75 | static int vmw_mob_pt_populate(struct vmw_private *dev_priv, | 89 | static int vmw_mob_pt_populate(struct vmw_private *dev_priv, |
@@ -92,6 +106,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob, | |||
92 | */ | 106 | */ |
93 | static int vmw_setup_otable_base(struct vmw_private *dev_priv, | 107 | static int vmw_setup_otable_base(struct vmw_private *dev_priv, |
94 | SVGAOTableType type, | 108 | SVGAOTableType type, |
109 | struct ttm_buffer_object *otable_bo, | ||
95 | unsigned long offset, | 110 | unsigned long offset, |
96 | struct vmw_otable *otable) | 111 | struct vmw_otable *otable) |
97 | { | 112 | { |
@@ -106,7 +121,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv, | |||
106 | 121 | ||
107 | BUG_ON(otable->page_table != NULL); | 122 | BUG_ON(otable->page_table != NULL); |
108 | 123 | ||
109 | vsgt = vmw_bo_sg_table(dev_priv->otable_bo); | 124 | vsgt = vmw_bo_sg_table(otable_bo); |
110 | vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT); | 125 | vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT); |
111 | WARN_ON(!vmw_piter_next(&iter)); | 126 | WARN_ON(!vmw_piter_next(&iter)); |
112 | 127 | ||
@@ -193,7 +208,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv, | |||
193 | "takedown.\n"); | 208 | "takedown.\n"); |
194 | return; | 209 | return; |
195 | } | 210 | } |
196 | 211 | ||
197 | memset(cmd, 0, sizeof(*cmd)); | 212 | memset(cmd, 0, sizeof(*cmd)); |
198 | cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; | 213 | cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; |
199 | cmd->header.size = sizeof(cmd->body); | 214 | cmd->header.size = sizeof(cmd->body); |
@@ -218,47 +233,21 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv, | |||
218 | otable->page_table = NULL; | 233 | otable->page_table = NULL; |
219 | } | 234 | } |
220 | 235 | ||
221 | /* | 236 | |
222 | * vmw_otables_setup - Set up guest backed memory object tables | 237 | static int vmw_otable_batch_setup(struct vmw_private *dev_priv, |
223 | * | 238 | struct vmw_otable_batch *batch) |
224 | * @dev_priv: Pointer to a device private structure | ||
225 | * | ||
226 | * Takes care of the device guest backed surface | ||
227 | * initialization, by setting up the guest backed memory object tables. | ||
228 | * Returns 0 on success and various error codes on failure. A succesful return | ||
229 | * means the object tables can be taken down using the vmw_otables_takedown | ||
230 | * function. | ||
231 | */ | ||
232 | int vmw_otables_setup(struct vmw_private *dev_priv) | ||
233 | { | 239 | { |
234 | unsigned long offset; | 240 | unsigned long offset; |
235 | unsigned long bo_size; | 241 | unsigned long bo_size; |
236 | struct vmw_otable *otables; | 242 | struct vmw_otable *otables = batch->otables; |
237 | SVGAOTableType i; | 243 | SVGAOTableType i; |
238 | int ret; | 244 | int ret; |
239 | 245 | ||
240 | otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables), | ||
241 | GFP_KERNEL); | ||
242 | if (unlikely(otables == NULL)) { | ||
243 | DRM_ERROR("Failed to allocate space for otable " | ||
244 | "metadata.\n"); | ||
245 | return -ENOMEM; | ||
246 | } | ||
247 | |||
248 | otables[SVGA_OTABLE_MOB].size = | ||
249 | VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE; | ||
250 | otables[SVGA_OTABLE_SURFACE].size = | ||
251 | VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE; | ||
252 | otables[SVGA_OTABLE_CONTEXT].size = | ||
253 | VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE; | ||
254 | otables[SVGA_OTABLE_SHADER].size = | ||
255 | VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE; | ||
256 | otables[SVGA_OTABLE_SCREENTARGET].size = | ||
257 | VMWGFX_NUM_GB_SCREEN_TARGET * | ||
258 | SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE; | ||
259 | |||
260 | bo_size = 0; | 246 | bo_size = 0; |
261 | for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) { | 247 | for (i = 0; i < batch->num_otables; ++i) { |
248 | if (!otables[i].enabled) | ||
249 | continue; | ||
250 | |||
262 | otables[i].size = | 251 | otables[i].size = |
263 | (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK; | 252 | (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK; |
264 | bo_size += otables[i].size; | 253 | bo_size += otables[i].size; |
@@ -268,63 +257,105 @@ int vmw_otables_setup(struct vmw_private *dev_priv) | |||
268 | ttm_bo_type_device, | 257 | ttm_bo_type_device, |
269 | &vmw_sys_ne_placement, | 258 | &vmw_sys_ne_placement, |
270 | 0, false, NULL, | 259 | 0, false, NULL, |
271 | &dev_priv->otable_bo); | 260 | &batch->otable_bo); |
272 | 261 | ||
273 | if (unlikely(ret != 0)) | 262 | if (unlikely(ret != 0)) |
274 | goto out_no_bo; | 263 | goto out_no_bo; |
275 | 264 | ||
276 | ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL); | 265 | ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL); |
277 | BUG_ON(ret != 0); | 266 | BUG_ON(ret != 0); |
278 | ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm); | 267 | ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm); |
279 | if (unlikely(ret != 0)) | 268 | if (unlikely(ret != 0)) |
280 | goto out_unreserve; | 269 | goto out_unreserve; |
281 | ret = vmw_bo_map_dma(dev_priv->otable_bo); | 270 | ret = vmw_bo_map_dma(batch->otable_bo); |
282 | if (unlikely(ret != 0)) | 271 | if (unlikely(ret != 0)) |
283 | goto out_unreserve; | 272 | goto out_unreserve; |
284 | 273 | ||
285 | ttm_bo_unreserve(dev_priv->otable_bo); | 274 | ttm_bo_unreserve(batch->otable_bo); |
286 | 275 | ||
287 | offset = 0; | 276 | offset = 0; |
288 | for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) { | 277 | for (i = 0; i < batch->num_otables; ++i) { |
289 | ret = vmw_setup_otable_base(dev_priv, i, offset, | 278 | if (!batch->otables[i].enabled) |
279 | continue; | ||
280 | |||
281 | ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo, | ||
282 | offset, | ||
290 | &otables[i]); | 283 | &otables[i]); |
291 | if (unlikely(ret != 0)) | 284 | if (unlikely(ret != 0)) |
292 | goto out_no_setup; | 285 | goto out_no_setup; |
293 | offset += otables[i].size; | 286 | offset += otables[i].size; |
294 | } | 287 | } |
295 | 288 | ||
296 | dev_priv->otables = otables; | ||
297 | return 0; | 289 | return 0; |
298 | 290 | ||
299 | out_unreserve: | 291 | out_unreserve: |
300 | ttm_bo_unreserve(dev_priv->otable_bo); | 292 | ttm_bo_unreserve(batch->otable_bo); |
301 | out_no_setup: | 293 | out_no_setup: |
302 | for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) | 294 | for (i = 0; i < batch->num_otables; ++i) { |
303 | vmw_takedown_otable_base(dev_priv, i, &otables[i]); | 295 | if (batch->otables[i].enabled) |
296 | vmw_takedown_otable_base(dev_priv, i, | ||
297 | &batch->otables[i]); | ||
298 | } | ||
304 | 299 | ||
305 | ttm_bo_unref(&dev_priv->otable_bo); | 300 | ttm_bo_unref(&batch->otable_bo); |
306 | out_no_bo: | 301 | out_no_bo: |
307 | kfree(otables); | ||
308 | return ret; | 302 | return ret; |
309 | } | 303 | } |
310 | 304 | ||
311 | |||
312 | /* | 305 | /* |
313 | * vmw_otables_takedown - Take down guest backed memory object tables | 306 | * vmw_otables_setup - Set up guest backed memory object tables |
314 | * | 307 | * |
315 | * @dev_priv: Pointer to a device private structure | 308 | * @dev_priv: Pointer to a device private structure |
316 | * | 309 | * |
317 | * Take down the Guest Memory Object tables. | 310 | * Takes care of the device guest backed surface |
311 | * initialization, by setting up the guest backed memory object tables. | ||
312 | * Returns 0 on success and various error codes on failure. A successful return | ||
313 | * means the object tables can be taken down using the vmw_otables_takedown | ||
314 | * function. | ||
318 | */ | 315 | */ |
319 | void vmw_otables_takedown(struct vmw_private *dev_priv) | 316 | int vmw_otables_setup(struct vmw_private *dev_priv) |
317 | { | ||
318 | struct vmw_otable **otables = &dev_priv->otable_batch.otables; | ||
319 | int ret; | ||
320 | |||
321 | if (dev_priv->has_dx) { | ||
322 | *otables = kmalloc(sizeof(dx_tables), GFP_KERNEL); | ||
323 | if (*otables == NULL) | ||
324 | return -ENOMEM; | ||
325 | |||
326 | memcpy(*otables, dx_tables, sizeof(dx_tables)); | ||
327 | dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables); | ||
328 | } else { | ||
329 | *otables = kmalloc(sizeof(pre_dx_tables), GFP_KERNEL); | ||
330 | if (*otables == NULL) | ||
331 | return -ENOMEM; | ||
332 | |||
333 | memcpy(*otables, pre_dx_tables, sizeof(pre_dx_tables)); | ||
334 | dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables); | ||
335 | } | ||
336 | |||
337 | ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch); | ||
338 | if (unlikely(ret != 0)) | ||
339 | goto out_setup; | ||
340 | |||
341 | return 0; | ||
342 | |||
343 | out_setup: | ||
344 | kfree(*otables); | ||
345 | return ret; | ||
346 | } | ||
347 | |||
348 | static void vmw_otable_batch_takedown(struct vmw_private *dev_priv, | ||
349 | struct vmw_otable_batch *batch) | ||
320 | { | 350 | { |
321 | SVGAOTableType i; | 351 | SVGAOTableType i; |
322 | struct ttm_buffer_object *bo = dev_priv->otable_bo; | 352 | struct ttm_buffer_object *bo = batch->otable_bo; |
323 | int ret; | 353 | int ret; |
324 | 354 | ||
325 | for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) | 355 | for (i = 0; i < batch->num_otables; ++i) |
326 | vmw_takedown_otable_base(dev_priv, i, | 356 | if (batch->otables[i].enabled) |
327 | &dev_priv->otables[i]); | 357 | vmw_takedown_otable_base(dev_priv, i, |
358 | &batch->otables[i]); | ||
328 | 359 | ||
329 | ret = ttm_bo_reserve(bo, false, true, false, NULL); | 360 | ret = ttm_bo_reserve(bo, false, true, false, NULL); |
330 | BUG_ON(ret != 0); | 361 | BUG_ON(ret != 0); |
@@ -332,11 +363,21 @@ void vmw_otables_takedown(struct vmw_private *dev_priv) | |||
332 | vmw_fence_single_bo(bo, NULL); | 363 | vmw_fence_single_bo(bo, NULL); |
333 | ttm_bo_unreserve(bo); | 364 | ttm_bo_unreserve(bo); |
334 | 365 | ||
335 | ttm_bo_unref(&dev_priv->otable_bo); | 366 | ttm_bo_unref(&batch->otable_bo); |
336 | kfree(dev_priv->otables); | ||
337 | dev_priv->otables = NULL; | ||
338 | } | 367 | } |
339 | 368 | ||
369 | /* | ||
370 | * vmw_otables_takedown - Take down guest backed memory object tables | ||
371 | * | ||
372 | * @dev_priv: Pointer to a device private structure | ||
373 | * | ||
374 | * Take down the Guest Memory Object tables. | ||
375 | */ | ||
376 | void vmw_otables_takedown(struct vmw_private *dev_priv) | ||
377 | { | ||
378 | vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch); | ||
379 | kfree(dev_priv->otable_batch.otables); | ||
380 | } | ||
340 | 381 | ||
341 | /* | 382 | /* |
342 | * vmw_mob_calculate_pt_pages - Calculate the number of page table pages | 383 | * vmw_mob_calculate_pt_pages - Calculate the number of page table pages |
@@ -410,7 +451,7 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv, | |||
410 | goto out_unreserve; | 451 | goto out_unreserve; |
411 | 452 | ||
412 | ttm_bo_unreserve(mob->pt_bo); | 453 | ttm_bo_unreserve(mob->pt_bo); |
413 | 454 | ||
414 | return 0; | 455 | return 0; |
415 | 456 | ||
416 | out_unreserve: | 457 | out_unreserve: |
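vmw_otable_batch_setup above sizes one backing buffer for all enabled object tables by page-aligning each table and laying them out back to back; disabled entries (such as the screen-target table on hardware without it) are skipped entirely. A stand-alone sketch of that sizing pass, with illustrative table values:

        #include <stdbool.h>
        #include <stdio.h>

        #define PAGE_SIZE 4096UL

        struct otable { unsigned long size; bool enabled; };

        static unsigned long batch_bo_size(struct otable *tables, int n)
        {
                unsigned long total = 0;
                int i;

                for (i = 0; i < n; ++i) {
                        if (!tables[i].enabled)
                                continue;
                        tables[i].size =
                                (tables[i].size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
                        total += tables[i].size;   /* each table gets its own offset */
                }
                return total;
        }

        int main(void)
        {
                struct otable t[] = { { 10000, true }, { 300, false }, { 123, true } };

                printf("backing bo: %lu bytes\n", batch_bo_size(t, 3)); /* 16384 */
                return 0;
        }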
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index be2809aaa7cb..6186e859dab0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <drm/ttm/ttm_placement.h> | 31 | #include <drm/ttm/ttm_placement.h> |
32 | #include <drm/drmP.h> | 32 | #include <drm/drmP.h> |
33 | #include "vmwgfx_resource_priv.h" | 33 | #include "vmwgfx_resource_priv.h" |
34 | #include "vmwgfx_binding.h" | ||
34 | 35 | ||
35 | #define VMW_RES_EVICT_ERR_COUNT 10 | 36 | #define VMW_RES_EVICT_ERR_COUNT 10 |
36 | 37 | ||
@@ -144,10 +145,10 @@ static void vmw_resource_release(struct kref *kref) | |||
144 | } | 145 | } |
145 | 146 | ||
146 | if (likely(res->hw_destroy != NULL)) { | 147 | if (likely(res->hw_destroy != NULL)) { |
147 | res->hw_destroy(res); | ||
148 | mutex_lock(&dev_priv->binding_mutex); | 148 | mutex_lock(&dev_priv->binding_mutex); |
149 | vmw_context_binding_res_list_kill(&res->binding_head); | 149 | vmw_binding_res_list_kill(&res->binding_head); |
150 | mutex_unlock(&dev_priv->binding_mutex); | 150 | mutex_unlock(&dev_priv->binding_mutex); |
151 | res->hw_destroy(res); | ||
151 | } | 152 | } |
152 | 153 | ||
153 | id = res->id; | 154 | id = res->id; |
@@ -1149,14 +1150,16 @@ out_bind_failed: | |||
1149 | * command submission. | 1150 | * command submission. |
1150 | * | 1151 | * |
1151 | * @res: Pointer to the struct vmw_resource to unreserve. | 1152 | * @res: Pointer to the struct vmw_resource to unreserve. |
1153 | * @switch_backup: Backup buffer has been switched. | ||
1152 | * @new_backup: Pointer to new backup buffer if command submission | 1154 | * @new_backup: Pointer to new backup buffer if command submission |
1153 | * switched. | 1155 | * switched. May be NULL. |
1154 | * @new_backup_offset: New backup offset if @new_backup is !NULL. | 1156 | * @new_backup_offset: New backup offset if @switch_backup is true. |
1155 | * | 1157 | * |
1156 | * Currently unreserving a resource means putting it back on the device's | 1158 | * Currently unreserving a resource means putting it back on the device's |
1157 | * resource lru list, so that it can be evicted if necessary. | 1159 | * resource lru list, so that it can be evicted if necessary. |
1158 | */ | 1160 | */ |
1159 | void vmw_resource_unreserve(struct vmw_resource *res, | 1161 | void vmw_resource_unreserve(struct vmw_resource *res, |
1162 | bool switch_backup, | ||
1160 | struct vmw_dma_buffer *new_backup, | 1163 | struct vmw_dma_buffer *new_backup, |
1161 | unsigned long new_backup_offset) | 1164 | unsigned long new_backup_offset) |
1162 | { | 1165 | { |
@@ -1165,19 +1168,22 @@ void vmw_resource_unreserve(struct vmw_resource *res, | |||
1165 | if (!list_empty(&res->lru_head)) | 1168 | if (!list_empty(&res->lru_head)) |
1166 | return; | 1169 | return; |
1167 | 1170 | ||
1168 | if (new_backup && new_backup != res->backup) { | 1171 | if (switch_backup && new_backup != res->backup) { |
1169 | |||
1170 | if (res->backup) { | 1172 | if (res->backup) { |
1171 | lockdep_assert_held(&res->backup->base.resv->lock.base); | 1173 | lockdep_assert_held(&res->backup->base.resv->lock.base); |
1172 | list_del_init(&res->mob_head); | 1174 | list_del_init(&res->mob_head); |
1173 | vmw_dmabuf_unreference(&res->backup); | 1175 | vmw_dmabuf_unreference(&res->backup); |
1174 | } | 1176 | } |
1175 | 1177 | ||
1176 | res->backup = vmw_dmabuf_reference(new_backup); | 1178 | if (new_backup) { |
1177 | lockdep_assert_held(&new_backup->base.resv->lock.base); | 1179 | res->backup = vmw_dmabuf_reference(new_backup); |
1178 | list_add_tail(&res->mob_head, &new_backup->res_list); | 1180 | lockdep_assert_held(&new_backup->base.resv->lock.base); |
1181 | list_add_tail(&res->mob_head, &new_backup->res_list); | ||
1182 | } else { | ||
1183 | res->backup = NULL; | ||
1184 | } | ||
1179 | } | 1185 | } |
1180 | if (new_backup) | 1186 | if (switch_backup) |
1181 | res->backup_offset = new_backup_offset; | 1187 | res->backup_offset = new_backup_offset; |
1182 | 1188 | ||
1183 | if (!res->func->may_evict || res->id == -1 || res->pin_count) | 1189 | if (!res->func->may_evict || res->id == -1 || res->pin_count) |
@@ -1269,8 +1275,12 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible, | |||
1269 | if (res->func->needs_backup && res->backup == NULL && | 1275 | if (res->func->needs_backup && res->backup == NULL && |
1270 | !no_backup) { | 1276 | !no_backup) { |
1271 | ret = vmw_resource_buf_alloc(res, interruptible); | 1277 | ret = vmw_resource_buf_alloc(res, interruptible); |
1272 | if (unlikely(ret != 0)) | 1278 | if (unlikely(ret != 0)) { |
1279 | DRM_ERROR("Failed to allocate a backup buffer " | ||
1280 | "of size %lu. bytes\n", | ||
1281 | (unsigned long) res->backup_size); | ||
1273 | return ret; | 1282 | return ret; |
1283 | } | ||
1274 | } | 1284 | } |
1275 | 1285 | ||
1276 | return 0; | 1286 | return 0; |
@@ -1354,7 +1364,7 @@ int vmw_resource_validate(struct vmw_resource *res) | |||
1354 | struct ttm_validate_buffer val_buf; | 1364 | struct ttm_validate_buffer val_buf; |
1355 | unsigned err_count = 0; | 1365 | unsigned err_count = 0; |
1356 | 1366 | ||
1357 | if (likely(!res->func->may_evict)) | 1367 | if (!res->func->create) |
1358 | return 0; | 1368 | return 0; |
1359 | 1369 | ||
1360 | val_buf.bo = NULL; | 1370 | val_buf.bo = NULL; |
@@ -1624,7 +1634,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible) | |||
1624 | res->pin_count++; | 1634 | res->pin_count++; |
1625 | 1635 | ||
1626 | out_no_validate: | 1636 | out_no_validate: |
1627 | vmw_resource_unreserve(res, NULL, 0UL); | 1637 | vmw_resource_unreserve(res, false, NULL, 0UL); |
1628 | out_no_reserve: | 1638 | out_no_reserve: |
1629 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 1639 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
1630 | ttm_write_unlock(&dev_priv->reservation_sem); | 1640 | ttm_write_unlock(&dev_priv->reservation_sem); |
@@ -1660,8 +1670,18 @@ void vmw_resource_unpin(struct vmw_resource *res) | |||
1660 | ttm_bo_unreserve(&vbo->base); | 1670 | ttm_bo_unreserve(&vbo->base); |
1661 | } | 1671 | } |
1662 | 1672 | ||
1663 | vmw_resource_unreserve(res, NULL, 0UL); | 1673 | vmw_resource_unreserve(res, false, NULL, 0UL); |
1664 | 1674 | ||
1665 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 1675 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
1666 | ttm_read_unlock(&dev_priv->reservation_sem); | 1676 | ttm_read_unlock(&dev_priv->reservation_sem); |
1667 | } | 1677 | } |
1678 | |||
1679 | /** | ||
1680 | * vmw_res_type - Return the resource type | ||
1681 | * | ||
1682 | * @res: Pointer to the resource | ||
1683 | */ | ||
1684 | enum vmw_res_type vmw_res_type(const struct vmw_resource *res) | ||
1685 | { | ||
1686 | return res->func->res_type; | ||
1687 | } | ||
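vmw_resource_unreserve now takes an explicit switch_backup flag, so a NULL new_backup can mean either "leave the backup alone" (switch_backup false) or "detach the backup entirely" (switch_backup true). A minimal sketch of those semantics, with stand-in reference counting instead of the vmwgfx buffer objects:

        #include <stdbool.h>
        #include <stddef.h>

        struct buf { int refs; };

        static struct buf *buf_get(struct buf *b) { if (b) b->refs++; return b; }
        static void buf_put(struct buf **b) { if (*b) (*b)->refs--; *b = NULL; }

        struct resource {
                struct buf *backup;
                unsigned long backup_offset;
        };

        static void resource_unreserve(struct resource *res, bool switch_backup,
                                       struct buf *new_backup,
                                       unsigned long new_backup_offset)
        {
                if (switch_backup && new_backup != res->backup) {
                        if (res->backup)
                                buf_put(&res->backup);
                        res->backup = buf_get(new_backup);   /* may be NULL: detach */
                }
                if (switch_backup)
                        res->backup_offset = new_backup_offset;
        }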
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h index f3adeed2854c..743e2adafed2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h | |||
@@ -30,6 +30,12 @@ | |||
30 | 30 | ||
31 | #include "vmwgfx_drv.h" | 31 | #include "vmwgfx_drv.h" |
32 | 32 | ||
33 | enum vmw_cmdbuf_res_state { | ||
34 | VMW_CMDBUF_RES_COMMITTED, | ||
35 | VMW_CMDBUF_RES_ADD, | ||
36 | VMW_CMDBUF_RES_DEL | ||
37 | }; | ||
38 | |||
33 | /** | 39 | /** |
34 | * struct vmw_user_resource_conv - Identify a derived user-exported resource | 40 | * struct vmw_user_resource_conv - Identify a derived user-exported resource |
35 | * type and provide a function to convert its ttm_base_object pointer to | 41 | * type and provide a function to convert its ttm_base_object pointer to |
@@ -55,8 +61,10 @@ struct vmw_user_resource_conv { | |||
55 | * @bind: Bind a hardware resource to persistent buffer storage. | 61 | * @bind: Bind a hardware resource to persistent buffer storage. |
56 | * @unbind: Unbind a hardware resource from persistent | 62 | * @unbind: Unbind a hardware resource from persistent |
57 | * buffer storage. | 63 | * buffer storage. |
64 | * @commit_notify: If the resource is a command buffer managed resource, | ||
65 | * callback to notify that a define or remove command | ||
66 | * has been committed to the device. | ||
58 | */ | 67 | */ |
59 | |||
60 | struct vmw_res_func { | 68 | struct vmw_res_func { |
61 | enum vmw_res_type res_type; | 69 | enum vmw_res_type res_type; |
62 | bool needs_backup; | 70 | bool needs_backup; |
@@ -71,6 +79,8 @@ struct vmw_res_func { | |||
71 | int (*unbind) (struct vmw_resource *res, | 79 | int (*unbind) (struct vmw_resource *res, |
72 | bool readback, | 80 | bool readback, |
73 | struct ttm_validate_buffer *val_buf); | 81 | struct ttm_validate_buffer *val_buf); |
82 | void (*commit_notify)(struct vmw_resource *res, | ||
83 | enum vmw_cmdbuf_res_state state); | ||
74 | }; | 84 | }; |
75 | 85 | ||
76 | int vmw_resource_alloc_id(struct vmw_resource *res); | 86 | int vmw_resource_alloc_id(struct vmw_resource *res); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index 11bc60c2771a..61403ebe3a1e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | |||
@@ -27,12 +27,15 @@ | |||
27 | 27 | ||
28 | #include "vmwgfx_drv.h" | 28 | #include "vmwgfx_drv.h" |
29 | #include "vmwgfx_resource_priv.h" | 29 | #include "vmwgfx_resource_priv.h" |
30 | #include "vmwgfx_binding.h" | ||
30 | #include "ttm/ttm_placement.h" | 31 | #include "ttm/ttm_placement.h" |
31 | 32 | ||
32 | struct vmw_shader { | 33 | struct vmw_shader { |
33 | struct vmw_resource res; | 34 | struct vmw_resource res; |
34 | SVGA3dShaderType type; | 35 | SVGA3dShaderType type; |
35 | uint32_t size; | 36 | uint32_t size; |
37 | uint8_t num_input_sig; | ||
38 | uint8_t num_output_sig; | ||
36 | }; | 39 | }; |
37 | 40 | ||
38 | struct vmw_user_shader { | 41 | struct vmw_user_shader { |
@@ -40,8 +43,18 @@ struct vmw_user_shader { | |||
40 | struct vmw_shader shader; | 43 | struct vmw_shader shader; |
41 | }; | 44 | }; |
42 | 45 | ||
46 | struct vmw_dx_shader { | ||
47 | struct vmw_resource res; | ||
48 | struct vmw_resource *ctx; | ||
49 | struct vmw_resource *cotable; | ||
50 | u32 id; | ||
51 | bool committed; | ||
52 | struct list_head cotable_head; | ||
53 | }; | ||
54 | |||
43 | static uint64_t vmw_user_shader_size; | 55 | static uint64_t vmw_user_shader_size; |
44 | static uint64_t vmw_shader_size; | 56 | static uint64_t vmw_shader_size; |
57 | static size_t vmw_shader_dx_size; | ||
45 | 58 | ||
46 | static void vmw_user_shader_free(struct vmw_resource *res); | 59 | static void vmw_user_shader_free(struct vmw_resource *res); |
47 | static struct vmw_resource * | 60 | static struct vmw_resource * |
@@ -55,6 +68,18 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res, | |||
55 | struct ttm_validate_buffer *val_buf); | 68 | struct ttm_validate_buffer *val_buf); |
56 | static int vmw_gb_shader_destroy(struct vmw_resource *res); | 69 | static int vmw_gb_shader_destroy(struct vmw_resource *res); |
57 | 70 | ||
71 | static int vmw_dx_shader_create(struct vmw_resource *res); | ||
72 | static int vmw_dx_shader_bind(struct vmw_resource *res, | ||
73 | struct ttm_validate_buffer *val_buf); | ||
74 | static int vmw_dx_shader_unbind(struct vmw_resource *res, | ||
75 | bool readback, | ||
76 | struct ttm_validate_buffer *val_buf); | ||
77 | static void vmw_dx_shader_commit_notify(struct vmw_resource *res, | ||
78 | enum vmw_cmdbuf_res_state state); | ||
79 | static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type); | ||
80 | static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type); | ||
81 | static uint64_t vmw_user_shader_size; | ||
82 | |||
58 | static const struct vmw_user_resource_conv user_shader_conv = { | 83 | static const struct vmw_user_resource_conv user_shader_conv = { |
59 | .object_type = VMW_RES_SHADER, | 84 | .object_type = VMW_RES_SHADER, |
60 | .base_obj_to_res = vmw_user_shader_base_to_res, | 85 | .base_obj_to_res = vmw_user_shader_base_to_res, |
@@ -77,6 +102,24 @@ static const struct vmw_res_func vmw_gb_shader_func = { | |||
77 | .unbind = vmw_gb_shader_unbind | 102 | .unbind = vmw_gb_shader_unbind |
78 | }; | 103 | }; |
79 | 104 | ||
105 | static const struct vmw_res_func vmw_dx_shader_func = { | ||
106 | .res_type = vmw_res_shader, | ||
107 | .needs_backup = true, | ||
108 | .may_evict = false, | ||
109 | .type_name = "dx shaders", | ||
110 | .backup_placement = &vmw_mob_placement, | ||
111 | .create = vmw_dx_shader_create, | ||
112 | /* | ||
113 | * The destroy callback is only called with a committed resource on | ||
114 | * context destroy, in which case we destroy the cotable anyway, | ||
115 | * so there's no need to destroy DX shaders separately. | ||
116 | */ | ||
117 | .destroy = NULL, | ||
118 | .bind = vmw_dx_shader_bind, | ||
119 | .unbind = vmw_dx_shader_unbind, | ||
120 | .commit_notify = vmw_dx_shader_commit_notify, | ||
121 | }; | ||
122 | |||
80 | /** | 123 | /** |
81 | * Shader management: | 124 | * Shader management: |
82 | */ | 125 | */ |
@@ -87,25 +130,42 @@ vmw_res_to_shader(struct vmw_resource *res) | |||
87 | return container_of(res, struct vmw_shader, res); | 130 | return container_of(res, struct vmw_shader, res); |
88 | } | 131 | } |
89 | 132 | ||
133 | /** | ||
134 | * vmw_res_to_dx_shader - typecast a struct vmw_resource to a | ||
135 | * struct vmw_dx_shader | ||
136 | * | ||
137 | * @res: Pointer to the struct vmw_resource. | ||
138 | */ | ||
139 | static inline struct vmw_dx_shader * | ||
140 | vmw_res_to_dx_shader(struct vmw_resource *res) | ||
141 | { | ||
142 | return container_of(res, struct vmw_dx_shader, res); | ||
143 | } | ||
144 | |||
90 | static void vmw_hw_shader_destroy(struct vmw_resource *res) | 145 | static void vmw_hw_shader_destroy(struct vmw_resource *res) |
91 | { | 146 | { |
92 | (void) vmw_gb_shader_destroy(res); | 147 | if (likely(res->func->destroy)) |
148 | (void) res->func->destroy(res); | ||
149 | else | ||
150 | res->id = -1; | ||
93 | } | 151 | } |
94 | 152 | ||
153 | |||
95 | static int vmw_gb_shader_init(struct vmw_private *dev_priv, | 154 | static int vmw_gb_shader_init(struct vmw_private *dev_priv, |
96 | struct vmw_resource *res, | 155 | struct vmw_resource *res, |
97 | uint32_t size, | 156 | uint32_t size, |
98 | uint64_t offset, | 157 | uint64_t offset, |
99 | SVGA3dShaderType type, | 158 | SVGA3dShaderType type, |
159 | uint8_t num_input_sig, | ||
160 | uint8_t num_output_sig, | ||
100 | struct vmw_dma_buffer *byte_code, | 161 | struct vmw_dma_buffer *byte_code, |
101 | void (*res_free) (struct vmw_resource *res)) | 162 | void (*res_free) (struct vmw_resource *res)) |
102 | { | 163 | { |
103 | struct vmw_shader *shader = vmw_res_to_shader(res); | 164 | struct vmw_shader *shader = vmw_res_to_shader(res); |
104 | int ret; | 165 | int ret; |
105 | 166 | ||
106 | ret = vmw_resource_init(dev_priv, res, true, | 167 | ret = vmw_resource_init(dev_priv, res, true, res_free, |
107 | res_free, &vmw_gb_shader_func); | 168 | &vmw_gb_shader_func); |
108 | |||
109 | 169 | ||
110 | if (unlikely(ret != 0)) { | 170 | if (unlikely(ret != 0)) { |
111 | if (res_free) | 171 | if (res_free) |
@@ -122,11 +182,17 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv, | |||
122 | } | 182 | } |
123 | shader->size = size; | 183 | shader->size = size; |
124 | shader->type = type; | 184 | shader->type = type; |
185 | shader->num_input_sig = num_input_sig; | ||
186 | shader->num_output_sig = num_output_sig; | ||
125 | 187 | ||
126 | vmw_resource_activate(res, vmw_hw_shader_destroy); | 188 | vmw_resource_activate(res, vmw_hw_shader_destroy); |
127 | return 0; | 189 | return 0; |
128 | } | 190 | } |
129 | 191 | ||
192 | /* | ||
193 | * GB shader code: | ||
194 | */ | ||
195 | |||
130 | static int vmw_gb_shader_create(struct vmw_resource *res) | 196 | static int vmw_gb_shader_create(struct vmw_resource *res) |
131 | { | 197 | { |
132 | struct vmw_private *dev_priv = res->dev_priv; | 198 | struct vmw_private *dev_priv = res->dev_priv; |
@@ -259,7 +325,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res) | |||
259 | return 0; | 325 | return 0; |
260 | 326 | ||
261 | mutex_lock(&dev_priv->binding_mutex); | 327 | mutex_lock(&dev_priv->binding_mutex); |
262 | vmw_context_binding_res_list_scrub(&res->binding_head); | 328 | vmw_binding_res_list_scrub(&res->binding_head); |
263 | 329 | ||
264 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 330 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
265 | if (unlikely(cmd == NULL)) { | 331 | if (unlikely(cmd == NULL)) { |
@@ -280,6 +346,321 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res) | |||
280 | return 0; | 346 | return 0; |
281 | } | 347 | } |
282 | 348 | ||
349 | /* | ||
350 | * DX shader code: | ||
351 | */ | ||
352 | |||
353 | /** | ||
354 | * vmw_dx_shader_commit_notify - Notify that a shader operation has been | ||
355 | * committed to hardware from a user-supplied command stream. | ||
356 | * | ||
357 | * @res: Pointer to the shader resource. | ||
358 | * @state: Indicating whether a creation or removal has been committed. | ||
359 | * | ||
360 | */ | ||
361 | static void vmw_dx_shader_commit_notify(struct vmw_resource *res, | ||
362 | enum vmw_cmdbuf_res_state state) | ||
363 | { | ||
364 | struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); | ||
365 | struct vmw_private *dev_priv = res->dev_priv; | ||
366 | |||
367 | if (state == VMW_CMDBUF_RES_ADD) { | ||
368 | mutex_lock(&dev_priv->binding_mutex); | ||
369 | vmw_cotable_add_resource(shader->cotable, | ||
370 | &shader->cotable_head); | ||
371 | shader->committed = true; | ||
372 | res->id = shader->id; | ||
373 | mutex_unlock(&dev_priv->binding_mutex); | ||
374 | } else { | ||
375 | mutex_lock(&dev_priv->binding_mutex); | ||
376 | list_del_init(&shader->cotable_head); | ||
377 | shader->committed = false; | ||
378 | res->id = -1; | ||
379 | mutex_unlock(&dev_priv->binding_mutex); | ||
380 | } | ||
381 | } | ||
382 | |||
383 | /** | ||
384 | * vmw_dx_shader_unscrub - Have the device reattach a MOB to a DX shader. | ||
385 | * | ||
386 | * @res: The shader resource | ||
387 | * | ||
388 | * This function reverts a scrub operation. | ||
389 | */ | ||
390 | static int vmw_dx_shader_unscrub(struct vmw_resource *res) | ||
391 | { | ||
392 | struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); | ||
393 | struct vmw_private *dev_priv = res->dev_priv; | ||
394 | struct { | ||
395 | SVGA3dCmdHeader header; | ||
396 | SVGA3dCmdDXBindShader body; | ||
397 | } *cmd; | ||
398 | |||
399 | if (!list_empty(&shader->cotable_head) || !shader->committed) | ||
400 | return 0; | ||
401 | |||
402 | cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), | ||
403 | shader->ctx->id); | ||
404 | if (unlikely(cmd == NULL)) { | ||
405 | DRM_ERROR("Failed reserving FIFO space for shader " | ||
406 | "scrubbing.\n"); | ||
407 | return -ENOMEM; | ||
408 | } | ||
409 | |||
410 | cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER; | ||
411 | cmd->header.size = sizeof(cmd->body); | ||
412 | cmd->body.cid = shader->ctx->id; | ||
413 | cmd->body.shid = shader->id; | ||
414 | cmd->body.mobid = res->backup->base.mem.start; | ||
415 | cmd->body.offsetInBytes = res->backup_offset; | ||
416 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
417 | |||
418 | vmw_cotable_add_resource(shader->cotable, &shader->cotable_head); | ||
419 | |||
420 | return 0; | ||
421 | } | ||
422 | |||
423 | /** | ||
424 | * vmw_dx_shader_create - The DX shader create callback | ||
425 | * | ||
426 | * @res: The DX shader resource | ||
427 | * | ||
428 | * The create callback is called as part of resource validation and | ||
429 | * makes sure that we unscrub the shader if it's previously been scrubbed. | ||
430 | */ | ||
431 | static int vmw_dx_shader_create(struct vmw_resource *res) | ||
432 | { | ||
433 | struct vmw_private *dev_priv = res->dev_priv; | ||
434 | struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); | ||
435 | int ret = 0; | ||
436 | |||
437 | WARN_ON_ONCE(!shader->committed); | ||
438 | |||
439 | if (!list_empty(&res->mob_head)) { | ||
440 | mutex_lock(&dev_priv->binding_mutex); | ||
441 | ret = vmw_dx_shader_unscrub(res); | ||
442 | mutex_unlock(&dev_priv->binding_mutex); | ||
443 | } | ||
444 | |||
445 | res->id = shader->id; | ||
446 | return ret; | ||
447 | } | ||
448 | |||
449 | /** | ||
450 | * vmw_dx_shader_bind - The DX shader bind callback | ||
451 | * | ||
452 | * @res: The DX shader resource | ||
453 | * @val_buf: Pointer to the validate buffer. | ||
454 | * | ||
455 | */ | ||
456 | static int vmw_dx_shader_bind(struct vmw_resource *res, | ||
457 | struct ttm_validate_buffer *val_buf) | ||
458 | { | ||
459 | struct vmw_private *dev_priv = res->dev_priv; | ||
460 | struct ttm_buffer_object *bo = val_buf->bo; | ||
461 | |||
462 | BUG_ON(bo->mem.mem_type != VMW_PL_MOB); | ||
463 | mutex_lock(&dev_priv->binding_mutex); | ||
464 | vmw_dx_shader_unscrub(res); | ||
465 | mutex_unlock(&dev_priv->binding_mutex); | ||
466 | |||
467 | return 0; | ||
468 | } | ||
469 | |||
470 | /** | ||
471 | * vmw_dx_shader_scrub - Have the device unbind a MOB from a DX shader. | ||
472 | * | ||
473 | * @res: The shader resource | ||
474 | * | ||
475 | * This function unbinds a MOB from the DX shader without requiring the | ||
476 | * MOB dma_buffer to be reserved. The driver still considers the MOB bound. | ||
477 | * However, once the driver eventually decides to unbind the MOB, it doesn't | ||
478 | * need to access the context. | ||
479 | */ | ||
480 | static int vmw_dx_shader_scrub(struct vmw_resource *res) | ||
481 | { | ||
482 | struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); | ||
483 | struct vmw_private *dev_priv = res->dev_priv; | ||
484 | struct { | ||
485 | SVGA3dCmdHeader header; | ||
486 | SVGA3dCmdDXBindShader body; | ||
487 | } *cmd; | ||
488 | |||
489 | if (list_empty(&shader->cotable_head)) | ||
490 | return 0; | ||
491 | |||
492 | WARN_ON_ONCE(!shader->committed); | ||
493 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
494 | if (unlikely(cmd == NULL)) { | ||
495 | DRM_ERROR("Failed reserving FIFO space for shader " | ||
496 | "scrubbing.\n"); | ||
497 | return -ENOMEM; | ||
498 | } | ||
499 | |||
500 | cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER; | ||
501 | cmd->header.size = sizeof(cmd->body); | ||
502 | cmd->body.cid = shader->ctx->id; | ||
503 | cmd->body.shid = res->id; | ||
504 | cmd->body.mobid = SVGA3D_INVALID_ID; | ||
505 | cmd->body.offsetInBytes = 0; | ||
506 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
507 | res->id = -1; | ||
508 | list_del_init(&shader->cotable_head); | ||
509 | |||
510 | return 0; | ||
511 | } | ||
512 | |||
513 | /** | ||
514 | * vmw_dx_shader_unbind - The dx shader unbind callback. | ||
515 | * | ||
516 | * @res: The shader resource | ||
517 | * @readback: Whether this is a readback unbind. Currently unused. | ||
518 | * @val_buf: MOB buffer information. | ||
519 | */ | ||
520 | static int vmw_dx_shader_unbind(struct vmw_resource *res, | ||
521 | bool readback, | ||
522 | struct ttm_validate_buffer *val_buf) | ||
523 | { | ||
524 | struct vmw_private *dev_priv = res->dev_priv; | ||
525 | struct vmw_fence_obj *fence; | ||
526 | int ret; | ||
527 | |||
528 | BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB); | ||
529 | |||
530 | mutex_lock(&dev_priv->binding_mutex); | ||
531 | ret = vmw_dx_shader_scrub(res); | ||
532 | mutex_unlock(&dev_priv->binding_mutex); | ||
533 | |||
534 | if (ret) | ||
535 | return ret; | ||
536 | |||
537 | (void) vmw_execbuf_fence_commands(NULL, dev_priv, | ||
538 | &fence, NULL); | ||
539 | vmw_fence_single_bo(val_buf->bo, fence); | ||
540 | |||
541 | if (likely(fence != NULL)) | ||
542 | vmw_fence_obj_unreference(&fence); | ||
543 | |||
544 | return 0; | ||
545 | } | ||
546 | |||
547 | /** | ||
548 | * vmw_dx_shader_cotable_list_scrub - The cotable unbind_func callback for | ||
549 | * DX shaders. | ||
550 | * | ||
551 | * @dev_priv: Pointer to device private structure. | ||
552 | * @list: The list of cotable resources. | ||
553 | * @readback: Whether the call was part of a readback unbind. | ||
554 | * | ||
555 | * Scrubs all shader MOBs so that any subsequent shader unbind or shader | ||
556 | * destroy operation won't need to swap in the context. | ||
557 | */ | ||
558 | void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv, | ||
559 | struct list_head *list, | ||
560 | bool readback) | ||
561 | { | ||
562 | struct vmw_dx_shader *entry, *next; | ||
563 | |||
564 | WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); | ||
565 | |||
566 | list_for_each_entry_safe(entry, next, list, cotable_head) { | ||
567 | WARN_ON(vmw_dx_shader_scrub(&entry->res)); | ||
568 | if (!readback) | ||
569 | entry->committed = false; | ||
570 | } | ||
571 | } | ||
572 | |||
573 | /** | ||
574 | * vmw_dx_shader_res_free - The DX shader free callback | ||
575 | * | ||
576 | * @res: The shader resource | ||
577 | * | ||
578 | * Frees the DX shader resource and updates memory accounting. | ||
579 | */ | ||
580 | static void vmw_dx_shader_res_free(struct vmw_resource *res) | ||
581 | { | ||
582 | struct vmw_private *dev_priv = res->dev_priv; | ||
583 | struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); | ||
584 | |||
585 | vmw_resource_unreference(&shader->cotable); | ||
586 | kfree(shader); | ||
587 | ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size); | ||
588 | } | ||
589 | |||
590 | /** | ||
591 | * vmw_dx_shader_add - Add a shader resource as a command buffer managed | ||
592 | * resource. | ||
593 | * | ||
594 | * @man: The command buffer resource manager. | ||
595 | * @ctx: Pointer to the context resource. | ||
596 | * @user_key: The id used for this shader. | ||
597 | * @shader_type: The shader type. | ||
598 | * @list: The list of staged command buffer managed resources. | ||
599 | */ | ||
600 | int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man, | ||
601 | struct vmw_resource *ctx, | ||
602 | u32 user_key, | ||
603 | SVGA3dShaderType shader_type, | ||
604 | struct list_head *list) | ||
605 | { | ||
606 | struct vmw_dx_shader *shader; | ||
607 | struct vmw_resource *res; | ||
608 | struct vmw_private *dev_priv = ctx->dev_priv; | ||
609 | int ret; | ||
610 | |||
611 | if (!vmw_shader_dx_size) | ||
612 | vmw_shader_dx_size = ttm_round_pot(sizeof(*shader)); | ||
613 | |||
614 | if (!vmw_shader_id_ok(user_key, shader_type)) | ||
615 | return -EINVAL; | ||
616 | |||
617 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size, | ||
618 | false, true); | ||
619 | if (ret) { | ||
620 | if (ret != -ERESTARTSYS) | ||
621 | DRM_ERROR("Out of graphics memory for shader " | ||
622 | "creation.\n"); | ||
623 | return ret; | ||
624 | } | ||
625 | |||
626 | shader = kmalloc(sizeof(*shader), GFP_KERNEL); | ||
627 | if (!shader) { | ||
628 | ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size); | ||
629 | return -ENOMEM; | ||
630 | } | ||
631 | |||
632 | res = &shader->res; | ||
633 | shader->ctx = ctx; | ||
634 | shader->cotable = vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER); | ||
635 | shader->id = user_key; | ||
636 | shader->committed = false; | ||
637 | INIT_LIST_HEAD(&shader->cotable_head); | ||
638 | ret = vmw_resource_init(dev_priv, res, true, | ||
639 | vmw_dx_shader_res_free, &vmw_dx_shader_func); | ||
640 | if (ret) | ||
641 | goto out_resource_init; | ||
642 | |||
643 | /* | ||
644 | * The user_key name-space is not per shader type for DX shaders, | ||
645 | * so when hashing, use a single zero shader type. | ||
646 | */ | ||
647 | ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader, | ||
648 | vmw_shader_key(user_key, 0), | ||
649 | res, list); | ||
650 | if (ret) | ||
651 | goto out_resource_init; | ||
652 | |||
653 | res->id = shader->id; | ||
654 | vmw_resource_activate(res, vmw_hw_shader_destroy); | ||
655 | |||
656 | out_resource_init: | ||
657 | vmw_resource_unreference(&res); | ||
658 | |||
659 | return ret; | ||
660 | } | ||
661 | |||
662 | |||
663 | |||
283 | /** | 664 | /** |
284 | * User-space shader management: | 665 | * User-space shader management: |
285 | */ | 666 | */ |
@@ -341,6 +722,8 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv, | |||
341 | size_t shader_size, | 722 | size_t shader_size, |
342 | size_t offset, | 723 | size_t offset, |
343 | SVGA3dShaderType shader_type, | 724 | SVGA3dShaderType shader_type, |
725 | uint8_t num_input_sig, | ||
726 | uint8_t num_output_sig, | ||
344 | struct ttm_object_file *tfile, | 727 | struct ttm_object_file *tfile, |
345 | u32 *handle) | 728 | u32 *handle) |
346 | { | 729 | { |
@@ -383,7 +766,8 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv, | |||
383 | */ | 766 | */ |
384 | 767 | ||
385 | ret = vmw_gb_shader_init(dev_priv, res, shader_size, | 768 | ret = vmw_gb_shader_init(dev_priv, res, shader_size, |
386 | offset, shader_type, buffer, | 769 | offset, shader_type, num_input_sig, |
770 | num_output_sig, buffer, | ||
387 | vmw_user_shader_free); | 771 | vmw_user_shader_free); |
388 | if (unlikely(ret != 0)) | 772 | if (unlikely(ret != 0)) |
389 | goto out; | 773 | goto out; |
@@ -449,7 +833,7 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, | |||
449 | * From here on, the destructor takes over resource freeing. | 833 | * From here on, the destructor takes over resource freeing. |
450 | */ | 834 | */ |
451 | ret = vmw_gb_shader_init(dev_priv, res, shader_size, | 835 | ret = vmw_gb_shader_init(dev_priv, res, shader_size, |
452 | offset, shader_type, buffer, | 836 | offset, shader_type, 0, 0, buffer, |
453 | vmw_shader_free); | 837 | vmw_shader_free); |
454 | 838 | ||
455 | out_err: | 839 | out_err: |
@@ -457,19 +841,20 @@ out_err: | |||
457 | } | 841 | } |
458 | 842 | ||
459 | 843 | ||
460 | int vmw_shader_define_ioctl(struct drm_device *dev, void *data, | 844 | static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, |
461 | struct drm_file *file_priv) | 845 | enum drm_vmw_shader_type shader_type_drm, |
846 | u32 buffer_handle, size_t size, size_t offset, | ||
847 | uint8_t num_input_sig, uint8_t num_output_sig, | ||
848 | uint32_t *shader_handle) | ||
462 | { | 849 | { |
463 | struct vmw_private *dev_priv = vmw_priv(dev); | 850 | struct vmw_private *dev_priv = vmw_priv(dev); |
464 | struct drm_vmw_shader_create_arg *arg = | ||
465 | (struct drm_vmw_shader_create_arg *)data; | ||
466 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 851 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
467 | struct vmw_dma_buffer *buffer = NULL; | 852 | struct vmw_dma_buffer *buffer = NULL; |
468 | SVGA3dShaderType shader_type; | 853 | SVGA3dShaderType shader_type; |
469 | int ret; | 854 | int ret; |
470 | 855 | ||
471 | if (arg->buffer_handle != SVGA3D_INVALID_ID) { | 856 | if (buffer_handle != SVGA3D_INVALID_ID) { |
472 | ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle, | 857 | ret = vmw_user_dmabuf_lookup(tfile, buffer_handle, |
473 | &buffer); | 858 | &buffer); |
474 | if (unlikely(ret != 0)) { | 859 | if (unlikely(ret != 0)) { |
475 | DRM_ERROR("Could not find buffer for shader " | 860 | DRM_ERROR("Could not find buffer for shader " |
@@ -478,23 +863,20 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data, | |||
478 | } | 863 | } |
479 | 864 | ||
480 | if ((u64)buffer->base.num_pages * PAGE_SIZE < | 865 | if ((u64)buffer->base.num_pages * PAGE_SIZE < |
481 | (u64)arg->size + (u64)arg->offset) { | 866 | (u64)size + (u64)offset) { |
482 | DRM_ERROR("Illegal buffer- or shader size.\n"); | 867 | DRM_ERROR("Illegal buffer- or shader size.\n"); |
483 | ret = -EINVAL; | 868 | ret = -EINVAL; |
484 | goto out_bad_arg; | 869 | goto out_bad_arg; |
485 | } | 870 | } |
486 | } | 871 | } |
487 | 872 | ||
488 | switch (arg->shader_type) { | 873 | switch (shader_type_drm) { |
489 | case drm_vmw_shader_type_vs: | 874 | case drm_vmw_shader_type_vs: |
490 | shader_type = SVGA3D_SHADERTYPE_VS; | 875 | shader_type = SVGA3D_SHADERTYPE_VS; |
491 | break; | 876 | break; |
492 | case drm_vmw_shader_type_ps: | 877 | case drm_vmw_shader_type_ps: |
493 | shader_type = SVGA3D_SHADERTYPE_PS; | 878 | shader_type = SVGA3D_SHADERTYPE_PS; |
494 | break; | 879 | break; |
495 | case drm_vmw_shader_type_gs: | ||
496 | shader_type = SVGA3D_SHADERTYPE_GS; | ||
497 | break; | ||
498 | default: | 880 | default: |
499 | DRM_ERROR("Illegal shader type.\n"); | 881 | DRM_ERROR("Illegal shader type.\n"); |
500 | ret = -EINVAL; | 882 | ret = -EINVAL; |
@@ -505,8 +887,9 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data, | |||
505 | if (unlikely(ret != 0)) | 887 | if (unlikely(ret != 0)) |
506 | goto out_bad_arg; | 888 | goto out_bad_arg; |
507 | 889 | ||
508 | ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset, | 890 | ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset, |
509 | shader_type, tfile, &arg->shader_handle); | 891 | shader_type, num_input_sig, |
892 | num_output_sig, tfile, shader_handle); | ||
510 | 893 | ||
511 | ttm_read_unlock(&dev_priv->reservation_sem); | 894 | ttm_read_unlock(&dev_priv->reservation_sem); |
512 | out_bad_arg: | 895 | out_bad_arg: |
@@ -515,7 +898,7 @@ out_bad_arg: | |||
515 | } | 898 | } |
516 | 899 | ||
517 | /** | 900 | /** |
518 | * vmw_compat_shader_id_ok - Check whether a compat shader user key and | 901 | * vmw_shader_id_ok - Check whether a compat shader user key and |
519 | * shader type are within valid bounds. | 902 | * shader type are within valid bounds. |
520 | * | 903 | * |
521 | * @user_key: User space id of the shader. | 904 | * @user_key: User space id of the shader. |
@@ -523,13 +906,13 @@ out_bad_arg: | |||
523 | * | 906 | * |
524 | * Returns true if valid, false if not. | 907 |
525 | */ | 908 | */ |
526 | static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type) | 909 | static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type) |
527 | { | 910 | { |
528 | return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16; | 911 | return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16; |
529 | } | 912 | } |
530 | 913 | ||
531 | /** | 914 | /** |
532 | * vmw_compat_shader_key - Compute a hash key suitable for a compat shader. | 915 | * vmw_shader_key - Compute a hash key suitable for a compat shader. |
533 | * | 916 | * |
534 | * @user_key: User space id of the shader. | 917 | * @user_key: User space id of the shader. |
535 | * @shader_type: Shader type. | 918 | * @shader_type: Shader type. |
@@ -537,13 +920,13 @@ static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type) | |||
537 | * Returns a hash key suitable for a command buffer managed resource | 920 | * Returns a hash key suitable for a command buffer managed resource |
538 | * manager hash table. | 921 | * manager hash table. |
539 | */ | 922 | */ |
540 | static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type) | 923 | static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type) |
541 | { | 924 | { |
542 | return user_key | (shader_type << 20); | 925 | return user_key | (shader_type << 20); |
543 | } | 926 | } |
544 | 927 | ||
545 | /** | 928 | /** |
546 | * vmw_compat_shader_remove - Stage a compat shader for removal. | 929 | * vmw_shader_remove - Stage a compat shader for removal. |
547 | * | 930 | * |
548 | * @man: Pointer to the compat shader manager identifying the shader namespace. | 931 | * @man: Pointer to the compat shader manager identifying the shader namespace. |
549 | * @user_key: The key that is used to identify the shader. The key is | 932 | * @user_key: The key that is used to identify the shader. The key is |
@@ -551,17 +934,18 @@ static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type) | |||
551 | * @shader_type: Shader type. | 934 | * @shader_type: Shader type. |
552 | * @list: Caller's list of staged command buffer resource actions. | 935 | * @list: Caller's list of staged command buffer resource actions. |
553 | */ | 936 | */ |
554 | int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man, | 937 | int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man, |
555 | u32 user_key, SVGA3dShaderType shader_type, | 938 | u32 user_key, SVGA3dShaderType shader_type, |
556 | struct list_head *list) | 939 | struct list_head *list) |
557 | { | 940 | { |
558 | if (!vmw_compat_shader_id_ok(user_key, shader_type)) | 941 | struct vmw_resource *dummy; |
942 | |||
943 | if (!vmw_shader_id_ok(user_key, shader_type)) | ||
559 | return -EINVAL; | 944 | return -EINVAL; |
560 | 945 | ||
561 | return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader, | 946 | return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_shader, |
562 | vmw_compat_shader_key(user_key, | 947 | vmw_shader_key(user_key, shader_type), |
563 | shader_type), | 948 | list, &dummy); |
564 | list); | ||
565 | } | 949 | } |
566 | 950 | ||
567 | /** | 951 | /** |
@@ -591,7 +975,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, | |||
591 | int ret; | 975 | int ret; |
592 | struct vmw_resource *res; | 976 | struct vmw_resource *res; |
593 | 977 | ||
594 | if (!vmw_compat_shader_id_ok(user_key, shader_type)) | 978 | if (!vmw_shader_id_ok(user_key, shader_type)) |
595 | return -EINVAL; | 979 | return -EINVAL; |
596 | 980 | ||
597 | /* Allocate and pin a DMA buffer */ | 981 | /* Allocate and pin a DMA buffer */ |
@@ -628,8 +1012,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, | |||
628 | if (unlikely(ret != 0)) | 1012 | if (unlikely(ret != 0)) |
629 | goto no_reserve; | 1013 | goto no_reserve; |
630 | 1014 | ||
631 | ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader, | 1015 | ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader, |
632 | vmw_compat_shader_key(user_key, shader_type), | 1016 | vmw_shader_key(user_key, shader_type), |
633 | res, list); | 1017 | res, list); |
634 | vmw_resource_unreference(&res); | 1018 | vmw_resource_unreference(&res); |
635 | no_reserve: | 1019 | no_reserve: |
@@ -639,7 +1023,7 @@ out: | |||
639 | } | 1023 | } |
640 | 1024 | ||
641 | /** | 1025 | /** |
642 | * vmw_compat_shader_lookup - Look up a compat shader | 1026 | * vmw_shader_lookup - Look up a compat shader |
643 | * | 1027 | * |
644 | * @man: Pointer to the command buffer managed resource manager identifying | 1028 | * @man: Pointer to the command buffer managed resource manager identifying |
645 | * the shader namespace. | 1029 | * the shader namespace. |
@@ -650,14 +1034,26 @@ out: | |||
650 | * found. An error pointer otherwise. | 1034 | * found. An error pointer otherwise. |
651 | */ | 1035 | */ |
652 | struct vmw_resource * | 1036 | struct vmw_resource * |
653 | vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man, | 1037 | vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man, |
654 | u32 user_key, | 1038 | u32 user_key, |
655 | SVGA3dShaderType shader_type) | 1039 | SVGA3dShaderType shader_type) |
656 | { | 1040 | { |
657 | if (!vmw_compat_shader_id_ok(user_key, shader_type)) | 1041 | if (!vmw_shader_id_ok(user_key, shader_type)) |
658 | return ERR_PTR(-EINVAL); | 1042 | return ERR_PTR(-EINVAL); |
659 | 1043 | ||
660 | return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader, | 1044 | return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader, |
661 | vmw_compat_shader_key(user_key, | 1045 | vmw_shader_key(user_key, shader_type)); |
662 | shader_type)); | 1046 | } |
1047 | |||
1048 | int vmw_shader_define_ioctl(struct drm_device *dev, void *data, | ||
1049 | struct drm_file *file_priv) | ||
1050 | { | ||
1051 | struct drm_vmw_shader_create_arg *arg = | ||
1052 | (struct drm_vmw_shader_create_arg *)data; | ||
1053 | |||
1054 | return vmw_shader_define(dev, file_priv, arg->shader_type, | ||
1055 | arg->buffer_handle, | ||
1056 | arg->size, arg->offset, | ||
1057 | 0, 0, | ||
1058 | &arg->shader_handle); | ||
663 | } | 1059 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c new file mode 100644 index 000000000000..4dfdc95b2cfe --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c | |||
@@ -0,0 +1,555 @@ | |||
1 | /************************************************************************** | ||
2 | * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sub license, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial portions | ||
15 | * of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
20 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
21 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
22 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
23 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | **************************************************************************/ | ||
26 | |||
27 | #include "vmwgfx_drv.h" | ||
28 | #include "vmwgfx_resource_priv.h" | ||
29 | #include "vmwgfx_so.h" | ||
30 | #include "vmwgfx_binding.h" | ||
31 | |||
32 | /* | ||
33 | * Currently, the only reason we need to keep track of views is that if we | ||
34 | * destroy a hardware surface, all views pointing to it must also be destroyed; | ||
35 | * otherwise the device will raise an error. | ||
36 | * So in particular, if a surface is evicted, we must destroy all views pointing | ||
37 | * to it, and all context bindings of those views. Similarly, we must restore | ||
38 | * the view bindings, views and surfaces pointed to by the views when a | ||
39 | * context is referenced in the command stream. | ||
40 | */ | ||
41 | |||
42 | /** | ||
43 | * struct vmw_view - view metadata | ||
44 | * | ||
45 | * @res: The struct vmw_resource we derive from | ||
46 | * @ctx: Non-refcounted pointer to the context this view belongs to. | ||
47 | * @srf: Refcounted pointer to the surface pointed to by this view. | ||
48 | * @cotable: Refcounted pointer to the cotable holding this view. | ||
49 | * @srf_head: List head for the surface-to-view list. | ||
50 | * @cotable_head: List head for the cotable-to-view list. | ||
51 | * @view_type: View type. | ||
52 | * @view_id: User-space per-context view id. Currently also used as the | ||
53 | * per-context device view id. | ||
54 | * @cmd_size: Size of the SVGA3D define view command that we've copied from the | ||
55 | * command stream. | ||
56 | * @committed: Whether the view is actually created or pending creation at the | ||
57 | * device level. | ||
58 | * @cmd: The SVGA3D define view command copied from the command stream. | ||
59 | */ | ||
60 | struct vmw_view { | ||
61 | struct rcu_head rcu; | ||
62 | struct vmw_resource res; | ||
63 | struct vmw_resource *ctx; /* Immutable */ | ||
64 | struct vmw_resource *srf; /* Immutable */ | ||
65 | struct vmw_resource *cotable; /* Immutable */ | ||
66 | struct list_head srf_head; /* Protected by binding_mutex */ | ||
67 | struct list_head cotable_head; /* Protected by binding_mutex */ | ||
68 | unsigned view_type; /* Immutable */ | ||
69 | unsigned view_id; /* Immutable */ | ||
70 | u32 cmd_size; /* Immutable */ | ||
71 | bool committed; /* Protected by binding_mutex */ | ||
72 | u32 cmd[1]; /* Immutable */ | ||
73 | }; | ||
74 | |||
75 | static int vmw_view_create(struct vmw_resource *res); | ||
76 | static int vmw_view_destroy(struct vmw_resource *res); | ||
77 | static void vmw_hw_view_destroy(struct vmw_resource *res); | ||
78 | static void vmw_view_commit_notify(struct vmw_resource *res, | ||
79 | enum vmw_cmdbuf_res_state state); | ||
80 | |||
81 | static const struct vmw_res_func vmw_view_func = { | ||
82 | .res_type = vmw_res_view, | ||
83 | .needs_backup = false, | ||
84 | .may_evict = false, | ||
85 | .type_name = "DX view", | ||
86 | .backup_placement = NULL, | ||
87 | .create = vmw_view_create, | ||
88 | .commit_notify = vmw_view_commit_notify, | ||
89 | }; | ||
90 | |||
91 | /** | ||
92 | * struct vmw_view_define - view define command body stub | ||
93 | * | ||
94 | * @view_id: The device id of the view being defined | ||
95 | * @sid: The surface id of the view being defined | ||
96 | * | ||
97 | * This generic struct is used by the code to change @view_id and @sid of a | ||
98 | * saved view define command. | ||
99 | */ | ||
100 | struct vmw_view_define { | ||
101 | uint32 view_id; | ||
102 | uint32 sid; | ||
103 | }; | ||
104 | |||
105 | /** | ||
106 | * vmw_view - Convert a struct vmw_resource to a struct vmw_view | ||
107 | * | ||
108 | * @res: Pointer to the resource to convert. | ||
109 | * | ||
110 | * Returns a pointer to a struct vmw_view. | ||
111 | */ | ||
112 | static struct vmw_view *vmw_view(struct vmw_resource *res) | ||
113 | { | ||
114 | return container_of(res, struct vmw_view, res); | ||
115 | } | ||
116 | |||
117 | /** | ||
118 | * vmw_view_commit_notify - Notify that a view operation has been committed to | ||
119 | * hardware from a user-supplied command stream. | ||
120 | * | ||
121 | * @res: Pointer to the view resource. | ||
122 | * @state: Indicating whether a creation or removal has been committed. | ||
123 | * | ||
124 | */ | ||
125 | static void vmw_view_commit_notify(struct vmw_resource *res, | ||
126 | enum vmw_cmdbuf_res_state state) | ||
127 | { | ||
128 | struct vmw_view *view = vmw_view(res); | ||
129 | struct vmw_private *dev_priv = res->dev_priv; | ||
130 | |||
131 | mutex_lock(&dev_priv->binding_mutex); | ||
132 | if (state == VMW_CMDBUF_RES_ADD) { | ||
133 | struct vmw_surface *srf = vmw_res_to_srf(view->srf); | ||
134 | |||
135 | list_add_tail(&view->srf_head, &srf->view_list); | ||
136 | vmw_cotable_add_resource(view->cotable, &view->cotable_head); | ||
137 | view->committed = true; | ||
138 | res->id = view->view_id; | ||
139 | |||
140 | } else { | ||
141 | list_del_init(&view->cotable_head); | ||
142 | list_del_init(&view->srf_head); | ||
143 | view->committed = false; | ||
144 | res->id = -1; | ||
145 | } | ||
146 | mutex_unlock(&dev_priv->binding_mutex); | ||
147 | } | ||
148 | |||
149 | /** | ||
150 | * vmw_view_create - Create a hardware view. | ||
151 | * | ||
152 | * @res: Pointer to the view resource. | ||
153 | * | ||
154 | * Create a hardware view. Typically used if that view has previously been | ||
155 | * destroyed by an eviction operation. | ||
156 | */ | ||
157 | static int vmw_view_create(struct vmw_resource *res) | ||
158 | { | ||
159 | struct vmw_view *view = vmw_view(res); | ||
160 | struct vmw_surface *srf = vmw_res_to_srf(view->srf); | ||
161 | struct vmw_private *dev_priv = res->dev_priv; | ||
162 | struct { | ||
163 | SVGA3dCmdHeader header; | ||
164 | struct vmw_view_define body; | ||
165 | } *cmd; | ||
166 | |||
167 | mutex_lock(&dev_priv->binding_mutex); | ||
168 | if (!view->committed) { | ||
169 | mutex_unlock(&dev_priv->binding_mutex); | ||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | cmd = vmw_fifo_reserve_dx(res->dev_priv, view->cmd_size, | ||
174 | view->ctx->id); | ||
175 | if (!cmd) { | ||
176 | DRM_ERROR("Failed reserving FIFO space for view creation.\n"); | ||
177 | mutex_unlock(&dev_priv->binding_mutex); | ||
178 | return -ENOMEM; | ||
179 | } | ||
180 | memcpy(cmd, &view->cmd, view->cmd_size); | ||
181 | WARN_ON(cmd->body.view_id != view->view_id); | ||
182 | /* Sid may have changed due to surface eviction. */ | ||
183 | WARN_ON(view->srf->id == SVGA3D_INVALID_ID); | ||
184 | cmd->body.sid = view->srf->id; | ||
185 | vmw_fifo_commit(res->dev_priv, view->cmd_size); | ||
186 | res->id = view->view_id; | ||
187 | list_add_tail(&view->srf_head, &srf->view_list); | ||
188 | vmw_cotable_add_resource(view->cotable, &view->cotable_head); | ||
189 | mutex_unlock(&dev_priv->binding_mutex); | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | /** | ||
195 | * vmw_view_destroy - Destroy a hardware view. | ||
196 | * | ||
197 | * @res: Pointer to the view resource. | ||
198 | * | ||
199 | * Destroy a hardware view. Typically used on unexpected termination of the | ||
200 | * owning process or if the surface the view is pointing to is destroyed. | ||
201 | */ | ||
202 | static int vmw_view_destroy(struct vmw_resource *res) | ||
203 | { | ||
204 | struct vmw_private *dev_priv = res->dev_priv; | ||
205 | struct vmw_view *view = vmw_view(res); | ||
206 | struct { | ||
207 | SVGA3dCmdHeader header; | ||
208 | union vmw_view_destroy body; | ||
209 | } *cmd; | ||
210 | |||
211 | WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); | ||
212 | vmw_binding_res_list_scrub(&res->binding_head); | ||
213 | |||
214 | if (!view->committed || res->id == -1) | ||
215 | return 0; | ||
216 | |||
217 | cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), view->ctx->id); | ||
218 | if (!cmd) { | ||
219 | DRM_ERROR("Failed reserving FIFO space for view " | ||
220 | "destruction.\n"); | ||
221 | return -ENOMEM; | ||
222 | } | ||
223 | |||
224 | cmd->header.id = vmw_view_destroy_cmds[view->view_type]; | ||
225 | cmd->header.size = sizeof(cmd->body); | ||
226 | cmd->body.view_id = view->view_id; | ||
227 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
228 | res->id = -1; | ||
229 | list_del_init(&view->cotable_head); | ||
230 | list_del_init(&view->srf_head); | ||
231 | |||
232 | return 0; | ||
233 | } | ||
234 | |||
235 | /** | ||
236 | * vmw_hw_view_destroy - Destroy a hardware view as part of resource cleanup. | ||
237 | * | ||
238 | * @res: Pointer to the view resource. | ||
239 | * | ||
240 | * Destroy a hardware view if it's still present. | ||
241 | */ | ||
242 | static void vmw_hw_view_destroy(struct vmw_resource *res) | ||
243 | { | ||
244 | struct vmw_private *dev_priv = res->dev_priv; | ||
245 | |||
246 | mutex_lock(&dev_priv->binding_mutex); | ||
247 | WARN_ON(vmw_view_destroy(res)); | ||
248 | res->id = -1; | ||
249 | mutex_unlock(&dev_priv->binding_mutex); | ||
250 | } | ||
251 | |||
252 | /** | ||
253 | * vmw_view_key - Compute a view key suitable for the cmdbuf resource manager | ||
254 | * | ||
255 | * @user_key: The user-space id used for the view. | ||
256 | * @view_type: The view type. | ||
257 | * | ||
258 | * Returns a view key suitable for the cmdbuf resource manager hash table. | ||
259 | */ | ||
260 | static u32 vmw_view_key(u32 user_key, enum vmw_view_type view_type) | ||
261 | { | ||
262 | return user_key | (view_type << 20); | ||
263 | } | ||
264 | |||
265 | /** | ||
266 | * vmw_view_id_ok - Basic view id and type range checks. | ||
267 | * | ||
268 | * @user_key: The user-space id used for the view. | ||
269 | * @view_type: The view type. | ||
270 | * | ||
271 | * Checks that the view id and type (typically provided by user-space) is | ||
272 | * valid. | ||
273 | */ | ||
274 | static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type) | ||
275 | { | ||
276 | return (user_key < SVGA_COTABLE_MAX_IDS && | ||
277 | view_type < vmw_view_max); | ||
278 | } | ||
279 | |||
280 | /** | ||
281 | * vmw_view_res_free - resource res_free callback for view resources | ||
282 | * | ||
283 | * @res: Pointer to a struct vmw_resource | ||
284 | * | ||
285 | * Frees memory and memory accounting held by a struct vmw_view. | ||
286 | */ | ||
287 | static void vmw_view_res_free(struct vmw_resource *res) | ||
288 | { | ||
289 | struct vmw_view *view = vmw_view(res); | ||
290 | size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size; | ||
291 | struct vmw_private *dev_priv = res->dev_priv; | ||
292 | |||
293 | vmw_resource_unreference(&view->cotable); | ||
294 | vmw_resource_unreference(&view->srf); | ||
295 | kfree_rcu(view, rcu); | ||
296 | ttm_mem_global_free(vmw_mem_glob(dev_priv), size); | ||
297 | } | ||
298 | |||
299 | /** | ||
300 | * vmw_view_add - Create a view resource and stage it for addition | ||
301 | * as a command buffer managed resource. | ||
302 | * | ||
303 | * @man: Pointer to the command buffer resource manager identifying the view namespace. | ||
304 | * @ctx: Pointer to a struct vmw_resource identifying the active context. | ||
305 | * @srf: Pointer to a struct vmw_resource identifying the surface the view | ||
306 | * points to. | ||
307 | * @view_type: The view type deduced from the view create command. | ||
308 | * @user_key: The key that is used to identify the view. The key is | ||
309 | * unique to the view type and to the context. | ||
310 | * @cmd: Pointer to the view create command in the command stream. | ||
311 | * @cmd_size: Size of the view create command in the command stream. | ||
312 | * @list: Caller's list of staged command buffer resource actions. | ||
313 | */ | ||
314 | int vmw_view_add(struct vmw_cmdbuf_res_manager *man, | ||
315 | struct vmw_resource *ctx, | ||
316 | struct vmw_resource *srf, | ||
317 | enum vmw_view_type view_type, | ||
318 | u32 user_key, | ||
319 | const void *cmd, | ||
320 | size_t cmd_size, | ||
321 | struct list_head *list) | ||
322 | { | ||
323 | static const size_t vmw_view_define_sizes[] = { | ||
324 | [vmw_view_sr] = sizeof(SVGA3dCmdDXDefineShaderResourceView), | ||
325 | [vmw_view_rt] = sizeof(SVGA3dCmdDXDefineRenderTargetView), | ||
326 | [vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView) | ||
327 | }; | ||
328 | |||
329 | struct vmw_private *dev_priv = ctx->dev_priv; | ||
330 | struct vmw_resource *res; | ||
331 | struct vmw_view *view; | ||
332 | size_t size; | ||
333 | int ret; | ||
334 | |||
335 | if (cmd_size != vmw_view_define_sizes[view_type] + | ||
336 | sizeof(SVGA3dCmdHeader)) { | ||
337 | DRM_ERROR("Illegal view create command size.\n"); | ||
338 | return -EINVAL; | ||
339 | } | ||
340 | |||
341 | if (!vmw_view_id_ok(user_key, view_type)) { | ||
342 | DRM_ERROR("Illegal view add view id.\n"); | ||
343 | return -EINVAL; | ||
344 | } | ||
345 | |||
346 | size = offsetof(struct vmw_view, cmd) + cmd_size; | ||
347 | |||
348 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, false, true); | ||
349 | if (ret) { | ||
350 | if (ret != -ERESTARTSYS) | ||
351 | DRM_ERROR("Out of graphics memory for view" | ||
352 | " creation.\n"); | ||
353 | return ret; | ||
354 | } | ||
355 | |||
356 | view = kmalloc(size, GFP_KERNEL); | ||
357 | if (!view) { | ||
358 | ttm_mem_global_free(vmw_mem_glob(dev_priv), size); | ||
359 | return -ENOMEM; | ||
360 | } | ||
361 | |||
362 | res = &view->res; | ||
363 | view->ctx = ctx; | ||
364 | view->srf = vmw_resource_reference(srf); | ||
365 | view->cotable = vmw_context_cotable(ctx, vmw_view_cotables[view_type]); | ||
366 | view->view_type = view_type; | ||
367 | view->view_id = user_key; | ||
368 | view->cmd_size = cmd_size; | ||
369 | view->committed = false; | ||
370 | INIT_LIST_HEAD(&view->srf_head); | ||
371 | INIT_LIST_HEAD(&view->cotable_head); | ||
372 | memcpy(&view->cmd, cmd, cmd_size); | ||
373 | ret = vmw_resource_init(dev_priv, res, true, | ||
374 | vmw_view_res_free, &vmw_view_func); | ||
375 | if (ret) | ||
376 | goto out_resource_init; | ||
377 | |||
378 | ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_view, | ||
379 | vmw_view_key(user_key, view_type), | ||
380 | res, list); | ||
381 | if (ret) | ||
382 | goto out_resource_init; | ||
383 | |||
384 | res->id = view->view_id; | ||
385 | vmw_resource_activate(res, vmw_hw_view_destroy); | ||
386 | |||
387 | out_resource_init: | ||
388 | vmw_resource_unreference(&res); | ||
389 | |||
390 | return ret; | ||
391 | } | ||
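vmw_view_add() keeps a verbatim copy of the user-supplied view define command in the cmd[] trailer of struct vmw_view, so vmw_view_create() can replay it after an eviction; the allocation is therefore sized as offsetof(struct vmw_view, cmd) + cmd_size. A minimal sketch of that pattern follows; the example_* names are hypothetical and the snippet assumes <linux/slab.h>, <linux/string.h> and <linux/stddef.h>.

/*
 * Illustration only: allocating a struct with a trailing copy of a
 * variable-sized command, as vmw_view_add() does. Names are hypothetical.
 */
struct example_trailer {
	size_t cmd_size;
	u32 cmd[1];		/* variable-sized command trailer */
};

static struct example_trailer *example_trailer_alloc(const void *cmd,
						     size_t cmd_size)
{
	size_t size = offsetof(struct example_trailer, cmd) + cmd_size;
	struct example_trailer *t = kmalloc(size, GFP_KERNEL);

	if (!t)
		return NULL;

	t->cmd_size = cmd_size;
	memcpy(&t->cmd, cmd, cmd_size);	/* keep the command for later replay */
	return t;
}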
392 | |||
393 | /** | ||
394 | * vmw_view_remove - Stage a view for removal. | ||
395 | * | ||
396 | * @man: Pointer to the view manager identifying the view namespace. | ||
397 | * @user_key: The key that is used to identify the view. The key is | ||
398 | * unique to the view type. | ||
399 | * @view_type: View type | ||
400 | * @list: Caller's list of staged command buffer resource actions. | ||
401 | * @res_p: If the resource is in an already committed state, points to the | ||
402 | * struct vmw_resource on successful return. The pointer will be | ||
403 | * non ref-counted. | ||
404 | */ | ||
405 | int vmw_view_remove(struct vmw_cmdbuf_res_manager *man, | ||
406 | u32 user_key, enum vmw_view_type view_type, | ||
407 | struct list_head *list, | ||
408 | struct vmw_resource **res_p) | ||
409 | { | ||
410 | if (!vmw_view_id_ok(user_key, view_type)) { | ||
411 | DRM_ERROR("Illegal view remove view id.\n"); | ||
412 | return -EINVAL; | ||
413 | } | ||
414 | |||
415 | return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_view, | ||
416 | vmw_view_key(user_key, view_type), | ||
417 | list, res_p); | ||
418 | } | ||
419 | |||
420 | /** | ||
421 | * vmw_view_cotable_list_destroy - Evict all views belonging to a cotable. | ||
422 | * | ||
423 | * @dev_priv: Pointer to a device private struct. | ||
424 | * @list: List of views belonging to a cotable. | ||
425 | * @readback: Unused. Needed for function interface only. | ||
426 | * | ||
427 | * This function evicts all views belonging to a cotable. | ||
428 | * It must be called with the binding_mutex held, and the caller must hold | ||
429 | * a reference to the view resource. This is typically called before the | ||
430 | * cotable is paged out. | ||
431 | */ | ||
432 | void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv, | ||
433 | struct list_head *list, | ||
434 | bool readback) | ||
435 | { | ||
436 | struct vmw_view *entry, *next; | ||
437 | |||
438 | WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); | ||
439 | |||
440 | list_for_each_entry_safe(entry, next, list, cotable_head) | ||
441 | WARN_ON(vmw_view_destroy(&entry->res)); | ||
442 | } | ||
443 | |||
444 | /** | ||
445 | * vmw_view_surface_list_destroy - Evict all views pointing to a surface | ||
446 | * | ||
447 | * @dev_priv: Pointer to a device private struct. | ||
448 | * @list: List of views pointing to a surface. | ||
449 | * | ||
450 | * This function evicts all views pointing to a surface. This is typically | ||
451 | * called before the surface is evicted. | ||
452 | */ | ||
453 | void vmw_view_surface_list_destroy(struct vmw_private *dev_priv, | ||
454 | struct list_head *list) | ||
455 | { | ||
456 | struct vmw_view *entry, *next; | ||
457 | |||
458 | WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); | ||
459 | |||
460 | list_for_each_entry_safe(entry, next, list, srf_head) | ||
461 | WARN_ON(vmw_view_destroy(&entry->res)); | ||
462 | } | ||
463 | |||
464 | /** | ||
465 | * vmw_view_srf - Return a non-refcounted pointer to the surface a view is | ||
466 | * pointing to. | ||
467 | * | ||
468 | * @res: pointer to a view resource. | ||
469 | * | ||
470 | * Note that the view itself holds a reference, so as long as | ||
471 | * the view resource is alive, the surface resource will be too. | ||
472 | */ | ||
473 | struct vmw_resource *vmw_view_srf(struct vmw_resource *res) | ||
474 | { | ||
475 | return vmw_view(res)->srf; | ||
476 | } | ||
477 | |||
478 | /** | ||
479 | * vmw_view_lookup - Look up a view. | ||
480 | * | ||
481 | * @man: The context's cmdbuf ref manager. | ||
482 | * @view_type: The view type. | ||
483 | * @user_key: The view user id. | ||
484 | * | ||
485 | * Returns a refcounted pointer to a view, or an error pointer if not found. | ||
486 | */ | ||
487 | struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man, | ||
488 | enum vmw_view_type view_type, | ||
489 | u32 user_key) | ||
490 | { | ||
491 | return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_view, | ||
492 | vmw_view_key(user_key, view_type)); | ||
493 | } | ||
494 | |||
495 | const u32 vmw_view_destroy_cmds[] = { | ||
496 | [vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW, | ||
497 | [vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW, | ||
498 | [vmw_view_ds] = SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW, | ||
499 | }; | ||
500 | |||
501 | const SVGACOTableType vmw_view_cotables[] = { | ||
502 | [vmw_view_sr] = SVGA_COTABLE_SRVIEW, | ||
503 | [vmw_view_rt] = SVGA_COTABLE_RTVIEW, | ||
504 | [vmw_view_ds] = SVGA_COTABLE_DSVIEW, | ||
505 | }; | ||
506 | |||
507 | const SVGACOTableType vmw_so_cotables[] = { | ||
508 | [vmw_so_el] = SVGA_COTABLE_ELEMENTLAYOUT, | ||
509 | [vmw_so_bs] = SVGA_COTABLE_BLENDSTATE, | ||
510 | [vmw_so_ds] = SVGA_COTABLE_DEPTHSTENCIL, | ||
511 | [vmw_so_rs] = SVGA_COTABLE_RASTERIZERSTATE, | ||
512 | [vmw_so_ss] = SVGA_COTABLE_SAMPLER, | ||
513 | [vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT | ||
514 | }; | ||
515 | |||
516 | |||
517 | /* To remove unused function warning */ | ||
518 | static void vmw_so_build_asserts(void) __attribute__((used)); | ||
519 | |||
520 | |||
521 | /* | ||
522 | * This function is unused at run-time, and only used to dump various build | ||
523 | * asserts important for code optimization assumptions. | ||
524 | */ | ||
525 | static void vmw_so_build_asserts(void) | ||
526 | { | ||
527 | /* Assert that our vmw_view_cmd_to_type() function is correct. */ | ||
528 | BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW != | ||
529 | SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 1); | ||
530 | BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW != | ||
531 | SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 2); | ||
532 | BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW != | ||
533 | SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 3); | ||
534 | BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW != | ||
535 | SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 4); | ||
536 | BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW != | ||
537 | SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 5); | ||
538 | |||
539 | /* Assert that our "one body fits all" assumption is valid */ | ||
540 | BUILD_BUG_ON(sizeof(union vmw_view_destroy) != sizeof(u32)); | ||
541 | |||
542 | /* Assert that the view key space can hold all view ids. */ | ||
543 | BUILD_BUG_ON(SVGA_COTABLE_MAX_IDS >= ((1 << 20) - 1)); | ||
544 | |||
545 | /* | ||
546 | * Assert that the offset of sid in all view define commands | ||
547 | * is what we assume it to be. | ||
548 | */ | ||
549 | BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) != | ||
550 | offsetof(SVGA3dCmdDXDefineShaderResourceView, sid)); | ||
551 | BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) != | ||
552 | offsetof(SVGA3dCmdDXDefineRenderTargetView, sid)); | ||
553 | BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) != | ||
554 | offsetof(SVGA3dCmdDXDefineDepthStencilView, sid)); | ||
555 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h new file mode 100644 index 000000000000..5ef867a9e0d5 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h | |||
@@ -0,0 +1,160 @@ | |||
1 | /************************************************************************** | ||
2 | * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sub license, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial portions | ||
15 | * of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
19 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
20 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
21 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
22 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
23 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | * | ||
25 | **************************************************************************/ | ||
26 | #ifndef VMW_SO_H | ||
27 | #define VMW_SO_H | ||
28 | |||
29 | enum vmw_view_type { | ||
30 | vmw_view_sr, | ||
31 | vmw_view_rt, | ||
32 | vmw_view_ds, | ||
33 | vmw_view_max, | ||
34 | }; | ||
35 | |||
36 | enum vmw_so_type { | ||
37 | vmw_so_el, | ||
38 | vmw_so_bs, | ||
39 | vmw_so_ds, | ||
40 | vmw_so_rs, | ||
41 | vmw_so_ss, | ||
42 | vmw_so_so, | ||
43 | vmw_so_max, | ||
44 | }; | ||
45 | |||
46 | /** | ||
47 | * union vmw_view_destroy - view destruction command body | ||
48 | * | ||
49 | * @rtv: RenderTarget view destruction command body | ||
50 | * @srv: ShaderResource view destruction command body | ||
51 | * @dsv: DepthStencil view destruction command body | ||
52 | * @view_id: A single u32 view id. | ||
53 | * | ||
54 | * The assumption here is that all union members are really represented by a | ||
55 | * single u32 in the command stream. If that's not the case, | ||
56 | * the size of this union will not equal the size of a u32, the | ||
57 | * assumption is invalid, and we detect that at compile time in the | ||
58 | * vmw_so_build_asserts() function. | ||
59 | */ | ||
60 | union vmw_view_destroy { | ||
61 | struct SVGA3dCmdDXDestroyRenderTargetView rtv; | ||
62 | struct SVGA3dCmdDXDestroyShaderResourceView srv; | ||
63 | struct SVGA3dCmdDXDestroyDepthStencilView dsv; | ||
64 | u32 view_id; | ||
65 | }; | ||
66 | |||
67 | /* Map enum vmw_view_type to view destroy command ids */ | ||
68 | extern const u32 vmw_view_destroy_cmds[]; | ||
69 | |||
70 | /* Map enum vmw_view_type to SVGACOTableType */ | ||
71 | extern const SVGACOTableType vmw_view_cotables[]; | ||
72 | |||
73 | /* Map enum vmw_so_type to SVGACOTableType */ | ||
74 | extern const SVGACOTableType vmw_so_cotables[]; | ||
75 | |||
76 | /* | ||
77 | * vmw_view_cmd_to_type - Return the view type for a create or destroy command | ||
78 | * | ||
79 | * @id: The SVGA3D command id. | ||
80 | * | ||
81 | * For a given view create or destroy command id, return the corresponding | ||
82 | * enum vmw_view_type. If the command is unknown, return vmw_view_max. | ||
83 | * The validity of the simplified calculation is verified in the | ||
84 | * vmw_so_build_asserts() function. | ||
85 | */ | ||
86 | static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id) | ||
87 | { | ||
88 | u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) / 2; | ||
89 | |||
90 | if (tmp > (u32)vmw_view_max) | ||
91 | return vmw_view_max; | ||
92 | |||
93 | return (enum vmw_view_type) tmp; | ||
94 | } | ||
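Given the command id layout that vmw_so_build_asserts() verifies (define/destroy pairs starting at SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW), the division by two collapses each define/destroy pair onto one view type. A worked illustration, assuming only that layout:

/*
 * Worked example of the mapping above, with offsets relative to
 * SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW as asserted in
 * vmw_so_build_asserts():
 *
 *   DEFINE_SHADERRESOURCE_VIEW   -> 0 / 2 = 0 -> vmw_view_sr
 *   DESTROY_SHADERRESOURCE_VIEW  -> 1 / 2 = 0 -> vmw_view_sr
 *   DEFINE_RENDERTARGET_VIEW     -> 2 / 2 = 1 -> vmw_view_rt
 *   DESTROY_RENDERTARGET_VIEW    -> 3 / 2 = 1 -> vmw_view_rt
 *   DEFINE_DEPTHSTENCIL_VIEW     -> 4 / 2 = 2 -> vmw_view_ds
 *   DESTROY_DEPTHSTENCIL_VIEW    -> 5 / 2 = 2 -> vmw_view_ds
 */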
95 | |||
96 | /* | ||
97 | * vmw_so_cmd_to_type - Return the state object type for a | ||
98 | * create or destroy command | ||
99 | * | ||
100 | * @id: The SVGA3D command id. | ||
101 | * | ||
102 | * For a given state object create or destroy command id, | ||
103 | * return the corresponding enum vmw_so_type. If the command is unknown, | ||
104 | * return vmw_so_max. We should perhaps optimize this function using | ||
105 | * a strategy similar to that of vmw_view_cmd_to_type(). | ||
106 | */ | ||
107 | static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id) | ||
108 | { | ||
109 | switch (id) { | ||
110 | case SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT: | ||
111 | case SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT: | ||
112 | return vmw_so_el; | ||
113 | case SVGA_3D_CMD_DX_DEFINE_BLEND_STATE: | ||
114 | case SVGA_3D_CMD_DX_DESTROY_BLEND_STATE: | ||
115 | return vmw_so_bs; | ||
116 | case SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE: | ||
117 | case SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE: | ||
118 | return vmw_so_ds; | ||
119 | case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE: | ||
120 | case SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE: | ||
121 | return vmw_so_rs; | ||
122 | case SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE: | ||
123 | case SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE: | ||
124 | return vmw_so_ss; | ||
125 | case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT: | ||
126 | case SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT: | ||
127 | return vmw_so_so; | ||
128 | default: | ||
129 | break; | ||
130 | } | ||
131 | return vmw_so_max; | ||
132 | } | ||
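Both helpers return their _max sentinel for ids they do not recognize, which lets a caller classify a DX command without an extra lookup table. A minimal, hypothetical dispatch sketch (example_classify_dx_cmd() is not part of the patch; it only uses the two inline helpers defined above):

/* Illustration only: classifying a DX command id with the helpers above. */
static inline int example_classify_dx_cmd(u32 cmd_id)
{
	if (vmw_view_cmd_to_type(cmd_id) != vmw_view_max)
		return 0;	/* view define/destroy command */
	if (vmw_so_cmd_to_type(cmd_id) != vmw_so_max)
		return 1;	/* state-object define/destroy command */
	return -1;		/* neither */
}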
133 | |||
134 | /* | ||
135 | * View management - vmwgfx_so.c | ||
136 | */ | ||
137 | extern int vmw_view_add(struct vmw_cmdbuf_res_manager *man, | ||
138 | struct vmw_resource *ctx, | ||
139 | struct vmw_resource *srf, | ||
140 | enum vmw_view_type view_type, | ||
141 | u32 user_key, | ||
142 | const void *cmd, | ||
143 | size_t cmd_size, | ||
144 | struct list_head *list); | ||
145 | |||
146 | extern int vmw_view_remove(struct vmw_cmdbuf_res_manager *man, | ||
147 | u32 user_key, enum vmw_view_type view_type, | ||
148 | struct list_head *list, | ||
149 | struct vmw_resource **res_p); | ||
150 | |||
151 | extern void vmw_view_surface_list_destroy(struct vmw_private *dev_priv, | ||
152 | struct list_head *view_list); | ||
153 | extern void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv, | ||
154 | struct list_head *list, | ||
155 | bool readback); | ||
156 | extern struct vmw_resource *vmw_view_srf(struct vmw_resource *res); | ||
157 | extern struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man, | ||
158 | enum vmw_view_type view_type, | ||
159 | u32 user_key); | ||
160 | #endif | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index d4a453703eed..ae6773e171b0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | |||
@@ -561,6 +561,7 @@ static int vmw_stdu_crtc_set_config(struct drm_mode_set *set) | |||
561 | true, /* a scanout buffer */ | 561 | true, /* a scanout buffer */ |
562 | content_srf.mip_levels[0], | 562 | content_srf.mip_levels[0], |
563 | content_srf.multisample_count, | 563 | content_srf.multisample_count, |
564 | 0, | ||
564 | display_base_size, | 565 | display_base_size, |
565 | &display_srf); | 566 | &display_srf); |
566 | if (unlikely(ret != 0)) { | 567 | if (unlikely(ret != 0)) { |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index eea1790eed6a..12ade0cf98d0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | |||
@@ -27,9 +27,12 @@ | |||
27 | 27 | ||
28 | #include "vmwgfx_drv.h" | 28 | #include "vmwgfx_drv.h" |
29 | #include "vmwgfx_resource_priv.h" | 29 | #include "vmwgfx_resource_priv.h" |
30 | #include "vmwgfx_so.h" | ||
31 | #include "vmwgfx_binding.h" | ||
30 | #include <ttm/ttm_placement.h> | 32 | #include <ttm/ttm_placement.h> |
31 | #include "device_include/svga3d_surfacedefs.h" | 33 | #include "device_include/svga3d_surfacedefs.h" |
32 | 34 | ||
35 | |||
33 | /** | 36 | /** |
34 | * struct vmw_user_surface - User-space visible surface resource | 37 | * struct vmw_user_surface - User-space visible surface resource |
35 | * | 38 | * |
@@ -593,6 +596,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv, | |||
593 | * surface validate. | 596 | * surface validate. |
594 | */ | 597 | */ |
595 | 598 | ||
599 | INIT_LIST_HEAD(&srf->view_list); | ||
596 | vmw_resource_activate(res, vmw_hw_surface_destroy); | 600 | vmw_resource_activate(res, vmw_hw_surface_destroy); |
597 | return ret; | 601 | return ret; |
598 | } | 602 | } |
@@ -723,6 +727,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
723 | desc = svga3dsurface_get_desc(req->format); | 727 | desc = svga3dsurface_get_desc(req->format); |
724 | if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) { | 728 | if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) { |
725 | DRM_ERROR("Invalid surface format for surface creation.\n"); | 729 | DRM_ERROR("Invalid surface format for surface creation.\n"); |
730 | DRM_ERROR("Format requested is: %d\n", req->format); | ||
726 | return -EINVAL; | 731 | return -EINVAL; |
727 | } | 732 | } |
728 | 733 | ||
@@ -1018,12 +1023,16 @@ static int vmw_gb_surface_create(struct vmw_resource *res) | |||
1018 | { | 1023 | { |
1019 | struct vmw_private *dev_priv = res->dev_priv; | 1024 | struct vmw_private *dev_priv = res->dev_priv; |
1020 | struct vmw_surface *srf = vmw_res_to_srf(res); | 1025 | struct vmw_surface *srf = vmw_res_to_srf(res); |
1021 | uint32_t cmd_len, submit_len; | 1026 | uint32_t cmd_len, cmd_id, submit_len; |
1022 | int ret; | 1027 | int ret; |
1023 | struct { | 1028 | struct { |
1024 | SVGA3dCmdHeader header; | 1029 | SVGA3dCmdHeader header; |
1025 | SVGA3dCmdDefineGBSurface body; | 1030 | SVGA3dCmdDefineGBSurface body; |
1026 | } *cmd; | 1031 | } *cmd; |
1032 | struct { | ||
1033 | SVGA3dCmdHeader header; | ||
1034 | SVGA3dCmdDefineGBSurface_v2 body; | ||
1035 | } *cmd2; | ||
1027 | 1036 | ||
1028 | if (likely(res->id != -1)) | 1037 | if (likely(res->id != -1)) |
1029 | return 0; | 1038 | return 0; |
@@ -1040,9 +1049,19 @@ static int vmw_gb_surface_create(struct vmw_resource *res) | |||
1040 | goto out_no_fifo; | 1049 | goto out_no_fifo; |
1041 | } | 1050 | } |
1042 | 1051 | ||
1043 | cmd_len = sizeof(cmd->body); | 1052 | if (srf->array_size > 0) { |
1044 | submit_len = sizeof(*cmd); | 1053 | /* has_dx checked on creation time. */ |
1054 | cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2; | ||
1055 | cmd_len = sizeof(cmd2->body); | ||
1056 | submit_len = sizeof(*cmd2); | ||
1057 | } else { | ||
1058 | cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE; | ||
1059 | cmd_len = sizeof(cmd->body); | ||
1060 | submit_len = sizeof(*cmd); | ||
1061 | } | ||
1062 | |||
1045 | cmd = vmw_fifo_reserve(dev_priv, submit_len); | 1063 | cmd = vmw_fifo_reserve(dev_priv, submit_len); |
1064 | cmd2 = (typeof(cmd2))cmd; | ||
1046 | if (unlikely(cmd == NULL)) { | 1065 | if (unlikely(cmd == NULL)) { |
1047 | DRM_ERROR("Failed reserving FIFO space for surface " | 1066 | DRM_ERROR("Failed reserving FIFO space for surface " |
1048 | "creation.\n"); | 1067 | "creation.\n"); |
@@ -1050,17 +1069,33 @@ static int vmw_gb_surface_create(struct vmw_resource *res) | |||
1050 | goto out_no_fifo; | 1069 | goto out_no_fifo; |
1051 | } | 1070 | } |
1052 | 1071 | ||
1053 | cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE; | 1072 | if (srf->array_size > 0) { |
1054 | cmd->header.size = cmd_len; | 1073 | cmd2->header.id = cmd_id; |
1055 | cmd->body.sid = srf->res.id; | 1074 | cmd2->header.size = cmd_len; |
1056 | cmd->body.surfaceFlags = srf->flags; | 1075 | cmd2->body.sid = srf->res.id; |
1057 | cmd->body.format = srf->format; | 1076 | cmd2->body.surfaceFlags = srf->flags; |
1058 | cmd->body.numMipLevels = srf->mip_levels[0]; | 1077 | cmd2->body.format = cpu_to_le32(srf->format); |
1059 | cmd->body.multisampleCount = srf->multisample_count; | 1078 | cmd2->body.numMipLevels = srf->mip_levels[0]; |
1060 | cmd->body.autogenFilter = srf->autogen_filter; | 1079 | cmd2->body.multisampleCount = srf->multisample_count; |
1061 | cmd->body.size.width = srf->base_size.width; | 1080 | cmd2->body.autogenFilter = srf->autogen_filter; |
1062 | cmd->body.size.height = srf->base_size.height; | 1081 | cmd2->body.size.width = srf->base_size.width; |
1063 | cmd->body.size.depth = srf->base_size.depth; | 1082 | cmd2->body.size.height = srf->base_size.height; |
1083 | cmd2->body.size.depth = srf->base_size.depth; | ||
1084 | cmd2->body.arraySize = srf->array_size; | ||
1085 | } else { | ||
1086 | cmd->header.id = cmd_id; | ||
1087 | cmd->header.size = cmd_len; | ||
1088 | cmd->body.sid = srf->res.id; | ||
1089 | cmd->body.surfaceFlags = srf->flags; | ||
1090 | cmd->body.format = cpu_to_le32(srf->format); | ||
1091 | cmd->body.numMipLevels = srf->mip_levels[0]; | ||
1092 | cmd->body.multisampleCount = srf->multisample_count; | ||
1093 | cmd->body.autogenFilter = srf->autogen_filter; | ||
1094 | cmd->body.size.width = srf->base_size.width; | ||
1095 | cmd->body.size.height = srf->base_size.height; | ||
1096 | cmd->body.size.depth = srf->base_size.depth; | ||
1097 | } | ||
1098 | |||
1064 | vmw_fifo_commit(dev_priv, submit_len); | 1099 | vmw_fifo_commit(dev_priv, submit_len); |
1065 | 1100 | ||
1066 | return 0; | 1101 | return 0; |
@@ -1188,6 +1223,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res, | |||
1188 | static int vmw_gb_surface_destroy(struct vmw_resource *res) | 1223 | static int vmw_gb_surface_destroy(struct vmw_resource *res) |
1189 | { | 1224 | { |
1190 | struct vmw_private *dev_priv = res->dev_priv; | 1225 | struct vmw_private *dev_priv = res->dev_priv; |
1226 | struct vmw_surface *srf = vmw_res_to_srf(res); | ||
1191 | struct { | 1227 | struct { |
1192 | SVGA3dCmdHeader header; | 1228 | SVGA3dCmdHeader header; |
1193 | SVGA3dCmdDestroyGBSurface body; | 1229 | SVGA3dCmdDestroyGBSurface body; |
@@ -1197,7 +1233,8 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res) | |||
1197 | return 0; | 1233 | return 0; |
1198 | 1234 | ||
1199 | mutex_lock(&dev_priv->binding_mutex); | 1235 | mutex_lock(&dev_priv->binding_mutex); |
1200 | vmw_context_binding_res_list_scrub(&res->binding_head); | 1236 | vmw_view_surface_list_destroy(dev_priv, &srf->view_list); |
1237 | vmw_binding_res_list_scrub(&res->binding_head); | ||
1201 | 1238 | ||
1202 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | 1239 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); |
1203 | if (unlikely(cmd == NULL)) { | 1240 | if (unlikely(cmd == NULL)) { |
@@ -1259,6 +1296,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | |||
1259 | req->drm_surface_flags & drm_vmw_surface_flag_scanout, | 1296 | req->drm_surface_flags & drm_vmw_surface_flag_scanout, |
1260 | req->mip_levels, | 1297 | req->mip_levels, |
1261 | req->multisample_count, | 1298 | req->multisample_count, |
1299 | req->array_size, | ||
1262 | req->base_size, | 1300 | req->base_size, |
1263 | &srf); | 1301 | &srf); |
1264 | if (unlikely(ret != 0)) | 1302 | if (unlikely(ret != 0)) |
@@ -1275,10 +1313,17 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, | |||
1275 | res = &user_srf->srf.res; | 1313 | res = &user_srf->srf.res; |
1276 | 1314 | ||
1277 | 1315 | ||
1278 | if (req->buffer_handle != SVGA3D_INVALID_ID) | 1316 | if (req->buffer_handle != SVGA3D_INVALID_ID) { |
1279 | ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, | 1317 | ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, |
1280 | &res->backup); | 1318 | &res->backup); |
1281 | else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer) | 1319 | if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < |
1320 | res->backup_size) { | ||
1321 | DRM_ERROR("Surface backup buffer is too small.\n"); | ||
1322 | vmw_dmabuf_unreference(&res->backup); | ||
1323 | ret = -EINVAL; | ||
1324 | goto out_unlock; | ||
1325 | } | ||
1326 | } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer) | ||
1282 | ret = vmw_user_dmabuf_alloc(dev_priv, tfile, | 1327 | ret = vmw_user_dmabuf_alloc(dev_priv, tfile, |
1283 | res->backup_size, | 1328 | res->backup_size, |
1284 | req->drm_surface_flags & | 1329 | req->drm_surface_flags & |
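The hunk above adds a sanity check in vmw_gb_surface_define_ioctl(): a user-supplied backup buffer is rejected with -EINVAL if its page count does not cover the surface's computed backup_size. A standalone restatement of that check, with illustrative values; the helper name and the 4 KiB page size are assumptions for the sketch, not part of the patch.

/*
 * Sketch of the backup-buffer check added above: accept the buffer
 * only when num_pages * page size covers the computed backup_size.
 * Helper name and 4 KiB page size are illustrative assumptions.
 */
#include <stdbool.h>
#include <stddef.h>

#define GUEST_PAGE_SIZE 4096UL

static bool backup_buffer_fits(size_t num_pages, size_t backup_size)
{
	/* Mirrors: num_pages * PAGE_SIZE < backup_size  =>  -EINVAL */
	return num_pages * GUEST_PAGE_SIZE >= backup_size;
}

/*
 * Example: a 256x256, 32bpp, single-mip surface serializes to
 * 256 * 256 * 4 = 262144 bytes (64 pages); a 32-page buffer fails.
 */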
@@ -1378,6 +1423,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, | |||
1378 | rep->creq.drm_surface_flags = 0; | 1423 | rep->creq.drm_surface_flags = 0; |
1379 | rep->creq.multisample_count = srf->multisample_count; | 1424 | rep->creq.multisample_count = srf->multisample_count; |
1380 | rep->creq.autogen_filter = srf->autogen_filter; | 1425 | rep->creq.autogen_filter = srf->autogen_filter; |
1426 | rep->creq.array_size = srf->array_size; | ||
1381 | rep->creq.buffer_handle = backup_handle; | 1427 | rep->creq.buffer_handle = backup_handle; |
1382 | rep->creq.base_size = srf->base_size; | 1428 | rep->creq.base_size = srf->base_size; |
1383 | rep->crep.handle = user_srf->prime.base.hash.key; | 1429 | rep->crep.handle = user_srf->prime.base.hash.key; |
@@ -1404,6 +1450,7 @@ out_bad_resource: | |||
1404 | * @for_scanout: true if intended to be used for scanout buffer | 1450 | * @for_scanout: true if intended to be used for scanout buffer |
1405 | * @num_mip_levels: number of MIP levels | 1451 | * @num_mip_levels: number of MIP levels |
1406 | * @multisample_count: | 1452 | * @multisample_count: |
1453 | * @array_size: Surface array size. | ||
1407 | * @size: width, height, depth of the surface requested | 1454 | * @size: width, height, depth of the surface requested |
1408 | * @user_srf_out: allocated user_srf. Set to NULL on failure. | 1455 | * @user_srf_out: allocated user_srf. Set to NULL on failure. |
1409 | * | 1456 | * |
@@ -1419,6 +1466,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, | |||
1419 | bool for_scanout, | 1466 | bool for_scanout, |
1420 | uint32_t num_mip_levels, | 1467 | uint32_t num_mip_levels, |
1421 | uint32_t multisample_count, | 1468 | uint32_t multisample_count, |
1469 | uint32_t array_size, | ||
1422 | struct drm_vmw_size size, | 1470 | struct drm_vmw_size size, |
1423 | struct vmw_surface **srf_out) | 1471 | struct vmw_surface **srf_out) |
1424 | { | 1472 | { |
@@ -1426,7 +1474,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, | |||
1426 | struct vmw_user_surface *user_srf; | 1474 | struct vmw_user_surface *user_srf; |
1427 | struct vmw_surface *srf; | 1475 | struct vmw_surface *srf; |
1428 | int ret; | 1476 | int ret; |
1429 | 1477 | u32 num_layers; | |
1430 | 1478 | ||
1431 | *srf_out = NULL; | 1479 | *srf_out = NULL; |
1432 | 1480 | ||
@@ -1445,6 +1493,12 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, | |||
1445 | } | 1493 | } |
1446 | } | 1494 | } |
1447 | 1495 | ||
1496 | /* array_size must be zero for non-DX hosts. */ | ||
1497 | if (array_size > 0 && !dev_priv->has_dx) { | ||
1498 | DRM_ERROR("Tried to create DX surface on non-DX host.\n"); | ||
1499 | return -EINVAL; | ||
1500 | } | ||
1501 | |||
1448 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); | 1502 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); |
1449 | if (unlikely(ret != 0)) | 1503 | if (unlikely(ret != 0)) |
1450 | return ret; | 1504 | return ret; |
@@ -1481,10 +1535,21 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, | |||
1481 | srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; | 1535 | srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; |
1482 | srf->multisample_count = multisample_count; | 1536 | srf->multisample_count = multisample_count; |
1483 | 1537 | ||
1484 | srf->res.backup_size = svga3dsurface_get_serialized_size(srf->format, | 1538 | if (array_size) |
1485 | srf->base_size, | 1539 | num_layers = array_size; |
1486 | srf->mip_levels[0], | 1540 | else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP) |
1487 | srf->flags & SVGA3D_SURFACE_CUBEMAP); | 1541 | num_layers = SVGA3D_MAX_SURFACE_FACES; |
1542 | else | ||
1543 | num_layers = 1; | ||
1544 | |||
1545 | srf->res.backup_size = | ||
1546 | svga3dsurface_get_serialized_size(srf->format, | ||
1547 | srf->base_size, | ||
1548 | srf->mip_levels[0], | ||
1549 | num_layers); | ||
1550 | |||
1551 | if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT) | ||
1552 | srf->res.backup_size += sizeof(SVGA3dDXSOState); | ||
1488 | 1553 | ||
1489 | if (dev_priv->active_display_unit == vmw_du_screen_target && | 1554 | if (dev_priv->active_display_unit == vmw_du_screen_target && |
1490 | for_scanout) | 1555 | for_scanout) |
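The backup size now scales with the surface's layer count: array_size layers for a DX texture array, six faces for a cubemap, one layer otherwise, plus room for an SVGA3dDXSOState blob when the surface can be bound as stream output. A minimal restatement of that layer selection, assuming only that svga3dsurface_get_serialized_size() scales with the layer count it is passed; the helper name is illustrative.

/*
 * Restatement of the layer-count selection feeding
 * svga3dsurface_get_serialized_size() in the hunk above.
 * The constants are the ones the patch uses.
 */
static u32 vmw_surface_num_layers(u32 array_size, u32 svga3d_flags)
{
	if (array_size)				/* DX texture array */
		return array_size;
	if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)	/* one layer per face */
		return SVGA3D_MAX_SURFACE_FACES;
	return 1;				/* plain 2D/3D surface */
}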
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index c8a863180174..c5bcddd9f58c 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h | |||
@@ -64,6 +64,7 @@ | |||
64 | #define DRM_VMW_GB_SURFACE_CREATE 23 | 64 | #define DRM_VMW_GB_SURFACE_CREATE 23 |
65 | #define DRM_VMW_GB_SURFACE_REF 24 | 65 | #define DRM_VMW_GB_SURFACE_REF 24 |
66 | #define DRM_VMW_SYNCCPU 25 | 66 | #define DRM_VMW_SYNCCPU 25 |
67 | #define DRM_VMW_CREATE_EXTENDED_CONTEXT 26 | ||
67 | 68 | ||
68 | /*************************************************************************/ | 69 | /*************************************************************************/ |
69 | /** | 70 | /** |
@@ -89,6 +90,7 @@ | |||
89 | #define DRM_VMW_PARAM_MAX_MOB_MEMORY 9 | 90 | #define DRM_VMW_PARAM_MAX_MOB_MEMORY 9 |
90 | #define DRM_VMW_PARAM_MAX_MOB_SIZE 10 | 91 | #define DRM_VMW_PARAM_MAX_MOB_SIZE 10 |
91 | #define DRM_VMW_PARAM_SCREEN_TARGET 11 | 92 | #define DRM_VMW_PARAM_SCREEN_TARGET 11 |
93 | #define DRM_VMW_PARAM_DX 12 | ||
92 | 94 | ||
93 | /** | 95 | /** |
94 | * enum drm_vmw_handle_type - handle type for ref ioctls | 96 | * enum drm_vmw_handle_type - handle type for ref ioctls |
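DRM_VMW_PARAM_DX gives userspace a way to detect DX support before it touches any of the new interfaces. A hedged sketch of that probe; DRM_VMW_GET_PARAM and struct drm_vmw_getparam_arg come from the pre-existing header and libdrm, and are not shown in this hunk.

/*
 * Hedged userspace sketch: probe DRM_VMW_PARAM_DX before using the DX
 * ioctls.  DRM_VMW_GET_PARAM and struct drm_vmw_getparam_arg are
 * assumed from the pre-existing header, not shown above.
 */
#include <string.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

static int vmw_host_has_dx(int fd)
{
	struct drm_vmw_getparam_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.param = DRM_VMW_PARAM_DX;
	if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg)) != 0)
		return 0;	/* older kernel: treat unknown param as "no DX" */
	return arg.value != 0;
}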
@@ -297,7 +299,7 @@ union drm_vmw_surface_reference_arg { | |||
297 | * Argument to the DRM_VMW_EXECBUF Ioctl. | 299 | * Argument to the DRM_VMW_EXECBUF Ioctl. |
298 | */ | 300 | */ |
299 | 301 | ||
300 | #define DRM_VMW_EXECBUF_VERSION 1 | 302 | #define DRM_VMW_EXECBUF_VERSION 2 |
301 | 303 | ||
302 | struct drm_vmw_execbuf_arg { | 304 | struct drm_vmw_execbuf_arg { |
303 | uint64_t commands; | 305 | uint64_t commands; |
@@ -306,6 +308,8 @@ struct drm_vmw_execbuf_arg { | |||
306 | uint64_t fence_rep; | 308 | uint64_t fence_rep; |
307 | uint32_t version; | 309 | uint32_t version; |
308 | uint32_t flags; | 310 | uint32_t flags; |
311 | uint32_t context_handle; | ||
312 | uint32_t pad64; | ||
309 | }; | 313 | }; |
310 | 314 | ||
311 | /** | 315 | /** |
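Bumping DRM_VMW_EXECBUF_VERSION to 2 and adding context_handle lets userspace bind a submission to an explicit (typically DX) context instead of relying on the command stream alone. A hedged sketch of a version-2 submission; the command_size member (not visible in the hunk), the use of SVGA3D_INVALID_ID to mean "no explicit context", and the drmCommandWrite() call are assumptions, not taken from the patch.

/*
 * Hedged userspace sketch: a version-2 execbuf submission against an
 * explicit context id.  command_size, SVGA3D_INVALID_ID semantics and
 * drmCommandWrite() are assumptions, not shown in the hunk above.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

static int vmw_execbuf_v2(int fd, void *cmds, uint32_t size, uint32_t cid)
{
	struct drm_vmw_execbuf_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.commands = (uintptr_t)cmds;
	arg.command_size = size;
	arg.fence_rep = 0;			/* no fence object requested */
	arg.version = DRM_VMW_EXECBUF_VERSION;	/* now 2 */
	arg.context_handle = cid;		/* id from context creation */

	return drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
}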
@@ -826,7 +830,6 @@ struct drm_vmw_update_layout_arg { | |||
826 | enum drm_vmw_shader_type { | 830 | enum drm_vmw_shader_type { |
827 | drm_vmw_shader_type_vs = 0, | 831 | drm_vmw_shader_type_vs = 0, |
828 | drm_vmw_shader_type_ps, | 832 | drm_vmw_shader_type_ps, |
829 | drm_vmw_shader_type_gs | ||
830 | }; | 833 | }; |
831 | 834 | ||
832 | 835 | ||
@@ -908,6 +911,8 @@ enum drm_vmw_surface_flags { | |||
908 | * @buffer_handle Buffer handle of backup buffer. SVGA3D_INVALID_ID | 911 | * @buffer_handle Buffer handle of backup buffer. SVGA3D_INVALID_ID |
909 | * if none. | 912 | * if none. |
910 | * @base_size Size of the base mip level for all faces. | 913 | * @base_size Size of the base mip level for all faces. |
914 | * @array_size Must be zero for non-DX hardware, and if non-zero | ||
915 | * svga3d_flags must have proper bind flags set up. | ||
911 | * | 916 | * |
912 | * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl. | 917 | * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl. |
913 | * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl. | 918 | * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl. |
@@ -920,7 +925,7 @@ struct drm_vmw_gb_surface_create_req { | |||
920 | uint32_t multisample_count; | 925 | uint32_t multisample_count; |
921 | uint32_t autogen_filter; | 926 | uint32_t autogen_filter; |
922 | uint32_t buffer_handle; | 927 | uint32_t buffer_handle; |
923 | uint32_t pad64; | 928 | uint32_t array_size; |
924 | struct drm_vmw_size base_size; | 929 | struct drm_vmw_size base_size; |
925 | }; | 930 | }; |
926 | 931 | ||
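Since array_size takes the slot of the old pad64 field, requesting a layered guest-backed surface is just a matter of filling it in alongside suitable bind flags. A hedged sketch of such a request; the struct members not visible in the hunk (svga3d_flags, format, mip_levels, drm_surface_flags) and the SVGA3D_* constants are assumed from the wider uapi and device headers.

/*
 * Hedged sketch: create request for a 4-layer texture array.  Members
 * not visible in the hunk above and the SVGA3D_* constants are assumed
 * from the wider headers, not from this patch.
 */
#include <string.h>
#include "vmwgfx_drm.h"

static void fill_array_surface_req(struct drm_vmw_gb_surface_create_req *req)
{
	memset(req, 0, sizeof(*req));
	req->svga3d_flags = SVGA3D_SURFACE_BIND_SHADER_RESOURCE;
	req->format = SVGA3D_R8G8B8A8_UNORM;
	req->mip_levels = 1;
	req->multisample_count = 0;
	req->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	req->drm_surface_flags = drm_vmw_surface_flag_create_buffer;
	req->buffer_handle = SVGA3D_INVALID_ID;	/* kernel allocates backing */
	req->array_size = 4;			/* needs a DX-capable host */
	req->base_size.width = 256;
	req->base_size.height = 256;
	req->base_size.depth = 1;
}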
@@ -1060,4 +1065,28 @@ struct drm_vmw_synccpu_arg { | |||
1060 | uint32_t pad64; | 1065 | uint32_t pad64; |
1061 | }; | 1066 | }; |
1062 | 1067 | ||
1068 | /*************************************************************************/ | ||
1069 | /** | ||
1070 | * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context. | ||
1071 | * | ||
1072 | * Allocates a device unique context id, and queues a create context command | ||
1073 | * for the host. Does not wait for host completion. | ||
1074 | */ | ||
1075 | enum drm_vmw_extended_context { | ||
1076 | drm_vmw_context_legacy, | ||
1077 | drm_vmw_context_dx | ||
1078 | }; | ||
1079 | |||
1080 | /** | ||
1081 | * union drm_vmw_extended_context_arg | ||
1082 | * | ||
1083 | * @req: Context type. | ||
1084 | * @rep: Context identifier. | ||
1085 | * | ||
1086 | * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl. | ||
1087 | */ | ||
1088 | union drm_vmw_extended_context_arg { | ||
1089 | enum drm_vmw_extended_context req; | ||
1090 | struct drm_vmw_context_arg rep; | ||
1091 | }; | ||
1063 | #endif | 1092 | #endif |
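The new union pairs a context-type request with the existing drm_vmw_context_arg reply. A hedged userspace sketch of creating a DX context through the new ioctl; drmCommandWriteRead() and the cid member of struct drm_vmw_context_arg are assumed from libdrm and the pre-existing header, and a real caller would probe DRM_VMW_PARAM_DX (added above) before asking for drm_vmw_context_dx.

/*
 * Hedged userspace sketch: create a DX context via the new ioctl and
 * read back its id.  drmCommandWriteRead() and the 'cid' member of
 * struct drm_vmw_context_arg are assumptions from libdrm and the
 * pre-existing header, not shown in this patch.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

static int vmw_create_dx_context(int fd, int32_t *cid_out)
{
	union drm_vmw_extended_context_arg arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.req = drm_vmw_context_dx;
	ret = drmCommandWriteRead(fd, DRM_VMW_CREATE_EXTENDED_CONTEXT,
				  &arg, sizeof(arg));
	if (ret == 0)
		*cid_out = arg.rep.cid;
	return ret;
}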