Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_shader.c')
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 440
1 file changed, 440 insertions, 0 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
new file mode 100644
index 000000000000..813bd0a2abaf
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -0,0 +1,440 @@
/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"

struct vmw_shader {
	struct vmw_resource res;
	SVGA3dShaderType type;
	uint32_t size;
};

struct vmw_user_shader {
	struct ttm_base_object base;
	struct vmw_shader shader;
};

static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);

static int vmw_gb_shader_create(struct vmw_resource *res);
static int vmw_gb_shader_bind(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_unbind(struct vmw_resource *res,
				bool readback,
				struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);

static uint64_t vmw_user_shader_size;

static const struct vmw_user_resource_conv user_shader_conv = {
	.object_type = VMW_RES_SHADER,
	.base_obj_to_res = vmw_user_shader_base_to_res,
	.res_free = vmw_user_shader_free
};

const struct vmw_user_resource_conv *user_shader_converter =
	&user_shader_conv;


static const struct vmw_res_func vmw_gb_shader_func = {
	.res_type = vmw_res_shader,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed shaders",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_shader_create,
	.destroy = vmw_gb_shader_destroy,
	.bind = vmw_gb_shader_bind,
	.unbind = vmw_gb_shader_unbind
};

/**
 * Shader management:
 */

static inline struct vmw_shader *
vmw_res_to_shader(struct vmw_resource *res)
{
	return container_of(res, struct vmw_shader, res);
}

static void vmw_hw_shader_destroy(struct vmw_resource *res)
{
	(void) vmw_gb_shader_destroy(res);
}

static int vmw_gb_shader_init(struct vmw_private *dev_priv,
			      struct vmw_resource *res,
			      uint32_t size,
			      uint64_t offset,
			      SVGA3dShaderType type,
			      struct vmw_dma_buffer *byte_code,
			      void (*res_free) (struct vmw_resource *res))
{
	struct vmw_shader *shader = vmw_res_to_shader(res);
	int ret;

	ret = vmw_resource_init(dev_priv, res, true,
				res_free, &vmw_gb_shader_func);

	if (unlikely(ret != 0)) {
		if (res_free)
			res_free(res);
		else
			kfree(res);
		return ret;
	}

	res->backup_size = size;
	if (byte_code) {
		res->backup = vmw_dmabuf_reference(byte_code);
		res->backup_offset = offset;
	}
	shader->size = size;
	shader->type = type;

	vmw_resource_activate(res, vmw_hw_shader_destroy);
	return 0;
}

static int vmw_gb_shader_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_shader *shader = vmw_res_to_shader(res);
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBShader body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a shader id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.type = shader->type;
	cmd->body.sizeInBytes = shader->size;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

static int vmw_gb_shader_bind(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.offsetInBytes = 0;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

static int vmw_gb_shader_unbind(struct vmw_resource *res,
				bool readback,
				struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;
	struct vmw_fence_obj *fence;

	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.mobid = SVGA3D_INVALID_ID;
	cmd->body.offsetInBytes = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

static int vmw_gb_shader_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBShader body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_context_binding_res_list_kill(&res->binding_head);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "destruction.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_3d_resource_dec(dev_priv, false);

	return 0;
}

/**
 * User-space shader management:
 */

static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_shader, base)->
		 shader.res);
}

static void vmw_user_shader_free(struct vmw_resource *res)
{
	struct vmw_user_shader *ushader =
		container_of(res, struct vmw_user_shader, shader.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(ushader, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_shader_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_resource *res = vmw_user_shader_base_to_res(base);

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->handle,
					 TTM_REF_USAGE);
}

int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_shader *ushader;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_shader_create_arg *arg =
		(struct drm_vmw_shader_create_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_dma_buffer *buffer = NULL;
	SVGA3dShaderType shader_type;
	int ret;

	if (arg->buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
					     &buffer);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not find buffer for shader "
				  "creation.\n");
			return ret;
		}

		if ((u64)buffer->base.num_pages * PAGE_SIZE <
		    (u64)arg->size + (u64)arg->offset) {
			DRM_ERROR("Illegal buffer- or shader size.\n");
			ret = -EINVAL;
			goto out_bad_arg;
		}
	}

	switch (arg->shader_type) {
	case drm_vmw_shader_type_vs:
		shader_type = SVGA3D_SHADERTYPE_VS;
		break;
	case drm_vmw_shader_type_ps:
		shader_type = SVGA3D_SHADERTYPE_PS;
		break;
	case drm_vmw_shader_type_gs:
		shader_type = SVGA3D_SHADERTYPE_GS;
		break;
	default:
		DRM_ERROR("Illegal shader type.\n");
		ret = -EINVAL;
		goto out_bad_arg;
	}

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of shaders anyway.
	 */

	if (unlikely(vmw_user_shader_size == 0))
		vmw_user_shader_size = ttm_round_pot(sizeof(*ushader))
			+ 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		goto out_bad_arg;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_shader_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for shader"
				  " creation.\n");
		goto out_unlock;
	}

	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
	if (unlikely(ushader == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_shader_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ushader->shader.res;
	ushader->base.shareable = false;
	ushader->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_gb_shader_init(dev_priv, res, arg->size,
				 arg->offset, shader_type, buffer,
				 vmw_user_shader_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &ushader->base, false,
				   VMW_RES_SHADER,
				   &vmw_user_shader_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->shader_handle = ushader->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
out_bad_arg:
	vmw_dmabuf_unreference(&buffer);

	return ret;
}