Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_resource.c')
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 1183 |
1 files changed, 1183 insertions, 0 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
new file mode 100644
index 000000000000..c012d5927f65
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -0,0 +1,1183 @@
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | |||
28 | #include "vmwgfx_drv.h" | ||
29 | #include "vmwgfx_drm.h" | ||
30 | #include "ttm/ttm_object.h" | ||
31 | #include "ttm/ttm_placement.h" | ||
32 | #include "drmP.h" | ||
33 | |||
34 | #define VMW_RES_CONTEXT ttm_driver_type0 | ||
35 | #define VMW_RES_SURFACE ttm_driver_type1 | ||
36 | #define VMW_RES_STREAM ttm_driver_type2 | ||
37 | |||
38 | struct vmw_user_context { | ||
39 | struct ttm_base_object base; | ||
40 | struct vmw_resource res; | ||
41 | }; | ||
42 | |||
43 | struct vmw_user_surface { | ||
44 | struct ttm_base_object base; | ||
45 | struct vmw_surface srf; | ||
46 | }; | ||
47 | |||
48 | struct vmw_user_dma_buffer { | ||
49 | struct ttm_base_object base; | ||
50 | struct vmw_dma_buffer dma; | ||
51 | }; | ||
52 | |||
53 | struct vmw_bo_user_rep { | ||
54 | uint32_t handle; | ||
55 | uint64_t map_handle; | ||
56 | }; | ||
57 | |||
58 | struct vmw_stream { | ||
59 | struct vmw_resource res; | ||
60 | uint32_t stream_id; | ||
61 | }; | ||
62 | |||
63 | struct vmw_user_stream { | ||
64 | struct ttm_base_object base; | ||
65 | struct vmw_stream stream; | ||
66 | }; | ||
67 | |||
68 | static inline struct vmw_dma_buffer * | ||
69 | vmw_dma_buffer(struct ttm_buffer_object *bo) | ||
70 | { | ||
71 | return container_of(bo, struct vmw_dma_buffer, base); | ||
72 | } | ||
73 | |||
74 | static inline struct vmw_user_dma_buffer * | ||
75 | vmw_user_dma_buffer(struct ttm_buffer_object *bo) | ||
76 | { | ||
77 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | ||
78 | return container_of(vmw_bo, struct vmw_user_dma_buffer, dma); | ||
79 | } | ||
80 | |||
81 | struct vmw_resource *vmw_resource_reference(struct vmw_resource *res) | ||
82 | { | ||
83 | kref_get(&res->kref); | ||
84 | return res; | ||
85 | } | ||
86 | |||
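86 | /* | ||
86 | * vmw_resource_release() runs via kref_put() with the resource_lock | ||
86 | * write-held (see vmw_resource_unreference()). It drops the lock | ||
86 | * around the destroy callbacks, which may sleep, and re-takes it | ||
86 | * before returning. | ||
86 | */ | ||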
87 | static void vmw_resource_release(struct kref *kref) | ||
88 | { | ||
89 | struct vmw_resource *res = | ||
90 | container_of(kref, struct vmw_resource, kref); | ||
91 | struct vmw_private *dev_priv = res->dev_priv; | ||
92 | |||
93 | idr_remove(res->idr, res->id); | ||
94 | write_unlock(&dev_priv->resource_lock); | ||
95 | |||
96 | if (likely(res->hw_destroy != NULL)) | ||
97 | res->hw_destroy(res); | ||
98 | |||
99 | if (res->res_free != NULL) | ||
100 | res->res_free(res); | ||
101 | else | ||
102 | kfree(res); | ||
103 | |||
104 | write_lock(&dev_priv->resource_lock); | ||
105 | } | ||
106 | |||
107 | void vmw_resource_unreference(struct vmw_resource **p_res) | ||
108 | { | ||
109 | struct vmw_resource *res = *p_res; | ||
110 | struct vmw_private *dev_priv = res->dev_priv; | ||
111 | |||
112 | *p_res = NULL; | ||
113 | write_lock(&dev_priv->resource_lock); | ||
114 | kref_put(&res->kref, vmw_resource_release); | ||
115 | write_unlock(&dev_priv->resource_lock); | ||
116 | } | ||
117 | |||
118 | static int vmw_resource_init(struct vmw_private *dev_priv, | ||
119 | struct vmw_resource *res, | ||
120 | struct idr *idr, | ||
121 | enum ttm_object_type obj_type, | ||
122 | void (*res_free) (struct vmw_resource *res)) | ||
123 | { | ||
124 | int ret; | ||
125 | |||
126 | kref_init(&res->kref); | ||
127 | res->hw_destroy = NULL; | ||
128 | res->res_free = res_free; | ||
129 | res->res_type = obj_type; | ||
130 | res->idr = idr; | ||
131 | res->avail = false; | ||
132 | res->dev_priv = dev_priv; | ||
133 | |||
134 | do { | ||
135 | if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0)) | ||
136 | return -ENOMEM; | ||
137 | |||
138 | write_lock(&dev_priv->resource_lock); | ||
139 | ret = idr_get_new_above(idr, res, 1, &res->id); | ||
140 | write_unlock(&dev_priv->resource_lock); | ||
141 | |||
142 | } while (ret == -EAGAIN); | ||
143 | |||
144 | return ret; | ||
145 | } | ||
146 | |||
147 | /** | ||
148 | * vmw_resource_activate | ||
149 | * | ||
150 | * @res: Pointer to the newly created resource | ||
151 | * @hw_destroy: Destroy function. NULL if none. | ||
152 | * | ||
153 | * Activate a resource after the hardware has been made aware of it. | ||
154 | * Set the destroy function to @hw_destroy. Typically this frees the | ||
155 | * resource and destroys the hardware resources associated with it. | ||
156 | * Activation means that the function vmw_resource_lookup will | ||
157 | * be able to find the resource. | ||
158 | */ | ||
159 | |||
160 | static void vmw_resource_activate(struct vmw_resource *res, | ||
161 | void (*hw_destroy) (struct vmw_resource *)) | ||
162 | { | ||
163 | struct vmw_private *dev_priv = res->dev_priv; | ||
164 | |||
165 | write_lock(&dev_priv->resource_lock); | ||
166 | res->avail = true; | ||
167 | res->hw_destroy = hw_destroy; | ||
168 | write_unlock(&dev_priv->resource_lock); | ||
169 | } | ||
170 | |||
171 | struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv, | ||
172 | struct idr *idr, int id) | ||
173 | { | ||
174 | struct vmw_resource *res; | ||
175 | |||
176 | read_lock(&dev_priv->resource_lock); | ||
177 | res = idr_find(idr, id); | ||
178 | if (res && res->avail) | ||
179 | kref_get(&res->kref); | ||
180 | else | ||
181 | res = NULL; | ||
182 | read_unlock(&dev_priv->resource_lock); | ||
183 | |||
184 | if (unlikely(res == NULL)) | ||
185 | return NULL; | ||
186 | |||
187 | return res; | ||
188 | } | ||
189 | |||
190 | /** | ||
191 | * Context management: | ||
192 | */ | ||
193 | |||
194 | static void vmw_hw_context_destroy(struct vmw_resource *res) | ||
195 | { | ||
196 | |||
197 | struct vmw_private *dev_priv = res->dev_priv; | ||
198 | struct { | ||
199 | SVGA3dCmdHeader header; | ||
200 | SVGA3dCmdDestroyContext body; | ||
201 | } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
202 | |||
203 | if (unlikely(cmd == NULL)) { | ||
204 | DRM_ERROR("Failed reserving FIFO space for context " | ||
205 | "destruction.\n"); | ||
206 | return; | ||
207 | } | ||
208 | |||
209 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY); | ||
210 | cmd->header.size = cpu_to_le32(sizeof(cmd->body)); | ||
211 | cmd->body.cid = cpu_to_le32(res->id); | ||
212 | |||
213 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
214 | } | ||
215 | |||
216 | static int vmw_context_init(struct vmw_private *dev_priv, | ||
217 | struct vmw_resource *res, | ||
218 | void (*res_free) (struct vmw_resource *res)) | ||
219 | { | ||
220 | int ret; | ||
221 | |||
222 | struct { | ||
223 | SVGA3dCmdHeader header; | ||
224 | SVGA3dCmdDefineContext body; | ||
225 | } *cmd; | ||
226 | |||
227 | ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr, | ||
228 | VMW_RES_CONTEXT, res_free); | ||
229 | |||
230 | if (unlikely(ret != 0)) { | ||
231 | if (res_free == NULL) | ||
232 | kfree(res); | ||
233 | else | ||
234 | res_free(res); | ||
235 | return ret; | ||
236 | } | ||
237 | |||
238 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
239 | if (unlikely(cmd == NULL)) { | ||
240 | DRM_ERROR("Fifo reserve failed.\n"); | ||
241 | vmw_resource_unreference(&res); | ||
242 | return -ENOMEM; | ||
243 | } | ||
244 | |||
245 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE); | ||
246 | cmd->header.size = cpu_to_le32(sizeof(cmd->body)); | ||
247 | cmd->body.cid = cpu_to_le32(res->id); | ||
248 | |||
249 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
250 | vmw_resource_activate(res, vmw_hw_context_destroy); | ||
251 | return 0; | ||
252 | } | ||
253 | |||
254 | struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv) | ||
255 | { | ||
256 | struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL); | ||
257 | int ret; | ||
258 | |||
259 | if (unlikely(res == NULL)) | ||
260 | return NULL; | ||
261 | |||
262 | ret = vmw_context_init(dev_priv, res, NULL); | ||
263 | return (ret == 0) ? res : NULL; | ||
264 | } | ||
265 | |||
266 | /** | ||
267 | * User-space context management: | ||
268 | */ | ||
269 | |||
270 | static void vmw_user_context_free(struct vmw_resource *res) | ||
271 | { | ||
272 | struct vmw_user_context *ctx = | ||
273 | container_of(res, struct vmw_user_context, res); | ||
274 | |||
275 | kfree(ctx); | ||
276 | } | ||
277 | |||
278 | /** | ||
279 | * This function is called when user space has no more references on the | ||
280 | * base object. It releases the base-object's reference on the resource object. | ||
281 | */ | ||
282 | |||
283 | static void vmw_user_context_base_release(struct ttm_base_object **p_base) | ||
284 | { | ||
285 | struct ttm_base_object *base = *p_base; | ||
286 | struct vmw_user_context *ctx = | ||
287 | container_of(base, struct vmw_user_context, base); | ||
288 | struct vmw_resource *res = &ctx->res; | ||
289 | |||
290 | *p_base = NULL; | ||
291 | vmw_resource_unreference(&res); | ||
292 | } | ||
293 | |||
294 | int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, | ||
295 | struct drm_file *file_priv) | ||
296 | { | ||
297 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
298 | struct vmw_resource *res; | ||
299 | struct vmw_user_context *ctx; | ||
300 | struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; | ||
301 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
302 | int ret = 0; | ||
303 | |||
304 | res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid); | ||
305 | if (unlikely(res == NULL)) | ||
306 | return -EINVAL; | ||
307 | |||
308 | if (res->res_free != &vmw_user_context_free) { | ||
309 | ret = -EINVAL; | ||
310 | goto out; | ||
311 | } | ||
312 | |||
313 | ctx = container_of(res, struct vmw_user_context, res); | ||
314 | if (ctx->base.tfile != tfile && !ctx->base.shareable) { | ||
315 | ret = -EPERM; | ||
316 | goto out; | ||
317 | } | ||
318 | |||
319 | ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE); | ||
320 | out: | ||
321 | vmw_resource_unreference(&res); | ||
322 | return ret; | ||
323 | } | ||
324 | |||
325 | int vmw_context_define_ioctl(struct drm_device *dev, void *data, | ||
326 | struct drm_file *file_priv) | ||
327 | { | ||
328 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
329 | struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); | ||
330 | struct vmw_resource *res; | ||
331 | struct vmw_resource *tmp; | ||
332 | struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; | ||
333 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
334 | int ret; | ||
335 | |||
336 | if (unlikely(ctx == NULL)) | ||
337 | return -ENOMEM; | ||
338 | |||
339 | res = &ctx->res; | ||
340 | ctx->base.shareable = false; | ||
341 | ctx->base.tfile = NULL; | ||
342 | |||
343 | ret = vmw_context_init(dev_priv, res, vmw_user_context_free); | ||
344 | if (unlikely(ret != 0)) | ||
345 | return ret; | ||
346 | |||
347 | tmp = vmw_resource_reference(&ctx->res); | ||
348 | ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT, | ||
349 | &vmw_user_context_base_release, NULL); | ||
350 | |||
351 | if (unlikely(ret != 0)) { | ||
352 | vmw_resource_unreference(&tmp); | ||
353 | goto out_err; | ||
354 | } | ||
355 | |||
356 | arg->cid = res->id; | ||
357 | out_err: | ||
358 | vmw_resource_unreference(&res); | ||
359 | return ret; | ||
360 | |||
361 | } | ||
362 | |||
363 | int vmw_context_check(struct vmw_private *dev_priv, | ||
364 | struct ttm_object_file *tfile, | ||
365 | int id) | ||
366 | { | ||
367 | struct vmw_resource *res; | ||
368 | int ret = 0; | ||
369 | |||
370 | read_lock(&dev_priv->resource_lock); | ||
371 | res = idr_find(&dev_priv->context_idr, id); | ||
372 | if (res && res->avail) { | ||
373 | struct vmw_user_context *ctx = | ||
374 | container_of(res, struct vmw_user_context, res); | ||
375 | if (ctx->base.tfile != tfile && !ctx->base.shareable) | ||
376 | ret = -EPERM; | ||
377 | } else | ||
378 | ret = -EINVAL; | ||
379 | read_unlock(&dev_priv->resource_lock); | ||
380 | |||
381 | return ret; | ||
382 | } | ||
383 | |||
384 | |||
385 | /** | ||
386 | * Surface management. | ||
387 | */ | ||
388 | |||
389 | static void vmw_hw_surface_destroy(struct vmw_resource *res) | ||
390 | { | ||
391 | |||
392 | struct vmw_private *dev_priv = res->dev_priv; | ||
393 | struct { | ||
394 | SVGA3dCmdHeader header; | ||
395 | SVGA3dCmdDestroySurface body; | ||
396 | } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
397 | |||
398 | if (unlikely(cmd == NULL)) { | ||
399 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
400 | "destruction.\n"); | ||
401 | return; | ||
402 | } | ||
403 | |||
404 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY); | ||
405 | cmd->header.size = cpu_to_le32(sizeof(cmd->body)); | ||
406 | cmd->body.sid = cpu_to_le32(res->id); | ||
407 | |||
408 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
409 | } | ||
410 | |||
411 | void vmw_surface_res_free(struct vmw_resource *res) | ||
412 | { | ||
413 | struct vmw_surface *srf = container_of(res, struct vmw_surface, res); | ||
414 | |||
415 | kfree(srf->sizes); | ||
416 | kfree(srf->snooper.image); | ||
417 | kfree(srf); | ||
418 | } | ||
419 | |||
420 | int vmw_surface_init(struct vmw_private *dev_priv, | ||
421 | struct vmw_surface *srf, | ||
422 | void (*res_free) (struct vmw_resource *res)) | ||
423 | { | ||
424 | int ret; | ||
425 | struct { | ||
426 | SVGA3dCmdHeader header; | ||
427 | SVGA3dCmdDefineSurface body; | ||
428 | } *cmd; | ||
429 | SVGA3dSize *cmd_size; | ||
430 | struct vmw_resource *res = &srf->res; | ||
431 | struct drm_vmw_size *src_size; | ||
432 | size_t submit_size; | ||
433 | uint32_t cmd_len; | ||
434 | int i; | ||
435 | |||
436 | BUG_ON(res_free == NULL); | ||
437 | ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr, | ||
438 | VMW_RES_SURFACE, res_free); | ||
439 | |||
440 | if (unlikely(ret != 0)) { | ||
441 | res_free(res); | ||
442 | return ret; | ||
443 | } | ||
444 | |||
445 | submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize); | ||
446 | cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize); | ||
447 | |||
448 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | ||
449 | if (unlikely(cmd == NULL)) { | ||
450 | DRM_ERROR("Fifo reserve failed for surface creation.\n"); | ||
451 | vmw_resource_unreference(&res); | ||
452 | return -ENOMEM; | ||
453 | } | ||
454 | |||
455 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE); | ||
456 | cmd->header.size = cpu_to_le32(cmd_len); | ||
457 | cmd->body.sid = cpu_to_le32(res->id); | ||
458 | cmd->body.surfaceFlags = cpu_to_le32(srf->flags); | ||
459 | cmd->body.format = cpu_to_le32(srf->format); | ||
460 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { | ||
461 | cmd->body.face[i].numMipLevels = | ||
462 | cpu_to_le32(srf->mip_levels[i]); | ||
463 | } | ||
464 | |||
465 | cmd += 1; | ||
466 | cmd_size = (SVGA3dSize *) cmd; | ||
467 | src_size = srf->sizes; | ||
468 | |||
469 | for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) { | ||
470 | cmd_size->width = cpu_to_le32(src_size->width); | ||
471 | cmd_size->height = cpu_to_le32(src_size->height); | ||
472 | cmd_size->depth = cpu_to_le32(src_size->depth); | ||
473 | } | ||
474 | |||
475 | vmw_fifo_commit(dev_priv, submit_size); | ||
476 | vmw_resource_activate(res, vmw_hw_surface_destroy); | ||
477 | return 0; | ||
478 | } | ||
479 | |||
480 | static void vmw_user_surface_free(struct vmw_resource *res) | ||
481 | { | ||
482 | struct vmw_surface *srf = container_of(res, struct vmw_surface, res); | ||
483 | struct vmw_user_surface *user_srf = | ||
484 | container_of(srf, struct vmw_user_surface, srf); | ||
485 | |||
486 | kfree(srf->sizes); | ||
487 | kfree(srf->snooper.image); | ||
488 | kfree(user_srf); | ||
489 | } | ||
490 | |||
491 | int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv, | ||
492 | struct ttm_object_file *tfile, | ||
493 | uint32_t handle, struct vmw_surface **out) | ||
494 | { | ||
495 | struct vmw_resource *res; | ||
496 | struct vmw_surface *srf; | ||
497 | struct vmw_user_surface *user_srf; | ||
498 | struct ttm_base_object *base; | ||
499 | int ret = -EINVAL; | ||
500 | |||
501 | base = ttm_base_object_lookup(tfile, handle); | ||
502 | if (unlikely(base == NULL)) | ||
503 | return -EINVAL; | ||
504 | |||
505 | if (unlikely(base->object_type != VMW_RES_SURFACE)) | ||
506 | goto out_bad_resource; | ||
507 | |||
508 | user_srf = container_of(base, struct vmw_user_surface, base); | ||
509 | srf = &user_srf->srf; | ||
510 | res = &srf->res; | ||
511 | |||
512 | read_lock(&dev_priv->resource_lock); | ||
513 | |||
514 | if (!res->avail || res->res_free != &vmw_user_surface_free) { | ||
515 | read_unlock(&dev_priv->resource_lock); | ||
516 | goto out_bad_resource; | ||
517 | } | ||
518 | |||
519 | kref_get(&res->kref); | ||
520 | read_unlock(&dev_priv->resource_lock); | ||
521 | |||
522 | *out = srf; | ||
523 | ret = 0; | ||
524 | |||
525 | out_bad_resource: | ||
526 | ttm_base_object_unref(&base); | ||
527 | |||
528 | return ret; | ||
529 | } | ||
530 | |||
531 | static void vmw_user_surface_base_release(struct ttm_base_object **p_base) | ||
532 | { | ||
533 | struct ttm_base_object *base = *p_base; | ||
534 | struct vmw_user_surface *user_srf = | ||
535 | container_of(base, struct vmw_user_surface, base); | ||
536 | struct vmw_resource *res = &user_srf->srf.res; | ||
537 | |||
538 | *p_base = NULL; | ||
539 | vmw_resource_unreference(&res); | ||
540 | } | ||
541 | |||
542 | int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, | ||
543 | struct drm_file *file_priv) | ||
544 | { | ||
545 | struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data; | ||
546 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
547 | |||
548 | return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE); | ||
549 | } | ||
550 | |||
551 | int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | ||
552 | struct drm_file *file_priv) | ||
553 | { | ||
554 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
555 | struct vmw_user_surface *user_srf = | ||
556 | kmalloc(sizeof(*user_srf), GFP_KERNEL); | ||
557 | struct vmw_surface *srf; | ||
558 | struct vmw_resource *res; | ||
559 | struct vmw_resource *tmp; | ||
560 | union drm_vmw_surface_create_arg *arg = | ||
561 | (union drm_vmw_surface_create_arg *)data; | ||
562 | struct drm_vmw_surface_create_req *req = &arg->req; | ||
563 | struct drm_vmw_surface_arg *rep = &arg->rep; | ||
564 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
565 | struct drm_vmw_size __user *user_sizes; | ||
566 | int ret; | ||
567 | int i; | ||
568 | |||
569 | if (unlikely(user_srf == NULL)) | ||
570 | return -ENOMEM; | ||
571 | |||
572 | srf = &user_srf->srf; | ||
573 | res = &srf->res; | ||
574 | |||
575 | srf->flags = req->flags; | ||
576 | srf->format = req->format; | ||
577 | memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); | ||
578 | srf->num_sizes = 0; | ||
579 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) | ||
580 | srf->num_sizes += srf->mip_levels[i]; | ||
581 | |||
582 | if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES * | ||
583 | DRM_VMW_MAX_MIP_LEVELS) { | ||
584 | ret = -EINVAL; | ||
585 | goto out_err0; | ||
586 | } | ||
587 | |||
588 | srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL); | ||
589 | if (unlikely(srf->sizes == NULL)) { | ||
590 | ret = -ENOMEM; | ||
591 | goto out_err0; | ||
592 | } | ||
593 | |||
594 | user_sizes = (struct drm_vmw_size __user *)(unsigned long) | ||
595 | req->size_addr; | ||
596 | |||
597 | ret = copy_from_user(srf->sizes, user_sizes, | ||
598 | srf->num_sizes * sizeof(*srf->sizes)); | ||
599 | if (unlikely(ret != 0)) { | ||
600 | ret = -EFAULT; | ||
600 | goto out_err1; | ||
600 | } | ||
601 | |||
602 | user_srf->base.shareable = false; | ||
603 | user_srf->base.tfile = NULL; | ||
604 | |||
605 | /** | ||
606 | * From this point, the generic resource management functions | ||
607 | * destroy the object on failure. | ||
608 | */ | ||
609 | |||
610 | ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); | ||
611 | if (unlikely(ret != 0)) | ||
612 | return ret; | ||
613 | |||
614 | tmp = vmw_resource_reference(&srf->res); | ||
615 | ret = ttm_base_object_init(tfile, &user_srf->base, | ||
616 | req->shareable, VMW_RES_SURFACE, | ||
617 | &vmw_user_surface_base_release, NULL); | ||
618 | |||
619 | if (unlikely(ret != 0)) { | ||
620 | vmw_resource_unreference(&tmp); | ||
621 | vmw_resource_unreference(&res); | ||
622 | return ret; | ||
623 | } | ||
624 | |||
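624 | /* | ||
624 | * A surface with flag bit (1 << 9) set (presumably a scanout/cursor | ||
624 | * hint) and a single 64x64 A8R8G8B8 mip level is treated as a likely | ||
624 | * cursor surface; allocate an image buffer so cursor commands can be | ||
624 | * snooped. | ||
624 | */ | ||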
625 | if (srf->flags & (1 << 9) && | ||
626 | srf->num_sizes == 1 && | ||
627 | srf->sizes[0].width == 64 && | ||
628 | srf->sizes[0].height == 64 && | ||
629 | srf->format == SVGA3D_A8R8G8B8) { | ||
630 | |||
631 | srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL); | ||
632 | /* clear the image */ | ||
633 | if (srf->snooper.image) | ||
634 | memset(srf->snooper.image, 0x00, 64 * 64 * 4); | ||
635 | else | ||
636 | DRM_ERROR("Failed to allocate cursor_image\n"); | ||
637 | |||
638 | } else { | ||
639 | srf->snooper.image = NULL; | ||
640 | } | ||
641 | srf->snooper.crtc = NULL; | ||
642 | |||
643 | rep->sid = user_srf->base.hash.key; | ||
644 | if (rep->sid == SVGA3D_INVALID_ID) | ||
645 | DRM_ERROR("Created bad Surface ID.\n"); | ||
646 | |||
647 | vmw_resource_unreference(&res); | ||
648 | return 0; | ||
649 | out_err1: | ||
650 | kfree(srf->sizes); | ||
651 | out_err0: | ||
652 | kfree(user_srf); | ||
653 | return ret; | ||
654 | } | ||
655 | |||
656 | int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | ||
657 | struct drm_file *file_priv) | ||
658 | { | ||
659 | union drm_vmw_surface_reference_arg *arg = | ||
660 | (union drm_vmw_surface_reference_arg *)data; | ||
661 | struct drm_vmw_surface_arg *req = &arg->req; | ||
662 | struct drm_vmw_surface_create_req *rep = &arg->rep; | ||
663 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
664 | struct vmw_surface *srf; | ||
665 | struct vmw_user_surface *user_srf; | ||
666 | struct drm_vmw_size __user *user_sizes; | ||
667 | struct ttm_base_object *base; | ||
668 | int ret = -EINVAL; | ||
669 | |||
670 | base = ttm_base_object_lookup(tfile, req->sid); | ||
671 | if (unlikely(base == NULL)) { | ||
672 | DRM_ERROR("Could not find surface to reference.\n"); | ||
673 | return -EINVAL; | ||
674 | } | ||
675 | |||
676 | if (unlikely(base->object_type != VMW_RES_SURFACE)) | ||
677 | goto out_bad_resource; | ||
678 | |||
679 | user_srf = container_of(base, struct vmw_user_surface, base); | ||
680 | srf = &user_srf->srf; | ||
681 | |||
682 | ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL); | ||
683 | if (unlikely(ret != 0)) { | ||
684 | DRM_ERROR("Could not add a reference to a surface.\n"); | ||
685 | goto out_no_reference; | ||
686 | } | ||
687 | |||
688 | rep->flags = srf->flags; | ||
689 | rep->format = srf->format; | ||
690 | memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels)); | ||
691 | user_sizes = (struct drm_vmw_size __user *)(unsigned long) | ||
692 | rep->size_addr; | ||
693 | |||
694 | if (user_sizes) | ||
695 | ret = copy_to_user(user_sizes, srf->sizes, | ||
696 | srf->num_sizes * sizeof(*srf->sizes)); | ||
697 | if (unlikely(ret != 0)) { | ||
698 | DRM_ERROR("copy_to_user failed %p %u\n", | ||
699 | user_sizes, srf->num_sizes); | ||
699 | ret = -EFAULT; | ||
699 | } | ||
700 | out_bad_resource: | ||
701 | out_no_reference: | ||
702 | ttm_base_object_unref(&base); | ||
703 | |||
704 | return ret; | ||
705 | } | ||
706 | |||
707 | int vmw_surface_check(struct vmw_private *dev_priv, | ||
708 | struct ttm_object_file *tfile, | ||
709 | uint32_t handle, int *id) | ||
710 | { | ||
711 | struct ttm_base_object *base; | ||
712 | struct vmw_user_surface *user_srf; | ||
713 | |||
714 | int ret = -EPERM; | ||
715 | |||
716 | base = ttm_base_object_lookup(tfile, handle); | ||
717 | if (unlikely(base == NULL)) | ||
718 | return -EINVAL; | ||
719 | |||
720 | if (unlikely(base->object_type != VMW_RES_SURFACE)) | ||
721 | goto out_bad_surface; | ||
722 | |||
723 | user_srf = container_of(base, struct vmw_user_surface, base); | ||
724 | *id = user_srf->srf.res.id; | ||
725 | ret = 0; | ||
726 | |||
727 | out_bad_surface: | ||
728 | /** | ||
729 | * FIXME: May deadlock here when called from the | ||
730 | * command parsing code. | ||
731 | */ | ||
732 | |||
733 | ttm_base_object_unref(&base); | ||
734 | return ret; | ||
735 | } | ||
736 | |||
737 | /** | ||
738 | * Buffer management. | ||
739 | */ | ||
740 | |||
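740 | /* | ||
740 | * Size to account to TTM for a DMA buffer: the fixed buffer-object | ||
740 | * overhead plus its page-pointer array, rounded up to a whole page. | ||
740 | */ | ||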
741 | static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob, | ||
742 | unsigned long num_pages) | ||
743 | { | ||
744 | static size_t bo_user_size = ~0; | ||
745 | |||
746 | size_t page_array_size = | ||
747 | (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK; | ||
748 | |||
749 | if (unlikely(bo_user_size == ~0)) { | ||
750 | bo_user_size = glob->ttm_bo_extra_size + | ||
751 | ttm_round_pot(sizeof(struct vmw_dma_buffer)); | ||
752 | } | ||
753 | |||
754 | return bo_user_size + page_array_size; | ||
755 | } | ||
756 | |||
757 | void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) | ||
758 | { | ||
759 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | ||
760 | struct ttm_bo_global *glob = bo->glob; | ||
761 | struct vmw_private *dev_priv = | ||
762 | container_of(bo->bdev, struct vmw_private, bdev); | ||
763 | |||
764 | ttm_mem_global_free(glob->mem_glob, bo->acc_size); | ||
765 | if (vmw_bo->gmr_bound) { | ||
766 | vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id); | ||
767 | spin_lock(&glob->lru_lock); | ||
768 | ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id); | ||
769 | spin_unlock(&glob->lru_lock); | ||
770 | } | ||
771 | kfree(vmw_bo); | ||
772 | } | ||
773 | |||
774 | int vmw_dmabuf_init(struct vmw_private *dev_priv, | ||
775 | struct vmw_dma_buffer *vmw_bo, | ||
776 | size_t size, struct ttm_placement *placement, | ||
777 | bool interruptible, | ||
778 | void (*bo_free) (struct ttm_buffer_object *bo)) | ||
779 | { | ||
780 | struct ttm_bo_device *bdev = &dev_priv->bdev; | ||
781 | struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; | ||
782 | size_t acc_size; | ||
783 | int ret; | ||
784 | |||
785 | BUG_ON(!bo_free); | ||
786 | |||
787 | acc_size = | ||
788 | vmw_dmabuf_acc_size(bdev->glob, | ||
789 | (size + PAGE_SIZE - 1) >> PAGE_SHIFT); | ||
790 | |||
791 | ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); | ||
792 | if (unlikely(ret != 0)) { | ||
793 | /* ttm_bo_init() frees the buffer object on its own failure path, | ||
794 | * so this error path must do the same here. */ | ||
795 | bo_free(&vmw_bo->base); | ||
796 | return ret; | ||
797 | } | ||
798 | |||
799 | memset(vmw_bo, 0, sizeof(*vmw_bo)); | ||
800 | |||
801 | INIT_LIST_HEAD(&vmw_bo->gmr_lru); | ||
802 | INIT_LIST_HEAD(&vmw_bo->validate_list); | ||
803 | vmw_bo->gmr_id = 0; | ||
804 | vmw_bo->gmr_bound = false; | ||
805 | |||
806 | ret = ttm_bo_init(bdev, &vmw_bo->base, size, | ||
807 | ttm_bo_type_device, placement, | ||
808 | 0, 0, interruptible, | ||
809 | NULL, acc_size, bo_free); | ||
810 | return ret; | ||
811 | } | ||
812 | |||
813 | static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) | ||
814 | { | ||
815 | struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); | ||
816 | struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma; | ||
817 | struct ttm_bo_global *glob = bo->glob; | ||
818 | struct vmw_private *dev_priv = | ||
819 | container_of(bo->bdev, struct vmw_private, bdev); | ||
820 | |||
821 | ttm_mem_global_free(glob->mem_glob, bo->acc_size); | ||
822 | if (vmw_bo->gmr_bound) { | ||
823 | vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id); | ||
824 | spin_lock(&glob->lru_lock); | ||
825 | ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id); | ||
826 | spin_unlock(&glob->lru_lock); | ||
827 | } | ||
828 | kfree(vmw_user_bo); | ||
829 | } | ||
830 | |||
831 | static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) | ||
832 | { | ||
833 | struct vmw_user_dma_buffer *vmw_user_bo; | ||
834 | struct ttm_base_object *base = *p_base; | ||
835 | struct ttm_buffer_object *bo; | ||
836 | |||
837 | *p_base = NULL; | ||
838 | |||
839 | if (unlikely(base == NULL)) | ||
840 | return; | ||
841 | |||
842 | vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); | ||
843 | bo = &vmw_user_bo->dma.base; | ||
844 | ttm_bo_unref(&bo); | ||
845 | } | ||
846 | |||
847 | int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | ||
848 | struct drm_file *file_priv) | ||
849 | { | ||
850 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
851 | union drm_vmw_alloc_dmabuf_arg *arg = | ||
852 | (union drm_vmw_alloc_dmabuf_arg *)data; | ||
853 | struct drm_vmw_alloc_dmabuf_req *req = &arg->req; | ||
854 | struct drm_vmw_dmabuf_rep *rep = &arg->rep; | ||
855 | struct vmw_user_dma_buffer *vmw_user_bo; | ||
856 | struct ttm_buffer_object *tmp; | ||
857 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
858 | int ret; | ||
859 | |||
860 | vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL); | ||
861 | if (unlikely(vmw_user_bo == NULL)) | ||
862 | return -ENOMEM; | ||
863 | |||
864 | ret = ttm_read_lock(&vmaster->lock, true); | ||
865 | if (unlikely(ret != 0)) { | ||
866 | kfree(vmw_user_bo); | ||
867 | return ret; | ||
868 | } | ||
869 | |||
870 | ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size, | ||
871 | &vmw_vram_placement, true, | ||
872 | &vmw_user_dmabuf_destroy); | ||
873 | if (unlikely(ret != 0)) { | ||
874 | ttm_read_unlock(&vmaster->lock); | ||
874 | return ret; | ||
874 | } | ||
875 | |||
876 | tmp = ttm_bo_reference(&vmw_user_bo->dma.base); | ||
877 | ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, | ||
878 | &vmw_user_bo->base, | ||
879 | false, | ||
880 | ttm_buffer_type, | ||
881 | &vmw_user_dmabuf_release, NULL); | ||
882 | if (unlikely(ret != 0)) { | ||
883 | ttm_bo_unref(&tmp); | ||
884 | } else { | ||
885 | rep->handle = vmw_user_bo->base.hash.key; | ||
886 | rep->map_handle = vmw_user_bo->dma.base.addr_space_offset; | ||
887 | rep->cur_gmr_id = vmw_user_bo->base.hash.key; | ||
888 | rep->cur_gmr_offset = 0; | ||
889 | } | ||
890 | ttm_bo_unref(&tmp); | ||
891 | |||
892 | ttm_read_unlock(&vmaster->lock); | ||
893 | |||
894 | return 0; | ||
895 | } | ||
896 | |||
897 | int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, | ||
898 | struct drm_file *file_priv) | ||
899 | { | ||
900 | struct drm_vmw_unref_dmabuf_arg *arg = | ||
901 | (struct drm_vmw_unref_dmabuf_arg *)data; | ||
902 | |||
903 | return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, | ||
904 | arg->handle, | ||
905 | TTM_REF_USAGE); | ||
906 | } | ||
907 | |||
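907 | /* | ||
907 | * Assign @cur_validate_node to the buffer the first time it is seen | ||
907 | * during command submission; later calls return the index already | ||
907 | * assigned until vmw_dmabuf_validate_clear() resets it. | ||
907 | */ | ||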
908 | uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, | ||
909 | uint32_t cur_validate_node) | ||
910 | { | ||
911 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | ||
912 | |||
913 | if (likely(vmw_bo->on_validate_list)) | ||
914 | return vmw_bo->cur_validate_node; | ||
915 | |||
916 | vmw_bo->cur_validate_node = cur_validate_node; | ||
917 | vmw_bo->on_validate_list = true; | ||
918 | |||
919 | return cur_validate_node; | ||
920 | } | ||
921 | |||
922 | void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo) | ||
923 | { | ||
924 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | ||
925 | |||
926 | vmw_bo->on_validate_list = false; | ||
927 | } | ||
928 | |||
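928 | /* | ||
928 | * Return the GMR id to use for the buffer: the framebuffer GMR when | ||
928 | * the buffer is placed in VRAM, the bound GMR id if one is set, or | ||
928 | * SVGA_GMR_NULL otherwise. | ||
928 | */ | ||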
929 | uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo) | ||
930 | { | ||
931 | struct vmw_dma_buffer *vmw_bo; | ||
932 | |||
933 | if (bo->mem.mem_type == TTM_PL_VRAM) | ||
934 | return SVGA_GMR_FRAMEBUFFER; | ||
935 | |||
936 | vmw_bo = vmw_dma_buffer(bo); | ||
937 | |||
938 | return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL; | ||
939 | } | ||
940 | |||
941 | void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id) | ||
942 | { | ||
943 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | ||
944 | vmw_bo->gmr_bound = true; | ||
945 | vmw_bo->gmr_id = id; | ||
946 | } | ||
947 | |||
948 | int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, | ||
949 | uint32_t handle, struct vmw_dma_buffer **out) | ||
950 | { | ||
951 | struct vmw_user_dma_buffer *vmw_user_bo; | ||
952 | struct ttm_base_object *base; | ||
953 | |||
954 | base = ttm_base_object_lookup(tfile, handle); | ||
955 | if (unlikely(base == NULL)) { | ||
956 | printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n", | ||
957 | (unsigned long)handle); | ||
958 | return -ESRCH; | ||
959 | } | ||
960 | |||
961 | if (unlikely(base->object_type != ttm_buffer_type)) { | ||
962 | ttm_base_object_unref(&base); | ||
963 | printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n", | ||
964 | (unsigned long)handle); | ||
965 | return -EINVAL; | ||
966 | } | ||
967 | |||
968 | vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); | ||
969 | (void)ttm_bo_reference(&vmw_user_bo->dma.base); | ||
970 | ttm_base_object_unref(&base); | ||
971 | *out = &vmw_user_bo->dma; | ||
972 | |||
973 | return 0; | ||
974 | } | ||
975 | |||
976 | /** | ||
977 | * TODO: Implement a gmr id eviction mechanism. Currently we just fail | ||
978 | * when we're out of ids, in which case the buffer ends up backed | ||
979 | * by VRAM instead of a GMR. | ||
980 | */ | ||
981 | |||
982 | int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id) | ||
983 | { | ||
984 | struct ttm_bo_global *glob = dev_priv->bdev.glob; | ||
985 | int id; | ||
986 | int ret; | ||
987 | |||
988 | do { | ||
989 | if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0)) | ||
990 | return -ENOMEM; | ||
991 | |||
992 | spin_lock(&glob->lru_lock); | ||
993 | ret = ida_get_new(&dev_priv->gmr_ida, &id); | ||
994 | spin_unlock(&glob->lru_lock); | ||
995 | } while (ret == -EAGAIN); | ||
996 | |||
997 | if (unlikely(ret != 0)) | ||
998 | return ret; | ||
999 | |||
1000 | if (unlikely(id >= dev_priv->max_gmr_ids)) { | ||
1001 | spin_lock(&glob->lru_lock); | ||
1002 | ida_remove(&dev_priv->gmr_ida, id); | ||
1003 | spin_unlock(&glob->lru_lock); | ||
1004 | return -EBUSY; | ||
1005 | } | ||
1006 | |||
1007 | *p_id = (uint32_t) id; | ||
1008 | return 0; | ||
1009 | } | ||
1010 | |||
1011 | /* | ||
1012 | * Stream management | ||
1013 | */ | ||
1014 | |||
1015 | static void vmw_stream_destroy(struct vmw_resource *res) | ||
1016 | { | ||
1017 | struct vmw_private *dev_priv = res->dev_priv; | ||
1018 | struct vmw_stream *stream; | ||
1019 | int ret; | ||
1020 | |||
1021 | DRM_INFO("%s: unref\n", __func__); | ||
1022 | stream = container_of(res, struct vmw_stream, res); | ||
1023 | |||
1024 | ret = vmw_overlay_unref(dev_priv, stream->stream_id); | ||
1025 | WARN_ON(ret != 0); | ||
1026 | } | ||
1027 | |||
1028 | static int vmw_stream_init(struct vmw_private *dev_priv, | ||
1029 | struct vmw_stream *stream, | ||
1030 | void (*res_free) (struct vmw_resource *res)) | ||
1031 | { | ||
1032 | struct vmw_resource *res = &stream->res; | ||
1033 | int ret; | ||
1034 | |||
1035 | ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr, | ||
1036 | VMW_RES_STREAM, res_free); | ||
1037 | |||
1038 | if (unlikely(ret != 0)) { | ||
1039 | if (res_free == NULL) | ||
1040 | kfree(stream); | ||
1041 | else | ||
1042 | res_free(&stream->res); | ||
1043 | return ret; | ||
1044 | } | ||
1045 | |||
1046 | ret = vmw_overlay_claim(dev_priv, &stream->stream_id); | ||
1047 | if (ret) { | ||
1048 | vmw_resource_unreference(&res); | ||
1049 | return ret; | ||
1050 | } | ||
1051 | |||
1052 | DRM_INFO("%s: claimed\n", __func__); | ||
1053 | |||
1054 | vmw_resource_activate(&stream->res, vmw_stream_destroy); | ||
1055 | return 0; | ||
1056 | } | ||
1057 | |||
1058 | /** | ||
1059 | * User-space stream management: | ||
1060 | */ | ||
1061 | |||
1062 | static void vmw_user_stream_free(struct vmw_resource *res) | ||
1063 | { | ||
1064 | struct vmw_user_stream *stream = | ||
1065 | container_of(res, struct vmw_user_stream, stream.res); | ||
1066 | |||
1067 | kfree(stream); | ||
1068 | } | ||
1069 | |||
1070 | /** | ||
1071 | * This function is called when user space has no more references on the | ||
1072 | * base object. It releases the base-object's reference on the resource object. | ||
1073 | */ | ||
1074 | |||
1075 | static void vmw_user_stream_base_release(struct ttm_base_object **p_base) | ||
1076 | { | ||
1077 | struct ttm_base_object *base = *p_base; | ||
1078 | struct vmw_user_stream *stream = | ||
1079 | container_of(base, struct vmw_user_stream, base); | ||
1080 | struct vmw_resource *res = &stream->stream.res; | ||
1081 | |||
1082 | *p_base = NULL; | ||
1083 | vmw_resource_unreference(&res); | ||
1084 | } | ||
1085 | |||
1086 | int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, | ||
1087 | struct drm_file *file_priv) | ||
1088 | { | ||
1089 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
1090 | struct vmw_resource *res; | ||
1091 | struct vmw_user_stream *stream; | ||
1092 | struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; | ||
1093 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
1094 | int ret = 0; | ||
1095 | |||
1096 | res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id); | ||
1097 | if (unlikely(res == NULL)) | ||
1098 | return -EINVAL; | ||
1099 | |||
1100 | if (res->res_free != &vmw_user_stream_free) { | ||
1101 | ret = -EINVAL; | ||
1102 | goto out; | ||
1103 | } | ||
1104 | |||
1105 | stream = container_of(res, struct vmw_user_stream, stream.res); | ||
1106 | if (stream->base.tfile != tfile) { | ||
1107 | ret = -EINVAL; | ||
1108 | goto out; | ||
1109 | } | ||
1110 | |||
1111 | ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE); | ||
1112 | out: | ||
1113 | vmw_resource_unreference(&res); | ||
1114 | return ret; | ||
1115 | } | ||
1116 | |||
1117 | int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, | ||
1118 | struct drm_file *file_priv) | ||
1119 | { | ||
1120 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
1121 | struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL); | ||
1122 | struct vmw_resource *res; | ||
1123 | struct vmw_resource *tmp; | ||
1124 | struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; | ||
1125 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
1126 | int ret; | ||
1127 | |||
1128 | if (unlikely(stream == NULL)) | ||
1129 | return -ENOMEM; | ||
1130 | |||
1131 | res = &stream->stream.res; | ||
1132 | stream->base.shareable = false; | ||
1133 | stream->base.tfile = NULL; | ||
1134 | |||
1135 | ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free); | ||
1136 | if (unlikely(ret != 0)) | ||
1137 | return ret; | ||
1138 | |||
1139 | tmp = vmw_resource_reference(res); | ||
1140 | ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM, | ||
1141 | &vmw_user_stream_base_release, NULL); | ||
1142 | |||
1143 | if (unlikely(ret != 0)) { | ||
1144 | vmw_resource_unreference(&tmp); | ||
1145 | goto out_err; | ||
1146 | } | ||
1147 | |||
1148 | arg->stream_id = res->id; | ||
1149 | out_err: | ||
1150 | vmw_resource_unreference(&res); | ||
1151 | return ret; | ||
1152 | } | ||
1153 | |||
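1153 | /* | ||
1153 | * Look up a user stream by handle and check ownership; on success, | ||
1153 | * return the underlying overlay stream id in @inout_id and a | ||
1153 | * reference to the resource in @out. | ||
1153 | */ | ||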
1154 | int vmw_user_stream_lookup(struct vmw_private *dev_priv, | ||
1155 | struct ttm_object_file *tfile, | ||
1156 | uint32_t *inout_id, struct vmw_resource **out) | ||
1157 | { | ||
1158 | struct vmw_user_stream *stream; | ||
1159 | struct vmw_resource *res; | ||
1160 | int ret; | ||
1161 | |||
1162 | res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id); | ||
1163 | if (unlikely(res == NULL)) | ||
1164 | return -EINVAL; | ||
1165 | |||
1166 | if (res->res_free != &vmw_user_stream_free) { | ||
1167 | ret = -EINVAL; | ||
1168 | goto err_ref; | ||
1169 | } | ||
1170 | |||
1171 | stream = container_of(res, struct vmw_user_stream, stream.res); | ||
1172 | if (stream->base.tfile != tfile) { | ||
1173 | ret = -EPERM; | ||
1174 | goto err_ref; | ||
1175 | } | ||
1176 | |||
1177 | *inout_id = stream->stream.stream_id; | ||
1178 | *out = res; | ||
1179 | return 0; | ||
1180 | err_ref: | ||
1181 | vmw_resource_unreference(&res); | ||
1182 | return ret; | ||
1183 | } | ||