author    Jakob Bornecrantz <jakob@vmware.com>  2009-12-09 19:19:58 -0500
committer Dave Airlie <airlied@redhat.com>      2009-12-14 17:38:43 -0500
commit    fb1d9738ca053ea8afa5e86af6463155f983b01c (patch)
tree      53aa407922c989f48aead5fcf61f9945ca6051d5 /drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
parent    632f61178d0473861ba77e774bb654b37bc7eccc (diff)
drm/vmwgfx: Add DRM driver for VMware Virtual GPU
This commit adds the vmwgfx driver for the VMware Virtual GPU, aka SVGA. The driver is in staging, the same as Nouveau and Radeon KMS. Hopefully the 2D ioctls are bug-free and won't need changing, so that part of the API should be stable. But there is a pretty big chance that the 3D API will change in the future.

Signed-off-by: Thomas Hellström <thellstrom@vmware.com>
Signed-off-by: Jakob Bornecrantz <jakob@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_resource.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  1192
1 file changed, 1192 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
new file mode 100644
index 000000000000..a1ceed0c8e07
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -0,0 +1,1192 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include "drmP.h"

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}
/*
 * vmw_resource_release() is called from kref_put() with the
 * resource_lock held for writing (see vmw_resource_unreference()).
 * The lock is dropped around the potentially sleeping hw_destroy and
 * res_free callbacks and retaken before returning, so that the caller
 * can release it.
 */
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
		container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;

	idr_remove(res->idr, res->id);
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}

static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     void (*res_free) (struct vmw_resource *res))
{
	int ret;

	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;

	do {
		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}
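
/*
 * Illustrative note (not part of the original file): the loop above is
 * the standard idr allocation idiom of this kernel era.  idr_pre_get()
 * preallocates memory without holding any lock, and idr_get_new_above()
 * performs the actual id allocation under resource_lock.  It returns
 * -EAGAIN when the preallocated memory was consumed by a concurrent
 * allocation, in which case we simply preallocate again and retry.
 */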

/**
 * vmw_resource_activate
 *
 * @res: Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Once a resource has been activated, vmw_resource_lookup() will be
 * able to find it.
 */

static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}
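
/*
 * Illustrative sketch (not part of the original file): the intended
 * resource lifecycle, assuming a hypothetical init function that
 * queues the hardware create command itself:
 *
 *	ret = vmw_resource_init(dev_priv, res, idr, type, res_free);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... reserve and commit the FIFO command defining the object ...
 *	vmw_resource_activate(res, my_hw_destroy);
 *
 * Only after vmw_resource_activate() has set res->avail will
 * vmw_resource_lookup() hand the resource out to other callers.
 */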

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}
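
/*
 * Illustrative sketch (not part of the original file): a successful
 * lookup returns a referenced resource that the caller must release:
 *
 *	struct vmw_resource *res =
 *		vmw_resource_lookup(dev_priv, &dev_priv->context_idr, id);
 *	if (unlikely(res == NULL))
 *		return -EINVAL;
 *	... use res ...
 *	vmw_resource_unreference(&res);
 */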

/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}

static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(res);
		else
			res_free(res);
		return ret;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);
	return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
		container_of(res, struct vmw_user_context, res);

	kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
		container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(ctx == NULL))
		return -ENOMEM;

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		struct vmw_user_context *ctx =
			container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}


/**
 * Surface management.
 */

static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroySurface body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.sid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}

void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}

int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineSurface body;
	} *cmd;
	SVGA3dSize *cmd_size;
	struct vmw_resource *res = &srf->res;
	struct drm_vmw_size *src_size;
	size_t submit_size;
	uint32_t cmd_len;
	int i;

	BUG_ON(res_free == NULL);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, res_free);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed for create surface.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
	cmd->header.size = cpu_to_le32(cmd_len);
	cmd->body.sid = cpu_to_le32(res->id);
	cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		cmd->body.face[i].numMipLevels =
			cpu_to_le32(srf->mip_levels[i]);
	}

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = cpu_to_le32(src_size->width);
		cmd_size->height = cpu_to_le32(src_size->height);
		cmd_size->depth = cpu_to_le32(src_size->depth);
	}

	vmw_fifo_commit(dev_priv, submit_size);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return 0;
}
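
/*
 * Illustrative note (not part of the original file): the surface
 * define command is variable-length.  The FIFO submission built above
 * is laid out as:
 *
 *	SVGA3dCmdHeader        header;  (id = SVGA_3D_CMD_SURFACE_DEFINE,
 *	                                 size = cmd_len)
 *	SVGA3dCmdDefineSurface body;
 *	SVGA3dSize             sizes[num_sizes];
 *
 * header.size covers the body plus the trailing size array but not the
 * header itself, while submit_size, passed to vmw_fifo_reserve() and
 * vmw_fifo_commit(), covers the whole submission.
 */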

static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
		container_of(srf, struct vmw_user_surface, srf);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
}
int vmw_user_surface_lookup(struct vmw_private *dev_priv,
			    struct ttm_object_file *tfile,
			    int sid, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;

	res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, sid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_surface_free) {
		/* Drop the reference taken by vmw_resource_lookup(). */
		vmw_resource_unreference(&res);
		return -EINVAL;
	}

	srf = container_of(res, struct vmw_surface, res);
	user_srf = container_of(srf, struct vmw_user_surface, srf);
	if (user_srf->base.tfile != tfile && !user_srf->base.shareable) {
		vmw_resource_unreference(&res);
		return -EPERM;
	}

	*out = srf;
	return 0;
}

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
		container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, arg->sid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_surface_free) {
		ret = -EINVAL;
		goto out;
	}

	srf = container_of(res, struct vmw_surface, res);
	user_srf = container_of(srf, struct vmw_user_surface, srf);
	if (user_srf->base.tfile != tfile && !user_srf->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, user_srf->base.hash.key,
				  TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf =
		kmalloc(sizeof(*user_srf), GFP_KERNEL);
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
		(union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i;

	if (unlikely(user_srf == NULL))
		return -ENOMEM;

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		srf->num_sizes += srf->mip_levels[i];

	if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS) {
		ret = -EINVAL;
		goto out_err0;
	}

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;
	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		/* copy_from_user() returns the number of bytes not copied. */
		ret = -EFAULT;
		goto out_err1;
	}

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		return ret;
	}

	/*
	 * A 64x64 SVGA3D_A8R8G8B8 surface with this flag set is assumed
	 * to be a cursor surface, so set up an image buffer for cursor
	 * snooping.
	 */
	if (srf->flags & (1 << 9) &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
		/* clear the image */
		if (srf->snooper.image)
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
		else
			DRM_ERROR("Failed to allocate cursor_image\n");

	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	rep->sid = res->id;
	vmw_resource_unreference(&res);
	return 0;
out_err1:
	kfree(srf->sizes);
out_err0:
	kfree(user_srf);
	return ret;
}

int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_surface_reference_arg *arg =
		(union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, req->sid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_surface_free) {
		ret = -EINVAL;
		goto out;
	}

	srf = container_of(res, struct vmw_surface, res);
	user_srf = container_of(srf, struct vmw_user_surface, srf);
	if (user_srf->base.tfile != tfile && !user_srf->base.shareable) {
		DRM_ERROR("Tried to reference a non-shareable surface.\n");
		ret = -EPERM;
		goto out;
	}

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		/* copy_to_user() returns the number of bytes not copied. */
		ret = -EFAULT;
		/**
		 * FIXME: Unreference surface here?
		 */
		goto out;
	}
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->surface_idr, id);
	if (res && res->avail) {
		struct vmw_surface *srf =
			container_of(res, struct vmw_surface, res);
		struct vmw_user_surface *usrf =
			container_of(srf, struct vmw_user_surface, srf);

		if (usrf->base.tfile != tfile && !usrf->base.shareable)
			ret = -EPERM;
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}

/**
 * Buffer management.
 */

static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
		(num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
			ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}
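
/*
 * Illustrative example (not part of the original file), assuming
 * 4 KiB pages and 8-byte pointers: a 1 MiB buffer has 256 pages, so
 * page_array_size = (256 * 8 + 4095) & PAGE_MASK = 4096 bytes, and the
 * accounted size is that plus the fixed per-object overhead computed
 * once into bo_user_size.
 */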

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;
	struct vmw_private *dev_priv =
		container_of(bo->bdev, struct vmw_private, bdev);

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	if (vmw_bo->gmr_bound) {
		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
		spin_unlock(&glob->lru_lock);
	}
	kfree(vmw_bo);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size =
	    vmw_dmabuf_acc_size(bdev->glob,
				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/*
		 * Free the buffer here ourselves; on later failures
		 * ttm_bo_init() calls bo_free for us.
		 */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->gmr_lru);
	INIT_LIST_HEAD(&vmw_bo->validate_list);
	vmw_bo->gmr_id = 0;
	vmw_bo->gmr_bound = false;

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}
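
/*
 * Illustrative sketch (not part of the original file): creating a
 * kernel-internal buffer, assuming a hypothetical caller:
 *
 *	struct vmw_dma_buffer *vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
 *	if (unlikely(vbo == NULL))
 *		return -ENOMEM;
 *	ret = vmw_dmabuf_init(dev_priv, vbo, size, &vmw_vram_placement,
 *			      true, &vmw_dmabuf_bo_free);
 *	if (unlikely(ret != 0))
 *		return ret;	(vbo has already been freed on failure)
 */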

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma;
	struct ttm_bo_global *glob = bo->glob;
	struct vmw_private *dev_priv =
		container_of(bo->bdev, struct vmw_private, bdev);

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	if (vmw_bo->gmr_bound) {
		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
		spin_unlock(&glob->lru_lock);
	}
	kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		/* Drop the initial reference as well, destroying the buffer. */
		tmp = &vmw_user_bo->dma.base;
		ttm_bo_unref(&tmp);
		goto out_unlock;
	}

	rep->handle = vmw_user_bo->base.hash.key;
	rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
	rep->cur_gmr_id = vmw_user_bo->base.hash.key;
	rep->cur_gmr_offset = 0;

	ttm_bo_unref(&tmp);

out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}
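
/*
 * Illustrative note (not part of the original file): during command
 * submission each buffer gets at most one slot in the validate list.
 * The first call for a buffer records cur_validate_node and marks the
 * buffer as listed; subsequent calls return the previously assigned
 * slot, and vmw_dmabuf_validate_clear() below resets the flag once
 * submission is done.
 */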

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}

uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo;

	if (bo->mem.mem_type == TTM_PL_VRAM)
		return SVGA_GMR_FRAMEBUFFER;

	vmw_bo = vmw_dma_buffer(bo);

	return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
}

void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->gmr_bound = true;
	vmw_bo->gmr_id = id;
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}
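
/*
 * Illustrative sketch (not part of the original file): the lookup
 * returns a referenced buffer; drop it with ttm_bo_unref() when done:
 *
 *	struct vmw_dma_buffer *vbo;
 *	struct ttm_buffer_object *bo;
 *
 *	ret = vmw_user_dmabuf_lookup(tfile, handle, &vbo);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... use vbo ...
 *	bo = &vbo->base;
 *	ttm_bo_unref(&bo);
 */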

/**
 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
 * when we're out of ids, causing GMR space to be allocated
 * out of VRAM.
 */

int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
{
	struct ttm_bo_global *glob = dev_priv->bdev.glob;
	int id;
	int ret;

	do {
		if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
			return -ENOMEM;

		spin_lock(&glob->lru_lock);
		ret = ida_get_new(&dev_priv->gmr_ida, &id);
		spin_unlock(&glob->lru_lock);
	} while (ret == -EAGAIN);

	if (unlikely(ret != 0))
		return ret;

	if (unlikely(id >= dev_priv->max_gmr_ids)) {
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, id);
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	*p_id = (uint32_t) id;
	return 0;
}
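
/*
 * Illustrative sketch (not part of the original file): binding a
 * buffer to a GMR, assuming hypothetical surrounding code:
 *
 *	uint32_t id;
 *
 *	ret = vmw_gmr_id_alloc(dev_priv, &id);
 *	if (unlikely(ret != 0))
 *		return ret;	(per the TODO above, callers fall back
 *				 to VRAM when ids run out)
 *	... bind the buffer's pages to GMR "id" ...
 *	vmw_dmabuf_set_gmr(bo, id);
 */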

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
		container_of(res, struct vmw_user_stream, stream.res);

	kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
		container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(stream == NULL))
		return -ENOMEM;

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}
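
/*
 * Illustrative sketch (not part of the original file): callers pass a
 * user-space handle in *inout_id and get back the hardware stream id
 * plus a referenced resource:
 *
 *	uint32_t id = arg->stream_id;
 *	struct vmw_resource *res;
 *
 *	ret = vmw_user_stream_lookup(dev_priv, tfile, &id, &res);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... program the overlay using the hardware id in "id" ...
 *	vmw_resource_unreference(&res);
 */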