author		Thomas Hellstrom <thellstrom@vmware.com>	2012-11-21 06:32:19 -0500
committer	Thomas Hellstrom <thellstrom@vmware.com>	2014-01-17 01:52:33 -0500
commit		1d7a5cbf8f74edee0b1d9ee479367b5d876bf627
tree		28598b7931f80455a980de7356ed5ea686db14a9
parent		15c6f6562317eb18e686a89735aa8c524d88096e

drm/vmwgfx: Implement a buffer object synccpu ioctl.

This ioctl enables inter-process synchronization of buffer objects,
which is needed for mesa Guest-Backed objects.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c      |   6
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h      |   2
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 142
 include/uapi/drm/vmwgfx_drm.h            |  63
 4 files changed, 211 insertions(+), 2 deletions(-)
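
For userspace context, the new ioctl would typically be driven through libdrm. The snippet below is a minimal sketch and not part of this patch: it assumes a buffer handle obtained elsewhere (for example from an earlier vmwgfx buffer allocation ioctl) and uses drmCommandWrite(), libdrm's wrapper for driver-private write-only ioctls; the helper names are hypothetical.

#include <stdint.h>

#include <xf86drm.h>
#include "vmwgfx_drm.h"

/* Hypothetical helper: grab @handle for CPU write access. Idles any
 * pending GPU operations on the buffer and blocks further command
 * submissions referencing it until the matching release. */
static int vmw_bo_sync_for_cpu_write(int fd, uint32_t handle)
{
	struct drm_vmw_synccpu_arg arg = {
		.op = drm_vmw_synccpu_grab,
		.flags = drm_vmw_synccpu_write,
		.handle = handle,
	};

	return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}

/* Hypothetical helper: release the grab again. A blocking grab is
 * also released automatically when the file is closed. */
static int vmw_bo_release_from_cpu(int fd, uint32_t handle)
{
	struct drm_vmw_synccpu_arg arg = {
		.op = drm_vmw_synccpu_release,
		.flags = drm_vmw_synccpu_write,
		.handle = handle,
	};

	return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}
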
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index cfeaa478dcd7..fb56676ed3ee 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -124,6 +124,9 @@
 #define DRM_IOCTL_VMW_GB_SURFACE_REF				\
 	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
 		union drm_vmw_gb_surface_reference_arg)
+#define DRM_IOCTL_VMW_SYNCCPU					\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
+		struct drm_vmw_synccpu_arg)
 
 /**
  * The core DRM version of this macro doesn't account for
@@ -201,6 +204,9 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
 	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
 		      vmw_gb_surface_reference_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_SYNCCPU,
+		      vmw_user_dmabuf_synccpu_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
 };
 
 static struct pci_device_id vmw_pci_id_list[] = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e0859eebe7b6..fe3c2e3e1cbe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -533,6 +533,8 @@ extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
+extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+					 struct drm_file *file_priv);
 extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
 					 uint32_t cur_validate_node);
 extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index b40978f0ca96..12e68e58d9e4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -441,6 +441,21 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
 	ttm_bo_unref(&bo);
 }
 
+static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
+					    enum ttm_ref_type ref_type)
+{
+	struct vmw_user_dma_buffer *user_bo;
+	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
+
+	switch (ref_type) {
+	case TTM_REF_SYNCCPU_WRITE:
+		ttm_bo_synccpu_write_release(&user_bo->dma.base);
+		break;
+	default:
+		BUG();
+	}
+}
+
 /**
  * vmw_user_dmabuf_alloc - Allocate a user dma buffer
  *
@@ -484,7 +499,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 				    &user_bo->prime,
 				    shareable,
 				    ttm_buffer_type,
-				    &vmw_user_dmabuf_release, NULL);
+				    &vmw_user_dmabuf_release,
+				    &vmw_user_dmabuf_ref_obj_release);
 	if (unlikely(ret != 0)) {
 		ttm_bo_unref(&tmp);
 		goto out_no_base_object;
@@ -517,6 +533,130 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
 		vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
 }
 
+/**
+ * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
+ * access, idling previous GPU operations on the buffer and optionally
+ * blocking it for further command submissions.
+ *
+ * @user_bo: Pointer to the buffer object being grabbed for CPU access
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating how the grab should be performed.
+ *
+ * A blocking grab will be automatically released when @tfile is closed.
+ */
+static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
+					struct ttm_object_file *tfile,
+					uint32_t flags)
+{
+	struct ttm_buffer_object *bo = &user_bo->dma.base;
+	bool existed;
+	int ret;
+
+	if (flags & drm_vmw_synccpu_allow_cs) {
+		struct ttm_bo_device *bdev = bo->bdev;
+
+		spin_lock(&bdev->fence_lock);
+		ret = ttm_bo_wait(bo, false, true,
+				  !!(flags & drm_vmw_synccpu_dontblock));
+		spin_unlock(&bdev->fence_lock);
+		return ret;
+	}
+
+	ret = ttm_bo_synccpu_write_grab
+		(bo, !!(flags & drm_vmw_synccpu_dontblock));
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+				 TTM_REF_SYNCCPU_WRITE, &existed);
+	if (ret != 0 || existed)
+		ttm_bo_synccpu_write_release(&user_bo->dma.base);
+
+	return ret;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
+ * and unblock command submission on the buffer if blocked.
+ *
+ * @handle: Handle identifying the buffer object.
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating the type of release.
+ */
+static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
+					   struct ttm_object_file *tfile,
+					   uint32_t flags)
+{
+	if (!(flags & drm_vmw_synccpu_allow_cs))
+		return ttm_ref_object_base_unref(tfile, handle,
+						 TTM_REF_SYNCCPU_WRITE);
+
+	return 0;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
+ * functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ *
+ * This function checks the ioctl arguments for validity and calls the
+ * relevant synccpu functions.
+ */
+int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv)
+{
+	struct drm_vmw_synccpu_arg *arg =
+		(struct drm_vmw_synccpu_arg *) data;
+	struct vmw_dma_buffer *dma_buf;
+	struct vmw_user_dma_buffer *user_bo;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	int ret;
+
+	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
+	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
+			       drm_vmw_synccpu_dontblock |
+			       drm_vmw_synccpu_allow_cs)) != 0) {
+		DRM_ERROR("Illegal synccpu flags.\n");
+		return -EINVAL;
+	}
+
+	switch (arg->op) {
+	case drm_vmw_synccpu_grab:
+		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
+		if (unlikely(ret != 0))
+			return ret;
+
+		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
+				       dma);
+		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
+		vmw_dmabuf_unreference(&dma_buf);
+		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
+			     ret != -EBUSY)) {
+			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
+				  (unsigned int) arg->handle);
+			return ret;
+		}
+		break;
+	case drm_vmw_synccpu_release:
+		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
+						      arg->flags);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
+				  (unsigned int) arg->handle);
+			return ret;
+		}
+		break;
+	default:
+		DRM_ERROR("Invalid synccpu operation.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv)
 {
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index bb3b91d84914..adb7e0d0d3b6 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -28,6 +28,10 @@
 #ifndef __VMWGFX_DRM_H__
 #define __VMWGFX_DRM_H__
 
+#ifndef __KERNEL__
+#include <drm.h>
+#endif
+
 #define DRM_VMW_MAX_SURFACE_FACES 6
 #define DRM_VMW_MAX_MIP_LEVELS 24
 
@@ -59,7 +63,7 @@
 #define DRM_VMW_UNREF_SHADER         22
 #define DRM_VMW_GB_SURFACE_CREATE    23
 #define DRM_VMW_GB_SURFACE_REF       24
-
+#define DRM_VMW_SYNCCPU              25
 
 /*************************************************************************/
 /**
@@ -985,5 +989,62 @@ union drm_vmw_gb_surface_reference_arg {
 };
 
 
+/*************************************************************************/
+/**
+ * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
+ *
+ * Idles any previously submitted GPU operations on the buffer and
+ * by default blocks command submissions that reference the buffer.
+ * If the file descriptor used to grab a blocking CPU sync is closed, the
+ * cpu sync is released.
+ * The flags argument indicates how the grab / release operation should be
+ * performed:
+ */
+
+/**
+ * enum drm_vmw_synccpu_flags - Synccpu flags:
+ *
+ * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
+ * hint to the kernel to allow command submissions that reference the buffer
+ * for read-only access.
+ * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
+ * referencing this buffer.
+ * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
+ * -EBUSY should the buffer be busy.
+ * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
+ * while the buffer is synced for CPU. This is similar to the GEM bo idle
+ * behavior.
+ */
+enum drm_vmw_synccpu_flags {
+	drm_vmw_synccpu_read = (1 << 0),
+	drm_vmw_synccpu_write = (1 << 1),
+	drm_vmw_synccpu_dontblock = (1 << 2),
+	drm_vmw_synccpu_allow_cs = (1 << 3)
+};
+
+/**
+ * enum drm_vmw_synccpu_op - Synccpu operations:
+ *
+ * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations.
+ * @drm_vmw_synccpu_release: Release a previous grab.
+ */
+enum drm_vmw_synccpu_op {
+	drm_vmw_synccpu_grab,
+	drm_vmw_synccpu_release
+};
+
+/**
+ * struct drm_vmw_synccpu_arg
+ *
+ * @op: The synccpu operation as described above.
+ * @handle: Handle identifying the buffer object.
+ * @flags: Flags as described above.
+ */
+struct drm_vmw_synccpu_arg {
+	enum drm_vmw_synccpu_op op;
+	enum drm_vmw_synccpu_flags flags;
+	uint32_t handle;
+	uint32_t pad64;
+};
 
 #endif
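
To illustrate the drm_vmw_synccpu_dontblock flag documented above, a non-blocking grab could look like the sketch below, again a hypothetical userspace helper and not part of the patch: instead of sleeping until the GPU is idle, the kernel returns -EBUSY while the buffer is still referenced, letting the caller retry later or do other work first.

#include <stdint.h>

#include <xf86drm.h>
#include "vmwgfx_drm.h"

/* Hypothetical helper: try to sync @handle for read without sleeping.
 * Returns 0 on success or -EBUSY while the buffer is still busy
 * (drmCommandWrite() returns negative errno values on failure). */
static int vmw_bo_try_sync_for_read(int fd, uint32_t handle)
{
	struct drm_vmw_synccpu_arg arg = {
		.op = drm_vmw_synccpu_grab,
		.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_dontblock,
		.handle = handle,
	};

	return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}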