Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_resource.c')
 -rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 78
 1 file changed, 48 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c012d5927f65..c7efbd47ab84 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -35,6 +35,11 @@
 #define VMW_RES_SURFACE ttm_driver_type1
 #define VMW_RES_STREAM ttm_driver_type2
 
+/* XXX: This isn't a real hardware flag, but just a hack for kernel to
+ * know about primary surfaces. Find a better way to accomplish this.
+ */
+#define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9)
+
 struct vmw_user_context {
 	struct ttm_base_object base;
 	struct vmw_resource res;
@@ -599,6 +604,36 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	if (unlikely(ret != 0))
 		goto out_err1;
 
+	if (srf->flags & SVGA3D_SURFACE_HINT_SCANOUT) {
+		/* we should not send this flag down to hardware since
+		 * its not a official one
+		 */
+		srf->flags &= ~SVGA3D_SURFACE_HINT_SCANOUT;
+		srf->scanout = true;
+	} else {
+		srf->scanout = false;
+	}
+
+	if (srf->scanout &&
+	    srf->num_sizes == 1 &&
+	    srf->sizes[0].width == 64 &&
+	    srf->sizes[0].height == 64 &&
+	    srf->format == SVGA3D_A8R8G8B8) {
+
+		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
+		/* clear the image */
+		if (srf->snooper.image) {
+			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
+		} else {
+			DRM_ERROR("Failed to allocate cursor_image\n");
+			ret = -ENOMEM;
+			goto out_err1;
+		}
+	} else {
+		srf->snooper.image = NULL;
+	}
+	srf->snooper.crtc = NULL;
+
 	user_srf->base.shareable = false;
 	user_srf->base.tfile = NULL;
 
@@ -622,24 +657,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 		return ret;
 	}
 
-	if (srf->flags & (1 << 9) &&
-	    srf->num_sizes == 1 &&
-	    srf->sizes[0].width == 64 &&
-	    srf->sizes[0].height == 64 &&
-	    srf->format == SVGA3D_A8R8G8B8) {
-
-		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
-		/* clear the image */
-		if (srf->snooper.image)
-			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
-		else
-			DRM_ERROR("Failed to allocate cursor_image\n");
-
-	} else {
-		srf->snooper.image = NULL;
-	}
-	srf->snooper.crtc = NULL;
-
 	rep->sid = user_srf->base.hash.key;
 	if (rep->sid == SVGA3D_INVALID_ID)
 		DRM_ERROR("Created bad Surface ID.\n");
@@ -754,20 +771,29 @@ static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
 	return bo_user_size + page_array_size;
 }
 
-void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
+void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
 {
 	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
 	struct ttm_bo_global *glob = bo->glob;
 	struct vmw_private *dev_priv =
 		container_of(bo->bdev, struct vmw_private, bdev);
 
-	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
 	if (vmw_bo->gmr_bound) {
 		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
 		spin_lock(&glob->lru_lock);
 		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
 		spin_unlock(&glob->lru_lock);
+		vmw_bo->gmr_bound = false;
 	}
+}
+
+void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
+{
+	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+	struct ttm_bo_global *glob = bo->glob;
+
+	vmw_dmabuf_gmr_unbind(bo);
+	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
 	kfree(vmw_bo);
 }
 
@@ -813,18 +839,10 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
 {
 	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
-	struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma;
 	struct ttm_bo_global *glob = bo->glob;
-	struct vmw_private *dev_priv =
-		container_of(bo->bdev, struct vmw_private, bdev);
 
+	vmw_dmabuf_gmr_unbind(bo);
 	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
-	if (vmw_bo->gmr_bound) {
-		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
-		spin_lock(&glob->lru_lock);
-		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
-		spin_unlock(&glob->lru_lock);
-	}
 	kfree(vmw_user_bo);
 }
 
@@ -868,7 +886,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 	}
 
 	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
-			      &vmw_vram_placement, true,
+			      &vmw_vram_sys_placement, true,
 			      &vmw_user_dmabuf_destroy);
 	if (unlikely(ret != 0))
 		return ret;
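
For readers following the logic rather than the diff itself, below is a minimal userspace sketch of the scanout-hint handling the second hunk adds to vmw_surface_define_ioctl(). Everything in it (struct mock_surface, surface_setup_scanout(), the MOCK_FORMAT_A8R8G8B8 value) is an illustrative stand-in of mine, not the driver's actual code, and calloc() replaces the kernel's kmalloc()/memset() pair.

/*
 * Standalone mock of the scanout handling added above: strip the
 * software-only hint before anything is sent to the device, remember
 * it in the surface, and allocate a zeroed 64x64 ARGB snooper image
 * only for cursor-sized surfaces. Simplified stand-in types only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9) /* software-only hint bit */
#define MOCK_FORMAT_A8R8G8B8        21       /* placeholder, not the real SVGA3D enum value */

struct mock_size { uint32_t width, height; };

struct mock_surface {
	uint32_t flags;
	uint32_t format;
	uint32_t num_sizes;
	struct mock_size sizes[1];
	bool scanout;
	void *snooper_image;
};

static int surface_setup_scanout(struct mock_surface *srf)
{
	/* The hint must never reach the hardware; it only tells the
	 * kernel that this surface may be scanned out. */
	if (srf->flags & SVGA3D_SURFACE_HINT_SCANOUT) {
		srf->flags &= ~SVGA3D_SURFACE_HINT_SCANOUT;
		srf->scanout = true;
	} else {
		srf->scanout = false;
	}

	/* Only a single 64x64 A8R8G8B8 scanout surface gets a snooper
	 * image; everything else leaves it NULL. */
	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == MOCK_FORMAT_A8R8G8B8) {
		srf->snooper_image = calloc(1, 64 * 64 * 4);
		if (!srf->snooper_image)
			return -1; /* the driver returns -ENOMEM here */
	} else {
		srf->snooper_image = NULL;
	}
	return 0;
}

int main(void)
{
	struct mock_surface srf = {
		.flags     = SVGA3D_SURFACE_HINT_SCANOUT,
		.format    = MOCK_FORMAT_A8R8G8B8,
		.num_sizes = 1,
		.sizes     = { { 64, 64 } },
	};

	if (surface_setup_scanout(&srf) == 0)
		printf("scanout=%d snooper=%s\n",
		       srf.scanout, srf.snooper_image ? "allocated" : "none");

	free(srf.snooper_image);
	return 0;
}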