author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_resource.c')
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_resource.c	89
1 file changed, 8 insertions, 81 deletions
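The hunks below make two separable changes. First, all per-buffer GMR (Guest Memory Region) id bookkeeping is deleted from this file: the vmw_dmabuf_gmr_unbind() helper, the gmr_lru/gmr_id/gmr_bound initialization in vmw_dmabuf_init(), the vmw_dmabuf_gmr() and vmw_dmabuf_set_gmr() accessors, and the ida-backed vmw_gmr_id_alloc() allocator. Second, vmw_dmabuf_alloc_ioctl() is reworked to unwind errors through goto labels and to return its actual status code rather than an unconditional 0.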
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c8c40e9979db..5408b1b7996f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -765,28 +765,11 @@ static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
 	return bo_user_size + page_array_size;
 }
 
-void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
-{
-	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-	struct ttm_bo_global *glob = bo->glob;
-	struct vmw_private *dev_priv =
-		container_of(bo->bdev, struct vmw_private, bdev);
-
-	if (vmw_bo->gmr_bound) {
-		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
-		spin_lock(&glob->lru_lock);
-		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
-		spin_unlock(&glob->lru_lock);
-		vmw_bo->gmr_bound = false;
-	}
-}
-
 void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 {
 	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
 	struct ttm_bo_global *glob = bo->glob;
 
-	vmw_dmabuf_gmr_unbind(bo);
 	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
 	kfree(vmw_bo);
 }
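With vmw_dmabuf_gmr_unbind() gone, this destructor (and vmw_user_dmabuf_destroy() in the third hunk below) no longer tears down a GMR binding before freeing; each reduces to ttm_mem_global_free() plus kfree(). Where the binding is released after this change is not visible on this page.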
@@ -818,10 +801,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 
 	memset(vmw_bo, 0, sizeof(*vmw_bo));
 
-	INIT_LIST_HEAD(&vmw_bo->gmr_lru);
 	INIT_LIST_HEAD(&vmw_bo->validate_list);
-	vmw_bo->gmr_id = 0;
-	vmw_bo->gmr_bound = false;
 
 	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
 			  ttm_bo_type_device, placement,
@@ -835,7 +815,6 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
 	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
 	struct ttm_bo_global *glob = bo->glob;
 
-	vmw_dmabuf_gmr_unbind(bo);
 	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
 	kfree(vmw_user_bo);
 }
@@ -883,7 +862,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 			       &vmw_vram_sys_placement, true,
 			       &vmw_user_dmabuf_destroy);
 	if (unlikely(ret != 0))
-		return ret;
+		goto out_no_dmabuf;
 
 	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
 	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
@@ -891,19 +870,21 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 				   false,
 				   ttm_buffer_type,
 				   &vmw_user_dmabuf_release, NULL);
-	if (unlikely(ret != 0)) {
-		ttm_bo_unref(&tmp);
-	} else {
+	if (unlikely(ret != 0))
+		goto out_no_base_object;
+	else {
 		rep->handle = vmw_user_bo->base.hash.key;
 		rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
 		rep->cur_gmr_id = vmw_user_bo->base.hash.key;
 		rep->cur_gmr_offset = 0;
 	}
-	ttm_bo_unref(&tmp);
 
+out_no_base_object:
+	ttm_bo_unref(&tmp);
+out_no_dmabuf:
 	ttm_read_unlock(&vmaster->lock);
 
-	return 0;
+	return ret;
 }
 
 int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
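The rework above is the kernel's standard goto-unwind idiom: each failure jumps to a label that undoes only what was already set up, the temporary reference tmp is dropped on every path through out_no_base_object, and the single exit propagates ret instead of the old unconditional return 0 (which reported success even when ttm_base_object_init() had failed). Below is a minimal, self-contained sketch of the shape with hypothetical names; none of it is vmwgfx code:

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	/*
	 * Sketch of the goto-unwind idiom: acquire resources in order,
	 * and on failure jump to a label that releases exactly what was
	 * acquired so far, so the function has one exit returning `ret`.
	 */
	static int demo_alloc(void)
	{
		char *buf_a, *buf_b;
		int ret = 0;

		buf_a = malloc(32);
		if (buf_a == NULL) {
			ret = -ENOMEM;
			goto out_no_a;		/* nothing acquired yet */
		}

		buf_b = malloc(32);
		if (buf_b == NULL) {
			ret = -ENOMEM;
			goto out_no_b;		/* undo only buf_a */
		}

		puts("both steps succeeded");
		free(buf_b);	/* demo only: release, then fall through */

	out_no_b:
		free(buf_a);
	out_no_a:
		return ret;
	}

	int main(void)
	{
		return demo_alloc() == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
	}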
@@ -938,25 +919,6 @@ void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
 	vmw_bo->on_validate_list = false;
 }
 
-uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
-{
-	struct vmw_dma_buffer *vmw_bo;
-
-	if (bo->mem.mem_type == TTM_PL_VRAM)
-		return SVGA_GMR_FRAMEBUFFER;
-
-	vmw_bo = vmw_dma_buffer(bo);
-
-	return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
-}
-
-void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
-{
-	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-	vmw_bo->gmr_bound = true;
-	vmw_bo->gmr_id = id;
-}
-
 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 			   uint32_t handle, struct vmw_dma_buffer **out)
 {
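The two accessors removed here were the query/update half of the bookkeeping: vmw_dmabuf_gmr() reported SVGA_GMR_FRAMEBUFFER for a buffer placed in VRAM, the bound gmr_id otherwise, or SVGA_GMR_NULL when unbound, while vmw_dmabuf_set_gmr() recorded a new binding. After this change, callers elsewhere in the driver presumably obtain GMR ids some other way; this page alone does not show how.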
@@ -985,41 +947,6 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 	return 0;
 }
 
-/**
- * TODO: Implement a gmr id eviction mechanism. Currently we just fail
- * when we're out of ids, causing GMR space to be allocated
- * out of VRAM.
- */
-
-int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
-{
-	struct ttm_bo_global *glob = dev_priv->bdev.glob;
-	int id;
-	int ret;
-
-	do {
-		if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
-			return -ENOMEM;
-
-		spin_lock(&glob->lru_lock);
-		ret = ida_get_new(&dev_priv->gmr_ida, &id);
-		spin_unlock(&glob->lru_lock);
-	} while (ret == -EAGAIN);
-
-	if (unlikely(ret != 0))
-		return ret;
-
-	if (unlikely(id >= dev_priv->max_gmr_ids)) {
-		spin_lock(&glob->lru_lock);
-		ida_remove(&dev_priv->gmr_ida, id);
-		spin_unlock(&glob->lru_lock);
-		return -EBUSY;
-	}
-
-	*p_id = (uint32_t) id;
-	return 0;
-}
-
 /*
  * Stream management
  */
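The allocator removed in the last hunk open-codes the old-style ida protocol: ida_pre_get() to preload memory, ida_get_new() under the borrowed lru_lock, a retry loop on -EAGAIN, and a manual upper-bound check that hands the id back with ida_remove() on overflow. For comparison, here is a hedged kernel-style sketch (not from this tree) of the same allocation written against ida_simple_get(), which bundles the preload/retry loop, the range check, and its own internal locking:

	#include <linux/gfp.h>
	#include <linux/idr.h>

	/*
	 * Hypothetical rewrite, assuming ida_simple_get() is available:
	 * it allocates an id in [0, max_gmr_ids), retries internally on
	 * -EAGAIN, and takes its own lock, so the lru_lock juggling in
	 * the removed code disappears. On exhaustion it returns -ENOSPC
	 * where vmw_gmr_id_alloc() returned -EBUSY. dev_priv and
	 * max_gmr_ids are the fields the removed code used.
	 */
	static int gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
	{
		int id = ida_simple_get(&dev_priv->gmr_ida, 0,
					dev_priv->max_gmr_ids, GFP_KERNEL);

		if (id < 0)
			return id;

		*p_id = (uint32_t) id;
		return 0;
	}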