-rw-r--r--   drivers/gpu/drm/i915/i915_drv.h          3
-rw-r--r--   drivers/gpu/drm/i915/i915_gem_dmabuf.c  55
2 files changed, 58 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 377c21f531e4..c9cfc67c2cf5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -942,6 +942,9 @@ struct drm_i915_gem_object {
 
 	/* prime dma-buf support */
 	struct sg_table *sg_table;
+	void *dma_buf_vmapping;
+	int vmapping_count;
+
 	/**
 	 * Used for performing relocations during execbuffer insertion.
 	 */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 4fba63e896d7..aa308e1337db 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -74,6 +74,59 @@ static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
 	}
 }
 
+static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (obj->dma_buf_vmapping) {
+		obj->vmapping_count++;
+		goto out_unlock;
+	}
+
+	if (!obj->pages) {
+		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return ERR_PTR(ret);
+		}
+	}
+
+	obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
+	if (!obj->dma_buf_vmapping) {
+		DRM_ERROR("failed to vmap object\n");
+		goto out_unlock;
+	}
+
+	obj->vmapping_count = 1;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return obj->dma_buf_vmapping;
+}
+
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return;
+
+	--obj->vmapping_count;
+	if (obj->vmapping_count == 0) {
+		vunmap(obj->dma_buf_vmapping);
+		obj->dma_buf_vmapping = NULL;
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
 static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
 {
 	return NULL;
@@ -107,6 +160,8 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
 	.kunmap = i915_gem_dmabuf_kunmap,
 	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
 	.mmap = i915_gem_dmabuf_mmap,
+	.vmap = i915_gem_dmabuf_vmap,
+	.vunmap = i915_gem_dmabuf_vunmap,
 };
 
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
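
Usage sketch (not part of the patch): with .vmap/.vunmap wired into i915_dmabuf_ops, an importing driver reaches these hooks through the generic dma_buf_vmap()/dma_buf_vunmap() entry points of the dma-buf core. The minimal importer-side sketch below assumes the v3.5-era API, where dma_buf_vmap() returns a void * directly; example_cpu_fill() and its pattern argument are hypothetical names. One caveat worth noting in the code above: i915_gem_dmabuf_vmap() returns an ERR_PTR() when taking struct_mutex or populating pages fails, but plain NULL when vmap() itself fails, so a caller should check for both.

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical importer-side helper; names are illustrative only. */
static int example_cpu_fill(struct dma_buf *buf, u8 pattern)
{
	void *vaddr;

	/* Dispatches to i915_gem_dmabuf_vmap() through buf->ops->vmap;
	 * repeated calls only bump vmapping_count. */
	vaddr = dma_buf_vmap(buf);

	/* The exporter returns ERR_PTR() on lock/page failures but NULL
	 * when the vmap() call itself fails, so check for both. */
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr ? PTR_ERR(vaddr) : -ENOMEM;

	memset(vaddr, pattern, buf->size);	/* plain CPU access */

	/* Drops one reference; i915_gem_dmabuf_vunmap() tears the
	 * mapping down once vmapping_count reaches zero. */
	dma_buf_vunmap(buf, vaddr);
	return 0;
}

The refcount kept in vmapping_count means multiple in-kernel users can hold the same kernel virtual mapping concurrently; only the last dma_buf_vunmap() actually calls vunmap().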
