Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c	70
1 file changed, 69 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index ae9c106979d7..8afec21dc45d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -26,6 +26,7 @@
 #include <drm/drmP.h>
 
 #include "amdgpu.h"
+#include "amdgpu_display.h"
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
 
@@ -164,6 +165,50 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
 	return bo->tbo.resv;
 }
 
+static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
+				       enum dma_data_direction direction)
+{
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct ttm_operation_ctx ctx = { true, false };
+	u32 domain = amdgpu_display_framebuffer_domains(adev);
+	int ret;
+	bool reads = (direction == DMA_BIDIRECTIONAL ||
+		      direction == DMA_FROM_DEVICE);
+
+	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
+		return 0;
+
+	/* move to gtt */
+	ret = amdgpu_bo_reserve(bo, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
+		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+	}
+
+	amdgpu_bo_unreserve(bo);
+	return ret;
+}
+
+static const struct dma_buf_ops amdgpu_dmabuf_ops = {
+	.attach = drm_gem_map_attach,
+	.detach = drm_gem_map_detach,
+	.map_dma_buf = drm_gem_map_dma_buf,
+	.unmap_dma_buf = drm_gem_unmap_dma_buf,
+	.release = drm_gem_dmabuf_release,
+	.begin_cpu_access = amdgpu_gem_begin_cpu_access,
+	.map = drm_gem_dmabuf_kmap,
+	.map_atomic = drm_gem_dmabuf_kmap_atomic,
+	.unmap = drm_gem_dmabuf_kunmap,
+	.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
+	.mmap = drm_gem_dmabuf_mmap,
+	.vmap = drm_gem_dmabuf_vmap,
+	.vunmap = drm_gem_dmabuf_vunmap,
+};
+
 struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 					struct drm_gem_object *gobj,
 					int flags)
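
The new .begin_cpu_access hook above only runs when an importer brackets its CPU reads with the dma-buf CPU-access API. A minimal importer-side sketch of that bracket follows (hypothetical, not part of this patch; read_exported_bo() is an illustrative name, and the void * dma_buf_vmap()/dma_buf_vunmap() signatures are the ones from this kernel era):

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>

static int read_exported_bo(struct dma_buf *buf)
{
	void *vaddr;
	int ret;

	/* Lets the exporter migrate the BO to GTT before the CPU reads. */
	ret = dma_buf_begin_cpu_access(buf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	vaddr = dma_buf_vmap(buf);
	if (vaddr) {
		/* ... CPU reads from vaddr ... */
		dma_buf_vunmap(buf, vaddr);
	}

	return dma_buf_end_cpu_access(buf, DMA_FROM_DEVICE);
}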
@@ -176,7 +221,30 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 		return ERR_PTR(-EPERM);
 
 	buf = drm_gem_prime_export(dev, gobj, flags);
-	if (!IS_ERR(buf))
+	if (!IS_ERR(buf)) {
 		buf->file->f_mapping = dev->anon_inode->i_mapping;
+		buf->ops = &amdgpu_dmabuf_ops;
+	}
+
 	return buf;
 }
+
+struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+					       struct dma_buf *dma_buf)
+{
+	struct drm_gem_object *obj;
+
+	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
+		obj = dma_buf->priv;
+		if (obj->dev == dev) {
+			/*
+			 * Importing a dmabuf exported from our own gem bumps
+			 * the gem refcount rather than the dmabuf's f_count.
+			 */
+			drm_gem_object_get(obj);
+			return obj;
+		}
+	}
+
+	return drm_gem_prime_import(dev, dma_buf);
+}
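
For context, the self-import fast path above is what a PRIME round-trip between two fds opened on the same amdgpu device ends up exercising; since exported buffers now carry amdgpu_dmabuf_ops rather than the core helper ops, the driver has to recognize its own buffers itself. A hypothetical userspace sketch (roundtrip() and its parameters are illustrative names, not part of this patch):

#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>

/* Export a GEM handle as a PRIME fd, then import it through a second
 * DRM fd on the same device; with this patch the import side takes
 * the refcount fast path instead of re-attaching the dma-buf.
 */
static int roundtrip(int export_fd, int import_fd, uint32_t handle)
{
	uint32_t new_handle;
	int prime_fd;

	if (drmPrimeHandleToFD(export_fd, handle, DRM_CLOEXEC, &prime_fd))
		return -1;
	if (drmPrimeFDToHandle(import_fd, prime_fd, &new_handle)) {
		close(prime_fd);
		return -1;
	}

	close(prime_fd);
	/* new_handle now refers to the same underlying GEM object */
	return 0;
}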