Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c'):

 drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 119
 1 file changed, 119 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 4683626b065f..d1f05489595b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -23,6 +23,14 @@
  *
  * Authors: Alex Deucher
  */
+
+/**
+ * DOC: PRIME Buffer Sharing
+ *
+ * The following callback implementations are used for :ref:`sharing GEM buffer
+ * objects between different devices via PRIME <prime_buffer_sharing>`.
+ */
+
 #include <drm/drmP.h>
 
 #include "amdgpu.h"
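
As a point of reference outside this patch, the sketch below shows roughly what PRIME buffer sharing looks like from userspace with libdrm: a GEM handle on the exporting device becomes a dma-buf file descriptor, which the importing device converts back into a handle on its side. The function name and the handle/fd values are hypothetical; only drmPrimeHandleToFD(), drmPrimeFDToHandle() and DRM_CLOEXEC are real libdrm/DRM interfaces.

/* Hypothetical userspace sketch of PRIME sharing via libdrm. */
#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>

static int share_buffer(int exporter_fd, uint32_t gem_handle, int importer_fd)
{
	int prime_fd = -1;
	uint32_t imported_handle = 0;

	/* Exporter side: resolves to drm_gem_prime_handle_to_fd(), which invokes
	 * the driver's ->gem_prime_export hook (amdgpu_gem_prime_export here). */
	if (drmPrimeHandleToFD(exporter_fd, gem_handle, DRM_CLOEXEC, &prime_fd))
		return -1;

	/* Importer side: resolves to drm_gem_prime_fd_to_handle(), which invokes
	 * ->gem_prime_import (amdgpu_gem_prime_import here). */
	if (drmPrimeFDToHandle(importer_fd, prime_fd, &imported_handle))
		return -1;

	printf("dma-buf fd %d imported as GEM handle %u\n", prime_fd, imported_handle);
	return 0;
}
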
@@ -32,6 +40,14 @@
 
 static const struct dma_buf_ops amdgpu_dmabuf_ops;
 
+/**
+ * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
+ * implementation
+ * @obj: GEM buffer object
+ *
+ * Returns:
+ * A scatter/gather table for the pinned pages of the buffer object's memory.
+ */
 struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -40,6 +56,15 @@ struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
 	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
 }
 
+/**
+ * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
+ * @obj: GEM buffer object
+ *
+ * Sets up an in-kernel virtual mapping of the buffer object's memory.
+ *
+ * Returns:
+ * The virtual address of the mapping or an error pointer.
+ */
 void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
 {
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -53,6 +78,13 @@ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
 	return bo->dma_buf_vmap.virtual;
 }
 
+/**
+ * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
+ * @obj: GEM buffer object
+ * @vaddr: virtual address (unused)
+ *
+ * Tears down the in-kernel virtual mapping of the buffer object's memory.
+ */
 void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 {
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -60,6 +92,17 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 	ttm_bo_kunmap(&bo->dma_buf_vmap);
 }
 
+/**
+ * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
+ * @obj: GEM buffer object
+ * @vma: virtual memory area
+ *
+ * Sets up a userspace mapping of the buffer object's memory in the given
+ * virtual memory area.
+ *
+ * Returns:
+ * 0 on success or negative error code.
+ */
 int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 {
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -94,6 +137,19 @@ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma
 	return ret;
 }
 
+/**
+ * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
+ * implementation
+ * @dev: DRM device
+ * @attach: DMA-buf attachment
+ * @sg: Scatter/gather table
+ *
+ * Import shared DMA buffer memory exported by another device.
+ *
+ * Returns:
+ * A new GEM buffer object of the given DRM device, representing the memory
+ * described by the given DMA-buf attachment and scatter/gather table.
+ */
 struct drm_gem_object *
 amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 				 struct dma_buf_attachment *attach,
@@ -132,6 +188,19 @@ error:
 	return ERR_PTR(ret);
 }
 
+/**
+ * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
+ * @dma_buf: shared DMA buffer
+ * @target_dev: target device
+ * @attach: DMA-buf attachment
+ *
+ * Makes sure that the shared DMA buffer can be accessed by the target device.
+ * For now, simply pins it to the GTT domain, where it should be accessible by
+ * all DMA devices.
+ *
+ * Returns:
+ * 0 on success or negative error code.
+ */
 static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
 				 struct device *target_dev,
 				 struct dma_buf_attachment *attach)
@@ -181,6 +250,14 @@ error_detach:
 	return r;
 }
 
+/**
+ * amdgpu_gem_map_detach - &dma_buf_ops.detach implementation
+ * @dma_buf: shared DMA buffer
+ * @attach: DMA-buf attachment
+ *
+ * This is called when a shared DMA buffer no longer needs to be accessible by
+ * the other device. For now, simply unpins the buffer from GTT.
+ */
 static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
 				  struct dma_buf_attachment *attach)
 {
@@ -202,6 +279,13 @@ error:
 	drm_gem_map_detach(dma_buf, attach);
 }
 
+/**
+ * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
+ * @obj: GEM buffer object
+ *
+ * Returns:
+ * The buffer object's reservation object.
+ */
 struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
 {
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -209,6 +293,18 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
 	return bo->tbo.resv;
 }
 
+/**
+ * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
+ * @dma_buf: shared DMA buffer
+ * @direction: direction of DMA transfer
+ *
+ * This is called before CPU access to the shared DMA buffer's memory. If it's
+ * a read access, the buffer is moved to the GTT domain if possible, for optimal
+ * CPU read performance.
+ *
+ * Returns:
+ * 0 on success or negative error code.
+ */
 static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
 				       enum dma_data_direction direction)
 {
@@ -253,6 +349,18 @@ static const struct dma_buf_ops amdgpu_dmabuf_ops = {
 	.vunmap = drm_gem_dmabuf_vunmap,
 };
 
+/**
+ * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
+ * @dev: DRM device
+ * @gobj: GEM buffer object
+ * @flags: flags like DRM_CLOEXEC and DRM_RDWR
+ *
+ * The main work is done by the &drm_gem_prime_export helper, which in turn
+ * uses &amdgpu_gem_prime_res_obj.
+ *
+ * Returns:
+ * Shared DMA buffer representing the GEM buffer object from the given device.
+ */
 struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 					struct drm_gem_object *gobj,
 					int flags)
@@ -273,6 +381,17 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 	return buf;
 }
 
+/**
+ * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
+ * @dev: DRM device
+ * @dma_buf: Shared DMA buffer
+ *
+ * The main work is done by the &drm_gem_prime_import helper, which in turn
+ * uses &amdgpu_gem_prime_import_sg_table.
+ *
+ * Returns:
+ * GEM buffer object representing the shared DMA buffer for the given device.
+ */
 struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
 					       struct dma_buf *dma_buf)
 {