aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
diff options
context:
space:
mode:
author: Dave Airlie <airlied@redhat.com> 2018-06-21 23:18:32 -0400
committer: Dave Airlie <airlied@redhat.com> 2018-06-21 23:19:05 -0400
commit565c17b5f02dacd8430da8d95bbba60587f339af (patch)
tree63dc8a786f522f11e44058ec9707af12fb3d83c4 /drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
parentf4366e44efeb895c358fddd11f9ecee81bdad06b (diff)
parenta21daa88d4f08c959a36ad9760df045407a080e5 (diff)
Merge branch 'drm-next-4.19' of git://people.freedesktop.org/~agd5f/linux into drm-next
First feature request for 4.19. Highlights: - Add initial amdgpu documentation - Add initial GPU scheduler documentation - GPU scheduler fixes for dying processes - Add support for the JPEG engine on VCN - Switch CI to use powerplay by default - EDC support for CZ - More powerplay cleanups - Misc DC fixes Signed-off-by: Dave Airlie <airlied@redhat.com> Link: https://patchwork.freedesktop.org/patch/msgid/20180621161138.3008-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c119
1 file changed, 119 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index a156b3891a3f..b2286bc41aec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -23,6 +23,14 @@
23 * 23 *
24 * Authors: Alex Deucher 24 * Authors: Alex Deucher
25 */ 25 */
26
27/**
28 * DOC: PRIME Buffer Sharing
29 *
30 * The following callback implementations are used for :ref:`sharing GEM buffer
31 * objects between different devices via PRIME <prime_buffer_sharing>`.
32 */
33
26#include <drm/drmP.h> 34#include <drm/drmP.h>
27 35
28#include "amdgpu.h" 36#include "amdgpu.h"
@@ -32,6 +40,14 @@
32 40
33static const struct dma_buf_ops amdgpu_dmabuf_ops; 41static const struct dma_buf_ops amdgpu_dmabuf_ops;
34 42
43/**
44 * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
45 * implementation
46 * @obj: GEM buffer object
47 *
48 * Returns:
49 * A scatter/gather table for the pinned pages of the buffer object's memory.
50 */
35struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) 51struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
36{ 52{
37 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); 53 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -40,6 +56,15 @@ struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
40 return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); 56 return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
41} 57}
42 58
59/**
60 * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
61 * @obj: GEM buffer object
62 *
63 * Sets up an in-kernel virtual mapping of the buffer object's memory.
64 *
65 * Returns:
66 * The virtual address of the mapping or an error pointer.
67 */
43void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj) 68void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
44{ 69{
45 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); 70 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -53,6 +78,13 @@ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
53 return bo->dma_buf_vmap.virtual; 78 return bo->dma_buf_vmap.virtual;
54} 79}
55 80
81/**
82 * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
83 * @obj: GEM buffer object
84 * @vaddr: virtual address (unused)
85 *
86 * Tears down the in-kernel virtual mapping of the buffer object's memory.
87 */
56void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) 88void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
57{ 89{
58 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); 90 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -60,6 +92,17 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
60 ttm_bo_kunmap(&bo->dma_buf_vmap); 92 ttm_bo_kunmap(&bo->dma_buf_vmap);
61} 93}
62 94
95/**
96 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
97 * @obj: GEM buffer object
98 * @vma: virtual memory area
99 *
100 * Sets up a userspace mapping of the buffer object's memory in the given
101 * virtual memory area.
102 *
103 * Returns:
104 * 0 on success or negative error code.
105 */
63int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) 106int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
64{ 107{
65 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); 108 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -94,6 +137,19 @@ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma
94 return ret; 137 return ret;
95} 138}
96 139
140/**
141 * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
142 * implementation
143 * @dev: DRM device
144 * @attach: DMA-buf attachment
145 * @sg: Scatter/gather table
146 *
147 * Import shared DMA buffer memory exported by another device.
148 *
149 * Returns:
150 * A new GEM buffer object of the given DRM device, representing the memory
151 * described by the given DMA-buf attachment and scatter/gather table.
152 */
97struct drm_gem_object * 153struct drm_gem_object *
98amdgpu_gem_prime_import_sg_table(struct drm_device *dev, 154amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
99 struct dma_buf_attachment *attach, 155 struct dma_buf_attachment *attach,
@@ -132,6 +188,19 @@ error:
132 return ERR_PTR(ret); 188 return ERR_PTR(ret);
133} 189}
134 190
191/**
192 * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
193 * @dma_buf: shared DMA buffer
194 * @target_dev: target device
195 * @attach: DMA-buf attachment
196 *
197 * Makes sure that the shared DMA buffer can be accessed by the target device.
198 * For now, simply pins it to the GTT domain, where it should be accessible by
199 * all DMA devices.
200 *
201 * Returns:
202 * 0 on success or negative error code.
203 */
135static int amdgpu_gem_map_attach(struct dma_buf *dma_buf, 204static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
136 struct dma_buf_attachment *attach) 205 struct dma_buf_attachment *attach)
137{ 206{
@@ -180,6 +249,14 @@ error_detach:
180 return r; 249 return r;
181} 250}
182 251
252/**
253 * amdgpu_gem_map_detach - &dma_buf_ops.detach implementation
254 * @dma_buf: shared DMA buffer
255 * @attach: DMA-buf attachment
256 *
257 * This is called when a shared DMA buffer no longer needs to be accessible by
258 * the other device. For now, simply unpins the buffer from GTT.
259 */
183static void amdgpu_gem_map_detach(struct dma_buf *dma_buf, 260static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
184 struct dma_buf_attachment *attach) 261 struct dma_buf_attachment *attach)
185{ 262{
@@ -201,6 +278,13 @@ error:
201 drm_gem_map_detach(dma_buf, attach); 278 drm_gem_map_detach(dma_buf, attach);
202} 279}
203 280
281/**
282 * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
283 * @obj: GEM buffer object
284 *
285 * Returns:
286 * The buffer object's reservation object.
287 */
204struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj) 288struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
205{ 289{
206 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); 290 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -208,6 +292,18 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
208 return bo->tbo.resv; 292 return bo->tbo.resv;
209} 293}
210 294
295/**
296 * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
297 * @dma_buf: shared DMA buffer
298 * @direction: direction of DMA transfer
299 *
300 * This is called before CPU access to the shared DMA buffer's memory. If it's
301 * a read access, the buffer is moved to the GTT domain if possible, for optimal
302 * CPU read performance.
303 *
304 * Returns:
305 * 0 on success or negative error code.
306 */
211static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf, 307static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
212 enum dma_data_direction direction) 308 enum dma_data_direction direction)
213{ 309{
@@ -250,6 +346,18 @@ static const struct dma_buf_ops amdgpu_dmabuf_ops = {
250 .vunmap = drm_gem_dmabuf_vunmap, 346 .vunmap = drm_gem_dmabuf_vunmap,
251}; 347};
252 348
349/**
350 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
351 * @dev: DRM device
352 * @gobj: GEM buffer object
353 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
354 *
355 * The main work is done by the &drm_gem_prime_export helper, which in turn
356 * uses &amdgpu_gem_prime_res_obj.
357 *
358 * Returns:
359 * Shared DMA buffer representing the GEM buffer object from the given device.
360 */
253struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, 361struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
254 struct drm_gem_object *gobj, 362 struct drm_gem_object *gobj,
255 int flags) 363 int flags)
@@ -270,6 +378,17 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
270 return buf; 378 return buf;
271} 379}
272 380
381/**
382 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
383 * @dev: DRM device
384 * @dma_buf: Shared DMA buffer
385 *
386 * The main work is done by the &drm_gem_prime_import helper, which in turn
387 * uses &amdgpu_gem_prime_import_sg_table.
388 *
389 * Returns:
390 * GEM buffer object representing the shared DMA buffer for the given device.
391 */
273struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, 392struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
274 struct dma_buf *dma_buf) 393 struct dma_buf *dma_buf)
275{ 394{