diff options
author | Alex Deucher <alexander.deucher@amd.com> | 2012-05-10 18:33:13 -0400 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2012-05-23 05:47:11 -0400 |
commit | 40f5cf996991577ec65d36cd3599cca7ec5d87d3 (patch) | |
tree | 903797b4e09b9444450fb35b4101c1fd9b8530b0 /drivers/gpu/drm/radeon/radeon_prime.c | |
parent | 1286ff7397737e407cdd8e5cd574318db177ba1f (diff) |
drm/radeon: add PRIME support (v2)
This adds prime->fd and fd->prime support to radeon.
It passes the sg object to ttm and then populates
the gart entries using it.
Compile tested only.
v2: stub kmap + use new helpers + add reimporting
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_prime.c')
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_prime.c | 176 |
1 file changed, 176 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c new file mode 100644 index 000000000000..24f51753c9aa --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_prime.c | |||
@@ -0,0 +1,176 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * based on nouveau_prime.c | ||
23 | * | ||
24 | * Authors: Alex Deucher | ||
25 | */ | ||
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/errno.h>

#include "drmP.h"
#include "drm.h"

#include "radeon.h"
#include "radeon_drm.h"
33 | |||
34 | static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment, | ||
35 | enum dma_data_direction dir) | ||
36 | { | ||
37 | struct radeon_bo *bo = attachment->dmabuf->priv; | ||
38 | struct drm_device *dev = bo->rdev->ddev; | ||
39 | int npages = bo->tbo.num_pages; | ||
40 | struct sg_table *sg; | ||
41 | int nents; | ||
42 | |||
43 | mutex_lock(&dev->struct_mutex); | ||
44 | sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); | ||
45 | nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir); | ||
46 | mutex_unlock(&dev->struct_mutex); | ||
47 | return sg; | ||
48 | } | ||
49 | |||
/*
 * radeon_gem_unmap_dma_buf - &dma_buf_ops.unmap_dma_buf callback
 *
 * Undo radeon_gem_map_dma_buf(): unmap the scatterlist for the
 * importing device, then release the sg_table itself — ownership of
 * @sg was transferred to the importer by map_dma_buf, so it is freed
 * here, not by the exporter's BO teardown.
 */
static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				     struct sg_table *sg, enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}
57 | |||
/*
 * radeon_gem_dmabuf_release - &dma_buf_ops.release callback
 *
 * Runs when the last reference to the exported dma-buf is dropped.
 * Clears the cached export_dma_buf pointer (so a later export creates
 * a fresh dma-buf) and drops the GEM reference the export took.
 *
 * NOTE(review): DRM_ERROR logs this normal teardown path at error
 * level — looks like leftover debug output; confirm intended.
 * NOTE(review): the GTT pin taken in radeon_gem_prime_export() is not
 * dropped here — verify whether the BO is meant to stay pinned.
 */
static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct radeon_bo *bo = dma_buf->priv;

	if (bo->gem_base.export_dma_buf == dma_buf) {
		DRM_ERROR("unreference dmabuf %p\n", &bo->gem_base);
		bo->gem_base.export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(&bo->gem_base);
	}
}
68 | |||
/*
 * CPU atomic-kmap access is deliberately stubbed (v2 of this patch):
 * returning NULL tells importers no CPU mapping is available.
 */
static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}
73 | |||
/* No-op: nothing to undo since radeon_gem_kmap_atomic() never maps. */
static void radeon_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}
/*
 * CPU kmap access is deliberately stubbed (v2 of this patch):
 * returning NULL tells importers no CPU mapping is available.
 */
static void *radeon_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}
82 | |||
/* No-op: nothing to undo since radeon_gem_kmap() never maps. */
static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}
87 | |||
/*
 * dma-buf operations for buffers exported by radeon.  Only device
 * (scatter-gather) access is implemented; the CPU kmap hooks are
 * stubs.  radeon_gem_prime_import() also compares against this table
 * to recognise dma-bufs that radeon itself exported.
 */
struct dma_buf_ops radeon_dmabuf_ops = {
	.map_dma_buf = radeon_gem_map_dma_buf,
	.unmap_dma_buf = radeon_gem_unmap_dma_buf,
	.release = radeon_gem_dmabuf_release,
	.kmap = radeon_gem_kmap,
	.kmap_atomic = radeon_gem_kmap_atomic,
	.kunmap = radeon_gem_kunmap,
	.kunmap_atomic = radeon_gem_kunmap_atomic,
};
97 | |||
98 | static int radeon_prime_create(struct drm_device *dev, | ||
99 | size_t size, | ||
100 | struct sg_table *sg, | ||
101 | struct radeon_bo **pbo) | ||
102 | { | ||
103 | struct radeon_device *rdev = dev->dev_private; | ||
104 | struct radeon_bo *bo; | ||
105 | int ret; | ||
106 | |||
107 | ret = radeon_bo_create(rdev, size, PAGE_SIZE, false, | ||
108 | RADEON_GEM_DOMAIN_GTT, sg, pbo); | ||
109 | if (ret) | ||
110 | return ret; | ||
111 | bo = *pbo; | ||
112 | bo->gem_base.driver_private = bo; | ||
113 | |||
114 | mutex_lock(&rdev->gem.mutex); | ||
115 | list_add_tail(&bo->list, &rdev->gem.objects); | ||
116 | mutex_unlock(&rdev->gem.mutex); | ||
117 | |||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | struct dma_buf *radeon_gem_prime_export(struct drm_device *dev, | ||
122 | struct drm_gem_object *obj, | ||
123 | int flags) | ||
124 | { | ||
125 | struct radeon_bo *bo = gem_to_radeon_bo(obj); | ||
126 | int ret = 0; | ||
127 | |||
128 | /* pin buffer into GTT */ | ||
129 | ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL); | ||
130 | if (ret) | ||
131 | return ERR_PTR(ret); | ||
132 | |||
133 | return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags); | ||
134 | } | ||
135 | |||
/*
 * radeon_gem_prime_import - create a GEM object from a dma-buf
 *
 * Fast path: if the dma-buf was exported by radeon on this same device
 * (recognised by its ops pointing at radeon_dmabuf_ops), just take
 * another reference on the underlying GEM object instead of
 * re-importing our own buffer.
 *
 * Otherwise attach to the foreign dma-buf, map the attachment to get
 * an sg_table, and wrap it in a new GTT-domain BO via
 * radeon_prime_create().  Errors unwind with goto-based cleanup in
 * reverse acquisition order.
 *
 * Returns the GEM object or an ERR_PTR on failure.
 */
struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct radeon_bo *bo;
	int ret;

	/* is this one of our own exports? (reimport case added in v2) */
	if (dma_buf->ops == &radeon_dmabuf_ops) {
		bo = dma_buf->priv;
		if (bo->gem_base.dev == dev) {
			drm_gem_object_reference(&bo->gem_base);
			return &bo->gem_base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
	if (ret)
		goto fail_unmap;

	/* remember the attachment so the mapping can be torn down later */
	bo->gem_base.import_attach = attach;

	return &bo->gem_base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}