author	Zhenyu Wang <zhenyuw@linux.intel.com>	2018-04-16 22:43:57 -0400
committer	Zhenyu Wang <zhenyuw@linux.intel.com>	2018-04-16 22:45:23 -0400
commit	30596ec32e2cd141d73ee8701386887def9e98c0
tree	c8b0d725c46fd8fa504ec0bf41c92c6ff680b406 /drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
parent	d54e79340ff8d65b6c63ac278158add2fe211fd0
parent	60cc43fc888428bb2f18f08997432d426a243338
Back merge 'drm-intel-fixes' into gvt-fixes
Need for 4.17-rc1
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c	164
1 file changed, 135 insertions, 29 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index ae9c106979d7..4b584cb75bf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -26,9 +26,12 @@
 #include <drm/drmP.h>
 
 #include "amdgpu.h"
+#include "amdgpu_display.h"
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
 
+static const struct dma_buf_ops amdgpu_dmabuf_ops;
+
 struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -102,59 +105,95 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 	int ret;
 
 	ww_mutex_lock(&resv->lock, NULL);
-	ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
-			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, 0, &bo);
-	ww_mutex_unlock(&resv->lock);
+	ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE,
+			       AMDGPU_GEM_DOMAIN_CPU, 0, ttm_bo_type_sg,
+			       resv, &bo);
 	if (ret)
-		return ERR_PTR(ret);
+		goto error;
 
-	bo->prime_shared_count = 1;
+	bo->tbo.sg = sg;
+	bo->tbo.ttm->sg = sg;
+	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
+	if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
+		bo->prime_shared_count = 1;
+
+	ww_mutex_unlock(&resv->lock);
 	return &bo->gem_base;
+
+error:
+	ww_mutex_unlock(&resv->lock);
+	return ERR_PTR(ret);
 }
 
-int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
+static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
+				 struct device *target_dev,
+				 struct dma_buf_attachment *attach)
 {
+	struct drm_gem_object *obj = dma_buf->priv;
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-	long ret = 0;
-
-	ret = amdgpu_bo_reserve(bo, false);
-	if (unlikely(ret != 0))
-		return ret;
-
-	/*
-	 * Wait for all shared fences to complete before we switch to future
-	 * use of exclusive fence on this prime shared bo.
-	 */
-	ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
-						  MAX_SCHEDULE_TIMEOUT);
-	if (unlikely(ret < 0)) {
-		DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
-		amdgpu_bo_unreserve(bo);
-		return ret;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	long r;
+
+	r = drm_gem_map_attach(dma_buf, target_dev, attach);
+	if (r)
+		return r;
+
+	r = amdgpu_bo_reserve(bo, false);
+	if (unlikely(r != 0))
+		goto error_detach;
+
+
+	if (attach->dev->driver != adev->dev->driver) {
+		/*
+		 * Wait for all shared fences to complete before we switch to future
+		 * use of exclusive fence on this prime shared bo.
+		 */
+		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+							true, false,
+							MAX_SCHEDULE_TIMEOUT);
+		if (unlikely(r < 0)) {
+			DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
+			goto error_unreserve;
+		}
 	}
 
 	/* pin buffer into GTT */
-	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
-	if (likely(ret == 0))
+	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+	if (r)
+		goto error_unreserve;
+
+	if (attach->dev->driver != adev->dev->driver)
 		bo->prime_shared_count++;
 
+error_unreserve:
 	amdgpu_bo_unreserve(bo);
-	return ret;
+
+error_detach:
+	if (r)
+		drm_gem_map_detach(dma_buf, attach);
+	return r;
 }
 
-void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
+static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
+				  struct dma_buf_attachment *attach)
 {
+	struct drm_gem_object *obj = dma_buf->priv;
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	int ret = 0;
 
 	ret = amdgpu_bo_reserve(bo, true);
 	if (unlikely(ret != 0))
-		return;
+		goto error;
 
 	amdgpu_bo_unpin(bo);
-	if (bo->prime_shared_count)
+	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
 		bo->prime_shared_count--;
 	amdgpu_bo_unreserve(bo);
+
+error:
+	drm_gem_map_detach(dma_buf, attach);
 }
 
 struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
@@ -164,6 +203,50 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
 	return bo->tbo.resv;
 }
 
+static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
+				       enum dma_data_direction direction)
+{
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct ttm_operation_ctx ctx = { true, false };
+	u32 domain = amdgpu_display_framebuffer_domains(adev);
+	int ret;
+	bool reads = (direction == DMA_BIDIRECTIONAL ||
+		      direction == DMA_FROM_DEVICE);
+
+	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
+		return 0;
+
+	/* move to gtt */
+	ret = amdgpu_bo_reserve(bo, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
+		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+	}
+
+	amdgpu_bo_unreserve(bo);
+	return ret;
+}
+
+static const struct dma_buf_ops amdgpu_dmabuf_ops = {
+	.attach = amdgpu_gem_map_attach,
+	.detach = amdgpu_gem_map_detach,
+	.map_dma_buf = drm_gem_map_dma_buf,
+	.unmap_dma_buf = drm_gem_unmap_dma_buf,
+	.release = drm_gem_dmabuf_release,
+	.begin_cpu_access = amdgpu_gem_begin_cpu_access,
+	.map = drm_gem_dmabuf_kmap,
+	.map_atomic = drm_gem_dmabuf_kmap_atomic,
+	.unmap = drm_gem_dmabuf_kunmap,
+	.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
+	.mmap = drm_gem_dmabuf_mmap,
+	.vmap = drm_gem_dmabuf_vmap,
+	.vunmap = drm_gem_dmabuf_vunmap,
+};
+
 struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 					struct drm_gem_object *gobj,
 					int flags)
@@ -176,7 +259,30 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 		return ERR_PTR(-EPERM);
 
 	buf = drm_gem_prime_export(dev, gobj, flags);
-	if (!IS_ERR(buf))
+	if (!IS_ERR(buf)) {
 		buf->file->f_mapping = dev->anon_inode->i_mapping;
+		buf->ops = &amdgpu_dmabuf_ops;
+	}
+
 	return buf;
 }
+
+struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+					    struct dma_buf *dma_buf)
+{
+	struct drm_gem_object *obj;
+
+	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
+		obj = dma_buf->priv;
+		if (obj->dev == dev) {
+			/*
+			 * Importing dmabuf exported from out own gem increases
+			 * refcount on gem itself instead of f_count of dmabuf.
+			 */
+			drm_gem_object_get(obj);
+			return obj;
+		}
+	}
+
+	return drm_gem_prime_import(dev, dma_buf);
+}
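
The self-import shortcut in the last hunk (amdgpu_gem_prime_import() recognising a dma-buf that this same device exported and returning the original GEM object with an extra reference instead of building a new import) can be illustrated with a small stand-alone C sketch. The types and helpers below (gem_object, my_dmabuf_ops, generic_prime_import) are simplified stand-ins chosen for illustration, not the real DRM/dma-buf API.

/*
 * Sketch of the self-import shortcut: if the dma-buf's ops pointer
 * identifies it as one we exported ourselves, hand back the original
 * object with an extra reference instead of wrapping it in a new import.
 * All types and helpers are stand-ins, not kernel API.
 */
#include <stdio.h>

struct device { int id; };
struct gem_object { struct device *dev; int refcount; };
struct dma_buf_ops { const char *name; };
struct dma_buf { const struct dma_buf_ops *ops; void *priv; };

static const struct dma_buf_ops my_dmabuf_ops = { "my-driver" };

/* Stand-in for the generic drm_gem_prime_import() fallback path. */
static struct gem_object *generic_prime_import(struct device *dev,
					       struct dma_buf *buf)
{
	(void)dev;
	(void)buf;
	return NULL; /* a fresh import would be built here */
}

static struct gem_object *prime_import(struct device *dev, struct dma_buf *buf)
{
	if (buf->ops == &my_dmabuf_ops) {	/* exported by this driver? */
		struct gem_object *obj = buf->priv;

		if (obj->dev == dev) {		/* ...and by this very device? */
			obj->refcount++;	/* reuse it, just take a reference */
			return obj;
		}
	}
	return generic_prime_import(dev, buf);	/* normal import path */
}

int main(void)
{
	struct device dev = { 0 };
	struct gem_object obj = { &dev, 1 };
	struct dma_buf buf = { &my_dmabuf_ops, &obj };
	struct gem_object *imported = prime_import(&dev, &buf);

	printf("same object: %d, refcount now %d\n",
	       imported == &obj, obj.refcount);
	return 0;
}

Running the sketch prints "same object: 1, refcount now 2", mirroring the comment in the patch: re-importing our own export bumps the object's reference count rather than the dma-buf's file count.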