author    Christian König <christian.koenig@amd.com>  2016-08-17 04:46:52 -0400
committer Alex Deucher <alexander.deucher@amd.com>    2016-10-25 14:38:16 -0400
commit    8892f153c83e521aff2ee7ac620856e2983cd7a0
tree      b941daec4735b465f6e51a8f82177d07f4f6c024 /drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
parent    63e0ba40e52c60f25ab67e27c89ed2b99b847562
drm/amdgpu: enable amdgpu_move_blit to handle multiple MM nodes v2
This allows us to move scattered buffers around.

v2: fix a couple of typos, handle scattered to scattered moves as well.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Edward O'Callaghan <funfunctor@folklore1984.net>
Tested-by: Mike Lothian <mike@fireburn.co.uk>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
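The core of the change is an interval walk: the new copy loop advances through the source and destination drm_mm_node lists in lockstep, issuing one blit per min(remaining source extent, remaining destination extent) pages. Below is a minimal, self-contained userspace sketch of that walk; struct extent, copy_chunk() and scattered_move() are hypothetical stand-ins (the kernel code uses struct drm_mm_node and amdgpu_copy_buffer()), and the page numbers in main() are made up.

#include <stdio.h>

/* Hypothetical stand-in for struct drm_mm_node. */
struct extent {
	unsigned long start;	/* first page of the extent */
	unsigned long size;	/* length of the extent in pages */
};

/* Hypothetical stand-in for amdgpu_copy_buffer(): just log the request. */
static void copy_chunk(unsigned long src, unsigned long dst,
		       unsigned long pages)
{
	printf("copy %lu page(s): src page %lu -> dst page %lu\n",
	       pages, src, dst);
}

static void scattered_move(const struct extent *src, const struct extent *dst,
			   unsigned long num_pages)
{
	unsigned long src_off = src->start, src_left = src->size;
	unsigned long dst_off = dst->start, dst_left = dst->size;

	while (num_pages) {
		/* Never copy past the end of either current extent. */
		unsigned long cur = src_left < dst_left ? src_left : dst_left;

		if (cur > num_pages)
			cur = num_pages;
		copy_chunk(src_off, dst_off, cur);

		num_pages -= cur;
		if (!num_pages)
			break;	/* as in the kernel loop: stop before
				 * stepping past the final extents */

		/* Advance within the extent, or step to the next one. */
		src_left -= cur;
		if (!src_left) {
			++src;
			src_off = src->start;
			src_left = src->size;
		} else {
			src_off += cur;
		}

		dst_left -= cur;
		if (!dst_left) {
			++dst;
			dst_off = dst->start;
			dst_left = dst->size;
		} else {
			dst_off += cur;
		}
	}
}

int main(void)
{
	/* Source scattered as 3+5 pages, destination as 4+4 pages. */
	struct extent src[] = { { 100, 3 }, { 200, 5 } };
	struct extent dst[] = { { 500, 4 }, { 900, 4 } };

	scattered_move(src, dst, 8);	/* copies chunks of 3, 1, then 4 pages */
	return 0;
}

With the extents in main() (3+5 source pages against 4+4 destination pages), the walk issues copies of 3, 1 and 4 pages; the loop added to amdgpu_move_blit() in the diff below performs the same splitting.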
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 118
1 file changed, 85 insertions(+), 33 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index a06cf9988912..1427c40c70bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -260,64 +260,116 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
 	new_mem->mm_node = NULL;
 }
 
-static int amdgpu_move_blit(struct ttm_buffer_object *bo,
-			bool evict, bool no_wait_gpu,
-			struct ttm_mem_reg *new_mem,
-			struct ttm_mem_reg *old_mem)
+static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
+			       struct drm_mm_node *mm_node,
+			       struct ttm_mem_reg *mem,
+			       uint64_t *addr)
 {
-	struct amdgpu_device *adev;
-	struct amdgpu_ring *ring;
-	uint64_t old_start, new_start;
-	struct fence *fence;
 	int r;
 
-	adev = amdgpu_get_adev(bo->bdev);
-	ring = adev->mman.buffer_funcs_ring;
-
-	switch (old_mem->mem_type) {
+	switch (mem->mem_type) {
 	case TTM_PL_TT:
-		r = amdgpu_ttm_bind(bo, old_mem);
+		r = amdgpu_ttm_bind(bo, mem);
 		if (r)
 			return r;
 
 	case TTM_PL_VRAM:
-		old_start = (u64)old_mem->start << PAGE_SHIFT;
-		old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
+		*addr = mm_node->start << PAGE_SHIFT;
+		*addr += bo->bdev->man[mem->mem_type].gpu_offset;
 		break;
 	default:
-		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+		DRM_ERROR("Unknown placement %d\n", mem->mem_type);
 		return -EINVAL;
 	}
-	switch (new_mem->mem_type) {
-	case TTM_PL_TT:
-		r = amdgpu_ttm_bind(bo, new_mem);
-		if (r)
-			return r;
 
-	case TTM_PL_VRAM:
-		new_start = (u64)new_mem->start << PAGE_SHIFT;
-		new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
-		break;
-	default:
-		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
-		return -EINVAL;
-	}
+	return 0;
+}
+
+static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+			    bool evict, bool no_wait_gpu,
+			    struct ttm_mem_reg *new_mem,
+			    struct ttm_mem_reg *old_mem)
+{
+	struct amdgpu_device *adev = amdgpu_get_adev(bo->bdev);
+	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+
+	struct drm_mm_node *old_mm, *new_mm;
+	uint64_t old_start, old_size, new_start, new_size;
+	unsigned long num_pages;
+	struct fence *fence = NULL;
+	int r;
+
+	BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+
 	if (!ring->ready) {
 		DRM_ERROR("Trying to move memory with ring turned off.\n");
 		return -EINVAL;
 	}
 
-	BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+	old_mm = old_mem->mm_node;
+	r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start);
+	if (r)
+		return r;
+	old_size = old_mm->size;
+
 
-	r = amdgpu_copy_buffer(ring, old_start, new_start,
-			       new_mem->num_pages * PAGE_SIZE, /* bytes */
-			       bo->resv, &fence, false);
+	new_mm = new_mem->mm_node;
+	r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start);
 	if (r)
 		return r;
+	new_size = new_mm->size;
+
+	num_pages = new_mem->num_pages;
+	while (num_pages) {
+		unsigned long cur_pages = min(old_size, new_size);
+		struct fence *next;
+
+		r = amdgpu_copy_buffer(ring, old_start, new_start,
+				       cur_pages * PAGE_SIZE,
+				       bo->resv, &next, false);
+		if (r)
+			goto error;
+
+		fence_put(fence);
+		fence = next;
+
+		num_pages -= cur_pages;
+		if (!num_pages)
+			break;
+
+		old_size -= cur_pages;
+		if (!old_size) {
+			r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem,
+						&old_start);
+			if (r)
+				goto error;
+			old_size = old_mm->size;
+		} else {
+			old_start += cur_pages * PAGE_SIZE;
+		}
+
+		new_size -= cur_pages;
+		if (!new_size) {
+			r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem,
+						&new_start);
+			if (r)
+				goto error;
+
+			new_size = new_mm->size;
+		} else {
+			new_start += cur_pages * PAGE_SIZE;
+		}
+	}
 
 	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
 	fence_put(fence);
 	return r;
+
+error:
+	if (fence)
+		fence_wait(fence, false);
+	fence_put(fence);
+	return r;
 }
 
 static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
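As a side note on the new amdgpu_mm_node_addr() helper: the GPU address it produces is simply the node's starting page scaled to bytes plus the placement's base GPU offset. A tiny sketch of that arithmetic with made-up numbers (both the node start and the gpu_offset value below are hypothetical):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* 4 KiB pages */

int main(void)
{
	/* Hypothetical values: a node starting at page 0x40 inside a
	 * placement whose aperture begins at GPU address 0xF400000000. */
	uint64_t node_start = 0x40;		/* drm_mm_node.start, in pages */
	uint64_t gpu_offset = 0xF400000000ULL;	/* man[mem_type].gpu_offset */

	uint64_t addr = (node_start << PAGE_SHIFT) + gpu_offset;
	printf("GPU address: 0x%llx\n", (unsigned long long)addr);
	return 0;
}

Here 0x40 pages shifted by PAGE_SHIFT gives 0x40000 bytes past the aperture base, so the sketch prints 0xf400040000.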