author    Christian König <christian.koenig@amd.com>  2016-11-17 09:40:02 -0500
committer Alex Deucher <alexander.deucher@amd.com>    2016-11-23 15:08:46 -0500
commit    617859e0766fae595f08f1025c1a7df6246a5f5b (patch)
tree      999d3197d0d0eeff0a929d0d2386202150f308c3
parent    f45dc74c93241ad0125fbc08c48b2ebe20f2f472 (diff)
drm/amdgpu: use AMDGPU_GEM_CREATE_VRAM_CLEARED for VM PD/PTs (v2)
Doesn't make much sense to have the same functionality twice.

v2: rebase on dma_fence renaming

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
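In short: the patch deletes the driver's hand-rolled clear path (amdgpu_vm_clear_bo(), removed below), which submitted a GPU job just to zero-fill freshly created page directories and page tables, and instead ORs AMDGPU_GEM_CREATE_VRAM_CLEARED into the creation flags so the allocator hands back already-cleared VRAM. As a rough illustration of the pattern only (a minimal userspace sketch, not kernel code; buf_create() and BUF_CREATE_CLEARED are hypothetical stand-ins for amdgpu_bo_create() and AMDGPU_GEM_CREATE_VRAM_CLEARED):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUF_CREATE_CLEARED (1u << 0)	/* clear at allocation time */

/* Allocator-owned clearing: callers request a cleared buffer via a
 * flag instead of each implementing their own clear afterwards. */
static void *buf_create(size_t size, uint32_t flags)
{
	void *buf = malloc(size);

	if (buf && (flags & BUF_CREATE_CLEARED))
		memset(buf, 0, size);
	return buf;
}

int main(void)
{
	/* Before this patch: allocate, then clear by hand (the removed
	 * amdgpu_vm_clear_bo() path, with its own job submission, fencing
	 * and error handling). After: request a cleared buffer up front
	 * and drop the second code path entirely. */
	uint64_t *pt = buf_create(512 * sizeof(*pt), BUF_CREATE_CLEARED);

	if (!pt)
		return 1;
	printf("first entry: %llu\n", (unsigned long long)pt[0]); /* 0 */
	free(pt);
	return 0;
}

The design win is the same in both settings: one clearing path owned by the allocator (much like calloc() versus malloc() plus memset()), rather than every caller duplicating the clear and its error handling.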
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 99
 1 file changed, 4 insertions(+), 95 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 337c5b31d18d..1dda9321bd5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -530,70 +530,6 @@ static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
 }
 
 /**
- * amdgpu_vm_clear_bo - initially clear the page dir/table
- *
- * @adev: amdgpu_device pointer
- * @bo: bo to clear
- *
- * need to reserve bo first before calling it.
- */
-static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
-                              struct amdgpu_vm *vm,
-                              struct amdgpu_bo *bo)
-{
-        struct amdgpu_ring *ring;
-        struct dma_fence *fence = NULL;
-        struct amdgpu_job *job;
-        struct amdgpu_pte_update_params params;
-        unsigned entries;
-        uint64_t addr;
-        int r;
-
-        ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
-
-        r = reservation_object_reserve_shared(bo->tbo.resv);
-        if (r)
-                return r;
-
-        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-        if (r)
-                goto error;
-
-        r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
-        if (r)
-                goto error;
-
-        addr = amdgpu_bo_gpu_offset(bo);
-        entries = amdgpu_bo_size(bo) / 8;
-
-        r = amdgpu_job_alloc_with_ib(adev, 64, &job);
-        if (r)
-                goto error;
-
-        memset(&params, 0, sizeof(params));
-        params.adev = adev;
-        params.ib = &job->ibs[0];
-        amdgpu_vm_do_set_ptes(&params, addr, 0, entries, 0, 0);
-        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
-
-        WARN_ON(job->ibs[0].length_dw > 64);
-        r = amdgpu_job_submit(job, ring, &vm->entity,
-                              AMDGPU_FENCE_OWNER_VM, &fence);
-        if (r)
-                goto error_free;
-
-        amdgpu_bo_fence(bo, fence, true);
-        dma_fence_put(fence);
-        return 0;
-
-error_free:
-        amdgpu_job_free(job);
-
-error:
-        return r;
-}
-
-/**
  * amdgpu_vm_map_gart - Resolve gart mapping of addr
  *
  * @pages_addr: optional DMA address to use for lookup
@@ -1435,7 +1371,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                                     AMDGPU_GEM_DOMAIN_VRAM,
                                     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
                                     AMDGPU_GEM_CREATE_SHADOW |
-                                    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+                                    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+                                    AMDGPU_GEM_CREATE_VRAM_CLEARED,
                                     NULL, resv, &pt);
                if (r)
                        goto error_free;
@@ -1445,22 +1382,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                 */
                pt->parent = amdgpu_bo_ref(vm->page_directory);
 
-               r = amdgpu_vm_clear_bo(adev, vm, pt);
-               if (r) {
-                       amdgpu_bo_unref(&pt->shadow);
-                       amdgpu_bo_unref(&pt);
-                       goto error_free;
-               }
-
-               if (pt->shadow) {
-                       r = amdgpu_vm_clear_bo(adev, vm, pt->shadow);
-                       if (r) {
-                               amdgpu_bo_unref(&pt->shadow);
-                               amdgpu_bo_unref(&pt);
-                               goto error_free;
-                       }
-               }
-
                vm->page_tables[pt_idx].bo = pt;
                vm->page_tables[pt_idx].addr = 0;
        }
@@ -1642,7 +1563,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                             AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
                             AMDGPU_GEM_CREATE_SHADOW |
-                            AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+                            AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+                            AMDGPU_GEM_CREATE_VRAM_CLEARED,
                             NULL, NULL, &vm->page_directory);
        if (r)
                goto error_free_sched_entity;
@@ -1651,24 +1573,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        if (r)
                goto error_free_page_directory;
 
-       r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
-       if (r)
-               goto error_unreserve;
-
-       if (vm->page_directory->shadow) {
-               r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory->shadow);
-               if (r)
-                       goto error_unreserve;
-       }
-
        vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
        amdgpu_bo_unreserve(vm->page_directory);
 
        return 0;
 
-error_unreserve:
-       amdgpu_bo_unreserve(vm->page_directory);
-
 error_free_page_directory:
        amdgpu_bo_unref(&vm->page_directory->shadow);
        amdgpu_bo_unref(&vm->page_directory);