author     Dave Airlie <airlied@redhat.com>    2016-09-29 23:18:26 -0400
committer  Dave Airlie <airlied@redhat.com>    2016-09-29 23:21:02 -0400
commit     28a396545a2a5fbdffb2b661ed6c9b6820e28772 (patch)
tree       0cc4d3a0a4956cf02c5c2133e382688ed7a30678 /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parent     b2d7e08903e62b9f504fe6a954425b737aa9ff96 (diff)
parent     a481daa88fd4d6b54f25348972bba10b5f6a84d0 (diff)
Merge branch 'drm-next-4.9' of git://people.freedesktop.org/~agd5f/linux into drm-next
Some additional fixes for 4.9:
- The rest of Christian's GTT rework, which among other things fixes a
  long-standing bug in the GPUVM code (a sketch of the bind-before-offset
  pattern follows the commit list below)
- Changes to the PCI shutdown callbacks for certain hypervisors
- Fix HPD interrupt storms on eDP panels which have the HPD interrupt
  enabled by the BIOS
- Misc cleanups and bug fixes
* 'drm-next-4.9' of git://people.freedesktop.org/~agd5f/linux: (33 commits)
drm/radeon: always apply pci shutdown callbacks
drm/amdgpu: always apply pci shutdown callbacks (v2)
drm/amdgpu: improve VM PTE trace points
drm/amdgpu: fix GART_DEBUGFS define
drm/amdgpu: free userptrs even if GTT isn't bound
drm/amd/amdgpu: Various cleanups for DCEv6
drm/amdgpu: fix BO move offsets
drm/amdgpu: fix amdgpu_move_blit on 32bit systems
drm/amdgpu: fix gtt_mgr bo's offset
drm/amdgpu: fix initializing the VM BO shadow
drm/amdgpu: fix initializing the VM last eviction counter
drm/amdgpu: cleanup VM shadow BO unreferencing
drm/amdgpu: allocate GTT space for shadow VM page tables
drm/amdgpu: rename all rbo variable to abo v2
drm/amdgpu: remove unused member from struct amdgpu_bo
drm/amdgpu: add a custom GTT memory manager v2
drm/amdgpu/dce6: disable hpd on local panels
drm/amdgpu/dce8: disable hpd on local panels
drm/amdgpu/dce11: disable hpd on local panels
drm/amdgpu/dce10: disable hpd on local panels
...
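
As I read the GPUVM hunks in the diff below, the core of the fix is a single pattern: with the new GTT memory manager a buffer object can own GTT space without yet having a valid GART address, so the VM code now calls amdgpu_ttm_bind() on a BO before asking amdgpu_bo_gpu_offset() for the address it writes into page tables. The following is only an illustrative sketch of that pattern under the 4.9-era amdgpu helpers shown in the diff; the helper name example_get_vm_bo_addr() is made up and is not part of the driver.

/*
 * Illustrative only: a hypothetical helper showing the bind-before-offset
 * pattern applied by the GPUVM hunks below.  Reservation locking and most
 * error handling are elided.
 */
static int example_get_vm_bo_addr(struct amdgpu_bo *bo, uint64_t *gpu_addr)
{
        int r;

        /* Make sure the BO is actually bound into the GART first; with the
         * custom GTT manager, having GTT space allocated no longer implies
         * a valid GART address.
         */
        r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
        if (r)
                return r;

        /* Only now is the returned offset safe to write into page tables. */
        *gpu_addr = amdgpu_bo_gpu_offset(bo);
        return 0;
}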
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c   67
1 file changed, 55 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a6a48ed9562e..bc4b22c6fc08 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -487,7 +487,7 @@ static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
                                   unsigned count, uint32_t incr,
                                   uint32_t flags)
 {
-        trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
+        trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
 
         if (count < 3) {
                 amdgpu_vm_write_pte(params->adev, params->ib, pe,
@@ -516,10 +516,12 @@ static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
                                    unsigned count, uint32_t incr,
                                    uint32_t flags)
 {
-        trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
+        uint64_t src = (params->src + (addr >> 12) * 8);
 
-        amdgpu_vm_copy_pte(params->adev, params->ib, pe,
-                           (params->src + (addr >> 12) * 8), count);
+
+        trace_amdgpu_vm_copy_ptes(pe, src, count);
+
+        amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
 }
 
 /**
@@ -552,6 +554,10 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
         if (r)
                 goto error;
 
+        r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+        if (r)
+                goto error;
+
         addr = amdgpu_bo_gpu_offset(bo);
         entries = amdgpu_bo_size(bo) / 8;
 
@@ -625,6 +631,11 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
 
         if (!pd)
                 return 0;
+
+        r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem);
+        if (r)
+                return r;
+
         pd_addr = amdgpu_bo_gpu_offset(pd);
         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
@@ -650,6 +661,14 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
                 if (bo == NULL)
                         continue;
 
+                if (bo->shadow) {
+                        struct amdgpu_bo *shadow = bo->shadow;
+
+                        r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+                        if (r)
+                                return r;
+                }
+
                 pt = amdgpu_bo_gpu_offset(bo);
                 if (!shadow) {
                         if (vm->page_tables[pt_idx].addr == pt)
@@ -1000,6 +1019,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                                                     AMDGPU_GPU_PAGE_SIZE);
                         pte[i] |= flags;
                 }
+                addr = 0;
         }
 
         r = amdgpu_sync_fence(adev, &job->sync, exclusive);
@@ -1412,10 +1432,20 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
                 r = amdgpu_vm_clear_bo(adev, vm, pt);
                 if (r) {
+                        amdgpu_bo_unref(&pt->shadow);
                         amdgpu_bo_unref(&pt);
                         goto error_free;
                 }
 
+                if (pt->shadow) {
+                        r = amdgpu_vm_clear_bo(adev, vm, pt->shadow);
+                        if (r) {
+                                amdgpu_bo_unref(&pt->shadow);
+                                amdgpu_bo_unref(&pt);
+                                goto error_free;
+                        }
+                }
+
                 entry->robj = pt;
                 entry->priority = 0;
                 entry->tv.bo = &entry->robj->tbo;
@@ -1610,14 +1640,25 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                 goto error_free_page_directory;
 
         r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
-        amdgpu_bo_unreserve(vm->page_directory);
         if (r)
-                goto error_free_page_directory;
+                goto error_unreserve;
+
+        if (vm->page_directory->shadow) {
+                r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory->shadow);
+                if (r)
+                        goto error_unreserve;
+        }
+
         vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
+        amdgpu_bo_unreserve(vm->page_directory);
 
         return 0;
 
+error_unreserve:
+        amdgpu_bo_unreserve(vm->page_directory);
+
 error_free_page_directory:
+        amdgpu_bo_unref(&vm->page_directory->shadow);
         amdgpu_bo_unref(&vm->page_directory);
         vm->page_directory = NULL;
 
@@ -1660,15 +1701,17 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
         }
 
         for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
-                if (vm->page_tables[i].entry.robj &&
-                    vm->page_tables[i].entry.robj->shadow)
-                        amdgpu_bo_unref(&vm->page_tables[i].entry.robj->shadow);
-                amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
+                struct amdgpu_bo *pt = vm->page_tables[i].entry.robj;
+
+                if (!pt)
+                        continue;
+
+                amdgpu_bo_unref(&pt->shadow);
+                amdgpu_bo_unref(&pt);
         }
         drm_free_large(vm->page_tables);
 
-        if (vm->page_directory->shadow)
-                amdgpu_bo_unref(&vm->page_directory->shadow);
+        amdgpu_bo_unref(&vm->page_directory->shadow);
         amdgpu_bo_unref(&vm->page_directory);
         fence_put(vm->page_directory_fence);
 }