Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu.h')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu.h	170
1 file changed, 1 insertion(+), 169 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 3ba97ea5e62c..7d30a8e03bed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -55,6 +55,7 @@
 #include "amdgpu_gds.h"
 #include "amdgpu_sync.h"
 #include "amdgpu_ring.h"
+#include "amdgpu_vm.h"
 #include "amd_powerplay.h"
 #include "amdgpu_acp.h"
 
@@ -149,7 +150,6 @@ extern int amdgpu_vram_page_split;
 
 struct amdgpu_device;
 struct amdgpu_ib;
-struct amdgpu_vm;
 struct amdgpu_cs_parser;
 struct amdgpu_job;
 struct amdgpu_irq_src;
@@ -629,174 +629,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		      struct fence **f);
 
 /*
- * VM
- */
-
-/* maximum number of VMIDs */
-#define AMDGPU_NUM_VM	16
-
-/* Maximum number of PTEs the hardware can write with one command */
-#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF
-
-/* number of entries in page table */
-#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
-
-/* PTBs (Page Table Blocks) need to be aligned to 32K */
-#define AMDGPU_VM_PTB_ALIGN_SIZE	32768
-
-/* LOG2 number of continuous pages for the fragment field */
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
-
-#define AMDGPU_PTE_VALID	(1 << 0)
-#define AMDGPU_PTE_SYSTEM	(1 << 1)
-#define AMDGPU_PTE_SNOOPED	(1 << 2)
-
-/* VI only */
-#define AMDGPU_PTE_EXECUTABLE	(1 << 4)
-
-#define AMDGPU_PTE_READABLE	(1 << 5)
-#define AMDGPU_PTE_WRITEABLE	(1 << 6)
-
-#define AMDGPU_PTE_FRAG(x)	((x & 0x1f) << 7)
-
-/* How to programm VM fault handling */
-#define AMDGPU_VM_FAULT_STOP_NEVER	0
-#define AMDGPU_VM_FAULT_STOP_FIRST	1
-#define AMDGPU_VM_FAULT_STOP_ALWAYS	2
-
-struct amdgpu_vm_pt {
-	struct amdgpu_bo	*bo;
-	uint64_t		addr;
-};
-
-struct amdgpu_vm {
-	/* tree of virtual addresses mapped */
-	struct rb_root		va;
-
-	/* protecting invalidated */
-	spinlock_t		status_lock;
-
-	/* BOs moved, but not yet updated in the PT */
-	struct list_head	invalidated;
-
-	/* BOs cleared in the PT because of a move */
-	struct list_head	cleared;
-
-	/* BO mappings freed, but not yet updated in the PT */
-	struct list_head	freed;
-
-	/* contains the page directory */
-	struct amdgpu_bo	*page_directory;
-	unsigned		max_pde_used;
-	struct fence		*page_directory_fence;
-	uint64_t		last_eviction_counter;
-
-	/* array of page tables, one for each page directory entry */
-	struct amdgpu_vm_pt	*page_tables;
-
-	/* for id and flush management per ring */
-	struct amdgpu_vm_id	*ids[AMDGPU_MAX_RINGS];
-
-	/* protecting freed */
-	spinlock_t		freed_lock;
-
-	/* Scheduler entity for page table updates */
-	struct amd_sched_entity	entity;
-
-	/* client id */
-	u64			client_id;
-};
-
-struct amdgpu_vm_id {
-	struct list_head	list;
-	struct fence		*first;
-	struct amdgpu_sync	active;
-	struct fence		*last_flush;
-	atomic64_t		owner;
-
-	uint64_t		pd_gpu_addr;
-	/* last flushed PD/PT update */
-	struct fence		*flushed_updates;
-
-	uint32_t		current_gpu_reset_count;
-
-	uint32_t		gds_base;
-	uint32_t		gds_size;
-	uint32_t		gws_base;
-	uint32_t		gws_size;
-	uint32_t		oa_base;
-	uint32_t		oa_size;
-};
-
-struct amdgpu_vm_manager {
-	/* Handling of VMIDs */
-	struct mutex				lock;
-	unsigned				num_ids;
-	struct list_head			ids_lru;
-	struct amdgpu_vm_id			ids[AMDGPU_NUM_VM];
-
-	/* Handling of VM fences */
-	u64					fence_context;
-	unsigned				seqno[AMDGPU_MAX_RINGS];
-
-	uint32_t				max_pfn;
-	/* vram base address for page table entry */
-	u64					vram_base_offset;
-	/* is vm enabled? */
-	bool					enabled;
-	/* vm pte handling */
-	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
-	struct amdgpu_ring			*vm_pte_rings[AMDGPU_MAX_RINGS];
-	unsigned				vm_pte_num_rings;
-	atomic_t				vm_pte_next_ring;
-	/* client id counter */
-	atomic64_t				client_counter;
-};
-
-void amdgpu_vm_manager_init(struct amdgpu_device *adev);
-void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
-			 struct list_head *validated,
-			 struct amdgpu_bo_list_entry *entry);
-int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			      int (*callback)(void *p, struct amdgpu_bo *bo),
-			      void *param);
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
-				  struct amdgpu_vm *vm);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync, struct fence *fence,
-		      struct amdgpu_job *job);
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-				    struct amdgpu_vm *vm);
-int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
-			  struct amdgpu_vm *vm);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			     struct amdgpu_sync *sync);
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
-			struct amdgpu_bo_va *bo_va,
-			bool clear);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
-			     struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
-				       struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
-				      struct amdgpu_vm *vm,
-				      struct amdgpu_bo *bo);
-int amdgpu_vm_bo_map(struct amdgpu_device *adev,
-		     struct amdgpu_bo_va *bo_va,
-		     uint64_t addr, uint64_t offset,
-		     uint64_t size, uint32_t flags);
-int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
-		       struct amdgpu_bo_va *bo_va,
-		       uint64_t addr);
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
-		      struct amdgpu_bo_va *bo_va);
-
-/*
  * context related structures
  */
 
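Notes for readers following the move into amdgpu_vm.h:

The removed macros encode the two-level page-table layout. A GPU virtual address is split into a page-directory index and a page-table index, with AMDGPU_VM_PTE_COUNT = (1 << amdgpu_vm_block_size) entries per page table. A minimal standalone sketch of that split, assuming 4 KiB GPU pages and treating amdgpu_vm_block_size as a plain variable (in the driver it is a module parameter; the value 9 used here is purely illustrative):

/*
 * Sketch: splitting a GPU VA into page-directory and page-table indices.
 * Not driver code; amdgpu_vm_block_size is a module parameter in the
 * kernel and 9 is an illustrative value.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned amdgpu_vm_block_size = 9;
#define AMDGPU_GPU_PAGE_SHIFT 12	/* 4 KiB GPU pages */
#define AMDGPU_VM_PTE_COUNT (1u << amdgpu_vm_block_size)

int main(void)
{
	uint64_t va  = 0x12345678000ull;		/* hypothetical GPU VA */
	uint64_t pfn = va >> AMDGPU_GPU_PAGE_SHIFT;	/* GPU page number */
	uint64_t pde = pfn >> amdgpu_vm_block_size;	/* page-directory index */
	uint64_t pte = pfn & (AMDGPU_VM_PTE_COUNT - 1);	/* page-table index */

	printf("va=0x%llx -> pde=%llu pte=%llu\n",
	       (unsigned long long)va, (unsigned long long)pde,
	       (unsigned long long)pte);
	return 0;
}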
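The AMDGPU_PTE_* bits above compose directly into page-table entries, and AMDGPU_PTE_FRAG() stores a log2 fragment size in bits 7-11; with AMDGPU_LOG2_PAGES_PER_FRAG = 4 a fragment covers 2^4 contiguous 4 KiB pages, i.e. 64 KiB. A sketch of a typical flag combination (the particular mix shown is illustrative, not taken from the patch):

/* Sketch: composing PTE flag bits exactly as the moved macros define them. */
#include <stdint.h>

#define AMDGPU_PTE_VALID	(1 << 0)
#define AMDGPU_PTE_SYSTEM	(1 << 1)
#define AMDGPU_PTE_SNOOPED	(1 << 2)
#define AMDGPU_PTE_READABLE	(1 << 5)
#define AMDGPU_PTE_WRITEABLE	(1 << 6)
#define AMDGPU_PTE_FRAG(x)	((x & 0x1f) << 7)
#define AMDGPU_LOG2_PAGES_PER_FRAG 4

/*
 * A readable+writable, snooped system-memory mapping using 64 KiB
 * fragments (2^AMDGPU_LOG2_PAGES_PER_FRAG contiguous 4 KiB pages).
 */
static inline uint64_t example_pte_flags(void)
{
	return AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
	       AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
	       AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
}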
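The function declarations being moved form the per-process VM lifecycle. A hedged kernel-context sketch of the create -> add BO -> map -> update -> tear-down sequence, using only the signatures above; the address, size, flags, and error handling are illustrative, BO reservation/locking is omitted, and in the real driver the amdgpu_vm lives in the file-private struct rather than on the stack:

/* Sketch only: kernel context assumed, locking and validation omitted. */
static int example_vm_map(struct amdgpu_device *adev, struct amdgpu_bo *bo)
{
	struct amdgpu_vm vm;
	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_vm_init(adev, &vm);		/* allocates PD, scheduler entity */
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_add(adev, &vm, bo);	/* track this BO in the VM */
	if (!bo_va) {
		amdgpu_vm_fini(adev, &vm);
		return -ENOMEM;
	}

	/* map 1 MiB of the BO at GPU VA 0x100000 (illustrative values) */
	r = amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, 0x100000,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
	if (r)
		goto out;

	/* write the mapping into the page tables */
	r = amdgpu_vm_bo_update(adev, bo_va, false);
out:
	amdgpu_vm_bo_rmv(adev, bo_va);		/* demo: tear down again */
	amdgpu_vm_fini(adev, &vm);
	return r;
}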
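Finally, per command submission a hardware VMID (one of AMDGPU_NUM_VM) is taken from the manager's LRU list and the page directory is flushed for it. A sketch of that pairing, again assuming kernel context; in the driver the GPU scheduler drives these calls around each job rather than a helper like this:

/* Sketch: acquire a VMID for a job, then emit the PD switch/TLB flush. */
static int example_vmid_for_job(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
				struct amdgpu_sync *sync, struct fence *fence,
				struct amdgpu_job *job)
{
	int r;

	/* reuse an idle VMID from the LRU, or sync to one becoming free */
	r = amdgpu_vm_grab_id(vm, ring, sync, fence, job);
	if (r)
		return r;

	/* program the page directory and flush the TLB for that VMID */
	return amdgpu_vm_flush(ring, job);
}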