author		Christian König <christian.koenig@amd.com>	2016-09-28 09:41:50 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2016-10-25 14:38:29 -0400
commit		073440d26272dc983bed51a3aa7bddc4aa344ab0 (patch)
tree		987ecba096dedcae75fe43778c02f47893a262e2
parent		78023016116f9fbf4783a99293987ccdfc4d7a92 (diff)
drm/amdgpu: move VM defines into amdgpu_vm.h
Only cleanup, no intended functional change.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu.h	170
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h	205
2 files changed, 206 insertions(+), 169 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 3ba97ea5e62c..7d30a8e03bed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -55,6 +55,7 @@
 #include "amdgpu_gds.h"
 #include "amdgpu_sync.h"
 #include "amdgpu_ring.h"
+#include "amdgpu_vm.h"
 #include "amd_powerplay.h"
 #include "amdgpu_acp.h"
 
@@ -149,7 +150,6 @@ extern int amdgpu_vram_page_split;
 
 struct amdgpu_device;
 struct amdgpu_ib;
-struct amdgpu_vm;
 struct amdgpu_cs_parser;
 struct amdgpu_job;
 struct amdgpu_irq_src;
@@ -629,174 +629,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
                       struct fence **f);
 
 /*
- * VM
- */
-
-/* maximum number of VMIDs */
-#define AMDGPU_NUM_VM 16
-
-/* Maximum number of PTEs the hardware can write with one command */
-#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
-
-/* number of entries in page table */
-#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
-
-/* PTBs (Page Table Blocks) need to be aligned to 32K */
-#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
-
-/* LOG2 number of continuous pages for the fragment field */
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
-
-#define AMDGPU_PTE_VALID (1 << 0)
-#define AMDGPU_PTE_SYSTEM (1 << 1)
-#define AMDGPU_PTE_SNOOPED (1 << 2)
-
-/* VI only */
-#define AMDGPU_PTE_EXECUTABLE (1 << 4)
-
-#define AMDGPU_PTE_READABLE (1 << 5)
-#define AMDGPU_PTE_WRITEABLE (1 << 6)
-
-#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7)
-
-/* How to programm VM fault handling */
-#define AMDGPU_VM_FAULT_STOP_NEVER 0
-#define AMDGPU_VM_FAULT_STOP_FIRST 1
-#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
-
-struct amdgpu_vm_pt {
-        struct amdgpu_bo *bo;
-        uint64_t addr;
-};
-
-struct amdgpu_vm {
-        /* tree of virtual addresses mapped */
-        struct rb_root va;
-
-        /* protecting invalidated */
-        spinlock_t status_lock;
-
-        /* BOs moved, but not yet updated in the PT */
-        struct list_head invalidated;
-
-        /* BOs cleared in the PT because of a move */
-        struct list_head cleared;
-
-        /* BO mappings freed, but not yet updated in the PT */
-        struct list_head freed;
-
-        /* contains the page directory */
-        struct amdgpu_bo *page_directory;
-        unsigned max_pde_used;
-        struct fence *page_directory_fence;
-        uint64_t last_eviction_counter;
-
-        /* array of page tables, one for each page directory entry */
-        struct amdgpu_vm_pt *page_tables;
-
-        /* for id and flush management per ring */
-        struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
-
-        /* protecting freed */
-        spinlock_t freed_lock;
-
-        /* Scheduler entity for page table updates */
-        struct amd_sched_entity entity;
-
-        /* client id */
-        u64 client_id;
-};
-
-struct amdgpu_vm_id {
-        struct list_head list;
-        struct fence *first;
-        struct amdgpu_sync active;
-        struct fence *last_flush;
-        atomic64_t owner;
-
-        uint64_t pd_gpu_addr;
-        /* last flushed PD/PT update */
-        struct fence *flushed_updates;
-
-        uint32_t current_gpu_reset_count;
-
-        uint32_t gds_base;
-        uint32_t gds_size;
-        uint32_t gws_base;
-        uint32_t gws_size;
-        uint32_t oa_base;
-        uint32_t oa_size;
-};
-
-struct amdgpu_vm_manager {
-        /* Handling of VMIDs */
-        struct mutex lock;
-        unsigned num_ids;
-        struct list_head ids_lru;
-        struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
-
-        /* Handling of VM fences */
-        u64 fence_context;
-        unsigned seqno[AMDGPU_MAX_RINGS];
-
-        uint32_t max_pfn;
-        /* vram base address for page table entry */
-        u64 vram_base_offset;
-        /* is vm enabled? */
-        bool enabled;
-        /* vm pte handling */
-        const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
-        struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
-        unsigned vm_pte_num_rings;
-        atomic_t vm_pte_next_ring;
-        /* client id counter */
-        atomic64_t client_counter;
-};
-
-void amdgpu_vm_manager_init(struct amdgpu_device *adev);
-void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
-                         struct list_head *validated,
-                         struct amdgpu_bo_list_entry *entry);
-int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                              int (*callback)(void *p, struct amdgpu_bo *bo),
-                              void *param);
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
-                                  struct amdgpu_vm *vm);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-                      struct amdgpu_sync *sync, struct fence *fence,
-                      struct amdgpu_job *job);
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-                                    struct amdgpu_vm *vm);
-int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
-                          struct amdgpu_vm *vm);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                             struct amdgpu_sync *sync);
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
-                        struct amdgpu_bo_va *bo_va,
-                        bool clear);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
-                             struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
-                                       struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
-                                      struct amdgpu_vm *vm,
-                                      struct amdgpu_bo *bo);
-int amdgpu_vm_bo_map(struct amdgpu_device *adev,
-                     struct amdgpu_bo_va *bo_va,
-                     uint64_t addr, uint64_t offset,
-                     uint64_t size, uint32_t flags);
-int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
-                       struct amdgpu_bo_va *bo_va,
-                       uint64_t addr);
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
-                      struct amdgpu_bo_va *bo_va);
-
-/*
  * context related structures
  */
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
new file mode 100644
index 000000000000..42a629b56095
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_VM_H__
+#define __AMDGPU_VM_H__
+
+#include <linux/rbtree.h>
+
+#include "gpu_scheduler.h"
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+
+struct amdgpu_bo_va;
+struct amdgpu_job;
+struct amdgpu_bo_list_entry;
+
+/*
+ * GPUVM handling
+ */
+
+/* maximum number of VMIDs */
+#define AMDGPU_NUM_VM 16
+
+/* Maximum number of PTEs the hardware can write with one command */
+#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
+
+/* number of entries in page table */
+#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
+
+/* PTBs (Page Table Blocks) need to be aligned to 32K */
+#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
+
+/* LOG2 number of continuous pages for the fragment field */
+#define AMDGPU_LOG2_PAGES_PER_FRAG 4
+
+#define AMDGPU_PTE_VALID (1 << 0)
+#define AMDGPU_PTE_SYSTEM (1 << 1)
+#define AMDGPU_PTE_SNOOPED (1 << 2)
+
+/* VI only */
+#define AMDGPU_PTE_EXECUTABLE (1 << 4)
+
+#define AMDGPU_PTE_READABLE (1 << 5)
+#define AMDGPU_PTE_WRITEABLE (1 << 6)
+
+#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7)
+
+/* How to programm VM fault handling */
+#define AMDGPU_VM_FAULT_STOP_NEVER 0
+#define AMDGPU_VM_FAULT_STOP_FIRST 1
+#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
+
+struct amdgpu_vm_pt {
+        struct amdgpu_bo *bo;
+        uint64_t addr;
+};
+
+struct amdgpu_vm {
+        /* tree of virtual addresses mapped */
+        struct rb_root va;
+
+        /* protecting invalidated */
+        spinlock_t status_lock;
+
+        /* BOs moved, but not yet updated in the PT */
+        struct list_head invalidated;
+
+        /* BOs cleared in the PT because of a move */
+        struct list_head cleared;
+
+        /* BO mappings freed, but not yet updated in the PT */
+        struct list_head freed;
+
+        /* contains the page directory */
+        struct amdgpu_bo *page_directory;
+        unsigned max_pde_used;
+        struct fence *page_directory_fence;
+        uint64_t last_eviction_counter;
+
+        /* array of page tables, one for each page directory entry */
+        struct amdgpu_vm_pt *page_tables;
+
+        /* for id and flush management per ring */
+        struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
+
+        /* protecting freed */
+        spinlock_t freed_lock;
+
+        /* Scheduler entity for page table updates */
+        struct amd_sched_entity entity;
+
+        /* client id */
+        u64 client_id;
+};
+
+struct amdgpu_vm_id {
+        struct list_head list;
+        struct fence *first;
+        struct amdgpu_sync active;
+        struct fence *last_flush;
+        atomic64_t owner;
+
+        uint64_t pd_gpu_addr;
+        /* last flushed PD/PT update */
+        struct fence *flushed_updates;
+
+        uint32_t current_gpu_reset_count;
+
+        uint32_t gds_base;
+        uint32_t gds_size;
+        uint32_t gws_base;
+        uint32_t gws_size;
+        uint32_t oa_base;
+        uint32_t oa_size;
+};
+
+struct amdgpu_vm_manager {
+        /* Handling of VMIDs */
+        struct mutex lock;
+        unsigned num_ids;
+        struct list_head ids_lru;
+        struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
+
+        /* Handling of VM fences */
+        u64 fence_context;
+        unsigned seqno[AMDGPU_MAX_RINGS];
+
+        uint32_t max_pfn;
+        /* vram base address for page table entry */
+        u64 vram_base_offset;
+        /* is vm enabled? */
+        bool enabled;
+        /* vm pte handling */
+        const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
+        struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
+        unsigned vm_pte_num_rings;
+        atomic_t vm_pte_next_ring;
+        /* client id counter */
+        atomic64_t client_counter;
+};
+
+void amdgpu_vm_manager_init(struct amdgpu_device *adev);
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
+                         struct list_head *validated,
+                         struct amdgpu_bo_list_entry *entry);
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                              int (*callback)(void *p, struct amdgpu_bo *bo),
+                              void *param);
+void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
+                                  struct amdgpu_vm *vm);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+                      struct amdgpu_sync *sync, struct fence *fence,
+                      struct amdgpu_job *job);
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+                                    struct amdgpu_vm *vm);
+int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+                          struct amdgpu_vm *vm);
+int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                             struct amdgpu_sync *sync);
+int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+                        struct amdgpu_bo_va *bo_va,
+                        bool clear);
+void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+                             struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
+                                       struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+                                      struct amdgpu_vm *vm,
+                                      struct amdgpu_bo *bo);
+int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+                     struct amdgpu_bo_va *bo_va,
+                     uint64_t addr, uint64_t offset,
+                     uint64_t size, uint32_t flags);
+int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+                       struct amdgpu_bo_va *bo_va,
+                       uint64_t addr);
+void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+                      struct amdgpu_bo_va *bo_va);
+
+#endif