diff options
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 205 |
1 file changed, 205 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h new file mode 100644 index 000000000000..42a629b56095 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | |||
@@ -0,0 +1,205 @@ | |||
1 | /* | ||
2 | * Copyright 2016 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Christian König | ||
23 | */ | ||
24 | #ifndef __AMDGPU_VM_H__ | ||
25 | #define __AMDGPU_VM_H__ | ||
26 | |||
27 | #include <linux/rbtree.h> | ||
28 | |||
29 | #include "gpu_scheduler.h" | ||
30 | #include "amdgpu_sync.h" | ||
31 | #include "amdgpu_ring.h" | ||
32 | |||
33 | struct amdgpu_bo_va; | ||
34 | struct amdgpu_job; | ||
35 | struct amdgpu_bo_list_entry; | ||
36 | |||
/*
 * GPUVM handling
 */

/* maximum number of VMIDs */
#define AMDGPU_NUM_VM	16

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)

/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE	32768

/* LOG2 number of continuous pages for the fragment field */
#define AMDGPU_LOG2_PAGES_PER_FRAG	4

/* PTE status bits */
#define AMDGPU_PTE_VALID	(1 << 0)
#define AMDGPU_PTE_SYSTEM	(1 << 1)
#define AMDGPU_PTE_SNOOPED	(1 << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1 << 4)

#define AMDGPU_PTE_READABLE	(1 << 5)
#define AMDGPU_PTE_WRITEABLE	(1 << 6)

/*
 * Encode the fragment size into PTE bits 7..11.
 * The argument is fully parenthesized so that compound expressions
 * (e.g. AMDGPU_PTE_FRAG(a | b)) are masked as a whole; the previous
 * form "(x & 0x1f)" applied the mask only to the last operand because
 * '&' binds tighter than '|'.
 */
#define AMDGPU_PTE_FRAG(x)	(((x) & 0x1f) << 7)

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2
72 | |||
/*
 * One page table: the buffer object backing it plus its address.
 * An amdgpu_vm keeps an array of these, one per page directory entry.
 */
struct amdgpu_vm_pt {
	struct amdgpu_bo *bo;	/* BO holding the page table contents */
	uint64_t addr;		/* address of the PT — presumably the GPU/MC address written into the PDE; TODO confirm */
};
77 | |||
/*
 * State of one GPU virtual address space: the rb-tree of mappings,
 * the page directory / page tables backing it, and the per-ring VMID
 * pointers used when work is submitted on its behalf.
 */
struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root va;

	/* protecting invalidated */
	spinlock_t status_lock;

	/* BOs moved, but not yet updated in the PT */
	struct list_head invalidated;

	/* BOs cleared in the PT because of a move */
	struct list_head cleared;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head freed;

	/* contains the page directory */
	struct amdgpu_bo *page_directory;
	/* highest PD entry in use — presumably bounds PD update walks; verify against users */
	unsigned max_pde_used;
	/* fence of the last page directory update — TODO confirm against amdgpu_vm_update_page_directory() */
	struct fence *page_directory_fence;
	/* NOTE(review): looks like a snapshot of a device-wide eviction counter — confirm semantics in the .c file */
	uint64_t last_eviction_counter;

	/* array of page tables, one for each page directory entry */
	struct amdgpu_vm_pt *page_tables;

	/* for id and flush management per ring */
	struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];

	/* protecting freed */
	spinlock_t freed_lock;

	/* Scheduler entity for page table updates */
	struct amd_sched_entity entity;

	/* client id */
	u64 client_id;
};
115 | |||
/*
 * State of one hardware VMID: the fences tracking its current use,
 * the page directory address last flushed for it, and the GDS/GWS/OA
 * ranges associated with it.
 */
struct amdgpu_vm_id {
	struct list_head list;		/* node on a manager list — presumably ids_lru; verify */
	struct fence *first;
	struct amdgpu_sync active;	/* sync object collecting fences of work still using this id — TODO confirm */
	struct fence *last_flush;
	atomic64_t owner;		/* NOTE(review): presumably the client_id of the owning VM — confirm in amdgpu_vm_grab_id() */

	uint64_t pd_gpu_addr;		/* page directory address this id was last flushed with */
	/* last flushed PD/PT update */
	struct fence *flushed_updates;

	uint32_t current_gpu_reset_count;	/* reset count seen when the id was assigned — TODO confirm */

	/* GDS/GWS/OA base+size programmed for this id */
	uint32_t gds_base;
	uint32_t gds_size;
	uint32_t gws_base;
	uint32_t gws_size;
	uint32_t oa_base;
	uint32_t oa_size;
};
136 | |||
/*
 * Per-device GPUVM state: the pool of hardware VMIDs (AMDGPU_NUM_VM),
 * VM-fence bookkeeping, and the rings/functions used for page table
 * updates.
 */
struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct mutex lock;		/* protects the id pool below — presumably ids_lru/ids; verify */
	unsigned num_ids;
	struct list_head ids_lru;	/* least-recently-used ordering of VMIDs — TODO confirm */
	struct amdgpu_vm_id ids[AMDGPU_NUM_VM];

	/* Handling of VM fences */
	u64 fence_context;
	unsigned seqno[AMDGPU_MAX_RINGS];

	uint32_t max_pfn;
	/* vram base address for page table entry */
	u64 vram_base_offset;
	/* is vm enabled? */
	bool enabled;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
	struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
	unsigned vm_pte_num_rings;
	atomic_t vm_pte_next_ring;	/* round-robin cursor over vm_pte_rings — presumably; verify */
	/* client id counter */
	atomic64_t client_counter;
};
161 | |||
/* Per-device manager setup/teardown. */
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

/* Create / destroy one VM address space. */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);

/* Validation/LRU handling for the BOs backing the PD and PTs. */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm);

/* VMID assignment and flushing around command submission. */
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct fence *fence,
		      struct amdgpu_job *job);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);

/* Page directory / page table update paths. */
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm);
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			     struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo);

/* BO <-> VA association: lookup, add, map, unmap, remove. */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
204 | |||
205 | #endif | ||