path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
author    Daniel Vetter <daniel.vetter@ffwll.ch>	2017-10-03 05:09:16 -0400
committer Daniel Vetter <daniel.vetter@ffwll.ch>	2017-10-03 05:09:16 -0400
commit    0d3c24e936feefeca854073ccb40613cd6eba9a9 (patch)
tree      1f675397b924846740b0931b066ddce6f3d7eb3d /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
parent    1af0838de60e723cb02253ecc9b555c30f8f6a6f (diff)
parent    ebec44a2456fbe5fe18aae88f6010f6878f0cb4a (diff)

Merge airlied/drm-next into drm-misc-next

Just catching up with upstream.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h	85
1 file changed, 58 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 217ecba8f4cc..0af090667dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -25,6 +25,7 @@
 #define __AMDGPU_VM_H__
 
 #include <linux/rbtree.h>
+#include <linux/idr.h>
 
 #include "gpu_scheduler.h"
 #include "amdgpu_sync.h"
@@ -50,11 +51,6 @@ struct amdgpu_bo_list_entry;
 /* PTBs (Page Table Blocks) need to be aligned to 32K */
 #define AMDGPU_VM_PTB_ALIGN_SIZE	32768
 
-/* LOG2 number of continuous pages for the fragment field */
-#define AMDGPU_LOG2_PAGES_PER_FRAG(adev) \
-	((adev)->asic_type < CHIP_VEGA10 ? 4 : \
-	 (adev)->vm_manager.block_size)
-
 #define AMDGPU_PTE_VALID	(1ULL << 0)
 #define AMDGPU_PTE_SYSTEM	(1ULL << 1)
 #define AMDGPU_PTE_SNOOPED	(1ULL << 2)
@@ -99,37 +95,57 @@ struct amdgpu_bo_list_entry;
 #define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
 #define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
 
+/* base structure for tracking BO usage in a VM */
+struct amdgpu_vm_bo_base {
+	/* constant after initialization */
+	struct amdgpu_vm	*vm;
+	struct amdgpu_bo	*bo;
+
+	/* protected by bo being reserved */
+	struct list_head	bo_list;
+
+	/* protected by spinlock */
+	struct list_head	vm_status;
+
+	/* protected by the BO being reserved */
+	bool			moved;
+};
 
 struct amdgpu_vm_pt {
-	struct amdgpu_bo	*bo;
-	uint64_t		addr;
-	bool			huge_page;
+	struct amdgpu_vm_bo_base	base;
+	uint64_t			addr;
 
 	/* array of page tables, one for each directory entry */
-	struct amdgpu_vm_pt	*entries;
-	unsigned		last_entry_used;
+	struct amdgpu_vm_pt		*entries;
+	unsigned			last_entry_used;
 };
 
+#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
+#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
+#define AMDGPU_VM_FAULT_ADDR(fault)  ((u64)(fault) & 0xfffffffff000ULL)
+
 struct amdgpu_vm {
 	/* tree of virtual addresses mapped */
-	struct rb_root		va;
+	struct rb_root_cached	va;
 
 	/* protecting invalidated */
 	spinlock_t		status_lock;
 
-	/* BOs moved, but not yet updated in the PT */
-	struct list_head	invalidated;
+	/* BOs who needs a validation */
+	struct list_head	evicted;
+
+	/* PT BOs which relocated and their parent need an update */
+	struct list_head	relocated;
 
-	/* BOs cleared in the PT because of a move */
-	struct list_head	cleared;
+	/* BOs moved, but not yet updated in the PT */
+	struct list_head	moved;
 
 	/* BO mappings freed, but not yet updated in the PT */
 	struct list_head	freed;
 
 	/* contains the page directory */
 	struct amdgpu_vm_pt	root;
-	struct dma_fence	*last_dir_update;
-	uint64_t		last_eviction_counter;
+	struct dma_fence	*last_update;
 
 	/* protecting freed */
 	spinlock_t		freed_lock;
@@ -137,18 +153,20 @@ struct amdgpu_vm {
 	/* Scheduler entity for page table updates */
 	struct amd_sched_entity	entity;
 
-	/* client id */
+	/* client id and PASID (TODO: replace client_id with PASID) */
 	u64			client_id;
+	unsigned int		pasid;
 	/* dedicated to vm */
 	struct amdgpu_vm_id	*reserved_vmid[AMDGPU_MAX_VMHUBS];
-	/* each VM will map on CSA */
-	struct amdgpu_bo_va	*csa_bo_va;
 
 	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
 	bool			use_cpu_for_update;
 
 	/* Flag to indicate ATS support from PTE for GFX9 */
 	bool			pte_support_ats;
+
+	/* Up to 128 pending page faults */
+	DECLARE_KFIFO(faults, u64, 128);
 };
 
 struct amdgpu_vm_id {
@@ -191,6 +209,7 @@ struct amdgpu_vm_manager {
 	uint32_t				num_level;
 	uint64_t				vm_size;
 	uint32_t				block_size;
+	uint32_t				fragment_size;
 	/* vram base address for page table entry */
 	u64					vram_base_offset;
 	/* vm pte handling */
@@ -210,21 +229,28 @@ struct amdgpu_vm_manager {
 	 * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
 	 */
 	int					vm_update_mode;
+
+	/* PASID to VM mapping, will be used in interrupt context to
+	 * look up VM of a page fault
+	 */
+	struct idr				pasid_idr;
+	spinlock_t				pasid_lock;
 };
 
+int amdgpu_vm_alloc_pasid(unsigned int bits);
+void amdgpu_vm_free_pasid(unsigned int pasid);
 void amdgpu_vm_manager_init(struct amdgpu_device *adev);
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-		   int vm_context);
+		   int vm_context, unsigned int pasid);
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 			 struct list_head *validated,
 			 struct amdgpu_bo_list_entry *entry);
+bool amdgpu_vm_ready(struct amdgpu_vm *vm);
 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			      int (*callback)(void *p, struct amdgpu_bo *bo),
 			      void *param);
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
-				  struct amdgpu_vm *vm);
 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 			struct amdgpu_vm *vm,
 			uint64_t saddr, uint64_t size);
@@ -240,13 +266,13 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 			  struct amdgpu_vm *vm,
 			  struct dma_fence **fence);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			     struct amdgpu_sync *sync);
+int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
+			   struct amdgpu_vm *vm);
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			struct amdgpu_bo_va *bo_va,
 			bool clear);
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
-			     struct amdgpu_bo *bo);
+			     struct amdgpu_bo *bo, bool evicted);
 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
 				       struct amdgpu_bo *bo);
 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
@@ -266,9 +292,14 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 				struct amdgpu_vm *vm,
 				uint64_t saddr, uint64_t size);
+struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
+							 uint64_t addr);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		      struct amdgpu_bo_va *bo_va);
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size);
+void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,
+		uint32_t fragment_size_default);
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size,
+		uint32_t fragment_size_default);
 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
 				  struct amdgpu_job *job);
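
Side note on the new fault tracking introduced above (the AMDGPU_VM_FAULT pack/unpack macros feeding the 128-entry faults kfifo): a minimal, self-contained sketch in ordinary userspace C, not driver code, showing how the packing behaves. The PASID and address values are made up for illustration; the address must be page aligned and below bit 48 for the round trip to be lossless.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Same macros as in the hunk above: the PASID goes above bit 48,
 * the page-aligned fault address is kept in bits 47:12. */
#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
#define AMDGPU_VM_FAULT_ADDR(fault)  ((u64)(fault) & 0xfffffffff000ULL)

int main(void)
{
	unsigned int pasid = 0x1234;          /* hypothetical PASID */
	u64 addr = 0x0000000badc0d000ULL;     /* hypothetical page-aligned VA */

	/* Pack both into one u64, as would be queued in the faults kfifo. */
	u64 fault = AMDGPU_VM_FAULT(pasid, addr);

	/* Unpack again; prints pasid=0x1234 addr=0xbadc0d000. */
	printf("fault=0x%016llx pasid=0x%llx addr=0x%llx\n",
	       (unsigned long long)fault,
	       (unsigned long long)AMDGPU_VM_FAULT_PASID(fault),
	       (unsigned long long)AMDGPU_VM_FAULT_ADDR(fault));
	return 0;
}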