author		Christian König <christian.koenig@amd.com>	2017-03-13 05:13:37 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2017-03-29 23:54:00 -0400
commit		663e4577a5733fab18d601128f54486d78595bc0 (patch)
tree		af0a0b2121dea7755bcb0b1bbde9af53f986db18 /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parent		4388fc2ab0e83fd6247c48c7e97064cc75511848 (diff)
drm/amdgpu: separate page table allocation from mapping

This makes it easier to implement a replace operation.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
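For context, a minimal sketch of the call order expected after this patch: the page tables covering a range are allocated explicitly with amdgpu_vm_alloc_pts() before the mapping is recorded with amdgpu_vm_bo_map(). The wrapper function and its parameter names below are hypothetical stand-ins for a real caller (e.g. the GEM VA ioctl path); only the two amdgpu_vm_* calls come from this change.

	/* Hypothetical caller sketch, not part of this patch. */
	static int example_map_range(struct amdgpu_device *adev,
				     struct amdgpu_bo_va *bo_va,
				     uint64_t va, uint64_t offset,
				     uint64_t size, uint64_t flags)
	{
		int r;

		/* Allocate the page tables covering [va, va + size) first ... */
		r = amdgpu_vm_alloc_pts(adev, bo_va->vm, va, size);
		if (r)
			return r;

		/* ... then insert the mapping itself; after this patch it no
		 * longer allocates page tables on its own.
		 */
		return amdgpu_vm_bo_map(adev, bo_va, va, offset, size, flags);
	}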
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c	136
1 file changed, 75 insertions(+), 61 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f9f4e2055084..296e985d0b65 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -187,6 +187,78 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 	spin_unlock(&glob->lru_lock);
 }
 
+/**
+ * amdgpu_vm_alloc_pts - Allocate page tables.
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: VM to allocate page tables for
+ * @saddr: Start address which needs to be allocated
+ * @size: Size from start address we need.
+ *
+ * Make sure the page tables are allocated.
+ */
+int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
+			struct amdgpu_vm *vm,
+			uint64_t saddr, uint64_t size)
+{
+	unsigned last_pfn, pt_idx;
+	uint64_t eaddr;
+	int r;
+
+	/* validate the parameters */
+	if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
+		return -EINVAL;
+
+	eaddr = saddr + size - 1;
+	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
+	if (last_pfn >= adev->vm_manager.max_pfn) {
+		dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
+			last_pfn, adev->vm_manager.max_pfn);
+		return -EINVAL;
+	}
+
+	saddr /= AMDGPU_GPU_PAGE_SIZE;
+	eaddr /= AMDGPU_GPU_PAGE_SIZE;
+
+	saddr >>= amdgpu_vm_block_size;
+	eaddr >>= amdgpu_vm_block_size;
+
+	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));
+
+	if (eaddr > vm->max_pde_used)
+		vm->max_pde_used = eaddr;
+
+	/* walk over the address space and allocate the page tables */
+	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
+		struct reservation_object *resv = vm->page_directory->tbo.resv;
+		struct amdgpu_bo *pt;
+
+		if (vm->page_tables[pt_idx].bo)
+			continue;
+
+		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
+				     AMDGPU_GPU_PAGE_SIZE, true,
+				     AMDGPU_GEM_DOMAIN_VRAM,
+				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+				     AMDGPU_GEM_CREATE_SHADOW |
+				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+				     AMDGPU_GEM_CREATE_VRAM_CLEARED,
+				     NULL, resv, &pt);
+		if (r)
+			return r;
+
+		/* Keep a reference to the page table to avoid freeing
+		 * them up in the wrong order.
+		 */
+		pt->parent = amdgpu_bo_ref(vm->page_directory);
+
+		vm->page_tables[pt_idx].bo = pt;
+		vm->page_tables[pt_idx].addr = 0;
+	}
+
+	return 0;
+}
+
 static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
 				   struct amdgpu_vm_id *id)
 {
@@ -1442,9 +1514,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	struct amdgpu_bo_va_mapping *mapping;
 	struct amdgpu_vm *vm = bo_va->vm;
 	struct interval_tree_node *it;
-	unsigned last_pfn, pt_idx;
 	uint64_t eaddr;
-	int r;
 
 	/* validate the parameters */
 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
@@ -1457,13 +1527,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	    (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
 		return -EINVAL;
 
-	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
-	if (last_pfn >= adev->vm_manager.max_pfn) {
-		dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
-			last_pfn, adev->vm_manager.max_pfn);
-		return -EINVAL;
-	}
-
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
 
@@ -1475,15 +1538,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
 			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
 			tmp->it.start, tmp->it.last + 1);
-		r = -EINVAL;
-		goto error;
+		return -EINVAL;
 	}
 
 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
-	if (!mapping) {
-		r = -ENOMEM;
-		goto error;
-	}
+	if (!mapping)
+		return -ENOMEM;
 
 	INIT_LIST_HEAD(&mapping->list);
 	mapping->it.start = saddr;
@@ -1494,56 +1554,10 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	list_add(&mapping->list, &bo_va->invalids);
 	interval_tree_insert(&mapping->it, &vm->va);
 
-	/* Make sure the page tables are allocated */
-	saddr >>= amdgpu_vm_block_size;
-	eaddr >>= amdgpu_vm_block_size;
-
-	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));
-
-	if (eaddr > vm->max_pde_used)
-		vm->max_pde_used = eaddr;
-
-	/* walk over the address space and allocate the page tables */
-	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
-		struct reservation_object *resv = vm->page_directory->tbo.resv;
-		struct amdgpu_bo *pt;
-
-		if (vm->page_tables[pt_idx].bo)
-			continue;
-
-		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
-				     AMDGPU_GPU_PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
-				     AMDGPU_GEM_CREATE_SHADOW |
-				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
-				     AMDGPU_GEM_CREATE_VRAM_CLEARED,
-				     NULL, resv, &pt);
-		if (r)
-			goto error_free;
-
-		/* Keep a reference to the page table to avoid freeing
-		 * them up in the wrong order.
-		 */
-		pt->parent = amdgpu_bo_ref(vm->page_directory);
-
-		vm->page_tables[pt_idx].bo = pt;
-		vm->page_tables[pt_idx].addr = 0;
-	}
-
 	if (flags & AMDGPU_PTE_PRT)
 		amdgpu_vm_prt_get(adev);
 
 	return 0;
-
-error_free:
-	list_del(&mapping->list);
-	interval_tree_remove(&mapping->it, &vm->va);
-	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
-	amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
-
-error:
-	return r;
 }
 
 /**