author     Christian König <deathsimple@vodafone.de>   2012-10-09 07:31:18 -0400
committer  Alex Deucher <alexander.deucher@amd.com>    2012-10-15 13:21:01 -0400
commit     d72d43cfc5847c176edabc72e6431ba691322c98 (patch)
tree       5106b3d8c9fde2f8c5670d2ede983364e77ce722 /drivers/gpu
parent     90a51a329258e3c868f6f4c1fb264ca01c590c57 (diff)
drm/radeon: don't add the IB pool to all VMs v2
We want to use VMs without the IB pool in the future.

v2: also remove it from radeon_vm_fini.

Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h       |  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c  | 34
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c   | 22
3 files changed, 25 insertions(+), 33 deletions(-)
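As a quick orientation before the diff: a minimal sketch of the caller-side pattern this patch establishes, condensed from the radeon_kms.c hunks below. The wrapper function name and the omitted cleanup on failure are illustrative only; all radeon_* identifiers are taken from the patch itself.

/* Sketch: after this patch radeon_vm_init() only initializes the VM
 * fields (it now returns void), so a caller that still wants the IB
 * pool mapped has to add it explicitly, as radeon_driver_open_kms()
 * does. The wrapper below is hypothetical, not part of the patch.
 */
static int open_vm_with_ib_pool(struct radeon_device *rdev,
				struct radeon_fpriv *fpriv)
{
	struct radeon_bo_va *bo_va;

	radeon_vm_init(rdev, &fpriv->vm);

	/* map the IB pool buffer read only into the virtual address space */
	bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, rdev->ring_tmp_bo.bo);
	return radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
				     RADEON_VM_PAGE_READABLE |
				     RADEON_VM_PAGE_SNOOPED);
}

The tear-down counterpart (reserving the IB pool BO and removing its bo_va before radeon_vm_fini()) is shown in the radeon_driver_postclose_kms() hunk at the end of the diff.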
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index bc6b56bf274a..54cf9b594ca6 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1848,7 +1848,7 @@ extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size
  */
 int radeon_vm_manager_init(struct radeon_device *rdev);
 void radeon_vm_manager_fini(struct radeon_device *rdev);
-int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
 int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 98b170a0df90..7e21ea32242a 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -602,7 +602,6 @@ int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
  * @vm: vm to bind
  *
  * Allocate a page table for the requested vm (cayman+).
- * Also starts to populate the page table.
  * Returns 0 for success, error for failure.
  *
  * Global and local mutex must be locked!
@@ -655,8 +654,7 @@ retry:
 	}
 
 	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
-	return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
-				       &rdev->ring_tmp_bo.bo->tbo.mem);
+	return 0;
 }
 
 /**
@@ -1241,30 +1239,15 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
  * @rdev: radeon_device pointer
  * @vm: requested vm
  *
- * Init @vm (cayman+).
- * Map the IB pool and any other shared objects into the VM
- * by default as it's used by all VMs.
- * Returns 0 for success, error for failure.
+ * Init @vm fields (cayman+).
  */
-int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 {
-	struct radeon_bo_va *bo_va;
-	int r;
-
 	vm->id = 0;
 	vm->fence = NULL;
 	mutex_init(&vm->mutex);
 	INIT_LIST_HEAD(&vm->list);
 	INIT_LIST_HEAD(&vm->va);
-
-	/* map the ib pool buffer at 0 in virtual address space, set
-	 * read only
-	 */
-	bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo);
-	r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
-				  RADEON_VM_PAGE_READABLE |
-				  RADEON_VM_PAGE_SNOOPED);
-	return r;
 }
 
 /**
@@ -1286,17 +1269,6 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 	radeon_vm_free_pt(rdev, vm);
 	mutex_unlock(&rdev->vm_manager.lock);
 
-	/* remove all bo at this point non are busy any more because unbind
-	 * waited for the last vm fence to signal
-	 */
-	r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-	if (!r) {
-		bo_va = radeon_vm_bo_find(vm, rdev->ring_tmp_bo.bo);
-		list_del_init(&bo_va->bo_list);
-		list_del_init(&bo_va->vm_list);
-		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
-		kfree(bo_va);
-	}
 	if (!list_empty(&vm->va)) {
 		dev_err(rdev->dev, "still active bo inside vm\n");
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 83b8d8aa71c0..dc781c49b96b 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -419,6 +419,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 	/* new gpu have virtual address space support */
 	if (rdev->family >= CHIP_CAYMAN) {
 		struct radeon_fpriv *fpriv;
+		struct radeon_bo_va *bo_va;
 		int r;
 
 		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -426,7 +427,15 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 			return -ENOMEM;
 		}
 
-		r = radeon_vm_init(rdev, &fpriv->vm);
+		radeon_vm_init(rdev, &fpriv->vm);
+
+		/* map the ib pool buffer read only into
+		 * virtual address space */
+		bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+					 rdev->ring_tmp_bo.bo);
+		r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+					  RADEON_VM_PAGE_READABLE |
+					  RADEON_VM_PAGE_SNOOPED);
 		if (r) {
 			radeon_vm_fini(rdev, &fpriv->vm);
 			kfree(fpriv);
@@ -454,6 +463,17 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
 	/* new gpu have virtual address space support */
 	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
 		struct radeon_fpriv *fpriv = file_priv->driver_priv;
+		struct radeon_bo_va *bo_va;
+		int r;
+
+		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+		if (!r) {
+			bo_va = radeon_vm_bo_find(&fpriv->vm,
+						  rdev->ring_tmp_bo.bo);
+			if (bo_va)
+				radeon_vm_bo_rmv(rdev, bo_va);
+			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+		}
 
 		radeon_vm_fini(rdev, &fpriv->vm);
 		kfree(fpriv);