author    Alex Deucher <alexander.deucher@amd.com>    2012-07-17 14:02:40 -0400
committer Christian König <deathsimple@vodafone.de>   2012-07-18 07:53:39 -0400
commit    09db86443230503f57d4079694a337f4e3c7b5a2 (patch)
tree      78660c784cd364a83c00137144e92174d34c30d9 /drivers/gpu/drm/radeon/radeon_gart.c
parent    03eec93bbc944ad4e467e083dd768d92d00213f0 (diff)
drm/radeon: document VM functions in radeon_gart.c (v3)
Document the VM functions in radeon_gart.c

v2: adjust per Christian's suggestions
v3: adjust to Christian's latest changes

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_gart.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c  142
1 file changed, 142 insertions, 0 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 12135ec94e8d..9727ea117223 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -397,11 +397,39 @@ void radeon_gart_fini(struct radeon_device *rdev)
 }
 
 /*
+ * GPUVM
+ * GPUVM is similar to the legacy gart on older asics, however
+ * rather than there being a single global gart table
+ * for the entire GPU, there are multiple VM page tables active
+ * at any given time.  The VM page tables can contain a mix of
+ * vram pages and system memory pages, and system memory pages
+ * can be mapped as snooped (cached system pages) or unsnooped
+ * (uncached system pages).
+ * Each VM has an ID associated with it and there is a page table
+ * associated with each VMID.  When executing a command buffer,
+ * the kernel tells the ring what VMID to use for that command
+ * buffer.  VMIDs are allocated dynamically as commands are submitted.
+ * The userspace drivers maintain their own address space and the kernel
+ * sets up their page tables accordingly when they submit their
+ * command buffers and a VMID is assigned.
+ * Cayman/Trinity support up to 8 active VMs at any given time;
+ * SI supports 16.
+ */
+
+/*
  * vm helpers
  *
  * TODO bind a default page at vm initialization for default address
  */
 
+/**
+ * radeon_vm_manager_init - init the vm manager
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Init the vm manager (cayman+).
+ * Returns 0 for success, error for failure.
+ */
 int radeon_vm_manager_init(struct radeon_device *rdev)
 {
 	struct radeon_vm *vm;
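The GPUVM comment above boils down to: every client gets its own page table, and a hardware VMID slot is attached only while that client's command buffers run. A minimal illustrative sketch with toy types (not the driver's real structures, which live in radeon.h):

#include <errno.h>
#include <stdint.h>

#define MAX_HW_VMIDS 8                 /* Cayman/Trinity limit; SI would be 16 */

struct toy_vm {
	unsigned id;                   /* 0 = not currently bound to a hardware slot */
	uint64_t *page_table;          /* per-VM translations (vram or system pages) */
};

/* Dynamically hand out a VMID when a command buffer from this VM is submitted. */
static int toy_assign_vmid(struct toy_vm *vm, struct toy_vm *active[MAX_HW_VMIDS])
{
	for (unsigned i = 1; i < MAX_HW_VMIDS; i++) {  /* id 0 stays with the kernel */
		if (!active[i]) {
			active[i] = vm;
			vm->id = i;
			return 0;
		}
	}
	return -EBUSY;                 /* a real driver would evict an idle VM here */
}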
@@ -456,6 +484,16 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
 }
 
 /* global mutex must be lock */
+/**
+ * radeon_vm_unbind_locked - unbind a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to unbind
+ *
+ * Unbind the requested vm (cayman+).
+ * Wait for use of the VM to finish, then unbind the page table,
+ * and free the page table memory.
+ */
 static void radeon_vm_unbind_locked(struct radeon_device *rdev,
 				    struct radeon_vm *vm)
 {
@@ -495,6 +533,13 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev,
 	}
 }
 
+/**
+ * radeon_vm_manager_fini - tear down the vm manager
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the VM manager (cayman+).
+ */
 void radeon_vm_manager_fini(struct radeon_device *rdev)
 {
 	struct radeon_vm *vm, *tmp;
@@ -516,6 +561,14 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
 }
 
 /* global mutex must be locked */
+/**
+ * radeon_vm_unbind - locked version of unbind
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to unbind
+ *
+ * Locked version that wraps radeon_vm_unbind_locked (cayman+).
+ */
 void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
 {
 	mutex_lock(&vm->mutex);
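The two unbind variants documented above differ only in who takes the locks. A rough sketch of the calling convention; the global mutex field name rdev->vm_manager.lock is assumed from the radeon driver of this era, and the helper itself is hypothetical:

/* Hypothetical helper, only to illustrate the lock ordering spelled out in
 * the comments above: the caller holds the global VM manager mutex, then
 * radeon_vm_unbind() takes vm->mutex and calls radeon_vm_unbind_locked(),
 * which waits for outstanding use, unbinds the page table and frees it. */
static void example_drop_vm(struct radeon_device *rdev, struct radeon_vm *vm)
{
	mutex_lock(&rdev->vm_manager.lock);     /* "global mutex must be locked" */
	radeon_vm_unbind(rdev, vm);
	mutex_unlock(&rdev->vm_manager.lock);
}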
@@ -524,6 +577,18 @@ void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
 }
 
 /* global and local mutex must be locked */
+/**
+ * radeon_vm_bind - bind a page table to a VMID
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to bind
+ *
+ * Bind the requested vm (cayman+).
+ * Suballocate memory for the page table, allocate a VMID
+ * and bind the page table to it, and finally start to populate
+ * the page table.
+ * Returns 0 for success, error for failure.
+ */
 int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
 {
 	struct radeon_vm *vm_evict;
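The bind flow documented above (suballocate page-table memory, grab a VMID, populate) also implies an evict-and-retry loop when either resource is exhausted, which the hunk header's retry_id: label hints at. A pseudocode-style sketch, with every helper name below hypothetical and for illustration only:

static int example_bind_flow(struct radeon_device *rdev, struct radeon_vm *vm)
{
	int r;

retry:
	r = example_suballoc_page_table(rdev, vm);    /* backing memory for the PT */
	if (r == -ENOMEM) {
		example_evict_idle_vm(rdev);          /* free someone else's PT... */
		goto retry;                           /* ...and try again          */
	} else if (r) {
		return r;
	}

	r = example_alloc_vmid(rdev, vm);             /* may also evict and retry  */
	if (r)
		return r;

	return example_populate_page_table(rdev, vm); /* write the initial ptes    */
}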
@@ -586,6 +651,20 @@ retry_id:
 }
 
 /* object have to be reserved */
+/**
+ * radeon_vm_bo_add - add a bo to a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ * @offset: requested offset of the buffer in the VM address space
+ * @flags: attributes of pages (read/write/valid/etc.)
+ *
+ * Add @bo into the requested vm (cayman+).
+ * Add @bo to the list of bos associated with the vm and validate
+ * the offset requested within the vm address space.
+ * Returns 0 for success, error for failure.
+ */
 int radeon_vm_bo_add(struct radeon_device *rdev,
 		     struct radeon_vm *vm,
 		     struct radeon_bo *bo,
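A possible call into radeon_vm_bo_add(), showing the @offset/@flags parameters described above. The RADEON_VM_PAGE_* bits are the page-attribute flags from this kernel's radeon.h; va_offset, r and the error label are placeholders:

	/* bo must already be reserved, per the "object have to be reserved" rule. */
	r = radeon_vm_bo_add(rdev, vm, bo,
			     va_offset,                   /* requested GPU virtual address */
			     RADEON_VM_PAGE_READABLE |
			     RADEON_VM_PAGE_WRITEABLE |
			     RADEON_VM_PAGE_SNOOPED);     /* cached (snooped) system pages */
	if (r)
		goto error;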
@@ -663,6 +742,17 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
 	return 0;
 }
 
+/**
+ * radeon_vm_get_addr - get the physical address of the page
+ *
+ * @rdev: radeon_device pointer
+ * @mem: ttm mem
+ * @pfn: pfn
+ *
+ * Look up the physical address of the page that the pte resolves
+ * to (cayman+).
+ * Returns the physical address of the page.
+ */
 static u64 radeon_vm_get_addr(struct radeon_device *rdev,
 			      struct ttm_mem_reg *mem,
 			      unsigned pfn)
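The @pfn passed to radeon_vm_get_addr() is an index in GPU-sized pages (RADEON_GPU_PAGE_SIZE, 4 KiB) within the bo's placement. Conceptually the lookup is just the following; this is not the driver's actual math, and the names other than RADEON_GPU_PAGE_SIZE are placeholders:

	/* Conceptual only: VRAM placements are contiguous so a simple offset works,
	 * while system pages need a per-page DMA address lookup. */
	uint64_t addr;
	if (mem_is_vram)
		addr = vram_base + (uint64_t)pfn * RADEON_GPU_PAGE_SIZE;
	else
		addr = example_system_page_dma_addr(pfn);  /* hypothetical helper */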
@@ -692,6 +782,17 @@ static u64 radeon_vm_get_addr(struct radeon_device *rdev,
 }
 
 /* object have to be reserved & global and local mutex must be locked */
+/**
+ * radeon_vm_bo_update_pte - map a bo into the vm page table
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ * @mem: ttm mem
+ *
+ * Fill in the page table entries for @bo (cayman+).
+ * Returns 0 for success, -EINVAL for failure.
+ */
 int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 			    struct radeon_vm *vm,
 			    struct radeon_bo *bo,
@@ -740,6 +841,18 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 }
 
 /* object have to be reserved */
+/**
+ * radeon_vm_bo_rmv - remove a bo from a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ *
+ * Remove @bo from the requested vm (cayman+).
+ * Remove @bo from the list of bos associated with the vm and
+ * remove the ptes for @bo in the page table.
+ * Returns 0 for success.
+ */
 int radeon_vm_bo_rmv(struct radeon_device *rdev,
 		     struct radeon_vm *vm,
 		     struct radeon_bo *bo)
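Taken together, the bo functions documented above form the mapping lifecycle. A hedged sketch, with locking, reservation and error handling elided and bo->tbo.mem assumed to be the bo's current TTM placement:

	r = radeon_vm_bo_add(rdev, vm, bo, va_offset, flags);    /* reserve the VA range */
	r = radeon_vm_bo_update_pte(rdev, vm, bo, &bo->tbo.mem); /* write the ptes       */
	/* ... command buffers submitted with this VM's VMID can now use the mapping ... */
	r = radeon_vm_bo_rmv(rdev, vm, bo);                      /* clear ptes, drop VA  */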
@@ -762,6 +875,15 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
 	return 0;
 }
 
+/**
+ * radeon_vm_bo_invalidate - mark the bo as invalid
+ *
+ * @rdev: radeon_device pointer
+ * @bo: radeon buffer object
+ *
+ * Mark @bo as invalid (cayman+).
+ */
 void radeon_vm_bo_invalidate(struct radeon_device *rdev,
 			     struct radeon_bo *bo)
 {
@@ -773,6 +895,17 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
 	}
 }
 
+/**
+ * radeon_vm_init - initialize a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Init @vm (cayman+).
+ * Map the IB pool and any other shared objects into the VM
+ * by default as it's used by all VMs.
+ * Returns 0 for success, error for failure.
+ */
 int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 {
 	int r;
@@ -791,6 +924,15 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 	return r;
 }
 
+/**
+ * radeon_vm_fini - tear down a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Tear down @vm (cayman+).
+ * Unbind the VM and remove all bos from the vm bo list.
+ */
 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 {
 	struct radeon_bo_va *bo_va, *tmp;
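Finally, the per-VM lifecycle as described by the radeon_vm_init()/radeon_vm_fini() docs above. A minimal sketch, assuming the caller owns the VM object (in the driver it typically lives inside per-file private data):

	struct radeon_vm *vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	r = radeon_vm_init(rdev, vm);   /* also maps shared objects such as the IB pool */
	if (r) {
		kfree(vm);
		return r;
	}

	/* ... mappings, command submission ... */

	radeon_vm_fini(rdev, vm);       /* unbind the VM and drop every bo in its list */
	kfree(vm);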