Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_gart.c')

 -rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 147
 1 file changed, 95 insertions(+), 52 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 2c594910064d..2f28ff34c085 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -693,51 +693,83 @@ struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
  * @rdev: radeon_device pointer
  * @vm: requested vm
  * @bo: radeon buffer object
- * @offset: requested offset of the buffer in the VM address space
- * @flags: attributes of pages (read/write/valid/etc.)
  *
  * Add @bo into the requested vm (cayman+).
- * Add @bo to the list of bos associated with the vm and validate
- * the offset requested within the vm address space.
- * Returns 0 for success, error for failure.
+ * Add @bo to the list of bos associated with the vm
+ * Returns newly added bo_va or NULL for failure
  *
  * Object has to be reserved!
  */
-int radeon_vm_bo_add(struct radeon_device *rdev,
-                     struct radeon_vm *vm,
-                     struct radeon_bo *bo,
-                     uint64_t offset,
-                     uint32_t flags)
+struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
+                                      struct radeon_vm *vm,
+                                      struct radeon_bo *bo)
 {
-        struct radeon_bo_va *bo_va, *tmp;
-        struct list_head *head;
-        uint64_t size = radeon_bo_size(bo), last_offset = 0;
-        unsigned last_pfn;
+        struct radeon_bo_va *bo_va;
 
         bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
         if (bo_va == NULL) {
-                return -ENOMEM;
+                return NULL;
         }
         bo_va->vm = vm;
         bo_va->bo = bo;
-        bo_va->soffset = offset;
-        bo_va->eoffset = offset + size;
-        bo_va->flags = flags;
+        bo_va->soffset = 0;
+        bo_va->eoffset = 0;
+        bo_va->flags = 0;
         bo_va->valid = false;
+        bo_va->ref_count = 1;
         INIT_LIST_HEAD(&bo_va->bo_list);
         INIT_LIST_HEAD(&bo_va->vm_list);
-        /* make sure object fit at this offset */
-        if (bo_va->soffset >= bo_va->eoffset) {
-                kfree(bo_va);
-                return -EINVAL;
-        }
 
-        last_pfn = bo_va->eoffset / RADEON_GPU_PAGE_SIZE;
-        if (last_pfn > rdev->vm_manager.max_pfn) {
-                kfree(bo_va);
-                dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
-                        last_pfn, rdev->vm_manager.max_pfn);
-                return -EINVAL;
+        mutex_lock(&vm->mutex);
+        list_add(&bo_va->vm_list, &vm->va);
+        list_add_tail(&bo_va->bo_list, &bo->va);
+        mutex_unlock(&vm->mutex);
+
+        return bo_va;
+}
+
+/**
+ * radeon_vm_bo_set_addr - set bos virtual address inside a vm
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: bo_va to store the address
+ * @soffset: requested offset of the buffer in the VM address space
+ * @flags: attributes of pages (read/write/valid/etc.)
+ *
+ * Set offset of @bo_va (cayman+).
+ * Validate and set the offset requested within the vm address space.
+ * Returns 0 for success, error for failure.
+ *
+ * Object has to be reserved!
+ */
+int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+                          struct radeon_bo_va *bo_va,
+                          uint64_t soffset,
+                          uint32_t flags)
+{
+        uint64_t size = radeon_bo_size(bo_va->bo);
+        uint64_t eoffset, last_offset = 0;
+        struct radeon_vm *vm = bo_va->vm;
+        struct radeon_bo_va *tmp;
+        struct list_head *head;
+        unsigned last_pfn;
+
+        if (soffset) {
+                /* make sure object fit at this offset */
+                eoffset = soffset + size;
+                if (soffset >= eoffset) {
+                        return -EINVAL;
+                }
+
+                last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
+                if (last_pfn > rdev->vm_manager.max_pfn) {
+                        dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+                                last_pfn, rdev->vm_manager.max_pfn);
+                        return -EINVAL;
+                }
+
+        } else {
+                eoffset = last_pfn = 0;
         }
 
         mutex_lock(&vm->mutex);
@@ -758,24 +790,33 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
         head = &vm->va;
         last_offset = 0;
         list_for_each_entry(tmp, &vm->va, vm_list) {
-                if (bo_va->soffset >= last_offset && bo_va->eoffset <= tmp->soffset) {
+                if (bo_va == tmp) {
+                        /* skip over currently modified bo */
+                        continue;
+                }
+
+                if (soffset >= last_offset && eoffset <= tmp->soffset) {
                         /* bo can be added before this one */
                         break;
                 }
-                if (bo_va->eoffset > tmp->soffset && bo_va->soffset < tmp->eoffset) {
+                if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
                         /* bo and tmp overlap, invalid offset */
                         dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
-                                bo, (unsigned)bo_va->soffset, tmp->bo,
+                                bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
                                 (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
-                        kfree(bo_va);
                         mutex_unlock(&vm->mutex);
                         return -EINVAL;
                 }
                 last_offset = tmp->eoffset;
                 head = &tmp->vm_list;
         }
-        list_add(&bo_va->vm_list, head);
-        list_add_tail(&bo_va->bo_list, &bo->va);
+
+        bo_va->soffset = soffset;
+        bo_va->eoffset = eoffset;
+        bo_va->flags = flags;
+        bo_va->valid = false;
+        list_move(&bo_va->vm_list, head);
+
         mutex_unlock(&vm->mutex);
         return 0;
 }
@@ -855,6 +896,12 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
                 return -EINVAL;
         }
 
+        if (!bo_va->soffset) {
+                dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
+                        bo, vm);
+                return -EINVAL;
+        }
+
         if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
                 return 0;
 
@@ -921,33 +968,26 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
  * radeon_vm_bo_rmv - remove a bo to a specific vm
  *
  * @rdev: radeon_device pointer
- * @vm: requested vm
- * @bo: radeon buffer object
+ * @bo_va: requested bo_va
  *
- * Remove @bo from the requested vm (cayman+).
- * Remove @bo from the list of bos associated with the vm and
- * remove the ptes for @bo in the page table.
+ * Remove @bo_va->bo from the requested vm (cayman+).
+ * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
+ * remove the ptes for @bo_va in the page table.
  * Returns 0 for success.
  *
  * Object have to be reserved!
  */
 int radeon_vm_bo_rmv(struct radeon_device *rdev,
-                     struct radeon_vm *vm,
-                     struct radeon_bo *bo)
+                     struct radeon_bo_va *bo_va)
 {
-        struct radeon_bo_va *bo_va;
         int r;
 
-        bo_va = radeon_vm_bo_find(vm, bo);
-        if (bo_va == NULL)
-                return 0;
-
         mutex_lock(&rdev->vm_manager.lock);
-        mutex_lock(&vm->mutex);
-        r = radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
+        mutex_lock(&bo_va->vm->mutex);
+        r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
         mutex_unlock(&rdev->vm_manager.lock);
         list_del(&bo_va->vm_list);
-        mutex_unlock(&vm->mutex);
+        mutex_unlock(&bo_va->vm->mutex);
         list_del(&bo_va->bo_list);
 
         kfree(bo_va);
@@ -987,6 +1027,7 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
  */
 int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 {
+        struct radeon_bo_va *bo_va;
         int r;
 
         vm->id = 0;
@@ -1006,8 +1047,10 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
         /* map the ib pool buffer at 0 in virtual address space, set
          * read only
          */
-        r = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo, RADEON_VA_IB_OFFSET,
-                             RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
+        bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo);
+        r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+                                  RADEON_VM_PAGE_READABLE |
+                                  RADEON_VM_PAGE_SNOOPED);
         return r;
 }
 
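Taken together, the hunks above split the old one-step radeon_vm_bo_add() (allocate the bo_va, validate the requested offset, and insert it) into a creation step and a separate address-setting step, radeon_vm_bo_set_addr(), and change radeon_vm_bo_rmv() to take the bo_va directly. The radeon_vm_init() hunk shows the new calling sequence for the ib pool buffer; the sketch below only illustrates the same sequence for a generic buffer and is not part of the patch. The helper name example_map_bo and its error handling are hypothetical, and the flags are simply the ones already used for the ib pool mapping.

/* illustrative sketch, not part of the patch: the two-step mapping
 * sequence a caller is expected to use after this change; the bo must
 * be reserved around these calls, as the kernel-doc comments require
 */
static struct radeon_bo_va *example_map_bo(struct radeon_device *rdev,
                                           struct radeon_vm *vm,
                                           struct radeon_bo *bo,
                                           uint64_t va_offset)
{
        struct radeon_bo_va *bo_va;
        int r;

        /* step 1: allocate the bo_va and link it into vm->va and bo->va */
        bo_va = radeon_vm_bo_add(rdev, vm, bo);
        if (bo_va == NULL)
                return NULL;

        /* step 2: validate the requested range and store soffset/eoffset/flags */
        r = radeon_vm_bo_set_addr(rdev, bo_va, va_offset,
                                  RADEON_VM_PAGE_READABLE |
                                  RADEON_VM_PAGE_SNOOPED);
        if (r) {
                /* teardown now goes through the bo_va, not (vm, bo) */
                radeon_vm_bo_rmv(rdev, bo_va);
                return NULL;
        }

        return bo_va;
}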