| author | Christian König <christian.koenig@amd.com> | 2016-06-06 04:17:58 -0400 |
| committer | Alex Deucher <alexander.deucher@amd.com> | 2016-07-07 14:54:38 -0400 |
| commit | 3cabaa54e7878c9b87dcbf8984c7534688330df3 | |
| tree | 70af7a74bba5929d7b70565cbd69012acadf2d2e | |
| parent | 61ede070555395fb1c231311c35464d9cd1a8a81 | |
drm/amdgpu: sync to buffer moves before VM updates
Otherwise we could update the VM page tables while the move is only scheduled.
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
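For context, here is a minimal sketch of the ordering problem and of the pattern the patch applies, using only helpers that appear in the diff below (`reservation_object_get_excl()` and `amdgpu_sync_fence()`). The wrapper function name is hypothetical; this is an illustrative sketch, not a verbatim copy of the kernel code.

```c
/*
 * Illustrative sketch only (hypothetical helper name, not kernel code).
 *
 * A TTM buffer move is merely scheduled; its completion is signalled
 * through the exclusive fence on the BO's reservation object.  Before
 * this patch the page-table update job did not wait on that fence, so
 * it could run while the data was still in flight.
 */
static int sketch_sync_pt_update_to_move(struct amdgpu_device *adev,
					 struct amdgpu_job *job,
					 struct amdgpu_bo_va *bo_va)
{
	/* Fence of the (possibly still pending) buffer move. */
	struct fence *exclusive =
		reservation_object_get_excl(bo_va->bo->tbo.resv);

	/*
	 * Add it to the job's sync object; the scheduler then starts the
	 * page-table update only after the move has completed.
	 */
	return amdgpu_sync_fence(adev, &job->sync, exclusive);
}
```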
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 23 |
1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 2c22ec040a16..18daa2d64d57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -830,6 +830,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
  *
  * @adev: amdgpu_device pointer
+ * @exclusive: fence we need to sync to
  * @src: address where to copy page table entries from
  * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
@@ -843,6 +844,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
  * Returns 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+				       struct fence *exclusive,
 				       uint64_t src,
 				       dma_addr_t *pages_addr,
 				       struct amdgpu_vm *vm,
@@ -903,6 +905,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
 	vm_update_params.ib = &job->ibs[0];
 
+	r = amdgpu_sync_fence(adev, &job->sync, exclusive);
+	if (r)
+		goto error_free;
+
 	r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
 			     owner);
 	if (r)
@@ -939,6 +945,7 @@ error_free:
  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
  *
  * @adev: amdgpu_device pointer
+ * @exclusive: fence we need to sync to
  * @gtt_flags: flags as they are used for GTT
  * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
@@ -952,6 +959,7 @@ error_free:
  * Returns 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+				      struct fence *exclusive,
 				      uint32_t gtt_flags,
 				      dma_addr_t *pages_addr,
 				      struct amdgpu_vm *vm,
@@ -982,7 +990,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 	addr += mapping->offset;
 
 	if (!pages_addr || src)
-		return amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
+		return amdgpu_vm_bo_update_mapping(adev, exclusive,
+						   src, pages_addr, vm,
 						   start, mapping->it.last,
 						   flags, addr, fence);
 
@@ -990,7 +999,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 		uint64_t last;
 
 		last = min((uint64_t)mapping->it.last, start + max_size - 1);
-		r = amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
+		r = amdgpu_vm_bo_update_mapping(adev, exclusive,
+						src, pages_addr, vm,
 						start, last, flags, addr,
 						fence);
 		if (r)
@@ -1023,6 +1033,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	struct amdgpu_bo_va_mapping *mapping;
 	dma_addr_t *pages_addr = NULL;
 	uint32_t gtt_flags, flags;
+	struct fence *exclusive;
 	uint64_t addr;
 	int r;
 
@@ -1044,8 +1055,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 		default:
 			break;
 		}
+
+		exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
 	} else {
 		addr = 0;
+		exclusive = NULL;
 	}
 
 	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
@@ -1057,7 +1071,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	spin_unlock(&vm->status_lock);
 
 	list_for_each_entry(mapping, &bo_va->invalids, list) {
-		r = amdgpu_vm_bo_split_mapping(adev, gtt_flags, pages_addr, vm,
+		r = amdgpu_vm_bo_split_mapping(adev, exclusive,
+					       gtt_flags, pages_addr, vm,
 					       mapping, flags, addr,
 					       &bo_va->last_pt_update);
 		if (r)
@@ -1104,7 +1119,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 				       struct amdgpu_bo_va_mapping, list);
 		list_del(&mapping->list);
 
-		r = amdgpu_vm_bo_split_mapping(adev, 0, NULL, vm, mapping,
+		r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
 					       0, 0, NULL);
 		kfree(mapping);
 		if (r)
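A note on the two call sites visible above: `amdgpu_vm_bo_update()` now fetches the exclusive fence of the mapped BO and threads it through `amdgpu_vm_bo_split_mapping()` down to `amdgpu_vm_bo_update_mapping()`, while `amdgpu_vm_clear_freed()` passes `NULL`, since freed mappings have no pending buffer move for the page-table update to wait on.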
