author     Christian König <christian.koenig@amd.com>   2016-08-16 11:38:37 -0400
committer  Alex Deucher <alexander.deucher@amd.com>      2016-10-25 14:38:15 -0400
commit     63e0ba40e52c60f25ab67e27c89ed2b99b847562 (patch)
tree       430821cc3185ac5001fc250e970b007202865ac0 /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parent     d2e938701ac54cc44f28f79aa1dd640ba43b936f (diff)
drm/amdgpu: handle multiple MM nodes in the VMs v2
This allows us to map scattered VRAM BOs to the VMs.

v2: fix offset handling, use pfn instead of offset,
    fix PAGE_SIZE != AMDGPU_GPU_PAGE_SIZE case

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Edward O'Callaghan <funfunctor@folklore1984.net>
Tested-by: Mike Lothian <mike@fireburn.co.uk>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
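The heart of the change is the walk over the buffer's drm_mm_node chunks: the mapping offset is converted to a page frame number, whole chunks are skipped until that pfn lands inside one, and the range is then mapped one physically contiguous chunk at a time. The following standalone sketch mirrors that walk; struct mm_node and split_mapping() are simplified stand-ins invented for illustration (the kernel uses struct drm_mm_node and amdgpu_vm_bo_split_mapping()), and PAGE_SIZE == AMDGPU_GPU_PAGE_SIZE is assumed so the scaling factor from the patch drops out.

/*
 * Standalone sketch of the chunk walk introduced by this patch.
 * struct mm_node and split_mapping() are invented for the example;
 * PAGE_SIZE == AMDGPU_GPU_PAGE_SIZE is assumed.
 */
#include <stdint.h>
#include <stdio.h>

struct mm_node {
	uint64_t start;		/* first VRAM pfn of this chunk */
	uint64_t size;		/* chunk size in pages */
};

/*
 * Map the GPUVM pages [start, last] of a mapping backed by the
 * scattered chunks in nodes[], where pfn is the offset (in pages)
 * of 'start' into the buffer.  One line is printed per physically
 * contiguous span, mirroring one update call in the patch.
 */
static void split_mapping(const struct mm_node *nodes, uint64_t pfn,
			  uint64_t start, uint64_t last)
{
	/* Skip whole chunks until pfn falls inside the current one. */
	while (pfn >= nodes->size) {
		pfn -= nodes->size;
		++nodes;
	}

	do {
		/* Pages left in this chunk from pfn onwards. */
		uint64_t max_entries = nodes->size - pfn;
		uint64_t end = start + max_entries - 1;

		if (end > last)
			end = last;

		printf("map va pages %llu..%llu -> vram pfn %llu\n",
		       (unsigned long long)start,
		       (unsigned long long)end,
		       (unsigned long long)(nodes->start + pfn));

		pfn += end - start + 1;
		if (pfn == nodes->size) {	/* chunk exhausted */
			pfn = 0;
			++nodes;
		}
		start = end + 1;
	} while (start != last + 1);
}

int main(void)
{
	/* Two scattered VRAM chunks: pfns 100..103 and 500..507. */
	const struct mm_node nodes[] = { { 100, 4 }, { 500, 8 } };

	/* Map ten pages starting two pages into the buffer:
	 * expect spans at pfn 102..103 and 500..507. */
	split_mapping(nodes, 2, 0, 9);
	return 0;
}

Because each iteration now covers one physically contiguous chunk, every amdgpu_vm_bo_update_mapping() call stays within a contiguous block, which is what lets the patch drop the old fixed 64MB max_size chunking; updates going through pages_addr are instead capped at 16K entries per call, as the diff below shows.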
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  81
1 file changed, 46 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 7b9e94f235ab..e2fefbbd3cc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1065,8 +1065,8 @@ error_free:
  * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
  * @mapping: mapped range and flags to use for the update
- * @addr: addr to set the area to
  * @flags: HW flags for the mapping
+ * @nodes: array of drm_mm_nodes with the MC addresses
  * @fence: optional resulting fence
  *
  * Split the mapping into smaller chunks so that each update fits
@@ -1079,12 +1079,11 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 				      dma_addr_t *pages_addr,
 				      struct amdgpu_vm *vm,
 				      struct amdgpu_bo_va_mapping *mapping,
-				      uint32_t flags, uint64_t addr,
+				      uint32_t flags,
+				      struct drm_mm_node *nodes,
 				      struct fence **fence)
 {
-	const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
-
-	uint64_t src = 0, start = mapping->it.start;
+	uint64_t pfn, src = 0, start = mapping->it.start;
 	int r;
 
 	/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
@@ -1097,23 +1096,40 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 
 	trace_amdgpu_vm_bo_update(mapping);
 
-	if (pages_addr) {
-		if (flags == gtt_flags)
-			src = adev->gart.table_addr + (addr >> 12) * 8;
-		addr = 0;
+	pfn = mapping->offset >> PAGE_SHIFT;
+	if (nodes) {
+		while (pfn >= nodes->size) {
+			pfn -= nodes->size;
+			++nodes;
+		}
 	}
-	addr += mapping->offset;
 
-	if (!pages_addr || src)
-		return amdgpu_vm_bo_update_mapping(adev, exclusive,
-						   src, pages_addr, vm,
-						   start, mapping->it.last,
-						   flags, addr, fence);
-
-	while (start != mapping->it.last + 1) {
-		uint64_t last;
+	do {
+		uint64_t max_entries;
+		uint64_t addr, last;
+
+		if (nodes) {
+			addr = nodes->start << PAGE_SHIFT;
+			max_entries = (nodes->size - pfn) *
+				(PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+		} else {
+			addr = 0;
+			max_entries = S64_MAX;
+		}
 
-		last = min((uint64_t)mapping->it.last, start + max_size - 1);
+		if (pages_addr) {
+			if (flags == gtt_flags)
+				src = adev->gart.table_addr +
+					(addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
+			else
+				max_entries = min(max_entries, 16ull * 1024ull);
+			addr = 0;
+		} else if (flags & AMDGPU_PTE_VALID) {
+			addr += adev->vm_manager.vram_base_offset;
+		}
+		addr += pfn << PAGE_SHIFT;
+
+		last = min((uint64_t)mapping->it.last, start + max_entries - 1);
 		r = amdgpu_vm_bo_update_mapping(adev, exclusive,
 						src, pages_addr, vm,
 						start, last, flags, addr,
@@ -1121,9 +1137,14 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 		if (r)
 			return r;
 
+		pfn += last - start + 1;
+		if (nodes && nodes->size == pfn) {
+			pfn = 0;
+			++nodes;
+		}
 		start = last + 1;
-		addr += max_size * AMDGPU_GPU_PAGE_SIZE;
-	}
+
+	} while (unlikely(start != mapping->it.last + 1));
 
 	return 0;
 }
@@ -1147,34 +1168,24 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	dma_addr_t *pages_addr = NULL;
 	uint32_t gtt_flags, flags;
 	struct ttm_mem_reg *mem;
+	struct drm_mm_node *nodes;
 	struct fence *exclusive;
-	uint64_t addr;
 	int r;
 
 	if (clear) {
 		mem = NULL;
-		addr = 0;
+		nodes = NULL;
 		exclusive = NULL;
 	} else {
 		struct ttm_dma_tt *ttm;
 
 		mem = &bo_va->bo->tbo.mem;
-		addr = (u64)mem->start << PAGE_SHIFT;
-		switch (mem->mem_type) {
-		case TTM_PL_TT:
+		nodes = mem->mm_node;
+		if (mem->mem_type == TTM_PL_TT) {
 			ttm = container_of(bo_va->bo->tbo.ttm, struct
 					   ttm_dma_tt, ttm);
 			pages_addr = ttm->dma_address;
-			break;
-
-		case TTM_PL_VRAM:
-			addr += adev->vm_manager.vram_base_offset;
-			break;
-
-		default:
-			break;
-		}
-
+		}
 		exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
 	}
 
@@ -1190,7 +1201,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	list_for_each_entry(mapping, &bo_va->invalids, list) {
 		r = amdgpu_vm_bo_split_mapping(adev, exclusive,
 					       gtt_flags, pages_addr, vm,
-					       mapping, flags, addr,
+					       mapping, flags, nodes,
 					       &bo_va->last_pt_update);
 		if (r)
 			return r;