Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_vm.c')

-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.c | 40
1 file changed, 20 insertions(+), 20 deletions(-)
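Nearly every hunk below is the same mechanical conversion, plus one small behavioural change noted after the diff: judging by the access-pattern change, struct nouveau_vm appears to stop carrying a struct nouveau_mm *mm pointer and to embed the allocator directly instead, so every vm->mm->mutex becomes vm->mm.mutex and nouveau_mm_get()/nouveau_mm_put() are now passed &vm->mm. The struct definitions themselves are not part of this file, so the sketch below is a minimal, hypothetical illustration of the before/after access pattern, not the real nouveau types.

/* Hypothetical, simplified layouts -- not the actual nouveau headers,
 * just an illustration of the pointer-vs-embedded access change. */

struct mm {
	int mutex;              /* stand-in for the kernel's struct mutex */
};

struct vm_before {
	struct mm *mm;          /* old layout: the vm holds a pointer */
};

struct vm_after {
	struct mm mm;           /* new layout: the vm embeds the allocator */
};

/* stand-in for nouveau_mm_get()/nouveau_mm_put(), which take a struct mm * */
static void mm_get(struct mm *mm) { (void)mm; }

static void use_before(struct vm_before *vm)
{
	vm->mm->mutex = 1;      /* dereference the pointer member */
	mm_get(vm->mm);         /* pass the pointer as-is */
}

static void use_after(struct vm_after *vm)
{
	vm->mm.mutex = 1;       /* plain field access on the embedded member */
	mm_get(&vm->mm);        /* take the address of the embedded member */
}

int main(void)
{
	struct mm m = { 0 };
	struct vm_before b = { .mm = &m };
	struct vm_after a = { .mm = { 0 } };

	use_before(&b);
	use_after(&a);
	return 0;
}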
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 244fd38fdb84..ef0832b29ad2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -172,9 +172,9 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
 			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
 		}
 
-		mutex_unlock(&vm->mm->mutex);
+		mutex_unlock(&vm->mm.mutex);
 		nouveau_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&vm->mm->mutex);
+		mutex_lock(&vm->mm.mutex);
 	}
 }
 
@@ -191,18 +191,18 @@ nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
 	pgt_size = (1 << (vm->pgt_bits + 12)) >> type;
 	pgt_size *= 8;
 
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
 				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
-	mutex_lock(&vm->mm->mutex);
+	mutex_lock(&vm->mm.mutex);
 	if (unlikely(ret))
 		return ret;
 
 	/* someone beat us to filling the PDE while we didn't have the lock */
 	if (unlikely(vpgt->refcount[big]++)) {
-		mutex_unlock(&vm->mm->mutex);
+		mutex_unlock(&vm->mm.mutex);
 		nouveau_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&vm->mm->mutex);
+		mutex_lock(&vm->mm.mutex);
 		return 0;
 	}
 
@@ -223,10 +223,10 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
 	u32 fpde, lpde, pde;
 	int ret;
 
-	mutex_lock(&vm->mm->mutex);
-	ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
+	mutex_lock(&vm->mm.mutex);
+	ret = nouveau_mm_get(&vm->mm, page_shift, msize, 0, align, &vma->node);
 	if (unlikely(ret != 0)) {
-		mutex_unlock(&vm->mm->mutex);
+		mutex_unlock(&vm->mm.mutex);
 		return ret;
 	}
 
@@ -245,13 +245,13 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
 		if (ret) {
 			if (pde != fpde)
 				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
-			nouveau_mm_put(vm->mm, vma->node);
-			mutex_unlock(&vm->mm->mutex);
+			nouveau_mm_put(&vm->mm, vma->node);
+			mutex_unlock(&vm->mm.mutex);
 			vma->node = NULL;
 			return ret;
 		}
 	}
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 
 	vma->vm = vm;
 	vma->offset = (u64)vma->node->offset << 12;
@@ -270,11 +270,11 @@ nouveau_vm_put(struct nouveau_vma *vma)
 	fpde = (vma->node->offset >> vm->pgt_bits);
 	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
 
-	mutex_lock(&vm->mm->mutex);
+	mutex_lock(&vm->mm.mutex);
 	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
-	nouveau_mm_put(vm->mm, vma->node);
+	nouveau_mm_put(&vm->mm, vma->node);
 	vma->node = NULL;
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 }
 
 int
@@ -306,7 +306,7 @@ nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
 		block = length;
 
 	} else
-	if (dev_priv->card_type == NV_C0) {
+	if (dev_priv->card_type >= NV_C0) {
 		vm->map_pgt = nvc0_vm_map_pgt;
 		vm->map = nvc0_vm_map;
 		vm->map_sg = nvc0_vm_map_sg;
@@ -360,11 +360,11 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
 
 	nouveau_gpuobj_ref(pgd, &vpgd->obj);
 
-	mutex_lock(&vm->mm->mutex);
+	mutex_lock(&vm->mm.mutex);
 	for (i = vm->fpde; i <= vm->lpde; i++)
 		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
 	list_add(&vpgd->head, &vm->pgd_list);
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 	return 0;
 }
 
@@ -377,7 +377,7 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
 	if (!mpgd)
 		return;
 
-	mutex_lock(&vm->mm->mutex);
+	mutex_lock(&vm->mm.mutex);
 	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
 		if (vpgd->obj == mpgd) {
 			pgd = vpgd->obj;
@@ -386,7 +386,7 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
 			break;
 		}
 	}
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 
 	nouveau_gpuobj_ref(NULL, &pgd);
 }
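Two details beyond the pointer-to-embedded conversion are worth calling out. First, in nouveau_vm_new() the comparison dev_priv->card_type == NV_C0 becomes >= NV_C0, so card types newer than NV_C0 also take the nvc0 page-table callbacks instead of falling through. Second, the hunks in nouveau_vm_unmap_pgt() and nouveau_vm_map_pgt() keep the existing pattern of dropping vm->mm.mutex around nouveau_gpuobj_new()/nouveau_gpuobj_ref() and re-taking it afterwards; only the way the mutex is reached changes. The sketch below is an illustrative userspace restatement of that drop-and-retake pattern, with a pthread mutex standing in for the kernel's struct mutex and a hypothetical do_allocation() helper standing in for the gpuobj calls; it is not the driver's code.

/* Illustrative only: pthread mutex as a stand-in for struct mutex,
 * do_allocation() as a hypothetical stand-in for the gpuobj calls,
 * which must not run with vm->mm.mutex held. */
#include <pthread.h>
#include <stdlib.h>

struct mm {
	pthread_mutex_t mutex;
};

struct vm {
	struct mm mm;           /* embedded, as after this patch */
	void *pgt;
};

static int do_allocation(void **out)
{
	*out = malloc(4096);    /* may block; done outside the lock */
	return *out ? 0 : -1;
}

/* Called with vm->mm.mutex held, mirroring nouveau_vm_map_pgt(). */
static int map_pgt(struct vm *vm)
{
	void *pgt;
	int ret;

	pthread_mutex_unlock(&vm->mm.mutex);
	ret = do_allocation(&pgt);
	pthread_mutex_lock(&vm->mm.mutex);
	if (ret)
		return ret;

	if (vm->pgt) {
		/* someone beat us to it while the lock was dropped */
		pthread_mutex_unlock(&vm->mm.mutex);
		free(pgt);
		pthread_mutex_lock(&vm->mm.mutex);
		return 0;
	}

	vm->pgt = pgt;
	return 0;
}

int main(void)
{
	struct vm vm = { .pgt = NULL };

	pthread_mutex_init(&vm.mm.mutex, NULL);
	pthread_mutex_lock(&vm.mm.mutex);
	map_pgt(&vm);
	pthread_mutex_unlock(&vm.mm.mutex);
	free(vm.pgt);
	return 0;
}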