author		Ben Skeggs <bskeggs@redhat.com>	2013-05-13 08:26:26 -0400
committer	Ben Skeggs <bskeggs@redhat.com>	2013-06-30 23:45:03 -0400
commit		4e67bee8e129c072e5498bd192b9cb8aa7e62a89 (patch)
tree		64f83ce0bffd874d82b4fe474ae696b4c5cccf11
parent		15cace591788552717269f0d1a5f292b08af39ed (diff)
drm/nouveau/vm: take subdev mutex, not the mm mutex; protects against race with vm/nvc0
nvc0_vm_flush() accesses the pgd list, which will soon be able to race
with vm_unlink() during channel destruction.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
-rw-r--r--	drivers/gpu/drm/nouveau/core/subdev/vm/base.c	33
1 file changed, 17 insertions(+), 16 deletions(-)
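
To make the race concrete, here is a minimal userspace sketch (hypothetical: struct vm_pgd, flush_all(), unlink_pgd(), and the pthread mutex are illustrative stand-ins for the kernel's nouveau_vm_pgd list and the subdev mutex). One path walks the pgd list the way nvc0_vm_flush() does; the other removes an entry the way nouveau_vm_unlink() does during channel destruction. The fix amounts to making both paths take the same mutex, which the diff below does with nv_subdev(vmm)->mutex.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct vm_pgd {
	int id;                 /* stand-in for the page directory gpuobj */
	struct vm_pgd *next;
};

static struct vm_pgd *pgd_list;
static pthread_mutex_t subdev_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Walks every pgd, as nvc0_vm_flush() does when flushing TLBs. */
static void flush_all(void)
{
	pthread_mutex_lock(&subdev_mutex);
	for (struct vm_pgd *p = pgd_list; p; p = p->next)
		printf("flush pgd %d\n", p->id);
	pthread_mutex_unlock(&subdev_mutex);
}

/* Removes one pgd, as nouveau_vm_unlink() does on channel destruction. */
static void unlink_pgd(int id)
{
	struct vm_pgd **pp, *victim = NULL;

	pthread_mutex_lock(&subdev_mutex);
	for (pp = &pgd_list; *pp; pp = &(*pp)->next) {
		if ((*pp)->id == id) {
			victim = *pp;
			*pp = victim->next;     /* unlink under the lock */
			break;
		}
	}
	pthread_mutex_unlock(&subdev_mutex);
	free(victim);                           /* free outside the lock */
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct vm_pgd *p = malloc(sizeof(*p));
		p->id = i;
		p->next = pgd_list;
		pgd_list = p;
	}
	flush_all();    /* flushes pgds 2, 1, 0 */
	unlink_pgd(1);
	flush_all();    /* pgd 1 is gone; no use-after-free possible */
	return 0;
}

If the two functions used different locks, as the pre-patch code effectively did (vm->mm.mutex versus the subdev mutex that nvc0_vm_flush() will take), the walker could dereference a node freed by the unlinker.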
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index 77c67fc970e6..6fc389163532 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -236,9 +236,9 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
 			vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
 		}
 
-		mutex_unlock(&vm->mm.mutex);
+		mutex_unlock(&nv_subdev(vmm)->mutex);
 		nouveau_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&vm->mm.mutex);
+		mutex_lock(&nv_subdev(vmm)->mutex);
 	}
 }
 
@@ -256,18 +256,18 @@ nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
 	pgt_size  = (1 << (vmm->pgt_bits + 12)) >> type;
 	pgt_size *= 8;
 
-	mutex_unlock(&vm->mm.mutex);
+	mutex_unlock(&nv_subdev(vmm)->mutex);
 	ret = nouveau_gpuobj_new(nv_object(vm->vmm), NULL, pgt_size, 0x1000,
 				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
-	mutex_lock(&vm->mm.mutex);
+	mutex_lock(&nv_subdev(vmm)->mutex);
 	if (unlikely(ret))
 		return ret;
 
 	/* someone beat us to filling the PDE while we didn't have the lock */
 	if (unlikely(vpgt->refcount[big]++)) {
-		mutex_unlock(&vm->mm.mutex);
+		mutex_unlock(&nv_subdev(vmm)->mutex);
 		nouveau_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&vm->mm.mutex);
+		mutex_lock(&nv_subdev(vmm)->mutex);
 		return 0;
 	}
 
@@ -289,11 +289,11 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
 	u32 fpde, lpde, pde;
 	int ret;
 
-	mutex_lock(&vm->mm.mutex);
+	mutex_lock(&nv_subdev(vmm)->mutex);
 	ret = nouveau_mm_head(&vm->mm, page_shift, msize, msize, align,
 			      &vma->node);
 	if (unlikely(ret != 0)) {
-		mutex_unlock(&vm->mm.mutex);
+		mutex_unlock(&nv_subdev(vmm)->mutex);
 		return ret;
 	}
 
@@ -314,11 +314,11 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
 			if (pde != fpde)
 				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
 			nouveau_mm_free(&vm->mm, &vma->node);
-			mutex_unlock(&vm->mm.mutex);
+			mutex_unlock(&nv_subdev(vmm)->mutex);
 			return ret;
 		}
 	}
-	mutex_unlock(&vm->mm.mutex);
+	mutex_unlock(&nv_subdev(vmm)->mutex);
 
 	vma->vm = vm;
 	vma->offset = (u64)vma->node->offset << 12;
@@ -338,10 +338,10 @@ nouveau_vm_put(struct nouveau_vma *vma)
 	fpde = (vma->node->offset >> vmm->pgt_bits);
 	lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;
 
-	mutex_lock(&vm->mm.mutex);
+	mutex_lock(&nv_subdev(vmm)->mutex);
 	nouveau_vm_unmap_pgt(vm, vma->node->type != vmm->spg_shift, fpde, lpde);
 	nouveau_mm_free(&vm->mm, &vma->node);
-	mutex_unlock(&vm->mm.mutex);
+	mutex_unlock(&nv_subdev(vmm)->mutex);
 }
 
 int
@@ -405,24 +405,25 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
 
 	nouveau_gpuobj_ref(pgd, &vpgd->obj);
 
-	mutex_lock(&vm->mm.mutex);
+	mutex_lock(&nv_subdev(vmm)->mutex);
 	for (i = vm->fpde; i <= vm->lpde; i++)
 		vmm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
 	list_add(&vpgd->head, &vm->pgd_list);
-	mutex_unlock(&vm->mm.mutex);
+	mutex_unlock(&nv_subdev(vmm)->mutex);
 	return 0;
 }
 
 static void
 nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
 {
+	struct nouveau_vmmgr *vmm = vm->vmm;
 	struct nouveau_vm_pgd *vpgd, *tmp;
 	struct nouveau_gpuobj *pgd = NULL;
 
 	if (!mpgd)
 		return;
 
-	mutex_lock(&vm->mm.mutex);
+	mutex_lock(&nv_subdev(vmm)->mutex);
 	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
 		if (vpgd->obj == mpgd) {
 			pgd = vpgd->obj;
@@ -431,7 +432,7 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
 			break;
 		}
 	}
-	mutex_unlock(&vm->mm.mutex);
+	mutex_unlock(&nv_subdev(vmm)->mutex);
 
 	nouveau_gpuobj_ref(NULL, &pgd);
 }
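
A pattern worth noting in the nouveau_vm_map_pgt() hunk above: the mutex is dropped around the page-table allocation (which can sleep), so once it is retaken the code must recheck vpgt->refcount and discard its own allocation if another thread filled the PDE in the meantime. Below is a hypothetical userspace sketch of the same drop-allocate-retake shape; pgt_slot, map_pgt(), and calloc()/free() are stand-ins for the kernel structures and nouveau_gpuobj_new()/nouveau_gpuobj_ref(NULL, ...), not the driver code itself.

#include <pthread.h>
#include <stdlib.h>

struct pgt_slot {
	void *obj;              /* stand-in for the page table gpuobj */
	int refcount;
};

static pthread_mutex_t subdev_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Called with subdev_mutex held; returns with it held, like the original. */
static int map_pgt(struct pgt_slot *slot, size_t pgt_size)
{
	void *pgt;

	pthread_mutex_unlock(&subdev_mutex);
	pgt = calloc(1, pgt_size);      /* allocation may sleep in the kernel */
	pthread_mutex_lock(&subdev_mutex);
	if (!pgt)
		return -1;

	/* someone beat us to filling the slot while the lock was dropped */
	if (slot->refcount++) {
		pthread_mutex_unlock(&subdev_mutex);
		free(pgt);              /* drop our copy outside the lock */
		pthread_mutex_lock(&subdev_mutex);
		return 0;
	}

	slot->obj = pgt;                /* we won the race; publish the table */
	return 0;
}

int main(void)
{
	struct pgt_slot slot = { 0 };

	pthread_mutex_lock(&subdev_mutex);      /* caller holds the lock */
	map_pgt(&slot, 4096);
	pthread_mutex_unlock(&subdev_mutex);

	free(slot.obj);
	return 0;
}

Releasing the mutex around the blocking call keeps the subdev mutex from being held across a sleep; the price is the refcount recheck once the lock is retaken, which is exactly what the "someone beat us" branch in the hunk handles.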