path: root/drivers/gpu/drm/nouveau/nouveau_sgdma.c
author     Ben Skeggs <bskeggs@redhat.com>  2011-02-09 21:59:51 -0500
committer  Ben Skeggs <bskeggs@redhat.com>  2011-02-24 15:46:01 -0500
commit     26c0c9e33a2eb44b345d22d5928d5c8b7b261226 (patch)
tree       d15305e77bfc4547a36cfa9755aeeffb15dd59ce /drivers/gpu/drm/nouveau/nouveau_sgdma.c
parent     d5f423947a11103c43ad26ebb680d049c2d8edd6 (diff)
drm/nv50-nvc0: delay GART binding until move_notify time
The immediate benefit of doing this is that on NV50 and up, the GPU virtual
address of any buffer is now constant, regardless of what memtype it's
placed in.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
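For context: the mapping work removed from the sgdma hooks below is now done when a buffer actually moves. The companion nouveau_bo.c change is outside this diffstat, so the following is only a rough sketch of what a move_notify-time hook can look like; the function name, the per-buffer nvbo->vma field, and the TTM placement checks are assumptions for illustration, not code from this commit.

/* Rough sketch only -- not part of this file's diff.  Assumes a
 * per-buffer VMA (nvbo->vma); nouveau_vm_map_sg() is called with the
 * same signature seen in the removed hunk below. */
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma = &nvbo->vma;

	if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
		/* VRAM: map the allocated memory node directly */
		nouveau_vm_map(vma, new_mem->mm_node);
	} else if (new_mem && new_mem->mem_type == TTM_PL_TT) {
		/* GART: map the page array stashed in the node by
		 * nv50_sgdma_bind(); mapping into the buffer's own VMA
		 * is what keeps its GPU virtual address constant */
		struct nouveau_mem *node = new_mem->mm_node;
		nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
				  node->pages);
	} else {
		/* evicted or destroyed: drop the mapping */
		nouveau_vm_unmap(vma);
	}
}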
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_sgdma.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 30 +++++++++---------------------
1 file changed, 9 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index a2b89bf0ada1..1205f0f345b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -375,12 +375,10 @@ static int
 nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
-
-	nvbe->offset = mem->start << PAGE_SHIFT;
-
-	nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
-			  nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
+	struct nouveau_mem *node = mem->mm_node;
+	/* noop: bound in move_notify() */
+	node->pages = nvbe->pages;
+	nvbe->pages = (dma_addr_t *)node;
 	nvbe->bound = true;
 	return 0;
 }
@@ -389,13 +387,10 @@ static int
 nv50_sgdma_unbind(struct ttm_backend *be)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
-
-	if (!nvbe->bound)
-		return 0;
-
-	nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
-			    nvbe->nr_pages << PAGE_SHIFT);
+	struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
+	/* noop: unbound in move_notify() */
+	nvbe->pages = node->pages;
+	node->pages = NULL;
 	nvbe->bound = false;
 	return 0;
 }
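The bind/unbind pair above no longer touches the GPU VM at all; it only stashes state so the move_notify-time code can do the real work. nvbe->pages is overloaded: bind() parks the dma_addr_t array in the memory node and keeps a back pointer to the node in its place, and unbind() reverses the swap. A minimal, self-contained sketch of that pattern, with simplified stand-in types (not the kernel structs):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

typedef unsigned long dma_addr_t;	/* stand-in for the kernel type */

struct nouveau_mem { dma_addr_t *pages; };
struct nouveau_sgdma_be { dma_addr_t *pages; bool bound; };

/* bind: hand the page array to the memory node, keep a back pointer
 * to the node where the array used to live */
static void bind(struct nouveau_sgdma_be *be, struct nouveau_mem *node)
{
	node->pages = be->pages;
	be->pages = (dma_addr_t *)node;
	be->bound = true;
}

/* unbind: recover the node from the stashed pointer, undo the swap */
static void unbind(struct nouveau_sgdma_be *be)
{
	struct nouveau_mem *node = (struct nouveau_mem *)be->pages;
	be->pages = node->pages;
	node->pages = NULL;
	be->bound = false;
}

int main(void)
{
	dma_addr_t pg[4] = { 0 };
	struct nouveau_sgdma_be be = { pg, false };
	struct nouveau_mem node = { NULL };

	bind(&be, &node);
	assert(node.pages == pg);		/* node now owns the array */
	unbind(&be);
	assert(be.pages == pg && !node.pages);	/* swap fully undone */
	return 0;
}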
@@ -457,13 +452,7 @@ nouveau_sgdma_init(struct drm_device *dev)
 	}
 
 	if (dev_priv->card_type >= NV_50) {
-		ret = nouveau_vm_get(dev_priv->chan_vm, aper_size,
-				     12, NV_MEM_ACCESS_RW,
-				     &dev_priv->gart_info.vma);
-		if (ret)
-			return ret;
-
-		dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
+		dev_priv->gart_info.aper_base = 0;
 		dev_priv->gart_info.aper_size = aper_size;
 		dev_priv->gart_info.type = NOUVEAU_GART_HW;
 		dev_priv->gart_info.func = &nv50_sgdma_backend;
@@ -522,7 +511,6 @@ nouveau_sgdma_takedown(struct drm_device *dev)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
 	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
-	nouveau_vm_put(&dev_priv->gart_info.vma);
 
 	if (dev_priv->gart_info.dummy.page) {
 		pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,