author    Ben Skeggs <bskeggs@redhat.com>    2010-11-14 20:54:21 -0500
committer Ben Skeggs <bskeggs@redhat.com>    2010-12-07 22:48:13 -0500
commit    4c1361429841344ce4d164492ee7620cf3286eb7 (patch)
tree      7cd23e9e99299b3265b2e59d49e3aa5b77a465f0 /drivers/gpu/drm/nouveau/nouveau_bo.c
parent    f869ef882382a4b6cb42d259e399aeec3781d4bb (diff)
drm/nv50: implement global channel address space on new VM code
As of this commit, it's guaranteed that if an object is in VRAM, its GPU virtual address will be constant.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
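In short: instead of linearly rebinding VRAM into the channel address space via nv50_mem_vm_bind_linear() every time a buffer moves, each buffer object now reserves a virtual address range from the channel VM at creation and maps that range whenever the object lands in VRAM. Below is a minimal sketch of that lifecycle; it uses only the nouveau_vm_* calls and arguments visible in this diff, with all surrounding context, locking, and struct definitions elided, so it is illustrative rather than buildable on its own:

/* Per-BO VMA lifecycle introduced by this patch; the calls and their
 * arguments are the ones visible in the diff, everything else elided. */

/* nouveau_bo_new(): reserve an address range in the channel VM once,
 * so the object's GPU virtual address stays constant for its lifetime. */
ret = nouveau_vm_get(dev_priv->chan_vm, size, 16,
		     NV_MEM_ACCESS_RW, &nvbo->vma);
if (ret)
	return ret;

/* nouveau_bo_vm_bind(): whenever the object is placed in VRAM, back
 * the reserved range with the new pages. */
nouveau_vm_map(&nvbo->vma, new_mem->mm_node);

/* nouveau_bo_del_ttm(): release the address range on destruction. */
nouveau_vm_put(&nvbo->vma);

GART placements are unaffected: as the nv50_bo_move_m2mf() hunk below shows, TTM_PL_TT offsets are still computed from dev_priv->vm_gart_base.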
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c')
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 41
1 file changed, 25 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4d142031d542..203e75de4128 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,6 +49,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
 	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
+	nouveau_vm_put(&nvbo->vma);
 	kfree(nvbo);
 }
 
@@ -113,6 +114,15 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 				&align, &size);
 	align >>= PAGE_SHIFT;
 
+	if (!nvbo->no_vm && dev_priv->chan_vm) {
+		ret = nouveau_vm_get(dev_priv->chan_vm, size, 16,
+				     NV_MEM_ACCESS_RW, &nvbo->vma);
+		if (ret) {
+			kfree(nvbo);
+			return ret;
+		}
+	}
+
 	nouveau_bo_placement_set(nvbo, flags, 0);
 
 	nvbo->channel = chan;
@@ -125,6 +135,11 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 	}
 	nvbo->channel = NULL;
 
+	if (nvbo->vma.node) {
+		if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+			nvbo->bo.offset = nvbo->vma.offset;
+	}
+
 	*pnvbo = nvbo;
 	return 0;
 }
@@ -294,6 +309,11 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
 	if (ret)
 		return ret;
 
+	if (nvbo->vma.node) {
+		if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+			nvbo->bo.offset = nvbo->vma.offset;
+	}
+
 	return 0;
 }
 
@@ -400,10 +420,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->available_caching = TTM_PL_FLAG_UNCACHED |
 					 TTM_PL_FLAG_WC;
 		man->default_caching = TTM_PL_FLAG_WC;
-		if (dev_priv->card_type == NV_50)
-			man->gpu_offset = 0x40000000;
-		else
-			man->gpu_offset = 0;
+		man->gpu_offset = 0;
 		break;
 	case TTM_PL_TT:
 		man->func = &ttm_bo_manager_func;
@@ -507,12 +524,12 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 	dst_offset = new_mem->start << PAGE_SHIFT;
 	if (!nvbo->no_vm) {
 		if (old_mem->mem_type == TTM_PL_VRAM)
-			src_offset += dev_priv->vm_vram_base;
+			src_offset = nvbo->vma.offset;
 		else
 			src_offset += dev_priv->vm_gart_base;
 
 		if (new_mem->mem_type == TTM_PL_VRAM)
-			dst_offset += dev_priv->vm_vram_base;
+			dst_offset = nvbo->vma.offset;
 		else
 			dst_offset += dev_priv->vm_gart_base;
 	}
@@ -756,7 +773,6 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
 	struct drm_device *dev = dev_priv->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	uint64_t offset;
-	int ret;
 
 	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
 		/* Nothing to do. */
@@ -766,15 +782,8 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
 
 	offset = new_mem->start << PAGE_SHIFT;
 
-	if (dev_priv->card_type == NV_50) {
-		ret = nv50_mem_vm_bind_linear(dev,
-					      offset + dev_priv->vm_vram_base,
-					      new_mem->size,
-					      nouveau_bo_tile_layout(nvbo),
-					      offset);
-		if (ret)
-			return ret;
-
+	if (dev_priv->chan_vm) {
+		nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
 	} else if (dev_priv->card_type >= NV_10) {
 		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
 						nvbo->tile_mode,