author     Ben Skeggs <bskeggs@redhat.com>  2011-02-15 17:41:56 -0500
committer  Ben Skeggs <bskeggs@redhat.com>  2011-02-24 15:45:34 -0500
commit     d550c41e4ff11fe69b5f92868157253d27937d1f (patch)
tree       5c51d494f497d6cdffc822964bfeca0ba310ac3d /drivers/gpu/drm/nouveau/nouveau_bo.c
parent     2503c6fa3edf7c2bb001c7f7926786eed24cc06e (diff)
drm/nouveau: remove no_vm/mappable flags from nouveau_bo
'mappable' isn't really used at all, nor is it necessary anymore as the
bo code is capable of moving buffers to mappable vram as required.

'no_vm' isn't necessary anymore either, any places that don't want to be
mapped into a GPU address space should allocate the VRAM directly instead.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
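For callers, the visible API change is confined to the nouveau_bo_new() signature: the 'no_vm' and 'mappable' bools are gone. A minimal before/after sketch of a call site, assuming a VRAM placement; the variable names and the caller code itself are illustrative and not part of this patch:

        /* before this patch: callers threaded no_vm/mappable through every
         * allocation, even though 'mappable' was never really honoured
         */
        ret = nouveau_bo_new(dev, chan, size, align, TTM_PL_FLAG_VRAM,
                             tile_mode, tile_flags, false, true, &nvbo);

        /* after this patch: both flags are dropped; a GPU VM mapping is set
         * up automatically whenever dev_priv->chan_vm exists
         */
        ret = nouveau_bo_new(dev, chan, size, align, TTM_PL_FLAG_VRAM,
                             tile_mode, tile_flags, &nvbo);

Callers that previously passed no_vm = true to skip the VM mapping should, per the message above, allocate their VRAM directly instead of going through nouveau_bo_new().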
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c  62
1 file changed, 24 insertions(+), 38 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index bf260af18b31..897c55509a6b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -98,8 +98,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
 int
 nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
                int size, int align, uint32_t flags, uint32_t tile_mode,
-               uint32_t tile_flags, bool no_vm, bool mappable,
-               struct nouveau_bo **pnvbo)
+               uint32_t tile_flags, struct nouveau_bo **pnvbo)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
         struct nouveau_bo *nvbo;
@@ -110,8 +109,6 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
                 return -ENOMEM;
         INIT_LIST_HEAD(&nvbo->head);
         INIT_LIST_HEAD(&nvbo->entry);
-        nvbo->mappable = mappable;
-        nvbo->no_vm = no_vm;
         nvbo->tile_mode = tile_mode;
         nvbo->tile_flags = tile_flags;
         nvbo->bo.bdev = &dev_priv->ttm.bdev;
@@ -119,7 +116,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
         nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
         align >>= PAGE_SHIFT;
 
-        if (!nvbo->no_vm && dev_priv->chan_vm) {
+        if (dev_priv->chan_vm) {
                 ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
                                      NV_MEM_ACCESS_RW, &nvbo->vma);
                 if (ret) {
@@ -504,14 +501,6 @@ static inline uint32_t
 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
                       struct nouveau_channel *chan, struct ttm_mem_reg *mem)
 {
-        struct nouveau_bo *nvbo = nouveau_bo(bo);
-
-        if (nvbo->no_vm) {
-                if (mem->mem_type == TTM_PL_TT)
-                        return NvDmaGART;
-                return NvDmaVRAM;
-        }
-
         if (mem->mem_type == TTM_PL_TT)
                 return chan->gart_handle;
         return chan->vram_handle;
@@ -523,22 +512,21 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 {
         struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
         struct nouveau_bo *nvbo = nouveau_bo(bo);
-        u64 src_offset = old_mem->start << PAGE_SHIFT;
-        u64 dst_offset = new_mem->start << PAGE_SHIFT;
         u32 page_count = new_mem->num_pages;
+        u64 src_offset, dst_offset;
         int ret;
 
-        if (!nvbo->no_vm) {
-                if (old_mem->mem_type == TTM_PL_VRAM)
-                        src_offset = nvbo->vma.offset;
-                else
-                        src_offset += dev_priv->gart_info.aper_base;
+        src_offset = old_mem->start << PAGE_SHIFT;
+        if (old_mem->mem_type == TTM_PL_VRAM)
+                src_offset = nvbo->vma.offset;
+        else
+                src_offset += dev_priv->gart_info.aper_base;
 
-                if (new_mem->mem_type == TTM_PL_VRAM)
-                        dst_offset = nvbo->vma.offset;
-                else
-                        dst_offset += dev_priv->gart_info.aper_base;
-        }
+        dst_offset = new_mem->start << PAGE_SHIFT;
+        if (new_mem->mem_type == TTM_PL_VRAM)
+                dst_offset = nvbo->vma.offset;
+        else
+                dst_offset += dev_priv->gart_info.aper_base;
 
         page_count = new_mem->num_pages;
         while (page_count) {
@@ -580,18 +568,16 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
         int ret;
 
         src_offset = old_mem->start << PAGE_SHIFT;
-        dst_offset = new_mem->start << PAGE_SHIFT;
-        if (!nvbo->no_vm) {
-                if (old_mem->mem_type == TTM_PL_VRAM)
-                        src_offset = nvbo->vma.offset;
-                else
-                        src_offset += dev_priv->gart_info.aper_base;
+        if (old_mem->mem_type == TTM_PL_VRAM)
+                src_offset = nvbo->vma.offset;
+        else
+                src_offset += dev_priv->gart_info.aper_base;
 
-                if (new_mem->mem_type == TTM_PL_VRAM)
-                        dst_offset = nvbo->vma.offset;
-                else
-                        dst_offset += dev_priv->gart_info.aper_base;
-        }
+        dst_offset = new_mem->start << PAGE_SHIFT;
+        if (new_mem->mem_type == TTM_PL_VRAM)
+                dst_offset = nvbo->vma.offset;
+        else
+                dst_offset += dev_priv->gart_info.aper_base;
 
         ret = RING_SPACE(chan, 3);
         if (ret)
@@ -737,7 +723,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
         int ret;
 
         chan = nvbo->channel;
-        if (!chan || nvbo->no_vm) {
+        if (!chan) {
                 chan = dev_priv->channel;
                 mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
         }
@@ -836,7 +822,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
         struct nouveau_bo *nvbo = nouveau_bo(bo);
         uint64_t offset;
 
-        if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
+        if (new_mem->mem_type != TTM_PL_VRAM) {
                 /* Nothing to do. */
                 *new_tile = NULL;
                 return 0;