author     Ben Skeggs <bskeggs@redhat.com>   2011-02-15 17:41:56 -0500
committer  Ben Skeggs <bskeggs@redhat.com>   2011-02-24 15:45:34 -0500
commit     d550c41e4ff11fe69b5f92868157253d27937d1f
tree       5c51d494f497d6cdffc822964bfeca0ba310ac3d /drivers/gpu/drm
parent     2503c6fa3edf7c2bb001c7f7926786eed24cc06e
drm/nouveau: remove no_vm/mappable flags from nouveau_bo
'mappable' isn't really used at all, nor is it necessary anymore, as the
bo code is capable of moving buffers to mappable vram as required.
'no_vm' isn't necessary anymore either; any places that don't want to be
mapped into a GPU address space should allocate the VRAM directly instead.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
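For reference, a minimal before/after sketch of the nouveau_bo_new() calling convention this patch changes; the argument values are illustrative, lifted from the nv04_crtc.c call site in the diff below:

	/* before: callers passed the no_vm and mappable booleans explicitly */
	ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
			     0, 0x0000, false, true, &nv_crtc->cursor.nvbo);

	/* after: both flags are gone; the bo code handles mappable placement
	 * itself, and anything that must stay out of the channel VM is
	 * expected to allocate its VRAM directly instead */
	ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
			     0, 0x0000, &nv_crtc->cursor.nvbo);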
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c        | 62
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c   |  3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h       |  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h       |  6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c     |  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c     |  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c       | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c       | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c  |  3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c     | 34
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c         |  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c         |  4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.c          |  4
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fifo.c         |  2
14 files changed, 56 insertions, 109 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index bf260af18b31..897c55509a6b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -98,8 +98,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
 int
 nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 	       int size, int align, uint32_t flags, uint32_t tile_mode,
-	       uint32_t tile_flags, bool no_vm, bool mappable,
-	       struct nouveau_bo **pnvbo)
+	       uint32_t tile_flags, struct nouveau_bo **pnvbo)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_bo *nvbo;
@@ -110,8 +109,6 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 		return -ENOMEM;
 	INIT_LIST_HEAD(&nvbo->head);
 	INIT_LIST_HEAD(&nvbo->entry);
-	nvbo->mappable = mappable;
-	nvbo->no_vm = no_vm;
 	nvbo->tile_mode = tile_mode;
 	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &dev_priv->ttm.bdev;
@@ -119,7 +116,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 	nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
 	align >>= PAGE_SHIFT;
 
-	if (!nvbo->no_vm && dev_priv->chan_vm) {
+	if (dev_priv->chan_vm) {
 		ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
 				     NV_MEM_ACCESS_RW, &nvbo->vma);
 		if (ret) {
@@ -504,14 +501,6 @@ static inline uint32_t
 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
 		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
 {
-	struct nouveau_bo *nvbo = nouveau_bo(bo);
-
-	if (nvbo->no_vm) {
-		if (mem->mem_type == TTM_PL_TT)
-			return NvDmaGART;
-		return NvDmaVRAM;
-	}
-
 	if (mem->mem_type == TTM_PL_TT)
 		return chan->gart_handle;
 	return chan->vram_handle;
@@ -523,22 +512,21 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	u64 src_offset = old_mem->start << PAGE_SHIFT;
-	u64 dst_offset = new_mem->start << PAGE_SHIFT;
 	u32 page_count = new_mem->num_pages;
+	u64 src_offset, dst_offset;
 	int ret;
 
-	if (!nvbo->no_vm) {
-		if (old_mem->mem_type == TTM_PL_VRAM)
-			src_offset = nvbo->vma.offset;
-		else
-			src_offset += dev_priv->gart_info.aper_base;
+	src_offset = old_mem->start << PAGE_SHIFT;
+	if (old_mem->mem_type == TTM_PL_VRAM)
+		src_offset = nvbo->vma.offset;
+	else
+		src_offset += dev_priv->gart_info.aper_base;
 
-		if (new_mem->mem_type == TTM_PL_VRAM)
-			dst_offset = nvbo->vma.offset;
-		else
-			dst_offset += dev_priv->gart_info.aper_base;
-	}
+	dst_offset = new_mem->start << PAGE_SHIFT;
+	if (new_mem->mem_type == TTM_PL_VRAM)
+		dst_offset = nvbo->vma.offset;
+	else
+		dst_offset += dev_priv->gart_info.aper_base;
 
 	page_count = new_mem->num_pages;
 	while (page_count) {
@@ -580,18 +568,16 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 	int ret;
 
 	src_offset = old_mem->start << PAGE_SHIFT;
-	dst_offset = new_mem->start << PAGE_SHIFT;
-	if (!nvbo->no_vm) {
-		if (old_mem->mem_type == TTM_PL_VRAM)
-			src_offset = nvbo->vma.offset;
-		else
-			src_offset += dev_priv->gart_info.aper_base;
+	if (old_mem->mem_type == TTM_PL_VRAM)
+		src_offset = nvbo->vma.offset;
+	else
+		src_offset += dev_priv->gart_info.aper_base;
 
-		if (new_mem->mem_type == TTM_PL_VRAM)
-			dst_offset = nvbo->vma.offset;
-		else
-			dst_offset += dev_priv->gart_info.aper_base;
-	}
+	dst_offset = new_mem->start << PAGE_SHIFT;
+	if (new_mem->mem_type == TTM_PL_VRAM)
+		dst_offset = nvbo->vma.offset;
+	else
+		dst_offset += dev_priv->gart_info.aper_base;
 
 	ret = RING_SPACE(chan, 3);
 	if (ret)
@@ -737,7 +723,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 	int ret;
 
 	chan = nvbo->channel;
-	if (!chan || nvbo->no_vm) {
+	if (!chan) {
 		chan = dev_priv->channel;
 		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
 	}
@@ -836,7 +822,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	uint64_t offset;
 
-	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
+	if (new_mem->mem_type != TTM_PL_VRAM) {
 		/* Nothing to do. */
 		*new_tile = NULL;
 		return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 3d7b316c3bbd..3837090d66af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -90,8 +90,7 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
 	else
 		location = TTM_PL_FLAG_TT;
 
-	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
-			     true, &pushbuf);
+	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf);
 	if (ret) {
 		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
 		return NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 6f0f4bb93796..23d4edf992b7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -61,8 +61,6 @@ enum {
 	NvM2MF = 0x80000001,
 	NvDmaFB = 0x80000002,
 	NvDmaTT = 0x80000003,
-	NvDmaVRAM = 0x80000004,
-	NvDmaGART = 0x80000005,
 	NvNotify0 = 0x80000006,
 	Nv2D = 0x80000007,
 	NvCtxSurf2D = 0x80000008,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 8f6491845692..f591c84a2792 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -104,8 +104,6 @@ struct nouveau_bo {
 	struct nouveau_channel *channel;
 
 	struct nouveau_vma vma;
-	bool mappable;
-	bool no_vm;
 
 	uint32_t tile_mode;
 	uint32_t tile_flags;
@@ -1293,7 +1291,7 @@ extern struct ttm_bo_driver nouveau_bo_driver;
 extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *,
 			  int size, int align, uint32_t flags,
 			  uint32_t tile_mode, uint32_t tile_flags,
-			  bool no_vm, bool mappable, struct nouveau_bo **);
+			  struct nouveau_bo **);
 extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
 extern int nouveau_bo_unpin(struct nouveau_bo *);
 extern int nouveau_bo_map(struct nouveau_bo *);
@@ -1356,7 +1354,7 @@ static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
 extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
 			   int size, int align, uint32_t flags,
 			   uint32_t tile_mode, uint32_t tile_flags,
-			   bool no_vm, bool mappable, struct nouveau_bo **);
+			   struct nouveau_bo **);
 extern int nouveau_gem_object_new(struct drm_gem_object *);
 extern void nouveau_gem_object_del(struct drm_gem_object *);
 extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 60769d2f9a66..9d7a98876074 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -297,7 +297,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 	size = roundup(size, PAGE_SIZE);
 
 	ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
-			      0, 0x0000, false, true, &nvbo);
+			      0, 0x0000, &nvbo);
 	if (ret) {
 		NV_ERROR(dev, "failed to allocate framebuffer\n");
 		goto out;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 8b46392b0ca9..a244702bb227 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -578,7 +578,7 @@ nouveau_fence_init(struct drm_device *dev)
 	/* Create a shared VRAM heap for cross-channel sync. */
 	if (USE_SEMA(dev)) {
 		ret = nouveau_bo_new(dev, NULL, size, 0, TTM_PL_FLAG_VRAM,
-				     0, 0, false, true, &dev_priv->fence.bo);
+				     0, 0, &dev_priv->fence.bo);
 		if (ret)
 			return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 506c508b7eda..29ededdee980 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -62,14 +62,13 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
 int
 nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
 		int size, int align, uint32_t flags, uint32_t tile_mode,
-		uint32_t tile_flags, bool no_vm, bool mappable,
-		struct nouveau_bo **pnvbo)
+		uint32_t tile_flags, struct nouveau_bo **pnvbo)
 {
 	struct nouveau_bo *nvbo;
 	int ret;
 
 	ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
-			     tile_flags, no_vm, mappable, pnvbo);
+			     tile_flags, pnvbo);
 	if (ret)
 		return ret;
 	nvbo = *pnvbo;
@@ -97,7 +96,7 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
 
 	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
 	rep->offset = nvbo->bo.offset;
-	rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
+	rep->map_handle = nvbo->bo.addr_space_offset;
 	rep->tile_mode = nvbo->tile_mode;
 	rep->tile_flags = nvbo->tile_flags;
 	return 0;
@@ -136,9 +135,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 	}
 
 	ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
-			      req->info.tile_mode, req->info.tile_flags, false,
-			      (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
-			      &nvbo);
+			      req->info.tile_mode, req->info.tile_flags, &nvbo);
 	if (chan)
 		nouveau_channel_put(&chan);
 	if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 5cf924ed4ac6..16eee50a0572 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -152,7 +152,6 @@ nouveau_mem_vram_fini(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
-	nouveau_bo_unpin(dev_priv->vga_ram);
 	nouveau_bo_ref(NULL, &dev_priv->vga_ram);
 
 	ttm_bo_device_release(&dev_priv->ttm.bdev);
@@ -461,13 +460,17 @@ nouveau_mem_vram_init(struct drm_device *dev)
 		return ret;
 	}
 
-	ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
-			     0, 0, true, true, &dev_priv->vga_ram);
-	if (ret == 0)
-		ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
-	if (ret) {
-		NV_WARN(dev, "failed to reserve VGA memory\n");
-		nouveau_bo_ref(NULL, &dev_priv->vga_ram);
+	if (dev_priv->card_type < NV_50) {
+		ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
+				     0, 0, &dev_priv->vga_ram);
+		if (ret == 0)
+			ret = nouveau_bo_pin(dev_priv->vga_ram,
+					     TTM_PL_FLAG_VRAM);
+
+		if (ret) {
+			NV_WARN(dev, "failed to reserve VGA memory\n");
+			nouveau_bo_ref(NULL, &dev_priv->vga_ram);
+		}
 	}
 
 	dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
@@ -672,13 +675,14 @@ nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
 	struct nouveau_mm *mm;
-	u32 b_size;
+	u64 size, block, rsvd;
 	int ret;
 
-	p_size = (p_size << PAGE_SHIFT) >> 12;
-	b_size = dev_priv->vram_rblock_size >> 12;
+	rsvd = (256 * 1024); /* vga memory */
+	size = (p_size << PAGE_SHIFT) - rsvd;
+	block = dev_priv->vram_rblock_size;
 
-	ret = nouveau_mm_init(&mm, 0, p_size, b_size);
+	ret = nouveau_mm_init(&mm, rsvd >> 12, size >> 12, block >> 12);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index fe29d604b820..92c029920efe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -43,8 +43,7 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
 	else
 		flags = TTM_PL_FLAG_TT;
 
-	ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
-			      0, 0x0000, false, true, &ntfy);
+	ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 43acfc2aded5..05294910e135 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -544,7 +544,6 @@ static int
 nouveau_card_init_channel(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *gpuobj = NULL;
 	int ret;
 
 	ret = nouveau_channel_alloc(dev, &dev_priv->channel,
@@ -552,41 +551,8 @@ nouveau_card_init_channel(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	/* no dma objects on fermi... */
-	if (dev_priv->card_type >= NV_C0)
-		goto out_done;
-
-	ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
-				     0, dev_priv->vram_size,
-				     NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
-				     &gpuobj);
-	if (ret)
-		goto out_err;
-
-	ret = nouveau_ramht_insert(dev_priv->channel, NvDmaVRAM, gpuobj);
-	nouveau_gpuobj_ref(NULL, &gpuobj);
-	if (ret)
-		goto out_err;
-
-	ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
-				     0, dev_priv->gart_info.aper_size,
-				     NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART,
-				     &gpuobj);
-	if (ret)
-		goto out_err;
-
-	ret = nouveau_ramht_insert(dev_priv->channel, NvDmaGART, gpuobj);
-	nouveau_gpuobj_ref(NULL, &gpuobj);
-	if (ret)
-		goto out_err;
-
-out_done:
 	mutex_unlock(&dev_priv->channel->mutex);
 	return 0;
-
-out_err:
-	nouveau_channel_put(&dev_priv->channel);
-	return ret;
 }
 
 static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 297505eb98d5..a260fbbe3d9b 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -1031,7 +1031,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
 	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
 
 	ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
+			     0, 0x0000, &nv_crtc->cursor.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
 		if (!ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 568fb4704166..2b9984027f41 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -752,7 +752,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
 	nv_crtc->lut.depth = 0;
 
 	ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, false, true, &nv_crtc->lut.nvbo);
+			     0, 0x0000, &nv_crtc->lut.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
 		if (!ret)
@@ -778,7 +778,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
 	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
 
 	ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
+			     0, 0x0000, &nv_crtc->cursor.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
 		if (!ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index 18fbf27376c1..a2cfaa691e9b 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -117,7 +117,7 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
 	evo->user_put = 0;
 
 	ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
-			     false, true, &evo->pushbuf_bo);
+			     &evo->pushbuf_bo);
 	if (ret == 0)
 		ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
 	if (ret) {
@@ -331,7 +331,7 @@ nv50_evo_create(struct drm_device *dev)
 		goto err;
 
 	ret = nouveau_bo_new(dev, NULL, 4096, 0x1000, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, false, true, &dispc->sem.bo);
+			     0, 0x0000, &dispc->sem.bo);
 	if (!ret) {
 		offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
 
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index e9f8643bed9b..2886f2726a9e 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -116,7 +116,7 @@ nvc0_fifo_create_context(struct nouveau_channel *chan)
 
 	/* allocate vram for control regs, map into polling area */
 	ret = nouveau_bo_new(dev, NULL, 0x1000, 0, TTM_PL_FLAG_VRAM,
-			     0, 0, true, true, &fifoch->user);
+			     0, 0, &fifoch->user);
 	if (ret)
 		goto error;
 