 -rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c       | 41
 -rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c  |  2
 -rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c      |  8
 -rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h      | 17
 -rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c    |  4
 -rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c      | 94
 -rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c   | 47
 -rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c        |  6
 -rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c       | 13
 -rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c       |  3
 -rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c     | 48
 -rw-r--r--  drivers/gpu/drm/nouveau/nv50_vm.c          | 17
 -rw-r--r--  drivers/gpu/drm/nouveau/nv84_crypt.c       |  2
 13 files changed, 86 insertions, 216 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4d142031d542..203e75de4128 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,6 +49,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
                 DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
         nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
+        nouveau_vm_put(&nvbo->vma);
         kfree(nvbo);
 }
 
@@ -113,6 +114,15 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
                                &align, &size);
         align >>= PAGE_SHIFT;
 
+        if (!nvbo->no_vm && dev_priv->chan_vm) {
+                ret = nouveau_vm_get(dev_priv->chan_vm, size, 16,
+                                     NV_MEM_ACCESS_RW, &nvbo->vma);
+                if (ret) {
+                        kfree(nvbo);
+                        return ret;
+                }
+        }
+
         nouveau_bo_placement_set(nvbo, flags, 0);
 
         nvbo->channel = chan;
@@ -125,6 +135,11 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
         }
         nvbo->channel = NULL;
 
+        if (nvbo->vma.node) {
+                if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+                        nvbo->bo.offset = nvbo->vma.offset;
+        }
+
         *pnvbo = nvbo;
         return 0;
 }
@@ -294,6 +309,11 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
         if (ret)
                 return ret;
 
+        if (nvbo->vma.node) {
+                if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+                        nvbo->bo.offset = nvbo->vma.offset;
+        }
+
         return 0;
 }
 
@@ -400,10 +420,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                 man->available_caching = TTM_PL_FLAG_UNCACHED |
                                          TTM_PL_FLAG_WC;
                 man->default_caching = TTM_PL_FLAG_WC;
-                if (dev_priv->card_type == NV_50)
-                        man->gpu_offset = 0x40000000;
-                else
-                        man->gpu_offset = 0;
+                man->gpu_offset = 0;
                 break;
         case TTM_PL_TT:
                 man->func = &ttm_bo_manager_func;
@@ -507,12 +524,12 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
         dst_offset = new_mem->start << PAGE_SHIFT;
         if (!nvbo->no_vm) {
                 if (old_mem->mem_type == TTM_PL_VRAM)
-                        src_offset += dev_priv->vm_vram_base;
+                        src_offset = nvbo->vma.offset;
                 else
                         src_offset += dev_priv->vm_gart_base;
 
                 if (new_mem->mem_type == TTM_PL_VRAM)
-                        dst_offset += dev_priv->vm_vram_base;
+                        dst_offset = nvbo->vma.offset;
                 else
                         dst_offset += dev_priv->vm_gart_base;
         }
@@ -756,7 +773,6 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
         struct drm_device *dev = dev_priv->dev;
         struct nouveau_bo *nvbo = nouveau_bo(bo);
         uint64_t offset;
-        int ret;
 
         if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
                 /* Nothing to do. */
@@ -766,15 +782,8 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
 
         offset = new_mem->start << PAGE_SHIFT;
 
-        if (dev_priv->card_type == NV_50) {
-                ret = nv50_mem_vm_bind_linear(dev,
-                                              offset + dev_priv->vm_vram_base,
-                                              new_mem->size,
-                                              nouveau_bo_tile_layout(nvbo),
-                                              offset);
-                if (ret)
-                        return ret;
-
+        if (dev_priv->chan_vm) {
+                nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
         } else if (dev_priv->card_type >= NV_10) {
                 *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
                                                 nvbo->tile_mode,
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index a3d33a582a98..6f37995aee2d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -39,7 +39,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
 
         if (dev_priv->card_type >= NV_50) {
                 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
-                                             dev_priv->vm_end, NV_MEM_ACCESS_RO,
+                                             (1ULL << 40), NV_MEM_ACCESS_RO,
                                              NV_MEM_TARGET_VM, &pushbuf);
                 chan->pushbuf_base = pb->bo.offset;
         } else
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index a48c7da133d2..bb170570938b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -339,13 +339,11 @@ nouveau_pci_resume(struct pci_dev *pdev)
 
         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+                u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT;
 
-                nv_crtc->cursor.set_offset(nv_crtc,
-                                           nv_crtc->cursor.nvbo->bo.offset -
-                                           dev_priv->vm_vram_base);
-
+                nv_crtc->cursor.set_offset(nv_crtc, offset);
                 nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
                                         nv_crtc->cursor_saved_y);
         }
 
         /* Force CLUT to get re-loaded during modeset */
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 452a8652a498..dce9a5f6f6c4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -65,10 +65,6 @@ struct nouveau_vram;
 #define NOUVEAU_MAX_CHANNEL_NR 128
 #define NOUVEAU_MAX_TILE_NR 15
 
-#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
-#define NV50_VM_BLOCK (512*1024*1024ULL)
-#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
-
 struct nouveau_vram {
         struct drm_device *dev;
 
@@ -106,6 +102,7 @@ struct nouveau_bo {
 
         struct nouveau_channel *channel;
 
+        struct nouveau_vma vma;
         bool mappable;
         bool no_vm;
 
@@ -252,7 +249,6 @@ struct nouveau_channel {
         struct nouveau_vm *vm;
         struct nouveau_gpuobj *vm_pd;
         struct nouveau_gpuobj *vm_gart_pt;
-        struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
 
         /* Objects */
         struct nouveau_gpuobj *ramin; /* Private instmem */
@@ -712,13 +708,9 @@ struct drm_nouveau_private {
         struct nouveau_vm *bar3_vm;
 
         /* G8x/G9x virtual address space */
+        struct nouveau_vm *chan_vm;
         uint64_t vm_gart_base;
         uint64_t vm_gart_size;
-        uint64_t vm_vram_base;
-        uint64_t vm_vram_size;
-        uint64_t vm_end;
-        struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
-        int vm_vram_pt_nr;
 
         struct nvbios vbios;
 
@@ -836,11 +828,6 @@ extern struct nouveau_tile_reg *nv10_mem_set_tiling(
 extern void nv10_mem_put_tile_region(struct drm_device *dev,
                                      struct nouveau_tile_reg *tile,
                                      struct nouveau_fence *fence);
-extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
-                                   uint32_t size, uint32_t flags,
-                                   uint64_t phys);
-extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt,
-                               uint32_t size);
 extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
 
 /* nouveau_notifier.c */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 0fce4eb914d5..ea861c915149 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -338,8 +338,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
                               FBINFO_HWACCEL_IMAGEBLIT;
         info->flags |= FBINFO_CAN_FORCE_OUTPUT;
         info->fbops = &nouveau_fbcon_sw_ops;
-        info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
-                               dev_priv->vm_vram_base;
+        info->fix.smem_start = dev->mode_config.fb_base +
+                               (nvbo->bo.mem.start << PAGE_SHIFT);
         info->fix.smem_len = size;
 
         info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 2d02401e8227..4d2d3de97ee9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -145,100 +145,6 @@ nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
 }
 
 /*
- * NV50 VM helpers
- */
-int
-nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
-                        uint32_t flags, uint64_t phys)
-{
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_gpuobj *pgt;
-        unsigned block;
-        int i;
-
-        virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
-        size = (size >> 16) << 1;
-
-        phys |= ((uint64_t)flags << 32);
-        phys |= 1;
-        if (dev_priv->vram_sys_base) {
-                phys += dev_priv->vram_sys_base;
-                phys |= 0x30;
-        }
-
-        while (size) {
-                unsigned offset_h = upper_32_bits(phys);
-                unsigned offset_l = lower_32_bits(phys);
-                unsigned pte, end;
-
-                for (i = 7; i >= 0; i--) {
-                        block = 1 << (i + 1);
-                        if (size >= block && !(virt & (block - 1)))
-                                break;
-                }
-                offset_l |= (i << 7);
-
-                phys += block << 15;
-                size -= block;
-
-                while (block) {
-                        pgt = dev_priv->vm_vram_pt[virt >> 14];
-                        pte = virt & 0x3ffe;
-
-                        end = pte + block;
-                        if (end > 16384)
-                                end = 16384;
-                        block -= (end - pte);
-                        virt += (end - pte);
-
-                        while (pte < end) {
-                                nv_wo32(pgt, (pte * 4) + 0, offset_l);
-                                nv_wo32(pgt, (pte * 4) + 4, offset_h);
-                                pte += 2;
-                        }
-                }
-        }
-
-        dev_priv->engine.instmem.flush(dev);
-        dev_priv->engine.fifo.tlb_flush(dev);
-        dev_priv->engine.graph.tlb_flush(dev);
-        nv50_vm_flush_engine(dev, 6);
-        return 0;
-}
-
-void
-nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
-{
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_gpuobj *pgt;
-        unsigned pages, pte, end;
-
-        virt -= dev_priv->vm_vram_base;
-        pages = (size >> 16) << 1;
-
-        while (pages) {
-                pgt = dev_priv->vm_vram_pt[virt >> 29];
-                pte = (virt & 0x1ffe0000ULL) >> 15;
-
-                end = pte + pages;
-                if (end > 16384)
-                        end = 16384;
-                pages -= (end - pte);
-                virt += (end - pte) << 15;
-
-                while (pte < end) {
-                        nv_wo32(pgt, (pte * 4), 0);
-                        pte++;
-                }
-        }
-
-        dev_priv->engine.instmem.flush(dev);
-        dev_priv->engine.fifo.tlb_flush(dev);
-        dev_priv->engine.graph.tlb_flush(dev);
-        nv50_vm_flush_engine(dev, 6);
-}
-
-/*
  * Cleanup everything
  */
 void
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index dd1859f7d8b0..573fd7316d63 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -35,6 +35,7 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 #include "nouveau_ramht.h"
+#include "nouveau_vm.h"
 
 struct nouveau_gpuobj_method {
         struct list_head head;
@@ -770,9 +771,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 {
         struct drm_device *dev = chan->dev;
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
         struct nouveau_gpuobj *vram = NULL, *tt = NULL;
-        int ret, i;
+        int ret;
 
         NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
 
@@ -783,16 +783,14 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
                 return ret;
         }
 
-        /* NV50 VM
+        /* NV50/NVC0 VM
          * - Allocate per-channel page-directory
-         * - Map GART and VRAM into the channel's address space at the
-         *   locations determined during init.
+         * - Link with shared channel VM
          */
-        if (dev_priv->card_type >= NV_50) {
+        if (dev_priv->chan_vm) {
                 u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
                 u64 vm_vinst = chan->ramin->vinst + pgd_offs;
                 u32 vm_pinst = chan->ramin->pinst;
-                u32 pde;
 
                 if (vm_pinst != ~0)
                         vm_pinst += pgd_offs;
@@ -801,29 +799,9 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
                                               0, &chan->vm_pd);
                 if (ret)
                         return ret;
-                for (i = 0; i < 0x4000; i += 8) {
-                        nv_wo32(chan->vm_pd, i + 0, 0x00000000);
-                        nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
-                }
-
-                nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
-                                   &chan->vm_gart_pt);
-                pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
-                nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
-                nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
-
-                pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
-                for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
-                        nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
-                                           &chan->vm_vram_pt[i]);
-
-                        nv_wo32(chan->vm_pd, pde + 0,
-                                chan->vm_vram_pt[i]->vinst | 0x61);
-                        nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
-                        pde += 8;
-                }
 
-                instmem->flush(dev);
+                nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
+                chan->vm->map_pgt(chan->vm_pd, 12, 1, dev_priv->gart_info.sg_ctxdma);
         }
 
         /* RAMHT */
@@ -846,8 +824,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
         /* VRAM ctxdma */
         if (dev_priv->card_type >= NV_50) {
                 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-                                             0, dev_priv->vm_end,
-                                             NV_MEM_ACCESS_RW,
+                                             0, (1ULL << 40), NV_MEM_ACCESS_RW,
                                              NV_MEM_TARGET_VM, &vram);
                 if (ret) {
                         NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
@@ -874,8 +851,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
         /* TT memory ctxdma */
         if (dev_priv->card_type >= NV_50) {
                 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-                                             0, dev_priv->vm_end,
-                                             NV_MEM_ACCESS_RW,
+                                             0, (1ULL << 40), NV_MEM_ACCESS_RW,
                                              NV_MEM_TARGET_VM, &tt);
         } else {
                 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
@@ -902,9 +878,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 void
 nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
 {
-        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
         struct drm_device *dev = chan->dev;
-        int i;
 
         NV_DEBUG(dev, "ch%d\n", chan->id);
 
@@ -913,10 +887,9 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
 
         nouveau_ramht_ref(NULL, &chan->ramht, chan);
 
+        nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
         nouveau_gpuobj_ref(NULL, &chan->vm_pd);
         nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
-        for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
-                nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
 
         if (chan->ramin_heap.free_stack.next)
                 drm_mm_takedown(&chan->ramin_heap);
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index f3570cc45017..2c346f797285 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -345,7 +345,6 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
                      uint32_t buffer_handle, uint32_t width, uint32_t height)
 {
         struct drm_device *dev = crtc->dev;
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
         struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
         struct nouveau_bo *cursor = NULL;
         struct drm_gem_object *gem;
@@ -374,8 +373,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 
         nouveau_bo_unmap(cursor);
 
-        nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset -
-                                   dev_priv->vm_vram_base);
+        nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT);
         nv_crtc->cursor.show(nv_crtc, true);
 
 out:
@@ -548,7 +546,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
                 return -EINVAL;
         }
 
-        nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
+        nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT;
         nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
         nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
         if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 156731993907..6d38cb1488ae 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -3,6 +3,7 @@
 #include "nouveau_dma.h"
 #include "nouveau_ramht.h"
 #include "nouveau_fbcon.h"
+#include "nouveau_mm.h"
 
 int
 nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
@@ -134,10 +135,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
         struct drm_device *dev = nfbdev->dev;
         struct drm_nouveau_private *dev_priv = dev->dev_private;
         struct nouveau_channel *chan = dev_priv->channel;
+        struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
         int ret, format;
-        uint64_t fb;
-
-        fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base;
 
         switch (info->var.bits_per_pixel) {
         case 8:
@@ -224,8 +223,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
         OUT_RING(chan, info->fix.line_length);
         OUT_RING(chan, info->var.xres_virtual);
         OUT_RING(chan, info->var.yres_virtual);
-        OUT_RING(chan, upper_32_bits(fb));
-        OUT_RING(chan, lower_32_bits(fb));
+        OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
+        OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
         BEGIN_RING(chan, NvSub2D, 0x0230, 2);
         OUT_RING(chan, format);
         OUT_RING(chan, 1);
@@ -233,8 +232,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
         OUT_RING(chan, info->fix.line_length);
         OUT_RING(chan, info->var.xres_virtual);
         OUT_RING(chan, info->var.yres_virtual);
-        OUT_RING(chan, upper_32_bits(fb));
-        OUT_RING(chan, lower_32_bits(fb));
+        OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
+        OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
 
         return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index f5fd1b296d27..c510e74acf4d 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -246,6 +246,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
         nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12);
 
         dev_priv->engine.instmem.flush(dev);
+        atomic_inc(&chan->vm->pgraph_refs);
         return 0;
 }
 
@@ -277,6 +278,8 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
         spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
         nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
+
+        atomic_dec(&chan->vm->pgraph_refs);
 }
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 4ba8f74e77b1..08202fd682e4 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -131,6 +131,7 @@ nv50_instmem_init(struct drm_device *dev)
         struct nouveau_channel *chan;
         struct nouveau_vm *vm;
         int ret, i;
+        u64 nongart_o;
         u32 tmp;
 
         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -215,37 +216,18 @@ nv50_instmem_init(struct drm_device *dev)
         for (i = 0; i < 8; i++)
                 nv_wr32(dev, 0x1900 + (i*4), 0);
 
-        /* Determine VM layout */
-        dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
-        dev_priv->vm_gart_size = NV50_VM_BLOCK;
-
-        dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
-        dev_priv->vm_vram_size = dev_priv->vram_size;
-        if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
-                dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
-        dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
-        dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
-
-        dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
-
-        NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
-                 dev_priv->vm_gart_base,
-                 dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
-        NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
-                 dev_priv->vm_vram_base,
-                 dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
-
-        /* VRAM page table(s), mapped into VM at +1GiB */
-        for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
-                ret = nouveau_gpuobj_new(dev, NULL, NV50_VM_BLOCK / 0x10000 * 8,
-                                         0, NVOBJ_FLAG_ZERO_ALLOC,
-                                         &dev_priv->vm_vram_pt[i]);
-                if (ret) {
-                        NV_ERROR(dev, "Error creating VRAM PGT: %d\n", ret);
-                        dev_priv->vm_vram_pt_nr = i;
-                        return ret;
-                }
-        }
+        /* Create shared channel VM, space is reserved for GART mappings at
+         * the beginning of this address space, it's managed separately
+         * because TTM makes life painful
+         */
+        dev_priv->vm_gart_base = 0x0020000000ULL;
+        dev_priv->vm_gart_size = 512 * 1024 * 1024;
+        nongart_o = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
+
+        ret = nouveau_vm_new(dev, 0, (1ULL << 40), nongart_o,
+                             29, 12, 16, &dev_priv->chan_vm);
+        if (ret)
+                return ret;
 
         return 0;
 
@@ -269,9 +251,7 @@ nv50_instmem_takedown(struct drm_device *dev)
 
         dev_priv->ramin_available = false;
 
-        for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
-                nouveau_gpuobj_ref(NULL, &dev_priv->vm_vram_pt[i]);
-        dev_priv->vm_vram_pt_nr = 0;
+        nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
 
         for (i = 0x1700; i <= 0x1710; i += 4)
                 nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index efc63c0b0d92..eebab95f59b2 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -149,9 +149,24 @@ nv50_vm_flush(struct nouveau_vm *vm)
 {
         struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
         struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+        struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
 
         pinstmem->flush(vm->dev);
-        nv50_vm_flush_engine(vm->dev, 6);
+
+        /* BAR */
+        if (vm != dev_priv->chan_vm) {
+                nv50_vm_flush_engine(vm->dev, 6);
+                return;
+        }
+
+        pfifo->tlb_flush(vm->dev);
+
+        if (atomic_read(&vm->pgraph_refs))
+                pgraph->tlb_flush(vm->dev);
+        if (atomic_read(&vm->pcrypt_refs))
+                pcrypt->tlb_flush(vm->dev);
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
index a333e5905346..ec18ae1c3886 100644
--- a/drivers/gpu/drm/nouveau/nv84_crypt.c
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -53,6 +53,7 @@ nv84_crypt_create_context(struct nouveau_channel *chan)
         nv_wo32(ramin, 0xb4, 0);
 
         dev_priv->engine.instmem.flush(dev);
+        atomic_inc(&chan->vm->pcrypt_refs);
         return 0;
 }
 
@@ -80,6 +81,7 @@ nv84_crypt_destroy_context(struct nouveau_channel *chan)
         nv_wr32(dev, 0x10200c, 0x00000010);
 
         nouveau_gpuobj_ref(NULL, &chan->crypt_ctx);
+        atomic_dec(&chan->vm->pcrypt_refs);
 }
 
 void