Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 347
1 file changed, 160 insertions(+), 187 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c3e66ae04c83..3465df327227 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -27,66 +27,57 @@
  * Jeremy Kolb <jkolb@brandeis.edu>
  */
 
-#include "drmP.h"
-#include "ttm/ttm_page_alloc.h"
+#include <core/engine.h>
 
-#include <nouveau_drm.h>
-#include "nouveau_drv.h"
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
-#include <core/mm.h>
 #include "nouveau_fence.h"
-#include <core/ramht.h>
-#include <engine/fifo.h>
 
-#include <linux/log2.h>
-#include <linux/slab.h>
+#include "nouveau_bo.h"
+#include "nouveau_ttm.h"
+#include "nouveau_gem.h"
 
 /*
  * NV10-NV40 tiling helpers
  */
 
 static void
-nv10_bo_update_tile_region(struct drm_device *dev,
-                           struct nouveau_tile_reg *tilereg, uint32_t addr,
-                           uint32_t size, uint32_t pitch, uint32_t flags)
+nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
+                           u32 addr, u32 size, u32 pitch, u32 flags)
 {
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        int i = tilereg - dev_priv->tile.reg, j;
-        struct nouveau_fb_tile *tile = nvfb_tile(dev, i);
-        unsigned long save;
+        struct nouveau_drm *drm = nouveau_newpriv(dev);
+        int i = reg - drm->tile.reg;
+        struct nouveau_fb *pfb = nouveau_fb(drm->device);
+        struct nouveau_fb_tile *tile = &pfb->tile.region[i];
+        struct nouveau_engine *engine;
 
-        nouveau_fence_unref(&tilereg->fence);
+        nouveau_fence_unref(&reg->fence);
 
         if (tile->pitch)
-                nvfb_tile_fini(dev, i);
+                pfb->tile.fini(pfb, i, tile);
 
         if (pitch)
-                nvfb_tile_init(dev, i, addr, size, pitch, flags);
-
-        spin_lock_irqsave(&dev_priv->context_switch_lock, save);
-        nv_wr32(dev, NV03_PFIFO_CACHES, 0);
-        nv04_fifo_cache_pull(dev, false);
+                pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);
 
-        nouveau_wait_for_idle(dev);
-
-        nvfb_tile_prog(dev, i);
-        for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
-                if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
-                        dev_priv->eng[j]->set_tile_region(dev, i);
-        }
+        pfb->tile.prog(pfb, i, tile);
 
-        nv04_fifo_cache_pull(dev, true);
-        nv_wr32(dev, NV03_PFIFO_CACHES, 1);
-        spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
+        if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
+                engine->tile_prog(engine, i);
+        if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
+                engine->tile_prog(engine, i);
 }
 
-static struct nouveau_tile_reg *
+static struct nouveau_drm_tile *
 nv10_bo_get_tile_region(struct drm_device *dev, int i)
 {
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+        struct nouveau_drm *drm = nouveau_newpriv(dev);
+        struct nouveau_drm_tile *tile = &drm->tile.reg[i];
 
-        spin_lock(&dev_priv->tile.lock);
+        spin_lock(&drm->tile.lock);
 
         if (!tile->used &&
             (!tile->fence || nouveau_fence_done(tile->fence)))
@@ -94,18 +85,18 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i)
         else
                 tile = NULL;
 
-        spin_unlock(&dev_priv->tile.lock);
+        spin_unlock(&drm->tile.lock);
         return tile;
 }
 
 static void
-nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
+nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
                         struct nouveau_fence *fence)
 {
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct nouveau_drm *drm = nouveau_newpriv(dev);
 
         if (tile) {
-                spin_lock(&dev_priv->tile.lock);
+                spin_lock(&drm->tile.lock);
                 if (fence) {
                         /* Mark it as pending. */
                         tile->fence = fence;
@@ -113,25 +104,27 @@ nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
                 }
 
                 tile->used = false;
-                spin_unlock(&dev_priv->tile.lock);
+                spin_unlock(&drm->tile.lock);
         }
 }
 
-static struct nouveau_tile_reg *
-nv10_bo_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
-                   uint32_t pitch, uint32_t flags)
+static struct nouveau_drm_tile *
+nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
+                   u32 size, u32 pitch, u32 flags)
 {
-        struct nouveau_tile_reg *tile, *found = NULL;
+        struct nouveau_drm *drm = nouveau_newpriv(dev);
+        struct nouveau_fb *pfb = nouveau_fb(drm->device);
+        struct nouveau_drm_tile *tile, *found = NULL;
         int i;
 
-        for (i = 0; i < nvfb_tile_nr(dev); i++) {
+        for (i = 0; i < pfb->tile.regions; i++) {
                 tile = nv10_bo_get_tile_region(dev, i);
 
                 if (pitch && !found) {
                         found = tile;
                         continue;
 
-                } else if (tile && nvfb_tile(dev, i)->pitch) {
+                } else if (tile && pfb->tile.region[i].pitch) {
                         /* Kill an unused tile region. */
                         nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
                 }
@@ -148,13 +141,12 @@ nv10_bo_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
 static void
 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 {
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-        struct drm_device *dev = dev_priv->dev;
+        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+        struct drm_device *dev = drm->dev;
         struct nouveau_bo *nvbo = nouveau_bo(bo);
 
         if (unlikely(nvbo->gem))
                 DRM_ERROR("bo %p still attached to GEM object\n", bo);
-
         nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
         kfree(nvbo);
 }
@@ -163,23 +155,24 @@ static void
 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
                        int *align, int *size)
 {
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+        struct nouveau_device *device = nv_device(drm->device);
 
-        if (dev_priv->card_type < NV_50) {
+        if (device->card_type < NV_50) {
                 if (nvbo->tile_mode) {
-                        if (dev_priv->chipset >= 0x40) {
+                        if (device->chipset >= 0x40) {
                                 *align = 65536;
                                 *size = roundup(*size, 64 * nvbo->tile_mode);
 
-                        } else if (dev_priv->chipset >= 0x30) {
+                        } else if (device->chipset >= 0x30) {
                                 *align = 32768;
                                 *size = roundup(*size, 64 * nvbo->tile_mode);
 
-                        } else if (dev_priv->chipset >= 0x20) {
+                        } else if (device->chipset >= 0x20) {
                                 *align = 16384;
                                 *size = roundup(*size, 64 * nvbo->tile_mode);
 
-                        } else if (dev_priv->chipset >= 0x10) {
+                        } else if (device->chipset >= 0x10) {
                                 *align = 16384;
                                 *size = roundup(*size, 32 * nvbo->tile_mode);
                         }
@@ -198,7 +191,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
                struct sg_table *sg,
                struct nouveau_bo **pnvbo)
 {
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct nouveau_drm *drm = nouveau_newpriv(dev);
         struct nouveau_bo *nvbo;
         size_t acc_size;
         int ret;
@@ -215,22 +208,22 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
         INIT_LIST_HEAD(&nvbo->vma_list);
         nvbo->tile_mode = tile_mode;
         nvbo->tile_flags = tile_flags;
-        nvbo->bo.bdev = &dev_priv->ttm.bdev;
+        nvbo->bo.bdev = &drm->ttm.bdev;
 
         nvbo->page_shift = 12;
-        if (dev_priv->chan_vm) {
+        if (drm->client.base.vm) {
                 if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
-                        nvbo->page_shift = nvvm_lpg_shift(dev_priv->chan_vm);
+                        nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
         }
 
         nouveau_bo_fixup_align(nvbo, flags, &align, &size);
         nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
         nouveau_bo_placement_set(nvbo, flags, 0);
 
-        acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
+        acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
                                        sizeof(struct nouveau_bo));
 
-        ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
+        ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
                           type, &nvbo->placement,
                           align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
                           nouveau_bo_del_ttm);
@@ -259,10 +252,11 @@ set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
 static void
 set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 {
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
-        int vram_pages = nvfb_vram_size(dev_priv->dev) >> PAGE_SHIFT;
+        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+        struct nouveau_fb *pfb = nouveau_fb(drm->device);
+        u32 vram_pages = pfb->ram.size >> PAGE_SHIFT;
 
-        if (dev_priv->card_type == NV_10 &&
+        if (nv_device(drm->device)->card_type == NV_10 &&
             nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
             nvbo->bo.mem.num_pages < vram_pages / 4) {
                 /*
@@ -302,13 +296,12 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
 int
 nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
 {
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
         struct ttm_buffer_object *bo = &nvbo->bo;
         int ret;
 
         if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
-                NV_ERROR(nouveau_bdev(bo->bdev)->dev,
-                         "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
+                NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
                          1 << bo->mem.mem_type, memtype);
                 return -EINVAL;
         }
@@ -326,10 +319,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
         if (ret == 0) {
                 switch (bo->mem.mem_type) {
                 case TTM_PL_VRAM:
-                        dev_priv->fb_aper_free -= bo->mem.size;
+                        drm->gem.vram_available -= bo->mem.size;
                         break;
                 case TTM_PL_TT:
-                        dev_priv->gart_info.aper_free -= bo->mem.size;
+                        drm->gem.gart_available -= bo->mem.size;
                         break;
                 default:
                         break;
@@ -345,7 +338,7 @@ out:
 int
 nouveau_bo_unpin(struct nouveau_bo *nvbo)
 {
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
         struct ttm_buffer_object *bo = &nvbo->bo;
         int ret;
 
@@ -362,10 +355,10 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
         if (ret == 0) {
                 switch (bo->mem.mem_type) {
                 case TTM_PL_VRAM:
-                        dev_priv->fb_aper_free += bo->mem.size;
+                        drm->gem.vram_available += bo->mem.size;
                         break;
                 case TTM_PL_TT:
-                        dev_priv->gart_info.aper_free += bo->mem.size;
+                        drm->gem.gart_available += bo->mem.size;
                         break;
                 default:
                         break;
@@ -460,30 +453,18 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
 }
 
 static struct ttm_tt *
-nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
-                      unsigned long size, uint32_t page_flags,
-                      struct page *dummy_read_page)
+nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
+                      uint32_t page_flags, struct page *dummy_read)
 {
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
-        struct drm_device *dev = dev_priv->dev;
+        struct nouveau_drm *drm = nouveau_bdev(bdev);
+        struct drm_device *dev = drm->dev;
 
-        switch (dev_priv->gart_info.type) {
-#if __OS_HAS_AGP
-        case NOUVEAU_GART_AGP:
-                return ttm_agp_tt_create(bdev, dev->agp->bridge,
-                                         size, page_flags, dummy_read_page);
-#endif
-        case NOUVEAU_GART_PDMA:
-        case NOUVEAU_GART_HW:
-                return nouveau_sgdma_create_ttm(bdev, size, page_flags,
-                                                dummy_read_page);
-        default:
-                NV_ERROR(dev, "Unknown GART type %d\n",
-                         dev_priv->gart_info.type);
-                break;
+        if (drm->agp.stat == ENABLED) {
+                return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
+                                         page_flags, dummy_read);
         }
 
-        return NULL;
+        return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
 }
 
 static int
@@ -497,8 +478,7 @@ static int
 nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                          struct ttm_mem_type_manager *man)
 {
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
-        struct drm_device *dev = dev_priv->dev;
+        struct nouveau_drm *drm = nouveau_bdev(bdev);
 
         switch (type) {
         case TTM_PL_SYSTEM:
@@ -507,7 +487,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                 man->default_caching = TTM_PL_FLAG_CACHED;
                 break;
         case TTM_PL_VRAM:
-                if (dev_priv->card_type >= NV_50) {
+                if (nv_device(drm->device)->card_type >= NV_50) {
                         man->func = &nouveau_vram_manager;
                         man->io_reserve_fastpath = false;
                         man->use_io_reserve_lru = true;
@@ -521,35 +501,28 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                 man->default_caching = TTM_PL_FLAG_WC;
                 break;
         case TTM_PL_TT:
-                if (dev_priv->card_type >= NV_50)
+                if (nv_device(drm->device)->card_type >= NV_50)
                         man->func = &nouveau_gart_manager;
                 else
-                if (dev_priv->gart_info.type != NOUVEAU_GART_AGP)
+                if (drm->agp.stat != ENABLED)
                         man->func = &nv04_gart_manager;
                 else
                         man->func = &ttm_bo_manager_func;
-                switch (dev_priv->gart_info.type) {
-                case NOUVEAU_GART_AGP:
+
+                if (drm->agp.stat == ENABLED) {
                         man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                         man->available_caching = TTM_PL_FLAG_UNCACHED |
                                 TTM_PL_FLAG_WC;
                         man->default_caching = TTM_PL_FLAG_WC;
-                        break;
-                case NOUVEAU_GART_PDMA:
-                case NOUVEAU_GART_HW:
+                } else {
                         man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                      TTM_MEMTYPE_FLAG_CMA;
                         man->available_caching = TTM_PL_MASK_CACHING;
                         man->default_caching = TTM_PL_FLAG_CACHED;
-                        break;
-                default:
-                        NV_ERROR(dev, "Unknown GART type: %d\n",
-                                 dev_priv->gart_info.type);
-                        return -EINVAL;
                 }
+
                 break;
         default:
-                NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
                 return -EINVAL;
         }
         return 0;
@@ -783,20 +756,14 @@ nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 static int
 nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
 {
-        int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
-                                         &chan->m2mf_ntfy);
+        int ret = RING_SPACE(chan, 6);
         if (ret == 0) {
-                ret = RING_SPACE(chan, 6);
-                if (ret == 0) {
-                        BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
-                        OUT_RING (chan, handle);
-                        BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
-                        OUT_RING (chan, NvNotify0);
-                        OUT_RING (chan, NvDmaFB);
-                        OUT_RING (chan, NvDmaFB);
-                } else {
-                        nouveau_ramht_remove(chan, NvNotify0);
-                }
+                BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+                OUT_RING (chan, handle);
+                BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
+                OUT_RING (chan, NvNotify0);
+                OUT_RING (chan, NvDmaFB);
+                OUT_RING (chan, NvDmaFB);
         }
 
         return ret;
@@ -895,16 +862,12 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 static int
 nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
 {
-        int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
-                                         &chan->m2mf_ntfy);
+        int ret = RING_SPACE(chan, 4);
         if (ret == 0) {
-                ret = RING_SPACE(chan, 4);
-                if (ret == 0) {
-                        BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
-                        OUT_RING (chan, handle);
-                        BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
-                        OUT_RING (chan, NvNotify0);
-                }
+                BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+                OUT_RING (chan, handle);
+                BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
+                OUT_RING (chan, NvNotify0);
         }
 
         return ret;
@@ -915,8 +878,8 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
                       struct nouveau_channel *chan, struct ttm_mem_reg *mem)
 {
         if (mem->mem_type == TTM_PL_TT)
-                return chan->gart_handle;
-        return chan->vram_handle;
+                return NvDmaTT;
+        return NvDmaFB;
 }
 
 static int
@@ -972,8 +935,9 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
         struct nouveau_mem *node = mem->mm_node;
         int ret;
 
-        ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
-                             node->page_shift, NV_MEM_ACCESS_RO, vma);
+        ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
+                             PAGE_SHIFT, node->page_shift,
+                             NV_MEM_ACCESS_RW, vma);
         if (ret)
                 return ret;
 
@@ -990,19 +954,19 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
 {
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-        struct nouveau_channel *chan = chan = dev_priv->channel;
+        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+        struct nouveau_channel *chan = chan = drm->channel;
         struct nouveau_bo *nvbo = nouveau_bo(bo);
         struct ttm_mem_reg *old_mem = &bo->mem;
         int ret;
 
-        mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
+        mutex_lock(&chan->cli->mutex);
 
         /* create temporary vmas for the transfer and attach them to the
          * old nouveau_mem node, these will get cleaned up after ttm has
          * destroyed the ttm_mem_reg
          */
-        if (dev_priv->card_type >= NV_50) {
+        if (nv_device(drm->device)->card_type >= NV_50) {
                 struct nouveau_mem *node = old_mem->mm_node;
 
                 ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
@@ -1014,7 +978,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                 goto out;
         }
 
-        ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem);
+        ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
         if (ret == 0) {
                 ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
                                                     no_wait_reserve,
@@ -1022,14 +986,15 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
         }
 
 out:
-        mutex_unlock(&chan->mutex);
+        mutex_unlock(&chan->cli->mutex);
         return ret;
 }
 
 void
 nouveau_bo_move_init(struct nouveau_channel *chan)
 {
-        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+        struct nouveau_cli *cli = chan->cli;
+        struct nouveau_drm *drm = chan->drm;
         static const struct {
                 const char *name;
                 int engine;
@@ -1054,19 +1019,26 @@ nouveau_bo_move_init(struct nouveau_channel *chan)
         int ret;
 
         do {
+                struct nouveau_object *object;
                 u32 handle = (mthd->engine << 16) | mthd->oclass;
-                ret = nouveau_gpuobj_gr_new(chan, handle, mthd->oclass);
+
+                ret = nouveau_object_new(nv_object(cli), chan->handle, handle,
+                                         mthd->oclass, NULL, 0, &object);
                 if (ret == 0) {
                         ret = mthd->init(chan, handle);
-                        if (ret == 0) {
-                                dev_priv->ttm.move = mthd->exec;
-                                name = mthd->name;
-                                break;
+                        if (ret) {
+                                nouveau_object_del(nv_object(cli),
+                                                   chan->handle, handle);
+                                continue;
                         }
+
+                        drm->ttm.move = mthd->exec;
+                        name = mthd->name;
+                        break;
                 }
         } while ((++mthd)->exec);
 
-        NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name);
+        NV_INFO(drm, "MM: using %s for buffer copies\n", name);
 }
 
 static int
@@ -1151,7 +1123,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
                 nouveau_vm_map(vma, new_mem->mm_node);
         } else
         if (new_mem && new_mem->mem_type == TTM_PL_TT &&
-            nvbo->page_shift == nvvm_spg_shift(vma->vm)) {
+            nvbo->page_shift == vma->vm->vmm->spg_shift) {
                 if (((struct nouveau_mem *)new_mem->mm_node)->sg)
                         nouveau_vm_map_sg_table(vma, 0, new_mem->
                                                 num_pages << PAGE_SHIFT,
@@ -1168,10 +1140,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
 
 static int
 nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
-                   struct nouveau_tile_reg **new_tile)
+                   struct nouveau_drm_tile **new_tile)
 {
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-        struct drm_device *dev = dev_priv->dev;
+        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+        struct drm_device *dev = drm->dev;
         struct nouveau_bo *nvbo = nouveau_bo(bo);
         u64 offset = new_mem->start << PAGE_SHIFT;
 
@@ -1179,7 +1151,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
         if (new_mem->mem_type != TTM_PL_VRAM)
                 return 0;
 
-        if (dev_priv->card_type >= NV_10) {
+        if (nv_device(drm->device)->card_type >= NV_10) {
                 *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
                                                nvbo->tile_mode,
                                                nvbo->tile_flags);
@@ -1190,11 +1162,11 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
 
 static void
 nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
-                      struct nouveau_tile_reg *new_tile,
-                      struct nouveau_tile_reg **old_tile)
+                      struct nouveau_drm_tile *new_tile,
+                      struct nouveau_drm_tile **old_tile)
 {
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-        struct drm_device *dev = dev_priv->dev;
+        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+        struct drm_device *dev = drm->dev;
 
         nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
         *old_tile = new_tile;
@@ -1205,13 +1177,13 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                 bool no_wait_reserve, bool no_wait_gpu,
                 struct ttm_mem_reg *new_mem)
 {
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
         struct nouveau_bo *nvbo = nouveau_bo(bo);
         struct ttm_mem_reg *old_mem = &bo->mem;
-        struct nouveau_tile_reg *new_tile = NULL;
+        struct nouveau_drm_tile *new_tile = NULL;
         int ret = 0;
 
-        if (dev_priv->card_type < NV_50) {
+        if (nv_device(drm->device)->card_type < NV_50) {
                 ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
                 if (ret)
                         return ret;
@@ -1226,7 +1198,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
         }
 
         /* CPU copy if we have no accelerated method available */
-        if (!dev_priv->ttm.move) {
+        if (!drm->ttm.move) {
                 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
                 goto out;
         }
@@ -1246,7 +1218,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 
 out:
-        if (dev_priv->card_type < NV_50) {
+        if (nv_device(drm->device)->card_type < NV_50) {
                 if (ret)
                         nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
                 else
@@ -1266,8 +1238,8 @@ static int
 nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
-        struct drm_device *dev = dev_priv->dev;
+        struct nouveau_drm *drm = nouveau_bdev(bdev);
+        struct drm_device *dev = drm->dev;
         int ret;
 
         mem->bus.addr = NULL;
@@ -1283,9 +1255,9 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
                 return 0;
         case TTM_PL_TT:
 #if __OS_HAS_AGP
-                if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+                if (drm->agp.stat == ENABLED) {
                         mem->bus.offset = mem->start << PAGE_SHIFT;
-                        mem->bus.base = dev_priv->gart_info.aper_base;
+                        mem->bus.base = drm->agp.base;
                         mem->bus.is_iomem = true;
                 }
 #endif
@@ -1294,10 +1266,11 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
                 mem->bus.offset = mem->start << PAGE_SHIFT;
                 mem->bus.base = pci_resource_start(dev->pdev, 1);
                 mem->bus.is_iomem = true;
-                if (dev_priv->card_type >= NV_50) {
+                if (nv_device(drm->device)->card_type >= NV_50) {
+                        struct nouveau_bar *bar = nouveau_bar(drm->device);
                         struct nouveau_mem *node = mem->mm_node;
 
-                        ret = nvbar_map(dev, node, NV_MEM_ACCESS_RW,
+                        ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
                                         &node->bar_vma);
                         if (ret)
                                 return ret;
@@ -1314,40 +1287,40 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+        struct nouveau_drm *drm = nouveau_bdev(bdev);
+        struct nouveau_bar *bar = nouveau_bar(drm->device);
         struct nouveau_mem *node = mem->mm_node;
 
-        if (mem->mem_type != TTM_PL_VRAM)
-                return;
-
         if (!node->bar_vma.node)
                 return;
 
-        nvbar_unmap(dev_priv->dev, &node->bar_vma);
+        bar->unmap(bar, &node->bar_vma);
 }
 
 static int
 nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
         struct nouveau_bo *nvbo = nouveau_bo(bo);
+        struct nouveau_device *device = nv_device(drm->device);
+        u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;
 
         /* as long as the bo isn't in vram, and isn't tiled, we've got
          * nothing to do here.
          */
         if (bo->mem.mem_type != TTM_PL_VRAM) {
-                if (dev_priv->card_type < NV_50 ||
+                if (nv_device(drm->device)->card_type < NV_50 ||
                     !nouveau_bo_tile_layout(nvbo))
                         return 0;
         }
 
         /* make sure bo is in mappable vram */
-        if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
+        if (bo->mem.start + bo->mem.num_pages < mappable)
                 return 0;
 
 
         nvbo->placement.fpfn = 0;
-        nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
+        nvbo->placement.lpfn = mappable;
         nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
         return nouveau_bo_validate(nvbo, false, true, false);
 }
@@ -1356,7 +1329,7 @@ static int
 nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 {
         struct ttm_dma_tt *ttm_dma = (void *)ttm;
-        struct drm_nouveau_private *dev_priv;
+        struct nouveau_drm *drm;
         struct drm_device *dev;
         unsigned i;
         int r;
@@ -1373,11 +1346,11 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
                 return 0;
         }
 
-        dev_priv = nouveau_bdev(ttm->bdev);
-        dev = dev_priv->dev;
+        drm = nouveau_bdev(ttm->bdev);
+        dev = drm->dev;
 
 #if __OS_HAS_AGP
-        if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+        if (drm->agp.stat == ENABLED) {
                 return ttm_agp_tt_populate(ttm);
         }
 #endif
@@ -1414,7 +1387,7 @@ static void
 nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 {
         struct ttm_dma_tt *ttm_dma = (void *)ttm;
-        struct drm_nouveau_private *dev_priv;
+        struct nouveau_drm *drm;
         struct drm_device *dev;
         unsigned i;
         bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1422,11 +1395,11 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
         if (slave)
                 return;
 
-        dev_priv = nouveau_bdev(ttm->bdev);
-        dev = dev_priv->dev;
+        drm = nouveau_bdev(ttm->bdev);
+        dev = drm->dev;
 
 #if __OS_HAS_AGP
-        if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+        if (drm->agp.stat == ENABLED) {
                 ttm_agp_tt_unpopulate(ttm);
                 return;
         }