Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_mem.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 742
1 file changed, 69 insertions(+), 673 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 7f0afad13653..7e0ff10a2759 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -30,446 +30,10 @@
  * Roy Spliet <r.spliet@student.tudelft.nl>
  */
 
-
-#include <drm/drmP.h>
-
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_pm.h"
-#include "nouveau_mm.h"
-#include "nouveau_vm.h"
-#include "nouveau_fifo.h"
-#include "nouveau_fence.h"
-
-/*
- * NV10-NV40 tiling helpers
- */
-
-static void
-nv10_mem_update_tile_region(struct drm_device *dev,
-                            struct nouveau_tile_reg *tile, uint32_t addr,
-                            uint32_t size, uint32_t pitch, uint32_t flags)
-{
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-        int i = tile - dev_priv->tile.reg, j;
-        unsigned long save;
-
-        nouveau_fence_unref(&tile->fence);
-
-        if (tile->pitch)
-                pfb->free_tile_region(dev, i);
-
-        if (pitch)
-                pfb->init_tile_region(dev, i, addr, size, pitch, flags);
-
-        spin_lock_irqsave(&dev_priv->context_switch_lock, save);
-        nv_wr32(dev, NV03_PFIFO_CACHES, 0);
-        nv04_fifo_cache_pull(dev, false);
-
-        nouveau_wait_for_idle(dev);
-
-        pfb->set_tile_region(dev, i);
-        for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
-                if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
-                        dev_priv->eng[j]->set_tile_region(dev, i);
-        }
-
-        nv04_fifo_cache_pull(dev, true);
-        nv_wr32(dev, NV03_PFIFO_CACHES, 1);
-        spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
-}
-
-static struct nouveau_tile_reg *
-nv10_mem_get_tile_region(struct drm_device *dev, int i)
-{
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
-        spin_lock(&dev_priv->tile.lock);
-
-        if (!tile->used &&
-            (!tile->fence || nouveau_fence_done(tile->fence)))
-                tile->used = true;
-        else
-                tile = NULL;
-
-        spin_unlock(&dev_priv->tile.lock);
-        return tile;
-}
-
-void
-nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
-                         struct nouveau_fence *fence)
-{
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-        if (tile) {
-                spin_lock(&dev_priv->tile.lock);
-                if (fence) {
-                        /* Mark it as pending. */
-                        tile->fence = fence;
-                        nouveau_fence_ref(fence);
-                }
-
-                tile->used = false;
-                spin_unlock(&dev_priv->tile.lock);
-        }
-}
-
-struct nouveau_tile_reg *
-nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
-                    uint32_t pitch, uint32_t flags)
-{
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-        struct nouveau_tile_reg *tile, *found = NULL;
-        int i;
-
-        for (i = 0; i < pfb->num_tiles; i++) {
-                tile = nv10_mem_get_tile_region(dev, i);
-
-                if (pitch && !found) {
-                        found = tile;
-                        continue;
-
-                } else if (tile && tile->pitch) {
-                        /* Kill an unused tile region. */
-                        nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
-                }
-
-                nv10_mem_put_tile_region(dev, tile, NULL);
-        }
-
-        if (found)
-                nv10_mem_update_tile_region(dev, found, addr, size,
-                                            pitch, flags);
-        return found;
-}
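
The removed nv10_mem_set_tiling() is a small fixed-pool allocator: one pass both claims the first free slot for the new surface and scrubs stale entries that still hold a pitch. A minimal standalone sketch of that pattern (hypothetical types and pool size, not driver code):

```c
#include <stdbool.h>
#include <stddef.h>

struct tile_reg { bool used; unsigned int pitch; };

#define NUM_TILES 8
static struct tile_reg pool[NUM_TILES];

/* One pass over the fixed pool: claim the first free slot, and
 * opportunistically clear any other free slot still holding a stale
 * pitch, mirroring the loop in nv10_mem_set_tiling() above. */
static struct tile_reg *set_tiling(unsigned int pitch)
{
        struct tile_reg *found = NULL;
        size_t i;

        for (i = 0; i < NUM_TILES; i++) {
                struct tile_reg *t = &pool[i];

                if (t->used)
                        continue;
                if (pitch && !found) {
                        t->used = true;         /* reserve it */
                        found = t;
                } else if (t->pitch) {
                        t->pitch = 0;           /* kill an unused region */
                }
        }

        if (found)
                found->pitch = pitch;
        return found;
}
```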
-
-/*
- * Cleanup everything
- */
-void
-nouveau_mem_vram_fini(struct drm_device *dev)
-{
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-        ttm_bo_device_release(&dev_priv->ttm.bdev);
-
-        nouveau_ttm_global_release(dev_priv);
-
-        if (dev_priv->fb_mtrr >= 0) {
-                drm_mtrr_del(dev_priv->fb_mtrr,
-                             pci_resource_start(dev->pdev, 1),
-                             pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
-                dev_priv->fb_mtrr = -1;
-        }
-}
-
-void
-nouveau_mem_gart_fini(struct drm_device *dev)
-{
-        nouveau_sgdma_takedown(dev);
-
-        if (drm_core_has_AGP(dev) && dev->agp) {
-                struct drm_agp_mem *entry, *tempe;
-
-                /* Remove AGP resources, but leave dev->agp
-                   intact until drv_cleanup is called. */
-                list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
-                        if (entry->bound)
-                                drm_unbind_agp(entry->memory);
-                        drm_free_agp(entry->memory, entry->pages);
-                        kfree(entry);
-                }
-                INIT_LIST_HEAD(&dev->agp->memory);
 
-                if (dev->agp->acquired)
-                        drm_agp_release(dev);
-
-                dev->agp->acquired = 0;
-                dev->agp->enabled = 0;
-        }
-}
-
-bool
-nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
-{
-        if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
-                return true;
-
-        return false;
-}
-
-#if __OS_HAS_AGP
-static unsigned long
-get_agp_mode(struct drm_device *dev, unsigned long mode)
-{
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-        /*
-         * FW seems to be broken on nv18, it makes the card lock up
-         * randomly.
-         */
-        if (dev_priv->chipset == 0x18)
-                mode &= ~PCI_AGP_COMMAND_FW;
-
-        /*
-         * AGP mode set in the command line.
-         */
-        if (nouveau_agpmode > 0) {
-                bool agpv3 = mode & 0x8;
-                int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;
-
-                mode = (mode & ~0x7) | (rate & 0x7);
-        }
-
-        return mode;
-}
-#endif
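
get_agp_mode() folds the nouveau_agpmode module option into the low three bits of the AGP status/command word; when bit 3 flags AGP 3.0, the rate field counts in 4x units, so an "8x" request encodes as 2. A standalone sketch of just that arithmetic (apply_agpmode is a hypothetical name; the math follows the removed code):

```c
#include <stdbool.h>
#include <stdio.h>

/* Mirror of the rate math above: AGP 3.0 (bit 3 set) encodes the
 * transfer rate in 4x units, AGP 2.0 encodes it directly. */
static unsigned long apply_agpmode(unsigned long mode, int agpmode)
{
        if (agpmode > 0) {
                bool agpv3 = mode & 0x8;
                int rate = agpv3 ? agpmode / 4 : agpmode;

                mode = (mode & ~0x7) | (rate & 0x7);
        }
        return mode;
}

int main(void)
{
        printf("0x%lx\n", apply_agpmode(0x1f000208, 8)); /* v3: 8x -> rate 2 */
        printf("0x%lx\n", apply_agpmode(0x1f000201, 4)); /* v2: 4x -> rate 4 */
        return 0;
}
```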
-
-int
-nouveau_mem_reset_agp(struct drm_device *dev)
-{
-#if __OS_HAS_AGP
-        uint32_t saved_pci_nv_1, pmc_enable;
-        int ret;
-
-        /* First of all, disable fast writes, otherwise if it's
-         * already enabled in the AGP bridge and we disable the card's
-         * AGP controller we might be locking ourselves out of it. */
-        if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
-             dev->agp->mode) & PCI_AGP_COMMAND_FW) {
-                struct drm_agp_info info;
-                struct drm_agp_mode mode;
-
-                ret = drm_agp_info(dev, &info);
-                if (ret)
-                        return ret;
-
-                mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
-                ret = drm_agp_enable(dev, mode);
-                if (ret)
-                        return ret;
-        }
-
-        saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);
-
-        /* clear busmaster bit */
-        nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
-        /* disable AGP */
-        nv_wr32(dev, NV04_PBUS_PCI_NV_19, 0);
-
-        /* power cycle pgraph, if enabled */
-        pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
-        if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
-                nv_wr32(dev, NV03_PMC_ENABLE,
-                        pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
-                nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
-                        NV_PMC_ENABLE_PGRAPH);
-        }
-
-        /* and restore (gives effect of resetting AGP) */
-        nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
-#endif
-
-        return 0;
-}
-
-int
-nouveau_mem_init_agp(struct drm_device *dev)
-{
-#if __OS_HAS_AGP
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct drm_agp_info info;
-        struct drm_agp_mode mode;
-        int ret;
-
-        if (!dev->agp->acquired) {
-                ret = drm_agp_acquire(dev);
-                if (ret) {
-                        NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
-                        return ret;
-                }
-        }
-
-        nouveau_mem_reset_agp(dev);
-
-        ret = drm_agp_info(dev, &info);
-        if (ret) {
-                NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
-                return ret;
-        }
-
-        /* see agp.h for the AGPSTAT_* modes available */
-        mode.mode = get_agp_mode(dev, info.mode);
-        ret = drm_agp_enable(dev, mode);
-        if (ret) {
-                NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
-                return ret;
-        }
-
-        dev_priv->gart_info.type = NOUVEAU_GART_AGP;
-        dev_priv->gart_info.aper_base = info.aperture_base;
-        dev_priv->gart_info.aper_size = info.aperture_size;
-#endif
-        return 0;
-}
-
-static const struct vram_types {
-        int value;
-        const char *name;
-} vram_type_map[] = {
-        { NV_MEM_TYPE_STOLEN , "stolen system memory" },
-        { NV_MEM_TYPE_SGRAM  , "SGRAM" },
-        { NV_MEM_TYPE_SDRAM  , "SDRAM" },
-        { NV_MEM_TYPE_DDR1   , "DDR1" },
-        { NV_MEM_TYPE_DDR2   , "DDR2" },
-        { NV_MEM_TYPE_DDR3   , "DDR3" },
-        { NV_MEM_TYPE_GDDR2  , "GDDR2" },
-        { NV_MEM_TYPE_GDDR3  , "GDDR3" },
-        { NV_MEM_TYPE_GDDR4  , "GDDR4" },
-        { NV_MEM_TYPE_GDDR5  , "GDDR5" },
-        { NV_MEM_TYPE_UNKNOWN, "unknown type" }
-};
-
-int
-nouveau_mem_vram_init(struct drm_device *dev)
-{
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
-        const struct vram_types *vram_type;
-        int ret, dma_bits;
-
-        dma_bits = 32;
-        if (dev_priv->card_type >= NV_50) {
-                if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
-                        dma_bits = 40;
-        } else
-        if (0 && pci_is_pcie(dev->pdev) &&
-            dev_priv->chipset  > 0x40 &&
-            dev_priv->chipset != 0x45) {
-                if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
-                        dma_bits = 39;
-        }
-
-        ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
-        if (ret)
-                return ret;
-        ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
-        if (ret) {
-                /* Reset to default value. */
-                pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
-        }
-
-
-        ret = nouveau_ttm_global_init(dev_priv);
-        if (ret)
-                return ret;
-
-        ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
-                                 dev_priv->ttm.bo_global_ref.ref.object,
-                                 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
-                                 dma_bits <= 32 ? true : false);
-        if (ret) {
-                NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
-                return ret;
-        }
-
-        vram_type = vram_type_map;
-        while (vram_type->value != NV_MEM_TYPE_UNKNOWN) {
-                if (nouveau_vram_type) {
-                        if (!strcasecmp(nouveau_vram_type, vram_type->name))
-                                break;
-                        dev_priv->vram_type = vram_type->value;
-                } else {
-                        if (vram_type->value == dev_priv->vram_type)
-                                break;
-                }
-                vram_type++;
-        }
-
-        NV_INFO(dev, "Detected %dMiB VRAM (%s)\n",
-                (int)(dev_priv->vram_size >> 20), vram_type->name);
-        if (dev_priv->vram_sys_base) {
-                NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
-                        dev_priv->vram_sys_base);
-        }
-
-        dev_priv->fb_available_size = dev_priv->vram_size;
-        dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
-        if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
-                dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
-        dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
-
-        dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
-        dev_priv->fb_aper_free = dev_priv->fb_available_size;
-
-        /* mappable vram */
-        ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
-                             dev_priv->fb_available_size >> PAGE_SHIFT);
-        if (ret) {
-                NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
-                return ret;
-        }
-
-        if (dev_priv->card_type < NV_50) {
-                ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
-                                     0, 0, NULL, &dev_priv->vga_ram);
-                if (ret == 0)
-                        ret = nouveau_bo_pin(dev_priv->vga_ram,
-                                             TTM_PL_FLAG_VRAM);
-
-                if (ret) {
-                        NV_WARN(dev, "failed to reserve VGA memory\n");
-                        nouveau_bo_ref(NULL, &dev_priv->vga_ram);
-                }
-        }
-
-        dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
                                         pci_resource_len(dev->pdev, 1),
-                                         DRM_MTRR_WC);
-        return 0;
-}
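
The first thing the removed vram_init does is size the DMA mask by card generation (40 bits on NV50+, otherwise 32), falling back to a 32-bit coherent mask if the wider one is refused. A hedged sketch of the same decision on the later consolidated DMA API; dma_set_mask_and_coherent() is a real kernel helper, but the wrapper name set_dma_mask and the boolean parameter are illustrative:

```c
#include <linux/dma-mapping.h>

/* Try the widest mask the device claims to support, then fall back
 * to the 32-bit mask every platform honours -- the same shape as the
 * pci_set_dma_mask()/pci_set_consistent_dma_mask() pair above, on
 * the later combined API (assumption: a post-3.13 kernel). */
static int set_dma_mask(struct device *dev, bool is_nv50_plus)
{
        int bits = is_nv50_plus ? 40 : 32;

        if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(bits)))
                return 0;
        return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}
```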
-
-int
-nouveau_mem_gart_init(struct drm_device *dev)
-{
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
-        int ret;
-
-        dev_priv->gart_info.type = NOUVEAU_GART_NONE;
-
-#if !defined(__powerpc__) && !defined(__ia64__)
-        if (drm_pci_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
-                ret = nouveau_mem_init_agp(dev);
-                if (ret)
-                        NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
-        }
-#endif
-
-        if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
-                ret = nouveau_sgdma_init(dev);
-                if (ret) {
-                        NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
-                        return ret;
-                }
-        }
-
-        NV_INFO(dev, "%d MiB GART (aperture)\n",
-                (int)(dev_priv->gart_info.aper_size >> 20));
-        dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;
-
-        ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
-                             dev_priv->gart_info.aper_size >> PAGE_SHIFT);
-        if (ret) {
-                NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
-                return ret;
-        }
-
-        return 0;
-}
+#include <subdev/fb.h>
 
 static int
 nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
@@ -477,6 +41,8 @@ nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
                      struct nouveau_pm_memtiming *boot,
                      struct nouveau_pm_memtiming *t)
 {
+        struct nouveau_drm *drm = nouveau_drm(dev);
+
         t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
 
         /* XXX: I don't trust the -1's and +1's... they must come
@@ -492,7 +58,7 @@ nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
                  e->tRCDWR << 8 |
                  e->tRCDRD);
 
-        NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", t->id,
+        NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x\n", t->id,
                  t->reg[0], t->reg[1], t->reg[2]);
         return 0;
 }
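
nv40's first timing register packs four byte-wide DRAM parameters into one 32-bit word, tRP at bits 31:24 down to tRC at bits 7:0. A standalone check of that packing; the sample values in the comment are made up for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* Same layout t->reg[0] gets above: tRP 31:24, tRAS 23:16,
 * tRFC 15:8, tRC 7:0. */
static uint32_t pack_nv40_timing(uint8_t tRP, uint8_t tRAS,
                                 uint8_t tRFC, uint8_t tRC)
{
        return (uint32_t)tRP << 24 | (uint32_t)tRAS << 16 |
               (uint32_t)tRFC << 8 | tRC;
}

int main(void)
{
        /* e.g. tRP=12, tRAS=39, tRFC=82, tRC=53 -> 0x0c275235 */
        printf("0x%08x\n", pack_nv40_timing(12, 39, 82, 53));
        return 0;
}
```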
@@ -503,7 +69,9 @@ nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
                      struct nouveau_pm_memtiming *boot,
                      struct nouveau_pm_memtiming *t)
 {
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct nouveau_device *device = nouveau_dev(dev);
+        struct nouveau_fb *pfb = nouveau_fb(device);
+        struct nouveau_drm *drm = nouveau_drm(dev);
         struct bit_entry P;
         uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;
 
@@ -557,7 +125,7 @@ nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
         t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;
 
         /* XXX: P.version == 1 only has DDR2 and GDDR3? */
-        if (dev_priv->vram_type == NV_MEM_TYPE_DDR2) {
+        if (pfb->ram.type == NV_MEM_TYPE_DDR2) {
                 t->reg[5] |= (e->tCL + 3) << 8;
                 t->reg[6] |= (t->tCWL - 2) << 8;
                 t->reg[8] |= (e->tCL - 4);
@@ -590,11 +158,11 @@ nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
                         0x202;
         }
 
-        NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
+        NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
                  t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
-        NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n",
+        NV_DEBUG(drm, " 230: %08x %08x %08x %08x\n",
                  t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
-        NV_DEBUG(dev, " 240: %08x\n", t->reg[8]);
+        NV_DEBUG(drm, " 240: %08x\n", t->reg[8]);
         return 0;
 }
 
@@ -604,6 +172,8 @@ nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
                      struct nouveau_pm_memtiming *boot,
                      struct nouveau_pm_memtiming *t)
 {
+        struct nouveau_drm *drm = nouveau_drm(dev);
+
         if (e->tCWL > 0)
                 t->tCWL = e->tCWL;
 
@@ -626,9 +196,9 @@ nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
         t->reg[4] = (boot->reg[4] & 0xfff00fff) |
                     (e->tRRD&0x1f) << 15;
 
-        NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
+        NV_DEBUG(drm, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
                  t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
-        NV_DEBUG(dev, " 2a0: %08x\n", t->reg[4]);
+        NV_DEBUG(drm, " 2a0: %08x\n", t->reg[4]);
         return 0;
 }
 
@@ -642,6 +212,8 @@ nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
 {
+        struct nouveau_drm *drm = nouveau_drm(dev);
+
         t->drive_strength = 0;
         if (len < 15) {
                 t->odt = boot->odt;
@@ -650,17 +222,17 @@ nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
         }
 
         if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
-                NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
+                NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
                 return -ERANGE;
         }
 
         if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
-                NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
+                NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
                 return -ERANGE;
         }
 
         if (t->odt > 3) {
-                NV_WARN(dev, "(%u) Invalid odt value, assuming disabled: %x",
+                NV_WARN(drm, "(%u) Invalid odt value, assuming disabled: %x",
                         t->id, t->odt);
                 t->odt = 0;
         }
@@ -672,11 +244,11 @@ nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
                 (t->odt & 0x1) << 2 |
                 (t->odt & 0x2) << 5;
 
-        NV_DEBUG(dev, "(%u) MR: %08x", t->id, t->mr[0]);
+        NV_DEBUG(drm, "(%u) MR: %08x", t->id, t->mr[0]);
         return 0;
 }
 
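The two odd-looking shifts above come from the DDR2 EMR(1) layout: JEDEC places the ODT strength bits at A2 and A6, so the driver's contiguous 2-bit odt value has to be split. A standalone check of the same bit movement (ddr2_mr1_odt is a hypothetical name):

```c
#include <stdint.h>
#include <stdio.h>

/* Split a contiguous 2-bit ODT value into DDR2 EMR(1) positions:
 * bit 0 lands on A2, bit 1 (value 0x2) shifts up five places to A6. */
static uint32_t ddr2_mr1_odt(uint32_t mr1, unsigned int odt)
{
        mr1 &= ~((1u << 2) | (1u << 6));        /* clear A2/A6 first */
        mr1 |= (odt & 0x1) << 2;
        mr1 |= (odt & 0x2) << 5;
        return mr1;
}

int main(void)
{
        printf("0x%x\n", ddr2_mr1_odt(0x0, 3)); /* -> 0x44 */
        return 0;
}
```
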
-uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
+static const uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
         0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};
 
 static int
@@ -685,6 +257,7 @@ nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
 {
+        struct nouveau_drm *drm = nouveau_drm(dev);
         u8 cl = e->tCL - 4;
 
         t->drive_strength = 0;
@@ -695,17 +268,17 @@ nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
         }
 
         if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
-                NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
+                NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
                 return -ERANGE;
         }
 
         if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
-                NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
+                NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
                 return -ERANGE;
         }
 
         if (e->tCWL < 5) {
-                NV_WARN(dev, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
+                NV_WARN(drm, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
                 return -ERANGE;
         }
 
@@ -720,13 +293,13 @@ nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
                 (t->odt & 0x4) << 7;
         t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;
 
-        NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
+        NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
         return 0;
 }
 
-uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
+static const uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
         0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
-uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
+static const uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
         0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};
 
 static int
@@ -735,6 +308,8 @@ nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
                      struct nouveau_pm_memtiming *boot,
                      struct nouveau_pm_memtiming *t)
 {
+        struct nouveau_drm *drm = nouveau_drm(dev);
+
         if (len < 15) {
                 t->drive_strength = boot->drive_strength;
                 t->odt = boot->odt;
@@ -744,17 +319,17 @@ nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
         }
 
         if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
-                NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
+                NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
                 return -ERANGE;
         }
 
         if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
-                NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
+                NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
                 return -ERANGE;
         }
 
         if (t->odt > 3) {
-                NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
+                NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
                         t->id, t->odt);
                 t->odt = 0;
         }
@@ -768,7 +343,7 @@ nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
                 (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
         t->mr[2] = boot->mr[2];
 
-        NV_DEBUG(dev, "(%u) MR: %08x %08x %08x", t->id,
+        NV_DEBUG(drm, "(%u) MR: %08x %08x %08x", t->id,
                  t->mr[0], t->mr[1], t->mr[2]);
         return 0;
 }
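
GDDR3 mode registers do not encode CAS latency linearly, hence nv_mem_cl_lut_gddr3 above: the index is the latency in clocks, the value is the register field encoding (CL9 -> 1, CL12 -> 8, and so on). A standalone use of the same table, with the driver's bound check:

```c
#include <stdint.h>
#include <stdio.h>

static const uint8_t cl_lut_gddr3[16] = {
        0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11
};

int main(void)
{
        unsigned int tCL = 11;

        /* same range check nouveau_mem_gddr3_mr() does before indexing */
        if (tCL >= sizeof(cl_lut_gddr3))
                return 1;
        printf("CL%u encodes as field value %u\n", tCL, cl_lut_gddr3[tCL]);
        return 0;
}
```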
@@ -779,6 +354,8 @@ nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
                      struct nouveau_pm_memtiming *boot,
                      struct nouveau_pm_memtiming *t)
 {
+        struct nouveau_drm *drm = nouveau_drm(dev);
+
         if (len < 15) {
                 t->drive_strength = boot->drive_strength;
                 t->odt = boot->odt;
@@ -788,17 +365,17 @@ nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
         }
 
         if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
-                NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
+                NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
                 return -ERANGE;
         }
 
         if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
-                NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
+                NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
                 return -ERANGE;
         }
 
         if (t->odt > 3) {
-                NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
+                NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
                         t->id, t->odt);
                 t->odt = 0;
         }
@@ -810,7 +387,7 @@ nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
                 t->drive_strength |
                 (t->odt << 2);
 
-        NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
+        NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
         return 0;
 }
 
@@ -818,8 +395,9 @@ int
 nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
                         struct nouveau_pm_memtiming *t)
 {
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+        struct nouveau_device *device = nouveau_dev(dev);
+        struct nouveau_fb *pfb = nouveau_fb(device);
+        struct nouveau_pm *pm = nouveau_pm(dev);
         struct nouveau_pm_memtiming *boot = &pm->boot.timing;
         struct nouveau_pm_tbl_entry *e;
         u8 ver, len, *ptr, *ramcfg;
@@ -834,7 +412,7 @@ nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
 
         t->tCWL = boot->tCWL;
 
-        switch (dev_priv->card_type) {
+        switch (device->card_type) {
         case NV_40:
                 ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
                 break;
@@ -850,7 +428,7 @@ nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
                 break;
         }
 
-        switch (dev_priv->vram_type * !ret) {
+        switch (pfb->ram.type * !ret) {
         case NV_MEM_TYPE_GDDR3:
                 ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
                 break;
@@ -877,7 +455,7 @@ nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
         else
                 dll_off = !!(ramcfg[2] & 0x40);
 
-        switch (dev_priv->vram_type) {
+        switch (pfb->ram.type) {
         case NV_MEM_TYPE_GDDR3:
                 t->mr[1] &= ~0x00000040;
                 t->mr[1] |= 0x00000040 * dll_off;
@@ -895,11 +473,12 @@ nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
 void
 nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
 {
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct nouveau_device *device = nouveau_dev(dev);
+        struct nouveau_fb *pfb = nouveau_fb(device);
         u32 timing_base, timing_regs, mr_base;
         int i;
 
-        if (dev_priv->card_type >= 0xC0) {
+        if (device->card_type >= 0xC0) {
                 timing_base = 0x10f290;
                 mr_base = 0x10f300;
         } else {
@@ -909,7 +488,7 @@ nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
 
         t->id = -1;
 
-        switch (dev_priv->card_type) {
+        switch (device->card_type) {
         case NV_50:
                 timing_regs = 9;
                 break;
@@ -926,24 +505,24 @@ nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
                 return;
         }
         for(i = 0; i < timing_regs; i++)
-                t->reg[i] = nv_rd32(dev, timing_base + (0x04 * i));
+                t->reg[i] = nv_rd32(device, timing_base + (0x04 * i));
 
         t->tCWL = 0;
-        if (dev_priv->card_type < NV_C0) {
-                t->tCWL = ((nv_rd32(dev, 0x100228) & 0x0f000000) >> 24) + 1;
-        } else if (dev_priv->card_type <= NV_D0) {
-                t->tCWL = ((nv_rd32(dev, 0x10f294) & 0x00000f80) >> 7);
+        if (device->card_type < NV_C0) {
+                t->tCWL = ((nv_rd32(device, 0x100228) & 0x0f000000) >> 24) + 1;
+        } else if (device->card_type <= NV_D0) {
+                t->tCWL = ((nv_rd32(device, 0x10f294) & 0x00000f80) >> 7);
         }
 
-        t->mr[0] = nv_rd32(dev, mr_base);
-        t->mr[1] = nv_rd32(dev, mr_base + 0x04);
-        t->mr[2] = nv_rd32(dev, mr_base + 0x20);
-        t->mr[3] = nv_rd32(dev, mr_base + 0x24);
+        t->mr[0] = nv_rd32(device, mr_base);
+        t->mr[1] = nv_rd32(device, mr_base + 0x04);
+        t->mr[2] = nv_rd32(device, mr_base + 0x20);
+        t->mr[3] = nv_rd32(device, mr_base + 0x24);
 
         t->odt = 0;
         t->drive_strength = 0;
 
-        switch (dev_priv->vram_type) {
+        switch (pfb->ram.type) {
         case NV_MEM_TYPE_DDR3:
                 t->odt |= (t->mr[1] & 0x200) >> 7;
         case NV_MEM_TYPE_DDR2:
@@ -964,13 +543,15 @@ int
 nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
                  struct nouveau_pm_level *perflvl)
 {
-        struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
+        struct nouveau_drm *drm = nouveau_drm(exec->dev);
+        struct nouveau_device *device = nouveau_dev(exec->dev);
+        struct nouveau_fb *pfb = nouveau_fb(device);
         struct nouveau_pm_memtiming *info = &perflvl->timing;
         u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
         u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
         u32 mr1_dlloff;
 
-        switch (dev_priv->vram_type) {
+        switch (pfb->ram.type) {
         case NV_MEM_TYPE_DDR2:
                 tDLLK = 2000;
                 mr1_dlloff = 0x00000001;
@@ -986,12 +567,12 @@ nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
                 mr1_dlloff = 0x00000040;
                 break;
         default:
-                NV_ERROR(exec->dev, "cannot reclock unsupported memtype\n");
+                NV_ERROR(drm, "cannot reclock unsupported memtype\n");
                 return -ENODEV;
         }
 
         /* fetch current MRs */
-        switch (dev_priv->vram_type) {
+        switch (pfb->ram.type) {
         case NV_MEM_TYPE_GDDR3:
         case NV_MEM_TYPE_DDR3:
                 mr[2] = exec->mrg(exec, 2);
@@ -1058,194 +639,9 @@ nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
                 exec->mrs (exec, 0, info->mr[0] | 0x00000000);
                 exec->wait(exec, tMRD);
                 exec->wait(exec, tDLLK);
-                if (dev_priv->vram_type == NV_MEM_TYPE_GDDR3)
+                if (pfb->ram.type == NV_MEM_TYPE_GDDR3)
                         exec->precharge(exec);
         }
 
         return 0;
 }
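
nouveau_mem_exec() drives a whole memory reclock through caller-supplied callbacks (precharge/mrs/wait and friends), so one generic sequence serves several chipsets. A compilable skeleton of that contract; the struct and function names here are illustrative, only the callback shape and the DDR2 tMRD/tDLLK values follow the code above:

```c
/* Callback table in the style of nouveau_mem_exec_func: the generic
 * sequence calls these, each chipset supplies its own register pokes. */
struct mem_exec {
        void (*mrs)(struct mem_exec *, int mr, unsigned int val);
        void (*wait)(struct mem_exec *, unsigned int nsec);
};

/* DLL reset step, shaped like the tail of nouveau_mem_exec(): pulse
 * the reset bit in MR0 (bit 8 on DDR2/DDR3), then honour tMRD and
 * tDLLK before touching the memory again. */
static void dll_reset_ddr(struct mem_exec *x, unsigned int mr0)
{
        x->mrs(x, 0, mr0 | 0x100);
        x->mrs(x, 0, mr0 | 0x000);
        x->wait(x, 1000);       /* tMRD */
        x->wait(x, 2000);       /* tDLLK, DDR2 value used above */
}
```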
-
-int
-nouveau_mem_vbios_type(struct drm_device *dev)
-{
-        struct bit_entry M;
-        u8 ramcfg = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2;
-        if (!bit_table(dev, 'M', &M) && M.version == 2 && M.length >= 5) {
-                u8 *table = ROMPTR(dev, M.data[3]);
-                if (table && table[0] == 0x10 && ramcfg < table[3]) {
-                        u8 *entry = table + table[1] + (ramcfg * table[2]);
-                        switch (entry[0] & 0x0f) {
-                        case 0: return NV_MEM_TYPE_DDR2;
-                        case 1: return NV_MEM_TYPE_DDR3;
-                        case 2: return NV_MEM_TYPE_GDDR3;
-                        case 3: return NV_MEM_TYPE_GDDR5;
-                        default:
-                                break;
-                        }
-
-                }
-        }
-        return NV_MEM_TYPE_UNKNOWN;
-}
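
The RAM type comes from the board straps: bits 5:2 of register 0x101000 select a row in the VBIOS 'M' table. A standalone check of just the strap extraction (ramcfg_from_strap is a hypothetical name; the shift and mask match the code above):

```c
#include <stdint.h>
#include <stdio.h>

/* Extract the ramcfg strap index exactly as above:
 * (reg & 0x0000003c) >> 2 keeps bits 5:2. */
static unsigned int ramcfg_from_strap(uint32_t strap_reg)
{
        return (strap_reg & 0x0000003c) >> 2;
}

int main(void)
{
        printf("%u\n", ramcfg_from_strap(0x00000014)); /* -> 5 */
        return 0;
}
```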
-
-static int
-nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
-{
-        /* nothing to do */
-        return 0;
-}
-
-static int
-nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
-{
-        /* nothing to do */
-        return 0;
-}
-
-static inline void
-nouveau_mem_node_cleanup(struct nouveau_mem *node)
-{
-        if (node->vma[0].node) {
-                nouveau_vm_unmap(&node->vma[0]);
-                nouveau_vm_put(&node->vma[0]);
-        }
-
-        if (node->vma[1].node) {
-                nouveau_vm_unmap(&node->vma[1]);
-                nouveau_vm_put(&node->vma[1]);
-        }
-}
-
-static void
-nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
-                         struct ttm_mem_reg *mem)
-{
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
-        struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
-        struct drm_device *dev = dev_priv->dev;
-
-        nouveau_mem_node_cleanup(mem->mm_node);
-        vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
-}
-
-static int
-nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
-                         struct ttm_buffer_object *bo,
-                         struct ttm_placement *placement,
-                         struct ttm_mem_reg *mem)
-{
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
-        struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
-        struct drm_device *dev = dev_priv->dev;
-        struct nouveau_bo *nvbo = nouveau_bo(bo);
-        struct nouveau_mem *node;
-        u32 size_nc = 0;
-        int ret;
-
-        if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
-                size_nc = 1 << nvbo->page_shift;
-
-        ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
-                        mem->page_alignment << PAGE_SHIFT, size_nc,
-                        (nvbo->tile_flags >> 8) & 0x3ff, &node);
-        if (ret) {
-                mem->mm_node = NULL;
-                return (ret == -ENOSPC) ? 0 : ret;
-        }
-
-        node->page_shift = nvbo->page_shift;
-
-        mem->mm_node = node;
-        mem->start = node->offset >> PAGE_SHIFT;
-        return 0;
-}
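
Note the -ENOSPC handling in vram_manager_new(): a TTM memory-type manager of this era signals a soft failure by returning 0 with mem->mm_node left NULL, which tells TTM to try the buffer's next placement rather than abort. The convention in isolation (alloc_to_ttm is a hypothetical helper, standalone C):

```c
#include <errno.h>
#include <stddef.h>

/* Translate an allocator result into TTM's convention: out of space
 * is "no node, success" so the caller tries the next placement; any
 * other error is fatal. */
static int alloc_to_ttm(int ret, void *node, void **mm_node)
{
        if (ret) {
                *mm_node = NULL;
                return (ret == -ENOSPC) ? 0 : ret;
        }
        *mm_node = node;
        return 0;
}
```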
-
-void
-nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
-{
-        struct nouveau_mm *mm = man->priv;
-        struct nouveau_mm_node *r;
-        u32 total = 0, free = 0;
-
-        mutex_lock(&mm->mutex);
-        list_for_each_entry(r, &mm->nodes, nl_entry) {
-                printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
-                       prefix, r->type, ((u64)r->offset << 12),
-                       (((u64)r->offset + r->length) << 12));
-
-                total += r->length;
-                if (!r->type)
-                        free += r->length;
-        }
-        mutex_unlock(&mm->mutex);
-
-        printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
-               prefix, (u64)total << 12, (u64)free << 12);
-        printk(KERN_DEBUG "%s block: 0x%08x\n",
-               prefix, mm->block_size << 12);
-}
-
-const struct ttm_mem_type_manager_func nouveau_vram_manager = {
-        nouveau_vram_manager_init,
-        nouveau_vram_manager_fini,
-        nouveau_vram_manager_new,
-        nouveau_vram_manager_del,
-        nouveau_vram_manager_debug
-};
-
-static int
-nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
-{
-        return 0;
-}
-
-static int
-nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
-{
-        return 0;
-}
-
-static void
-nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
-                         struct ttm_mem_reg *mem)
-{
-        nouveau_mem_node_cleanup(mem->mm_node);
-        kfree(mem->mm_node);
-        mem->mm_node = NULL;
-}
-
-static int
-nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
-                         struct ttm_buffer_object *bo,
-                         struct ttm_placement *placement,
-                         struct ttm_mem_reg *mem)
-{
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-        struct nouveau_mem *node;
-
-        if (unlikely((mem->num_pages << PAGE_SHIFT) >=
-                     dev_priv->gart_info.aper_size))
-                return -ENOMEM;
-
-        node = kzalloc(sizeof(*node), GFP_KERNEL);
-        if (!node)
-                return -ENOMEM;
-        node->page_shift = 12;
-
-        mem->mm_node = node;
-        mem->start = 0;
-        return 0;
-}
-
-void
-nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
-{
-}
-
-const struct ttm_mem_type_manager_func nouveau_gart_manager = {
-        nouveau_gart_manager_init,
-        nouveau_gart_manager_fini,
-        nouveau_gart_manager_new,
-        nouveau_gart_manager_del,
-        nouveau_gart_manager_debug
-};
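
Both func tables removed here plug into TTM's per-memory-type dispatch: the driver hands one to TTM for each placement domain from its init_mem_type() hook, and ttm_bo_init_mm() then routes allocations through it. A hedged sketch of that wiring, assuming the 3.x-era TTM field name man->func and reusing the tables above; sketch_init_mem_type is an illustrative name, not the driver's hook:

```c
/* Select a manager func table per placement domain, as a driver's
 * ttm_bo_driver.init_mem_type() hook would have done in this era. */
static int
sketch_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                     struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_VRAM:
                man->func = &nouveau_vram_manager;      /* table above */
                break;
        case TTM_PL_TT:
                man->func = &nouveau_gart_manager;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
```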