Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_mem.c')
 drivers/gpu/drm/nouveau/nouveau_mem.c | 363 ++++++++++++++++++++++++++++-----
 1 file changed, 313 insertions(+), 50 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 9689d414768..a163c7c612e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -35,6 +35,8 @@
 #include "drm_sarea.h"
 #include "nouveau_drv.h"
 
+#define MIN(a,b) a < b ? a : b
+
 /*
  * NV10-NV40 tiling helpers
  */
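A note on the MIN() macro added above: it is neither parenthesized nor single-evaluation, so an expansion such as MIN(a, b) + 1 parses as a < b ? a : (b + 1), and arguments with side effects are evaluated twice. The only caller this patch adds passes trivial operands (recordlen and 21, further down), but a minimal sketch of the conventional guarded form, using only standard C (MIN_SAFE is an illustrative name, not from the driver):

    /* Fully parenthesized, so it survives surrounding operators; it still
     * evaluates each argument twice, which the kernel's typed min_t() in
     * <linux/kernel.h> additionally avoids via temporaries. */
    #define MIN_SAFE(a, b)  (((a) < (b)) ? (a) : (b))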
@@ -47,18 +49,14 @@ nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+	struct nouveau_tile_reg *tile = &dev_priv->tile[i];
 
 	tile->addr = addr;
 	tile->size = size;
 	tile->used = !!pitch;
 	nouveau_fence_unref((void **)&tile->fence);
 
-	if (!pfifo->cache_flush(dev))
-		return;
-
 	pfifo->reassign(dev, false);
-	pfifo->cache_flush(dev);
 	pfifo->cache_pull(dev, false);
 
 	nouveau_wait_for_idle(dev);
@@ -76,34 +74,36 @@ nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL;
-	int i;
+	struct nouveau_tile_reg *found = NULL;
+	unsigned long i, flags;
 
-	spin_lock(&dev_priv->tile.lock);
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
 	for (i = 0; i < pfb->num_tiles; i++) {
-		if (tile[i].used)
+		struct nouveau_tile_reg *tile = &dev_priv->tile[i];
+
+		if (tile->used)
 			/* Tile region in use. */
 			continue;
 
-		if (tile[i].fence &&
-		    !nouveau_fence_signalled(tile[i].fence, NULL))
+		if (tile->fence &&
+		    !nouveau_fence_signalled(tile->fence, NULL))
 			/* Pending tile region. */
 			continue;
 
-		if (max(tile[i].addr, addr) <
-		    min(tile[i].addr + tile[i].size, addr + size))
+		if (max(tile->addr, addr) <
+		    min(tile->addr + tile->size, addr + size))
 			/* Kill an intersecting tile region. */
 			nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
 
 		if (pitch && !found) {
 			/* Free tile region. */
 			nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
-			found = &tile[i];
+			found = tile;
 		}
 	}
 
-	spin_unlock(&dev_priv->tile.lock);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
 	return found;
 }
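The search loop above relies on the usual half-open interval test: the new region [addr, addr + size) intersects an existing tile exactly when the larger of the two start addresses is below the smaller of the two end addresses. A self-contained sketch of that predicate (the helper name is illustrative, not part of the driver):

    #include <stdbool.h>
    #include <stdint.h>

    /* True when [a, a + asz) and [b, b + bsz) share at least one byte. */
    static bool ranges_overlap(uint32_t a, uint32_t asz,
                               uint32_t b, uint32_t bsz)
    {
            uint32_t start = (a > b) ? a : b;                 /* max of starts */
            uint32_t end_a = a + asz, end_b = b + bsz;
            uint32_t end   = (end_a < end_b) ? end_a : end_b; /* min of ends */

            return start < end;
    }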
@@ -169,8 +169,9 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 			virt += (end - pte);
 
 			while (pte < end) {
-				nv_wo32(dev, pgt, pte++, offset_l);
-				nv_wo32(dev, pgt, pte++, offset_h);
+				nv_wo32(pgt, (pte * 4) + 0, offset_l);
+				nv_wo32(pgt, (pte * 4) + 4, offset_h);
+				pte += 2;
 			}
 		}
 	}
@@ -203,8 +204,10 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
 		pages -= (end - pte);
 		virt += (end - pte) << 15;
 
-		while (pte < end)
-			nv_wo32(dev, pgt, pte++, 0);
+		while (pte < end) {
+			nv_wo32(pgt, (pte * 4), 0);
+			pte++;
+		}
 	}
 	dev_priv->engine.instmem.flush(dev);
 
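The two page-table hunks above track an nv_wo32() interface change made outside this file: the old form took the drm_device plus a word index into the object, the new form takes a byte offset, so 32-bit word N now lives at byte N * 4. NV50 PTEs are 64 bits wide, low word at +0 and high word at +4, which is why the bind loop now advances pte by two words per entry. A sketch of the two addressing conventions over a plain buffer (the wrapper names are hypothetical):

    #include <stdint.h>
    #include <string.h>

    /* Word-indexed store: the object is viewed as an array of 32-bit words. */
    static void wr32_wordidx(uint32_t *obj, uint32_t idx, uint32_t val)
    {
            obj[idx] = val;
    }

    /* Byte-offset store: word N sits at byte offset N * 4. */
    static void wr32_byteoff(void *obj, uint32_t off, uint32_t val)
    {
            memcpy((char *)obj + off, &val, sizeof(val));
    }

wr32_wordidx(pgt, pte, lo) and wr32_byteoff(pgt, pte * 4, lo) touch the same four bytes.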
@@ -218,7 +221,7 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
  * Cleanup everything
  */
 void
-nouveau_mem_close(struct drm_device *dev)
+nouveau_mem_vram_fini(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
@@ -229,6 +232,19 @@ nouveau_mem_close(struct drm_device *dev)
 
 	nouveau_ttm_global_release(dev_priv);
 
+	if (dev_priv->fb_mtrr >= 0) {
+		drm_mtrr_del(dev_priv->fb_mtrr,
+			     pci_resource_start(dev->pdev, 1),
+			     pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
+		dev_priv->fb_mtrr = -1;
+	}
+}
+
+void
+nouveau_mem_gart_fini(struct drm_device *dev)
+{
+	nouveau_sgdma_takedown(dev);
+
 	if (drm_core_has_AGP(dev) && dev->agp) {
 		struct drm_agp_mem *entry, *tempe;
 
@@ -248,13 +264,6 @@ nouveau_mem_close(struct drm_device *dev)
 		dev->agp->acquired = 0;
 		dev->agp->enabled = 0;
 	}
-
-	if (dev_priv->fb_mtrr) {
-		drm_mtrr_del(dev_priv->fb_mtrr,
-			     pci_resource_start(dev->pdev, 1),
-			     pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
-		dev_priv->fb_mtrr = -1;
-	}
 }
 
 static uint32_t
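Relocating the MTRR teardown also fixes its guard. drm_mtrr_add() returns a non-negative MTRR slot on success (0 is a legitimate slot) or a negative error code, so the old truthiness test `if (dev_priv->fb_mtrr)` would both skip freeing slot 0 and attempt to free a negative "handle" after a failed add. Testing `>= 0` against a -1 sentinel is the usual handle-or-negative-errno pattern; schematically (base and len stand in for the BAR1 start and length):

    int mtrr = drm_mtrr_add(base, len, DRM_MTRR_WC);  /* slot index or -errno */

    if (mtrr >= 0) {                                  /* 0 is a valid slot */
            drm_mtrr_del(mtrr, base, len, DRM_MTRR_WC);
            mtrr = -1;                                /* back to "unset" */
    }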
@@ -305,8 +314,62 @@ nouveau_mem_detect_nforce(struct drm_device *dev)
 	return 0;
 }
 
-/* returns the amount of FB ram in bytes */
-int
+static void
+nv50_vram_preinit(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int i, parts, colbits, rowbitsa, rowbitsb, banks;
+	u64 rowsize, predicted;
+	u32 r0, r4, rt, ru;
+
+	r0 = nv_rd32(dev, 0x100200);
+	r4 = nv_rd32(dev, 0x100204);
+	rt = nv_rd32(dev, 0x100250);
+	ru = nv_rd32(dev, 0x001540);
+	NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
+	for (i = 0, parts = 0; i < 8; i++) {
+		if (ru & (0x00010000 << i))
+			parts++;
+	}
+
+	colbits  =  (r4 & 0x0000f000) >> 12;
+	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+	rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+	banks    = ((r4 & 0x01000000) ? 8 : 4);
+
+	rowsize = parts * banks * (1 << colbits) * 8;
+	predicted = rowsize << rowbitsa;
+	if (r0 & 0x00000004)
+		predicted += rowsize << rowbitsb;
+
+	if (predicted != dev_priv->vram_size) {
+		NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
+			(u32)(dev_priv->vram_size >> 20));
+		NV_WARN(dev, "we calculated %dMiB VRAM\n",
+			(u32)(predicted >> 20));
+	}
+
+	dev_priv->vram_rblock_size = rowsize >> 12;
+	if (rt & 1)
+		dev_priv->vram_rblock_size *= 3;
+
+	NV_DEBUG(dev, "rblock %lld bytes\n",
+		 (u64)dev_priv->vram_rblock_size << 12);
+}
+
+static void
+nvaa_vram_preinit(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	/* To our knowledge, there's no large scale reordering of pages
+	 * that occurs on IGP chipsets.
+	 */
+	dev_priv->vram_rblock_size = 1;
+}
+
+static int
 nouveau_mem_detect(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
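The prediction in nv50_vram_preinit() multiplies out to row size = partitions * banks * 2^colbits * 8 bytes, then scales by 2^rowbits for each populated rank. A worked example with made-up register contents (not taken from any real board):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical memcfg: 2 partitions, 8 banks, 10 column bits,
             * 12 row bits on rank A, second rank absent. */
            uint64_t parts = 2, banks = 8, colbits = 10, rowbitsa = 12;

            uint64_t rowsize   = parts * banks * (1ULL << colbits) * 8;
            uint64_t predicted = rowsize << rowbitsa;

            /* Prints: rowsize 131072 B, predicted 512 MiB, rblock 32 pages */
            printf("rowsize %llu B, predicted %llu MiB, rblock %llu pages\n",
                   (unsigned long long)rowsize,
                   (unsigned long long)(predicted >> 20),
                   (unsigned long long)(rowsize >> 12));
            return 0;
    }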
@@ -325,9 +388,18 @@ nouveau_mem_detect(struct drm_device *dev)
 		dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
 		dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
 		dev_priv->vram_size &= 0xffffffff00ll;
-		if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) {
+
+		switch (dev_priv->chipset) {
+		case 0xaa:
+		case 0xac:
+		case 0xaf:
 			dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
 			dev_priv->vram_sys_base <<= 12;
+			nvaa_vram_preinit(dev);
+			break;
+		default:
+			nv50_vram_preinit(dev);
+			break;
 		}
 	} else {
 		dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
@@ -345,6 +417,33 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
 	return -ENOMEM;
 }
 
+#if __OS_HAS_AGP
+static unsigned long
+get_agp_mode(struct drm_device *dev, unsigned long mode)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	/*
+	 * FW seems to be broken on nv18, it makes the card lock up
+	 * randomly.
+	 */
+	if (dev_priv->chipset == 0x18)
+		mode &= ~PCI_AGP_COMMAND_FW;
+
+	/*
+	 * AGP mode set in the command line.
+	 */
+	if (nouveau_agpmode > 0) {
+		bool agpv3 = mode & 0x8;
+		int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;
+
+		mode = (mode & ~0x7) | (rate & 0x7);
+	}
+
+	return mode;
+}
+#endif
+
 int
 nouveau_mem_reset_agp(struct drm_device *dev)
 {
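get_agp_mode() maps the user-visible nouveau_agpmode option onto the low three RATE bits of the AGP command word. Bit 3 of the status/command word flags AGP 3.0, whose rate field counts in units of 4x (1 = 4x, 2 = 8x), hence the divide by four; AGP 2.x encodes 1x/2x/4x directly as bits 0-2. The conversion in isolation (the function name is illustrative):

    /* speed: 1/2/4 on AGP 2.x, 4/8 on AGP 3.0. */
    static unsigned long agp_rate_bits(unsigned long mode, int speed)
    {
            int agpv3 = mode & 0x8;                /* AGP 3.0 flag */
            int rate = agpv3 ? speed / 4 : speed;  /* v3 counts in 4x units */

            return (mode & ~0x7) | (rate & 0x7);
    }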
@@ -355,7 +454,8 @@ nouveau_mem_reset_agp(struct drm_device *dev)
 	/* First of all, disable fast writes, otherwise if it's
 	 * already enabled in the AGP bridge and we disable the card's
 	 * AGP controller we might be locking ourselves out of it. */
-	if (nv_rd32(dev, NV04_PBUS_PCI_NV_19) & PCI_AGP_COMMAND_FW) {
+	if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
+	     dev->agp->mode) & PCI_AGP_COMMAND_FW) {
 		struct drm_agp_info info;
 		struct drm_agp_mode mode;
 
@@ -363,7 +463,7 @@ nouveau_mem_reset_agp(struct drm_device *dev)
 		if (ret)
 			return ret;
 
-		mode.mode = info.mode & ~PCI_AGP_COMMAND_FW;
+		mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
 		ret = drm_agp_enable(dev, mode);
 		if (ret)
 			return ret;
@@ -418,7 +518,7 @@ nouveau_mem_init_agp(struct drm_device *dev)
 	}
 
 	/* see agp.h for the AGPSTAT_* modes available */
-	mode.mode = info.mode;
+	mode.mode = get_agp_mode(dev, info.mode);
 	ret = drm_agp_enable(dev, mode);
 	if (ret) {
 		NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
@@ -433,24 +533,27 @@
 }
 
 int
-nouveau_mem_init(struct drm_device *dev)
+nouveau_mem_vram_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
-	int ret, dma_bits = 32;
-
-	dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
-	dev_priv->gart_info.type = NOUVEAU_GART_NONE;
+	int ret, dma_bits;
 
 	if (dev_priv->card_type >= NV_50 &&
 	    pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
 		dma_bits = 40;
+	else
+		dma_bits = 32;
 
 	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
-	if (ret) {
-		NV_ERROR(dev, "Error setting DMA mask: %d\n", ret);
+	if (ret)
 		return ret;
-	}
+
+	ret = nouveau_mem_detect(dev);
+	if (ret)
+		return ret;
+
+	dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
 
 	ret = nouveau_ttm_global_init(dev_priv);
 	if (ret)
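Two things move in the hunk above: the DMA mask is chosen before anything else (NV50+ parts can drive 40-bit bus addresses, everything older is limited to 32), and nouveau_mem_vram_init() now calls nouveau_mem_detect() itself, since the VRAM size it discovers feeds the sizing done just below. DMA_BIT_MASK() is the stock kernel helper, approximately:

    /* As in <linux/dma-mapping.h>: an all-ones mask n bits wide,
     * e.g. DMA_BIT_MASK(40) == 0xff_ffff_ffff. */
    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))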
@@ -465,8 +568,6 @@ nouveau_mem_init(struct drm_device *dev)
 		return ret;
 	}
 
-	spin_lock_init(&dev_priv->tile.lock);
-
 	dev_priv->fb_available_size = dev_priv->vram_size;
 	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
 	if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
@@ -474,7 +575,16 @@
 			pci_resource_len(dev->pdev, 1);
 	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
 
-	/* remove reserved space at end of vram from available amount */
+	/* reserve space at end of VRAM for PRAMIN */
+	if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
+	    dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
+		dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
+	else
+	if (dev_priv->card_type >= NV_40)
+		dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
+	else
+		dev_priv->ramin_rsvd_vram = (512 * 1024);
+
 	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
 	dev_priv->fb_aper_free = dev_priv->fb_available_size;
 
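The new ladder keeps the tail of VRAM out of TTM's hands for PRAMIN (the instance-memory aperture): 2 MiB on NV40/47/49/4b, 1 MiB on the rest of the NV40 family, 512 KiB on earlier chips. For a hypothetical 256 MiB chipset-0x40 board the arithmetic works out as:

    uint64_t vram       = 256ULL << 20;      /* hypothetical board */
    uint64_t ramin_rsvd =   2ULL << 20;      /* chipset 0x40 */
    uint64_t available  = vram - ramin_rsvd; /* 254 MiB usable */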
@@ -495,9 +605,23 @@
 		nouveau_bo_ref(NULL, &dev_priv->vga_ram);
 	}
 
-	/* GART */
+	dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
+					 pci_resource_len(dev->pdev, 1),
+					 DRM_MTRR_WC);
+	return 0;
+}
+
+int
+nouveau_mem_gart_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+	int ret;
+
+	dev_priv->gart_info.type = NOUVEAU_GART_NONE;
+
 #if !defined(__powerpc__) && !defined(__ia64__)
-	if (drm_device_is_agp(dev) && dev->agp && !nouveau_noagp) {
+	if (drm_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
 		ret = nouveau_mem_init_agp(dev);
 		if (ret)
 			NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
@@ -523,11 +647,150 @@
 		return ret;
 	}
 
-	dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
-					 pci_resource_len(dev->pdev, 1),
-					 DRM_MTRR_WC);
-
 	return 0;
 }
 
+void
+nouveau_mem_timing_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
+	struct nvbios *bios = &dev_priv->vbios;
+	struct bit_entry P;
+	u8 tUNK_0, tUNK_1, tUNK_2;
+	u8 tRP;		/* Byte 3 */
+	u8 tRAS;	/* Byte 5 */
+	u8 tRFC;	/* Byte 7 */
+	u8 tRC;		/* Byte 9 */
+	u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
+	u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
+	u8 *mem = NULL, *entry;
+	int i, recordlen, entries;
+
+	if (bios->type == NVBIOS_BIT) {
+		if (bit_table(dev, 'P', &P))
+			return;
+
+		if (P.version == 1)
+			mem = ROMPTR(bios, P.data[4]);
+		else
+		if (P.version == 2)
+			mem = ROMPTR(bios, P.data[8]);
+		else {
+			NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
+		}
+	} else {
+		NV_DEBUG(dev, "BMP version too old for memory\n");
+		return;
+	}
+
+	if (!mem) {
+		NV_DEBUG(dev, "memory timing table pointer invalid\n");
+		return;
+	}
 
+	if (mem[0] != 0x10) {
+		NV_WARN(dev, "memory timing table 0x%02x unknown\n", mem[0]);
+		return;
+	}
+
+	/* validate record length */
+	entries = mem[2];
+	recordlen = mem[3];
+	if (recordlen < 15) {
+		NV_ERROR(dev, "mem timing table length unknown: %d\n", mem[3]);
+		return;
+	}
+
+	/* parse vbios entries into common format */
+	memtimings->timing =
+		kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
+	if (!memtimings->timing)
+		return;
+
+	entry = mem + mem[1];
+	for (i = 0; i < entries; i++, entry += recordlen) {
+		struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
+		if (entry[0] == 0)
+			continue;
+
+		tUNK_18 = 1;
+		tUNK_19 = 1;
+		tUNK_20 = 0;
+		tUNK_21 = 0;
+		switch (MIN(recordlen, 21)) {
+		case 21:
+			tUNK_21 = entry[21];
+		case 20:
+			tUNK_20 = entry[20];
+		case 19:
+			tUNK_19 = entry[19];
+		case 18:
+			tUNK_18 = entry[18];
+		default:
+			tUNK_0  = entry[0];
+			tUNK_1  = entry[1];
+			tUNK_2  = entry[2];
+			tRP     = entry[3];
+			tRAS    = entry[5];
+			tRFC    = entry[7];
+			tRC     = entry[9];
+			tUNK_10 = entry[10];
+			tUNK_11 = entry[11];
+			tUNK_12 = entry[12];
+			tUNK_13 = entry[13];
+			tUNK_14 = entry[14];
+			break;
+		}
+
+		timing->reg_100220 = (tRC << 24 | tRFC << 16 | tRAS << 8 | tRP);
+
+		/* XXX: I don't trust the -1's and +1's... they must come
+		 * from somewhere! */
+		timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 |
+				      tUNK_18 << 16 |
+				      (tUNK_1 + tUNK_19 + 1) << 8 |
+				      (tUNK_2 - 1));
+
+		timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
+		if (recordlen > 19) {
+			timing->reg_100228 += (tUNK_19 - 1) << 24;
+		} else {
+			timing->reg_100228 += tUNK_12 << 24;
+		}
+
+		/* XXX: reg_10022c */
+
+		timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
+				      tUNK_13 << 8 | tUNK_13);
+
+		/* XXX: +6? */
+		timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
+		if (tUNK_10 > tUNK_11) {
+			timing->reg_100234 += tUNK_10 << 16;
+		} else {
+			timing->reg_100234 += tUNK_11 << 16;
+		}
+
+		/* XXX: reg_100238, reg_10023c */
+		NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
+			 timing->reg_100220, timing->reg_100224,
+			 timing->reg_100228, timing->reg_10022c);
+		NV_DEBUG(dev, "         230: %08x %08x %08x %08x\n",
+			 timing->reg_100230, timing->reg_100234,
+			 timing->reg_100238, timing->reg_10023c);
+	}
+
+	memtimings->nr_timing = entries;
+	memtimings->supported = true;
+}
+
+void
+nouveau_mem_timing_fini(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings;
+
+	kfree(mem->timing);
+}
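As the parser above reads it, a version-0x10 timing table is a four-byte header followed by packed records: byte 0 holds the version (must be 0x10), byte 1 the offset of the first entry, byte 2 the entry count, and byte 3 the record length, which must be at least 15; bytes 15 through 21 are optional, which is what the deliberately falling-through switch on MIN(recordlen, 21) handles. Restated as a layout sketch, assuming only what the code actually dereferences:

    #include <stdint.h>

    struct nv_memtiming_header {
            uint8_t version;      /* mem[0]: 0x10 */
            uint8_t first_entry;  /* mem[1]: byte offset of entry 0 */
            uint8_t entry_count;  /* mem[2] */
            uint8_t record_len;   /* mem[3]: >= 15; 19+/21+ unlock extras */
    };

    /* Entry i sits at mem + first_entry + i * record_len; entry[0] == 0
     * marks an unused record.  tRP, tRAS, tRFC and tRC live at entry
     * bytes 3, 5, 7 and 9 respectively. */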