aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/nouveau/nv20_graph.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/nouveau/nv20_graph.c')
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c244
1 files changed, 177 insertions, 67 deletions
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 12ab9cd56eca..8464b76798d5 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -32,6 +32,10 @@
32#define NV34_GRCTX_SIZE (18140) 32#define NV34_GRCTX_SIZE (18140)
33#define NV35_36_GRCTX_SIZE (22396) 33#define NV35_36_GRCTX_SIZE (22396)
34 34
35static int nv20_graph_register(struct drm_device *);
36static int nv30_graph_register(struct drm_device *);
37static void nv20_graph_isr(struct drm_device *);
38
35static void 39static void
36nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) 40nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
37{ 41{
@@ -425,9 +429,21 @@ nv20_graph_destroy_context(struct nouveau_channel *chan)
425 struct drm_device *dev = chan->dev; 429 struct drm_device *dev = chan->dev;
426 struct drm_nouveau_private *dev_priv = dev->dev_private; 430 struct drm_nouveau_private *dev_priv = dev->dev_private;
427 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 431 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
432 unsigned long flags;
428 433
429 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); 434 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
435 pgraph->fifo_access(dev, false);
436
437 /* Unload the context if it's the currently active one */
438 if (pgraph->channel(dev) == chan)
439 pgraph->unload_context(dev);
440
441 pgraph->fifo_access(dev, true);
442 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
443
444 /* Free the context resources */
430 nv_wo32(pgraph->ctx_table, chan->id * 4, 0); 445 nv_wo32(pgraph->ctx_table, chan->id * 4, 0);
446 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
431} 447}
432 448
433int 449int
@@ -496,24 +512,27 @@ nv20_graph_rdi(struct drm_device *dev)
496} 512}
497 513
/* Program PGRAPH's view of tiling region <i> from the saved PFB state.
 *
 * The limit/pitch/address values held in dev_priv->tile.reg[i] are
 * written twice: once to the directly mapped NV20_PGRAPH_TLIMIT/TSIZE/
 * TILE registers, and once through the RDI indirect-access window
 * (RDI_INDEX selects the slot, RDI_DATA supplies the value).
 * NOTE(review): presumably the RDI copy seeds the per-channel context
 * image so the setup survives a context switch — confirm against the
 * RDI usage in nv20_graph_rdi()/nv20_graph_init().
 */
void
nv20_graph_set_tile_region(struct drm_device *dev, int i)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	/* Directly mapped PGRAPH tile registers. */
	nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
	nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
	nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);

	/* Same three values again via the RDI indirect window. */
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit);
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch);
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr);

	/* Z-compression state is only programmed on NV_20-type cards. */
	if (dev_priv->card_type == NV_20) {
		nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp);
	}
}
518 537
519int 538int
@@ -560,6 +579,13 @@ nv20_graph_init(struct drm_device *dev)
560 579
561 nv20_graph_rdi(dev); 580 nv20_graph_rdi(dev);
562 581
582 ret = nv20_graph_register(dev);
583 if (ret) {
584 nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
585 return ret;
586 }
587
588 nouveau_irq_register(dev, 12, nv20_graph_isr);
563 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); 589 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
564 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); 590 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
565 591
@@ -571,16 +597,17 @@ nv20_graph_init(struct drm_device *dev)
571 nv_wr32(dev, 0x40009C , 0x00000040); 597 nv_wr32(dev, 0x40009C , 0x00000040);
572 598
573 if (dev_priv->chipset >= 0x25) { 599 if (dev_priv->chipset >= 0x25) {
574 nv_wr32(dev, 0x400890, 0x00080000); 600 nv_wr32(dev, 0x400890, 0x00a8cfff);
575 nv_wr32(dev, 0x400610, 0x304B1FB6); 601 nv_wr32(dev, 0x400610, 0x304B1FB6);
576 nv_wr32(dev, 0x400B80, 0x18B82880); 602 nv_wr32(dev, 0x400B80, 0x1cbd3883);
577 nv_wr32(dev, 0x400B84, 0x44000000); 603 nv_wr32(dev, 0x400B84, 0x44000000);
578 nv_wr32(dev, 0x400098, 0x40000080); 604 nv_wr32(dev, 0x400098, 0x40000080);
579 nv_wr32(dev, 0x400B88, 0x000000ff); 605 nv_wr32(dev, 0x400B88, 0x000000ff);
606
580 } else { 607 } else {
581 nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */ 608 nv_wr32(dev, 0x400880, 0x0008c7df);
582 nv_wr32(dev, 0x400094, 0x00000005); 609 nv_wr32(dev, 0x400094, 0x00000005);
583 nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */ 610 nv_wr32(dev, 0x400B80, 0x45eae20e);
584 nv_wr32(dev, 0x400B84, 0x24000000); 611 nv_wr32(dev, 0x400B84, 0x24000000);
585 nv_wr32(dev, 0x400098, 0x00000040); 612 nv_wr32(dev, 0x400098, 0x00000040);
586 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038); 613 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
@@ -591,14 +618,8 @@ nv20_graph_init(struct drm_device *dev)
591 618
592 /* Turn all the tiling regions off. */ 619 /* Turn all the tiling regions off. */
593 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) 620 for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
594 nv20_graph_set_region_tiling(dev, i, 0, 0, 0); 621 nv20_graph_set_tile_region(dev, i);
595 622
596 for (i = 0; i < 8; i++) {
597 nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
598 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
599 nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
600 nv_rd32(dev, 0x100300 + i * 4));
601 }
602 nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324)); 623 nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
603 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C); 624 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
604 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324)); 625 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));
@@ -642,6 +663,9 @@ nv20_graph_takedown(struct drm_device *dev)
642 struct drm_nouveau_private *dev_priv = dev->dev_private; 663 struct drm_nouveau_private *dev_priv = dev->dev_private;
643 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 664 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
644 665
666 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
667 nouveau_irq_unregister(dev, 12);
668
645 nouveau_gpuobj_ref(NULL, &pgraph->ctx_table); 669 nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
646} 670}
647 671
@@ -684,9 +708,16 @@ nv30_graph_init(struct drm_device *dev)
684 return ret; 708 return ret;
685 } 709 }
686 710
711 ret = nv30_graph_register(dev);
712 if (ret) {
713 nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
714 return ret;
715 }
716
687 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, 717 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
688 pgraph->ctx_table->pinst >> 4); 718 pgraph->ctx_table->pinst >> 4);
689 719
720 nouveau_irq_register(dev, 12, nv20_graph_isr);
690 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); 721 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
691 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); 722 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
692 723
@@ -724,7 +755,7 @@ nv30_graph_init(struct drm_device *dev)
724 755
725 /* Turn all the tiling regions off. */ 756 /* Turn all the tiling regions off. */
726 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) 757 for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
727 nv20_graph_set_region_tiling(dev, i, 0, 0, 0); 758 nv20_graph_set_tile_region(dev, i);
728 759
729 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100); 760 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
730 nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF); 761 nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
@@ -744,46 +775,125 @@ nv30_graph_init(struct drm_device *dev)
744 return 0; 775 return 0;
745} 776}
746 777
747struct nouveau_pgraph_object_class nv20_graph_grclass[] = { 778static int
748 { 0x0030, false, NULL }, /* null */ 779nv20_graph_register(struct drm_device *dev)
749 { 0x0039, false, NULL }, /* m2mf */ 780{
750 { 0x004a, false, NULL }, /* gdirect */ 781 struct drm_nouveau_private *dev_priv = dev->dev_private;
751 { 0x009f, false, NULL }, /* imageblit (nv12) */ 782
752 { 0x008a, false, NULL }, /* ifc */ 783 if (dev_priv->engine.graph.registered)
753 { 0x0089, false, NULL }, /* sifm */ 784 return 0;
754 { 0x0062, false, NULL }, /* surf2d */ 785
755 { 0x0043, false, NULL }, /* rop */ 786 NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
756 { 0x0012, false, NULL }, /* beta1 */ 787 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
757 { 0x0072, false, NULL }, /* beta4 */ 788 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
758 { 0x0019, false, NULL }, /* cliprect */ 789 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
759 { 0x0044, false, NULL }, /* pattern */ 790 NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
760 { 0x009e, false, NULL }, /* swzsurf */ 791 NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
761 { 0x0096, false, NULL }, /* celcius */ 792 NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
762 { 0x0097, false, NULL }, /* kelvin (nv20) */ 793 NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
763 { 0x0597, false, NULL }, /* kelvin (nv25) */ 794 NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
764 {} 795 NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
765}; 796 NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
766 797 NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
767struct nouveau_pgraph_object_class nv30_graph_grclass[] = { 798 NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
768 { 0x0030, false, NULL }, /* null */ 799 NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
769 { 0x0039, false, NULL }, /* m2mf */ 800 NVOBJ_CLASS(dev, 0x0096, GR); /* celcius */
770 { 0x004a, false, NULL }, /* gdirect */ 801
771 { 0x009f, false, NULL }, /* imageblit (nv12) */ 802 /* kelvin */
772 { 0x008a, false, NULL }, /* ifc */ 803 if (dev_priv->chipset < 0x25)
773 { 0x038a, false, NULL }, /* ifc (nv30) */ 804 NVOBJ_CLASS(dev, 0x0097, GR);
774 { 0x0089, false, NULL }, /* sifm */ 805 else
775 { 0x0389, false, NULL }, /* sifm (nv30) */ 806 NVOBJ_CLASS(dev, 0x0597, GR);
776 { 0x0062, false, NULL }, /* surf2d */ 807
777 { 0x0362, false, NULL }, /* surf2d (nv30) */ 808 /* nvsw */
778 { 0x0043, false, NULL }, /* rop */ 809 NVOBJ_CLASS(dev, 0x506e, SW);
779 { 0x0012, false, NULL }, /* beta1 */ 810 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
780 { 0x0072, false, NULL }, /* beta4 */ 811
781 { 0x0019, false, NULL }, /* cliprect */ 812 dev_priv->engine.graph.registered = true;
782 { 0x0044, false, NULL }, /* pattern */ 813 return 0;
783 { 0x039e, false, NULL }, /* swzsurf */ 814}
784 { 0x0397, false, NULL }, /* rankine (nv30) */ 815
785 { 0x0497, false, NULL }, /* rankine (nv35) */ 816static int
786 { 0x0697, false, NULL }, /* rankine (nv34) */ 817nv30_graph_register(struct drm_device *dev)
787 {} 818{
788}; 819 struct drm_nouveau_private *dev_priv = dev->dev_private;
789 820
821 if (dev_priv->engine.graph.registered)
822 return 0;
823
824 NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
825 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
826 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
827 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
828 NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
829 NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
830 NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
831 NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
832 NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
833 NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
834 NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
835 NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
836 NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
837 NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
838 NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
839 NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
840 NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */
841
842 /* rankine */
843 if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
844 NVOBJ_CLASS(dev, 0x0397, GR);
845 else
846 if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
847 NVOBJ_CLASS(dev, 0x0697, GR);
848 else
849 if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
850 NVOBJ_CLASS(dev, 0x0497, GR);
851
852 /* nvsw */
853 NVOBJ_CLASS(dev, 0x506e, SW);
854 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
855
856 dev_priv->engine.graph.registered = true;
857 return 0;
858}
859
/* PGRAPH interrupt service routine for NV20/NV30 (registered on
 * interrupt source 12 by nv20_graph_init()/nv30_graph_init()).
 *
 * Drains NV03_PGRAPH_INTR in a loop: for each pending status word it
 * decodes the trapped channel/subchannel/method, gives software-method
 * handlers a chance to consume ILLEGAL_MTHD errors, acknowledges the
 * interrupt, and emits a rate-limited diagnostic for anything left.
 */
static void
nv20_graph_isr(struct drm_device *dev)
{
	u32 stat;

	/* Loop until no interrupt bits remain pending. */
	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
		/* TRAPPED_ADDR packs channel id, subchannel and method
		 * offset into one register; unpack the three fields. */
		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
		u32 chid = (addr & 0x01f00000) >> 20;
		u32 subc = (addr & 0x00070000) >> 16;
		u32 mthd = (addr & 0x00001ffc);
		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
		/* Object class currently bound to this subchannel. */
		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
		u32 show = stat;

		/* An ILLEGAL_MTHD error may really be a software method:
		 * if nouveau_gpuobj_mthd_call2() handles it (returns 0),
		 * suppress the ERROR bit from the diagnostic output. */
		if (stat & NV_PGRAPH_INTR_ERROR) {
			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
					show &= ~NV_PGRAPH_INTR_ERROR;
			}
		}

		/* Ack everything we saw; the NV04_PGRAPH_FIFO write
		 * presumably re-enables fifo access — TODO confirm. */
		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);

		/* Rate-limited dump of whatever was not handled above. */
		if (show && nouveau_ratelimit()) {
			NV_INFO(dev, "PGRAPH -");
			nouveau_bitfield_print(nv10_graph_intr, show);
			printk(" nsource:");
			nouveau_bitfield_print(nv04_graph_nsource, nsource);
			printk(" nstatus:");
			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
				     "mthd 0x%04x data 0x%08x\n",
				chid, subc, class, mthd, data);
		}
	}
}