Diffstat (limited to 'drivers/gpu/drm/nouveau/nv10_graph.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_graph.c | 203
1 file changed, 147 insertions(+), 56 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 8e68c9731159..8c92edb7bbcd 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -26,6 +26,10 @@
 #include "drm.h"
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
+#include "nouveau_util.h"
+
+static int nv10_graph_register(struct drm_device *);
+static void nv10_graph_isr(struct drm_device *);
 
 #define NV10_FIFO_NUMBER 32
 
@@ -786,15 +790,13 @@ nv10_graph_unload_context(struct drm_device *dev)
 	return 0;
 }
 
-void
+static void
 nv10_graph_context_switch(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_channel *chan = NULL;
 	int chid;
 
-	pgraph->fifo_access(dev, false);
 	nouveau_wait_for_idle(dev);
 
 	/* If previous context is valid, we need to save it */
@@ -802,11 +804,9 @@ nv10_graph_context_switch(struct drm_device *dev)
 
 	/* Load context for next channel */
 	chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (chan && chan->pgraph_ctx)
 		nv10_graph_load_context(chan);
-
-	pgraph->fifo_access(dev, true);
 }
 
 #define NV_WRITE_CTX(reg, val) do { \
@@ -833,7 +833,7 @@ nv10_graph_channel(struct drm_device *dev)
 	if (chid >= dev_priv->engine.fifo.channels)
 		return NULL;
 
-	return dev_priv->fifos[chid];
+	return dev_priv->channels.ptr[chid];
 }
 
 int nv10_graph_create_context(struct nouveau_channel *chan)
@@ -875,37 +875,54 @@ int nv10_graph_create_context(struct nouveau_channel *chan)
 
 void nv10_graph_destroy_context(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pgraph->fifo_access(dev, false);
+
+	/* Unload the context if it's the currently active one */
+	if (pgraph->channel(dev) == chan)
+		pgraph->unload_context(dev);
 
+	/* Free the context resources */
 	kfree(pgraph_ctx);
 	chan->pgraph_ctx = NULL;
+
+	pgraph->fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 }
 
 void
-nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			     uint32_t size, uint32_t pitch)
+nv10_graph_set_tile_region(struct drm_device *dev, int i)
 {
-	uint32_t limit = max(1u, addr + size) - 1;
-
-	if (pitch)
-		addr |= 1 << 31;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
-	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit);
-	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch);
-	nv_wr32(dev, NV10_PGRAPH_TILE(i), addr);
+	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit);
+	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch);
+	nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr);
 }
 
 int nv10_graph_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
-	int i;
+	int ret, i;
 
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
 			~NV_PMC_ENABLE_PGRAPH);
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
 			 NV_PMC_ENABLE_PGRAPH);
 
+	ret = nv10_graph_register(dev);
+	if (ret)
+		return ret;
+
+	nouveau_irq_register(dev, 12, nv10_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -928,7 +945,7 @@ int nv10_graph_init(struct drm_device *dev)
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
-		nv10_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv10_graph_set_tile_region(dev, i);
 
 	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
 	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
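Note: with this change the per-region tiling parameters are no longer passed in by the caller; nv10_graph_set_tile_region() reads them from dev_priv->tile.reg[i], so nv10_graph_init() only names the region index when it turns the regions off above. A minimal sketch of the fields this code assumes the tile-region record to carry (the real struct nouveau_tile_reg is defined elsewhere in the driver and has more members; only the three read here are shown, and the name below is illustrative):

struct nouveau_tile_reg_sketch {	/* illustrative only, not the driver's definition */
	uint32_t addr;	/* region base; the old code OR'd in an enable bit when pitch != 0,
			 * presumably now folded in by whichever code fills tile.reg[] */
	uint32_t limit;	/* last byte covered by the region */
	uint32_t pitch;	/* row pitch; 0 leaves the region disabled */
};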
@@ -948,17 +965,17 @@
 
 void nv10_graph_takedown(struct drm_device *dev)
 {
+	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+	nouveau_irq_unregister(dev, 12);
 }
 
 static int
-nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
-			   int mthd, uint32_t data)
+nv17_graph_mthd_lma_window(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	struct drm_device *dev = chan->dev;
 	struct graph_state *ctx = chan->pgraph_ctx;
 	struct pipe_state *pipe = &ctx->pipe_state;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
 	uint32_t xfmode0, xfmode1;
 	int i;
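Note: PGRAPH interrupt handling now goes through the per-bit dispatcher: nv10_graph_init() hooks PMC interrupt bit 12 with nouveau_irq_register(dev, 12, nv10_graph_isr), and nv10_graph_takedown() masks NV03_PGRAPH_INTR_EN and drops the hook again. A minimal sketch of how another engine could use the same mechanism, assuming only the handler signature visible in this patch (the engine name and bit number below are made up for illustration):

/* Hypothetical engine using the per-bit IRQ hook; not part of this patch. */
static void example_engine_isr(struct drm_device *dev)
{
	/* read and acknowledge this engine's interrupt status registers here */
}

static int example_engine_init(struct drm_device *dev)
{
	nouveau_irq_register(dev, 13, example_engine_isr);	/* bit number is illustrative */
	return 0;
}

static void example_engine_takedown(struct drm_device *dev)
{
	nouveau_irq_unregister(dev, 13);
}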
@@ -1025,18 +1042,14 @@ nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
 
 	nouveau_wait_for_idle(dev);
 
-	pgraph->fifo_access(dev, true);
-
 	return 0;
 }
 
 static int
-nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
-			   int mthd, uint32_t data)
+nv17_graph_mthd_lma_enable(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 
 	nouveau_wait_for_idle(dev);
 
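Note: the software-method handlers drop the per-call fifo_access() juggling and switch to the signature used by the new object/method tables: int handler(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data), returning 0 once the method has been consumed. A minimal sketch of a handler and its registration, assuming only the NVOBJ_MTHD usage shown further down in this patch (the method offset and handler name are illustrative):

/* Hypothetical software-method handler; mirrors the signature of
 * nv17_graph_mthd_lma_window()/nv17_graph_mthd_lma_enable() above. */
static int example_mthd_handler(struct nouveau_channel *chan,
				u32 class, u32 mthd, u32 data)
{
	struct drm_device *dev = chan->dev;

	nouveau_wait_for_idle(dev);	/* same quiesce step the LMA methods use */
	/* ... poke the relevant PGRAPH registers based on 'data' ... */
	return 0;			/* 0 = handled; keeps the ERROR bit out of the ISR log */
}

/* Registration, as done for class 0x0099 below (offset shown here is illustrative): */
/*	NVOBJ_MTHD(dev, 0x0099, 0x1664, example_mthd_handler); */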
@@ -1045,40 +1058,118 @@ nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
 	nv_wr32(dev, 0x004006b0,
 		nv_rd32(dev, 0x004006b0) | 0x8 << 24);
 
-	pgraph->fifo_access(dev, true);
+	return 0;
+}
+
+static int
+nv10_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->engine.graph.registered)
+		return 0;
+
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+	NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */
+	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+	NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
+	NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
+	NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
+	NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */
+
+	/* celcius */
+	if (dev_priv->chipset <= 0x10) {
+		NVOBJ_CLASS(dev, 0x0056, GR);
+	} else
+	if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
+		NVOBJ_CLASS(dev, 0x0096, GR);
+	} else {
+		NVOBJ_CLASS(dev, 0x0099, GR);
+		NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
+	}
 
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	dev_priv->engine.graph.registered = true;
 	return 0;
 }
 
-static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = {
-	{ 0x1638, nv17_graph_mthd_lma_window },
-	{ 0x163c, nv17_graph_mthd_lma_window },
-	{ 0x1640, nv17_graph_mthd_lma_window },
-	{ 0x1644, nv17_graph_mthd_lma_window },
-	{ 0x1658, nv17_graph_mthd_lma_enable },
+struct nouveau_bitfield nv10_graph_intr[] = {
+	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
+	{ NV_PGRAPH_INTR_ERROR,  "ERROR"  },
 	{}
 };
 
-struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
-	{ 0x0030, false, NULL }, /* null */
-	{ 0x0039, false, NULL }, /* m2mf */
-	{ 0x004a, false, NULL }, /* gdirect */
-	{ 0x005f, false, NULL }, /* imageblit */
-	{ 0x009f, false, NULL }, /* imageblit (nv12) */
-	{ 0x008a, false, NULL }, /* ifc */
-	{ 0x0089, false, NULL }, /* sifm */
-	{ 0x0062, false, NULL }, /* surf2d */
-	{ 0x0043, false, NULL }, /* rop */
-	{ 0x0012, false, NULL }, /* beta1 */
-	{ 0x0072, false, NULL }, /* beta4 */
-	{ 0x0019, false, NULL }, /* cliprect */
-	{ 0x0044, false, NULL }, /* pattern */
-	{ 0x0052, false, NULL }, /* swzsurf */
-	{ 0x0093, false, NULL }, /* surf3d */
-	{ 0x0094, false, NULL }, /* tex_tri */
-	{ 0x0095, false, NULL }, /* multitex_tri */
-	{ 0x0056, false, NULL }, /* celcius (nv10) */
-	{ 0x0096, false, NULL }, /* celcius (nv11) */
-	{ 0x0099, false, nv17_graph_celsius_mthds }, /* celcius (nv17) */
+struct nouveau_bitfield nv10_graph_nstatus[] =
+{
+	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
+	{ NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
+	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
+	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
 	{}
 };
+
+static void
+nv10_graph_isr(struct drm_device *dev)
+{
+	u32 stat;
+
+	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+		u32 chid = (addr & 0x01f00000) >> 20;
+		u32 subc = (addr & 0x00070000) >> 16;
+		u32 mthd = (addr & 0x00001ffc);
+		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
+		u32 show = stat;
+
+		if (stat & NV_PGRAPH_INTR_ERROR) {
+			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+					show &= ~NV_PGRAPH_INTR_ERROR;
+			}
+		}
+
+		if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+			stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+			show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+			nv10_graph_context_switch(dev);
+		}
+
+		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+		if (show && nouveau_ratelimit()) {
+			NV_INFO(dev, "PGRAPH -");
+			nouveau_bitfield_print(nv10_graph_intr, show);
+			printk(" nsource:");
+			nouveau_bitfield_print(nv04_graph_nsource, nsource);
+			printk(" nstatus:");
+			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+				     "mthd 0x%04x data 0x%08x\n",
+				chid, subc, class, mthd, data);
+		}
+	}
+}
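Note: the logging at the bottom of nv10_graph_isr() relies on the small bitfield helper pulled in via nouveau_util.h at the top of the file. That helper is not defined in this patch; a rough sketch of the shape this code assumes (mask/name pairs terminated by an empty entry, printed as the names of whichever bits are set) is shown below, with sketch-specific names to make clear it is not the driver's actual definition:

/* Sketch of the helper assumed by the tables and calls above; the real
 * definitions live in nouveau_util.h/nouveau_util.c, not in this file. */
struct nouveau_bitfield_sketch {
	u32 mask;
	const char *name;
};

static void
bitfield_print_sketch(const struct nouveau_bitfield_sketch *bf, u32 value)
{
	while (bf->name) {
		if (value & bf->mask) {
			printk(" %s", bf->name);	/* print the name of each set bit */
			value &= ~bf->mask;
		}
		bf++;
	}
	if (value)
		printk(" (unknown bits 0x%08x)", value);	/* anything not in the table */
}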