path: root/drivers/gpu/drm/nouveau/nouveau_irq.c
author		Ben Skeggs <bskeggs@redhat.com>	2010-11-02 20:56:05 -0400
committer	Ben Skeggs <bskeggs@redhat.com>	2010-12-03 00:11:39 -0500
commit		5178d40dff23b5eef7f0a3be2411fa6a347e750d (patch)
tree		67b58fc1d23ed15074f2d80d97aae366ade1a558 /drivers/gpu/drm/nouveau/nouveau_irq.c
parent		25b85783da8c71e577c676173e9d60a1b7e6113a (diff)
drm/nouveau: move PFIFO ISR into nv04_fifo.c
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_irq.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_irq.c	203
1 file changed, 0 insertions(+), 203 deletions(-)
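
The effect of this patch is that the PFIFO interrupt service routine no longer lives in the shared nouveau_irq.c dispatcher; the fifo engine code owns it instead. As a rough illustration of that "engine registers its own ISR" pattern, the self-contained C sketch below models an engine hooking its handler into a per-bit table. All fake_* names and the 32-entry table are stand-ins invented for this sketch, not the nouveau driver's actual API; the only detail taken from the driver is that PFIFO's PMC status bit, NV_PMC_INTR_0_PFIFO_PENDING, is 0x00000100, i.e. bit 8.

/* Illustrative sketch only: models the direction this series takes,
 * where each engine registers its own interrupt handler.  All names
 * here are invented stand-ins, not nouveau's real API. */
#include <stdio.h>

struct fake_drm_device { int dummy; };              /* stand-in for struct drm_device */
typedef void (*fake_irq_handler_t)(struct fake_drm_device *);

static fake_irq_handler_t fake_irq_handler[32];     /* one slot per PMC status bit */

static void
fake_irq_register(int bit, fake_irq_handler_t handler)
{
	fake_irq_handler[bit] = handler;
}

/* What the fifo engine would provide: the body of the removed
 * nouveau_fifo_irq_handler() moves behind a hook like this. */
static void
fake_fifo_isr(struct fake_drm_device *dev)
{
	(void)dev;
	printf("PFIFO interrupt handled by the fifo engine's own ISR\n");
}

int
main(void)
{
	struct fake_drm_device dev = { 0 };

	fake_irq_register(8, fake_fifo_isr);        /* PFIFO pending = PMC bit 8 */
	if (fake_irq_handler[8])
		fake_irq_handler[8](&dev);
	return 0;
}
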
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 6c30669ac0b6..16f42f774a9e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -69,204 +69,6 @@ nouveau_irq_uninstall(struct drm_device *dev)
 	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
 }
 
-static bool
-nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = NULL;
-	struct nouveau_gpuobj *obj;
-	unsigned long flags;
-	const int subc = (addr >> 13) & 0x7;
-	const int mthd = addr & 0x1ffc;
-	bool handled = false;
-	u32 engine;
-
-	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
-		chan = dev_priv->channels.ptr[chid];
-	if (unlikely(!chan))
-		goto out;
-
-	switch (mthd) {
-	case 0x0000: /* bind object to subchannel */
-		obj = nouveau_ramht_find(chan, data);
-		if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
-			break;
-
-		chan->sw_subchannel[subc] = obj->class;
-		engine = 0x0000000f << (subc * 4);
-
-		nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
-		handled = true;
-		break;
-	default:
-		engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
-		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
-			break;
-
-		if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc],
-					      mthd, data))
-			handled = true;
-		break;
-	}
-
-out:
-	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
-	return handled;
-}
-
-static void
-nouveau_fifo_irq_handler(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
-	uint32_t status, reassign;
-	int cnt = 0;
-
-	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
-	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
-		uint32_t chid, get;
-
-		nv_wr32(dev, NV03_PFIFO_CACHES, 0);
-
-		chid = engine->fifo.channel_id(dev);
-		get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
-
-		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
-			uint32_t mthd, data;
-			int ptr;
-
-			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
-			 * wrapping on my G80 chips, but CACHE1 isn't big
-			 * enough for this much data.. Tests show that it
-			 * wraps around to the start at GET=0x800.. No clue
-			 * as to why..
-			 */
-			ptr = (get & 0x7ff) >> 2;
-
-			if (dev_priv->card_type < NV_40) {
-				mthd = nv_rd32(dev,
-					NV04_PFIFO_CACHE1_METHOD(ptr));
-				data = nv_rd32(dev,
-					NV04_PFIFO_CACHE1_DATA(ptr));
-			} else {
-				mthd = nv_rd32(dev,
-					NV40_PFIFO_CACHE1_METHOD(ptr));
-				data = nv_rd32(dev,
-					NV40_PFIFO_CACHE1_DATA(ptr));
-			}
-
-			if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
-				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
-					     "Mthd 0x%04x Data 0x%08x\n",
-					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
-					data);
-			}
-
-			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
-			nv_wr32(dev, NV03_PFIFO_INTR_0,
-				NV_PFIFO_INTR_CACHE_ERROR);
-
-			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
-				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
-			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
-			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
-				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
-			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
-
-			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
-				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
-			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-
-			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
-		}
-
-		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
-			u32 dma_get = nv_rd32(dev, 0x003244);
-			u32 dma_put = nv_rd32(dev, 0x003240);
-			u32 push = nv_rd32(dev, 0x003220);
-			u32 state = nv_rd32(dev, 0x003228);
-
-			if (dev_priv->card_type == NV_50) {
-				u32 ho_get = nv_rd32(dev, 0x003328);
-				u32 ho_put = nv_rd32(dev, 0x003320);
-				u32 ib_get = nv_rd32(dev, 0x003334);
-				u32 ib_put = nv_rd32(dev, 0x003330);
-
-				if (nouveau_ratelimit())
-					NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
-					     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
-					     "State 0x%08x Push 0x%08x\n",
-						chid, ho_get, dma_get, ho_put,
-						dma_put, ib_get, ib_put, state,
-						push);
-
-				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
-				nv_wr32(dev, 0x003364, 0x00000000);
-				if (dma_get != dma_put || ho_get != ho_put) {
-					nv_wr32(dev, 0x003244, dma_put);
-					nv_wr32(dev, 0x003328, ho_put);
-				} else
-				if (ib_get != ib_put) {
-					nv_wr32(dev, 0x003334, ib_put);
-				}
-			} else {
-				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
-					     "Put 0x%08x State 0x%08x Push 0x%08x\n",
-					chid, dma_get, dma_put, state, push);
-
-				if (dma_get != dma_put)
-					nv_wr32(dev, 0x003244, dma_put);
-			}
-
-			nv_wr32(dev, 0x003228, 0x00000000);
-			nv_wr32(dev, 0x003220, 0x00000001);
-			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
-			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
-		}
-
-		if (status & NV_PFIFO_INTR_SEMAPHORE) {
-			uint32_t sem;
-
-			status &= ~NV_PFIFO_INTR_SEMAPHORE;
-			nv_wr32(dev, NV03_PFIFO_INTR_0,
-				NV_PFIFO_INTR_SEMAPHORE);
-
-			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
-			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
-
-			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
-			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-		}
-
-		if (dev_priv->card_type == NV_50) {
-			if (status & 0x00000010) {
-				nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
-				status &= ~0x00000010;
-				nv_wr32(dev, 0x002100, 0x00000010);
-			}
-		}
-
-		if (status) {
-			if (nouveau_ratelimit())
-				NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
-					status, chid);
-			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
-			status = 0;
-		}
-
-		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
-	}
-
-	if (status) {
-		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
-		nv_wr32(dev, 0x2140, 0);
-		nv_wr32(dev, 0x140, 0);
-	}
-
-	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
-}
-
 static struct nouveau_bitfield nstatus_names[] =
 {
 	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
@@ -1146,11 +948,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
 
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
-	if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
-		nouveau_fifo_irq_handler(dev);
-		status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
-	}
-
 	if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
 		if (dev_priv->card_type >= NV_50)
 			nv50_pgraph_irq_handler(dev);
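
The second hunk removes the hard-coded PFIFO branch from nouveau_irq_handler(), leaving the top-level handler to dispatch on whatever per-bit handlers the engines register. Continuing the stand-in names from the sketch above (again an illustration under the same assumptions, not the driver's actual dispatch code), such a table-driven loop might look like this:

/* Illustrative continuation of the earlier sketch: dispatch each pending
 * PMC status bit to the handler registered for it, if any. */
static void
fake_irq_dispatch(struct fake_drm_device *dev, unsigned int status)
{
	int bit;

	for (bit = 0; bit < 32 && status; bit++) {
		if (!(status & (1u << bit)) || !fake_irq_handler[bit])
			continue;
		fake_irq_handler[bit](dev);   /* e.g. fake_fifo_isr for bit 8 */
		status &= ~(1u << bit);
	}
}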