author    Ben Skeggs <bskeggs@redhat.com>   2009-12-11 04:24:15 -0500
committer Dave Airlie <airlied@redhat.com>  2009-12-11 06:29:34 -0500
commit    6ee738610f41b59733f63718f0bdbcba7d3a3f12
tree      eccb9f07671998c50a1bc606a54cd6f82ba43e0a /drivers/gpu/drm/nouveau/nv50_fifo.c
parent    d1ede145cea25c5b6d2ebb19b167af14e374bb45
drm/nouveau: Add DRM driver for NVIDIA GPUs
This adds a drm/kms staging non-API stable driver for GPUs from NVIDIA.

This driver is a KMS-based driver and requires a compatible nouveau
userspace libdrm and nouveau X.org driver.

This driver requires firmware files not available in this kernel tree;
interested parties can find them via the nouveau project git archive.

This driver is reverse engineered, and is in no way supported by NVIDIA.

Support for nearly the complete range of nvidia hw from nv04->g80 (nv50)
is available, and the kms driver should support driving nearly all output
types (displayport is still under development) along with supporting
suspend/resume.

This work is all from the upstream nouveau project found at
nouveau.freedesktop.org.

The original authors list from the nouveau git tree is:

Anssi Hannula <anssi.hannula@iki.fi>
Ben Skeggs <bskeggs@redhat.com>
Francisco Jerez <currojerez@riseup.net>
Maarten Maathuis <madman2003@gmail.com>
Marcin Kościelnicki <koriakin@0x04.net>
Matthew Garrett <mjg@redhat.com>
Matt Parnell <mparnell@gmail.com>
Patrice Mandin <patmandin@gmail.com>
Pekka Paalanen <pq@iki.fi>
Xavier Chantry <shiningxc@gmail.com>

along with project founder Stephane Marchesin <marchesin@icps.u-strasbg.fr>

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nv50_fifo.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c  494
1 file changed, 494 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
new file mode 100644
index 000000000000..77ae1aaa0bce
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -0,0 +1,494 @@
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"

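/*
 * PFIFO schedules channels from a "thingo" (in later nouveau code: the
 * playlist), a buffer holding the IDs of all runnable channels.  Two
 * buffers are kept and alternated between on each rebuild, presumably
 * so the hardware can keep reading one while the other is rewritten.
 */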
struct nv50_fifo_priv {
	struct nouveau_gpuobj_ref *thingo[2];
	int cur_thingo;
};

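/* G80 is the original NV50 chipset (0x50); on later G8x parts the RAMFC
 * instance is recorded with finer granularity (>>8 instead of >>12) and
 * a few extra 0x34xx registers appear to be context-switched.
 */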
#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)

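/*
 * Rebuild the run list from every channel that currently has a RAMFC
 * and point PFIFO at it.  The register meanings (0x32f4 = list address
 * >> 12, 0x32ec = entry count, 0x2500 = kick) come from reverse
 * engineering and are best guesses.
 */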
static void
nv50_fifo_init_thingo(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;
	struct nouveau_gpuobj_ref *cur;
	int i, nr;

	NV_DEBUG(dev, "\n");

	cur = priv->thingo[priv->cur_thingo];
	priv->cur_thingo = !priv->cur_thingo;

	/* We never schedule channel 0 or 127 */
	dev_priv->engine.instmem.prepare_access(dev, true);
	for (i = 1, nr = 0; i < 127; i++) {
		if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc)
			nv_wo32(dev, cur->gpuobj, nr++, i);
	}
	dev_priv->engine.instmem.finish_access(dev);

	nv_wr32(dev, 0x32f4, cur->instance >> 12);
	nv_wr32(dev, 0x32ec, nr);
	nv_wr32(dev, 0x2500, 0x101);
}

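/* The "nt" flag ("no thingo", presumably) lets callers batch several
 * enable/disable calls and rebuild the run list once at the end.
 */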
static int
nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->fifos[channel];
	uint32_t inst;

	NV_DEBUG(dev, "ch%d\n", channel);

	if (!chan->ramfc)
		return -EINVAL;

	if (IS_G80)
		inst = chan->ramfc->instance >> 12;
	else
		inst = chan->ramfc->instance >> 8;
	nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel),
		inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);

	if (!nt)
		nv50_fifo_init_thingo(dev);
	return 0;
}

static void
nv50_fifo_channel_disable(struct drm_device *dev, int channel, bool nt)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t inst;

	NV_DEBUG(dev, "ch%d, nt=%d\n", channel, nt);

	if (IS_G80)
		inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
	else
		inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
	nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst);

	if (!nt)
		nv50_fifo_init_thingo(dev);
}

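/* Reset PFIFO by pulsing its bit in the master engine-enable register. */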
static void
nv50_fifo_init_reset(struct drm_device *dev)
{
	uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;

	NV_DEBUG(dev, "\n");

	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
}

static void
nv50_fifo_init_intr(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
	nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
}

static void
nv50_fifo_init_context_table(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i;

	NV_DEBUG(dev, "\n");

	for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
		if (dev_priv->fifos[i])
			nv50_fifo_channel_enable(dev, i, true);
		else
			nv50_fifo_channel_disable(dev, i, true);
	}

	nv50_fifo_init_thingo(dev);
}

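/* Apparently a value lifted from mmio traces of the NVIDIA binary
 * driver (hence, presumably, the __nv suffix); what register 0x250c
 * controls is unknown.
 */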
static void
nv50_fifo_init_regs__nv(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, 0x250c, 0x6f3cfc34);
}

static void
nv50_fifo_init_regs(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, 0x2500, 0);
	nv_wr32(dev, 0x3250, 0);
	nv_wr32(dev, 0x3220, 0);
	nv_wr32(dev, 0x3204, 0);
	nv_wr32(dev, 0x3210, 0);
	nv_wr32(dev, 0x3270, 0);

	/* Enable dummy channels setup by nv50_instmem.c */
	nv50_fifo_channel_enable(dev, 0, true);
	nv50_fifo_channel_enable(dev, 127, true);
}

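/* Called both at load time and on re-init (e.g. resume); if the priv
 * struct already exists the run list buffers survived, so only the
 * hardware state needs reprogramming.
 */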
int
nv50_fifo_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_fifo_priv *priv;
	int ret;

	NV_DEBUG(dev, "\n");

	priv = dev_priv->engine.fifo.priv;
	if (priv) {
		priv->cur_thingo = !priv->cur_thingo;
		goto just_reset;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	dev_priv->engine.fifo.priv = priv;

	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
				     NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]);
	if (ret) {
		NV_ERROR(dev, "error creating thingo0: %d\n", ret);
		return ret;
	}

	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
				     NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]);
	if (ret) {
		NV_ERROR(dev, "error creating thingo1: %d\n", ret);
		return ret;
	}

just_reset:
	nv50_fifo_init_reset(dev);
	nv50_fifo_init_intr(dev);
	nv50_fifo_init_context_table(dev);
	nv50_fifo_init_regs__nv(dev);
	nv50_fifo_init_regs(dev);
	dev_priv->engine.fifo.enable(dev);
	dev_priv->engine.fifo.reassign(dev, true);

	return 0;
}

void
nv50_fifo_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;

	NV_DEBUG(dev, "\n");

	if (!priv)
		return;

	nouveau_gpuobj_ref_del(dev, &priv->thingo[0]);
	nouveau_gpuobj_ref_del(dev, &priv->thingo[1]);

	dev_priv->engine.fifo.priv = NULL;
	kfree(priv);
}

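/* ID of the channel currently resident in PFIFO's CACHE1. */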
int
nv50_fifo_channel_id(struct drm_device *dev)
{
	return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
			NV50_PFIFO_CACHE1_PUSH1_CHID_MASK;
}

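/*
 * On G80 the RAMFC and the CACHE1 save area live at fixed offsets
 * inside the channel's RAMIN block, so fake gpuobjs are wrapped around
 * them; on G84+ both are allocated separately and the first two words
 * of RAMIN record the channel ID and RAMFC address.
 */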
int
nv50_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramfc = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (IS_G80) {
		uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start;
		uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start;

		ret = nouveau_gpuobj_new_fake(dev, ramin_poffset, ramin_voffset,
					      0x100, NVOBJ_FLAG_ZERO_ALLOC |
					      NVOBJ_FLAG_ZERO_FREE, &ramfc,
					      &chan->ramfc);
		if (ret)
			return ret;

		ret = nouveau_gpuobj_new_fake(dev, ramin_poffset + 0x0400,
					      ramin_voffset + 0x0400, 4096,
					      0, NULL, &chan->cache);
		if (ret)
			return ret;
	} else {
		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256,
					     NVOBJ_FLAG_ZERO_ALLOC |
					     NVOBJ_FLAG_ZERO_FREE,
					     &chan->ramfc);
		if (ret)
			return ret;
		ramfc = chan->ramfc->gpuobj;

		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 256,
					     0, &chan->cache);
		if (ret)
			return ret;
	}

	dev_priv->engine.instmem.prepare_access(dev, true);

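	/* Initial RAMFC contents: pusher PUT/GET at the start of the
	 * push buffer, DMA objects for the pusher and RAMHT, plus fixed
	 * values presumably taken from mmio traces.
	 */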
	nv_wo32(dev, ramfc, 0x08/4, chan->pushbuf_base);
	nv_wo32(dev, ramfc, 0x10/4, chan->pushbuf_base);
	nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
	nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
	nv_wo32(dev, ramfc, 0x3c/4, 0x00086078);
	nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
	nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
	nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
	nv_wo32(dev, ramfc, 0x7c/4, 0x30000001);
	nv_wo32(dev, ramfc, 0x78/4, 0x00000000);
	nv_wo32(dev, ramfc, 0x4c/4, 0xffffffff);

	if (!IS_G80) {
		nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
		nv_wo32(dev, chan->ramin->gpuobj, 1,
			chan->ramfc->instance >> 8);

		nv_wo32(dev, ramfc, 0x88/4, chan->cache->instance >> 10);
		nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12);
	}

	dev_priv->engine.instmem.finish_access(dev);

	ret = nv50_fifo_channel_enable(dev, chan->id, false);
	if (ret) {
		NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
		nouveau_gpuobj_ref_del(dev, &chan->ramfc);
		return ret;
	}

	return 0;
}

void
nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	nouveau_gpuobj_ref_del(dev, &chan->ramfc);
	nouveau_gpuobj_ref_del(dev, &chan->cache);

	nv50_fifo_channel_disable(dev, chan->id, false);

	/* Dummy channel, also used on ch 127 */
	if (chan->id == 0)
		nv50_fifo_channel_disable(dev, 127, false);
}

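/*
 * Load a channel's RAMFC into the PFIFO context registers, replay any
 * methods saved from CACHE1, and make the channel current.  The
 * register <-> RAMFC offset mapping below is from reverse engineering.
 */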
int
nv50_fifo_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
	struct nouveau_gpuobj *cache = chan->cache->gpuobj;
	int ptr, cnt;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	dev_priv->engine.instmem.prepare_access(dev, false);

	nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4));
	nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4));
	nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4));
	nv_wr32(dev, 0x3320, nv_ro32(dev, ramfc, 0x0c/4));
	nv_wr32(dev, 0x3244, nv_ro32(dev, ramfc, 0x10/4));
	nv_wr32(dev, 0x3328, nv_ro32(dev, ramfc, 0x14/4));
	nv_wr32(dev, 0x3368, nv_ro32(dev, ramfc, 0x18/4));
	nv_wr32(dev, 0x336c, nv_ro32(dev, ramfc, 0x1c/4));
	nv_wr32(dev, 0x3370, nv_ro32(dev, ramfc, 0x20/4));
	nv_wr32(dev, 0x3374, nv_ro32(dev, ramfc, 0x24/4));
	nv_wr32(dev, 0x3378, nv_ro32(dev, ramfc, 0x28/4));
	nv_wr32(dev, 0x337c, nv_ro32(dev, ramfc, 0x2c/4));
	nv_wr32(dev, 0x3228, nv_ro32(dev, ramfc, 0x30/4));
	nv_wr32(dev, 0x3364, nv_ro32(dev, ramfc, 0x34/4));
	nv_wr32(dev, 0x32a0, nv_ro32(dev, ramfc, 0x38/4));
	nv_wr32(dev, 0x3224, nv_ro32(dev, ramfc, 0x3c/4));
	nv_wr32(dev, 0x324c, nv_ro32(dev, ramfc, 0x40/4));
	nv_wr32(dev, 0x2044, nv_ro32(dev, ramfc, 0x44/4));
	nv_wr32(dev, 0x322c, nv_ro32(dev, ramfc, 0x48/4));
	nv_wr32(dev, 0x3234, nv_ro32(dev, ramfc, 0x4c/4));
	nv_wr32(dev, 0x3340, nv_ro32(dev, ramfc, 0x50/4));
	nv_wr32(dev, 0x3344, nv_ro32(dev, ramfc, 0x54/4));
	nv_wr32(dev, 0x3280, nv_ro32(dev, ramfc, 0x58/4));
	nv_wr32(dev, 0x3254, nv_ro32(dev, ramfc, 0x5c/4));
	nv_wr32(dev, 0x3260, nv_ro32(dev, ramfc, 0x60/4));
	nv_wr32(dev, 0x3264, nv_ro32(dev, ramfc, 0x64/4));
	nv_wr32(dev, 0x3268, nv_ro32(dev, ramfc, 0x68/4));
	nv_wr32(dev, 0x326c, nv_ro32(dev, ramfc, 0x6c/4));
	nv_wr32(dev, 0x32e4, nv_ro32(dev, ramfc, 0x70/4));
	nv_wr32(dev, 0x3248, nv_ro32(dev, ramfc, 0x74/4));
	nv_wr32(dev, 0x2088, nv_ro32(dev, ramfc, 0x78/4));
	nv_wr32(dev, 0x2058, nv_ro32(dev, ramfc, 0x7c/4));
	nv_wr32(dev, 0x2210, nv_ro32(dev, ramfc, 0x80/4));

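	/* Replay the method/data pairs drained into the cache object by
	 * nv50_fifo_unload_context().
	 */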
	cnt = nv_ro32(dev, ramfc, 0x84/4);
	for (ptr = 0; ptr < cnt; ptr++) {
		nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
			nv_ro32(dev, cache, (ptr * 2) + 0));
		nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
			nv_ro32(dev, cache, (ptr * 2) + 1));
	}
	nv_wr32(dev, 0x3210, cnt << 2);
	nv_wr32(dev, 0x3270, 0);

	/* guessing that all the 0x34xx regs aren't on NV50 */
	if (!IS_G80) {
		nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4));
		nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4));
		nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4));
		nv_wr32(dev, 0x3408, nv_ro32(dev, ramfc, 0x94/4));
		nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4));
	}

	dev_priv->engine.instmem.finish_access(dev);

	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
	return 0;
}

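/*
 * Inverse of nv50_fifo_load_context(): save the PFIFO context
 * registers of the active channel back into its RAMFC, drain CACHE1,
 * and leave the dummy channel (127) current.
 */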
int
nv50_fifo_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_gpuobj *ramfc, *cache;
	struct nouveau_channel *chan = NULL;
	int chid, get, put, ptr;

	NV_DEBUG(dev, "\n");

	chid = pfifo->channel_id(dev);
	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
		return 0;

	chan = dev_priv->fifos[chid];
	if (!chan) {
		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
		return -EINVAL;
	}
	NV_DEBUG(dev, "ch%d\n", chan->id);
	ramfc = chan->ramfc->gpuobj;
	cache = chan->cache->gpuobj;

	dev_priv->engine.instmem.prepare_access(dev, true);

	nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330));
	nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334));
	nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240));
	nv_wo32(dev, ramfc, 0x0c/4, nv_rd32(dev, 0x3320));
	nv_wo32(dev, ramfc, 0x10/4, nv_rd32(dev, 0x3244));
	nv_wo32(dev, ramfc, 0x14/4, nv_rd32(dev, 0x3328));
	nv_wo32(dev, ramfc, 0x18/4, nv_rd32(dev, 0x3368));
	nv_wo32(dev, ramfc, 0x1c/4, nv_rd32(dev, 0x336c));
	nv_wo32(dev, ramfc, 0x20/4, nv_rd32(dev, 0x3370));
	nv_wo32(dev, ramfc, 0x24/4, nv_rd32(dev, 0x3374));
	nv_wo32(dev, ramfc, 0x28/4, nv_rd32(dev, 0x3378));
	nv_wo32(dev, ramfc, 0x2c/4, nv_rd32(dev, 0x337c));
	nv_wo32(dev, ramfc, 0x30/4, nv_rd32(dev, 0x3228));
	nv_wo32(dev, ramfc, 0x34/4, nv_rd32(dev, 0x3364));
	nv_wo32(dev, ramfc, 0x38/4, nv_rd32(dev, 0x32a0));
	nv_wo32(dev, ramfc, 0x3c/4, nv_rd32(dev, 0x3224));
	nv_wo32(dev, ramfc, 0x40/4, nv_rd32(dev, 0x324c));
	nv_wo32(dev, ramfc, 0x44/4, nv_rd32(dev, 0x2044));
	nv_wo32(dev, ramfc, 0x48/4, nv_rd32(dev, 0x322c));
	nv_wo32(dev, ramfc, 0x4c/4, nv_rd32(dev, 0x3234));
	nv_wo32(dev, ramfc, 0x50/4, nv_rd32(dev, 0x3340));
	nv_wo32(dev, ramfc, 0x54/4, nv_rd32(dev, 0x3344));
	nv_wo32(dev, ramfc, 0x58/4, nv_rd32(dev, 0x3280));
	nv_wo32(dev, ramfc, 0x5c/4, nv_rd32(dev, 0x3254));
	nv_wo32(dev, ramfc, 0x60/4, nv_rd32(dev, 0x3260));
	nv_wo32(dev, ramfc, 0x64/4, nv_rd32(dev, 0x3264));
	nv_wo32(dev, ramfc, 0x68/4, nv_rd32(dev, 0x3268));
	nv_wo32(dev, ramfc, 0x6c/4, nv_rd32(dev, 0x326c));
	nv_wo32(dev, ramfc, 0x70/4, nv_rd32(dev, 0x32e4));
	nv_wo32(dev, ramfc, 0x74/4, nv_rd32(dev, 0x3248));
	nv_wo32(dev, ramfc, 0x78/4, nv_rd32(dev, 0x2088));
	nv_wo32(dev, ramfc, 0x7c/4, nv_rd32(dev, 0x2058));
	nv_wo32(dev, ramfc, 0x80/4, nv_rd32(dev, 0x2210));

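	/* Drain entries still pending in CACHE1 into the channel's
	 * cache object; GET/PUT index 512 method/data slots.
	 */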
	put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
	get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
	ptr = 0;
	while (put != get) {
		nv_wo32(dev, cache, ptr++,
			nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
		nv_wo32(dev, cache, ptr++,
			nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
		get = (get + 1) & 0x1ff;
	}

	/* guessing that all the 0x34xx regs aren't on NV50 */
	if (!IS_G80) {
		nv_wo32(dev, ramfc, 0x84/4, ptr >> 1);
		nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c));
		nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400));
		nv_wo32(dev, ramfc, 0x90/4, nv_rd32(dev, 0x3404));
		nv_wo32(dev, ramfc, 0x94/4, nv_rd32(dev, 0x3408));
		nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410));
	}

	dev_priv->engine.instmem.finish_access(dev);

	/*XXX: probably reload ch127 (NULL) state back too */
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
	return 0;
}