author		Dave Airlie <airlied@redhat.com>	2012-05-24 05:17:16 -0400
committer	Dave Airlie <airlied@redhat.com>	2012-05-24 05:55:34 -0400
commit		8c914028f5ddaa417b7d0f4b7fdc24caceaa8043 (patch)
tree		da4be094751a91df13833a50a724eb7774f099ff
parent		41ceeeb25d5be06660a040e2fc99d6405dfc07f8 (diff)
parent		af3289e96383a60f5d3096afeb5579b837aad5e0 (diff)
Merge branch 'drm-nouveau-next' of git://anongit.freedesktop.org/git/nouveau/linux-2.6 into drm-core-next
Ben was distracted: "Apologies for being really late with this, feel free
to bash me in the future so I remember on time!

Overview:
- improvements to reclocking (especially memory) on nva3+
- kepler accel support (if you have blob ucode)
- better inter-channel synchronisation on nv84+
- async ttm buffer moves on nv84+ (earlier cards don't have a non-PGRAPH
  engine that's useful)"

* 'drm-nouveau-next' of git://anongit.freedesktop.org/git/nouveau/linux-2.6: (60 commits)
  drm/nouveau/nvd9: Fix GPIO initialisation sequence.
  drm/nouveau: Unregister switcheroo client on exit
  drm/nouveau: Check dsm on switcheroo unregister
  drm/nouveau: fix a minor annoyance in an output string
  drm/nouveau: turn a BUG into a WARN
  drm/nv50: decode PGRAPH DATA_ERROR = 0x24
  drm/nouveau/disp: fix dithering not being enabled on some eDP macbooks
  drm/nvd9/copy: initialise copy engine, seems to work like nvc0
  drm/nvc0/ttm: use copy engines for async buffer moves
  drm/nva3/ttm: use copy engine for async buffer moves
  drm/nv98/ttm: add in a (disabled) crypto engine buffer copy method
  drm/nv84/ttm: use crypto engine for async buffer copies
  drm/nouveau/ttm: untangle code to support accelerated buffer moves
  drm/nouveau/fbcon: use fence for sync, rather than notifier
  drm/nv98/crypt: non-stub implementation of the engine hooks
  drm/nouveau/fifo: turn all fifo modules into engine modules
  drm/nv50/graph: remove ability to do interrupt-driven context switching
  drm/nv50: remove manual context unload on context destruction
  drm/nv50: remove execution engine context saves on suspend
  drm/nv50/fifo: use hardware channel kickoff functionality
  ...
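One practical effect of the fence rework runs through the whole diff below: nouveau_fence_new() no longer takes an "emit" flag, it always allocates and emits. A minimal sketch of the resulting idiom, lifted from the nouveau_channel_idle() rewrite in this merge (the helper name channel_drain is hypothetical; the three fence calls and their signatures are as in the diff):

```c
#include <stdbool.h>

struct nouveau_channel;
struct nouveau_fence;

/* Declarations as used in this series (per the diff below). */
int  nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **);
int  nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
void nouveau_fence_unref(struct nouveau_fence **);

/* Hypothetical helper: drain a channel by emitting a fence and waiting. */
static int channel_drain(struct nouveau_channel *chan)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence);	/* allocates and emits */
	if (ret == 0) {
		ret = nouveau_fence_wait(fence, false, false);
		nouveau_fence_unref(&fence);
	}
	return ret;
}
```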
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 51
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 341
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 86
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h | 35
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 166
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 34
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 578
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 52
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fifo.h | 32
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gpio.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_grctx.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hw.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 215
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_perf.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_software.h | 69
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 260
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_display.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 48
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fence.c | 140
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fifo.c | 419
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_graph.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_instmem.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_software.c | 147
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.c | 214
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fifo.c | 278
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_graph.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_fifo.c | 177
-rw-r--r--  drivers/gpu/drm/nouveau/nv20_graph.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv31_mpeg.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fifo.c | 351
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c | 37
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_grctx.c | 32
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_pm.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 98
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_cursor.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_dac.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 75
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fb.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 59
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c | 596
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 229
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_grctx.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_mpeg.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_software.c | 214
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fence.c | 177
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fifo.c | 241
-rw-r--r--  drivers/gpu/drm/nouveau/nv98_crypt.c | 166
-rw-r--r--  drivers/gpu/drm/nouveau/nv98_crypt.fuc | 698
-rw-r--r--  drivers/gpu/drm/nouveau/nv98_crypt.fuc.h | 584
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_copy.c | 31
-rw-r--r--  drivers/gpu/drm/nouveau/nva3_pm.c | 290
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fbcon.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fence.c | 184
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fifo.c | 310
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_pm.c | 189
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_software.c | 153
-rw-r--r--  drivers/gpu/drm/nouveau/nvd0_display.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nve0_fifo.c | 423
-rw-r--r--  drivers/gpu/drm/nouveau/nve0_graph.c | 831
-rw-r--r--  drivers/gpu/drm/nouveau/nve0_graph.h | 89
-rw-r--r--  drivers/gpu/drm/nouveau/nve0_grctx.c | 2777
76 files changed, 9520 insertions, 3026 deletions
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 01f13351a473..fe5267d06ab5 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -16,10 +16,13 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
 	     nv04_mc.o nv40_mc.o nv50_mc.o \
 	     nv04_fb.o nv10_fb.o nv20_fb.o nv30_fb.o nv40_fb.o \
 	     nv50_fb.o nvc0_fb.o \
-	     nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \
+	     nv04_fifo.o nv10_fifo.o nv17_fifo.o nv40_fifo.o nv50_fifo.o \
+	     nv84_fifo.o nvc0_fifo.o nve0_fifo.o \
+	     nv04_fence.o nv10_fence.o nv84_fence.o nvc0_fence.o \
+	     nv04_software.o nv50_software.o nvc0_software.o \
 	     nv04_graph.o nv10_graph.o nv20_graph.o \
-	     nv40_graph.o nv50_graph.o nvc0_graph.o \
-	     nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
+	     nv40_graph.o nv50_graph.o nvc0_graph.o nve0_graph.o \
+	     nv40_grctx.o nv50_grctx.o nvc0_grctx.o nve0_grctx.o \
 	     nv84_crypt.o nv98_crypt.o \
 	     nva3_copy.o nvc0_copy.o \
 	     nv31_mpeg.o nv50_mpeg.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 284bd25d5d21..fc841e87b343 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -338,7 +338,8 @@ void nouveau_switcheroo_optimus_dsm(void)
 
 void nouveau_unregister_dsm_handler(void)
 {
-	vga_switcheroo_unregister_handler();
+	if (nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.dsm_detected)
+		vga_switcheroo_unregister_handler();
 }
 
 /* retrieve the ROM in 4k blocks */
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 0be4a815e706..2f11e16a81a9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -30,6 +30,7 @@
 #include "nouveau_gpio.h"
 
 #include <linux/io-mapping.h>
+#include <linux/firmware.h>
 
 /* these defines are made up */
 #define NV_CIO_CRE_44_HEADA 0x0
@@ -195,35 +196,24 @@ static void
 bios_shadow_acpi(struct nvbios *bios)
 {
 	struct pci_dev *pdev = bios->dev->pdev;
-	int ptr, len, ret;
-	u8 data[3];
+	int cnt = 65536 / ROM_BIOS_PAGE;
+	int ret;
 
 	if (!nouveau_acpi_rom_supported(pdev))
 		return;
 
-	ret = nouveau_acpi_get_bios_chunk(data, 0, sizeof(data));
-	if (ret != sizeof(data))
-		return;
-
-	bios->length = min(data[2] * 512, 65536);
-	bios->data = kmalloc(bios->length, GFP_KERNEL);
+	bios->data = kmalloc(cnt * ROM_BIOS_PAGE, GFP_KERNEL);
 	if (!bios->data)
 		return;
 
-	len = bios->length;
-	ptr = 0;
-	while (len) {
-		int size = (len > ROM_BIOS_PAGE) ? ROM_BIOS_PAGE : len;
-
-		ret = nouveau_acpi_get_bios_chunk(bios->data, ptr, size);
-		if (ret != size) {
-			kfree(bios->data);
-			bios->data = NULL;
+	bios->length = 0;
+	while (cnt--) {
+		ret = nouveau_acpi_get_bios_chunk(bios->data, bios->length,
+						  ROM_BIOS_PAGE);
+		if (ret != ROM_BIOS_PAGE)
 			return;
-		}
 
-		len -= size;
-		ptr += size;
+		bios->length += ROM_BIOS_PAGE;
 	}
 }
 
@@ -249,8 +239,12 @@ bios_shadow(struct drm_device *dev)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nvbios *bios = &dev_priv->vbios;
 	struct methods *mthd, *best;
+	const struct firmware *fw;
+	char fname[32];
+	int ret;
 
 	if (nouveau_vbios) {
+		/* try to match one of the built-in methods */
 		mthd = shadow_methods;
 		do {
 			if (strcasecmp(nouveau_vbios, mthd->desc))
@@ -263,6 +257,22 @@ bios_shadow(struct drm_device *dev)
 				return true;
 		} while ((++mthd)->shadow);
 
+		/* attempt to load firmware image */
+		snprintf(fname, sizeof(fname), "nouveau/%s", nouveau_vbios);
+		ret = request_firmware(&fw, fname, &dev->pdev->dev);
+		if (ret == 0) {
+			bios->length = fw->size;
+			bios->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+			release_firmware(fw);
+
+			NV_INFO(dev, "VBIOS image: %s\n", nouveau_vbios);
+			if (score_vbios(bios, 1))
+				return true;
+
+			kfree(bios->data);
+			bios->data = NULL;
+		}
+
 		NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios);
 	}
 
@@ -273,6 +283,7 @@ bios_shadow(struct drm_device *dev)
 		mthd->score = score_vbios(bios, mthd->rw);
 		mthd->size = bios->length;
 		mthd->data = bios->data;
+		bios->data = NULL;
 	} while (mthd->score != 3 && (++mthd)->shadow);
 
 	mthd = shadow_methods;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4435e115b929..7f80ed523562 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -35,6 +35,8 @@
 #include "nouveau_dma.h"
 #include "nouveau_mm.h"
 #include "nouveau_vm.h"
+#include "nouveau_fence.h"
+#include "nouveau_ramht.h"
 
 #include <linux/log2.h>
 #include <linux/slab.h>
@@ -478,7 +480,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
 	struct nouveau_fence *fence = NULL;
 	int ret;
 
-	ret = nouveau_fence_new(chan, &fence, true);
+	ret = nouveau_fence_new(chan, &fence);
 	if (ret)
 		return ret;
 
@@ -489,6 +491,76 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
 }
 
 static int
+nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+	struct nouveau_mem *node = old_mem->mm_node;
+	int ret = RING_SPACE(chan, 10);
+	if (ret == 0) {
+		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
+		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
+		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
+		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
+		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
+		OUT_RING  (chan, PAGE_SIZE);
+		OUT_RING  (chan, PAGE_SIZE);
+		OUT_RING  (chan, PAGE_SIZE);
+		OUT_RING  (chan, new_mem->num_pages);
+		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
+	}
+	return ret;
+}
+
+static int
+nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+	int ret = RING_SPACE(chan, 2);
+	if (ret == 0) {
+		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
+		OUT_RING  (chan, handle);
+	}
+	return ret;
+}
+
+static int
+nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+	struct nouveau_mem *node = old_mem->mm_node;
+	u64 src_offset = node->vma[0].offset;
+	u64 dst_offset = node->vma[1].offset;
+	u32 page_count = new_mem->num_pages;
+	int ret;
+
+	page_count = new_mem->num_pages;
+	while (page_count) {
+		int line_count = (page_count > 8191) ? 8191 : page_count;
+
+		ret = RING_SPACE(chan, 11);
+		if (ret)
+			return ret;
+
+		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
+		OUT_RING  (chan, upper_32_bits(src_offset));
+		OUT_RING  (chan, lower_32_bits(src_offset));
+		OUT_RING  (chan, upper_32_bits(dst_offset));
+		OUT_RING  (chan, lower_32_bits(dst_offset));
+		OUT_RING  (chan, PAGE_SIZE);
+		OUT_RING  (chan, PAGE_SIZE);
+		OUT_RING  (chan, PAGE_SIZE);
+		OUT_RING  (chan, line_count);
+		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
+		OUT_RING  (chan, 0x00000110);
+
+		page_count -= line_count;
+		src_offset += (PAGE_SIZE * line_count);
+		dst_offset += (PAGE_SIZE * line_count);
+	}
+
+	return 0;
+}
+
+static int
 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
@@ -506,17 +578,17 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 
-		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
+		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
 		OUT_RING  (chan, upper_32_bits(dst_offset));
 		OUT_RING  (chan, lower_32_bits(dst_offset));
-		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
+		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
 		OUT_RING  (chan, upper_32_bits(src_offset));
 		OUT_RING  (chan, lower_32_bits(src_offset));
 		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
 		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
 		OUT_RING  (chan, PAGE_SIZE); /* line_length */
 		OUT_RING  (chan, line_count);
-		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
+		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
 		OUT_RING  (chan, 0x00100110);
 
 		page_count -= line_count;
@@ -528,6 +600,102 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 }
 
 static int
+nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+	struct nouveau_mem *node = old_mem->mm_node;
+	u64 src_offset = node->vma[0].offset;
+	u64 dst_offset = node->vma[1].offset;
+	u32 page_count = new_mem->num_pages;
+	int ret;
+
+	page_count = new_mem->num_pages;
+	while (page_count) {
+		int line_count = (page_count > 8191) ? 8191 : page_count;
+
+		ret = RING_SPACE(chan, 11);
+		if (ret)
+			return ret;
+
+		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
+		OUT_RING  (chan, upper_32_bits(src_offset));
+		OUT_RING  (chan, lower_32_bits(src_offset));
+		OUT_RING  (chan, upper_32_bits(dst_offset));
+		OUT_RING  (chan, lower_32_bits(dst_offset));
+		OUT_RING  (chan, PAGE_SIZE);
+		OUT_RING  (chan, PAGE_SIZE);
+		OUT_RING  (chan, PAGE_SIZE);
+		OUT_RING  (chan, line_count);
+		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
+		OUT_RING  (chan, 0x00000110);
+
+		page_count -= line_count;
+		src_offset += (PAGE_SIZE * line_count);
+		dst_offset += (PAGE_SIZE * line_count);
+	}
+
+	return 0;
+}
+
+static int
+nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+	struct nouveau_mem *node = old_mem->mm_node;
+	int ret = RING_SPACE(chan, 7);
+	if (ret == 0) {
+		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
+		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
+		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
+		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
+		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
+		OUT_RING  (chan, 0x00000000 /* COPY */);
+		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
+	}
+	return ret;
+}
+
+static int
+nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+	struct nouveau_mem *node = old_mem->mm_node;
+	int ret = RING_SPACE(chan, 7);
+	if (ret == 0) {
+		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
+		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
+		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
+		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
+		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
+		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
+		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
+	}
+	return ret;
+}
+
+static int
+nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+	int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
+					 &chan->m2mf_ntfy);
+	if (ret == 0) {
+		ret = RING_SPACE(chan, 6);
+		if (ret == 0) {
+			BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+			OUT_RING  (chan, handle);
+			BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
+			OUT_RING  (chan, NvNotify0);
+			OUT_RING  (chan, NvDmaFB);
+			OUT_RING  (chan, NvDmaFB);
+		} else {
+			nouveau_ramht_remove(chan, NvNotify0);
+		}
+	}
+
+	return ret;
+}
+
+static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
@@ -551,7 +719,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 			if (ret)
 				return ret;
 
-			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
+			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
 			OUT_RING  (chan, 0);
 			OUT_RING  (chan, 0);
 			OUT_RING  (chan, stride);
@@ -564,7 +732,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 			if (ret)
 				return ret;
 
-			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
+			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
 			OUT_RING  (chan, 1);
 		}
 		if (old_mem->mem_type == TTM_PL_VRAM &&
@@ -573,7 +741,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 			if (ret)
 				return ret;
 
-			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
+			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
 			OUT_RING  (chan, 0);
 			OUT_RING  (chan, 0);
 			OUT_RING  (chan, stride);
@@ -586,7 +754,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 			if (ret)
 				return ret;
 
-			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
+			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
 			OUT_RING  (chan, 1);
 		}
 
@@ -594,10 +762,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 
-		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
+		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
 		OUT_RING  (chan, upper_32_bits(src_offset));
 		OUT_RING  (chan, upper_32_bits(dst_offset));
-		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
+		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
 		OUT_RING  (chan, lower_32_bits(src_offset));
 		OUT_RING  (chan, lower_32_bits(dst_offset));
 		OUT_RING  (chan, stride);
@@ -606,7 +774,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		OUT_RING  (chan, height);
 		OUT_RING  (chan, 0x00000101);
 		OUT_RING  (chan, 0x00000000);
-		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
 		OUT_RING  (chan, 0);
 
 		length -= amount;
@@ -617,6 +785,24 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 	return 0;
 }
 
+static int
+nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+	int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
+					 &chan->m2mf_ntfy);
+	if (ret == 0) {
+		ret = RING_SPACE(chan, 4);
+		if (ret == 0) {
+			BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+			OUT_RING  (chan, handle);
+			BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
+			OUT_RING  (chan, NvNotify0);
+		}
+	}
+
+	return ret;
+}
+
 static inline uint32_t
 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
 		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
@@ -639,7 +825,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 	if (ret)
 		return ret;
 
-	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
+	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
 	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
 	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
 
@@ -651,7 +837,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 
-		BEGIN_RING(chan, NvSubM2MF,
+		BEGIN_NV04(chan, NvSubCopy,
 			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
 		OUT_RING  (chan, src_offset);
 		OUT_RING  (chan, dst_offset);
@@ -661,7 +847,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		OUT_RING  (chan, line_count);
 		OUT_RING  (chan, 0x00000101);
 		OUT_RING  (chan, 0x00000000);
-		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
 		OUT_RING  (chan, 0);
 
 		page_count -= line_count;
@@ -721,13 +907,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		goto out;
 	}
 
-	if (dev_priv->card_type < NV_50)
-		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
-	else
-	if (dev_priv->card_type < NV_C0)
-		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
-	else
-		ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
+	ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem);
 	if (ret == 0) {
 		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
 						    no_wait_reserve,
@@ -739,6 +919,49 @@ out:
 	return ret;
 }
 
+void
+nouveau_bo_move_init(struct nouveau_channel *chan)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	static const struct {
+		const char *name;
+		int engine;
+		u32 oclass;
+		int (*exec)(struct nouveau_channel *,
+			    struct ttm_buffer_object *,
+			    struct ttm_mem_reg *, struct ttm_mem_reg *);
+		int (*init)(struct nouveau_channel *, u32 handle);
+	} _methods[] = {
+		{ "COPY", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
+		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
+		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
+		{ "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
+		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
+		{ "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
+		{ "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
+		{ "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
+		{},
+		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
+	}, *mthd = _methods;
+	const char *name = "CPU";
+	int ret;
+
+	do {
+		u32 handle = (mthd->engine << 16) | mthd->oclass;
+		ret = nouveau_gpuobj_gr_new(chan, handle, mthd->oclass);
+		if (ret == 0) {
+			ret = mthd->init(chan, handle);
+			if (ret == 0) {
+				dev_priv->ttm.move = mthd->exec;
+				name = mthd->name;
+				break;
+			}
+		}
+	} while ((++mthd)->exec);
+
+	NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name);
+}
+
 static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 		      bool no_wait_reserve, bool no_wait_gpu,
@@ -895,8 +1118,8 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 		goto out;
 	}
 
-	/* Software copy if the card isn't up and running yet. */
-	if (!dev_priv->channel) {
+	/* CPU copy if we have no accelerated method available */
+	if (!dev_priv->ttm.move) {
 		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 		goto out;
 	}
@@ -1044,22 +1267,6 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 	return nouveau_bo_validate(nvbo, false, true, false);
 }
 
-void
-nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
-{
-	struct nouveau_fence *old_fence;
-
-	if (likely(fence))
-		nouveau_fence_ref(fence);
-
-	spin_lock(&nvbo->bo.bdev->fence_lock);
-	old_fence = nvbo->bo.sync_obj;
-	nvbo->bo.sync_obj = fence;
-	spin_unlock(&nvbo->bo.bdev->fence_lock);
-
-	nouveau_fence_unref(&old_fence);
-}
-
 static int
 nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 {
@@ -1157,6 +1364,52 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	ttm_pool_unpopulate(ttm);
 }
 
+void
+nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
+{
+	struct nouveau_fence *old_fence = NULL;
+
+	if (likely(fence))
+		nouveau_fence_ref(fence);
+
+	spin_lock(&nvbo->bo.bdev->fence_lock);
+	old_fence = nvbo->bo.sync_obj;
+	nvbo->bo.sync_obj = fence;
+	spin_unlock(&nvbo->bo.bdev->fence_lock);
+
+	nouveau_fence_unref(&old_fence);
+}
+
+static void
+nouveau_bo_fence_unref(void **sync_obj)
+{
+	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
+}
+
+static void *
+nouveau_bo_fence_ref(void *sync_obj)
+{
+	return nouveau_fence_ref(sync_obj);
+}
+
+static bool
+nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg)
+{
+	return nouveau_fence_done(sync_obj);
+}
+
+static int
+nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+{
+	return nouveau_fence_wait(sync_obj, lazy, intr);
+}
+
+static int
+nouveau_bo_fence_flush(void *sync_obj, void *sync_arg)
+{
+	return 0;
+}
+
 struct ttm_bo_driver nouveau_bo_driver = {
 	.ttm_tt_create = &nouveau_ttm_tt_create,
 	.ttm_tt_populate = &nouveau_ttm_tt_populate,
@@ -1167,11 +1420,11 @@ struct ttm_bo_driver nouveau_bo_driver = {
 	.move_notify = nouveau_bo_move_ntfy,
 	.move = nouveau_bo_move,
 	.verify_access = nouveau_bo_verify_access,
-	.sync_obj_signaled = __nouveau_fence_signalled,
-	.sync_obj_wait = __nouveau_fence_wait,
-	.sync_obj_flush = __nouveau_fence_flush,
-	.sync_obj_unref = __nouveau_fence_unref,
-	.sync_obj_ref = __nouveau_fence_ref,
+	.sync_obj_signaled = nouveau_bo_fence_signalled,
+	.sync_obj_wait = nouveau_bo_fence_wait,
+	.sync_obj_flush = nouveau_bo_fence_flush,
+	.sync_obj_unref = nouveau_bo_fence_unref,
+	.sync_obj_ref = nouveau_bo_fence_ref,
 	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
 	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
 	.io_mem_free = &nouveau_ttm_io_mem_free,
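The method table in nouveau_bo_move_init() above deserves a note: entries are probed newest-first, each object handle packs the engine index into its upper 16 bits, and the empty sentinel row before the nv98 CRYPT entry leaves that path compiled in but disabled (matching the "(disabled) crypto engine buffer copy method" commit in the shortlog). A standalone sketch of just the handle packing, assuming nothing beyond the shifts visible in the diff:

```c
#include <stdio.h>

int main(void)
{
	/* "COPY0" on nvc0: engine 4, class 0x90b5, as in the table above */
	unsigned int engine = 4, oclass = 0x90b5;
	unsigned int handle = (engine << 16) | oclass;

	printf("handle = 0x%08x\n", handle);	/* prints 0x000490b5 */
	return 0;
}
```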
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 730bbb249b01..629d8a2df5bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -27,7 +27,10 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 #include "nouveau_dma.h"
+#include "nouveau_fifo.h"
 #include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+#include "nouveau_software.h"
 
 static int
 nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
@@ -117,8 +120,9 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 		      struct drm_file *file_priv,
 		      uint32_t vram_handle, uint32_t gart_handle)
 {
+	struct nouveau_exec_engine *fence = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
 	struct nouveau_channel *chan;
 	unsigned long flags;
@@ -155,10 +159,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	}
 
 	NV_DEBUG(dev, "initialising channel %d\n", chan->id);
-	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
-	INIT_LIST_HEAD(&chan->nvsw.flip);
-	INIT_LIST_HEAD(&chan->fence.pending);
-	spin_lock_init(&chan->fence.lock);
 
 	/* setup channel's memory and vm */
 	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
@@ -188,20 +188,15 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	chan->user_put = 0x40;
 	chan->user_get = 0x44;
 	if (dev_priv->card_type >= NV_50)
 		chan->user_get_hi = 0x60;
 
-	/* disable the fifo caches */
-	pfifo->reassign(dev, false);
-
-	/* Construct initial RAMFC for new channel */
-	ret = pfifo->create_context(chan);
+	/* create fifo context */
+	ret = pfifo->base.context_new(chan, NVOBJ_ENGINE_FIFO);
 	if (ret) {
 		nouveau_channel_put(&chan);
 		return ret;
 	}
 
-	pfifo->reassign(dev, true);
-
 	/* Insert NOPs for NOUVEAU_DMA_SKIPS */
 	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
 	if (ret) {
@@ -211,9 +206,28 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 
 	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
 		OUT_RING  (chan, 0x00000000);
+
+	ret = nouveau_gpuobj_gr_new(chan, NvSw, nouveau_software_class(dev));
+	if (ret) {
+		nouveau_channel_put(&chan);
+		return ret;
+	}
+
+	if (dev_priv->card_type < NV_C0) {
+		ret = RING_SPACE(chan, 2);
+		if (ret) {
+			nouveau_channel_put(&chan);
+			return ret;
+		}
+
+		BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
+		OUT_RING  (chan, NvSw);
+		FIRE_RING (chan);
+	}
+
 	FIRE_RING(chan);
 
-	ret = nouveau_fence_channel_init(chan);
+	ret = fence->context_new(chan, NVOBJ_ENGINE_FENCE);
 	if (ret) {
 		nouveau_channel_put(&chan);
 		return ret;
@@ -268,7 +282,6 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
 	struct nouveau_channel *chan = *pchan;
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	unsigned long flags;
 	int i;
 
@@ -285,24 +298,12 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
 	/* give it chance to idle */
 	nouveau_channel_idle(chan);
 
-	/* ensure all outstanding fences are signaled. they should be if the
-	 * above attempts at idling were OK, but if we failed this'll tell TTM
-	 * we're done with the buffers.
-	 */
-	nouveau_fence_channel_fini(chan);
-
-	/* boot it off the hardware */
-	pfifo->reassign(dev, false);
-
 	/* destroy the engine specific contexts */
-	pfifo->destroy_context(chan);
-	for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
+	for (i = NVOBJ_ENGINE_NR - 1; i >= 0; i--) {
 		if (chan->engctx[i])
 			dev_priv->eng[i]->context_del(chan, i);
 	}
 
-	pfifo->reassign(dev, true);
-
 	/* aside from its resources, the channel should now be dead,
 	 * remove it from the channel list
 	 */
@@ -354,38 +355,37 @@ nouveau_channel_ref(struct nouveau_channel *chan,
 	*pchan = chan;
 }
 
-void
+int
 nouveau_channel_idle(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
 	struct nouveau_fence *fence = NULL;
 	int ret;
 
-	nouveau_fence_update(chan);
-
-	if (chan->fence.sequence != chan->fence.sequence_ack) {
-		ret = nouveau_fence_new(chan, &fence, true);
-		if (!ret) {
-			ret = nouveau_fence_wait(fence, false, false);
-			nouveau_fence_unref(&fence);
-		}
-
-		if (ret)
-			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+	ret = nouveau_fence_new(chan, &fence);
+	if (!ret) {
+		ret = nouveau_fence_wait(fence, false, false);
+		nouveau_fence_unref(&fence);
 	}
+
+	if (ret)
+		NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+	return ret;
 }
 
 /* cleans up all the fifos from file_priv */
 void
 nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
+	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
 	struct nouveau_channel *chan;
 	int i;
 
+	if (!pfifo)
+		return;
+
 	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
-	for (i = 0; i < engine->fifo.channels; i++) {
+	for (i = 0; i < pfifo->channels; i++) {
 		chan = nouveau_channel_get(file_priv, i);
 		if (IS_ERR(chan))
 			continue;
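With struct nouveau_fifo_engine gone, FIFO and FENCE are reached through the same nv_engine()/engctx slots as every other execution engine, which is why channel teardown above collapses into a single reverse loop over dev_priv->eng[]. A simplified, self-contained model of that shape (the struct, hook names, and dummy callbacks here are illustrative, not the driver's actual definitions):

```c
#include <stdio.h>

struct channel { int id; };

/* Hypothetical, much-reduced stand-in for the per-engine interface. */
struct exec_engine {
	const char *name;
	void (*context_del)(struct channel *, int engine);
};

static void dummy_del(struct channel *c, int e)
{
	printf("engine %d: context_del on channel %d\n", e, c->id);
}

#define ENGINE_NR	16
#define ENGINE_FIFO	14	/* NVOBJ_ENGINE_FIFO in the diff */
#define ENGINE_FENCE	15	/* NVOBJ_ENGINE_FENCE in the diff */

static struct exec_engine fifo  = { "FIFO",  dummy_del };
static struct exec_engine fence = { "FENCE", dummy_del };
static struct exec_engine *eng[ENGINE_NR];

int main(void)
{
	struct channel chan = { .id = 1 };

	eng[ENGINE_FIFO]  = &fifo;
	eng[ENGINE_FENCE] = &fence;

	/* teardown as in nouveau_channel_put_unlocked(): reverse order,
	 * with no pfifo special-casing left */
	for (int i = ENGINE_NR - 1; i >= 0; i--)
		if (eng[i])
			eng[i]->context_del(&chan, i);
	return 0;
}
```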
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index fa860358add1..7b11edb077d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -654,7 +654,13 @@ nouveau_connector_detect_depth(struct drm_connector *connector)
 	if (nv_connector->edid && connector->display_info.bpc)
 		return;
 
-	/* if not, we're out of options unless we're LVDS, default to 8bpc */
+	/* EDID 1.4 is *supposed* to be supported on eDP, but, Apple... */
+	if (nv_connector->type == DCB_CONNECTOR_eDP) {
+		connector->display_info.bpc = 6;
+		return;
+	}
+
+	/* we're out of options unless we're LVDS, default to 8bpc */
 	if (nv_encoder->dcb->type != OUTPUT_LVDS) {
 		connector->display_info.bpc = 8;
 		return;
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index fa2ec491f6a7..188c92b327e2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -67,8 +67,6 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data)
 			   nvchan_rd32(chan, 0x8c));
 	}
 
-	seq_printf(m, "last fence    : %d\n", chan->fence.sequence);
-	seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 4b1cf7457983..69688ef5cf46 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -33,7 +33,9 @@
 #include "nouveau_crtc.h"
 #include "nouveau_dma.h"
 #include "nouveau_connector.h"
+#include "nouveau_software.h"
 #include "nouveau_gpio.h"
+#include "nouveau_fence.h"
 #include "nv50_display.h"
 
 static void
@@ -325,14 +327,21 @@ nouveau_display_create(struct drm_device *dev)
 
 	ret = disp->create(dev);
 	if (ret)
-		return ret;
+		goto disp_create_err;
 
 	if (dev->mode_config.num_crtc) {
 		ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
 		if (ret)
-			return ret;
+			goto vblank_err;
 	}
 
+	return 0;
+
+vblank_err:
+	disp->destroy(dev);
+disp_create_err:
+	drm_kms_helper_poll_fini(dev);
+	drm_mode_config_cleanup(dev);
 	return ret;
 }
 
@@ -425,6 +434,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
 		       struct nouveau_page_flip_state *s,
 		       struct nouveau_fence **pfence)
 {
+	struct nouveau_software_chan *swch = chan->engctx[NVOBJ_ENGINE_SW];
 	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct drm_device *dev = chan->dev;
 	unsigned long flags;
@@ -432,7 +442,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
 
 	/* Queue it to the pending list */
 	spin_lock_irqsave(&dev->event_lock, flags);
-	list_add_tail(&s->head, &chan->nvsw.flip);
+	list_add_tail(&s->head, &swch->flip);
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
 	/* Synchronize with the old framebuffer */
@@ -446,17 +456,17 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
 		goto fail;
 
 	if (dev_priv->card_type < NV_C0) {
-		BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
+		BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
 		OUT_RING  (chan, 0x00000000);
 		OUT_RING  (chan, 0x00000000);
 	} else {
-		BEGIN_NVC0(chan, 2, 0, NV10_SUBCHAN_REF_CNT, 1);
-		OUT_RING  (chan, ++chan->fence.sequence);
-		BEGIN_NVC0(chan, 8, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000);
+		BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
+		OUT_RING  (chan, 0);
+		BEGIN_IMC0(chan, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000);
 	}
 	FIRE_RING (chan);
 
-	ret = nouveau_fence_new(chan, pfence, true);
+	ret = nouveau_fence_new(chan, pfence);
 	if (ret)
 		goto fail;
 
@@ -477,7 +487,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
 	struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
 	struct nouveau_page_flip_state *s;
-	struct nouveau_channel *chan;
+	struct nouveau_channel *chan = NULL;
 	struct nouveau_fence *fence;
 	int ret;
 
@@ -500,7 +510,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 					  new_bo->bo.offset };
 
 	/* Choose the channel the flip will be handled in */
-	chan = nouveau_fence_channel(new_bo->bo.sync_obj);
+	fence = new_bo->bo.sync_obj;
+	if (fence)
+		chan = nouveau_channel_get_unlocked(fence->channel);
 	if (!chan)
 		chan = nouveau_channel_get_unlocked(dev_priv->channel);
 	mutex_lock(&chan->mutex);
@@ -540,20 +552,20 @@ int
 nouveau_finish_page_flip(struct nouveau_channel *chan,
 			 struct nouveau_page_flip_state *ps)
 {
+	struct nouveau_software_chan *swch = chan->engctx[NVOBJ_ENGINE_SW];
 	struct drm_device *dev = chan->dev;
 	struct nouveau_page_flip_state *s;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 
-	if (list_empty(&chan->nvsw.flip)) {
+	if (list_empty(&swch->flip)) {
 		NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id);
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		return -EINVAL;
 	}
 
-	s = list_first_entry(&chan->nvsw.flip,
-			     struct nouveau_page_flip_state, head);
+	s = list_first_entry(&swch->flip, struct nouveau_page_flip_state, head);
 	if (s->event) {
 		struct drm_pending_vblank_event *e = s->event;
 		struct timeval now;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 23d4edf992b7..8db68be9544f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -48,12 +48,12 @@ void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
 
 /* Hardcoded object assignments to subchannels (subchannel id). */
 enum {
-	NvSubM2MF = 0,
+	NvSubCtxSurf2D = 0,
 	NvSubSw = 1,
-	NvSub2D = 2,
-	NvSubCtxSurf2D = 2,
+	NvSubImageBlit = 2,
+	NvSub2D = 3,
 	NvSubGdiRect = 3,
-	NvSubImageBlit = 4
+	NvSubCopy = 4,
 };
 
 /* Object handles. */
@@ -73,6 +73,7 @@
 	NvSema = 0x8000000f,
 	NvEvoSema0 = 0x80000010,
 	NvEvoSema1 = 0x80000011,
+	NvNotify1 = 0x80000012,
 
 	/* G80+ display objects */
 	NvEvoVRAM = 0x01000000,
@@ -127,15 +128,33 @@ extern void
 OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);
 
 static inline void
-BEGIN_NVC0(struct nouveau_channel *chan, int op, int subc, int mthd, int size)
+BEGIN_NV04(struct nouveau_channel *chan, int subc, int mthd, int size)
 {
-	OUT_RING(chan, (op << 28) | (size << 16) | (subc << 13) | (mthd >> 2));
+	OUT_RING(chan, 0x00000000 | (subc << 13) | (size << 18) | mthd);
 }
 
 static inline void
-BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
+BEGIN_NI04(struct nouveau_channel *chan, int subc, int mthd, int size)
 {
-	OUT_RING(chan, (subc << 13) | (size << 18) | mthd);
+	OUT_RING(chan, 0x40000000 | (subc << 13) | (size << 18) | mthd);
+}
+
+static inline void
+BEGIN_NVC0(struct nouveau_channel *chan, int subc, int mthd, int size)
+{
+	OUT_RING(chan, 0x20000000 | (size << 16) | (subc << 13) | (mthd >> 2));
+}
+
+static inline void
+BEGIN_NIC0(struct nouveau_channel *chan, int subc, int mthd, int size)
+{
+	OUT_RING(chan, 0x60000000 | (size << 16) | (subc << 13) | (mthd >> 2));
+}
+
+static inline void
+BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
+{
+	OUT_RING(chan, 0x80000000 | (data << 16) | (subc << 13) | (mthd >> 2));
 }
 
 #define WRITE_PUT(val) do { \
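To make the renamed submission macros above concrete, here is the header word each one emits, computed by hand for a call used in the buffer-move code (a worked example only; the values follow directly from the shifts in the diff):

```c
#include <stdio.h>

int main(void)
{
	unsigned int subc = 4;		/* NvSubCopy */
	unsigned int mthd = 0x030c;
	unsigned int size = 8;

	/* BEGIN_NV04: pre-Fermi header; count above bit 18, method as a
	 * byte offset in the low bits */
	unsigned int nv04 = 0x00000000 | (subc << 13) | (size << 18) | mthd;

	/* BEGIN_NVC0: Fermi "increasing methods" header; count above bit 16,
	 * method stored as a dword offset (mthd >> 2) */
	unsigned int nvc0 = 0x20000000 | (size << 16) | (subc << 13) | (mthd >> 2);

	printf("NV04 header: 0x%08x\n", nv04);	/* 0x0020830c */
	printf("NVC0 header: 0x%08x\n", nvc0);	/* 0x200880c3 */
	return 0;
}
```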
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index b394ecf787f6..cad254c8e387 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -33,6 +33,7 @@
 #include "nouveau_fb.h"
 #include "nouveau_fbcon.h"
 #include "nouveau_pm.h"
+#include "nouveau_fifo.h"
 #include "nv50_display.h"
 
 #include "drm_pciids.h"
@@ -175,7 +176,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
 	struct nouveau_channel *chan;
 	struct drm_crtc *crtc;
 	int ret, i, e;
@@ -214,17 +215,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
 	ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
 
 	NV_INFO(dev, "Idling channels...\n");
-	for (i = 0; i < pfifo->channels; i++) {
+	for (i = 0; i < (pfifo ? pfifo->channels : 0); i++) {
 		chan = dev_priv->channels.ptr[i];
 
 		if (chan && chan->pushbuf_bo)
 			nouveau_channel_idle(chan);
 	}
 
-	pfifo->reassign(dev, false);
-	pfifo->disable(dev);
-	pfifo->unload_context(dev);
-
 	for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
 		if (!dev_priv->eng[e])
 			continue;
@@ -265,8 +262,6 @@ out_abort:
 		if (dev_priv->eng[e])
 			dev_priv->eng[e]->init(dev, e);
 	}
-	pfifo->enable(dev);
-	pfifo->reassign(dev, true);
 	return ret;
 }
 
@@ -274,6 +269,7 @@ int
 nouveau_pci_resume(struct pci_dev *pdev)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_engine *engine = &dev_priv->engine;
 	struct drm_crtc *crtc;
@@ -321,7 +317,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
 		if (dev_priv->eng[i])
 			dev_priv->eng[i]->init(dev, i);
 	}
-	engine->fifo.init(dev);
 
 	nouveau_irq_postinstall(dev);
 
@@ -330,7 +325,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
 		struct nouveau_channel *chan;
 		int j;
 
-		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+		for (i = 0; i < (pfifo ? pfifo->channels : 0); i++) {
 			chan = dev_priv->channels.ptr[i];
 			if (!chan || !chan->pushbuf_bo)
 				continue;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 92c9a8a648de..634d222c93de 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -70,7 +70,7 @@ struct nouveau_mem;
 
 #define MAX_NUM_DCB_ENTRIES 16
 
-#define NOUVEAU_MAX_CHANNEL_NR 128
+#define NOUVEAU_MAX_CHANNEL_NR 4096
 #define NOUVEAU_MAX_TILE_NR 15
 
 struct nouveau_mem {
@@ -165,8 +165,10 @@ enum nouveau_flags {
 #define NVOBJ_ENGINE_PPP	NVOBJ_ENGINE_MPEG
 #define NVOBJ_ENGINE_BSP	6
 #define NVOBJ_ENGINE_VP		7
-#define NVOBJ_ENGINE_DISPLAY	15
+#define NVOBJ_ENGINE_FIFO	14
+#define NVOBJ_ENGINE_FENCE	15
 #define NVOBJ_ENGINE_NR		16
+#define NVOBJ_ENGINE_DISPLAY	(NVOBJ_ENGINE_NR + 0) /*XXX*/
 
 #define NVOBJ_FLAG_DONT_MAP		(1 << 0)
 #define NVOBJ_FLAG_ZERO_ALLOC		(1 << 1)
@@ -234,17 +236,6 @@ struct nouveau_channel {
 	uint32_t user_get_hi;
 	uint32_t user_put;
 
-	/* Fencing */
-	struct {
-		/* lock protects the pending list only */
-		spinlock_t lock;
-		struct list_head pending;
-		uint32_t sequence;
-		uint32_t sequence_ack;
-		atomic_t last_sequence_irq;
-		struct nouveau_vma vma;
-	} fence;
-
 	/* DMA push buffer */
 	struct nouveau_gpuobj *pushbuf;
 	struct nouveau_bo *pushbuf_bo;
@@ -258,8 +249,6 @@ struct nouveau_channel {
 
 	/* PFIFO context */
 	struct nouveau_gpuobj *ramfc;
-	struct nouveau_gpuobj *cache;
-	void *fifo_priv;
 
 	/* Execution engine contexts */
 	void *engctx[NVOBJ_ENGINE_NR];
@@ -293,18 +282,6 @@ struct nouveau_channel {
 		int ib_put;
 	} dma;
 
-	uint32_t sw_subchannel[8];
-
-	struct nouveau_vma dispc_vma[4];
-	struct {
-		struct nouveau_gpuobj *vblsem;
-		uint32_t vblsem_head;
-		uint32_t vblsem_offset;
-		uint32_t vblsem_rval;
-		struct list_head vbl_wait;
-		struct list_head flip;
-	} nvsw;
-
 	struct {
 		bool active;
 		char name[32];
@@ -367,30 +344,6 @@ struct nouveau_fb_engine {
 	void (*free_tile_region)(struct drm_device *dev, int i);
 };
 
-struct nouveau_fifo_engine {
-	void *priv;
-	int  channels;
-
-	struct nouveau_gpuobj *playlist[2];
-	int cur_playlist;
-
-	int  (*init)(struct drm_device *);
-	void (*takedown)(struct drm_device *);
-
-	void (*disable)(struct drm_device *);
-	void (*enable)(struct drm_device *);
-	bool (*reassign)(struct drm_device *, bool enable);
-	bool (*cache_pull)(struct drm_device *dev, bool enable);
-
-	int  (*channel_id)(struct drm_device *);
-
-	int  (*create_context)(struct nouveau_channel *);
-	void (*destroy_context)(struct nouveau_channel *);
-	int  (*load_context)(struct nouveau_channel *);
-	int  (*unload_context)(struct drm_device *);
-	void (*tlb_flush)(struct drm_device *dev);
-};
-
 struct nouveau_display_engine {
 	void *priv;
 	int (*early_init)(struct drm_device *);
@@ -598,7 +551,6 @@ struct nouveau_engine {
 	struct nouveau_mc_engine      mc;
 	struct nouveau_timer_engine   timer;
 	struct nouveau_fb_engine      fb;
-	struct nouveau_fifo_engine    fifo;
 	struct nouveau_display_engine display;
 	struct nouveau_gpio_engine    gpio;
 	struct nouveau_pm_engine      pm;
@@ -741,6 +693,9 @@ struct drm_nouveau_private {
 		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
 		atomic_t validate_sequence;
+		int (*move)(struct nouveau_channel *,
+			    struct ttm_buffer_object *,
+			    struct ttm_mem_reg *, struct ttm_mem_reg *);
 	} ttm;
 
 	struct {
@@ -978,7 +933,7 @@ extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
 extern void nouveau_channel_put(struct nouveau_channel **);
 extern void nouveau_channel_ref(struct nouveau_channel *chan,
 				struct nouveau_channel **pchan);
-extern void nouveau_channel_idle(struct nouveau_channel *chan);
+extern int  nouveau_channel_idle(struct nouveau_channel *chan);
 
 /* nouveau_object.c */
 #define NVOBJ_ENGINE_ADD(d, e, p) do {                                         \
@@ -1210,56 +1165,6 @@ extern void nv50_fb_vm_trap(struct drm_device *, int display);
 extern int  nvc0_fb_init(struct drm_device *);
 extern void nvc0_fb_takedown(struct drm_device *);
 
-/* nv04_fifo.c */
-extern int  nv04_fifo_init(struct drm_device *);
-extern void nv04_fifo_fini(struct drm_device *);
-extern void nv04_fifo_disable(struct drm_device *);
-extern void nv04_fifo_enable(struct drm_device *);
-extern bool nv04_fifo_reassign(struct drm_device *, bool);
-extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
-extern int  nv04_fifo_channel_id(struct drm_device *);
-extern int  nv04_fifo_create_context(struct nouveau_channel *);
-extern void nv04_fifo_destroy_context(struct nouveau_channel *);
-extern int  nv04_fifo_load_context(struct nouveau_channel *);
-extern int  nv04_fifo_unload_context(struct drm_device *);
-extern void nv04_fifo_isr(struct drm_device *);
-
-/* nv10_fifo.c */
-extern int  nv10_fifo_init(struct drm_device *);
-extern int  nv10_fifo_channel_id(struct drm_device *);
-extern int  nv10_fifo_create_context(struct nouveau_channel *);
-extern int  nv10_fifo_load_context(struct nouveau_channel *);
-extern int  nv10_fifo_unload_context(struct drm_device *);
-
-/* nv40_fifo.c */
-extern int  nv40_fifo_init(struct drm_device *);
-extern int  nv40_fifo_create_context(struct nouveau_channel *);
-extern int  nv40_fifo_load_context(struct nouveau_channel *);
-extern int  nv40_fifo_unload_context(struct drm_device *);
1239
1240/* nv50_fifo.c */
1241extern int nv50_fifo_init(struct drm_device *);
1242extern void nv50_fifo_takedown(struct drm_device *);
1243extern int nv50_fifo_channel_id(struct drm_device *);
1244extern int nv50_fifo_create_context(struct nouveau_channel *);
1245extern void nv50_fifo_destroy_context(struct nouveau_channel *);
1246extern int nv50_fifo_load_context(struct nouveau_channel *);
1247extern int nv50_fifo_unload_context(struct drm_device *);
1248extern void nv50_fifo_tlb_flush(struct drm_device *dev);
1249
1250/* nvc0_fifo.c */
1251extern int nvc0_fifo_init(struct drm_device *);
1252extern void nvc0_fifo_takedown(struct drm_device *);
1253extern void nvc0_fifo_disable(struct drm_device *);
1254extern void nvc0_fifo_enable(struct drm_device *);
1255extern bool nvc0_fifo_reassign(struct drm_device *, bool);
1256extern bool nvc0_fifo_cache_pull(struct drm_device *, bool);
1257extern int nvc0_fifo_channel_id(struct drm_device *);
1258extern int nvc0_fifo_create_context(struct nouveau_channel *);
1259extern void nvc0_fifo_destroy_context(struct nouveau_channel *);
1260extern int nvc0_fifo_load_context(struct nouveau_channel *);
1261extern int nvc0_fifo_unload_context(struct drm_device *);
1262
1263/* nv04_graph.c */ 1168/* nv04_graph.c */
1264extern int nv04_graph_create(struct drm_device *); 1169extern int nv04_graph_create(struct drm_device *);
1265extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16); 1170extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16);
@@ -1278,18 +1183,23 @@ extern int nv20_graph_create(struct drm_device *);
1278 1183
1279/* nv40_graph.c */ 1184/* nv40_graph.c */
1280extern int nv40_graph_create(struct drm_device *); 1185extern int nv40_graph_create(struct drm_device *);
1281extern void nv40_grctx_init(struct nouveau_grctx *); 1186extern void nv40_grctx_init(struct drm_device *, u32 *size);
1187extern void nv40_grctx_fill(struct drm_device *, struct nouveau_gpuobj *);
1282 1188
1283/* nv50_graph.c */ 1189/* nv50_graph.c */
1284extern int nv50_graph_create(struct drm_device *); 1190extern int nv50_graph_create(struct drm_device *);
1285extern int nv50_grctx_init(struct nouveau_grctx *);
1286extern struct nouveau_enum nv50_data_error_names[]; 1191extern struct nouveau_enum nv50_data_error_names[];
1287extern int nv50_graph_isr_chid(struct drm_device *dev, u64 inst); 1192extern int nv50_graph_isr_chid(struct drm_device *dev, u64 inst);
1193extern int nv50_grctx_init(struct drm_device *, u32 *, u32, u32 *, u32 *);
1194extern void nv50_grctx_fill(struct drm_device *, struct nouveau_gpuobj *);
1288 1195
1289/* nvc0_graph.c */ 1196/* nvc0_graph.c */
1290extern int nvc0_graph_create(struct drm_device *); 1197extern int nvc0_graph_create(struct drm_device *);
1291extern int nvc0_graph_isr_chid(struct drm_device *dev, u64 inst); 1198extern int nvc0_graph_isr_chid(struct drm_device *dev, u64 inst);
1292 1199
1200/* nve0_graph.c */
1201extern int nve0_graph_create(struct drm_device *);
1202
1293/* nv84_crypt.c */ 1203/* nv84_crypt.c */
1294extern int nv84_crypt_create(struct drm_device *); 1204extern int nv84_crypt_create(struct drm_device *);
1295 1205
@@ -1415,6 +1325,7 @@ extern int nv04_crtc_create(struct drm_device *, int index);
1415 1325
1416/* nouveau_bo.c */ 1326/* nouveau_bo.c */
1417extern struct ttm_bo_driver nouveau_bo_driver; 1327extern struct ttm_bo_driver nouveau_bo_driver;
1328extern void nouveau_bo_move_init(struct nouveau_channel *);
1418extern int nouveau_bo_new(struct drm_device *, int size, int align, 1329extern int nouveau_bo_new(struct drm_device *, int size, int align,
1419 uint32_t flags, uint32_t tile_mode, 1330 uint32_t flags, uint32_t tile_mode,
1420 uint32_t tile_flags, 1331 uint32_t tile_flags,
@@ -1440,50 +1351,6 @@ extern int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
1440 struct nouveau_vma *); 1351 struct nouveau_vma *);
1441extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *); 1352extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
1442 1353
1443/* nouveau_fence.c */
1444struct nouveau_fence;
1445extern int nouveau_fence_init(struct drm_device *);
1446extern void nouveau_fence_fini(struct drm_device *);
1447extern int nouveau_fence_channel_init(struct nouveau_channel *);
1448extern void nouveau_fence_channel_fini(struct nouveau_channel *);
1449extern void nouveau_fence_update(struct nouveau_channel *);
1450extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **,
1451 bool emit);
1452extern int nouveau_fence_emit(struct nouveau_fence *);
1453extern void nouveau_fence_work(struct nouveau_fence *fence,
1454 void (*work)(void *priv, bool signalled),
1455 void *priv);
1456struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
1457
1458extern bool __nouveau_fence_signalled(void *obj, void *arg);
1459extern int __nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
1460extern int __nouveau_fence_flush(void *obj, void *arg);
1461extern void __nouveau_fence_unref(void **obj);
1462extern void *__nouveau_fence_ref(void *obj);
1463
1464static inline bool nouveau_fence_signalled(struct nouveau_fence *obj)
1465{
1466 return __nouveau_fence_signalled(obj, NULL);
1467}
1468static inline int
1469nouveau_fence_wait(struct nouveau_fence *obj, bool lazy, bool intr)
1470{
1471 return __nouveau_fence_wait(obj, NULL, lazy, intr);
1472}
1473extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
1474static inline int nouveau_fence_flush(struct nouveau_fence *obj)
1475{
1476 return __nouveau_fence_flush(obj, NULL);
1477}
1478static inline void nouveau_fence_unref(struct nouveau_fence **obj)
1479{
1480 __nouveau_fence_unref((void **)obj);
1481}
1482static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
1483{
1484 return __nouveau_fence_ref(obj);
1485}
1486
1487/* nouveau_gem.c */ 1354/* nouveau_gem.c */
1488extern int nouveau_gem_new(struct drm_device *, int size, int align, 1355extern int nouveau_gem_new(struct drm_device *, int size, int align,
1489 uint32_t domain, uint32_t tile_mode, 1356 uint32_t domain, uint32_t tile_mode,
@@ -1780,6 +1647,7 @@ nv44_graph_class(struct drm_device *dev)
1780#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL 0x00000001 1647#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL 0x00000001
1781#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002 1648#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002
1782#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004 1649#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004
1650#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD 0x00001000
1783#define NV84_SUBCHAN_NOTIFY_INTR 0x00000020 1651#define NV84_SUBCHAN_NOTIFY_INTR 0x00000020
1784#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024 1652#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
1785#define NV10_SUBCHAN_REF_CNT 0x00000050 1653#define NV10_SUBCHAN_REF_CNT 0x00000050
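
Note (not part of the patch): nouveau_drv.h now routes fencing through the new NVOBJ_ENGINE_FENCE exec engine and grows a per-device buffer-move hook inside the ttm struct. A minimal sketch of how a caller can use that hook, assuming the in-tree nouveau_bdev() helper; the function name is illustrative:

static int
example_bo_move_accel(struct nouveau_channel *chan,
                      struct ttm_buffer_object *bo,
                      struct ttm_mem_reg *old_mem,
                      struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);

        if (!dev_priv->ttm.move)        /* no copy engine bound yet */
                return -ENODEV;

        /* signature matches the hook declared in the hunk above */
        return dev_priv->ttm.move(chan, bo, old_mem, new_mem);
}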
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 8113e9201ed9..153b9a15469b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -153,7 +153,7 @@ nouveau_fbcon_sync(struct fb_info *info)
153 struct drm_device *dev = nfbdev->dev; 153 struct drm_device *dev = nfbdev->dev;
154 struct drm_nouveau_private *dev_priv = dev->dev_private; 154 struct drm_nouveau_private *dev_priv = dev->dev_private;
155 struct nouveau_channel *chan = dev_priv->channel; 155 struct nouveau_channel *chan = dev_priv->channel;
156 int ret, i; 156 int ret;
157 157
158 if (!chan || !chan->accel_done || in_interrupt() || 158 if (!chan || !chan->accel_done || in_interrupt() ||
159 info->state != FBINFO_STATE_RUNNING || 159 info->state != FBINFO_STATE_RUNNING ||
@@ -163,38 +163,8 @@ nouveau_fbcon_sync(struct fb_info *info)
163 if (!mutex_trylock(&chan->mutex)) 163 if (!mutex_trylock(&chan->mutex))
164 return 0; 164 return 0;
165 165
166 ret = RING_SPACE(chan, 4); 166 ret = nouveau_channel_idle(chan);
167 if (ret) {
168 mutex_unlock(&chan->mutex);
169 nouveau_fbcon_gpu_lockup(info);
170 return 0;
171 }
172
173 if (dev_priv->card_type >= NV_C0) {
174 BEGIN_NVC0(chan, 2, NvSub2D, 0x010c, 1);
175 OUT_RING (chan, 0);
176 BEGIN_NVC0(chan, 2, NvSub2D, 0x0100, 1);
177 OUT_RING (chan, 0);
178 } else {
179 BEGIN_RING(chan, 0, 0x0104, 1);
180 OUT_RING (chan, 0);
181 BEGIN_RING(chan, 0, 0x0100, 1);
182 OUT_RING (chan, 0);
183 }
184
185 nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff);
186 FIRE_RING(chan);
187 mutex_unlock(&chan->mutex); 167 mutex_unlock(&chan->mutex);
188
189 ret = -EBUSY;
190 for (i = 0; i < 100000; i++) {
191 if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) {
192 ret = 0;
193 break;
194 }
195 DRM_UDELAY(1);
196 }
197
198 if (ret) { 168 if (ret) {
199 nouveau_fbcon_gpu_lockup(info); 169 nouveau_fbcon_gpu_lockup(info);
200 return 0; 170 return 0;
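
Note (not part of the patch): the fbcon sync hook above now defers entirely to nouveau_channel_idle(), whose body lives in nouveau_channel.c and is not shown in this merge. A plausible implementation, consistent with the new two-argument fence API introduced below; treat this as a sketch, not the committed code:

static int
example_channel_idle(struct nouveau_channel *chan)
{
        struct nouveau_fence *fence = NULL;
        int ret;

        ret = nouveau_fence_new(chan, &fence);  /* allocate and emit */
        if (ret == 0) {
                /* lazy=false busy-waits, intr=false ignores signals */
                ret = nouveau_fence_wait(fence, false, false);
                nouveau_fence_unref(&fence);
        }
        return ret;
}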
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 965e3d2e8a7d..3c180493dab8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -32,220 +32,100 @@
32 32
33#include "nouveau_drv.h" 33#include "nouveau_drv.h"
34#include "nouveau_ramht.h" 34#include "nouveau_ramht.h"
35#include "nouveau_fence.h"
36#include "nouveau_software.h"
35#include "nouveau_dma.h" 37#include "nouveau_dma.h"
36 38
37#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10) 39void
38#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17) 40nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
39
40struct nouveau_fence {
41 struct nouveau_channel *channel;
42 struct kref refcount;
43 struct list_head entry;
44
45 uint32_t sequence;
46 bool signalled;
47
48 void (*work)(void *priv, bool signalled);
49 void *priv;
50};
51
52struct nouveau_semaphore {
53 struct kref ref;
54 struct drm_device *dev;
55 struct drm_mm_node *mem;
56};
57
58static inline struct nouveau_fence *
59nouveau_fence(void *sync_obj)
60{ 41{
61 return (struct nouveau_fence *)sync_obj; 42 struct nouveau_fence *fence, *fnext;
43 spin_lock(&fctx->lock);
44 list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
45 if (fence->work)
46 fence->work(fence->priv, false);
47 fence->channel = NULL;
48 list_del(&fence->head);
49 nouveau_fence_unref(&fence);
50 }
51 spin_unlock(&fctx->lock);
62} 52}
63 53
64static void 54void
65nouveau_fence_del(struct kref *ref) 55nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
66{ 56{
67 struct nouveau_fence *fence = 57 INIT_LIST_HEAD(&fctx->pending);
68 container_of(ref, struct nouveau_fence, refcount); 58 spin_lock_init(&fctx->lock);
69
70 nouveau_channel_ref(NULL, &fence->channel);
71 kfree(fence);
72} 59}
73 60
74void 61void
75nouveau_fence_update(struct nouveau_channel *chan) 62nouveau_fence_update(struct nouveau_channel *chan)
76{ 63{
77 struct drm_device *dev = chan->dev; 64 struct drm_device *dev = chan->dev;
78 struct nouveau_fence *tmp, *fence; 65 struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
79 uint32_t sequence; 66 struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
67 struct nouveau_fence *fence, *fnext;
80 68
81 spin_lock(&chan->fence.lock); 69 spin_lock(&fctx->lock);
82 70 list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
83 /* Fetch the last sequence if the channel is still up and running */ 71 if (priv->read(chan) < fence->sequence)
84 if (likely(!list_empty(&chan->fence.pending))) {
85 if (USE_REFCNT(dev))
86 sequence = nvchan_rd32(chan, 0x48);
87 else
88 sequence = atomic_read(&chan->fence.last_sequence_irq);
89
90 if (chan->fence.sequence_ack == sequence)
91 goto out;
92 chan->fence.sequence_ack = sequence;
93 }
94
95 list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
96 if (fence->sequence > chan->fence.sequence_ack)
97 break; 72 break;
98 73
99 fence->signalled = true;
100 list_del(&fence->entry);
101 if (fence->work) 74 if (fence->work)
102 fence->work(fence->priv, true); 75 fence->work(fence->priv, true);
103 76 fence->channel = NULL;
104 kref_put(&fence->refcount, nouveau_fence_del); 77 list_del(&fence->head);
105 }
106
107out:
108 spin_unlock(&chan->fence.lock);
109}
110
111int
112nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
113 bool emit)
114{
115 struct nouveau_fence *fence;
116 int ret = 0;
117
118 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
119 if (!fence)
120 return -ENOMEM;
121 kref_init(&fence->refcount);
122 nouveau_channel_ref(chan, &fence->channel);
123
124 if (emit)
125 ret = nouveau_fence_emit(fence);
126
127 if (ret)
128 nouveau_fence_unref(&fence); 78 nouveau_fence_unref(&fence);
129 *pfence = fence; 79 }
130 return ret; 80 spin_unlock(&fctx->lock);
131}
132
133struct nouveau_channel *
134nouveau_fence_channel(struct nouveau_fence *fence)
135{
136 return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
137} 81}
138 82
139int 83int
140nouveau_fence_emit(struct nouveau_fence *fence) 84nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
141{ 85{
142 struct nouveau_channel *chan = fence->channel;
143 struct drm_device *dev = chan->dev; 86 struct drm_device *dev = chan->dev;
144 struct drm_nouveau_private *dev_priv = dev->dev_private; 87 struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
88 struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
145 int ret; 89 int ret;
146 90
147 ret = RING_SPACE(chan, 2); 91 fence->channel = chan;
148 if (ret) 92 fence->timeout = jiffies + (3 * DRM_HZ);
149 return ret; 93 fence->sequence = ++fctx->sequence;
150
151 if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
152 nouveau_fence_update(chan);
153 94
154 BUG_ON(chan->fence.sequence == 95 ret = priv->emit(fence);
155 chan->fence.sequence_ack - 1); 96 if (!ret) {
97 kref_get(&fence->kref);
98 spin_lock(&fctx->lock);
99 list_add_tail(&fence->head, &fctx->pending);
100 spin_unlock(&fctx->lock);
156 } 101 }
157 102
158 fence->sequence = ++chan->fence.sequence; 103 return ret;
159
160 kref_get(&fence->refcount);
161 spin_lock(&chan->fence.lock);
162 list_add_tail(&fence->entry, &chan->fence.pending);
163 spin_unlock(&chan->fence.lock);
164
165 if (USE_REFCNT(dev)) {
166 if (dev_priv->card_type < NV_C0)
167 BEGIN_RING(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
168 else
169 BEGIN_NVC0(chan, 2, 0, NV10_SUBCHAN_REF_CNT, 1);
170 } else {
171 BEGIN_RING(chan, NvSubSw, 0x0150, 1);
172 }
173 OUT_RING (chan, fence->sequence);
174 FIRE_RING(chan);
175
176 return 0;
177}
178
179void
180nouveau_fence_work(struct nouveau_fence *fence,
181 void (*work)(void *priv, bool signalled),
182 void *priv)
183{
184 BUG_ON(fence->work);
185
186 spin_lock(&fence->channel->fence.lock);
187
188 if (fence->signalled) {
189 work(priv, true);
190 } else {
191 fence->work = work;
192 fence->priv = priv;
193 }
194
195 spin_unlock(&fence->channel->fence.lock);
196}
197
198void
199__nouveau_fence_unref(void **sync_obj)
200{
201 struct nouveau_fence *fence = nouveau_fence(*sync_obj);
202
203 if (fence)
204 kref_put(&fence->refcount, nouveau_fence_del);
205 *sync_obj = NULL;
206}
207
208void *
209__nouveau_fence_ref(void *sync_obj)
210{
211 struct nouveau_fence *fence = nouveau_fence(sync_obj);
212
213 kref_get(&fence->refcount);
214 return sync_obj;
215} 104}
216 105
217bool 106bool
218__nouveau_fence_signalled(void *sync_obj, void *sync_arg) 107nouveau_fence_done(struct nouveau_fence *fence)
219{ 108{
220 struct nouveau_fence *fence = nouveau_fence(sync_obj); 109 if (fence->channel)
221 struct nouveau_channel *chan = fence->channel; 110 nouveau_fence_update(fence->channel);
222 111 return !fence->channel;
223 if (fence->signalled)
224 return true;
225
226 nouveau_fence_update(chan);
227 return fence->signalled;
228} 112}
229 113
230int 114int
231__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) 115nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
232{ 116{
233 unsigned long timeout = jiffies + (3 * DRM_HZ);
234 unsigned long sleep_time = NSEC_PER_MSEC / 1000; 117 unsigned long sleep_time = NSEC_PER_MSEC / 1000;
235 ktime_t t; 118 ktime_t t;
236 int ret = 0; 119 int ret = 0;
237 120
238 while (1) { 121 while (!nouveau_fence_done(fence)) {
239 if (__nouveau_fence_signalled(sync_obj, sync_arg)) 122 if (fence->timeout && time_after_eq(jiffies, fence->timeout)) {
240 break;
241
242 if (time_after_eq(jiffies, timeout)) {
243 ret = -EBUSY; 123 ret = -EBUSY;
244 break; 124 break;
245 } 125 }
246 126
247 __set_current_state(intr ? TASK_INTERRUPTIBLE 127 __set_current_state(intr ? TASK_INTERRUPTIBLE :
248 : TASK_UNINTERRUPTIBLE); 128 TASK_UNINTERRUPTIBLE);
249 if (lazy) { 129 if (lazy) {
250 t = ktime_set(0, sleep_time); 130 t = ktime_set(0, sleep_time);
251 schedule_hrtimeout(&t, HRTIMER_MODE_REL); 131 schedule_hrtimeout(&t, HRTIMER_MODE_REL);
@@ -261,354 +141,72 @@ __nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
261 } 141 }
262 142
263 __set_current_state(TASK_RUNNING); 143 __set_current_state(TASK_RUNNING);
264
265 return ret; 144 return ret;
266} 145}
267 146
268static struct nouveau_semaphore *
269semaphore_alloc(struct drm_device *dev)
270{
271 struct drm_nouveau_private *dev_priv = dev->dev_private;
272 struct nouveau_semaphore *sema;
273 int size = (dev_priv->chipset < 0x84) ? 4 : 16;
274 int ret, i;
275
276 if (!USE_SEMA(dev))
277 return NULL;
278
279 sema = kmalloc(sizeof(*sema), GFP_KERNEL);
280 if (!sema)
281 goto fail;
282
283 ret = drm_mm_pre_get(&dev_priv->fence.heap);
284 if (ret)
285 goto fail;
286
287 spin_lock(&dev_priv->fence.lock);
288 sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
289 if (sema->mem)
290 sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
291 spin_unlock(&dev_priv->fence.lock);
292
293 if (!sema->mem)
294 goto fail;
295
296 kref_init(&sema->ref);
297 sema->dev = dev;
298 for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
299 nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);
300
301 return sema;
302fail:
303 kfree(sema);
304 return NULL;
305}
306
307static void
308semaphore_free(struct kref *ref)
309{
310 struct nouveau_semaphore *sema =
311 container_of(ref, struct nouveau_semaphore, ref);
312 struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
313
314 spin_lock(&dev_priv->fence.lock);
315 drm_mm_put_block(sema->mem);
316 spin_unlock(&dev_priv->fence.lock);
317
318 kfree(sema);
319}
320
321static void
322semaphore_work(void *priv, bool signalled)
323{
324 struct nouveau_semaphore *sema = priv;
325 struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
326
327 if (unlikely(!signalled))
328 nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
329
330 kref_put(&sema->ref, semaphore_free);
331}
332
333static int
334semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
335{
336 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
337 struct nouveau_fence *fence = NULL;
338 u64 offset = chan->fence.vma.offset + sema->mem->start;
339 int ret;
340
341 if (dev_priv->chipset < 0x84) {
342 ret = RING_SPACE(chan, 4);
343 if (ret)
344 return ret;
345
346 BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 3);
347 OUT_RING (chan, NvSema);
348 OUT_RING (chan, offset);
349 OUT_RING (chan, 1);
350 } else
351 if (dev_priv->chipset < 0xc0) {
352 ret = RING_SPACE(chan, 7);
353 if (ret)
354 return ret;
355
356 BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
357 OUT_RING (chan, chan->vram_handle);
358 BEGIN_RING(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
359 OUT_RING (chan, upper_32_bits(offset));
360 OUT_RING (chan, lower_32_bits(offset));
361 OUT_RING (chan, 1);
362 OUT_RING (chan, 1); /* ACQUIRE_EQ */
363 } else {
364 ret = RING_SPACE(chan, 5);
365 if (ret)
366 return ret;
367
368 BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
369 OUT_RING (chan, upper_32_bits(offset));
370 OUT_RING (chan, lower_32_bits(offset));
371 OUT_RING (chan, 1);
372 OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
373 }
374
375 /* Delay semaphore destruction until its work is done */
376 ret = nouveau_fence_new(chan, &fence, true);
377 if (ret)
378 return ret;
379
380 kref_get(&sema->ref);
381 nouveau_fence_work(fence, semaphore_work, sema);
382 nouveau_fence_unref(&fence);
383 return 0;
384}
385
386static int
387semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
388{
389 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
390 struct nouveau_fence *fence = NULL;
391 u64 offset = chan->fence.vma.offset + sema->mem->start;
392 int ret;
393
394 if (dev_priv->chipset < 0x84) {
395 ret = RING_SPACE(chan, 5);
396 if (ret)
397 return ret;
398
399 BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
400 OUT_RING (chan, NvSema);
401 OUT_RING (chan, offset);
402 BEGIN_RING(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
403 OUT_RING (chan, 1);
404 } else
405 if (dev_priv->chipset < 0xc0) {
406 ret = RING_SPACE(chan, 7);
407 if (ret)
408 return ret;
409
410 BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
411 OUT_RING (chan, chan->vram_handle);
412 BEGIN_RING(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
413 OUT_RING (chan, upper_32_bits(offset));
414 OUT_RING (chan, lower_32_bits(offset));
415 OUT_RING (chan, 1);
416 OUT_RING (chan, 2); /* RELEASE */
417 } else {
418 ret = RING_SPACE(chan, 5);
419 if (ret)
420 return ret;
421
422 BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
423 OUT_RING (chan, upper_32_bits(offset));
424 OUT_RING (chan, lower_32_bits(offset));
425 OUT_RING (chan, 1);
426 OUT_RING (chan, 0x1002); /* RELEASE */
427 }
428
429 /* Delay semaphore destruction until its work is done */
430 ret = nouveau_fence_new(chan, &fence, true);
431 if (ret)
432 return ret;
433
434 kref_get(&sema->ref);
435 nouveau_fence_work(fence, semaphore_work, sema);
436 nouveau_fence_unref(&fence);
437 return 0;
438}
439
440int 147int
441nouveau_fence_sync(struct nouveau_fence *fence, 148nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
442 struct nouveau_channel *wchan)
443{ 149{
444 struct nouveau_channel *chan = nouveau_fence_channel(fence); 150 struct drm_device *dev = chan->dev;
445 struct drm_device *dev = wchan->dev; 151 struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
446 struct nouveau_semaphore *sema; 152 struct nouveau_channel *prev;
447 int ret = 0; 153 int ret = 0;
448 154
449 if (likely(!chan || chan == wchan || 155 prev = fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
450 nouveau_fence_signalled(fence))) 156 if (prev) {
451 goto out; 157 if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
452 158 ret = priv->sync(fence, prev, chan);
453 sema = semaphore_alloc(dev); 159 if (unlikely(ret))
454 if (!sema) { 160 ret = nouveau_fence_wait(fence, true, false);
455 /* Early card or broken userspace, fall back to 161 }
456 * software sync. */ 162 nouveau_channel_put_unlocked(&prev);
457 ret = nouveau_fence_wait(fence, true, false);
458 goto out;
459 }
460
461 /* try to take chan's mutex, if we can't take it right away
462 * we have to fallback to software sync to prevent locking
463 * order issues
464 */
465 if (!mutex_trylock(&chan->mutex)) {
466 ret = nouveau_fence_wait(fence, true, false);
467 goto out_unref;
468 } 163 }
469 164
470 /* Make wchan wait until it gets signalled */
471 ret = semaphore_acquire(wchan, sema);
472 if (ret)
473 goto out_unlock;
474
475 /* Signal the semaphore from chan */
476 ret = semaphore_release(chan, sema);
477
478out_unlock:
479 mutex_unlock(&chan->mutex);
480out_unref:
481 kref_put(&sema->ref, semaphore_free);
482out:
483 if (chan)
484 nouveau_channel_put_unlocked(&chan);
485 return ret; 165 return ret;
486} 166}
487 167
488int 168static void
489__nouveau_fence_flush(void *sync_obj, void *sync_arg) 169nouveau_fence_del(struct kref *kref)
490{ 170{
491 return 0; 171 struct nouveau_fence *fence = container_of(kref, typeof(*fence), kref);
172 kfree(fence);
492} 173}
493 174
494int 175void
495nouveau_fence_channel_init(struct nouveau_channel *chan) 176nouveau_fence_unref(struct nouveau_fence **pfence)
496{ 177{
497 struct drm_device *dev = chan->dev; 178 if (*pfence)
498 struct drm_nouveau_private *dev_priv = dev->dev_private; 179 kref_put(&(*pfence)->kref, nouveau_fence_del);
499 struct nouveau_gpuobj *obj = NULL; 180 *pfence = NULL;
500 int ret;
501
502 if (dev_priv->card_type < NV_C0) {
503 /* Create an NV_SW object for various sync purposes */
504 ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
505 if (ret)
506 return ret;
507
508 ret = RING_SPACE(chan, 2);
509 if (ret)
510 return ret;
511
512 BEGIN_RING(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
513 OUT_RING (chan, NvSw);
514 FIRE_RING (chan);
515 }
516
517 /* Setup area of memory shared between all channels for x-chan sync */
518 if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
519 struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
520
521 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
522 mem->start << PAGE_SHIFT,
523 mem->size, NV_MEM_ACCESS_RW,
524 NV_MEM_TARGET_VRAM, &obj);
525 if (ret)
526 return ret;
527
528 ret = nouveau_ramht_insert(chan, NvSema, obj);
529 nouveau_gpuobj_ref(NULL, &obj);
530 if (ret)
531 return ret;
532 } else
533 if (USE_SEMA(dev)) {
534 /* map fence bo into channel's vm */
535 ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
536 &chan->fence.vma);
537 if (ret)
538 return ret;
539 }
540
541 atomic_set(&chan->fence.last_sequence_irq, 0);
542 return 0;
543} 181}
544 182
545void 183struct nouveau_fence *
546nouveau_fence_channel_fini(struct nouveau_channel *chan) 184nouveau_fence_ref(struct nouveau_fence *fence)
547{ 185{
548 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 186 kref_get(&fence->kref);
549 struct nouveau_fence *tmp, *fence; 187 return fence;
550
551 spin_lock(&chan->fence.lock);
552 list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
553 fence->signalled = true;
554 list_del(&fence->entry);
555
556 if (unlikely(fence->work))
557 fence->work(fence->priv, false);
558
559 kref_put(&fence->refcount, nouveau_fence_del);
560 }
561 spin_unlock(&chan->fence.lock);
562
563 nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
564} 188}
565 189
566int 190int
567nouveau_fence_init(struct drm_device *dev) 191nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
568{ 192{
569 struct drm_nouveau_private *dev_priv = dev->dev_private; 193 struct nouveau_fence *fence;
570 int size = (dev_priv->chipset < 0x84) ? 4096 : 16384; 194 int ret = 0;
571 int ret;
572
573 /* Create a shared VRAM heap for cross-channel sync. */
574 if (USE_SEMA(dev)) {
575 ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
576 0, 0, NULL, &dev_priv->fence.bo);
577 if (ret)
578 return ret;
579 195
580 ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM); 196 if (unlikely(!chan->engctx[NVOBJ_ENGINE_FENCE]))
581 if (ret) 197 return -ENODEV;
582 goto fail;
583 198
584 ret = nouveau_bo_map(dev_priv->fence.bo); 199 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
585 if (ret) 200 if (!fence)
586 goto fail; 201 return -ENOMEM;
202 kref_init(&fence->kref);
587 203
588 ret = drm_mm_init(&dev_priv->fence.heap, 0, 204 if (chan) {
589 dev_priv->fence.bo->bo.mem.size); 205 ret = nouveau_fence_emit(fence, chan);
590 if (ret) 206 if (ret)
591 goto fail; 207 nouveau_fence_unref(&fence);
592
593 spin_lock_init(&dev_priv->fence.lock);
594 } 208 }
595 209
596 return 0; 210 *pfence = fence;
597fail:
598 nouveau_bo_unmap(dev_priv->fence.bo);
599 nouveau_bo_ref(NULL, &dev_priv->fence.bo);
600 return ret; 211 return ret;
601} 212}
602
603void
604nouveau_fence_fini(struct drm_device *dev)
605{
606 struct drm_nouveau_private *dev_priv = dev->dev_private;
607
608 if (USE_SEMA(dev)) {
609 drm_mm_takedown(&dev_priv->fence.heap);
610 nouveau_bo_unmap(dev_priv->fence.bo);
611 nouveau_bo_unpin(dev_priv->fence.bo);
612 nouveau_bo_ref(NULL, &dev_priv->fence.bo);
613 }
614}
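
Note (not part of the patch): the rewritten core reduces the fence lifecycle to new/emit, done/wait, unref. A minimal round trip against the signatures declared in the new nouveau_fence.h below, assuming the driver's usual includes:

#include "nouveau_drv.h"
#include "nouveau_fence.h"

static int
example_fence_roundtrip(struct nouveau_channel *chan)
{
        struct nouveau_fence *fence = NULL;
        int ret;

        /* allocates and, because chan is non-NULL, emits via priv->emit() */
        ret = nouveau_fence_new(chan, &fence);
        if (ret)
                return ret;

        /* lazy=true sleeps between polls; intr=false is uninterruptible */
        ret = nouveau_fence_wait(fence, true, false);

        nouveau_fence_unref(&fence);    /* drops the caller's reference */
        return ret;
}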
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
new file mode 100644
index 000000000000..82ba733393ae
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -0,0 +1,52 @@
1#ifndef __NOUVEAU_FENCE_H__
2#define __NOUVEAU_FENCE_H__
3
4struct nouveau_fence {
5 struct list_head head;
6 struct kref kref;
7
8 struct nouveau_channel *channel;
9 unsigned long timeout;
10 u32 sequence;
11
12 void (*work)(void *priv, bool signalled);
13 void *priv;
14};
15
16int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **);
17struct nouveau_fence *
18nouveau_fence_ref(struct nouveau_fence *);
19void nouveau_fence_unref(struct nouveau_fence **);
20
21int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
22bool nouveau_fence_done(struct nouveau_fence *);
23int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
24int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
25void nouveau_fence_idle(struct nouveau_channel *);
26void nouveau_fence_update(struct nouveau_channel *);
27
28struct nouveau_fence_chan {
29 struct list_head pending;
30 spinlock_t lock;
31 u32 sequence;
32};
33
34struct nouveau_fence_priv {
35 struct nouveau_exec_engine engine;
36 int (*emit)(struct nouveau_fence *);
37 int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
38 struct nouveau_channel *);
39 u32 (*read)(struct nouveau_channel *);
40};
41
42void nouveau_fence_context_new(struct nouveau_fence_chan *);
43void nouveau_fence_context_del(struct nouveau_fence_chan *);
44
45int nv04_fence_create(struct drm_device *dev);
46int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32);
47
48int nv10_fence_create(struct drm_device *dev);
49int nv84_fence_create(struct drm_device *dev);
50int nvc0_fence_create(struct drm_device *dev);
51
52#endif
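
Note (not part of the patch): per-chipset backends plug into this header through the three nouveau_fence_priv hooks: emit() writes a sequence from the ring, read() fetches the last sequence the GPU completed, sync() makes one channel wait on another. A hedged skeleton of such a backend; the read register mirrors the NV10_SUBCHAN_REF_CNT readback removed from nouveau_fence.c above, everything else is illustrative:

struct example_fence_priv {
        struct nouveau_fence_priv base;
        /* backend-specific state, e.g. a shared semaphore buffer */
};

static u32
example_fence_read(struct nouveau_channel *chan)
{
        return nvchan_rd32(chan, 0x0048);       /* NV10+ ref-count readback */
}

static int
example_fence_create(struct drm_device *dev)
{
        struct example_fence_priv *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->base.read = example_fence_read;
        /* priv->base.emit and priv->base.sync are set likewise, then the
         * engine is registered through the NVOBJ_ENGINE_ADD() macro from
         * nouveau_drv.h under NVOBJ_ENGINE_FENCE */
        return 0;
}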
diff --git a/drivers/gpu/drm/nouveau/nouveau_fifo.h b/drivers/gpu/drm/nouveau/nouveau_fifo.h
new file mode 100644
index 000000000000..ce99cab2f257
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fifo.h
@@ -0,0 +1,32 @@
1#ifndef __NOUVEAU_FIFO_H__
2#define __NOUVEAU_FIFO_H__
3
4struct nouveau_fifo_priv {
5 struct nouveau_exec_engine base;
6 u32 channels;
7};
8
9struct nouveau_fifo_chan {
10};
11
12bool nv04_fifo_cache_pull(struct drm_device *, bool);
13void nv04_fifo_context_del(struct nouveau_channel *, int);
14int nv04_fifo_fini(struct drm_device *, int, bool);
15int nv04_fifo_init(struct drm_device *, int);
16void nv04_fifo_isr(struct drm_device *);
17void nv04_fifo_destroy(struct drm_device *, int);
18
19void nv50_fifo_playlist_update(struct drm_device *);
20void nv50_fifo_destroy(struct drm_device *, int);
21void nv50_fifo_tlb_flush(struct drm_device *, int);
22
23int nv04_fifo_create(struct drm_device *);
24int nv10_fifo_create(struct drm_device *);
25int nv17_fifo_create(struct drm_device *);
26int nv40_fifo_create(struct drm_device *);
27int nv50_fifo_create(struct drm_device *);
28int nv84_fifo_create(struct drm_device *);
29int nvc0_fifo_create(struct drm_device *);
30int nve0_fifo_create(struct drm_device *);
31
32#endif
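
Note (not part of the patch): with PFIFO turned into a regular exec engine, per-device FIFO state is recovered with nv_engine() instead of the removed dev_priv->engine.fifo. A short sketch mirroring the channel-id bounds check in the nouveau_object.c hunk later in this patch:

static bool
example_chid_valid(struct drm_device *dev, int chid)
{
        struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);

        /* pfifo->channels replaces dev_priv->engine.fifo.channels */
        return pfifo && chid >= 0 && chid < pfifo->channels;
}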
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 666dad0717a9..30f542316944 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -30,6 +30,7 @@
30#include "nouveau_drv.h" 30#include "nouveau_drv.h"
31#include "nouveau_drm.h" 31#include "nouveau_drm.h"
32#include "nouveau_dma.h" 32#include "nouveau_dma.h"
33#include "nouveau_fence.h"
33 34
34#define nouveau_gem_pushbuf_sync(chan) 0 35#define nouveau_gem_pushbuf_sync(chan) 0
35 36
@@ -708,7 +709,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
708 } 709 }
709 710
710 if (chan->dma.ib_max) { 711 if (chan->dma.ib_max) {
711 ret = nouveau_dma_wait(chan, req->nr_push + 1, 6); 712 ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
712 if (ret) { 713 if (ret) {
713 NV_INFO(dev, "nv50cal_space: %d\n", ret); 714 NV_INFO(dev, "nv50cal_space: %d\n", ret);
714 goto out; 715 goto out;
@@ -778,7 +779,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
778 } 779 }
779 } 780 }
780 781
781 ret = nouveau_fence_new(chan, &fence, true); 782 ret = nouveau_fence_new(chan, &fence);
782 if (ret) { 783 if (ret) {
783 NV_ERROR(dev, "error fencing pushbuf: %d\n", ret); 784 NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
784 WIND_RING(chan); 785 WIND_RING(chan);
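
Note (not part of the patch): the reservation slop in the pushbuf ioctl grows from 6 to 16 ring slots, presumably because the kickoff path can now append fence and semaphore methods of its own. Illustrative only:

static int
example_reserve_pushbuf(struct nouveau_channel *chan, int nr_push)
{
        /* +1 for the caller's own jump; 16 slots of slop for methods the
         * fence/sync code may append at kickoff (was 6 before this series) */
        return nouveau_dma_wait(chan, nr_push + 1, 16);
}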
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.c b/drivers/gpu/drm/nouveau/nouveau_gpio.c
index a580cc62337a..82c19e82ff02 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gpio.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gpio.c
@@ -387,7 +387,7 @@ nouveau_gpio_reset(struct drm_device *dev)
387 if (dev_priv->card_type >= NV_D0) { 387 if (dev_priv->card_type >= NV_D0) {
388 nv_mask(dev, 0x00d610 + (line * 4), 0xff, unk0); 388 nv_mask(dev, 0x00d610 + (line * 4), 0xff, unk0);
389 if (unk1--) 389 if (unk1--)
390 nv_mask(dev, 0x00d640 + (unk1 * 4), 0xff, line); 390 nv_mask(dev, 0x00d740 + (unk1 * 4), 0xff, line);
391 } else 391 } else
392 if (dev_priv->card_type >= NV_50) { 392 if (dev_priv->card_type >= NV_50) {
393 static const u32 regs[] = { 0xe100, 0xe28c }; 393 static const u32 regs[] = { 0xe100, 0xe28c };
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/nouveau_grctx.h
index 86c2e374e938..b0795ececbda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.h
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.h
@@ -18,7 +18,6 @@ struct nouveau_grctx {
18 uint32_t ctxvals_base; 18 uint32_t ctxvals_base;
19}; 19};
20 20
21#ifdef CP_CTX
22static inline void 21static inline void
23cp_out(struct nouveau_grctx *ctx, uint32_t inst) 22cp_out(struct nouveau_grctx *ctx, uint32_t inst)
24{ 23{
@@ -88,10 +87,8 @@ _cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name)
88 (state ? 0 : CP_BRA_IF_CLEAR)); 87 (state ? 0 : CP_BRA_IF_CLEAR));
89} 88}
90#define cp_bra(c, f, s, n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n) 89#define cp_bra(c, f, s, n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
91#ifdef CP_BRA_MOD
92#define cp_cal(c, f, s, n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n) 90#define cp_cal(c, f, s, n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
93#define cp_ret(c, f, s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0) 91#define cp_ret(c, f, s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0)
94#endif
95 92
96static inline void 93static inline void
97_cp_wait(struct nouveau_grctx *ctx, int flag, int state) 94_cp_wait(struct nouveau_grctx *ctx, int flag, int state)
@@ -128,6 +125,5 @@ gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
128 125
129 nv_wo32(ctx->data, reg * 4, val); 126 nv_wo32(ctx->data, reg * 4, val);
130} 127}
131#endif
132 128
133#endif 129#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index ba896e54b799..b87ad3bd7739 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -1018,11 +1018,6 @@ nv_load_state_ext(struct drm_device *dev, int head,
1018 } 1018 }
1019 1019
1020 NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start); 1020 NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);
1021
1022 /* Enable vblank interrupts. */
1023 NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0,
1024 (dev->vblank_enabled[head] ? 1 : 0));
1025 NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
1026} 1021}
1027 1022
1028static void 1023static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index bb2f0a43f590..5b498ea32e14 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -39,6 +39,8 @@
39#include "nouveau_pm.h" 39#include "nouveau_pm.h"
40#include "nouveau_mm.h" 40#include "nouveau_mm.h"
41#include "nouveau_vm.h" 41#include "nouveau_vm.h"
42#include "nouveau_fifo.h"
43#include "nouveau_fence.h"
42 44
43/* 45/*
44 * NV10-NV40 tiling helpers 46 * NV10-NV40 tiling helpers
@@ -50,7 +52,6 @@ nv10_mem_update_tile_region(struct drm_device *dev,
50 uint32_t size, uint32_t pitch, uint32_t flags) 52 uint32_t size, uint32_t pitch, uint32_t flags)
51{ 53{
52 struct drm_nouveau_private *dev_priv = dev->dev_private; 54 struct drm_nouveau_private *dev_priv = dev->dev_private;
53 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
54 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; 55 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
55 int i = tile - dev_priv->tile.reg, j; 56 int i = tile - dev_priv->tile.reg, j;
56 unsigned long save; 57 unsigned long save;
@@ -64,8 +65,8 @@ nv10_mem_update_tile_region(struct drm_device *dev,
64 pfb->init_tile_region(dev, i, addr, size, pitch, flags); 65 pfb->init_tile_region(dev, i, addr, size, pitch, flags);
65 66
66 spin_lock_irqsave(&dev_priv->context_switch_lock, save); 67 spin_lock_irqsave(&dev_priv->context_switch_lock, save);
67 pfifo->reassign(dev, false); 68 nv_wr32(dev, NV03_PFIFO_CACHES, 0);
68 pfifo->cache_pull(dev, false); 69 nv04_fifo_cache_pull(dev, false);
69 70
70 nouveau_wait_for_idle(dev); 71 nouveau_wait_for_idle(dev);
71 72
@@ -75,8 +76,8 @@ nv10_mem_update_tile_region(struct drm_device *dev,
75 dev_priv->eng[j]->set_tile_region(dev, i); 76 dev_priv->eng[j]->set_tile_region(dev, i);
76 } 77 }
77 78
78 pfifo->cache_pull(dev, true); 79 nv04_fifo_cache_pull(dev, true);
79 pfifo->reassign(dev, true); 80 nv_wr32(dev, NV03_PFIFO_CACHES, 1);
80 spin_unlock_irqrestore(&dev_priv->context_switch_lock, save); 81 spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
81} 82}
82 83
@@ -89,7 +90,7 @@ nv10_mem_get_tile_region(struct drm_device *dev, int i)
89 spin_lock(&dev_priv->tile.lock); 90 spin_lock(&dev_priv->tile.lock);
90 91
91 if (!tile->used && 92 if (!tile->used &&
92 (!tile->fence || nouveau_fence_signalled(tile->fence))) 93 (!tile->fence || nouveau_fence_done(tile->fence)))
93 tile->used = true; 94 tile->used = true;
94 else 95 else
95 tile = NULL; 96 tile = NULL;
@@ -843,6 +844,7 @@ nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
843 ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t); 844 ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
844 break; 845 break;
845 case NV_C0: 846 case NV_C0:
847 case NV_D0:
846 ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t); 848 ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
847 break; 849 break;
848 default: 850 default:
@@ -977,6 +979,8 @@ nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
977 break; 979 break;
978 case NV_MEM_TYPE_DDR3: 980 case NV_MEM_TYPE_DDR3:
979 tDLLK = 12000; 981 tDLLK = 12000;
982 tCKSRE = 2000;
983 tXS = 1000;
980 mr1_dlloff = 0x00000001; 984 mr1_dlloff = 0x00000001;
981 break; 985 break;
982 case NV_MEM_TYPE_GDDR3: 986 case NV_MEM_TYPE_GDDR3:
@@ -1023,6 +1027,7 @@ nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
1023 exec->refresh_self(exec, false); 1027 exec->refresh_self(exec, false);
1024 exec->refresh_auto(exec, true); 1028 exec->refresh_auto(exec, true);
1025 exec->wait(exec, tXS); 1029 exec->wait(exec, tXS);
1030 exec->wait(exec, tXS);
1026 1031
1027 /* update MRs */ 1032 /* update MRs */
1028 if (mr[2] != info->mr[2]) { 1033 if (mr[2] != info->mr[2]) {
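
Note (not part of the patch): the tiling update above now open-codes the PFIFO freeze/thaw bracket that the removed pfifo->reassign()/cache_pull() vfuncs used to provide. The same bracket, factored into helpers for clarity; the helper names are illustrative:

static void
example_pfifo_freeze(struct drm_device *dev)
{
        nv_wr32(dev, NV03_PFIFO_CACHES, 0);     /* stop channel reassignment */
        nv04_fifo_cache_pull(dev, false);       /* stop CACHE1 pulls */
}

static void
example_pfifo_thaw(struct drm_device *dev)
{
        nv04_fifo_cache_pull(dev, true);
        nv_wr32(dev, NV03_PFIFO_CACHES, 1);
}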
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index cc419fae794b..b190cc01c820 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -34,9 +34,10 @@
34#include "drm.h" 34#include "drm.h"
35#include "nouveau_drv.h" 35#include "nouveau_drv.h"
36#include "nouveau_drm.h" 36#include "nouveau_drm.h"
37#include "nouveau_fifo.h"
37#include "nouveau_ramht.h" 38#include "nouveau_ramht.h"
39#include "nouveau_software.h"
38#include "nouveau_vm.h" 40#include "nouveau_vm.h"
39#include "nv50_display.h"
40 41
41struct nouveau_gpuobj_method { 42struct nouveau_gpuobj_method {
42 struct list_head head; 43 struct list_head head;
@@ -120,12 +121,13 @@ nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
120 u32 class, u32 mthd, u32 data) 121 u32 class, u32 mthd, u32 data)
121{ 122{
122 struct drm_nouveau_private *dev_priv = dev->dev_private; 123 struct drm_nouveau_private *dev_priv = dev->dev_private;
124 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
123 struct nouveau_channel *chan = NULL; 125 struct nouveau_channel *chan = NULL;
124 unsigned long flags; 126 unsigned long flags;
125 int ret = -EINVAL; 127 int ret = -EINVAL;
126 128
127 spin_lock_irqsave(&dev_priv->channels.lock, flags); 129 spin_lock_irqsave(&dev_priv->channels.lock, flags);
128 if (chid >= 0 && chid < dev_priv->engine.fifo.channels) 130 if (chid >= 0 && chid < pfifo->channels)
129 chan = dev_priv->channels.ptr[chid]; 131 chan = dev_priv->channels.ptr[chid];
130 if (chan) 132 if (chan)
131 ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data); 133 ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
@@ -133,37 +135,6 @@ nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
133 return ret; 135 return ret;
134} 136}
135 137
136/* NVidia uses context objects to drive drawing operations.
137
138 Context objects can be selected into 8 subchannels in the FIFO,
139 and then used via DMA command buffers.
140
141 A context object is referenced by a user defined handle (CARD32). The HW
142 looks up graphics objects in a hash table in the instance RAM.
143
144 An entry in the hash table consists of 2 CARD32. The first CARD32 contains
145 the handle, the second one a bitfield, that contains the address of the
146 object in instance RAM.
147
148 The format of the second CARD32 seems to be:
149
150 NV4 to NV30:
151
152 15: 0 instance_addr >> 4
153 17:16 engine (here uses 1 = graphics)
154 28:24 channel id (here uses 0)
155 31 valid (use 1)
156
157 NV40:
158
159 15: 0 instance_addr >> 4 (maybe 19-0)
160 21:20 engine (here uses 1 = graphics)
161 I'm unsure about the other bits, but using 0 seems to work.
162
163 The key into the hash table depends on the object handle and channel id and
164 is given as:
165*/
166
167int 138int
168nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, 139nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
169 uint32_t size, int align, uint32_t flags, 140 uint32_t size, int align, uint32_t flags,
@@ -267,7 +238,7 @@ nouveau_gpuobj_takedown(struct drm_device *dev)
267 kfree(oc); 238 kfree(oc);
268 } 239 }
269 240
270 BUG_ON(!list_empty(&dev_priv->gpuobj_list)); 241 WARN_ON(!list_empty(&dev_priv->gpuobj_list));
271} 242}
272 243
273 244
@@ -361,34 +332,6 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
361 return 0; 332 return 0;
362} 333}
363 334
364/*
365 DMA objects are used to reference a piece of memory in the
366 framebuffer, PCI or AGP address space. Each object is 16 bytes big
367 and looks as follows:
368
369 entry[0]
370 11:0 class (seems like I can always use 0 here)
371 12 page table present?
372 13 page entry linear?
373 15:14 access: 0 rw, 1 ro, 2 wo
374 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
375 31:20 dma adjust (bits 0-11 of the address)
376 entry[1]
377 dma limit (size of transfer)
378 entry[X]
379 1 0 readonly, 1 readwrite
380 31:12 dma frame address of the page (bits 12-31 of the address)
381 entry[N]
382 page table terminator, same value as the first pte, as does nvidia
383 rivatv uses 0xffffffff
384
385 Non linear page tables need a list of frame addresses afterwards,
386 the rivatv project has some info on this.
387
388 The method below creates a DMA object in instance RAM and returns a handle
389 to it that can be used to set up context objects.
390*/
391
392void 335void
393nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class, 336nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
394 u64 base, u64 size, int target, int access, 337 u64 base, u64 size, int target, int access,
@@ -540,82 +483,6 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
540 return 0; 483 return 0;
541} 484}
542 485
543/* Context objects in the instance RAM have the following structure.
544 * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes.
545
546 NV4 - NV30:
547
548 entry[0]
549 11:0 class
550 12 chroma key enable
551 13 user clip enable
552 14 swizzle enable
553 17:15 patch config:
554 scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
555 18 synchronize enable
556 19 endian: 1 big, 0 little
557 21:20 dither mode
558 23 single step enable
559 24 patch status: 0 invalid, 1 valid
560 25 context_surface 0: 1 valid
561 26 context surface 1: 1 valid
562 27 context pattern: 1 valid
563 28 context rop: 1 valid
564 29,30 context beta, beta4
565 entry[1]
566 7:0 mono format
567 15:8 color format
568 31:16 notify instance address
569 entry[2]
570 15:0 dma 0 instance address
571 31:16 dma 1 instance address
572 entry[3]
573 dma method traps
574
575 NV40:
576 No idea what the exact format is. Here's what can be deducted:
577
578 entry[0]:
579 11:0 class (maybe uses more bits here?)
580 17 user clip enable
581 21:19 patch config
582 25 patch status valid ?
583 entry[1]:
584 15:0 DMA notifier (maybe 20:0)
585 entry[2]:
586 15:0 DMA 0 instance (maybe 20:0)
587 24 big endian
588 entry[3]:
589 15:0 DMA 1 instance (maybe 20:0)
590 entry[4]:
591 entry[5]:
592 set to 0?
593*/
594static int
595nouveau_gpuobj_sw_new(struct nouveau_channel *chan, u32 handle, u16 class)
596{
597 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
598 struct nouveau_gpuobj *gpuobj;
599 int ret;
600
601 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
602 if (!gpuobj)
603 return -ENOMEM;
604 gpuobj->dev = chan->dev;
605 gpuobj->engine = NVOBJ_ENGINE_SW;
606 gpuobj->class = class;
607 kref_init(&gpuobj->refcount);
608 gpuobj->cinst = 0x40;
609
610 spin_lock(&dev_priv->ramin_lock);
611 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
612 spin_unlock(&dev_priv->ramin_lock);
613
614 ret = nouveau_ramht_insert(chan, handle, gpuobj);
615 nouveau_gpuobj_ref(NULL, &gpuobj);
616 return ret;
617}
618
619int 486int
620nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class) 487nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
621{ 488{
@@ -632,9 +499,6 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
632 if (oc->id != class) 499 if (oc->id != class)
633 continue; 500 continue;
634 501
635 if (oc->engine == NVOBJ_ENGINE_SW)
636 return nouveau_gpuobj_sw_new(chan, handle, class);
637
638 if (!chan->engctx[oc->engine]) { 502 if (!chan->engctx[oc->engine]) {
639 ret = eng->context_new(chan, oc->engine); 503 ret = eng->context_new(chan, oc->engine);
640 if (ret) 504 if (ret)
@@ -644,7 +508,6 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
644 return eng->object_new(chan, oc->engine, handle, class); 508 return eng->object_new(chan, oc->engine, handle, class);
645 } 509 }
646 510
647 NV_ERROR(dev, "illegal object class: 0x%x\n", class);
648 return -EINVAL; 511 return -EINVAL;
649} 512}
650 513
@@ -693,11 +556,10 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
693static int 556static int
694nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm) 557nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
695{ 558{
696 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
697 struct drm_device *dev = chan->dev; 559 struct drm_device *dev = chan->dev;
698 struct nouveau_gpuobj *pgd = NULL; 560 struct nouveau_gpuobj *pgd = NULL;
699 struct nouveau_vm_pgd *vpgd; 561 struct nouveau_vm_pgd *vpgd;
700 int ret, i; 562 int ret;
701 563
702 ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin); 564 ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
703 if (ret) 565 if (ret)
@@ -722,19 +584,6 @@ nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
722 nv_wo32(chan->ramin, 0x0208, 0xffffffff); 584 nv_wo32(chan->ramin, 0x0208, 0xffffffff);
723 nv_wo32(chan->ramin, 0x020c, 0x000000ff); 585 nv_wo32(chan->ramin, 0x020c, 0x000000ff);
724 586
725 /* map display semaphore buffers into channel's vm */
726 for (i = 0; i < dev->mode_config.num_crtc; i++) {
727 struct nouveau_bo *bo;
728 if (dev_priv->card_type >= NV_D0)
729 bo = nvd0_display_crtc_sema(dev, i);
730 else
731 bo = nv50_display(dev)->crtc[i].sem.bo;
732
733 ret = nouveau_bo_vma_add(bo, chan->vm, &chan->dispc_vma[i]);
734 if (ret)
735 return ret;
736 }
737
738 return 0; 587 return 0;
739} 588}
740 589
@@ -747,7 +596,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
747 struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv); 596 struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
748 struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm; 597 struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
749 struct nouveau_gpuobj *vram = NULL, *tt = NULL; 598 struct nouveau_gpuobj *vram = NULL, *tt = NULL;
750 int ret, i; 599 int ret;
751 600
752 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); 601 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
753 if (dev_priv->card_type >= NV_C0) 602 if (dev_priv->card_type >= NV_C0)
@@ -795,25 +644,6 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
795 nouveau_gpuobj_ref(NULL, &ramht); 644 nouveau_gpuobj_ref(NULL, &ramht);
796 if (ret) 645 if (ret)
797 return ret; 646 return ret;
798
799 /* dma objects for display sync channel semaphore blocks */
800 for (i = 0; i < dev->mode_config.num_crtc; i++) {
801 struct nouveau_gpuobj *sem = NULL;
802 struct nv50_display_crtc *dispc =
803 &nv50_display(dev)->crtc[i];
804 u64 offset = dispc->sem.bo->bo.offset;
805
806 ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
807 NV_MEM_ACCESS_RW,
808 NV_MEM_TARGET_VRAM, &sem);
809 if (ret)
810 return ret;
811
812 ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, sem);
813 nouveau_gpuobj_ref(NULL, &sem);
814 if (ret)
815 return ret;
816 }
817 } 647 }
818 648
819 /* VRAM ctxdma */ 649 /* VRAM ctxdma */
@@ -873,25 +703,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
873void 703void
874nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) 704nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
875{ 705{
876 struct drm_device *dev = chan->dev; 706 NV_DEBUG(chan->dev, "ch%d\n", chan->id);
877 struct drm_nouveau_private *dev_priv = dev->dev_private;
878 int i;
879
880 NV_DEBUG(dev, "ch%d\n", chan->id);
881
882 if (dev_priv->card_type >= NV_D0) {
883 for (i = 0; i < dev->mode_config.num_crtc; i++) {
884 struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
885 nouveau_bo_vma_del(bo, &chan->dispc_vma[i]);
886 }
887 } else
888 if (dev_priv->card_type >= NV_50) {
889 struct nv50_display *disp = nv50_display(dev);
890 for (i = 0; i < dev->mode_config.num_crtc; i++) {
891 struct nv50_display_crtc *dispc = &disp->crtc[i];
892 nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
893 }
894 }
895 707
896 nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); 708 nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
897 nouveau_gpuobj_ref(NULL, &chan->vm_pd); 709 nouveau_gpuobj_ref(NULL, &chan->vm_pd);
@@ -956,6 +768,17 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
956 if (init->handle == ~0) 768 if (init->handle == ~0)
957 return -EINVAL; 769 return -EINVAL;
958 770
771 /* compatibility with userspace that assumes 506e for all chipsets */
772 if (init->class == 0x506e) {
773 init->class = nouveau_software_class(dev);
774 if (init->class == 0x906e)
775 return 0;
776 } else
777 if (init->class == 0x906e) {
778 NV_ERROR(dev, "906e not supported yet\n");
779 return -EINVAL;
780 }
781
959 chan = nouveau_channel_get(file_priv, init->channel); 782 chan = nouveau_channel_get(file_priv, init->channel);
960 if (IS_ERR(chan)) 783 if (IS_ERR(chan))
961 return PTR_ERR(chan); 784 return PTR_ERR(chan);
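
Note (not part of the patch): the compatibility rule added to the grobj ioctl above, restated as a standalone helper; the logic matches the hunk, the helper itself is illustrative:

static int
example_fixup_sw_class(struct drm_device *dev, u32 *class)
{
        if (*class == 0x506e) {
                /* old userspace asks for the NV50 software class on every
                 * chipset; translate to the real per-chipset class */
                *class = nouveau_software_class(dev);
                if (*class == 0x906e)
                        return 1;       /* Fermi: nothing to allocate */
        } else if (*class == 0x906e) {
                return -EINVAL;         /* not supported directly yet */
        }
        return 0;
}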
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 69a528d106e6..ea6acf1c4a78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -83,7 +83,7 @@ nouveau_perf_entry(struct drm_device *dev, int idx,
83 return NULL; 83 return NULL;
84} 84}
85 85
86static u8 * 86u8 *
87nouveau_perf_rammap(struct drm_device *dev, u32 freq, 87nouveau_perf_rammap(struct drm_device *dev, u32 freq,
88 u8 *ver, u8 *hdr, u8 *cnt, u8 *len) 88 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
89{ 89{
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
index 3f82dfea61dd..07cac72c72b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
@@ -61,8 +61,10 @@ int nouveau_voltage_gpio_set(struct drm_device *, int voltage);
61/* nouveau_perf.c */ 61/* nouveau_perf.c */
62void nouveau_perf_init(struct drm_device *); 62void nouveau_perf_init(struct drm_device *);
63void nouveau_perf_fini(struct drm_device *); 63void nouveau_perf_fini(struct drm_device *);
64u8 *nouveau_perf_timing(struct drm_device *, u32 freq, u8 *ver, u8 *len); 64u8 *nouveau_perf_rammap(struct drm_device *, u32 freq, u8 *ver,
65 u8 *hdr, u8 *cnt, u8 *len);
65u8 *nouveau_perf_ramcfg(struct drm_device *, u32 freq, u8 *ver, u8 *len); 66u8 *nouveau_perf_ramcfg(struct drm_device *, u32 freq, u8 *ver, u8 *len);
67u8 *nouveau_perf_timing(struct drm_device *, u32 freq, u8 *ver, u8 *len);
66 68
67/* nouveau_mem.c */ 69/* nouveau_mem.c */
68void nouveau_mem_timing_init(struct drm_device *); 70void nouveau_mem_timing_init(struct drm_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 27aac9ada73a..38483a042bc2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -341,10 +341,10 @@ nouveau_sgdma_init(struct drm_device *dev)
341 u32 aper_size, align; 341 u32 aper_size, align;
342 int ret; 342 int ret;
343 343
344 if (dev_priv->card_type >= NV_40 && pci_is_pcie(dev->pdev)) 344 if (dev_priv->card_type >= NV_40)
345 aper_size = 512 * 1024 * 1024; 345 aper_size = 512 * 1024 * 1024;
346 else 346 else
347 aper_size = 64 * 1024 * 1024; 347 aper_size = 128 * 1024 * 1024;
348 348
349 /* Dear NVIDIA, NV44+ would like proper present bits in PTEs for 349 /* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
350 * christmas. The cards before it have them, the cards after 350 * christmas. The cards before it have them, the cards after
diff --git a/drivers/gpu/drm/nouveau/nouveau_software.h b/drivers/gpu/drm/nouveau/nouveau_software.h
new file mode 100644
index 000000000000..e60bc6ce9003
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_software.h
@@ -0,0 +1,69 @@
1#ifndef __NOUVEAU_SOFTWARE_H__
2#define __NOUVEAU_SOFTWARE_H__
3
4struct nouveau_software_priv {
5 struct nouveau_exec_engine base;
6 struct list_head vblank;
7};
8
9struct nouveau_software_chan {
10 struct list_head flip;
11 struct {
12 struct list_head list;
13 struct nouveau_bo *bo;
14 u32 offset;
15 u32 value;
16 u32 head;
17 } vblank;
18};
19
20static inline void
21nouveau_software_vblank(struct drm_device *dev, int crtc)
22{
23 struct nouveau_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
24 struct nouveau_software_chan *pch, *tmp;
25
26 list_for_each_entry_safe(pch, tmp, &psw->vblank, vblank.list) {
27 if (pch->vblank.head != crtc)
28 continue;
29
30 nouveau_bo_wr32(pch->vblank.bo, pch->vblank.offset,
31 pch->vblank.value);
32 list_del(&pch->vblank.list);
33 drm_vblank_put(dev, crtc);
34 }
35}
36
37static inline void
38nouveau_software_context_new(struct nouveau_software_chan *pch)
39{
40 INIT_LIST_HEAD(&pch->flip);
41}
42
43static inline void
44nouveau_software_create(struct nouveau_software_priv *psw)
45{
46 INIT_LIST_HEAD(&psw->vblank);
47}
48
49static inline u16
50nouveau_software_class(struct drm_device *dev)
51{
52 struct drm_nouveau_private *dev_priv = dev->dev_private;
53 if (dev_priv->card_type <= NV_04)
54 return 0x006e;
55 if (dev_priv->card_type <= NV_40)
56 return 0x016e;
57 if (dev_priv->card_type <= NV_50)
58 return 0x506e;
59 if (dev_priv->card_type <= NV_E0)
60 return 0x906e;
61 return 0x0000;
62}
63
64int nv04_software_create(struct drm_device *);
65int nv50_software_create(struct drm_device *);
66int nvc0_software_create(struct drm_device *);
67u64 nvc0_software_crtc(struct nouveau_channel *, int crtc);
68
69#endif
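
The helper above is what the grobj_alloc compatibility hunk later in this patch leans on: userspace historically asked for class 0x506e on every chipset, and the kernel now translates that into the generation's real software class. A standalone sketch of the mapping (plain C, not kernel code; the NV_xx values are illustrative stand-ins for the card_type constants in nouveau_drv.h):

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-ins for the card_type constants in nouveau_drv.h */
enum card_type { NV_04 = 0x04, NV_40 = 0x40, NV_50 = 0x50, NV_E0 = 0xe0 };

/* same shape as nouveau_software_class() above */
static uint16_t software_class(enum card_type type)
{
	if (type <= NV_04)
		return 0x006e;
	if (type <= NV_40)
		return 0x016e;
	if (type <= NV_50)
		return 0x506e;
	if (type <= NV_E0)
		return 0x906e;
	return 0x0000;
}

int main(void)
{
	/* a userspace request for 0x506e on a kepler board resolves to
	 * 0x906e, which the ioctl then treats as implicitly created */
	printf("kepler software class: 0x%04x\n", software_class(NV_E0));
	return 0;
}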
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 298c09b75569..19706f0532ea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -39,6 +39,9 @@
39#include "nouveau_gpio.h" 39#include "nouveau_gpio.h"
40#include "nouveau_pm.h" 40#include "nouveau_pm.h"
41#include "nv50_display.h" 41#include "nv50_display.h"
42#include "nouveau_fifo.h"
43#include "nouveau_fence.h"
44#include "nouveau_software.h"
42 45
43static void nouveau_stub_takedown(struct drm_device *dev) {} 46static void nouveau_stub_takedown(struct drm_device *dev) {}
44static int nouveau_stub_init(struct drm_device *dev) { return 0; } 47static int nouveau_stub_init(struct drm_device *dev) { return 0; }
@@ -66,18 +69,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
66 engine->timer.takedown = nv04_timer_takedown; 69 engine->timer.takedown = nv04_timer_takedown;
67 engine->fb.init = nv04_fb_init; 70 engine->fb.init = nv04_fb_init;
68 engine->fb.takedown = nv04_fb_takedown; 71 engine->fb.takedown = nv04_fb_takedown;
69 engine->fifo.channels = 16;
70 engine->fifo.init = nv04_fifo_init;
71 engine->fifo.takedown = nv04_fifo_fini;
72 engine->fifo.disable = nv04_fifo_disable;
73 engine->fifo.enable = nv04_fifo_enable;
74 engine->fifo.reassign = nv04_fifo_reassign;
75 engine->fifo.cache_pull = nv04_fifo_cache_pull;
76 engine->fifo.channel_id = nv04_fifo_channel_id;
77 engine->fifo.create_context = nv04_fifo_create_context;
78 engine->fifo.destroy_context = nv04_fifo_destroy_context;
79 engine->fifo.load_context = nv04_fifo_load_context;
80 engine->fifo.unload_context = nv04_fifo_unload_context;
81 engine->display.early_init = nv04_display_early_init; 72 engine->display.early_init = nv04_display_early_init;
82 engine->display.late_takedown = nv04_display_late_takedown; 73 engine->display.late_takedown = nv04_display_late_takedown;
83 engine->display.create = nv04_display_create; 74 engine->display.create = nv04_display_create;
@@ -111,18 +102,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
111 engine->fb.init_tile_region = nv10_fb_init_tile_region; 102 engine->fb.init_tile_region = nv10_fb_init_tile_region;
112 engine->fb.set_tile_region = nv10_fb_set_tile_region; 103 engine->fb.set_tile_region = nv10_fb_set_tile_region;
113 engine->fb.free_tile_region = nv10_fb_free_tile_region; 104 engine->fb.free_tile_region = nv10_fb_free_tile_region;
114 engine->fifo.channels = 32;
115 engine->fifo.init = nv10_fifo_init;
116 engine->fifo.takedown = nv04_fifo_fini;
117 engine->fifo.disable = nv04_fifo_disable;
118 engine->fifo.enable = nv04_fifo_enable;
119 engine->fifo.reassign = nv04_fifo_reassign;
120 engine->fifo.cache_pull = nv04_fifo_cache_pull;
121 engine->fifo.channel_id = nv10_fifo_channel_id;
122 engine->fifo.create_context = nv10_fifo_create_context;
123 engine->fifo.destroy_context = nv04_fifo_destroy_context;
124 engine->fifo.load_context = nv10_fifo_load_context;
125 engine->fifo.unload_context = nv10_fifo_unload_context;
126 engine->display.early_init = nv04_display_early_init; 105 engine->display.early_init = nv04_display_early_init;
127 engine->display.late_takedown = nv04_display_late_takedown; 106 engine->display.late_takedown = nv04_display_late_takedown;
128 engine->display.create = nv04_display_create; 107 engine->display.create = nv04_display_create;
@@ -162,18 +141,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
162 engine->fb.init_tile_region = nv20_fb_init_tile_region; 141 engine->fb.init_tile_region = nv20_fb_init_tile_region;
163 engine->fb.set_tile_region = nv20_fb_set_tile_region; 142 engine->fb.set_tile_region = nv20_fb_set_tile_region;
164 engine->fb.free_tile_region = nv20_fb_free_tile_region; 143 engine->fb.free_tile_region = nv20_fb_free_tile_region;
165 engine->fifo.channels = 32;
166 engine->fifo.init = nv10_fifo_init;
167 engine->fifo.takedown = nv04_fifo_fini;
168 engine->fifo.disable = nv04_fifo_disable;
169 engine->fifo.enable = nv04_fifo_enable;
170 engine->fifo.reassign = nv04_fifo_reassign;
171 engine->fifo.cache_pull = nv04_fifo_cache_pull;
172 engine->fifo.channel_id = nv10_fifo_channel_id;
173 engine->fifo.create_context = nv10_fifo_create_context;
174 engine->fifo.destroy_context = nv04_fifo_destroy_context;
175 engine->fifo.load_context = nv10_fifo_load_context;
176 engine->fifo.unload_context = nv10_fifo_unload_context;
177 engine->display.early_init = nv04_display_early_init; 144 engine->display.early_init = nv04_display_early_init;
178 engine->display.late_takedown = nv04_display_late_takedown; 145 engine->display.late_takedown = nv04_display_late_takedown;
179 engine->display.create = nv04_display_create; 146 engine->display.create = nv04_display_create;
@@ -209,18 +176,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
209 engine->fb.init_tile_region = nv30_fb_init_tile_region; 176 engine->fb.init_tile_region = nv30_fb_init_tile_region;
210 engine->fb.set_tile_region = nv10_fb_set_tile_region; 177 engine->fb.set_tile_region = nv10_fb_set_tile_region;
211 engine->fb.free_tile_region = nv30_fb_free_tile_region; 178 engine->fb.free_tile_region = nv30_fb_free_tile_region;
212 engine->fifo.channels = 32;
213 engine->fifo.init = nv10_fifo_init;
214 engine->fifo.takedown = nv04_fifo_fini;
215 engine->fifo.disable = nv04_fifo_disable;
216 engine->fifo.enable = nv04_fifo_enable;
217 engine->fifo.reassign = nv04_fifo_reassign;
218 engine->fifo.cache_pull = nv04_fifo_cache_pull;
219 engine->fifo.channel_id = nv10_fifo_channel_id;
220 engine->fifo.create_context = nv10_fifo_create_context;
221 engine->fifo.destroy_context = nv04_fifo_destroy_context;
222 engine->fifo.load_context = nv10_fifo_load_context;
223 engine->fifo.unload_context = nv10_fifo_unload_context;
224 engine->display.early_init = nv04_display_early_init; 179 engine->display.early_init = nv04_display_early_init;
225 engine->display.late_takedown = nv04_display_late_takedown; 180 engine->display.late_takedown = nv04_display_late_takedown;
226 engine->display.create = nv04_display_create; 181 engine->display.create = nv04_display_create;
@@ -259,18 +214,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
259 engine->fb.init_tile_region = nv30_fb_init_tile_region; 214 engine->fb.init_tile_region = nv30_fb_init_tile_region;
260 engine->fb.set_tile_region = nv40_fb_set_tile_region; 215 engine->fb.set_tile_region = nv40_fb_set_tile_region;
261 engine->fb.free_tile_region = nv30_fb_free_tile_region; 216 engine->fb.free_tile_region = nv30_fb_free_tile_region;
262 engine->fifo.channels = 32;
263 engine->fifo.init = nv40_fifo_init;
264 engine->fifo.takedown = nv04_fifo_fini;
265 engine->fifo.disable = nv04_fifo_disable;
266 engine->fifo.enable = nv04_fifo_enable;
267 engine->fifo.reassign = nv04_fifo_reassign;
268 engine->fifo.cache_pull = nv04_fifo_cache_pull;
269 engine->fifo.channel_id = nv10_fifo_channel_id;
270 engine->fifo.create_context = nv40_fifo_create_context;
271 engine->fifo.destroy_context = nv04_fifo_destroy_context;
272 engine->fifo.load_context = nv40_fifo_load_context;
273 engine->fifo.unload_context = nv40_fifo_unload_context;
274 engine->display.early_init = nv04_display_early_init; 217 engine->display.early_init = nv04_display_early_init;
275 engine->display.late_takedown = nv04_display_late_takedown; 218 engine->display.late_takedown = nv04_display_late_takedown;
276 engine->display.create = nv04_display_create; 219 engine->display.create = nv04_display_create;
@@ -317,18 +260,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
317 engine->timer.takedown = nv04_timer_takedown; 260 engine->timer.takedown = nv04_timer_takedown;
318 engine->fb.init = nv50_fb_init; 261 engine->fb.init = nv50_fb_init;
319 engine->fb.takedown = nv50_fb_takedown; 262 engine->fb.takedown = nv50_fb_takedown;
320 engine->fifo.channels = 128;
321 engine->fifo.init = nv50_fifo_init;
322 engine->fifo.takedown = nv50_fifo_takedown;
323 engine->fifo.disable = nv04_fifo_disable;
324 engine->fifo.enable = nv04_fifo_enable;
325 engine->fifo.reassign = nv04_fifo_reassign;
326 engine->fifo.channel_id = nv50_fifo_channel_id;
327 engine->fifo.create_context = nv50_fifo_create_context;
328 engine->fifo.destroy_context = nv50_fifo_destroy_context;
329 engine->fifo.load_context = nv50_fifo_load_context;
330 engine->fifo.unload_context = nv50_fifo_unload_context;
331 engine->fifo.tlb_flush = nv50_fifo_tlb_flush;
332 engine->display.early_init = nv50_display_early_init; 263 engine->display.early_init = nv50_display_early_init;
333 engine->display.late_takedown = nv50_display_late_takedown; 264 engine->display.late_takedown = nv50_display_late_takedown;
334 engine->display.create = nv50_display_create; 265 engine->display.create = nv50_display_create;
@@ -392,17 +323,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
392 engine->timer.takedown = nv04_timer_takedown; 323 engine->timer.takedown = nv04_timer_takedown;
393 engine->fb.init = nvc0_fb_init; 324 engine->fb.init = nvc0_fb_init;
394 engine->fb.takedown = nvc0_fb_takedown; 325 engine->fb.takedown = nvc0_fb_takedown;
395 engine->fifo.channels = 128;
396 engine->fifo.init = nvc0_fifo_init;
397 engine->fifo.takedown = nvc0_fifo_takedown;
398 engine->fifo.disable = nvc0_fifo_disable;
399 engine->fifo.enable = nvc0_fifo_enable;
400 engine->fifo.reassign = nvc0_fifo_reassign;
401 engine->fifo.channel_id = nvc0_fifo_channel_id;
402 engine->fifo.create_context = nvc0_fifo_create_context;
403 engine->fifo.destroy_context = nvc0_fifo_destroy_context;
404 engine->fifo.load_context = nvc0_fifo_load_context;
405 engine->fifo.unload_context = nvc0_fifo_unload_context;
406 engine->display.early_init = nv50_display_early_init; 326 engine->display.early_init = nv50_display_early_init;
407 engine->display.late_takedown = nv50_display_late_takedown; 327 engine->display.late_takedown = nv50_display_late_takedown;
408 engine->display.create = nv50_display_create; 328 engine->display.create = nv50_display_create;
@@ -445,17 +365,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
445 engine->timer.takedown = nv04_timer_takedown; 365 engine->timer.takedown = nv04_timer_takedown;
446 engine->fb.init = nvc0_fb_init; 366 engine->fb.init = nvc0_fb_init;
447 engine->fb.takedown = nvc0_fb_takedown; 367 engine->fb.takedown = nvc0_fb_takedown;
448 engine->fifo.channels = 128;
449 engine->fifo.init = nvc0_fifo_init;
450 engine->fifo.takedown = nvc0_fifo_takedown;
451 engine->fifo.disable = nvc0_fifo_disable;
452 engine->fifo.enable = nvc0_fifo_enable;
453 engine->fifo.reassign = nvc0_fifo_reassign;
454 engine->fifo.channel_id = nvc0_fifo_channel_id;
455 engine->fifo.create_context = nvc0_fifo_create_context;
456 engine->fifo.destroy_context = nvc0_fifo_destroy_context;
457 engine->fifo.load_context = nvc0_fifo_load_context;
458 engine->fifo.unload_context = nvc0_fifo_unload_context;
459 engine->display.early_init = nouveau_stub_init; 368 engine->display.early_init = nouveau_stub_init;
460 engine->display.late_takedown = nouveau_stub_takedown; 369 engine->display.late_takedown = nouveau_stub_takedown;
461 engine->display.create = nvd0_display_create; 370 engine->display.create = nvd0_display_create;
@@ -496,13 +405,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
496 engine->timer.takedown = nv04_timer_takedown; 405 engine->timer.takedown = nv04_timer_takedown;
497 engine->fb.init = nvc0_fb_init; 406 engine->fb.init = nvc0_fb_init;
498 engine->fb.takedown = nvc0_fb_takedown; 407 engine->fb.takedown = nvc0_fb_takedown;
499 engine->fifo.channels = 0;
500 engine->fifo.init = nouveau_stub_init;
501 engine->fifo.takedown = nouveau_stub_takedown;
502 engine->fifo.disable = nvc0_fifo_disable;
503 engine->fifo.enable = nvc0_fifo_enable;
504 engine->fifo.reassign = nvc0_fifo_reassign;
505 engine->fifo.unload_context = nouveau_stub_init;
506 engine->display.early_init = nouveau_stub_init; 408 engine->display.early_init = nouveau_stub_init;
507 engine->display.late_takedown = nouveau_stub_takedown; 409 engine->display.late_takedown = nouveau_stub_takedown;
508 engine->display.create = nvd0_display_create; 410 engine->display.create = nvd0_display_create;
@@ -607,59 +509,16 @@ nouveau_card_channel_init(struct drm_device *dev)
607{ 509{
608 struct drm_nouveau_private *dev_priv = dev->dev_private; 510 struct drm_nouveau_private *dev_priv = dev->dev_private;
609 struct nouveau_channel *chan; 511 struct nouveau_channel *chan;
610 int ret, oclass; 512 int ret;
611 513
612 ret = nouveau_channel_alloc(dev, &chan, NULL, NvDmaFB, NvDmaTT); 514 ret = nouveau_channel_alloc(dev, &chan, NULL, NvDmaFB, NvDmaTT);
613 dev_priv->channel = chan; 515 dev_priv->channel = chan;
614 if (ret) 516 if (ret)
615 return ret; 517 return ret;
616
617 mutex_unlock(&dev_priv->channel->mutex); 518 mutex_unlock(&dev_priv->channel->mutex);
618 519
619 if (dev_priv->card_type <= NV_50) { 520 nouveau_bo_move_init(chan);
620 if (dev_priv->card_type < NV_50) 521 return 0;
621 oclass = 0x0039;
622 else
623 oclass = 0x5039;
624
625 ret = nouveau_gpuobj_gr_new(chan, NvM2MF, oclass);
626 if (ret)
627 goto error;
628
629 ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
630 &chan->m2mf_ntfy);
631 if (ret)
632 goto error;
633
634 ret = RING_SPACE(chan, 6);
635 if (ret)
636 goto error;
637
638 BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
639 OUT_RING (chan, NvM2MF);
640 BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
641 OUT_RING (chan, NvNotify0);
642 OUT_RING (chan, chan->vram_handle);
643 OUT_RING (chan, chan->gart_handle);
644 } else
645 if (dev_priv->card_type <= NV_D0) {
646 ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
647 if (ret)
648 goto error;
649
650 ret = RING_SPACE(chan, 2);
651 if (ret)
652 goto error;
653
654 BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
655 OUT_RING (chan, 0x00009039);
656 }
657
658 FIRE_RING (chan);
659error:
660 if (ret)
661 nouveau_card_channel_fini(dev);
662 return ret;
663} 522}
664 523
665static const struct vga_switcheroo_client_ops nouveau_switcheroo_ops = { 524static const struct vga_switcheroo_client_ops nouveau_switcheroo_ops = {
@@ -749,6 +608,81 @@ nouveau_card_init(struct drm_device *dev)
749 if (!dev_priv->noaccel) { 608 if (!dev_priv->noaccel) {
750 switch (dev_priv->card_type) { 609 switch (dev_priv->card_type) {
751 case NV_04: 610 case NV_04:
611 nv04_fifo_create(dev);
612 break;
613 case NV_10:
614 case NV_20:
615 case NV_30:
616 if (dev_priv->chipset < 0x17)
617 nv10_fifo_create(dev);
618 else
619 nv17_fifo_create(dev);
620 break;
621 case NV_40:
622 nv40_fifo_create(dev);
623 break;
624 case NV_50:
625 if (dev_priv->chipset == 0x50)
626 nv50_fifo_create(dev);
627 else
628 nv84_fifo_create(dev);
629 break;
630 case NV_C0:
631 case NV_D0:
632 nvc0_fifo_create(dev);
633 break;
634 case NV_E0:
635 nve0_fifo_create(dev);
636 break;
637 default:
638 break;
639 }
640
641 switch (dev_priv->card_type) {
642 case NV_04:
643 nv04_fence_create(dev);
644 break;
645 case NV_10:
646 case NV_20:
647 case NV_30:
648 case NV_40:
649 case NV_50:
650 if (dev_priv->chipset < 0x84)
651 nv10_fence_create(dev);
652 else
653 nv84_fence_create(dev);
654 break;
655 case NV_C0:
656 case NV_D0:
657 case NV_E0:
658 nvc0_fence_create(dev);
659 break;
660 default:
661 break;
662 }
663
664 switch (dev_priv->card_type) {
665 case NV_04:
666 case NV_10:
667 case NV_20:
668 case NV_30:
669 case NV_40:
670 nv04_software_create(dev);
671 break;
672 case NV_50:
673 nv50_software_create(dev);
674 break;
675 case NV_C0:
676 case NV_D0:
677 case NV_E0:
678 nvc0_software_create(dev);
679 break;
680 default:
681 break;
682 }
683
684 switch (dev_priv->card_type) {
685 case NV_04:
752 nv04_graph_create(dev); 686 nv04_graph_create(dev);
753 break; 687 break;
754 case NV_10: 688 case NV_10:
@@ -768,6 +702,9 @@ nouveau_card_init(struct drm_device *dev)
768 case NV_D0: 702 case NV_D0:
769 nvc0_graph_create(dev); 703 nvc0_graph_create(dev);
770 break; 704 break;
705 case NV_E0:
706 nve0_graph_create(dev);
707 break;
771 default: 708 default:
772 break; 709 break;
773 } 710 }
@@ -800,8 +737,9 @@ nouveau_card_init(struct drm_device *dev)
800 } 737 }
801 break; 738 break;
802 case NV_C0: 739 case NV_C0:
803 nvc0_copy_create(dev, 0);
804 nvc0_copy_create(dev, 1); 740 nvc0_copy_create(dev, 1);
741 case NV_D0:
742 nvc0_copy_create(dev, 0);
805 break; 743 break;
806 default: 744 default:
807 break; 745 break;
@@ -834,16 +772,11 @@ nouveau_card_init(struct drm_device *dev)
834 goto out_engine; 772 goto out_engine;
835 } 773 }
836 } 774 }
837
838 /* PFIFO */
839 ret = engine->fifo.init(dev);
840 if (ret)
841 goto out_engine;
842 } 775 }
843 776
844 ret = nouveau_irq_init(dev); 777 ret = nouveau_irq_init(dev);
845 if (ret) 778 if (ret)
846 goto out_fifo; 779 goto out_engine;
847 780
848 ret = nouveau_display_create(dev); 781 ret = nouveau_display_create(dev);
849 if (ret) 782 if (ret)
@@ -852,14 +785,10 @@ nouveau_card_init(struct drm_device *dev)
852 nouveau_backlight_init(dev); 785 nouveau_backlight_init(dev);
853 nouveau_pm_init(dev); 786 nouveau_pm_init(dev);
854 787
855 ret = nouveau_fence_init(dev);
856 if (ret)
857 goto out_pm;
858
859 if (dev_priv->eng[NVOBJ_ENGINE_GR]) { 788 if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
860 ret = nouveau_card_channel_init(dev); 789 ret = nouveau_card_channel_init(dev);
861 if (ret) 790 if (ret)
862 goto out_fence; 791 goto out_pm;
863 } 792 }
864 793
865 if (dev->mode_config.num_crtc) { 794 if (dev->mode_config.num_crtc) {
@@ -874,17 +803,12 @@ nouveau_card_init(struct drm_device *dev)
874 803
875out_chan: 804out_chan:
876 nouveau_card_channel_fini(dev); 805 nouveau_card_channel_fini(dev);
877out_fence:
878 nouveau_fence_fini(dev);
879out_pm: 806out_pm:
880 nouveau_pm_fini(dev); 807 nouveau_pm_fini(dev);
881 nouveau_backlight_exit(dev); 808 nouveau_backlight_exit(dev);
882 nouveau_display_destroy(dev); 809 nouveau_display_destroy(dev);
883out_irq: 810out_irq:
884 nouveau_irq_fini(dev); 811 nouveau_irq_fini(dev);
885out_fifo:
886 if (!dev_priv->noaccel)
887 engine->fifo.takedown(dev);
888out_engine: 812out_engine:
889 if (!dev_priv->noaccel) { 813 if (!dev_priv->noaccel) {
890 for (e = e - 1; e >= 0; e--) { 814 for (e = e - 1; e >= 0; e--) {
@@ -916,6 +840,7 @@ out_bios:
916out_display_early: 840out_display_early:
917 engine->display.late_takedown(dev); 841 engine->display.late_takedown(dev);
918out: 842out:
843 vga_switcheroo_unregister_client(dev->pdev);
919 vga_client_register(dev->pdev, NULL, NULL, NULL); 844 vga_client_register(dev->pdev, NULL, NULL, NULL);
920 return ret; 845 return ret;
921} 846}
@@ -932,13 +857,11 @@ static void nouveau_card_takedown(struct drm_device *dev)
932 } 857 }
933 858
934 nouveau_card_channel_fini(dev); 859 nouveau_card_channel_fini(dev);
935 nouveau_fence_fini(dev);
936 nouveau_pm_fini(dev); 860 nouveau_pm_fini(dev);
937 nouveau_backlight_exit(dev); 861 nouveau_backlight_exit(dev);
938 nouveau_display_destroy(dev); 862 nouveau_display_destroy(dev);
939 863
940 if (!dev_priv->noaccel) { 864 if (!dev_priv->noaccel) {
941 engine->fifo.takedown(dev);
942 for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) { 865 for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
943 if (dev_priv->eng[e]) { 866 if (dev_priv->eng[e]) {
944 dev_priv->eng[e]->fini(dev, e, false); 867 dev_priv->eng[e]->fini(dev, e, false);
@@ -973,6 +896,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
973 896
974 nouveau_irq_fini(dev); 897 nouveau_irq_fini(dev);
975 898
899 vga_switcheroo_unregister_client(dev->pdev);
976 vga_client_register(dev->pdev, NULL, NULL, NULL); 900 vga_client_register(dev->pdev, NULL, NULL, NULL);
977} 901}
978 902
@@ -1180,7 +1104,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
1180 goto err_priv; 1104 goto err_priv;
1181 } 1105 }
1182 1106
1183 NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", 1107 NV_INFO(dev, "Detected an NV%02x generation card (0x%08x)\n",
1184 dev_priv->card_type, reg0); 1108 dev_priv->card_type, reg0);
1185 1109
1186 /* map the mmio regs, limiting the amount to preserve vmap space */ 1110 /* map the mmio regs, limiting the amount to preserve vmap space */
@@ -1223,6 +1147,8 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
1223 if (nouveau_noaccel == -1) { 1147 if (nouveau_noaccel == -1) {
1224 switch (dev_priv->chipset) { 1148 switch (dev_priv->chipset) {
1225 case 0xd9: /* known broken */ 1149 case 0xd9: /* known broken */
1150 case 0xe4: /* needs binary driver firmware */
1151 case 0xe7: /* needs binary driver firmware */
1226 NV_INFO(dev, "acceleration disabled by default, pass " 1152 NV_INFO(dev, "acceleration disabled by default, pass "
1227 "noaccel=0 to force enable\n"); 1153 "noaccel=0 to force enable\n");
1228 dev_priv->noaccel = true; 1154 dev_priv->noaccel = true;
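
The three new switches in nouveau_card_init() stage accelerated bring-up as fifo, then fence, then software, ahead of the existing graph switch. As a worked illustration of the fence step, here is a standalone sketch (plain C, not kernel code) of its decision tree; the constants are stand-ins for the card_type/chipset checks in the hunk:

#include <stdio.h>

/* illustrative stand-ins for the card_type constants in nouveau_drv.h */
enum card_type { NV_04 = 0x04, NV_10 = 0x10, NV_20 = 0x20, NV_30 = 0x30,
		 NV_40 = 0x40, NV_50 = 0x50, NV_C0 = 0xc0, NV_D0 = 0xd0,
		 NV_E0 = 0xe0 };

/* same decision tree as the fence switch above: chipsets before 0x84
 * lack the semaphore hardware that nv84_fence relies on */
static const char *fence_backend(enum card_type type, int chipset)
{
	switch (type) {
	case NV_04:
		return "nv04_fence";
	case NV_10: case NV_20: case NV_30: case NV_40: case NV_50:
		return chipset < 0x84 ? "nv10_fence" : "nv84_fence";
	case NV_C0: case NV_D0: case NV_E0:
		return "nvc0_fence";
	default:
		return "none";
	}
}

int main(void)
{
	printf("nv50 (0x50): %s\n", fence_backend(NV_50, 0x50)); /* nv10_fence */
	printf("g86  (0x86): %s\n", fence_backend(NV_50, 0x86)); /* nv84_fence */
	return 0;
}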
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 7047d37e8dab..44488e3a257d 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -98,6 +98,13 @@ nv04_display_early_init(struct drm_device *dev)
98 NVSetOwner(dev, 0); 98 NVSetOwner(dev, 0);
99 } 99 }
100 100
101 /* ensure vblank interrupts are off; they can't be enabled until
102 * drm_vblank has been initialised
103 */
104 NVWriteCRTC(dev, 0, NV_PCRTC_INTR_EN_0, 0);
105 if (nv_two_heads(dev))
106 NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
107
101 return 0; 108 return 0;
102} 109}
103 110
@@ -246,6 +253,10 @@ nv04_display_init(struct drm_device *dev)
246void 253void
247nv04_display_fini(struct drm_device *dev) 254nv04_display_fini(struct drm_device *dev)
248{ 255{
256 /* disable vblank interrupts */
257 NVWriteCRTC(dev, 0, NV_PCRTC_INTR_EN_0, 0);
258 if (nv_two_heads(dev))
259 NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
249} 260}
250 261
251static void 262static void
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 7a1189371096..7cd7857347ef 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -41,7 +41,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
41 if (ret) 41 if (ret)
42 return ret; 42 return ret;
43 43
44 BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3); 44 BEGIN_NV04(chan, NvSubImageBlit, 0x0300, 3);
45 OUT_RING(chan, (region->sy << 16) | region->sx); 45 OUT_RING(chan, (region->sy << 16) | region->sx);
46 OUT_RING(chan, (region->dy << 16) | region->dx); 46 OUT_RING(chan, (region->dy << 16) | region->dx);
47 OUT_RING(chan, (region->height << 16) | region->width); 47 OUT_RING(chan, (region->height << 16) | region->width);
@@ -62,15 +62,15 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
62 if (ret) 62 if (ret)
63 return ret; 63 return ret;
64 64
65 BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1); 65 BEGIN_NV04(chan, NvSubGdiRect, 0x02fc, 1);
66 OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3); 66 OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
67 BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1); 67 BEGIN_NV04(chan, NvSubGdiRect, 0x03fc, 1);
68 if (info->fix.visual == FB_VISUAL_TRUECOLOR || 68 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
69 info->fix.visual == FB_VISUAL_DIRECTCOLOR) 69 info->fix.visual == FB_VISUAL_DIRECTCOLOR)
70 OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]); 70 OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
71 else 71 else
72 OUT_RING(chan, rect->color); 72 OUT_RING(chan, rect->color);
73 BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2); 73 BEGIN_NV04(chan, NvSubGdiRect, 0x0400, 2);
74 OUT_RING(chan, (rect->dx << 16) | rect->dy); 74 OUT_RING(chan, (rect->dx << 16) | rect->dy);
75 OUT_RING(chan, (rect->width << 16) | rect->height); 75 OUT_RING(chan, (rect->width << 16) | rect->height);
76 FIRE_RING(chan); 76 FIRE_RING(chan);
@@ -110,7 +110,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
110 bg = image->bg_color; 110 bg = image->bg_color;
111 } 111 }
112 112
113 BEGIN_RING(chan, NvSubGdiRect, 0x0be4, 7); 113 BEGIN_NV04(chan, NvSubGdiRect, 0x0be4, 7);
114 OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); 114 OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
115 OUT_RING(chan, ((image->dy + image->height) << 16) | 115 OUT_RING(chan, ((image->dy + image->height) << 16) |
116 ((image->dx + image->width) & 0xffff)); 116 ((image->dx + image->width) & 0xffff));
@@ -127,7 +127,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
127 if (ret) 127 if (ret)
128 return ret; 128 return ret;
129 129
130 BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len); 130 BEGIN_NV04(chan, NvSubGdiRect, 0x0c00, iter_len);
131 OUT_RINGp(chan, data, iter_len); 131 OUT_RINGp(chan, data, iter_len);
132 data += iter_len; 132 data += iter_len;
133 dsize -= iter_len; 133 dsize -= iter_len;
@@ -209,25 +209,25 @@ nv04_fbcon_accel_init(struct fb_info *info)
209 return 0; 209 return 0;
210 } 210 }
211 211
212 BEGIN_RING(chan, sub, 0x0000, 1); 212 BEGIN_NV04(chan, sub, 0x0000, 1);
213 OUT_RING(chan, NvCtxSurf2D); 213 OUT_RING(chan, NvCtxSurf2D);
214 BEGIN_RING(chan, sub, 0x0184, 2); 214 BEGIN_NV04(chan, sub, 0x0184, 2);
215 OUT_RING(chan, NvDmaFB); 215 OUT_RING(chan, NvDmaFB);
216 OUT_RING(chan, NvDmaFB); 216 OUT_RING(chan, NvDmaFB);
217 BEGIN_RING(chan, sub, 0x0300, 4); 217 BEGIN_NV04(chan, sub, 0x0300, 4);
218 OUT_RING(chan, surface_fmt); 218 OUT_RING(chan, surface_fmt);
219 OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16)); 219 OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
220 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); 220 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
221 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); 221 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
222 222
223 BEGIN_RING(chan, sub, 0x0000, 1); 223 BEGIN_NV04(chan, sub, 0x0000, 1);
224 OUT_RING(chan, NvRop); 224 OUT_RING(chan, NvRop);
225 BEGIN_RING(chan, sub, 0x0300, 1); 225 BEGIN_NV04(chan, sub, 0x0300, 1);
226 OUT_RING(chan, 0x55); 226 OUT_RING(chan, 0x55);
227 227
228 BEGIN_RING(chan, sub, 0x0000, 1); 228 BEGIN_NV04(chan, sub, 0x0000, 1);
229 OUT_RING(chan, NvImagePatt); 229 OUT_RING(chan, NvImagePatt);
230 BEGIN_RING(chan, sub, 0x0300, 8); 230 BEGIN_NV04(chan, sub, 0x0300, 8);
231 OUT_RING(chan, pattern_fmt); 231 OUT_RING(chan, pattern_fmt);
232#ifdef __BIG_ENDIAN 232#ifdef __BIG_ENDIAN
233 OUT_RING(chan, 2); 233 OUT_RING(chan, 2);
@@ -241,31 +241,31 @@ nv04_fbcon_accel_init(struct fb_info *info)
241 OUT_RING(chan, ~0); 241 OUT_RING(chan, ~0);
242 OUT_RING(chan, ~0); 242 OUT_RING(chan, ~0);
243 243
244 BEGIN_RING(chan, sub, 0x0000, 1); 244 BEGIN_NV04(chan, sub, 0x0000, 1);
245 OUT_RING(chan, NvClipRect); 245 OUT_RING(chan, NvClipRect);
246 BEGIN_RING(chan, sub, 0x0300, 2); 246 BEGIN_NV04(chan, sub, 0x0300, 2);
247 OUT_RING(chan, 0); 247 OUT_RING(chan, 0);
248 OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual); 248 OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
249 249
250 BEGIN_RING(chan, NvSubImageBlit, 0x0000, 1); 250 BEGIN_NV04(chan, NvSubImageBlit, 0x0000, 1);
251 OUT_RING(chan, NvImageBlit); 251 OUT_RING(chan, NvImageBlit);
252 BEGIN_RING(chan, NvSubImageBlit, 0x019c, 1); 252 BEGIN_NV04(chan, NvSubImageBlit, 0x019c, 1);
253 OUT_RING(chan, NvCtxSurf2D); 253 OUT_RING(chan, NvCtxSurf2D);
254 BEGIN_RING(chan, NvSubImageBlit, 0x02fc, 1); 254 BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1);
255 OUT_RING(chan, 3); 255 OUT_RING(chan, 3);
256 256
257 BEGIN_RING(chan, NvSubGdiRect, 0x0000, 1); 257 BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1);
258 OUT_RING(chan, NvGdiRect); 258 OUT_RING(chan, NvGdiRect);
259 BEGIN_RING(chan, NvSubGdiRect, 0x0198, 1); 259 BEGIN_NV04(chan, NvSubGdiRect, 0x0198, 1);
260 OUT_RING(chan, NvCtxSurf2D); 260 OUT_RING(chan, NvCtxSurf2D);
261 BEGIN_RING(chan, NvSubGdiRect, 0x0188, 2); 261 BEGIN_NV04(chan, NvSubGdiRect, 0x0188, 2);
262 OUT_RING(chan, NvImagePatt); 262 OUT_RING(chan, NvImagePatt);
263 OUT_RING(chan, NvRop); 263 OUT_RING(chan, NvRop);
264 BEGIN_RING(chan, NvSubGdiRect, 0x0304, 1); 264 BEGIN_NV04(chan, NvSubGdiRect, 0x0304, 1);
265 OUT_RING(chan, 1); 265 OUT_RING(chan, 1);
266 BEGIN_RING(chan, NvSubGdiRect, 0x0300, 1); 266 BEGIN_NV04(chan, NvSubGdiRect, 0x0300, 1);
267 OUT_RING(chan, rect_fmt); 267 OUT_RING(chan, rect_fmt);
268 BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1); 268 BEGIN_NV04(chan, NvSubGdiRect, 0x02fc, 1);
269 OUT_RING(chan, 3); 269 OUT_RING(chan, 3);
270 270
271 FIRE_RING(chan); 271 FIRE_RING(chan);
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
new file mode 100644
index 000000000000..abe89db6de24
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -0,0 +1,140 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_dma.h"
28#include "nouveau_ramht.h"
29#include "nouveau_fence.h"
30
31struct nv04_fence_chan {
32 struct nouveau_fence_chan base;
33 atomic_t sequence;
34};
35
36struct nv04_fence_priv {
37 struct nouveau_fence_priv base;
38};
39
40static int
41nv04_fence_emit(struct nouveau_fence *fence)
42{
43 struct nouveau_channel *chan = fence->channel;
44 int ret = RING_SPACE(chan, 2);
45 if (ret == 0) {
46 BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
47 OUT_RING (chan, fence->sequence);
48 FIRE_RING (chan);
49 }
50 return ret;
51}
52
53static int
54nv04_fence_sync(struct nouveau_fence *fence,
55 struct nouveau_channel *prev, struct nouveau_channel *chan)
56{
57 return -ENODEV;
58}
59
60int
61nv04_fence_mthd(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
62{
63 struct nv04_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
64 atomic_set(&fctx->sequence, data);
65 return 0;
66}
67
68static u32
69nv04_fence_read(struct nouveau_channel *chan)
70{
71 struct nv04_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
72 return atomic_read(&fctx->sequence);
73}
74
75static void
76nv04_fence_context_del(struct nouveau_channel *chan, int engine)
77{
78 struct nv04_fence_chan *fctx = chan->engctx[engine];
79 nouveau_fence_context_del(&fctx->base);
80 chan->engctx[engine] = NULL;
81 kfree(fctx);
82}
83
84static int
85nv04_fence_context_new(struct nouveau_channel *chan, int engine)
86{
87 struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
88 if (fctx) {
89 nouveau_fence_context_new(&fctx->base);
90 atomic_set(&fctx->sequence, 0);
91 chan->engctx[engine] = fctx;
92 return 0;
93 }
94 return -ENOMEM;
95}
96
97static int
98nv04_fence_fini(struct drm_device *dev, int engine, bool suspend)
99{
100 return 0;
101}
102
103static int
104nv04_fence_init(struct drm_device *dev, int engine)
105{
106 return 0;
107}
108
109static void
110nv04_fence_destroy(struct drm_device *dev, int engine)
111{
112 struct drm_nouveau_private *dev_priv = dev->dev_private;
113 struct nv04_fence_priv *priv = nv_engine(dev, engine);
114
115 dev_priv->eng[engine] = NULL;
116 kfree(priv);
117}
118
119int
120nv04_fence_create(struct drm_device *dev)
121{
122 struct drm_nouveau_private *dev_priv = dev->dev_private;
123 struct nv04_fence_priv *priv;
125
126 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
127 if (!priv)
128 return -ENOMEM;
129
130 priv->base.engine.destroy = nv04_fence_destroy;
131 priv->base.engine.init = nv04_fence_init;
132 priv->base.engine.fini = nv04_fence_fini;
133 priv->base.engine.context_new = nv04_fence_context_new;
134 priv->base.engine.context_del = nv04_fence_context_del;
135 priv->base.emit = nv04_fence_emit;
136 priv->base.sync = nv04_fence_sync;
137 priv->base.read = nv04_fence_read;
138 dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
139 return 0;
140}
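
nv04_fence.c amounts to a software sequence fence: emit pushes the fence's sequence number through software method 0x0150, nv04_fence_mthd records it, and read reports the last recorded value, so a fence counts as signalled once the channel's recorded sequence has caught up with it. A toy model of that contract (plain C, not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* toy stand-in for the per-channel state in nv04_fence_chan */
struct toy_chan { uint32_t sequence; };

/* the software method (nv04_fence_mthd): record channel progress */
static void toy_mthd(struct toy_chan *c, uint32_t data) { c->sequence = data; }

/* the read hook: a fence is signalled once progress reaches its sequence */
static bool toy_done(struct toy_chan *c, uint32_t fence_seq)
{
	return c->sequence >= fence_seq;
}

int main(void)
{
	struct toy_chan chan = { 0 };

	toy_mthd(&chan, 3);	/* channel has executed up to sequence 3 */
	printf("fence 2 done: %d\n", toy_done(&chan, 2)); /* 1 */
	printf("fence 5 done: %d\n", toy_done(&chan, 5)); /* 0 */
	return 0;
}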
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index db465a3ee1b2..a6295cd00ec7 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007 Ben Skeggs. 2 * Copyright (C) 2012 Ben Skeggs.
3 * All Rights Reserved. 3 * All Rights Reserved.
4 * 4 *
5 * Permission is hereby granted, free of charge, to any person obtaining 5 * Permission is hereby granted, free of charge, to any person obtaining
@@ -27,49 +27,38 @@
27#include "drmP.h" 27#include "drmP.h"
28#include "drm.h" 28#include "drm.h"
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_ramht.h" 30#include "nouveau_fifo.h"
31#include "nouveau_util.h" 31#include "nouveau_util.h"
32 32#include "nouveau_ramht.h"
33#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE)) 33#include "nouveau_software.h"
34#define NV04_RAMFC__SIZE 32 34
35#define NV04_RAMFC_DMA_PUT 0x00 35static struct ramfc_desc {
36#define NV04_RAMFC_DMA_GET 0x04 36 unsigned bits:6;
37#define NV04_RAMFC_DMA_INSTANCE 0x08 37 unsigned ctxs:5;
38#define NV04_RAMFC_DMA_STATE 0x0C 38 unsigned ctxp:8;
39#define NV04_RAMFC_DMA_FETCH 0x10 39 unsigned regs:5;
40#define NV04_RAMFC_ENGINE 0x14 40 unsigned regp;
41#define NV04_RAMFC_PULL1_ENGINE 0x18 41} nv04_ramfc[] = {
42 42 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
43#define RAMFC_WR(offset, val) nv_wo32(chan->ramfc, NV04_RAMFC_##offset, (val)) 43 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
44#define RAMFC_RD(offset) nv_ro32(chan->ramfc, NV04_RAMFC_##offset) 44 { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
45 45 { 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
46void 46 { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE },
47nv04_fifo_disable(struct drm_device *dev) 47 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
48{ 48 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE },
49 uint32_t tmp; 49 { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 },
50 50 {}
51 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH); 51};
52 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, tmp & ~1); 52
53 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0); 53struct nv04_fifo_priv {
54 tmp = nv_rd32(dev, NV03_PFIFO_CACHE1_PULL1); 54 struct nouveau_fifo_priv base;
55 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, tmp & ~1); 55 struct ramfc_desc *ramfc_desc;
56} 56};
57 57
58void 58struct nv04_fifo_chan {
59nv04_fifo_enable(struct drm_device *dev) 59 struct nouveau_fifo_chan base;
60{ 60 struct nouveau_gpuobj *ramfc;
61 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1); 61};
62 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
63}
64
65bool
66nv04_fifo_reassign(struct drm_device *dev, bool enable)
67{
68 uint32_t reassign = nv_rd32(dev, NV03_PFIFO_CACHES);
69
70 nv_wr32(dev, NV03_PFIFO_CACHES, enable ? 1 : 0);
71 return (reassign == 1);
72}
73 62
74bool 63bool
75nv04_fifo_cache_pull(struct drm_device *dev, bool enable) 64nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
@@ -86,13 +75,13 @@ nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
86 * invalidate the most recently calculated instance. 75 * invalidate the most recently calculated instance.
87 */ 76 */
88 if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0, 77 if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
89 NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0)) 78 NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
90 NV_ERROR(dev, "Timeout idling the PFIFO puller.\n"); 79 NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
91 80
92 if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) & 81 if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
93 NV04_PFIFO_CACHE1_PULL0_HASH_FAILED) 82 NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
94 nv_wr32(dev, NV03_PFIFO_INTR_0, 83 nv_wr32(dev, NV03_PFIFO_INTR_0,
95 NV_PFIFO_INTR_CACHE_ERROR); 84 NV_PFIFO_INTR_CACHE_ERROR);
96 85
97 nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0); 86 nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
98 } 87 }
@@ -100,242 +89,182 @@ nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
100 return pull & 1; 89 return pull & 1;
101} 90}
102 91
103int 92static int
104nv04_fifo_channel_id(struct drm_device *dev) 93nv04_fifo_context_new(struct nouveau_channel *chan, int engine)
105{
106 return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
107 NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
108}
109
110#ifdef __BIG_ENDIAN
111#define DMA_FETCH_ENDIANNESS NV_PFIFO_CACHE1_BIG_ENDIAN
112#else
113#define DMA_FETCH_ENDIANNESS 0
114#endif
115
116int
117nv04_fifo_create_context(struct nouveau_channel *chan)
118{ 94{
119 struct drm_device *dev = chan->dev; 95 struct drm_device *dev = chan->dev;
120 struct drm_nouveau_private *dev_priv = dev->dev_private; 96 struct drm_nouveau_private *dev_priv = dev->dev_private;
97 struct nv04_fifo_priv *priv = nv_engine(dev, engine);
98 struct nv04_fifo_chan *fctx;
121 unsigned long flags; 99 unsigned long flags;
122 int ret; 100 int ret;
123 101
124 ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0, 102 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
125 NV04_RAMFC__SIZE, 103 if (!fctx)
126 NVOBJ_FLAG_ZERO_ALLOC | 104 return -ENOMEM;
127 NVOBJ_FLAG_ZERO_FREE,
128 &chan->ramfc);
129 if (ret)
130 return ret;
131 105
106 /* map channel control registers */
132 chan->user = ioremap(pci_resource_start(dev->pdev, 0) + 107 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
133 NV03_USER(chan->id), PAGE_SIZE); 108 NV03_USER(chan->id), PAGE_SIZE);
134 if (!chan->user) 109 if (!chan->user) {
135 return -ENOMEM; 110 ret = -ENOMEM;
136 111 goto error;
137 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 112 }
138
139 /* Setup initial state */
140 RAMFC_WR(DMA_PUT, chan->pushbuf_base);
141 RAMFC_WR(DMA_GET, chan->pushbuf_base);
142 RAMFC_WR(DMA_INSTANCE, chan->pushbuf->pinst >> 4);
143 RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
144 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
145 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
146 DMA_FETCH_ENDIANNESS));
147 113
148 /* enable the fifo dma operation */ 114 /* initialise default fifo context */
149 nv_wr32(dev, NV04_PFIFO_MODE, 115 ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
150 nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id)); 116 chan->id * 32, ~0, 32,
117 NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
118 if (ret)
119 goto error;
120
121 nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
122 nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
123 nv_wo32(fctx->ramfc, 0x08, chan->pushbuf->pinst >> 4);
124 nv_wo32(fctx->ramfc, 0x0c, 0x00000000);
125 nv_wo32(fctx->ramfc, 0x10, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
126 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
127#ifdef __BIG_ENDIAN
128 NV_PFIFO_CACHE1_BIG_ENDIAN |
129#endif
130 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
131 nv_wo32(fctx->ramfc, 0x14, 0x00000000);
132 nv_wo32(fctx->ramfc, 0x18, 0x00000000);
133 nv_wo32(fctx->ramfc, 0x1c, 0x00000000);
151 134
135 /* enable dma mode on the channel */
136 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
137 nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
152 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 138 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
153 return 0; 139
140error:
141 if (ret)
142 priv->base.base.context_del(chan, engine);
143 return ret;
154} 144}
155 145
156void 146void
157nv04_fifo_destroy_context(struct nouveau_channel *chan) 147nv04_fifo_context_del(struct nouveau_channel *chan, int engine)
158{ 148{
159 struct drm_device *dev = chan->dev; 149 struct drm_device *dev = chan->dev;
160 struct drm_nouveau_private *dev_priv = dev->dev_private; 150 struct drm_nouveau_private *dev_priv = dev->dev_private;
161 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 151 struct nv04_fifo_priv *priv = nv_engine(chan->dev, engine);
152 struct nv04_fifo_chan *fctx = chan->engctx[engine];
153 struct ramfc_desc *c = priv->ramfc_desc;
162 unsigned long flags; 154 unsigned long flags;
155 int chid;
163 156
157 /* prevent fifo context switches */
164 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 158 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
165 pfifo->reassign(dev, false); 159 nv_wr32(dev, NV03_PFIFO_CACHES, 0);
166 160
167 /* Unload the context if it's the currently active one */ 161 /* if this channel is active, replace it with a null context */
168 if (pfifo->channel_id(dev) == chan->id) { 162 chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
169 pfifo->disable(dev); 163 if (chid == chan->id) {
170 pfifo->unload_context(dev); 164 nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
171 pfifo->enable(dev); 165 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
166 nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
167
168 do {
169 u32 mask = ((1ULL << c->bits) - 1) << c->regs;
170 nv_mask(dev, c->regp, mask, 0x00000000);
171 } while ((++c)->bits);
172
173 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
174 nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
175 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
176 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
177 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
172 } 178 }
173 179
174 /* Keep it from being rescheduled */ 180 /* restore normal operation, after disabling dma mode */
175 nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0); 181 nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
176 182 nv_wr32(dev, NV03_PFIFO_CACHES, 1);
177 pfifo->reassign(dev, true);
178 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 183 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
179 184
180 /* Free the channel resources */ 185 /* clean up */
186 nouveau_gpuobj_ref(NULL, &fctx->ramfc);
187 nouveau_gpuobj_ref(NULL, &chan->ramfc); /*XXX: nv40 */
181 if (chan->user) { 188 if (chan->user) {
182 iounmap(chan->user); 189 iounmap(chan->user);
183 chan->user = NULL; 190 chan->user = NULL;
184 } 191 }
185 nouveau_gpuobj_ref(NULL, &chan->ramfc);
186}
187
188static void
189nv04_fifo_do_load_context(struct drm_device *dev, int chid)
190{
191 struct drm_nouveau_private *dev_priv = dev->dev_private;
192 uint32_t fc = NV04_RAMFC(chid), tmp;
193
194 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
195 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
196 tmp = nv_ri32(dev, fc + 8);
197 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
198 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
199 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 12));
200 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 16));
201 nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20));
202 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24));
203
204 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
205 nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
206}
207
208int
209nv04_fifo_load_context(struct nouveau_channel *chan)
210{
211 uint32_t tmp;
212
213 nv_wr32(chan->dev, NV03_PFIFO_CACHE1_PUSH1,
214 NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
215 nv04_fifo_do_load_context(chan->dev, chan->id);
216 nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
217
218 /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
219 tmp = nv_rd32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
220 nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
221
222 return 0;
223} 192}
224 193
225int 194int
226nv04_fifo_unload_context(struct drm_device *dev) 195nv04_fifo_init(struct drm_device *dev, int engine)
227{ 196{
228 struct drm_nouveau_private *dev_priv = dev->dev_private; 197 struct drm_nouveau_private *dev_priv = dev->dev_private;
229 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 198 struct nv04_fifo_priv *priv = nv_engine(dev, engine);
230 struct nouveau_channel *chan = NULL; 199 int i;
231 uint32_t tmp;
232 int chid;
233
234 chid = pfifo->channel_id(dev);
235 if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
236 return 0;
237
238 chan = dev_priv->channels.ptr[chid];
239 if (!chan) {
240 NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
241 return -EINVAL;
242 }
243
244 RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
245 RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
246 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
247 tmp |= nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE);
248 RAMFC_WR(DMA_INSTANCE, tmp);
249 RAMFC_WR(DMA_STATE, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
250 RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
251 RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
252 RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
253
254 nv04_fifo_do_load_context(dev, pfifo->channels - 1);
255 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
256 return 0;
257}
258 200
259static void 201 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
260nv04_fifo_init_reset(struct drm_device *dev) 202 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
261{
262 nv_wr32(dev, NV03_PMC_ENABLE,
263 nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
264 nv_wr32(dev, NV03_PMC_ENABLE,
265 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
266
267 nv_wr32(dev, 0x003224, 0x000f0078);
268 nv_wr32(dev, 0x002044, 0x0101ffff);
269 nv_wr32(dev, 0x002040, 0x000000ff);
270 nv_wr32(dev, 0x002500, 0x00000000);
271 nv_wr32(dev, 0x003000, 0x00000000);
272 nv_wr32(dev, 0x003050, 0x00000000);
273 nv_wr32(dev, 0x003200, 0x00000000);
274 nv_wr32(dev, 0x003250, 0x00000000);
275 nv_wr32(dev, 0x003220, 0x00000000);
276
277 nv_wr32(dev, 0x003250, 0x00000000);
278 nv_wr32(dev, 0x003270, 0x00000000);
279 nv_wr32(dev, 0x003210, 0x00000000);
280}
281 203
282static void 204 nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
283nv04_fifo_init_ramxx(struct drm_device *dev) 205 nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
284{
285 struct drm_nouveau_private *dev_priv = dev->dev_private;
286 206
287 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | 207 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
288 ((dev_priv->ramht->bits - 9) << 16) | 208 ((dev_priv->ramht->bits - 9) << 16) |
289 (dev_priv->ramht->gpuobj->pinst >> 8)); 209 (dev_priv->ramht->gpuobj->pinst >> 8));
290 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8); 210 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
291 nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8); 211 nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
292}
293 212
294static void 213 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
295nv04_fifo_init_intr(struct drm_device *dev) 214
296{ 215 nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
297 nouveau_irq_register(dev, 8, nv04_fifo_isr); 216 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
298 nv_wr32(dev, 0x002100, 0xffffffff); 217
299 nv_wr32(dev, 0x002140, 0xffffffff); 218 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
219 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
220 nv_wr32(dev, NV03_PFIFO_CACHES, 1);
221
222 for (i = 0; i < priv->base.channels; i++) {
223 if (dev_priv->channels.ptr[i])
224 nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
225 }
226
227 return 0;
300} 228}
301 229
302int 230int
303nv04_fifo_init(struct drm_device *dev) 231nv04_fifo_fini(struct drm_device *dev, int engine, bool suspend)
304{ 232{
305 struct drm_nouveau_private *dev_priv = dev->dev_private; 233 struct drm_nouveau_private *dev_priv = dev->dev_private;
306 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 234 struct nv04_fifo_priv *priv = nv_engine(dev, engine);
307 int i; 235 struct nouveau_channel *chan;
308 236 int chid;
309 nv04_fifo_init_reset(dev);
310 nv04_fifo_init_ramxx(dev);
311
312 nv04_fifo_do_load_context(dev, pfifo->channels - 1);
313 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
314 237
315 nv04_fifo_init_intr(dev); 238 /* prevent context switches and halt fifo operation */
316 pfifo->enable(dev); 239 nv_wr32(dev, NV03_PFIFO_CACHES, 0);
317 pfifo->reassign(dev, true); 240 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
241 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
242 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 0);
318 243
319 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 244 /* store current fifo context in ramfc */
320 if (dev_priv->channels.ptr[i]) { 245 chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
321 uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); 246 chan = dev_priv->channels.ptr[chid];
322 nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); 247 if (suspend && chid != priv->base.channels && chan) {
323 } 248 struct nv04_fifo_chan *fctx = chan->engctx[engine];
249 struct nouveau_gpuobj *ctx = fctx->ramfc;
250 struct ramfc_desc *c = priv->ramfc_desc;
251 do {
252 u32 rm = ((1ULL << c->bits) - 1) << c->regs;
253 u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
254 u32 rv = (nv_rd32(dev, c->regp) & rm) >> c->regs;
255 u32 cv = (nv_ro32(ctx, c->ctxp) & ~cm);
256 nv_wo32(ctx, c->ctxp, cv | (rv << c->ctxs));
257 } while ((++c)->bits);
324 } 258 }
325 259
260 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0x00000000);
326 return 0; 261 return 0;
327} 262}
328 263
329void
330nv04_fifo_fini(struct drm_device *dev)
331{
332 nv_wr32(dev, 0x2140, 0x00000000);
333 nouveau_irq_unregister(dev, 8);
334}
335
336static bool 264static bool
 nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
 {
+	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = NULL;
 	struct nouveau_gpuobj *obj;
@@ -346,7 +275,7 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
 	u32 engine;
 
 	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
+	if (likely(chid >= 0 && chid < pfifo->channels))
 		chan = dev_priv->channels.ptr[chid];
 	if (unlikely(!chan))
 		goto out;
@@ -357,7 +286,6 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
 	if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
 		break;
 
-	chan->sw_subchannel[subc] = obj->class;
 	engine = 0x0000000f << (subc * 4);
 
 	nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
@@ -368,7 +296,7 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
 	if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
 		break;
 
-	if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc],
+	if (!nouveau_gpuobj_mthd_call(chan, nouveau_software_class(dev),
 				      mthd, data))
 		handled = true;
 	break;
@@ -391,8 +319,8 @@ static const char *nv_dma_state_err(u32 state)
 void
 nv04_fifo_isr(struct drm_device *dev)
 {
+	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
 	uint32_t status, reassign;
 	int cnt = 0;
 
@@ -402,7 +330,7 @@ nv04_fifo_isr(struct drm_device *dev)
 
 	nv_wr32(dev, NV03_PFIFO_CACHES, 0);
 
-	chid = engine->fifo.channel_id(dev);
+	chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & pfifo->channels;
 	get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
 
 	if (status & NV_PFIFO_INTR_CACHE_ERROR) {
@@ -541,3 +469,38 @@ nv04_fifo_isr(struct drm_device *dev)
 
 	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
 }
+
+void
+nv04_fifo_destroy(struct drm_device *dev, int engine)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv04_fifo_priv *priv = nv_engine(dev, engine);
+
+	nouveau_irq_unregister(dev, 8);
+
+	dev_priv->eng[engine] = NULL;
+	kfree(priv);
+}
+
+int
+nv04_fifo_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv04_fifo_priv *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->base.base.destroy = nv04_fifo_destroy;
+	priv->base.base.init = nv04_fifo_init;
+	priv->base.base.fini = nv04_fifo_fini;
+	priv->base.base.context_new = nv04_fifo_context_new;
+	priv->base.base.context_del = nv04_fifo_context_del;
+	priv->base.channels = 15;
+	priv->ramfc_desc = nv04_ramfc;
+	dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+	nouveau_irq_register(dev, 8, nv04_fifo_isr);
+	return 0;
+}
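Aside: the nv04_fifo_create()/nv04_fifo_destroy() pair added above is the engine-object pattern this series converts every PFIFO module to: allocate a private struct whose first member embeds the common engine base, fill in the vtable, then publish the base pointer in dev_priv->eng[]. A minimal stand-alone sketch of the same idea in plain C (the types and names below are illustrative only, not the driver's actual API):

#include <stdlib.h>

/* illustrative stand-ins for the driver's engine base class */
struct engine_ops {
	int  (*init)(void *dev, int engine);
	void (*destroy)(void *dev, int engine);
};

struct fifo_priv {
	struct engine_ops base;	/* must be first: callers downcast eng[i] */
	int channels;
};

static void *eng[8];		/* models dev_priv->eng[] */

static void fifo_destroy(void *dev, int engine)
{
	struct fifo_priv *priv = eng[engine];	/* same downcast nv_engine() does */
	eng[engine] = NULL;
	free(priv);
}

static int fifo_create(void *dev, int engine)
{
	struct fifo_priv *priv = calloc(1, sizeof(*priv));
	if (!priv)
		return -1;
	priv->base.destroy = fifo_destroy;
	priv->channels = 15;
	eng[engine] = &priv->base;	/* publish via the embedded base */
	return 0;
}

int main(void)
{
	if (fifo_create(NULL, 0) == 0)
		fifo_destroy(NULL, 0);
	return 0;
}

Because the base is the first member, the pointer stored in eng[] and the pointer to the private struct are interchangeable, which is what lets both generic code and chip-specific code share one slot.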
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index dbdea8ed3925..72f1a62903b3 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -356,12 +356,12 @@ static struct nouveau_channel *
 nv04_graph_channel(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int chid = dev_priv->engine.fifo.channels;
+	int chid = 15;
 
 	if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000)
 		chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24;
 
-	if (chid >= dev_priv->engine.fifo.channels)
+	if (chid > 15)
 		return NULL;
 
 	return dev_priv->channels.ptr[chid];
@@ -404,7 +404,6 @@ nv04_graph_load_context(struct nouveau_channel *chan)
 static int
 nv04_graph_unload_context(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = NULL;
 	struct graph_state *ctx;
 	uint32_t tmp;
@@ -420,7 +419,7 @@ nv04_graph_unload_context(struct drm_device *dev)
 
 	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
 	tmp  = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
-	tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
+	tmp |= 15 << 24;
 	nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
 	return 0;
 }
@@ -495,7 +494,6 @@ nv04_graph_object_new(struct nouveau_channel *chan, int engine,
 static int
 nv04_graph_init(struct drm_device *dev, int engine)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
 
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
@@ -527,7 +525,7 @@ nv04_graph_init(struct drm_device *dev, int engine)
 	nv_wr32(dev, NV04_PGRAPH_STATE        , 0xFFFFFFFF);
 	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL  , 0x10000100);
 	tmp  = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
-	tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
+	tmp |= 15 << 24;
 	nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
 
 	/* These don't belong here, they're part of a per-channel context */
@@ -550,28 +548,6 @@ nv04_graph_fini(struct drm_device *dev, int engine, bool suspend)
 	return 0;
 }
 
-static int
-nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
-			u32 class, u32 mthd, u32 data)
-{
-	atomic_set(&chan->fence.last_sequence_irq, data);
-	return 0;
-}
-
-int
-nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
-			  u32 class, u32 mthd, u32 data)
-{
-	struct drm_device *dev = chan->dev;
-	struct nouveau_page_flip_state s;
-
-	if (!nouveau_finish_page_flip(chan, &s))
-		nv_set_crtc_base(dev, s.crtc,
-				 s.offset + s.y * s.pitch + s.x * s.bpp / 8);
-
-	return 0;
-}
-
 /*
  * Software methods, why they are needed, and how they all work:
  *
@@ -1020,7 +996,8 @@ nv04_graph_context_switch(struct drm_device *dev)
 	nv04_graph_unload_context(dev);
 
 	/* Load context for next channel */
-	chid = dev_priv->engine.fifo.channel_id(dev);
+	chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
+	       NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
 	chan = dev_priv->channels.ptr[chid];
 	if (chan)
 		nv04_graph_load_context(chan);
@@ -1345,9 +1322,5 @@ nv04_graph_create(struct drm_device *dev)
 	NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
 	NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);
 
-	/* nvsw */
-	NVOBJ_CLASS(dev, 0x506e, SW);
-	NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref);
-	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
 	return 0;
 }
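Aside: with the fifo abstraction gone from PGRAPH, nv04_graph now hard-codes the 16-channel limit and pulls the active channel id straight out of the CTX_USER register's top byte. A stand-alone sketch of the recoverable logic (illustrative helper, not a function in the driver):

#include <stdint.h>

/* Extract the active channel id from CTX_USER-style register values,
 * mirroring the inline logic in nv04_graph_channel() above.  nv04
 * PFIFO has 16 channels; id 15 doubles as the "no channel" slot. */
static int nv04_ctx_user_chid(uint32_t ctx_control, uint32_t ctx_user)
{
	int chid = 15;			/* default, matches the diff */

	if (ctx_control & 0x00010000)	/* context switching enabled */
		chid = ctx_user >> 24;	/* channel id lives in bits 31:24 */

	return (chid > 15) ? -1 : chid;	/* out of range: no channel */
}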
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index c1248e0740a3..ef7a934a499a 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -1,6 +1,8 @@
1#include "drmP.h" 1#include "drmP.h"
2#include "drm.h" 2#include "drm.h"
3
3#include "nouveau_drv.h" 4#include "nouveau_drv.h"
5#include "nouveau_fifo.h"
4#include "nouveau_ramht.h" 6#include "nouveau_ramht.h"
5 7
6/* returns the size of fifo context */ 8/* returns the size of fifo context */
@@ -10,12 +12,15 @@ nouveau_fifo_ctx_size(struct drm_device *dev)
10 struct drm_nouveau_private *dev_priv = dev->dev_private; 12 struct drm_nouveau_private *dev_priv = dev->dev_private;
11 13
12 if (dev_priv->chipset >= 0x40) 14 if (dev_priv->chipset >= 0x40)
13 return 128; 15 return 128 * 32;
14 else 16 else
15 if (dev_priv->chipset >= 0x17) 17 if (dev_priv->chipset >= 0x17)
16 return 64; 18 return 64 * 32;
19 else
20 if (dev_priv->chipset >= 0x10)
21 return 32 * 32;
17 22
18 return 32; 23 return 32 * 16;
19} 24}
20 25
21int nv04_instmem_init(struct drm_device *dev) 26int nv04_instmem_init(struct drm_device *dev)
@@ -39,14 +44,10 @@ int nv04_instmem_init(struct drm_device *dev)
39 else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs; 44 else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
40 else rsvd = 0x4a40 * vs; 45 else rsvd = 0x4a40 * vs;
41 rsvd += 16 * 1024; 46 rsvd += 16 * 1024;
42 rsvd *= dev_priv->engine.fifo.channels; 47 rsvd *= 32; /* per-channel */
43
44 /* pciegart table */
45 if (pci_is_pcie(dev->pdev))
46 rsvd += 512 * 1024;
47 48
48 /* object storage */ 49 rsvd += 512 * 1024; /* pci(e)gart table */
49 rsvd += 512 * 1024; 50 rsvd += 512 * 1024; /* object storage */
50 51
51 dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096); 52 dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
52 } else { 53 } else {
@@ -71,7 +72,7 @@ int nv04_instmem_init(struct drm_device *dev)
71 return ret; 72 return ret;
72 73
73 /* And RAMFC */ 74 /* And RAMFC */
74 length = dev_priv->engine.fifo.channels * nouveau_fifo_ctx_size(dev); 75 length = nouveau_fifo_ctx_size(dev);
75 switch (dev_priv->card_type) { 76 switch (dev_priv->card_type) {
76 case NV_40: 77 case NV_40:
77 offset = 0x20000; 78 offset = 0x20000;
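Aside: nouveau_fifo_ctx_size() now returns the size of the whole RAMFC block (per-channel entry size times the fixed channel count for that family) instead of a single entry, so the caller no longer multiplies by a channel count. The worked numbers, restated as a stand-alone C sketch of the same arithmetic:

#include <stdint.h>

/* Mirrors nouveau_fifo_ctx_size() above: total RAMFC block size =
 * per-channel entry size * channel count for the chip family. */
static uint32_t fifo_ramfc_total(int chipset)
{
	if (chipset >= 0x40)
		return 128 * 32;	/* 128B entries, 32 channels = 4KiB */
	if (chipset >= 0x17)
		return  64 * 32;	/*  64B entries, 32 channels = 2KiB */
	if (chipset >= 0x10)
		return  32 * 32;	/*  32B entries, 32 channels = 1KiB */
	return 32 * 16;			/*  32B entries, 16 channels = 512B */
}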
diff --git a/drivers/gpu/drm/nouveau/nv04_software.c b/drivers/gpu/drm/nouveau/nv04_software.c
new file mode 100644
index 000000000000..0c41abf48774
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_software.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+#include "nouveau_software.h"
+#include "nouveau_hw.h"
+
+struct nv04_software_priv {
+	struct nouveau_software_priv base;
+};
+
+struct nv04_software_chan {
+	struct nouveau_software_chan base;
+};
+
+static int
+mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+
+	struct nouveau_page_flip_state state;
+
+	if (!nouveau_finish_page_flip(chan, &state)) {
+		nv_set_crtc_base(chan->dev, state.crtc, state.offset +
+				 state.y * state.pitch +
+				 state.x * state.bpp / 8);
+	}
+
+	return 0;
+}
+
+static int
+nv04_software_context_new(struct nouveau_channel *chan, int engine)
+{
+	struct nv04_software_chan *pch;
+
+	pch = kzalloc(sizeof(*pch), GFP_KERNEL);
+	if (!pch)
+		return -ENOMEM;
+
+	nouveau_software_context_new(&pch->base);
+	chan->engctx[engine] = pch;
+	return 0;
+}
+
+static void
+nv04_software_context_del(struct nouveau_channel *chan, int engine)
+{
+	struct nv04_software_chan *pch = chan->engctx[engine];
+	chan->engctx[engine] = NULL;
+	kfree(pch);
+}
+
+static int
+nv04_software_object_new(struct nouveau_channel *chan, int engine,
+			 u32 handle, u16 class)
+{
+	struct drm_device *dev = chan->dev;
+	struct nouveau_gpuobj *obj = NULL;
+	int ret;
+
+	ret = nouveau_gpuobj_new(dev, chan, 16, 16, 0, &obj);
+	if (ret)
+		return ret;
+	obj->engine = 0;
+	obj->class  = class;
+
+	ret = nouveau_ramht_insert(chan, handle, obj);
+	nouveau_gpuobj_ref(NULL, &obj);
+	return ret;
+}
+
+static int
+nv04_software_init(struct drm_device *dev, int engine)
+{
+	return 0;
+}
+
+static int
+nv04_software_fini(struct drm_device *dev, int engine, bool suspend)
+{
+	return 0;
+}
+
+static void
+nv04_software_destroy(struct drm_device *dev, int engine)
+{
+	struct nv04_software_priv *psw = nv_engine(dev, engine);
+
+	NVOBJ_ENGINE_DEL(dev, SW);
+	kfree(psw);
+}
+
+int
+nv04_software_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv04_software_priv *psw;
+
+	psw = kzalloc(sizeof(*psw), GFP_KERNEL);
+	if (!psw)
+		return -ENOMEM;
+
+	psw->base.base.destroy = nv04_software_destroy;
+	psw->base.base.init = nv04_software_init;
+	psw->base.base.fini = nv04_software_fini;
+	psw->base.base.context_new = nv04_software_context_new;
+	psw->base.base.context_del = nv04_software_context_del;
+	psw->base.base.object_new = nv04_software_object_new;
+	nouveau_software_create(&psw->base);
+
+	NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
+	if (dev_priv->card_type <= NV_04) {
+		NVOBJ_CLASS(dev, 0x006e, SW);
+		NVOBJ_MTHD (dev, 0x006e, 0x0150, nv04_fence_mthd);
+		NVOBJ_MTHD (dev, 0x006e, 0x0500, mthd_flip);
+	} else {
+		NVOBJ_CLASS(dev, 0x016e, SW);
+		NVOBJ_MTHD (dev, 0x016e, 0x0500, mthd_flip);
+	}
+
+	return 0;
+}
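Aside: the NVOBJ_CLASS/NVOBJ_MTHD registrations above wire a (class, method) pair to a handler that nouveau_fifo_swmthd() later invokes through nouveau_gpuobj_mthd_call() when PFIFO traps a software method. A stand-alone C model of that dispatch (a sketch with invented names; the driver's real table lives behind the NVOBJ_* macros):

#include <stdint.h>

typedef int (*mthd_fn)(void *chan, uint32_t class, uint32_t mthd,
		       uint32_t data);

struct mthd_slot {
	uint32_t class, mthd;
	mthd_fn  func;
};

static struct mthd_slot mthds[16];
static int nr_mthds;

/* models NVOBJ_MTHD(): record a handler for (class, method) */
static void mthd_register(uint32_t class, uint32_t mthd, mthd_fn fn)
{
	mthds[nr_mthds++] = (struct mthd_slot){ class, mthd, fn };
}

/* models nouveau_gpuobj_mthd_call(): look up and run the handler */
static int mthd_call(void *chan, uint32_t class, uint32_t mthd, uint32_t data)
{
	for (int i = 0; i < nr_mthds; i++) {
		if (mthds[i].class == class && mthds[i].mthd == mthd)
			return mthds[i].func(chan, class, mthd, data);
	}
	return -1;	/* unhandled; the real ISR then reports an error */
}

Usage follows the diff's shape: register 0x016e/0x0500 once at engine creation, then every trapped method on that class resolves through the table.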
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
new file mode 100644
index 000000000000..8a1b75009185
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+
+struct nv10_fence_chan {
+	struct nouveau_fence_chan base;
+};
+
+struct nv10_fence_priv {
+	struct nouveau_fence_priv base;
+	struct nouveau_bo *bo;
+	spinlock_t lock;
+	u32 sequence;
+};
+
+static int
+nv10_fence_emit(struct nouveau_fence *fence)
+{
+	struct nouveau_channel *chan = fence->channel;
+	int ret = RING_SPACE(chan, 2);
+	if (ret == 0) {
+		BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
+		OUT_RING (chan, fence->sequence);
+		FIRE_RING (chan);
+	}
+	return ret;
+}
+
+
+static int
+nv10_fence_sync(struct nouveau_fence *fence,
+		struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+	return -ENODEV;
+}
+
+static int
+nv17_fence_sync(struct nouveau_fence *fence,
+		struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+	struct nv10_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
+	u32 value;
+	int ret;
+
+	if (!mutex_trylock(&prev->mutex))
+		return -EBUSY;
+
+	spin_lock(&priv->lock);
+	value = priv->sequence;
+	priv->sequence += 2;
+	spin_unlock(&priv->lock);
+
+	ret = RING_SPACE(prev, 5);
+	if (!ret) {
+		BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
+		OUT_RING (prev, NvSema);
+		OUT_RING (prev, 0);
+		OUT_RING (prev, value + 0);
+		OUT_RING (prev, value + 1);
+		FIRE_RING (prev);
+	}
+
+	if (!ret && !(ret = RING_SPACE(chan, 5))) {
+		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
+		OUT_RING (chan, NvSema);
+		OUT_RING (chan, 0);
+		OUT_RING (chan, value + 1);
+		OUT_RING (chan, value + 2);
+		FIRE_RING (chan);
+	}
+
+	mutex_unlock(&prev->mutex);
+	return 0;
+}
+
+static u32
+nv10_fence_read(struct nouveau_channel *chan)
+{
+	return nvchan_rd32(chan, 0x0048);
+}
+
+static void
+nv10_fence_context_del(struct nouveau_channel *chan, int engine)
+{
+	struct nv10_fence_chan *fctx = chan->engctx[engine];
+	nouveau_fence_context_del(&fctx->base);
+	chan->engctx[engine] = NULL;
+	kfree(fctx);
+}
+
+static int
+nv10_fence_context_new(struct nouveau_channel *chan, int engine)
+{
+	struct nv10_fence_priv *priv = nv_engine(chan->dev, engine);
+	struct nv10_fence_chan *fctx;
+	struct nouveau_gpuobj *obj;
+	int ret = 0;
+
+	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	if (!fctx)
+		return -ENOMEM;
+
+	nouveau_fence_context_new(&fctx->base);
+
+	if (priv->bo) {
+		struct ttm_mem_reg *mem = &priv->bo->bo.mem;
+
+		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
+					     mem->start * PAGE_SIZE, mem->size,
+					     NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VRAM, &obj);
+		if (!ret) {
+			ret = nouveau_ramht_insert(chan, NvSema, obj);
+			nouveau_gpuobj_ref(NULL, &obj);
+		}
+	}
+
+	if (ret)
+		nv10_fence_context_del(chan, engine);
+	return ret;
+}
+
+static int
+nv10_fence_fini(struct drm_device *dev, int engine, bool suspend)
+{
+	return 0;
+}
+
+static int
+nv10_fence_init(struct drm_device *dev, int engine)
+{
+	return 0;
+}
+
+static void
+nv10_fence_destroy(struct drm_device *dev, int engine)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv10_fence_priv *priv = nv_engine(dev, engine);
+
+	nouveau_bo_ref(NULL, &priv->bo);
+	dev_priv->eng[engine] = NULL;
+	kfree(priv);
+}
+
+int
+nv10_fence_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv10_fence_priv *priv;
+	int ret = 0;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->base.engine.destroy = nv10_fence_destroy;
+	priv->base.engine.init = nv10_fence_init;
+	priv->base.engine.fini = nv10_fence_fini;
+	priv->base.engine.context_new = nv10_fence_context_new;
+	priv->base.engine.context_del = nv10_fence_context_del;
+	priv->base.emit = nv10_fence_emit;
+	priv->base.read = nv10_fence_read;
+	priv->base.sync = nv10_fence_sync;
+	dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
+	spin_lock_init(&priv->lock);
+
+	if (dev_priv->chipset >= 0x17) {
+		ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+				     0, 0x0000, NULL, &priv->bo);
+		if (!ret) {
+			ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+			if (!ret)
+				ret = nouveau_bo_map(priv->bo);
+			if (ret)
+				nouveau_bo_ref(NULL, &priv->bo);
+		}
+
+		if (ret == 0) {
+			nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
+			priv->base.sync = nv17_fence_sync;
+		}
+	}
+
+	if (ret)
+		nv10_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
+	return ret;
+}
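Aside: nv17_fence_sync() above hands each sync a fresh pair of values from priv->sequence and chains the two channels through one shared semaphore word. Reading the four OUT_RINGs as (DMA object, offset, acquire, release), prev acquires value and releases value+1, then chan acquires value+1 and releases value+2, so chan cannot run ahead of prev's flushed work. A stand-alone C model of that ordering (a sketch; on hardware the waits happen inside PFIFO, not on the CPU):

#include <stdint.h>

struct sema { volatile uint32_t word; };

static void sema_acquire(struct sema *s, uint32_t v)
{
	while (s->word != v)
		;	/* stall until the expected value is released */
}

static void sema_release(struct sema *s, uint32_t v)
{
	s->word = v;
}

static void sync_channels(struct sema *s, uint32_t value)
{
	/* commands queued on "prev": finish existing work, bump sema */
	sema_acquire(s, value + 0);
	sema_release(s, value + 1);

	/* commands queued on "chan": may only proceed after prev's release */
	sema_acquire(s, value + 1);
	sema_release(s, value + 2);
}

int main(void)
{
	struct sema s = { .word = 0 };
	sync_channels(&s, 0);	/* leaves the word at 2 for the next pair */
	return 0;
}

Allocating two values per sync is what lets back-to-back syncs chain: the next caller starts from the word the previous pair left behind.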
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
index d2ecbff4bee1..f1fe7d758241 100644
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007 Ben Skeggs.
+ * Copyright (C) 2012 Ben Skeggs.
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining
@@ -27,220 +27,112 @@
 #include "drmP.h"
 #include "drm.h"
 #include "nouveau_drv.h"
+#include "nouveau_fifo.h"
+#include "nouveau_util.h"
 #include "nouveau_ramht.h"
 
-#define NV10_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV10_RAMFC__SIZE))
-#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
-
-int
-nv10_fifo_channel_id(struct drm_device *dev)
-{
-	return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
-			NV10_PFIFO_CACHE1_PUSH1_CHID_MASK;
-}
-
-int
-nv10_fifo_create_context(struct nouveau_channel *chan)
+static struct ramfc_desc {
+	unsigned bits:6;
+	unsigned ctxs:5;
+	unsigned ctxp:8;
+	unsigned regs:5;
+	unsigned regp;
+} nv10_ramfc[] = {
+	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
+	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
+	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
+	{ 16,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+	{ 16, 16, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_STATE },
+	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
+	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_ENGINE },
+	{ 32,  0, 0x1c,  0, NV04_PFIFO_CACHE1_PULL1 },
+	{}
+};
+
+struct nv10_fifo_priv {
+	struct nouveau_fifo_priv base;
+	struct ramfc_desc *ramfc_desc;
+};
+
+struct nv10_fifo_chan {
+	struct nouveau_fifo_chan base;
+	struct nouveau_gpuobj *ramfc;
+};
+
+static int
+nv10_fifo_context_new(struct nouveau_channel *chan, int engine)
 {
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct drm_device *dev = chan->dev;
-	uint32_t fc = NV10_RAMFC(chan->id);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv10_fifo_priv *priv = nv_engine(dev, engine);
+	struct nv10_fifo_chan *fctx;
+	unsigned long flags;
 	int ret;
 
-	ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
-				      NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
-				      NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
-	if (ret)
-		return ret;
+	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	if (!fctx)
+		return -ENOMEM;
 
+	/* map channel control registers */
 	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
 			     NV03_USER(chan->id), PAGE_SIZE);
-	if (!chan->user)
-		return -ENOMEM;
+	if (!chan->user) {
+		ret = -ENOMEM;
+		goto error;
+	}
 
-	/* Fill entries that are seen filled in dumps of nvidia driver just
-	 * after channel's is put into DMA mode
-	 */
-	nv_wi32(dev, fc +  0, chan->pushbuf_base);
-	nv_wi32(dev, fc +  4, chan->pushbuf_base);
-	nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4);
-	nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
-			      NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-			      NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
+	/* initialise default fifo context */
+	ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
+				      chan->id * 32, ~0, 32,
+				      NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+	if (ret)
+		goto error;
+
+	nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
+	nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
+	nv_wo32(fctx->ramfc, 0x08, 0x00000000);
+	nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
+	nv_wo32(fctx->ramfc, 0x10, 0x00000000);
+	nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+				   NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
 #ifdef __BIG_ENDIAN
 			      NV_PFIFO_CACHE1_BIG_ENDIAN |
 #endif
-			      0);
-
-	/* enable the fifo dma operation */
-	nv_wr32(dev, NV04_PFIFO_MODE,
-		nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
-	return 0;
-}
-
-static void
-nv10_fifo_do_load_context(struct drm_device *dev, int chid)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t fc = NV10_RAMFC(chid), tmp;
-
-	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
-	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
-	nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
-
-	tmp = nv_ri32(dev, fc + 12);
-	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
-	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
-
-	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 16));
-	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 20));
-	nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 24));
-	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 28));
-
-	if (dev_priv->chipset < 0x17)
-		goto out;
-
-	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 32));
-	tmp = nv_ri32(dev, fc + 36);
-	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
-	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 40));
-	nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 44));
-	nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48));
-
-out:
-	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
-	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
-}
-
-int
-nv10_fifo_load_context(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-	uint32_t tmp;
-
-	nv10_fifo_do_load_context(dev, chan->id);
+				   NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+	nv_wo32(fctx->ramfc, 0x18, 0x00000000);
+	nv_wo32(fctx->ramfc, 0x1c, 0x00000000);
 
-	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
-		NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
-	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
+	/* enable dma mode on the channel */
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
-	/* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
-	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
-	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
-
-	return 0;
+error:
+	if (ret)
+		priv->base.base.context_del(chan, engine);
+	return ret;
 }
 
 int
-nv10_fifo_unload_context(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
-	uint32_t fc, tmp;
-	int chid;
-
-	chid = pfifo->channel_id(dev);
-	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
-		return 0;
-	fc = NV10_RAMFC(chid);
-
-	nv_wi32(dev, fc +  0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
-	nv_wi32(dev, fc +  4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
-	nv_wi32(dev, fc +  8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
-	tmp  = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF;
-	tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16);
-	nv_wi32(dev, fc + 12, tmp);
-	nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
-	nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
-	nv_wi32(dev, fc + 24, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
-	nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
-
-	if (dev_priv->chipset < 0x17)
-		goto out;
-
-	nv_wi32(dev, fc + 32, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
-	tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
-	nv_wi32(dev, fc + 36, tmp);
-	nv_wi32(dev, fc + 40, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
-	nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
-	nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
-
-out:
-	nv10_fifo_do_load_context(dev, pfifo->channels - 1);
-	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
-	return 0;
-}
-
-static void
-nv10_fifo_init_reset(struct drm_device *dev)
-{
-	nv_wr32(dev, NV03_PMC_ENABLE,
-		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
-	nv_wr32(dev, NV03_PMC_ENABLE,
-		nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
-
-	nv_wr32(dev, 0x003224, 0x000f0078);
-	nv_wr32(dev, 0x002044, 0x0101ffff);
-	nv_wr32(dev, 0x002040, 0x000000ff);
-	nv_wr32(dev, 0x002500, 0x00000000);
-	nv_wr32(dev, 0x003000, 0x00000000);
-	nv_wr32(dev, 0x003050, 0x00000000);
-
-	nv_wr32(dev, 0x003258, 0x00000000);
-	nv_wr32(dev, 0x003210, 0x00000000);
-	nv_wr32(dev, 0x003270, 0x00000000);
-}
-
-static void
-nv10_fifo_init_ramxx(struct drm_device *dev)
+nv10_fifo_create(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv10_fifo_priv *priv;
 
-	nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
-				       ((dev_priv->ramht->bits - 9) << 16) |
-				       (dev_priv->ramht->gpuobj->pinst >> 8));
-	nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
-
-	if (dev_priv->chipset < 0x17) {
-		nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
-	} else {
-		nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc->pinst >> 8) |
-					       (1 << 16) /* 64 Bytes entry*/);
-		/* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */
-	}
-}
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
 
-static void
-nv10_fifo_init_intr(struct drm_device *dev)
-{
+	priv->base.base.destroy = nv04_fifo_destroy;
+	priv->base.base.init = nv04_fifo_init;
+	priv->base.base.fini = nv04_fifo_fini;
+	priv->base.base.context_new = nv10_fifo_context_new;
+	priv->base.base.context_del = nv04_fifo_context_del;
+	priv->base.channels = 31;
+	priv->ramfc_desc = nv10_ramfc;
+	dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
 
 	nouveau_irq_register(dev, 8, nv04_fifo_isr);
-	nv_wr32(dev, 0x002100, 0xffffffff);
-	nv_wr32(dev, 0x002140, 0xffffffff);
-}
-
-int
-nv10_fifo_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
-	int i;
-
-	nv10_fifo_init_reset(dev);
-	nv10_fifo_init_ramxx(dev);
-
-	nv10_fifo_do_load_context(dev, pfifo->channels - 1);
-	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
-
-	nv10_fifo_init_intr(dev);
-	pfifo->enable(dev);
-	pfifo->reassign(dev, true);
-
-	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		if (dev_priv->channels.ptr[i]) {
-			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
-			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
-		}
-	}
-
 	return 0;
 }
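Aside: the nv10_ramfc[] table above describes each saved PFIFO register as (field width, shift within the RAMFC dword, byte offset within the RAMFC entry, shift within the hardware register, register address), replacing the hand-rolled load/unload functions this patch deletes. The table's consumer is not in this hunk (presumably the shared nv04 fifo code walks it on channel switch), but a table-driven save would look roughly like this stand-alone sketch, with reg_read/ctx_read/ctx_write standing in for nv_rd32()/nv_ro32()/nv_wo32():

#include <stdint.h>

struct ramfc_desc {
	unsigned bits:6;	/* field width */
	unsigned ctxs:5;	/* shift inside the RAMFC dword */
	unsigned ctxp:8;	/* byte offset inside the RAMFC entry */
	unsigned regs:5;	/* shift inside the hw register */
	unsigned regp;		/* hw register address */
};

static uint32_t regs_mem[0x10000];	/* toy register file, addr / 4 */
static uint32_t ramfc_mem[32];		/* one 128-byte RAMFC entry */

static uint32_t reg_read(uint32_t addr)             { return regs_mem[addr / 4]; }
static uint32_t ctx_read(uint32_t off)              { return ramfc_mem[off / 4]; }
static void     ctx_write(uint32_t off, uint32_t v) { ramfc_mem[off / 4] = v; }

/* copy one register field into its slot in the RAMFC entry */
static void ramfc_save_one(const struct ramfc_desc *c)
{
	uint32_t mask  = (c->bits < 32) ? ((1u << c->bits) - 1) : 0xffffffffu;
	uint32_t field = (reg_read(c->regp) >> c->regs) & mask;
	uint32_t dword = ctx_read(c->ctxp) & ~(mask << c->ctxs);

	ctx_write(c->ctxp, dword | (field << c->ctxs));
}

Restoring is the mirror image (read the dword, shift into the register), and the {} sentinel ends the walk, which is why each per-chipset file only has to supply the table.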
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 7255e4a4d3f3..fb1d88a951de 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -759,7 +759,6 @@ static int
 nv10_graph_unload_context(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_channel *chan;
 	struct graph_state *ctx;
 	uint32_t tmp;
@@ -782,7 +781,7 @@ nv10_graph_unload_context(struct drm_device *dev)
 
 	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
 	tmp  = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
-	tmp |= (pfifo->channels - 1) << 24;
+	tmp |= 31 << 24;
 	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
 	return 0;
 }
@@ -822,12 +821,12 @@ struct nouveau_channel *
 nv10_graph_channel(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int chid = dev_priv->engine.fifo.channels;
+	int chid = 31;
 
 	if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000)
 		chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24;
 
-	if (chid >= dev_priv->engine.fifo.channels)
+	if (chid >= 31)
 		return NULL;
 
 	return dev_priv->channels.ptr[chid];
@@ -948,7 +947,7 @@ nv10_graph_init(struct drm_device *dev, int engine)
 	nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF);
 
 	tmp  = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
-	tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
+	tmp |= 31 << 24;
 	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
 	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
 	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
@@ -1153,10 +1152,6 @@ nv10_graph_create(struct drm_device *dev)
 	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
 	nouveau_irq_register(dev, 12, nv10_graph_isr);
 
-	/* nvsw */
-	NVOBJ_CLASS(dev, 0x506e, SW);
-	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
-
 	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
 	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
 	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
diff --git a/drivers/gpu/drm/nouveau/nv17_fifo.c b/drivers/gpu/drm/nouveau/nv17_fifo.c
new file mode 100644
index 000000000000..d9e482e4abee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_fifo.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2012 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
+#include "nouveau_util.h"
+#include "nouveau_ramht.h"
+
+static struct ramfc_desc {
+	unsigned bits:6;
+	unsigned ctxs:5;
+	unsigned ctxp:8;
+	unsigned regs:5;
+	unsigned regp;
+} nv17_ramfc[] = {
+	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
+	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
+	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
+	{ 16,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+	{ 16, 16, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_STATE },
+	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
+	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_ENGINE },
+	{ 32,  0, 0x1c,  0, NV04_PFIFO_CACHE1_PULL1 },
+	{ 32,  0, 0x20,  0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
+	{ 32,  0, 0x24,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
+	{ 32,  0, 0x28,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
+	{ 32,  0, 0x2c,  0, NV10_PFIFO_CACHE1_SEMAPHORE },
+	{ 32,  0, 0x30,  0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
+	{}
+};
+
+struct nv17_fifo_priv {
+	struct nouveau_fifo_priv base;
+	struct ramfc_desc *ramfc_desc;
+};
+
+struct nv17_fifo_chan {
+	struct nouveau_fifo_chan base;
+	struct nouveau_gpuobj *ramfc;
+};
+
+static int
+nv17_fifo_context_new(struct nouveau_channel *chan, int engine)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv17_fifo_priv *priv = nv_engine(dev, engine);
+	struct nv17_fifo_chan *fctx;
+	unsigned long flags;
+	int ret;
+
+	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	if (!fctx)
+		return -ENOMEM;
+
+	/* map channel control registers */
+	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+			     NV03_USER(chan->id), PAGE_SIZE);
+	if (!chan->user) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	/* initialise default fifo context */
+	ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
+				      chan->id * 64, ~0, 64,
+				      NVOBJ_FLAG_ZERO_ALLOC |
+				      NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+	if (ret)
+		goto error;
+
+	nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
+	nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
+	nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
+	nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+				   NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+				   NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+				   NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+
+	/* enable dma mode on the channel */
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+error:
+	if (ret)
+		priv->base.base.context_del(chan, engine);
+	return ret;
+}
+
+static int
+nv17_fifo_init(struct drm_device *dev, int engine)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv17_fifo_priv *priv = nv_engine(dev, engine);
+	int i;
+
+	nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
+	nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
+
+	nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
+	nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
+	nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+				       ((dev_priv->ramht->bits - 9) << 16) |
+				       (dev_priv->ramht->gpuobj->pinst >> 8));
+	nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
+	nv_wr32(dev, NV03_PFIFO_RAMFC, 0x00010000 |
+				       dev_priv->ramfc->pinst >> 8);
+
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
+
+	nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
+	nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
+	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+	nv_wr32(dev, NV03_PFIFO_CACHES, 1);
+
+	for (i = 0; i < priv->base.channels; i++) {
+		if (dev_priv->channels.ptr[i])
+			nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
+	}
+
+	return 0;
+}
+
+int
+nv17_fifo_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv17_fifo_priv *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->base.base.destroy = nv04_fifo_destroy;
+	priv->base.base.init = nv17_fifo_init;
+	priv->base.base.fini = nv04_fifo_fini;
+	priv->base.base.context_new = nv17_fifo_context_new;
+	priv->base.base.context_del = nv04_fifo_context_del;
+	priv->base.channels = 31;
+	priv->ramfc_desc = nv17_ramfc;
+	dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+	nouveau_irq_register(dev, 8, nv04_fifo_isr);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 183e37512ef9..e34ea30758f6 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -43,8 +43,6 @@ struct nv20_graph_engine {
 int
 nv20_graph_unload_context(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_channel *chan;
 	struct nouveau_gpuobj *grctx;
 	u32 tmp;
@@ -62,7 +60,7 @@ nv20_graph_unload_context(struct drm_device *dev)
 
 	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
 	tmp  = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
-	tmp |= (pfifo->channels - 1) << 24;
+	tmp |= 31 << 24;
 	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
 	return 0;
 }
@@ -796,10 +794,6 @@ nv20_graph_create(struct drm_device *dev)
 	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
 	nouveau_irq_register(dev, 12, nv20_graph_isr);
 
-	/* nvsw */
-	NVOBJ_CLASS(dev, 0x506e, SW);
-	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
-
 	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
 	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
 	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
diff --git a/drivers/gpu/drm/nouveau/nv31_mpeg.c b/drivers/gpu/drm/nouveau/nv31_mpeg.c
index 6f06a0713f00..5f239bf658c4 100644
--- a/drivers/gpu/drm/nouveau/nv31_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv31_mpeg.c
@@ -24,6 +24,7 @@
 
 #include "drmP.h"
 #include "nouveau_drv.h"
+#include "nouveau_fifo.h"
 #include "nouveau_ramht.h"
 
 struct nv31_mpeg_engine {
@@ -208,6 +209,7 @@ nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
 static int
 nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
 {
+	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *ctx;
 	unsigned long flags;
@@ -218,7 +220,7 @@ nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
 		return 0;
 
 	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+	for (i = 0; i < pfifo->channels; i++) {
 		if (!dev_priv->channels.ptr[i])
 			continue;
 
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index 68cb2d991c88..cdc818479b0a 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007 Ben Skeggs. 2 * Copyright (C) 2012 Ben Skeggs.
3 * All Rights Reserved. 3 * All Rights Reserved.
4 * 4 *
5 * Permission is hereby granted, free of charge, to any person obtaining 5 * Permission is hereby granted, free of charge, to any person obtaining
@@ -25,215 +25,123 @@
25 */ 25 */
26 26
27#include "drmP.h" 27#include "drmP.h"
28#include "drm.h"
28#include "nouveau_drv.h" 29#include "nouveau_drv.h"
29#include "nouveau_drm.h" 30#include "nouveau_fifo.h"
31#include "nouveau_util.h"
30#include "nouveau_ramht.h" 32#include "nouveau_ramht.h"
31 33
32#define NV40_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV40_RAMFC__SIZE)) 34static struct ramfc_desc {
33#define NV40_RAMFC__SIZE 128 35 unsigned bits:6;
34 36 unsigned ctxs:5;
35int 37 unsigned ctxp:8;
36nv40_fifo_create_context(struct nouveau_channel *chan) 38 unsigned regs:5;
39 unsigned regp;
40} nv40_ramfc[] = {
41 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
42 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
43 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
44 { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
45 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
46 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_STATE },
47 { 28, 0, 0x18, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
48 { 2, 28, 0x18, 28, 0x002058 },
49 { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_ENGINE },
50 { 32, 0, 0x20, 0, NV04_PFIFO_CACHE1_PULL1 },
51 { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
52 { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
53 { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
54 { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
55 { 32, 0, 0x34, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
56 { 32, 0, 0x38, 0, NV40_PFIFO_GRCTX_INSTANCE },
57 { 17, 0, 0x3c, 0, NV04_PFIFO_DMA_TIMESLICE },
58 { 32, 0, 0x40, 0, 0x0032e4 },
59 { 32, 0, 0x44, 0, 0x0032e8 },
60 { 32, 0, 0x4c, 0, 0x002088 },
61 { 32, 0, 0x50, 0, 0x003300 },
62 { 32, 0, 0x54, 0, 0x00330c },
63 {}
64};
65
66struct nv40_fifo_priv {
67 struct nouveau_fifo_priv base;
68 struct ramfc_desc *ramfc_desc;
69};
70
71struct nv40_fifo_chan {
72 struct nouveau_fifo_chan base;
73 struct nouveau_gpuobj *ramfc;
74};
75
76static int
77nv40_fifo_context_new(struct nouveau_channel *chan, int engine)
37{ 78{
38 struct drm_device *dev = chan->dev; 79 struct drm_device *dev = chan->dev;
39 struct drm_nouveau_private *dev_priv = dev->dev_private; 80 struct drm_nouveau_private *dev_priv = dev->dev_private;
40 uint32_t fc = NV40_RAMFC(chan->id); 81 struct nv40_fifo_priv *priv = nv_engine(dev, engine);
82 struct nv40_fifo_chan *fctx;
41 unsigned long flags; 83 unsigned long flags;
42 int ret; 84 int ret;
43 85
44 ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0, 86 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
45 NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | 87 if (!fctx)
46 NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
47 if (ret)
48 return ret;
49
50 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
51 NV40_USER(chan->id), PAGE_SIZE);
52 if (!chan->user)
53 return -ENOMEM; 88 return -ENOMEM;
54 89
55 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 90 /* map channel control registers */
91 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
92 NV03_USER(chan->id), PAGE_SIZE);
93 if (!chan->user) {
94 ret = -ENOMEM;
95 goto error;
96 }
56 97
57 nv_wi32(dev, fc + 0, chan->pushbuf_base); 98 /* initialise default fifo context */
58 nv_wi32(dev, fc + 4, chan->pushbuf_base); 99 ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
59 nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4); 100 chan->id * 128, ~0, 128,
60 nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | 101 NVOBJ_FLAG_ZERO_ALLOC |
61 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | 102 NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
62 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | 103 if (ret)
104 goto error;
105
106 nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
107 nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
108 nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
109 nv_wo32(fctx->ramfc, 0x18, 0x30000000 |
110 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
111 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
63#ifdef __BIG_ENDIAN 112#ifdef __BIG_ENDIAN
64 NV_PFIFO_CACHE1_BIG_ENDIAN | 113 NV_PFIFO_CACHE1_BIG_ENDIAN |
65#endif 114#endif
66 0x30000000 /* no idea.. */); 115 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
67 nv_wi32(dev, fc + 60, 0x0001FFFF); 116 nv_wo32(fctx->ramfc, 0x3c, 0x0001ffff);
68
69 /* enable the fifo dma operation */
70 nv_wr32(dev, NV04_PFIFO_MODE,
71 nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
72 117
118 /* enable dma mode on the channel */
119 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
120 nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
73 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 121 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
74 return 0;
75}
76
77static void
78nv40_fifo_do_load_context(struct drm_device *dev, int chid)
79{
80 struct drm_nouveau_private *dev_priv = dev->dev_private;
81 uint32_t fc = NV40_RAMFC(chid), tmp, tmp2;
82
83 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
84 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
85 nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
86 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, nv_ri32(dev, fc + 12));
87 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, nv_ri32(dev, fc + 16));
88 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 20));
89
90 /* No idea what 0x2058 is.. */
91 tmp = nv_ri32(dev, fc + 24);
92 tmp2 = nv_rd32(dev, 0x2058) & 0xFFF;
93 tmp2 |= (tmp & 0x30000000);
94 nv_wr32(dev, 0x2058, tmp2);
95 tmp &= ~0x30000000;
96 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, tmp);
97 122
98 nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 28)); 123 /*XXX: remove this later, need fifo engine context commit hook */
99 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 32)); 124 nouveau_gpuobj_ref(fctx->ramfc, &chan->ramfc);
100 nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 36));
101 tmp = nv_ri32(dev, fc + 40);
102 nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
103 nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 44));
104 nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 48));
105 nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 52));
106 nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, nv_ri32(dev, fc + 56));
107 125
108 /* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */ 126error:
109 tmp = nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF; 127 if (ret)
110 tmp |= nv_ri32(dev, fc + 60) & 0x1FFFF; 128 priv->base.base.context_del(chan, engine);
111 nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, tmp); 129 return ret;
112
113 nv_wr32(dev, 0x32e4, nv_ri32(dev, fc + 64));
114 /* NVIDIA does this next line twice... */
115 nv_wr32(dev, 0x32e8, nv_ri32(dev, fc + 68));
116 nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76));
117 nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80));
118 nv_wr32(dev, 0x330c, nv_ri32(dev, fc + 84));
119
120 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
121 nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
122}
123
124int
125nv40_fifo_load_context(struct nouveau_channel *chan)
126{
127 struct drm_device *dev = chan->dev;
128 uint32_t tmp;
129
130 nv40_fifo_do_load_context(dev, chan->id);
131
132 /* Set channel active, and in DMA mode */
133 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
134 NV40_PFIFO_CACHE1_PUSH1_DMA | chan->id);
135 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
136
137 /* Reset DMA_CTL_AT_INFO to INVALID */
138 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
139 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
140
141 return 0;
142} 130}
143 131
144int 132static int
145nv40_fifo_unload_context(struct drm_device *dev) 133nv40_fifo_init(struct drm_device *dev, int engine)
146{ 134{
147 struct drm_nouveau_private *dev_priv = dev->dev_private; 135 struct drm_nouveau_private *dev_priv = dev->dev_private;
148 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 136 struct nv40_fifo_priv *priv = nv_engine(dev, engine);
149 uint32_t fc, tmp;
150 int chid;
151
152 chid = pfifo->channel_id(dev);
153 if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
154 return 0;
155 fc = NV40_RAMFC(chid);
156
157 nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
158 nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
159 nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
160 nv_wi32(dev, fc + 12, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE));
161 nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT));
162 nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
163 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH);
164 tmp |= nv_rd32(dev, 0x2058) & 0x30000000;
165 nv_wi32(dev, fc + 24, tmp);
166 nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
167 nv_wi32(dev, fc + 32, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
168 nv_wi32(dev, fc + 36, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
169 tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
170 nv_wi32(dev, fc + 40, tmp);
171 nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
172 nv_wi32(dev, fc + 48, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
173 /* NVIDIA read 0x3228 first, then write DMA_GET here.. maybe something
174 * more involved depending on the value of 0x3228?
175 */
176 nv_wi32(dev, fc + 52, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
177 nv_wi32(dev, fc + 56, nv_rd32(dev, NV40_PFIFO_GRCTX_INSTANCE));
178 nv_wi32(dev, fc + 60, nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & 0x1ffff);
179 /* No idea what the below is for exactly, ripped from a mmio-trace */
180 nv_wi32(dev, fc + 64, nv_rd32(dev, NV40_PFIFO_UNK32E4));
181 /* NVIDIA do this next line twice.. bug? */
182 nv_wi32(dev, fc + 68, nv_rd32(dev, 0x32e8));
183 nv_wi32(dev, fc + 76, nv_rd32(dev, 0x2088));
184 nv_wi32(dev, fc + 80, nv_rd32(dev, 0x3300));
185#if 0 /* no real idea which is PUT/GET in UNK_48.. */
186 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_GET);
187 tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16);
188 nv_wi32(dev, fc + 72, tmp);
189#endif
190 nv_wi32(dev, fc + 84, nv_rd32(dev, 0x330c));
191
192 nv40_fifo_do_load_context(dev, pfifo->channels - 1);
193 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
194 NV40_PFIFO_CACHE1_PUSH1_DMA | (pfifo->channels - 1));
195 return 0;
196}
197
198static void
199nv40_fifo_init_reset(struct drm_device *dev)
200{
201 int i; 137 int i;
202 138
203 nv_wr32(dev, NV03_PMC_ENABLE, 139 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
204 nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO); 140 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
205 nv_wr32(dev, NV03_PMC_ENABLE,
206 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
207 141
208 nv_wr32(dev, 0x003224, 0x000f0078);
209 nv_wr32(dev, 0x003210, 0x00000000);
210 nv_wr32(dev, 0x003270, 0x00000000);
211 nv_wr32(dev, 0x003240, 0x00000000);
212 nv_wr32(dev, 0x003244, 0x00000000);
213 nv_wr32(dev, 0x003258, 0x00000000);
214 nv_wr32(dev, 0x002504, 0x00000000);
215 for (i = 0; i < 16; i++)
216 nv_wr32(dev, 0x002510 + (i * 4), 0x00000000);
217 nv_wr32(dev, 0x00250c, 0x0000ffff);
218 nv_wr32(dev, 0x002048, 0x00000000);
219 nv_wr32(dev, 0x003228, 0x00000000);
220 nv_wr32(dev, 0x0032e8, 0x00000000);
221 nv_wr32(dev, 0x002410, 0x00000000);
222 nv_wr32(dev, 0x002420, 0x00000000);
223 nv_wr32(dev, 0x002058, 0x00000001);
224 nv_wr32(dev, 0x00221c, 0x00000000);
225 /* something with 0x2084, read/modify/write, no change */
226 nv_wr32(dev, 0x002040, 0x000000ff); 142 nv_wr32(dev, 0x002040, 0x000000ff);
227 nv_wr32(dev, 0x002500, 0x00000000); 143 nv_wr32(dev, 0x002044, 0x2101ffff);
228 nv_wr32(dev, 0x003200, 0x00000000); 144 nv_wr32(dev, 0x002058, 0x00000001);
229
230 nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
231}
232
233static void
234nv40_fifo_init_ramxx(struct drm_device *dev)
235{
236 struct drm_nouveau_private *dev_priv = dev->dev_private;
237 145
238 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | 146 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
239 ((dev_priv->ramht->bits - 9) << 16) | 147 ((dev_priv->ramht->bits - 9) << 16) |
@@ -244,64 +152,59 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
 	case 0x47:
 	case 0x49:
 	case 0x4b:
-		nv_wr32(dev, 0x2230, 1);
-		break;
-	default:
-		break;
-	}
-
-	switch (dev_priv->chipset) {
+		nv_wr32(dev, 0x002230, 0x00000001);
 	case 0x40:
 	case 0x41:
 	case 0x42:
 	case 0x43:
 	case 0x45:
-	case 0x47:
 	case 0x48:
-	case 0x49:
-	case 0x4b:
-		nv_wr32(dev, NV40_PFIFO_RAMFC, 0x30002);
+		nv_wr32(dev, 0x002220, 0x00030002);
 		break;
 	default:
-		nv_wr32(dev, 0x2230, 0);
-		nv_wr32(dev, NV40_PFIFO_RAMFC,
-			((dev_priv->vram_size - 512 * 1024 +
-			  dev_priv->ramfc->pinst) >> 16) | (3 << 16));
+		nv_wr32(dev, 0x002230, 0x00000000);
+		nv_wr32(dev, 0x002220, ((dev_priv->vram_size - 512 * 1024 +
+					 dev_priv->ramfc->pinst) >> 16) |
+				       0x00030000);
 		break;
 	}
-}
 
-static void
-nv40_fifo_init_intr(struct drm_device *dev)
-{
-	nouveau_irq_register(dev, 8, nv04_fifo_isr);
-	nv_wr32(dev, 0x002100, 0xffffffff);
-	nv_wr32(dev, 0x002140, 0xffffffff);
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
+
+	nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
+	nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
+	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+	nv_wr32(dev, NV03_PFIFO_CACHES, 1);
+
+	for (i = 0; i < priv->base.channels; i++) {
+		if (dev_priv->channels.ptr[i])
+			nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
+	}
+
+	return 0;
 }
 
 int
-nv40_fifo_init(struct drm_device *dev)
+nv40_fifo_create(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
-	int i;
-
-	nv40_fifo_init_reset(dev);
-	nv40_fifo_init_ramxx(dev);
+	struct nv40_fifo_priv *priv;
 
-	nv40_fifo_do_load_context(dev, pfifo->channels - 1);
-	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
-
-	nv40_fifo_init_intr(dev);
-	pfifo->enable(dev);
-	pfifo->reassign(dev, true);
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
 
-	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		if (dev_priv->channels.ptr[i]) {
-			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
-			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
-		}
-	}
+	priv->base.base.destroy = nv04_fifo_destroy;
+	priv->base.base.init = nv40_fifo_init;
+	priv->base.base.fini = nv04_fifo_fini;
+	priv->base.base.context_new = nv40_fifo_context_new;
+	priv->base.base.context_del = nv04_fifo_context_del;
+	priv->base.channels = 31;
+	priv->ramfc_desc = nv40_ramfc;
+	dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
 
+	nouveau_irq_register(dev, 8, nv04_fifo_isr);
 	return 0;
 }
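The nv40_fifo_create() above is representative of the pattern this series converts every FIFO module to: allocate a private struct whose first member is the common engine base, fill in a vtable of init/fini/context hooks, and register the base pointer with the core. A minimal standalone sketch of that embedding-plus-vtable idiom follows; the struct and function names here are illustrative stand-ins, not nouveau's real types.

/* Minimal model of the engine-module pattern used above; struct and
 * function names are illustrative, not nouveau's actual API. */
#include <stdio.h>
#include <stdlib.h>

struct engine {                      /* common base, like the fifo base */
	int  (*init)(struct engine *);
	void (*destroy)(struct engine *);
};

struct fifo_priv {                   /* like nv40_fifo_priv */
	struct engine base;          /* base first, so the core can cast */
	int channels;
};

static int fifo_init(struct engine *eng)
{
	struct fifo_priv *priv = (struct fifo_priv *)eng;
	printf("fifo up, %d channels\n", priv->channels);
	return 0;
}

static void fifo_destroy(struct engine *eng)
{
	free(eng);
}

static struct engine *fifo_create(void)
{
	struct fifo_priv *priv = calloc(1, sizeof(*priv));
	if (!priv)
		return NULL;
	priv->base.init = fifo_init;     /* fill the vtable */
	priv->base.destroy = fifo_destroy;
	priv->channels = 31;
	return &priv->base;              /* what dev_priv->eng[] would hold */
}

int main(void)
{
	struct engine *eng = fifo_create();
	if (eng && eng->init(eng) == 0)
		eng->destroy(eng);
	return 0;
}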
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index ba14a93d8afa..aa9e2df64a26 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -27,7 +27,7 @@
 #include "drmP.h"
 #include "drm.h"
 #include "nouveau_drv.h"
-#include "nouveau_grctx.h"
+#include "nouveau_fifo.h"
 #include "nouveau_ramht.h"
 
 struct nv40_graph_engine {
@@ -42,7 +42,6 @@ nv40_graph_context_new(struct nouveau_channel *chan, int engine)
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *grctx = NULL;
-	struct nouveau_grctx ctx = {};
 	unsigned long flags;
 	int ret;
 
@@ -52,11 +51,7 @@ nv40_graph_context_new(struct nouveau_channel *chan, int engine)
 		return ret;
 
 	/* Initialise default context values */
-	ctx.dev = chan->dev;
-	ctx.mode = NOUVEAU_GRCTX_VALS;
-	ctx.data = grctx;
-	nv40_grctx_init(&ctx);
-
+	nv40_grctx_fill(dev, grctx);
 	nv_wo32(grctx, 0, grctx->vinst);
 
 	/* init grctx pointer in ramfc, and on PFIFO if channel is
@@ -184,8 +179,7 @@ nv40_graph_init(struct drm_device *dev, int engine)
 	struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	struct nouveau_grctx ctx = {};
-	uint32_t vramsz, *cp;
+	uint32_t vramsz;
 	int i, j;
 
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
@@ -193,22 +187,8 @@ nv40_graph_init(struct drm_device *dev, int engine)
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
 		NV_PMC_ENABLE_PGRAPH);
 
-	cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
-	if (!cp)
-		return -ENOMEM;
-
-	ctx.dev = dev;
-	ctx.mode = NOUVEAU_GRCTX_PROG;
-	ctx.data = cp;
-	ctx.ctxprog_max = 256;
-	nv40_grctx_init(&ctx);
-	pgraph->grctx_size = ctx.ctxvals_pos * 4;
-
-	nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
-	for (i = 0; i < ctx.ctxprog_len; i++)
-		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
-
-	kfree(cp);
+	/* generate and upload context program */
+	nv40_grctx_init(dev, &pgraph->grctx_size);
 
 	/* No context present currently */
 	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
@@ -366,13 +346,14 @@ nv40_graph_fini(struct drm_device *dev, int engine, bool suspend)
 static int
 nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
 {
+	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *grctx;
 	unsigned long flags;
 	int i;
 
 	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+	for (i = 0; i < pfifo->channels; i++) {
 		if (!dev_priv->channels.ptr[i])
 			continue;
 		grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
@@ -460,7 +441,6 @@ nv40_graph_create(struct drm_device *dev)
 	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
 	nouveau_irq_register(dev, 12, nv40_graph_isr);
 
-	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
 	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
 	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
 	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
@@ -483,8 +463,5 @@ nv40_graph_create(struct drm_device *dev)
 	else
 		NVOBJ_CLASS(dev, 0x4097, GR);
 
-	/* nvsw */
-	NVOBJ_CLASS(dev, 0x506e, SW);
-	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index f70447d131d7..be0a74750fb1 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -595,8 +595,8 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx)
 	}
 }
 
-void
-nv40_grctx_init(struct nouveau_grctx *ctx)
+static void
+nv40_grctx_generate(struct nouveau_grctx *ctx)
 {
 	/* decide whether we're loading/unloading the context */
 	cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
@@ -660,3 +660,31 @@ nv40_grctx_init(struct nouveau_grctx *ctx)
 	cp_out (ctx, CP_END);
 }
 
+void
+nv40_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
+{
+	nv40_grctx_generate(&(struct nouveau_grctx) {
+			    .dev = dev,
+			    .mode = NOUVEAU_GRCTX_VALS,
+			    .data = mem,
+			   });
+}
+
+void
+nv40_grctx_init(struct drm_device *dev, u32 *size)
+{
+	u32 ctxprog[256], i;
+	struct nouveau_grctx ctx = {
+		.dev = dev,
+		.mode = NOUVEAU_GRCTX_PROG,
+		.data = ctxprog,
+		.ctxprog_max = ARRAY_SIZE(ctxprog)
+	};
+
+	nv40_grctx_generate(&ctx);
+
+	nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+	for (i = 0; i < ctx.ctxprog_len; i++)
+		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, ctxprog[i]);
+	*size = ctx.ctxvals_pos * 4;
+}
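The new nv40_grctx_fill() above passes a C99 compound literal with designated initializers directly to nv40_grctx_generate(), avoiding the named temporary the old call sites needed. A tiny self-contained sketch of the idiom, with made-up names:

/* Sketch of the compound-literal idiom used by nv40_grctx_fill();
 * the struct and field names here are invented for illustration. */
#include <stdio.h>

struct grctx_args {
	int mode;
	void *data;
};

static void generate(const struct grctx_args *a)
{
	printf("mode=%d data=%p\n", a->mode, a->data);
}

int main(void)
{
	int buf[4];
	/* anonymous struct built in place, alive for the call */
	generate(&(struct grctx_args) {
		.mode = 1,
		.data = buf,
	});
	return 0;
}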
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index c7615381c5d9..e66273aff493 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -27,6 +27,7 @@
 #include "nouveau_bios.h"
 #include "nouveau_pm.h"
 #include "nouveau_hw.h"
+#include "nouveau_fifo.h"
 
 #define min2(a,b) ((a) < (b) ? (a) : (b))
 
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index cad2abd11756..97a477b3d52d 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -79,15 +79,15 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
 			NV_ERROR(dev, "no space while blanking crtc\n");
 			return ret;
 		}
-		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
+		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
 		OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
 		OUT_RING(evo, 0);
 		if (dev_priv->chipset != 0x50) {
-			BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
+			BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
 			OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
 		}
 
-		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
+		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
 		OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
 	} else {
 		if (nv_crtc->cursor.visible)
@@ -100,20 +100,20 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
 			NV_ERROR(dev, "no space while unblanking crtc\n");
 			return ret;
 		}
-		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
+		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
 		OUT_RING(evo, nv_crtc->lut.depth == 8 ?
 			 NV50_EVO_CRTC_CLUT_MODE_OFF :
 			 NV50_EVO_CRTC_CLUT_MODE_ON);
 		OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
 		if (dev_priv->chipset != 0x50) {
-			BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
+			BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
 			OUT_RING(evo, NvEvoVRAM);
 		}
 
-		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
+		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
 		OUT_RING(evo, nv_crtc->fb.offset >> 8);
 		OUT_RING(evo, 0);
-		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
+		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
 		if (dev_priv->chipset != 0x50)
 			if (nv_crtc->fb.tile_flags == 0x7a00 ||
 			    nv_crtc->fb.tile_flags == 0xfe00)
@@ -158,10 +158,10 @@ nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
 
 	ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
 	if (ret == 0) {
-		BEGIN_RING(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
+		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
 		OUT_RING (evo, mode);
 		if (update) {
-			BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+			BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
 			OUT_RING (evo, 0);
 			FIRE_RING (evo);
 		}
@@ -193,11 +193,11 @@ nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
 
 	hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
 
-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
+	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
 	OUT_RING (evo, (hue << 20) | (vib << 8));
 
 	if (update) {
-		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+		BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
 		OUT_RING (evo, 0);
 		FIRE_RING (evo);
 	}
@@ -311,9 +311,9 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
 	if (ret)
 		return ret;
 
-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
+	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
 	OUT_RING (evo, ctrl);
-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
+	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
 	OUT_RING (evo, oY << 16 | oX);
 	OUT_RING (evo, oY << 16 | oX);
 
@@ -383,23 +383,15 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
 static void
 nv50_crtc_destroy(struct drm_crtc *crtc)
 {
-	struct drm_device *dev;
-	struct nouveau_crtc *nv_crtc;
-
-	if (!crtc)
-		return;
-
-	dev = crtc->dev;
-	nv_crtc = nouveau_crtc(crtc);
-
-	NV_DEBUG_KMS(dev, "\n");
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 
-	drm_crtc_cleanup(&nv_crtc->base);
+	NV_DEBUG_KMS(crtc->dev, "\n");
 
 	nouveau_bo_unmap(nv_crtc->lut.nvbo);
 	nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
 	nouveau_bo_unmap(nv_crtc->cursor.nvbo);
 	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+	drm_crtc_cleanup(&nv_crtc->base);
 	kfree(nv_crtc);
 }
 
@@ -593,7 +585,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
 		if (ret)
 			return ret;
 
-		BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
+		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
 		OUT_RING (evo, fb->r_dma);
 	}
 
@@ -601,18 +593,18 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
 	if (ret)
 		return ret;
 
-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
+	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
 	OUT_RING (evo, nv_crtc->fb.offset >> 8);
 	OUT_RING (evo, 0);
 	OUT_RING (evo, (drm_fb->height << 16) | drm_fb->width);
 	OUT_RING (evo, fb->r_pitch);
 	OUT_RING (evo, fb->r_format);
 
-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
+	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
 	OUT_RING (evo, fb->base.depth == 8 ?
 		  NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
 
-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
+	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
 	OUT_RING (evo, (y << 16) | x);
 
 	if (nv_crtc->lut.depth != fb->base.depth) {
@@ -672,23 +664,23 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
 
 	ret = RING_SPACE(evo, 18);
 	if (ret == 0) {
-		BEGIN_RING(evo, 0, 0x0804 + head, 2);
+		BEGIN_NV04(evo, 0, 0x0804 + head, 2);
 		OUT_RING (evo, 0x00800000 | mode->clock);
 		OUT_RING (evo, (ilace == 2) ? 2 : 0);
-		BEGIN_RING(evo, 0, 0x0810 + head, 6);
+		BEGIN_NV04(evo, 0, 0x0810 + head, 6);
 		OUT_RING (evo, 0x00000000); /* border colour */
 		OUT_RING (evo, (vactive << 16) | hactive);
 		OUT_RING (evo, ( vsynce << 16) | hsynce);
 		OUT_RING (evo, (vblanke << 16) | hblanke);
 		OUT_RING (evo, (vblanks << 16) | hblanks);
 		OUT_RING (evo, (vblan2e << 16) | vblan2s);
-		BEGIN_RING(evo, 0, 0x082c + head, 1);
+		BEGIN_NV04(evo, 0, 0x082c + head, 1);
 		OUT_RING (evo, 0x00000000);
-		BEGIN_RING(evo, 0, 0x0900 + head, 1);
+		BEGIN_NV04(evo, 0, 0x0900 + head, 1);
 		OUT_RING (evo, 0x00000311); /* makes sync channel work */
-		BEGIN_RING(evo, 0, 0x08c8 + head, 1);
+		BEGIN_NV04(evo, 0, 0x08c8 + head, 1);
 		OUT_RING (evo, (umode->vdisplay << 16) | umode->hdisplay);
-		BEGIN_RING(evo, 0, 0x08d4 + head, 1);
+		BEGIN_NV04(evo, 0, 0x08d4 + head, 1);
 		OUT_RING (evo, 0x00000000); /* screen position */
 	}
 
@@ -755,18 +747,22 @@ nv50_crtc_create(struct drm_device *dev, int index)
 	if (!nv_crtc)
 		return -ENOMEM;
 
+	nv_crtc->index = index;
+	nv_crtc->set_dither = nv50_crtc_set_dither;
+	nv_crtc->set_scale = nv50_crtc_set_scale;
+	nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
 	nv_crtc->color_vibrance = 50;
 	nv_crtc->vibrant_hue = 0;
-
-	/* Default CLUT parameters, will be activated on the hw upon
-	 * first mode set.
-	 */
+	nv_crtc->lut.depth = 0;
 	for (i = 0; i < 256; i++) {
 		nv_crtc->lut.r[i] = i << 8;
 		nv_crtc->lut.g[i] = i << 8;
 		nv_crtc->lut.b[i] = i << 8;
 	}
-	nv_crtc->lut.depth = 0;
+
+	drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
+	drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
+	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
 
 	ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
 			     0, 0x0000, NULL, &nv_crtc->lut.nvbo);
@@ -778,21 +774,9 @@ nv50_crtc_create(struct drm_device *dev, int index)
 		nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
 	}
 
-	if (ret) {
-		kfree(nv_crtc);
-		return ret;
-	}
-
-	nv_crtc->index = index;
+	if (ret)
+		goto out;
 
-	/* set function pointers */
-	nv_crtc->set_dither = nv50_crtc_set_dither;
-	nv_crtc->set_scale = nv50_crtc_set_scale;
-	nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
-
-	drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
-	drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
-	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
 
 	ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
 			     0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
@@ -804,6 +788,12 @@ nv50_crtc_create(struct drm_device *dev, int index)
 		nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
 	}
 
+	if (ret)
+		goto out;
+
 	nv50_cursor_init(nv_crtc);
-	return 0;
+out:
+	if (ret)
+		nv50_crtc_destroy(&nv_crtc->base);
+	return ret;
 }
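The restructured nv50_crtc_create() above funnels every failure through a single "out" label that calls the destructor, instead of duplicating cleanup at each exit. A freestanding sketch of that single-exit idiom, with hypothetical names:

/* Standalone model of the goto-based single-exit error handling the
 * function above switches to; all names here are illustrative. */
#include <stdio.h>
#include <stdlib.h>

struct crtc { void *lut; void *cursor; };

static void crtc_destroy(struct crtc *c)
{
	free(c->cursor);            /* free() is NULL-safe, like the bo helpers */
	free(c->lut);
	free(c);
}

static int crtc_create(struct crtc **result)
{
	struct crtc *c = calloc(1, sizeof(*c));
	int ret = 0;

	if (!c)
		return -1;
	c->lut = malloc(4096);
	if (!c->lut) {
		ret = -1;
		goto out;           /* one exit path tears down everything */
	}
	c->cursor = malloc(64 * 64 * 4);
	if (!c->cursor)
		ret = -1;
out:
	if (ret) {
		crtc_destroy(c);
		return ret;
	}
	*result = c;
	return 0;
}

int main(void)
{
	struct crtc *c;
	if (crtc_create(&c) == 0)
		crtc_destroy(c);
	return 0;
}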
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index adfc9b607a50..af4ec7bf3670 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -53,15 +53,15 @@ nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
 	}
 
 	if (dev_priv->chipset != 0x50) {
-		BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
+		BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
 		OUT_RING(evo, NvEvoVRAM);
 	}
-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
+	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
 	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
 	OUT_RING(evo, nv_crtc->cursor.offset >> 8);
 
 	if (update) {
-		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+		BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
 		OUT_RING(evo, 0);
 		FIRE_RING(evo);
 		nv_crtc->cursor.visible = true;
@@ -86,16 +86,16 @@ nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
 		NV_ERROR(dev, "no space while hiding cursor\n");
 		return;
 	}
-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
+	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
 	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
 	OUT_RING(evo, 0);
 	if (dev_priv->chipset != 0x50) {
-		BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
+		BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
 		OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
 	}
 
 	if (update) {
-		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+		BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
 		OUT_RING(evo, 0);
 		FIRE_RING(evo);
 		nv_crtc->cursor.visible = false;
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 55c56330be6d..eb216a446b89 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -55,9 +55,9 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
 		NV_ERROR(dev, "no space while disconnecting DAC\n");
 		return;
 	}
-	BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
+	BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
 	OUT_RING (evo, 0);
-	BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+	BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
 	OUT_RING (evo, 0);
 
 	nv_encoder->crtc = NULL;
@@ -240,7 +240,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		NV_ERROR(dev, "no space while connecting DAC\n");
 		return;
 	}
-	BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
+	BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
 	OUT_RING(evo, mode_ctl);
 	OUT_RING(evo, mode_ctl2);
 
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 8b78b9cfa383..5c41612723b4 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -32,6 +32,7 @@
 #include "nouveau_fb.h"
 #include "nouveau_fbcon.h"
 #include "nouveau_ramht.h"
+#include "nouveau_software.h"
 #include "drm_crtc_helper.h"
 
 static void nv50_display_isr(struct drm_device *);
@@ -140,11 +141,11 @@ nv50_display_sync(struct drm_device *dev)
 
 	ret = RING_SPACE(evo, 6);
 	if (ret == 0) {
-		BEGIN_RING(evo, 0, 0x0084, 1);
+		BEGIN_NV04(evo, 0, 0x0084, 1);
 		OUT_RING (evo, 0x80000000);
-		BEGIN_RING(evo, 0, 0x0080, 1);
+		BEGIN_NV04(evo, 0, 0x0080, 1);
 		OUT_RING (evo, 0);
-		BEGIN_RING(evo, 0, 0x0084, 1);
+		BEGIN_NV04(evo, 0, 0x0084, 1);
 		OUT_RING (evo, 0x00000000);
 
 		nv_wo32(disp->ntfy, 0x000, 0x00000000);
@@ -267,7 +268,7 @@ nv50_display_init(struct drm_device *dev)
 	ret = RING_SPACE(evo, 3);
 	if (ret)
 		return ret;
-	BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
+	BEGIN_NV04(evo, 0, NV50_EVO_UNK84, 2);
 	OUT_RING (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
 	OUT_RING (evo, NvEvoSync);
 
@@ -292,7 +293,7 @@ nv50_display_fini(struct drm_device *dev)
 
 	ret = RING_SPACE(evo, 2);
 	if (ret == 0) {
-		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+		BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
 		OUT_RING(evo, 0);
 	}
 	FIRE_RING(evo);
@@ -358,8 +359,11 @@ nv50_display_create(struct drm_device *dev)
 	dev_priv->engine.display.priv = priv;
 
 	/* Create CRTC objects */
-	for (i = 0; i < 2; i++)
-		nv50_crtc_create(dev, i);
+	for (i = 0; i < 2; i++) {
+		ret = nv50_crtc_create(dev, i);
+		if (ret)
+			return ret;
+	}
 
 	/* We setup the encoders from the BIOS table */
 	for (i = 0 ; i < dcb->entries; i++) {
@@ -438,13 +442,13 @@ nv50_display_flip_stop(struct drm_crtc *crtc)
 		return;
 	}
 
-	BEGIN_RING(evo, 0, 0x0084, 1);
+	BEGIN_NV04(evo, 0, 0x0084, 1);
 	OUT_RING (evo, 0x00000000);
-	BEGIN_RING(evo, 0, 0x0094, 1);
+	BEGIN_NV04(evo, 0, 0x0094, 1);
 	OUT_RING (evo, 0x00000000);
-	BEGIN_RING(evo, 0, 0x00c0, 1);
+	BEGIN_NV04(evo, 0, 0x00c0, 1);
 	OUT_RING (evo, 0x00000000);
-	BEGIN_RING(evo, 0, 0x0080, 1);
+	BEGIN_NV04(evo, 0, 0x0080, 1);
 	OUT_RING (evo, 0x00000000);
 	FIRE_RING (evo);
 }
@@ -474,28 +478,28 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	}
 
 	if (dev_priv->chipset < 0xc0) {
-		BEGIN_RING(chan, 0, 0x0060, 2);
+		BEGIN_NV04(chan, 0, 0x0060, 2);
 		OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
 		OUT_RING (chan, dispc->sem.offset);
-		BEGIN_RING(chan, 0, 0x006c, 1);
+		BEGIN_NV04(chan, 0, 0x006c, 1);
 		OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
-		BEGIN_RING(chan, 0, 0x0064, 2);
+		BEGIN_NV04(chan, 0, 0x0064, 2);
 		OUT_RING (chan, dispc->sem.offset ^ 0x10);
 		OUT_RING (chan, 0x74b1e000);
-		BEGIN_RING(chan, 0, 0x0060, 1);
+		BEGIN_NV04(chan, 0, 0x0060, 1);
 		if (dev_priv->chipset < 0x84)
 			OUT_RING (chan, NvSema);
 		else
 			OUT_RING (chan, chan->vram_handle);
 	} else {
-		u64 offset = chan->dispc_vma[nv_crtc->index].offset;
+		u64 offset = nvc0_software_crtc(chan, nv_crtc->index);
 		offset += dispc->sem.offset;
-		BEGIN_NVC0(chan, 2, 0, 0x0010, 4);
+		BEGIN_NVC0(chan, 0, 0x0010, 4);
 		OUT_RING (chan, upper_32_bits(offset));
 		OUT_RING (chan, lower_32_bits(offset));
 		OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
 		OUT_RING (chan, 0x1002);
-		BEGIN_NVC0(chan, 2, 0, 0x0010, 4);
+		BEGIN_NVC0(chan, 0, 0x0010, 4);
 		OUT_RING (chan, upper_32_bits(offset));
 		OUT_RING (chan, lower_32_bits(offset ^ 0x10));
 		OUT_RING (chan, 0x74b1e000);
@@ -508,40 +512,40 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	}
 
 	/* queue the flip on the crtc's "display sync" channel */
-	BEGIN_RING(evo, 0, 0x0100, 1);
+	BEGIN_NV04(evo, 0, 0x0100, 1);
 	OUT_RING (evo, 0xfffe0000);
 	if (chan) {
-		BEGIN_RING(evo, 0, 0x0084, 1);
+		BEGIN_NV04(evo, 0, 0x0084, 1);
 		OUT_RING (evo, 0x00000100);
 	} else {
-		BEGIN_RING(evo, 0, 0x0084, 1);
+		BEGIN_NV04(evo, 0, 0x0084, 1);
 		OUT_RING (evo, 0x00000010);
 		/* allows gamma somehow, PDISP will bitch at you if
 		 * you don't wait for vblank before changing this..
 		 */
-		BEGIN_RING(evo, 0, 0x00e0, 1);
+		BEGIN_NV04(evo, 0, 0x00e0, 1);
 		OUT_RING (evo, 0x40000000);
 	}
-	BEGIN_RING(evo, 0, 0x0088, 4);
+	BEGIN_NV04(evo, 0, 0x0088, 4);
 	OUT_RING (evo, dispc->sem.offset);
 	OUT_RING (evo, 0xf00d0000 | dispc->sem.value);
 	OUT_RING (evo, 0x74b1e000);
 	OUT_RING (evo, NvEvoSync);
-	BEGIN_RING(evo, 0, 0x00a0, 2);
+	BEGIN_NV04(evo, 0, 0x00a0, 2);
 	OUT_RING (evo, 0x00000000);
 	OUT_RING (evo, 0x00000000);
-	BEGIN_RING(evo, 0, 0x00c0, 1);
+	BEGIN_NV04(evo, 0, 0x00c0, 1);
 	OUT_RING (evo, nv_fb->r_dma);
-	BEGIN_RING(evo, 0, 0x0110, 2);
+	BEGIN_NV04(evo, 0, 0x0110, 2);
 	OUT_RING (evo, 0x00000000);
 	OUT_RING (evo, 0x00000000);
-	BEGIN_RING(evo, 0, 0x0800, 5);
+	BEGIN_NV04(evo, 0, 0x0800, 5);
 	OUT_RING (evo, nv_fb->nvbo->bo.offset >> 8);
 	OUT_RING (evo, 0);
 	OUT_RING (evo, (fb->height << 16) | fb->width);
 	OUT_RING (evo, nv_fb->r_pitch);
 	OUT_RING (evo, nv_fb->r_format);
-	BEGIN_RING(evo, 0, 0x0080, 1);
+	BEGIN_NV04(evo, 0, 0x0080, 1);
 	OUT_RING (evo, 0x00000000);
 	FIRE_RING (evo);
 
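The two halves of the flip above appear to talk through a pair of semaphore dwords: the rendering channel releases 0xf00d0000|value at sem.offset and then blocks at sem.offset^0x10, while the EVO channel waits for the 0xf00d value before latching the new framebuffer and acks with 0x74b1e000. A very rough host-side model of that ping-pong handshake; the helpers, offsets, and values are purely illustrative:

/* Toy model of a two-slot semaphore handshake: one party releases a
 * value in slot 0 and waits on slot 1; the other waits on slot 0 and
 * then releases slot 1.  Not the driver's real mechanism, just its shape. */
#include <stdint.h>
#include <stdio.h>

static uint32_t sem[2];                 /* stands in for offset / offset^0x10 */

static void release(int slot, uint32_t val) { sem[slot] = val; }
static int  acquired(int slot, uint32_t val) { return sem[slot] == val; }

int main(void)
{
	uint32_t value = 1;

	/* render channel: signal "frame ready" on slot 0 */
	release(0, 0xf00d0000 | value);

	/* display: wait for the render signal, then ack on slot 1 */
	if (acquired(0, 0xf00d0000 | value))
		release(1, 0x74b1e000);

	/* render channel: wait for the display ack before reusing the fb */
	printf("ack=%s\n", acquired(1, 0x74b1e000) ? "yes" : "no");
	return 0;
}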
@@ -642,20 +646,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
 static void
 nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan, *tmp;
-
-	list_for_each_entry_safe(chan, tmp, &dev_priv->vbl_waiting,
-				 nvsw.vbl_wait) {
-		if (chan->nvsw.vblsem_head != crtc)
-			continue;
-
-		nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
-				chan->nvsw.vblsem_rval);
-		list_del(&chan->nvsw.vbl_wait);
-		drm_vblank_put(dev, crtc);
-	}
-
+	nouveau_software_vblank(dev, crtc);
 	drm_handle_vblank(dev, crtc);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 5d3dd14d2837..e9db9b97f041 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -33,6 +33,7 @@
 #include "nouveau_dma.h"
 #include "nouveau_reg.h"
 #include "nouveau_crtc.h"
+#include "nouveau_software.h"
 #include "nv50_evo.h"
 
 struct nv50_display_crtc {
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index bdd2afe29205..f1e4b9e07d14 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -2,6 +2,7 @@
 #include "drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
+#include "nouveau_fifo.h"
 
 struct nv50_fb_priv {
 	struct page *r100c08_page;
@@ -212,6 +213,7 @@ static struct nouveau_enum vm_fault[] = {
 void
 nv50_fb_vm_trap(struct drm_device *dev, int display)
 {
+	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	const struct nouveau_enum *en, *cl;
 	unsigned long flags;
@@ -236,7 +238,7 @@ nv50_fb_vm_trap(struct drm_device *dev, int display)
 	/* lookup channel id */
 	chinst = (trap[2] << 16) | trap[1];
 	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
+	for (ch = 0; ch < pfifo->channels; ch++) {
 		struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
 
 		if (!chan || !chan->ramin)
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index dc75a7206524..e3c8b05dcae4 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -43,22 +43,22 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 		return ret;
 
 	if (rect->rop != ROP_COPY) {
-		BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+		BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
 		OUT_RING(chan, 1);
 	}
-	BEGIN_RING(chan, NvSub2D, 0x0588, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x0588, 1);
 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
 	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
 		OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
 	else
 		OUT_RING(chan, rect->color);
-	BEGIN_RING(chan, NvSub2D, 0x0600, 4);
+	BEGIN_NV04(chan, NvSub2D, 0x0600, 4);
 	OUT_RING(chan, rect->dx);
 	OUT_RING(chan, rect->dy);
 	OUT_RING(chan, rect->dx + rect->width);
 	OUT_RING(chan, rect->dy + rect->height);
 	if (rect->rop != ROP_COPY) {
-		BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+		BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
 		OUT_RING(chan, 3);
 	}
 	FIRE_RING(chan);
@@ -78,14 +78,14 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 	if (ret)
 		return ret;
 
-	BEGIN_RING(chan, NvSub2D, 0x0110, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x0110, 1);
 	OUT_RING(chan, 0);
-	BEGIN_RING(chan, NvSub2D, 0x08b0, 4);
+	BEGIN_NV04(chan, NvSub2D, 0x08b0, 4);
 	OUT_RING(chan, region->dx);
 	OUT_RING(chan, region->dy);
 	OUT_RING(chan, region->width);
 	OUT_RING(chan, region->height);
-	BEGIN_RING(chan, NvSub2D, 0x08d0, 4);
+	BEGIN_NV04(chan, NvSub2D, 0x08d0, 4);
 	OUT_RING(chan, 0);
 	OUT_RING(chan, region->sx);
 	OUT_RING(chan, 0);
@@ -116,7 +116,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	width = ALIGN(image->width, 32);
 	dwords = (width * image->height) >> 5;
 
-	BEGIN_RING(chan, NvSub2D, 0x0814, 2);
+	BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
 	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
 		OUT_RING(chan, palette[image->bg_color] | mask);
@@ -125,10 +125,10 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 		OUT_RING(chan, image->bg_color);
 		OUT_RING(chan, image->fg_color);
 	}
-	BEGIN_RING(chan, NvSub2D, 0x0838, 2);
+	BEGIN_NV04(chan, NvSub2D, 0x0838, 2);
 	OUT_RING(chan, image->width);
 	OUT_RING(chan, image->height);
-	BEGIN_RING(chan, NvSub2D, 0x0850, 4);
+	BEGIN_NV04(chan, NvSub2D, 0x0850, 4);
 	OUT_RING(chan, 0);
 	OUT_RING(chan, image->dx);
 	OUT_RING(chan, 0);
@@ -143,7 +143,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 
 		dwords -= push;
 
-		BEGIN_RING(chan, NvSub2D, 0x40000860, push);
+		BEGIN_NI04(chan, NvSub2D, 0x0860, push);
 		OUT_RINGp(chan, data, push);
 		data += push;
 	}
@@ -199,60 +199,59 @@ nv50_fbcon_accel_init(struct fb_info *info)
 		return ret;
 	}
 
-	BEGIN_RING(chan, NvSub2D, 0x0000, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x0000, 1);
 	OUT_RING(chan, Nv2D);
-	BEGIN_RING(chan, NvSub2D, 0x0180, 4);
-	OUT_RING(chan, NvNotify0);
+	BEGIN_NV04(chan, NvSub2D, 0x0184, 3);
 	OUT_RING(chan, chan->vram_handle);
 	OUT_RING(chan, chan->vram_handle);
 	OUT_RING(chan, chan->vram_handle);
-	BEGIN_RING(chan, NvSub2D, 0x0290, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x0290, 1);
 	OUT_RING(chan, 0);
-	BEGIN_RING(chan, NvSub2D, 0x0888, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x0888, 1);
 	OUT_RING(chan, 1);
-	BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
 	OUT_RING(chan, 3);
-	BEGIN_RING(chan, NvSub2D, 0x02a0, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x02a0, 1);
 	OUT_RING(chan, 0x55);
-	BEGIN_RING(chan, NvSub2D, 0x08c0, 4);
+	BEGIN_NV04(chan, NvSub2D, 0x08c0, 4);
 	OUT_RING(chan, 0);
 	OUT_RING(chan, 1);
 	OUT_RING(chan, 0);
 	OUT_RING(chan, 1);
-	BEGIN_RING(chan, NvSub2D, 0x0580, 2);
+	BEGIN_NV04(chan, NvSub2D, 0x0580, 2);
 	OUT_RING(chan, 4);
 	OUT_RING(chan, format);
-	BEGIN_RING(chan, NvSub2D, 0x02e8, 2);
+	BEGIN_NV04(chan, NvSub2D, 0x02e8, 2);
 	OUT_RING(chan, 2);
 	OUT_RING(chan, 1);
-	BEGIN_RING(chan, NvSub2D, 0x0804, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x0804, 1);
 	OUT_RING(chan, format);
-	BEGIN_RING(chan, NvSub2D, 0x0800, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x0800, 1);
 	OUT_RING(chan, 1);
-	BEGIN_RING(chan, NvSub2D, 0x0808, 3);
+	BEGIN_NV04(chan, NvSub2D, 0x0808, 3);
 	OUT_RING(chan, 0);
 	OUT_RING(chan, 0);
 	OUT_RING(chan, 1);
-	BEGIN_RING(chan, NvSub2D, 0x081c, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x081c, 1);
 	OUT_RING(chan, 1);
-	BEGIN_RING(chan, NvSub2D, 0x0840, 4);
+	BEGIN_NV04(chan, NvSub2D, 0x0840, 4);
 	OUT_RING(chan, 0);
 	OUT_RING(chan, 1);
 	OUT_RING(chan, 0);
 	OUT_RING(chan, 1);
-	BEGIN_RING(chan, NvSub2D, 0x0200, 2);
+	BEGIN_NV04(chan, NvSub2D, 0x0200, 2);
 	OUT_RING(chan, format);
 	OUT_RING(chan, 1);
-	BEGIN_RING(chan, NvSub2D, 0x0214, 5);
+	BEGIN_NV04(chan, NvSub2D, 0x0214, 5);
 	OUT_RING(chan, info->fix.line_length);
 	OUT_RING(chan, info->var.xres_virtual);
 	OUT_RING(chan, info->var.yres_virtual);
 	OUT_RING(chan, upper_32_bits(fb->vma.offset));
 	OUT_RING(chan, lower_32_bits(fb->vma.offset));
-	BEGIN_RING(chan, NvSub2D, 0x0230, 2);
+	BEGIN_NV04(chan, NvSub2D, 0x0230, 2);
 	OUT_RING(chan, format);
 	OUT_RING(chan, 1);
-	BEGIN_RING(chan, NvSub2D, 0x0244, 5);
+	BEGIN_NV04(chan, NvSub2D, 0x0244, 5);
 	OUT_RING(chan, info->fix.line_length);
 	OUT_RING(chan, info->var.xres_virtual);
 	OUT_RING(chan, info->var.yres_virtual);
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 3bc2a565c20b..55383b85db0b 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007 Ben Skeggs.
+ * Copyright (C) 2012 Ben Skeggs.
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining
@@ -27,480 +27,268 @@
 #include "drmP.h"
 #include "drm.h"
 #include "nouveau_drv.h"
+#include "nouveau_fifo.h"
 #include "nouveau_ramht.h"
 #include "nouveau_vm.h"
 
-static void
+struct nv50_fifo_priv {
+	struct nouveau_fifo_priv base;
+	struct nouveau_gpuobj *playlist[2];
+	int cur_playlist;
+};
+
+struct nv50_fifo_chan {
+	struct nouveau_fifo_chan base;
+};
+
+void
 nv50_fifo_playlist_update(struct drm_device *dev)
 {
+	struct nv50_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_gpuobj *cur;
-	int i, nr;
-
-	NV_DEBUG(dev, "\n");
+	int i, p;
 
-	cur = pfifo->playlist[pfifo->cur_playlist];
-	pfifo->cur_playlist = !pfifo->cur_playlist;
+	cur = priv->playlist[priv->cur_playlist];
+	priv->cur_playlist = !priv->cur_playlist;
 
-	/* We never schedule channel 0 or 127 */
-	for (i = 1, nr = 0; i < 127; i++) {
-		if (dev_priv->channels.ptr[i] &&
-		    dev_priv->channels.ptr[i]->ramfc) {
-			nv_wo32(cur, (nr * 4), i);
-			nr++;
-		}
+	for (i = 0, p = 0; i < priv->base.channels; i++) {
+		if (nv_rd32(dev, 0x002600 + (i * 4)) & 0x80000000)
+			nv_wo32(cur, p++ * 4, i);
 	}
-	dev_priv->engine.instmem.flush(dev);
-
-	nv_wr32(dev, 0x32f4, cur->vinst >> 12);
-	nv_wr32(dev, 0x32ec, nr);
-	nv_wr32(dev, 0x2500, 0x101);
-}
 
-static void
-nv50_fifo_channel_enable(struct drm_device *dev, int channel)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channels.ptr[channel];
-	uint32_t inst;
-
-	NV_DEBUG(dev, "ch%d\n", channel);
-
-	if (dev_priv->chipset == 0x50)
-		inst = chan->ramfc->vinst >> 12;
-	else
-		inst = chan->ramfc->vinst >> 8;
+	dev_priv->engine.instmem.flush(dev);
 
-	nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst |
-		NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
+	nv_wr32(dev, 0x0032f4, cur->vinst >> 12);
+	nv_wr32(dev, 0x0032ec, p);
+	nv_wr32(dev, 0x002500, 0x00000101);
 }
 
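The rewritten nv50_fifo_playlist_update() derives the runlist directly from the channel-table enable bits (bit 31 of 0x002600 + i*4) and ping-pongs between two playlist buffers, so the buffer the hardware may still be reading is never rewritten in place. A toy model of that double-buffered rebuild, with invented names and sizes:

/* Toy model of the double-buffered playlist rebuild above; names,
 * sizes, and the enable-flag array are illustrative only. */
#include <stdio.h>

#define CHANNELS 127

static unsigned playlist[2][CHANNELS];
static int cur_playlist;
static unsigned chan_enabled[CHANNELS];   /* stands in for the 0x002600 table */

static void playlist_update(void)
{
	unsigned *cur = playlist[cur_playlist];
	int i, p;

	cur_playlist = !cur_playlist;     /* next rebuild uses the other buffer */
	for (i = 0, p = 0; i < CHANNELS; i++) {
		if (chan_enabled[i])      /* the bit-31 check in the driver */
			cur[p++] = i;
	}
	printf("hw now scheduled over %d channels\n", p);
}

int main(void)
{
	chan_enabled[3] = chan_enabled[7] = 1;
	playlist_update();
	return 0;
}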
-static void
-nv50_fifo_channel_disable(struct drm_device *dev, int channel)
+static int
+nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
 {
+	struct nv50_fifo_priv *priv = nv_engine(chan->dev, engine);
+	struct nv50_fifo_chan *fctx;
+	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t inst;
-
-	NV_DEBUG(dev, "ch%d\n", channel);
+	u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
+	u64 instance = chan->ramin->vinst >> 12;
+	unsigned long flags;
+	int ret = 0, i;
 
-	if (dev_priv->chipset == 0x50)
-		inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
-	else
-		inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
-	nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst);
-}
+	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	if (!fctx)
+		return -ENOMEM;
+	atomic_inc(&chan->vm->engref[engine]);
 
-static void
-nv50_fifo_init_reset(struct drm_device *dev)
-{
-	uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;
+	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+			     NV50_USER(chan->id), PAGE_SIZE);
+	if (!chan->user) {
+		ret = -ENOMEM;
+		goto error;
+	}
 
-	NV_DEBUG(dev, "\n");
+	for (i = 0; i < 0x100; i += 4)
+		nv_wo32(chan->ramin, i, 0x00000000);
+	nv_wo32(chan->ramin, 0x3c, 0x403f6078);
+	nv_wo32(chan->ramin, 0x40, 0x00000000);
+	nv_wo32(chan->ramin, 0x44, 0x01003fff);
+	nv_wo32(chan->ramin, 0x48, chan->pushbuf->cinst >> 4);
+	nv_wo32(chan->ramin, 0x50, lower_32_bits(ib_offset));
+	nv_wo32(chan->ramin, 0x54, upper_32_bits(ib_offset) |
+		drm_order(chan->dma.ib_max + 1) << 16);
+	nv_wo32(chan->ramin, 0x60, 0x7fffffff);
+	nv_wo32(chan->ramin, 0x78, 0x00000000);
+	nv_wo32(chan->ramin, 0x7c, 0x30000001);
+	nv_wo32(chan->ramin, 0x80, ((chan->ramht->bits - 9) << 27) |
+				   (4 << 24) /* SEARCH_FULL */ |
+				   (chan->ramht->gpuobj->cinst >> 4));
 
-	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
-	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
-}
+	dev_priv->engine.instmem.flush(dev);
 
-static void
-nv50_fifo_init_intr(struct drm_device *dev)
-{
-	NV_DEBUG(dev, "\n");
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
+	nv50_fifo_playlist_update(dev);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
-	nouveau_irq_register(dev, 8, nv04_fifo_isr);
-	nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
-	nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
+error:
+	if (ret)
+		priv->base.base.context_del(chan, engine);
+	return ret;
 }
 
-static void
-nv50_fifo_init_context_table(struct drm_device *dev)
+static bool
+nv50_fifo_kickoff(struct nouveau_channel *chan)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int i;
-
-	NV_DEBUG(dev, "\n");
-
-	for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
-		if (dev_priv->channels.ptr[i])
-			nv50_fifo_channel_enable(dev, i);
-		else
-			nv50_fifo_channel_disable(dev, i);
+	struct drm_device *dev = chan->dev;
+	bool done = true;
+	u32 me;
+
+	/* HW bug workaround:
+	 *
+	 * PFIFO will hang forever if the connected engines don't report
+	 * that they've processed the context switch request.
+	 *
+	 * In order for the kickoff to work, we need to ensure all the
+	 * connected engines are in a state where they can answer.
+	 *
+	 * Newer chipsets don't seem to suffer from this issue, and well,
+	 * there's also a "ignore these engines" bitmask reg we can use
+	 * if we hit the issue there..
+	 */
+
+	/* PME: make sure engine is enabled */
+	me = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001);
+
+	/* do the kickoff... */
+	nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
+	if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
+		NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
+		done = false;
 	}
 
-	nv50_fifo_playlist_update(dev);
+	/* restore any engine states we changed, and exit */
+	nv_wr32(dev, 0x00b860, me);
+	return done;
 }
 
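nv50_fifo_kickoff() above posts the channel instance to 0x0032fc and then relies on nv_wait_ne() to spin until PFIFO reports back by moving the register away from 0xffffffff, treating a timeout as an unload failure. A generic poll-with-timeout helper in the same spirit; the clock source, register accessor, and 2-second budget are stand-ins, not the driver's actual values:

/* Generic poll-until-changed helper modelled on the nv_wait_ne() call
 * above; everything here is illustrative scaffolding. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t fake_reg = 0xffffffff;   /* stands in for reg 0x0032fc */

static uint32_t rd32(void) { return fake_reg; }

/* busy-wait until (reg & mask) != val, or a 2-second timeout expires */
static int wait_ne(uint32_t mask, uint32_t val)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		if ((rd32() & mask) != val)
			return 1;        /* hardware answered */
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while (now.tv_sec - start.tv_sec < 2);
	return 0;                        /* timed out: treat as failure */
}

int main(void)
{
	fake_reg = 0x00001234;           /* pretend PFIFO acknowledged */
	printf("kickoff %s\n",
	       wait_ne(0xffffffff, 0xffffffff) ? "acked" : "timed out");
	return 0;
}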
 static void
-nv50_fifo_init_regs__nv(struct drm_device *dev)
-{
-	NV_DEBUG(dev, "\n");
-
-	nv_wr32(dev, 0x250c, 0x6f3cfc34);
-}
-
-static void
-nv50_fifo_init_regs(struct drm_device *dev)
-{
-	NV_DEBUG(dev, "\n");
-
-	nv_wr32(dev, 0x2500, 0);
-	nv_wr32(dev, 0x3250, 0);
-	nv_wr32(dev, 0x3220, 0);
-	nv_wr32(dev, 0x3204, 0);
-	nv_wr32(dev, 0x3210, 0);
-	nv_wr32(dev, 0x3270, 0);
-	nv_wr32(dev, 0x2044, 0x01003fff);
-
-	/* Enable dummy channels setup by nv50_instmem.c */
-	nv50_fifo_channel_enable(dev, 0);
-	nv50_fifo_channel_enable(dev, 127);
-}
-
-int
-nv50_fifo_init(struct drm_device *dev)
+nv50_fifo_context_del(struct nouveau_channel *chan, int engine)
 {
+	struct nv50_fifo_chan *fctx = chan->engctx[engine];
+	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
-	int ret;
+	unsigned long flags;
 
-	NV_DEBUG(dev, "\n");
+	/* remove channel from playlist, will context switch if active */
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
+	nv50_fifo_playlist_update(dev);
 
-	if (pfifo->playlist[0]) {
-		pfifo->cur_playlist = !pfifo->cur_playlist;
-		goto just_reset;
-	}
+	/* tell any engines on this channel to unload their contexts */
+	nv50_fifo_kickoff(chan);
 
-	ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
-				 NVOBJ_FLAG_ZERO_ALLOC,
-				 &pfifo->playlist[0]);
-	if (ret) {
-		NV_ERROR(dev, "error creating playlist 0: %d\n", ret);
-		return ret;
-	}
+	nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
-	ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
-				 NVOBJ_FLAG_ZERO_ALLOC,
-				 &pfifo->playlist[1]);
-	if (ret) {
-		nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
-		NV_ERROR(dev, "error creating playlist 1: %d\n", ret);
-		return ret;
+	/* clean up */
+	if (chan->user) {
+		iounmap(chan->user);
+		chan->user = NULL;
 	}
 
-just_reset:
-	nv50_fifo_init_reset(dev);
-	nv50_fifo_init_intr(dev);
-	nv50_fifo_init_context_table(dev);
-	nv50_fifo_init_regs__nv(dev);
-	nv50_fifo_init_regs(dev);
-	dev_priv->engine.fifo.enable(dev);
-	dev_priv->engine.fifo.reassign(dev, true);
-
-	return 0;
+	atomic_dec(&chan->vm->engref[engine]);
+	chan->engctx[engine] = NULL;
+	kfree(fctx);
 }
 
-void
-nv50_fifo_takedown(struct drm_device *dev)
+static int
+nv50_fifo_init(struct drm_device *dev, int engine)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	u32 instance;
+	int i;
 
-	NV_DEBUG(dev, "\n");
+	nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
+	nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
+	nv_wr32(dev, 0x00250c, 0x6f3cfc34);
+	nv_wr32(dev, 0x002044, 0x01003fff);
 
-	if (!pfifo->playlist[0])
-		return;
+	nv_wr32(dev, 0x002100, 0xffffffff);
+	nv_wr32(dev, 0x002140, 0xffffffff);
 
-	nv_wr32(dev, 0x2140, 0x00000000);
-	nouveau_irq_unregister(dev, 8);
+	for (i = 0; i < 128; i++) {
+		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+		if (chan && chan->engctx[engine])
+			instance = 0x80000000 | chan->ramin->vinst >> 12;
+		else
+			instance = 0x00000000;
+		nv_wr32(dev, 0x002600 + (i * 4), instance);
+	}
 
-	nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
-	nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
-}
+	nv50_fifo_playlist_update(dev);
 
-int
-nv50_fifo_channel_id(struct drm_device *dev)
-{
-	return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
-		NV50_PFIFO_CACHE1_PUSH1_CHID_MASK;
+	nv_wr32(dev, 0x003200, 1);
+	nv_wr32(dev, 0x003250, 1);
+	nv_wr32(dev, 0x002500, 1);
+	return 0;
 }
 
-int
-nv50_fifo_create_context(struct nouveau_channel *chan)
+static int
+nv50_fifo_fini(struct drm_device *dev, int engine, bool suspend)
 {
-	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *ramfc = NULL;
-	uint64_t ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
-	unsigned long flags;
-	int ret;
-
-	NV_DEBUG(dev, "ch%d\n", chan->id);
-
-	if (dev_priv->chipset == 0x50) {
-		ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
-					      chan->ramin->vinst, 0x100,
-					      NVOBJ_FLAG_ZERO_ALLOC |
-					      NVOBJ_FLAG_ZERO_FREE,
-					      &chan->ramfc);
-		if (ret)
-			return ret;
-
-		ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst + 0x0400,
-					      chan->ramin->vinst + 0x0400,
-					      4096, 0, &chan->cache);
-		if (ret)
-			return ret;
-	} else {
-		ret = nouveau_gpuobj_new(dev, chan, 0x100, 256,
-					 NVOBJ_FLAG_ZERO_ALLOC |
-					 NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
-		if (ret)
-			return ret;
-
-		ret = nouveau_gpuobj_new(dev, chan, 4096, 1024,
-					 0, &chan->cache);
-		if (ret)
-			return ret;
-	}
-	ramfc = chan->ramfc;
+	struct nv50_fifo_priv *priv = nv_engine(dev, engine);
+	int i;
 
-	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
-			     NV50_USER(chan->id), PAGE_SIZE);
-	if (!chan->user)
-		return -ENOMEM;
+	/* set playlist length to zero, fifo will unload context */
+	nv_wr32(dev, 0x0032ec, 0);
 
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
-	nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
-	nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-			     (4 << 24) /* SEARCH_FULL */ |
-			     (chan->ramht->gpuobj->cinst >> 4));
-	nv_wo32(ramfc, 0x44, 0x01003fff);
-	nv_wo32(ramfc, 0x60, 0x7fffffff);
-	nv_wo32(ramfc, 0x40, 0x00000000);
-	nv_wo32(ramfc, 0x7c, 0x30000001);
-	nv_wo32(ramfc, 0x78, 0x00000000);
-	nv_wo32(ramfc, 0x3c, 0x403f6078);
-	nv_wo32(ramfc, 0x50, lower_32_bits(ib_offset));
-	nv_wo32(ramfc, 0x54, upper_32_bits(ib_offset) |
-		drm_order(chan->dma.ib_max + 1) << 16);
-
-	if (dev_priv->chipset != 0x50) {
-		nv_wo32(chan->ramin, 0, chan->id);
-		nv_wo32(chan->ramin, 4, chan->ramfc->vinst >> 8);
-
-		nv_wo32(ramfc, 0x88, chan->cache->vinst >> 10);
-		nv_wo32(ramfc, 0x98, chan->ramin->vinst >> 12);
+	/* tell all connected engines to unload their contexts */
+	for (i = 0; i < priv->base.channels; i++) {
+		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+		if (chan && !nv50_fifo_kickoff(chan))
+			return -EBUSY;
 	}
 
-	dev_priv->engine.instmem.flush(dev);
-
-	nv50_fifo_channel_enable(dev, chan->id);
-	nv50_fifo_playlist_update(dev);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+	nv_wr32(dev, 0x002140, 0);
 	return 0;
 }
 
304void 238void
305nv50_fifo_destroy_context(struct nouveau_channel *chan) 239nv50_fifo_tlb_flush(struct drm_device *dev, int engine)
306{ 240{
307 struct drm_device *dev = chan->dev; 241 nv50_vm_flush_engine(dev, 5);
308 struct drm_nouveau_private *dev_priv = dev->dev_private;
309 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
310 struct nouveau_gpuobj *ramfc = NULL;
311 unsigned long flags;
312
313 NV_DEBUG(dev, "ch%d\n", chan->id);
314
315 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
316 pfifo->reassign(dev, false);
317
318 /* Unload the context if it's the currently active one */
319 if (pfifo->channel_id(dev) == chan->id) {
320 pfifo->disable(dev);
321 pfifo->unload_context(dev);
322 pfifo->enable(dev);
323 }
324
325 /* This will ensure the channel is seen as disabled. */
326 nouveau_gpuobj_ref(chan->ramfc, &ramfc);
327 nouveau_gpuobj_ref(NULL, &chan->ramfc);
328 nv50_fifo_channel_disable(dev, chan->id);
329
330 /* Dummy channel, also used on ch 127 */
331 if (chan->id == 0)
332 nv50_fifo_channel_disable(dev, 127);
333 nv50_fifo_playlist_update(dev);
334
335 pfifo->reassign(dev, true);
336 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
337
338 /* Free the channel resources */
339 if (chan->user) {
340 iounmap(chan->user);
341 chan->user = NULL;
342 }
343 nouveau_gpuobj_ref(NULL, &ramfc);
344 nouveau_gpuobj_ref(NULL, &chan->cache);
345} 242}
346 243
347int 244void
348nv50_fifo_load_context(struct nouveau_channel *chan) 245nv50_fifo_destroy(struct drm_device *dev, int engine)
349{ 246{
350 struct drm_device *dev = chan->dev;
351 struct drm_nouveau_private *dev_priv = dev->dev_private; 247 struct drm_nouveau_private *dev_priv = dev->dev_private;
352 struct nouveau_gpuobj *ramfc = chan->ramfc; 248 struct nv50_fifo_priv *priv = nv_engine(dev, engine);
353 struct nouveau_gpuobj *cache = chan->cache;
354 int ptr, cnt;
355
356 NV_DEBUG(dev, "ch%d\n", chan->id);
357
358 nv_wr32(dev, 0x3330, nv_ro32(ramfc, 0x00));
359 nv_wr32(dev, 0x3334, nv_ro32(ramfc, 0x04));
360 nv_wr32(dev, 0x3240, nv_ro32(ramfc, 0x08));
361 nv_wr32(dev, 0x3320, nv_ro32(ramfc, 0x0c));
362 nv_wr32(dev, 0x3244, nv_ro32(ramfc, 0x10));
363 nv_wr32(dev, 0x3328, nv_ro32(ramfc, 0x14));
364 nv_wr32(dev, 0x3368, nv_ro32(ramfc, 0x18));
365 nv_wr32(dev, 0x336c, nv_ro32(ramfc, 0x1c));
366 nv_wr32(dev, 0x3370, nv_ro32(ramfc, 0x20));
367 nv_wr32(dev, 0x3374, nv_ro32(ramfc, 0x24));
368 nv_wr32(dev, 0x3378, nv_ro32(ramfc, 0x28));
369 nv_wr32(dev, 0x337c, nv_ro32(ramfc, 0x2c));
370 nv_wr32(dev, 0x3228, nv_ro32(ramfc, 0x30));
371 nv_wr32(dev, 0x3364, nv_ro32(ramfc, 0x34));
372 nv_wr32(dev, 0x32a0, nv_ro32(ramfc, 0x38));
373 nv_wr32(dev, 0x3224, nv_ro32(ramfc, 0x3c));
374 nv_wr32(dev, 0x324c, nv_ro32(ramfc, 0x40));
375 nv_wr32(dev, 0x2044, nv_ro32(ramfc, 0x44));
376 nv_wr32(dev, 0x322c, nv_ro32(ramfc, 0x48));
377 nv_wr32(dev, 0x3234, nv_ro32(ramfc, 0x4c));
378 nv_wr32(dev, 0x3340, nv_ro32(ramfc, 0x50));
379 nv_wr32(dev, 0x3344, nv_ro32(ramfc, 0x54));
380 nv_wr32(dev, 0x3280, nv_ro32(ramfc, 0x58));
381 nv_wr32(dev, 0x3254, nv_ro32(ramfc, 0x5c));
382 nv_wr32(dev, 0x3260, nv_ro32(ramfc, 0x60));
383 nv_wr32(dev, 0x3264, nv_ro32(ramfc, 0x64));
384 nv_wr32(dev, 0x3268, nv_ro32(ramfc, 0x68));
385 nv_wr32(dev, 0x326c, nv_ro32(ramfc, 0x6c));
386 nv_wr32(dev, 0x32e4, nv_ro32(ramfc, 0x70));
387 nv_wr32(dev, 0x3248, nv_ro32(ramfc, 0x74));
388 nv_wr32(dev, 0x2088, nv_ro32(ramfc, 0x78));
389 nv_wr32(dev, 0x2058, nv_ro32(ramfc, 0x7c));
390 nv_wr32(dev, 0x2210, nv_ro32(ramfc, 0x80));
391
392 cnt = nv_ro32(ramfc, 0x84);
393 for (ptr = 0; ptr < cnt; ptr++) {
394 nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
395 nv_ro32(cache, (ptr * 8) + 0));
396 nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
397 nv_ro32(cache, (ptr * 8) + 4));
398 }
399 nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
400 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
401
402 /* guessing that all the 0x34xx regs aren't on NV50 */
403 if (dev_priv->chipset != 0x50) {
404 nv_wr32(dev, 0x340c, nv_ro32(ramfc, 0x88));
405 nv_wr32(dev, 0x3400, nv_ro32(ramfc, 0x8c));
406 nv_wr32(dev, 0x3404, nv_ro32(ramfc, 0x90));
407 nv_wr32(dev, 0x3408, nv_ro32(ramfc, 0x94));
408 nv_wr32(dev, 0x3410, nv_ro32(ramfc, 0x98));
409 }
410 249
411 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16)); 250 nouveau_irq_unregister(dev, 8);
412 return 0; 251
252 nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
253 nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
254
255 dev_priv->eng[engine] = NULL;
256 kfree(priv);
413} 257}
414 258
415int 259int
416nv50_fifo_unload_context(struct drm_device *dev) 260nv50_fifo_create(struct drm_device *dev)
417{ 261{
418 struct drm_nouveau_private *dev_priv = dev->dev_private; 262 struct drm_nouveau_private *dev_priv = dev->dev_private;
419 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 263 struct nv50_fifo_priv *priv;
420 struct nouveau_gpuobj *ramfc, *cache; 264 int ret;
421 struct nouveau_channel *chan = NULL;
422 int chid, get, put, ptr;
423
424 NV_DEBUG(dev, "\n");
425
426 chid = pfifo->channel_id(dev);
427 if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
428 return 0;
429
430 chan = dev_priv->channels.ptr[chid];
431 if (!chan) {
432 NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
433 return -EINVAL;
434 }
435 NV_DEBUG(dev, "ch%d\n", chan->id);
436 ramfc = chan->ramfc;
437 cache = chan->cache;
438
439 nv_wo32(ramfc, 0x00, nv_rd32(dev, 0x3330));
440 nv_wo32(ramfc, 0x04, nv_rd32(dev, 0x3334));
441 nv_wo32(ramfc, 0x08, nv_rd32(dev, 0x3240));
442 nv_wo32(ramfc, 0x0c, nv_rd32(dev, 0x3320));
443 nv_wo32(ramfc, 0x10, nv_rd32(dev, 0x3244));
444 nv_wo32(ramfc, 0x14, nv_rd32(dev, 0x3328));
445 nv_wo32(ramfc, 0x18, nv_rd32(dev, 0x3368));
446 nv_wo32(ramfc, 0x1c, nv_rd32(dev, 0x336c));
447 nv_wo32(ramfc, 0x20, nv_rd32(dev, 0x3370));
448 nv_wo32(ramfc, 0x24, nv_rd32(dev, 0x3374));
449 nv_wo32(ramfc, 0x28, nv_rd32(dev, 0x3378));
450 nv_wo32(ramfc, 0x2c, nv_rd32(dev, 0x337c));
451 nv_wo32(ramfc, 0x30, nv_rd32(dev, 0x3228));
452 nv_wo32(ramfc, 0x34, nv_rd32(dev, 0x3364));
453 nv_wo32(ramfc, 0x38, nv_rd32(dev, 0x32a0));
454 nv_wo32(ramfc, 0x3c, nv_rd32(dev, 0x3224));
455 nv_wo32(ramfc, 0x40, nv_rd32(dev, 0x324c));
456 nv_wo32(ramfc, 0x44, nv_rd32(dev, 0x2044));
457 nv_wo32(ramfc, 0x48, nv_rd32(dev, 0x322c));
458 nv_wo32(ramfc, 0x4c, nv_rd32(dev, 0x3234));
459 nv_wo32(ramfc, 0x50, nv_rd32(dev, 0x3340));
460 nv_wo32(ramfc, 0x54, nv_rd32(dev, 0x3344));
461 nv_wo32(ramfc, 0x58, nv_rd32(dev, 0x3280));
462 nv_wo32(ramfc, 0x5c, nv_rd32(dev, 0x3254));
463 nv_wo32(ramfc, 0x60, nv_rd32(dev, 0x3260));
464 nv_wo32(ramfc, 0x64, nv_rd32(dev, 0x3264));
465 nv_wo32(ramfc, 0x68, nv_rd32(dev, 0x3268));
466 nv_wo32(ramfc, 0x6c, nv_rd32(dev, 0x326c));
467 nv_wo32(ramfc, 0x70, nv_rd32(dev, 0x32e4));
468 nv_wo32(ramfc, 0x74, nv_rd32(dev, 0x3248));
469 nv_wo32(ramfc, 0x78, nv_rd32(dev, 0x2088));
470 nv_wo32(ramfc, 0x7c, nv_rd32(dev, 0x2058));
471 nv_wo32(ramfc, 0x80, nv_rd32(dev, 0x2210));
472
473 put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
474 get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
475 ptr = 0;
476 while (put != get) {
477 nv_wo32(cache, ptr + 0,
478 nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
479 nv_wo32(cache, ptr + 4,
480 nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
481 get = (get + 1) & 0x1ff;
482 ptr += 8;
483 }
484
485 /* guessing that all the 0x34xx regs aren't on NV50 */
486 if (dev_priv->chipset != 0x50) {
487 nv_wo32(ramfc, 0x84, ptr >> 3);
488 nv_wo32(ramfc, 0x88, nv_rd32(dev, 0x340c));
489 nv_wo32(ramfc, 0x8c, nv_rd32(dev, 0x3400));
490 nv_wo32(ramfc, 0x90, nv_rd32(dev, 0x3404));
491 nv_wo32(ramfc, 0x94, nv_rd32(dev, 0x3408));
492 nv_wo32(ramfc, 0x98, nv_rd32(dev, 0x3410));
493 }
494 265
495 dev_priv->engine.instmem.flush(dev); 266 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
267 if (!priv)
268 return -ENOMEM;
496 269
497 /*XXX: probably reload ch127 (NULL) state back too */ 270 priv->base.base.destroy = nv50_fifo_destroy;
498 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127); 271 priv->base.base.init = nv50_fifo_init;
499 return 0; 272 priv->base.base.fini = nv50_fifo_fini;
500} 273 priv->base.base.context_new = nv50_fifo_context_new;
274 priv->base.base.context_del = nv50_fifo_context_del;
275 priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
276 priv->base.channels = 127;
277 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
278
279 ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
280 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
281 if (ret)
282 goto error;
283
284 ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
285 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
286 if (ret)
287 goto error;
501 288
502void 289 nouveau_irq_register(dev, 8, nv04_fifo_isr);
503nv50_fifo_tlb_flush(struct drm_device *dev) 290error:
504{ 291 if (ret)
505 nv50_vm_flush_engine(dev, 5); 292 priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
293 return ret;
506} 294}
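
nv50_fifo_playlist_update() itself is not part of this hunk. Based on how it is called here (channels are marked active via bit 31 of 0x002600+n*4, and two playlist buffers are allocated so updates can be double-buffered), and assuming nv50_fifo_priv mirrors the nv84 variant further down (two playlist gpuobjs plus a cur_playlist index), a hedged sketch of what such an update plausibly does — register offsets 0x0032f4/0x0032ec are assumptions, and sketch_playlist_update is a hypothetical name, not the code from this commit:

static void
sketch_playlist_update(struct drm_device *dev, struct nv50_fifo_priv *priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *cur;
	int i, nr;

	/* flip to the inactive playlist buffer so the fifo never reads
	 * a half-written list */
	cur = priv->playlist[priv->cur_playlist];
	priv->cur_playlist = !priv->cur_playlist;

	/* a channel belongs on the playlist iff its context entry is
	 * marked valid (bit 31 of 0x002600 + chid * 4, set above) */
	for (i = 0, nr = 0; i < priv->base.channels; i++) {
		if (nv_rd32(dev, 0x002600 + (i * 4)) & 0x80000000)
			nv_wo32(cur, nr++ * 4, i);
	}
	dev_priv->engine.instmem.flush(dev);

	/* hand the new list (address, length) to the fifo */
	nv_wr32(dev, 0x0032f4, cur->vinst >> 12);
	nv_wr32(dev, 0x0032ec, nr);
	nv_wr32(dev, 0x002500, 0x00000101);
}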
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 33d5711a918d..d9cc2f2638d6 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -27,8 +27,8 @@
 #include "drmP.h"
 #include "drm.h"
 #include "nouveau_drv.h"
+#include "nouveau_fifo.h"
 #include "nouveau_ramht.h"
-#include "nouveau_grctx.h"
 #include "nouveau_dma.h"
 #include "nouveau_vm.h"
 #include "nv50_evo.h"
@@ -40,86 +40,6 @@ struct nv50_graph_engine {
 	u32 grctx_size;
 };

-static void
-nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
-{
-	const uint32_t mask = 0x00010001;
-
-	if (enabled)
-		nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
-	else
-		nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
-}
-
-static struct nouveau_channel *
-nv50_graph_channel(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t inst;
-	int i;
-
-	/* Be sure we're not in the middle of a context switch or bad things
-	 * will happen, such as unloading the wrong pgraph context.
-	 */
-	if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
-		NV_ERROR(dev, "Ctxprog is still running\n");
-
-	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
-	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
-		return NULL;
-	inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
-
-	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
-
-		if (chan && chan->ramin && chan->ramin->vinst == inst)
-			return chan;
-	}
-
-	return NULL;
-}
-
-static int
-nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
-{
-	uint32_t fifo = nv_rd32(dev, 0x400500);
-
-	nv_wr32(dev, 0x400500, fifo & ~1);
-	nv_wr32(dev, 0x400784, inst);
-	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
-	nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
-	nv_wr32(dev, 0x400040, 0xffffffff);
-	(void)nv_rd32(dev, 0x400040);
-	nv_wr32(dev, 0x400040, 0x00000000);
-	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);
-
-	if (nouveau_wait_for_idle(dev))
-		nv_wr32(dev, 0x40032c, inst | (1<<31));
-	nv_wr32(dev, 0x400500, fifo);
-
-	return 0;
-}
-
-static int
-nv50_graph_unload_context(struct drm_device *dev)
-{
-	uint32_t inst;
-
-	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
-	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
-		return 0;
-	inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
-
-	nouveau_wait_for_idle(dev);
-	nv_wr32(dev, 0x400784, inst);
-	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
-	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
-	nouveau_wait_for_idle(dev);
-
-	nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
-	return 0;
-}
-
 static int
 nv50_graph_init(struct drm_device *dev, int engine)
 {
@@ -211,12 +131,6 @@ nv50_graph_init(struct drm_device *dev, int engine)
 static int
 nv50_graph_fini(struct drm_device *dev, int engine, bool suspend)
 {
-	nv_mask(dev, 0x400500, 0x00010001, 0x00000000);
-	if (!nv_wait(dev, 0x400700, ~0, 0) && suspend) {
-		nv_mask(dev, 0x400500, 0x00010001, 0x00010001);
-		return -EBUSY;
-	}
-	nv50_graph_unload_context(dev);
 	nv_wr32(dev, 0x40013c, 0x00000000);
 	return 0;
 }
@@ -229,7 +143,6 @@ nv50_graph_context_new(struct nouveau_channel *chan, int engine)
 	struct nouveau_gpuobj *ramin = chan->ramin;
 	struct nouveau_gpuobj *grctx = NULL;
 	struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
-	struct nouveau_grctx ctx = {};
 	int hdr, ret;

 	NV_DEBUG(dev, "ch%d\n", chan->id);
@@ -248,11 +161,7 @@ nv50_graph_context_new(struct nouveau_channel *chan, int engine)
 	nv_wo32(ramin, hdr + 0x10, 0);
 	nv_wo32(ramin, hdr + 0x14, 0x00010000);

-	ctx.dev = chan->dev;
-	ctx.mode = NOUVEAU_GRCTX_VALS;
-	ctx.data = grctx;
-	nv50_grctx_init(&ctx);
-
+	nv50_grctx_fill(dev, grctx);
 	nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);

 	dev_priv->engine.instmem.flush(dev);
@@ -268,33 +177,14 @@ nv50_graph_context_del(struct nouveau_channel *chan, int engine)
 	struct nouveau_gpuobj *grctx = chan->engctx[engine];
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
-	unsigned long flags;
-
-	NV_DEBUG(dev, "ch%d\n", chan->id);
-
-	if (!chan->ramin)
-		return;
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	pfifo->reassign(dev, false);
-	nv50_graph_fifo_access(dev, false);
-
-	if (nv50_graph_channel(dev) == chan)
-		nv50_graph_unload_context(dev);

 	for (i = hdr; i < hdr + 24; i += 4)
 		nv_wo32(chan->ramin, i, 0);
 	dev_priv->engine.instmem.flush(dev);

-	nv50_graph_fifo_access(dev, true);
-	pfifo->reassign(dev, true);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-	nouveau_gpuobj_ref(NULL, &grctx);
-
 	atomic_dec(&chan->vm->engref[engine]);
+	nouveau_gpuobj_ref(NULL, &grctx);
 	chan->engctx[engine] = NULL;
 }

@@ -325,85 +215,6 @@ nv50_graph_object_new(struct nouveau_channel *chan, int engine,
 }

 static void
-nv50_graph_context_switch(struct drm_device *dev)
-{
-	uint32_t inst;
-
-	nv50_graph_unload_context(dev);
-
-	inst  = nv_rd32(dev, NV50_PGRAPH_CTXCTL_NEXT);
-	inst &= NV50_PGRAPH_CTXCTL_NEXT_INSTANCE;
-	nv50_graph_do_load_context(dev, inst);
-
-	nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
-		NV40_PGRAPH_INTR_EN) | NV_PGRAPH_INTR_CONTEXT_SWITCH);
-}
-
-static int
-nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan,
-			   u32 class, u32 mthd, u32 data)
-{
-	struct nouveau_gpuobj *gpuobj;
-
-	gpuobj = nouveau_ramht_find(chan, data);
-	if (!gpuobj)
-		return -ENOENT;
-
-	if (nouveau_notifier_offset(gpuobj, NULL))
-		return -EINVAL;
-
-	chan->nvsw.vblsem = gpuobj;
-	chan->nvsw.vblsem_offset = ~0;
-	return 0;
-}
-
-static int
-nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan,
-			      u32 class, u32 mthd, u32 data)
-{
-	if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
-		return -ERANGE;
-
-	chan->nvsw.vblsem_offset = data >> 2;
-	return 0;
-}
-
-static int
-nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan,
-				   u32 class, u32 mthd, u32 data)
-{
-	chan->nvsw.vblsem_rval = data;
-	return 0;
-}
-
-static int
-nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan,
-			       u32 class, u32 mthd, u32 data)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
-		return -EINVAL;
-
-	drm_vblank_get(dev, data);
-
-	chan->nvsw.vblsem_head = data;
-	list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
-
-	return 0;
-}
-
-static int
-nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
-			       u32 class, u32 mthd, u32 data)
-{
-	nouveau_finish_page_flip(chan, NULL);
-	return 0;
-}
-
-
-static void
 nv50_graph_tlb_flush(struct drm_device *dev, int engine)
 {
 	nv50_vm_flush_engine(dev, 0);
@@ -514,6 +325,7 @@ struct nouveau_enum nv50_data_error_names[] = {
 	{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
 	{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
 	{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
+	{ 0x00000024, "VP_ZERO_INPUTS", NULL },
 	{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
 	{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
 	{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
@@ -900,13 +712,14 @@ nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid
 int
 nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
 {
+	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan;
 	unsigned long flags;
 	int i;

 	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+	for (i = 0; i < pfifo->channels; i++) {
 		chan = dev_priv->channels.ptr[i];
 		if (!chan || !chan->ramin)
 			continue;
@@ -939,15 +752,6 @@ nv50_graph_isr(struct drm_device *dev)
 		show &= ~0x00000010;
 	}

-	if (stat & 0x00001000) {
-		nv_wr32(dev, 0x400500, 0x00000000);
-		nv_wr32(dev, 0x400100, 0x00001000);
-		nv_mask(dev, 0x40013c, 0x00001000, 0x00000000);
-		nv50_graph_context_switch(dev);
-		stat &= ~0x00001000;
-		show &= ~0x00001000;
-	}
-
 	show = (show && nouveau_ratelimit()) ? show : 0;

 	if (show & 0x00100000) {
@@ -996,28 +800,21 @@ nv50_graph_create(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_graph_engine *pgraph;
-	struct nouveau_grctx ctx = {};
 	int ret;

 	pgraph = kzalloc(sizeof(*pgraph),GFP_KERNEL);
 	if (!pgraph)
 		return -ENOMEM;

-	ctx.dev = dev;
-	ctx.mode = NOUVEAU_GRCTX_PROG;
-	ctx.data = pgraph->ctxprog;
-	ctx.ctxprog_max = ARRAY_SIZE(pgraph->ctxprog);
-
-	ret = nv50_grctx_init(&ctx);
+	ret = nv50_grctx_init(dev, pgraph->ctxprog, ARRAY_SIZE(pgraph->ctxprog),
+			      &pgraph->ctxprog_size,
+			      &pgraph->grctx_size);
 	if (ret) {
 		NV_ERROR(dev, "PGRAPH: ctxprog build failed\n");
 		kfree(pgraph);
 		return 0;
 	}

-	pgraph->grctx_size = ctx.ctxvals_pos * 4;
-	pgraph->ctxprog_size = ctx.ctxprog_len;
-
 	pgraph->base.destroy = nv50_graph_destroy;
 	pgraph->base.init = nv50_graph_init;
 	pgraph->base.fini = nv50_graph_fini;
@@ -1031,14 +828,6 @@ nv50_graph_create(struct drm_device *dev)

 	nouveau_irq_register(dev, 12, nv50_graph_isr);

-	/* NVSW really doesn't live here... */
-	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
-	NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
-	NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
-	NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
-	NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
-	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
-
 	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
 	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
 	NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
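
With software and interrupt-driven context switching gone from PGRAPH, nv50_graph_isr_chid() remains as the helper other engines use to attribute a faulting context instance to a channel. An illustrative caller — sketch_report_fault is a hypothetical name, not from this commit; nv98_crypt_isr further down does the equivalent with its own status registers:

static void
sketch_report_fault(struct drm_device *dev, u64 inst, u32 stat)
{
	/* inst is the context instance address latched by the engine;
	 * the helper walks the channel table under channels.lock */
	int chid = nv50_graph_isr_chid(dev, inst);

	NV_INFO(dev, "engine fault: ch %d [0x%010llx] stat 0x%08x\n",
		chid, inst, stat);
}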
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index 4b46d6968566..881e22b249fc 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -172,8 +172,8 @@ static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);

 /* Main function: construct the ctxprog skeleton, call the other functions. */

-int
-nv50_grctx_init(struct nouveau_grctx *ctx)
+static int
+nv50_grctx_generate(struct nouveau_grctx *ctx)
 {
 	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;

@@ -210,7 +210,7 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
 	cp_name(ctx, cp_check_load);
 	cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
 	cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
-	cp_bra (ctx, ALWAYS, TRUE, cp_exit);
+	cp_bra (ctx, ALWAYS, TRUE, cp_prepare_exit);

 	/* setup for context load */
 	cp_name(ctx, cp_setup_auto_load);
@@ -277,6 +277,33 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
 	return 0;
 }

+void
+nv50_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
+{
+	nv50_grctx_generate(&(struct nouveau_grctx) {
+			     .dev = dev,
+			     .mode = NOUVEAU_GRCTX_VALS,
+			     .data = mem,
+			    });
+}
+
+int
+nv50_grctx_init(struct drm_device *dev, u32 *data, u32 max, u32 *len, u32 *cnt)
+{
+	struct nouveau_grctx ctx = {
+		.dev = dev,
+		.mode = NOUVEAU_GRCTX_PROG,
+		.data = data,
+		.ctxprog_max = max
+	};
+	int ret;
+
+	ret = nv50_grctx_generate(&ctx);
+	*cnt = ctx.ctxvals_pos * 4;
+	*len = ctx.ctxprog_len;
+	return ret;
+}
+
 /*
  * Constructs MMIO part of ctxprog and ctxvals. Just a matter of knowing which
  * registers to save/restore and the default values for them.
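
The two new wrappers hide the nouveau_grctx scratch structure from callers, which previously had to stuff it by hand at every call site (see the nv50_graph.c hunks above). A usage sketch mirroring those callers — sketch_build_ctxprog and the buffer size are illustrative, not from this commit:

static int
sketch_build_ctxprog(struct drm_device *dev)
{
	static u32 ctxprog[512];	/* nv50_graph keeps a fixed array */
	u32 ctxprog_len, grctx_size;
	int ret;

	/* NOUVEAU_GRCTX_PROG pass: emit the ctxprog, report both the
	 * program length and the size needed for a context buffer */
	ret = nv50_grctx_init(dev, ctxprog, ARRAY_SIZE(ctxprog),
			      &ctxprog_len, &grctx_size);
	if (ret)
		return ret;

	/* grctx_size then sizes each channel's context gpuobj, which
	 * the NOUVEAU_GRCTX_VALS pass, nv50_grctx_fill(dev, grctx),
	 * populates with default register values at channel creation */
	return 0;
}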
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index a7c12c94a5a6..0bba54f11800 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -83,7 +83,7 @@ nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
 		return ret;
 	}

-	ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size);
+	ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size - 0x6000);
 	if (ret) {
 		nv50_channel_del(&chan);
 		return ret;
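
This one-liner fixes the range handed to the allocator: drm_mm_init() takes a start offset and the size of the managed range, so passing the object's total size for a heap beginning at 0x6000 allowed allocations to extend 0x6000 bytes past the end of ramin. Illustration with an assumed 0x20000-byte ramin (return-value checking omitted):

struct drm_mm heap;

/* before: manages [0x6000, 0x26000) -- 0x6000 bytes of space
 * that does not exist */
drm_mm_init(&heap, 0x6000, 0x20000);

/* after: manages [0x6000, 0x20000) -- exactly the space that exists */
drm_mm_init(&heap, 0x6000, 0x20000 - 0x6000);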
diff --git a/drivers/gpu/drm/nouveau/nv50_mpeg.c b/drivers/gpu/drm/nouveau/nv50_mpeg.c
index b57a2d180ad2..90e8ed22cfcb 100644
--- a/drivers/gpu/drm/nouveau/nv50_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv50_mpeg.c
@@ -77,27 +77,13 @@ nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
 static void
 nv50_mpeg_context_del(struct nouveau_channel *chan, int engine)
 {
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct nouveau_gpuobj *ctx = chan->engctx[engine];
 	struct drm_device *dev = chan->dev;
-	unsigned long flags;
-	u32 inst, i;
-
-	if (!chan->ramin)
-		return;
-
-	inst  = chan->ramin->vinst >> 12;
-	inst |= 0x80000000;
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
-	if (nv_rd32(dev, 0x00b318) == inst)
-		nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
-	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+	int i;

 	for (i = 0x00; i <= 0x14; i += 4)
 		nv_wo32(chan->ramin, CTX_PTR(dev, i), 0x00000000);
+
 	nouveau_gpuobj_ref(NULL, &ctx);
 	chan->engctx[engine] = NULL;
 }
@@ -162,7 +148,6 @@ nv50_mpeg_init(struct drm_device *dev, int engine)
 static int
 nv50_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
 {
-	/*XXX: context save for s/r */
 	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
 	nv_wr32(dev, 0x00b140, 0x00000000);
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_software.c b/drivers/gpu/drm/nouveau/nv50_software.c
new file mode 100644
index 000000000000..114d2517d4a8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_software.c
@@ -0,0 +1,214 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_ramht.h"
29#include "nouveau_software.h"
30
31#include "nv50_display.h"
32
33struct nv50_software_priv {
34 struct nouveau_software_priv base;
35};
36
37struct nv50_software_chan {
38 struct nouveau_software_chan base;
39 struct {
40 struct nouveau_gpuobj *object;
41 } vblank;
42};
43
44static int
45mthd_dma_vblsem(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
46{
47 struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
48 struct nouveau_gpuobj *gpuobj;
49
50 gpuobj = nouveau_ramht_find(chan, data);
51 if (!gpuobj)
52 return -ENOENT;
53
54 if (nouveau_notifier_offset(gpuobj, NULL))
55 return -EINVAL;
56
57 pch->vblank.object = gpuobj;
58 pch->base.vblank.offset = ~0;
59 return 0;
60}
61
62static int
63mthd_vblsem_offset(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
64{
65 struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
66
67 if (nouveau_notifier_offset(pch->vblank.object, &data))
68 return -ERANGE;
69
70 pch->base.vblank.offset = data >> 2;
71 return 0;
72}
73
74static int
75mthd_vblsem_value(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
76{
77 struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
78 pch->base.vblank.value = data;
79 return 0;
80}
81
82static int
83mthd_vblsem_release(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
84{
85 struct nv50_software_priv *psw = nv_engine(chan->dev, NVOBJ_ENGINE_SW);
86 struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
87 struct drm_device *dev = chan->dev;
88
89 if (!pch->vblank.object || pch->base.vblank.offset == ~0 || data > 1)
90 return -EINVAL;
91
92 drm_vblank_get(dev, data);
93
94 pch->base.vblank.head = data;
95 list_add(&pch->base.vblank.list, &psw->base.vblank);
96 return 0;
97}
98
99static int
100mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
101{
102 nouveau_finish_page_flip(chan, NULL);
103 return 0;
104}
105
106static int
107nv50_software_context_new(struct nouveau_channel *chan, int engine)
108{
109 struct nv50_software_priv *psw = nv_engine(chan->dev, NVOBJ_ENGINE_SW);
110 struct nv50_display *pdisp = nv50_display(chan->dev);
111 struct nv50_software_chan *pch;
112 int ret = 0, i;
113
114 pch = kzalloc(sizeof(*pch), GFP_KERNEL);
115 if (!pch)
116 return -ENOMEM;
117
118 nouveau_software_context_new(&pch->base);
119 pch->base.vblank.bo = chan->notifier_bo;
120 chan->engctx[engine] = pch;
121
122 /* dma objects for display sync channel semaphore blocks */
123 for (i = 0; i < chan->dev->mode_config.num_crtc; i++) {
124 struct nv50_display_crtc *dispc = &pdisp->crtc[i];
125 struct nouveau_gpuobj *obj = NULL;
126
127 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
128 dispc->sem.bo->bo.offset, 0x1000,
129 NV_MEM_ACCESS_RW,
130 NV_MEM_TARGET_VRAM, &obj);
131 if (ret)
132 break;
133
134 ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, obj);
135 nouveau_gpuobj_ref(NULL, &obj);
136 }
137
138 if (ret)
139 psw->base.base.context_del(chan, engine);
140 return ret;
141}
142
143static void
144nv50_software_context_del(struct nouveau_channel *chan, int engine)
145{
146 struct nv50_software_chan *pch = chan->engctx[engine];
147 chan->engctx[engine] = NULL;
148 kfree(pch);
149}
150
151static int
152nv50_software_object_new(struct nouveau_channel *chan, int engine,
153 u32 handle, u16 class)
154{
155 struct drm_device *dev = chan->dev;
156 struct nouveau_gpuobj *obj = NULL;
157 int ret;
158
159 ret = nouveau_gpuobj_new(dev, chan, 16, 16, 0, &obj);
160 if (ret)
161 return ret;
162 obj->engine = 0;
163 obj->class = class;
164
165 ret = nouveau_ramht_insert(chan, handle, obj);
166 nouveau_gpuobj_ref(NULL, &obj);
167 return ret;
168}
169
170static int
171nv50_software_init(struct drm_device *dev, int engine)
172{
173 return 0;
174}
175
176static int
177nv50_software_fini(struct drm_device *dev, int engine, bool suspend)
178{
179 return 0;
180}
181
182static void
183nv50_software_destroy(struct drm_device *dev, int engine)
184{
185 struct nv50_software_priv *psw = nv_engine(dev, engine);
186
187 NVOBJ_ENGINE_DEL(dev, SW);
188 kfree(psw);
189}
190
191int
192nv50_software_create(struct drm_device *dev)
193{
194 struct nv50_software_priv *psw = kzalloc(sizeof(*psw), GFP_KERNEL);
195 if (!psw)
196 return -ENOMEM;
197
198 psw->base.base.destroy = nv50_software_destroy;
199 psw->base.base.init = nv50_software_init;
200 psw->base.base.fini = nv50_software_fini;
201 psw->base.base.context_new = nv50_software_context_new;
202 psw->base.base.context_del = nv50_software_context_del;
203 psw->base.base.object_new = nv50_software_object_new;
204 nouveau_software_create(&psw->base);
205
206 NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
207 NVOBJ_CLASS(dev, 0x506e, SW);
208 NVOBJ_MTHD (dev, 0x506e, 0x018c, mthd_dma_vblsem);
209 NVOBJ_MTHD (dev, 0x506e, 0x0400, mthd_vblsem_offset);
210 NVOBJ_MTHD (dev, 0x506e, 0x0404, mthd_vblsem_value);
211 NVOBJ_MTHD (dev, 0x506e, 0x0408, mthd_vblsem_release);
212 NVOBJ_MTHD (dev, 0x506e, 0x0500, mthd_flip);
213 return 0;
214}
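
For context, the methods registered above implement the vblank-semaphore protocol a client drives from its pushbuffer; the softirq-driven release then writes vblank.value at vblank.offset in the bound buffer when the requested head's vblank fires. A hedged sketch of that method sequence — the function name, subchannel choice, and the NvNotify0 handle are illustrative, not from this commit:

static int
sketch_request_vblank_release(struct nouveau_channel *chan, int head,
			      u32 offset, u32 value)
{
	int ret = RING_SPACE(chan, 8);
	if (ret)
		return ret;

	BEGIN_NV04(chan, 0, 0x018c, 1);		/* mthd_dma_vblsem */
	OUT_RING  (chan, NvNotify0);		/* semaphore buffer handle */
	BEGIN_NV04(chan, 0, 0x0400, 1);		/* mthd_vblsem_offset */
	OUT_RING  (chan, offset);
	BEGIN_NV04(chan, 0, 0x0404, 1);		/* mthd_vblsem_value */
	OUT_RING  (chan, value);
	BEGIN_NV04(chan, 0, 0x0408, 1);		/* mthd_vblsem_release */
	OUT_RING  (chan, head);			/* crtc whose vblank to wait on */
	FIRE_RING (chan);
	return 0;
}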
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 274640212475..a9514eaa74c1 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -242,9 +242,9 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
 		NV_ERROR(dev, "no space while disconnecting SOR\n");
 		return;
 	}
-	BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
+	BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
 	OUT_RING  (evo, 0);
-	BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+	BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
 	OUT_RING  (evo, 0);

 	nouveau_hdmi_mode_set(encoder, NULL);
@@ -430,7 +430,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 		nv_encoder->crtc = NULL;
 		return;
 	}
-	BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
+	BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
 	OUT_RING(evo, mode_ctl);
 }

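
The BEGIN_RING to BEGIN_NV04 rename running through this series distinguishes NV04-style method headers from the NVC0-style ones that take a different encoding. Assuming the usual NV04 header layout (word count, subchannel, byte-addressed method) — the real macro lives in nouveau_dma.h in this series, and this expansion is an assumption:

/* sketch_begin_nv04 is a hypothetical stand-in for the macro */
static inline void
sketch_begin_nv04(struct nouveau_channel *chan, int subc, int mthd, int size)
{
	/* bits 28:18 word count, 15:13 subchannel, 12:2 method */
	OUT_RING(chan, (size << 18) | (subc << 13) | mthd);
}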
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 44fbac9c7d93..179bb42a635c 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -147,7 +147,6 @@ nv50_vm_flush(struct nouveau_vm *vm)
 {
 	struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
 	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	int i;

 	pinstmem->flush(vm->dev);
@@ -158,7 +157,6 @@ nv50_vm_flush(struct nouveau_vm *vm)
 		return;
 	}

-	pfifo->tlb_flush(vm->dev);
 	for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
 		if (atomic_read(&vm->engref[i]))
 			dev_priv->eng[i]->tlb_flush(vm->dev, i);
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
new file mode 100644
index 000000000000..c2f889b0d340
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -0,0 +1,177 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_dma.h"
28#include "nouveau_fifo.h"
29#include "nouveau_ramht.h"
30#include "nouveau_fence.h"
31
32struct nv84_fence_chan {
33 struct nouveau_fence_chan base;
34};
35
36struct nv84_fence_priv {
37 struct nouveau_fence_priv base;
38 struct nouveau_gpuobj *mem;
39};
40
41static int
42nv84_fence_emit(struct nouveau_fence *fence)
43{
44 struct nouveau_channel *chan = fence->channel;
45 int ret = RING_SPACE(chan, 7);
46 if (ret == 0) {
47 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
48 OUT_RING (chan, NvSema);
49 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
50 OUT_RING (chan, upper_32_bits(chan->id * 16));
51 OUT_RING (chan, lower_32_bits(chan->id * 16));
52 OUT_RING (chan, fence->sequence);
53 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
54 FIRE_RING (chan);
55 }
56 return ret;
57}
58
59
60static int
61nv84_fence_sync(struct nouveau_fence *fence,
62 struct nouveau_channel *prev, struct nouveau_channel *chan)
63{
64 int ret = RING_SPACE(chan, 7);
65 if (ret == 0) {
66 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
67 OUT_RING (chan, NvSema);
68 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
69 OUT_RING (chan, upper_32_bits(prev->id * 16));
70 OUT_RING (chan, lower_32_bits(prev->id * 16));
71 OUT_RING (chan, fence->sequence);
72 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
73 FIRE_RING (chan);
74 }
75 return ret;
76}
77
78static u32
79nv84_fence_read(struct nouveau_channel *chan)
80{
81 struct nv84_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
82 return nv_ro32(priv->mem, chan->id * 16);
83}
84
85static void
86nv84_fence_context_del(struct nouveau_channel *chan, int engine)
87{
88 struct nv84_fence_chan *fctx = chan->engctx[engine];
89 nouveau_fence_context_del(&fctx->base);
90 chan->engctx[engine] = NULL;
91 kfree(fctx);
92}
93
94static int
95nv84_fence_context_new(struct nouveau_channel *chan, int engine)
96{
97 struct nv84_fence_priv *priv = nv_engine(chan->dev, engine);
98 struct nv84_fence_chan *fctx;
99 struct nouveau_gpuobj *obj;
100 int ret;
101
102 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
103 if (!fctx)
104 return -ENOMEM;
105
106 nouveau_fence_context_new(&fctx->base);
107
108 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
109 priv->mem->vinst, priv->mem->size,
110 NV_MEM_ACCESS_RW,
111 NV_MEM_TARGET_VRAM, &obj);
112 if (ret == 0) {
113 ret = nouveau_ramht_insert(chan, NvSema, obj);
114 nouveau_gpuobj_ref(NULL, &obj);
115 nv_wo32(priv->mem, chan->id * 16, 0x00000000);
116 }
117
118 if (ret)
119 nv84_fence_context_del(chan, engine);
120 return ret;
121}
122
123static int
124nv84_fence_fini(struct drm_device *dev, int engine, bool suspend)
125{
126 return 0;
127}
128
129static int
130nv84_fence_init(struct drm_device *dev, int engine)
131{
132 return 0;
133}
134
135static void
136nv84_fence_destroy(struct drm_device *dev, int engine)
137{
138 struct drm_nouveau_private *dev_priv = dev->dev_private;
139 struct nv84_fence_priv *priv = nv_engine(dev, engine);
140
141 nouveau_gpuobj_ref(NULL, &priv->mem);
142 dev_priv->eng[engine] = NULL;
143 kfree(priv);
144}
145
146int
147nv84_fence_create(struct drm_device *dev)
148{
149 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
150 struct drm_nouveau_private *dev_priv = dev->dev_private;
151 struct nv84_fence_priv *priv;
152 int ret;
153
154 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
155 if (!priv)
156 return -ENOMEM;
157
158 priv->base.engine.destroy = nv84_fence_destroy;
159 priv->base.engine.init = nv84_fence_init;
160 priv->base.engine.fini = nv84_fence_fini;
161 priv->base.engine.context_new = nv84_fence_context_new;
162 priv->base.engine.context_del = nv84_fence_context_del;
163 priv->base.emit = nv84_fence_emit;
164 priv->base.sync = nv84_fence_sync;
165 priv->base.read = nv84_fence_read;
166 dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
167
168 ret = nouveau_gpuobj_new(dev, NULL, 16 * pfifo->channels,
169 0x1000, 0, &priv->mem);
170 if (ret)
171 goto out;
172
173out:
174 if (ret)
175 nv84_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
176 return ret;
177}
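
The emit/sync pair above gives each channel a 16-byte slot in a shared VRAM buffer: emit does a SEMAPHORE_TRIGGER.WRITE_LONG of the new sequence into the emitting channel's slot, and sync queues an ACQUIRE_GEQUAL against the previous channel's slot, so the waiting channel stalls in hardware instead of the CPU. A hedged usage sketch — sketch_sync_channels is hypothetical, and the fence helper names/signatures are assumed from nouveau_fence.h in this series, which is not shown in this hunk:

static int
sketch_sync_channels(struct nouveau_channel *a, struct nouveau_channel *b)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	/* creates + emits a fence on a (nv84_fence_emit: WRITE_LONG
	 * into slot a->id * 16) */
	ret = nouveau_fence_new(a, &fence);
	if (ret)
		return ret;

	/* queues an ACQUIRE_GEQUAL on b against a's slot
	 * (nv84_fence_sync), then b proceeds once a catches up */
	ret = nouveau_fence_sync(fence, b);
	nouveau_fence_unref(&fence);
	return ret;
}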
diff --git a/drivers/gpu/drm/nouveau/nv84_fifo.c b/drivers/gpu/drm/nouveau/nv84_fifo.c
new file mode 100644
index 000000000000..cc82d799fc3b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_fifo.c
@@ -0,0 +1,241 @@
1/*
2 * Copyright (C) 2012 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30#include "nouveau_fifo.h"
31#include "nouveau_ramht.h"
32#include "nouveau_vm.h"
33
34struct nv84_fifo_priv {
35 struct nouveau_fifo_priv base;
36 struct nouveau_gpuobj *playlist[2];
37 int cur_playlist;
38};
39
40struct nv84_fifo_chan {
41 struct nouveau_fifo_chan base;
42 struct nouveau_gpuobj *ramfc;
43 struct nouveau_gpuobj *cache;
44};
45
46static int
47nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
48{
49 struct nv84_fifo_priv *priv = nv_engine(chan->dev, engine);
50 struct nv84_fifo_chan *fctx;
51 struct drm_device *dev = chan->dev;
52 struct drm_nouveau_private *dev_priv = dev->dev_private;
53 u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
54 u64 instance;
55 unsigned long flags;
56 int ret;
57
58 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
59 if (!fctx)
60 return -ENOMEM;
61 atomic_inc(&chan->vm->engref[engine]);
62
63 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
64 NV50_USER(chan->id), PAGE_SIZE);
65 if (!chan->user) {
66 ret = -ENOMEM;
67 goto error;
68 }
69
70 ret = nouveau_gpuobj_new(dev, chan, 256, 256, NVOBJ_FLAG_ZERO_ALLOC |
71 NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
72 if (ret)
73 goto error;
74
75 instance = fctx->ramfc->vinst >> 8;
76
77 ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, 0, &fctx->cache);
78 if (ret)
79 goto error;
80
81 nv_wo32(fctx->ramfc, 0x3c, 0x403f6078);
82 nv_wo32(fctx->ramfc, 0x40, 0x00000000);
83 nv_wo32(fctx->ramfc, 0x44, 0x01003fff);
84 nv_wo32(fctx->ramfc, 0x48, chan->pushbuf->cinst >> 4);
85 nv_wo32(fctx->ramfc, 0x50, lower_32_bits(ib_offset));
86 nv_wo32(fctx->ramfc, 0x54, upper_32_bits(ib_offset) |
87 drm_order(chan->dma.ib_max + 1) << 16);
88 nv_wo32(fctx->ramfc, 0x60, 0x7fffffff);
89 nv_wo32(fctx->ramfc, 0x78, 0x00000000);
90 nv_wo32(fctx->ramfc, 0x7c, 0x30000001);
91 nv_wo32(fctx->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
92 (4 << 24) /* SEARCH_FULL */ |
93 (chan->ramht->gpuobj->cinst >> 4));
94 nv_wo32(fctx->ramfc, 0x88, fctx->cache->vinst >> 10);
95 nv_wo32(fctx->ramfc, 0x98, chan->ramin->vinst >> 12);
96
97 nv_wo32(chan->ramin, 0x00, chan->id);
98 nv_wo32(chan->ramin, 0x04, fctx->ramfc->vinst >> 8);
99
100 dev_priv->engine.instmem.flush(dev);
101
102 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
103 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
104 nv50_fifo_playlist_update(dev);
105 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
106
107error:
108 if (ret)
109 priv->base.base.context_del(chan, engine);
110 return ret;
111}
112
113static void
114nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
115{
116 struct nv84_fifo_chan *fctx = chan->engctx[engine];
117 struct drm_device *dev = chan->dev;
118 struct drm_nouveau_private *dev_priv = dev->dev_private;
119 unsigned long flags;
120
121 /* remove channel from playlist, will context switch if active */
122 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
123 nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
124 nv50_fifo_playlist_update(dev);
125
126 /* tell any engines on this channel to unload their contexts */
127 nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
128 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
129 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
130
131 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
132 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
133
134 /* clean up */
135 if (chan->user) {
136 iounmap(chan->user);
137 chan->user = NULL;
138 }
139
140 nouveau_gpuobj_ref(NULL, &fctx->ramfc);
141 nouveau_gpuobj_ref(NULL, &fctx->cache);
142
143 atomic_dec(&chan->vm->engref[engine]);
144 chan->engctx[engine] = NULL;
145 kfree(fctx);
146}
147
148static int
149nv84_fifo_init(struct drm_device *dev, int engine)
150{
151 struct drm_nouveau_private *dev_priv = dev->dev_private;
152 struct nv84_fifo_chan *fctx;
153 u32 instance;
154 int i;
155
156 nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
157 nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
158 nv_wr32(dev, 0x00250c, 0x6f3cfc34);
159 nv_wr32(dev, 0x002044, 0x01003fff);
160
161 nv_wr32(dev, 0x002100, 0xffffffff);
162 nv_wr32(dev, 0x002140, 0xffffffff);
163
164 for (i = 0; i < 128; i++) {
165 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
166 if (chan && (fctx = chan->engctx[engine]))
167 instance = 0x80000000 | fctx->ramfc->vinst >> 8;
168 else
169 instance = 0x00000000;
170 nv_wr32(dev, 0x002600 + (i * 4), instance);
171 }
172
173 nv50_fifo_playlist_update(dev);
174
175 nv_wr32(dev, 0x003200, 1);
176 nv_wr32(dev, 0x003250, 1);
177 nv_wr32(dev, 0x002500, 1);
178 return 0;
179}
180
181static int
182nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
183{
184 struct drm_nouveau_private *dev_priv = dev->dev_private;
185 struct nv84_fifo_priv *priv = nv_engine(dev, engine);
186 int i;
187
188 /* set playlist length to zero, fifo will unload context */
189 nv_wr32(dev, 0x0032ec, 0);
190
191 /* tell all connected engines to unload their contexts */
192 for (i = 0; i < priv->base.channels; i++) {
193 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
194 if (chan)
195 nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
196 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
197 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", i);
198 return -EBUSY;
199 }
200 }
201
202 nv_wr32(dev, 0x002140, 0);
203 return 0;
204}
205
206int
207nv84_fifo_create(struct drm_device *dev)
208{
209 struct drm_nouveau_private *dev_priv = dev->dev_private;
210 struct nv84_fifo_priv *priv;
211 int ret;
212
213 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
214 if (!priv)
215 return -ENOMEM;
216
217 priv->base.base.destroy = nv50_fifo_destroy;
218 priv->base.base.init = nv84_fifo_init;
219 priv->base.base.fini = nv84_fifo_fini;
220 priv->base.base.context_new = nv84_fifo_context_new;
221 priv->base.base.context_del = nv84_fifo_context_del;
222 priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
223 priv->base.channels = 127;
224 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
225
226 ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
227 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
228 if (ret)
229 goto error;
230
231 ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
232 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
233 if (ret)
234 goto error;
235
236 nouveau_irq_register(dev, 8, nv04_fifo_isr);
237error:
238 if (ret)
239 priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
240 return ret;
241}
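
nv84_fifo_context_del() above performs the channel kickoff inline; the nv50_fifo_kickoff() helper referenced in nv50_fifo_fini() earlier is not defined in this hunk, but presumably wraps the same sequence. A sketch based on that inline code — sketch_fifo_kickoff is a hypothetical name, not the literal nv50 implementation:

static bool
sketch_fifo_kickoff(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;

	/* point the kickoff register at the channel's instance, then
	 * wait for the fifo to finish unloading the contexts of every
	 * engine attached to that channel */
	nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
	return nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff);
}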
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.c b/drivers/gpu/drm/nouveau/nv98_crypt.c
index db94ff0a9fab..e25e13fb894e 100644
--- a/drivers/gpu/drm/nouveau/nv98_crypt.c
+++ b/drivers/gpu/drm/nouveau/nv98_crypt.c
@@ -23,21 +23,93 @@
  */

 #include "drmP.h"
+
 #include "nouveau_drv.h"
 #include "nouveau_util.h"
 #include "nouveau_vm.h"
 #include "nouveau_ramht.h"

-struct nv98_crypt_engine {
+#include "nv98_crypt.fuc.h"
+
+struct nv98_crypt_priv {
 	struct nouveau_exec_engine base;
 };

+struct nv98_crypt_chan {
+	struct nouveau_gpuobj *mem;
+};
+
 static int
-nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
+nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv98_crypt_priv *priv = nv_engine(dev, engine);
+	struct nv98_crypt_chan *cctx;
+	int ret;
+
+	cctx = chan->engctx[engine] = kzalloc(sizeof(*cctx), GFP_KERNEL);
+	if (!cctx)
+		return -ENOMEM;
+
+	atomic_inc(&chan->vm->engref[engine]);
+
+	ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
+				 NVOBJ_FLAG_ZERO_FREE, &cctx->mem);
+	if (ret)
+		goto error;
+
+	nv_wo32(chan->ramin, 0xa0, 0x00190000);
+	nv_wo32(chan->ramin, 0xa4, cctx->mem->vinst + cctx->mem->size - 1);
+	nv_wo32(chan->ramin, 0xa8, cctx->mem->vinst);
+	nv_wo32(chan->ramin, 0xac, 0x00000000);
+	nv_wo32(chan->ramin, 0xb0, 0x00000000);
+	nv_wo32(chan->ramin, 0xb4, 0x00000000);
+	dev_priv->engine.instmem.flush(dev);
+
+error:
+	if (ret)
+		priv->base.context_del(chan, engine);
+	return ret;
+}
+
+static void
+nv98_crypt_context_del(struct nouveau_channel *chan, int engine)
+{
+	struct nv98_crypt_chan *cctx = chan->engctx[engine];
+	int i;
+
+	for (i = 0xa0; i < 0xb4; i += 4)
+		nv_wo32(chan->ramin, i, 0x00000000);
+
+	nouveau_gpuobj_ref(NULL, &cctx->mem);
+
+	atomic_dec(&chan->vm->engref[engine]);
+	chan->engctx[engine] = NULL;
+	kfree(cctx);
+}
+
+static int
+nv98_crypt_object_new(struct nouveau_channel *chan, int engine,
+		      u32 handle, u16 class)
 {
-	if (!(nv_rd32(dev, 0x000200) & 0x00004000))
-		return 0;
+	struct nv98_crypt_chan *cctx = chan->engctx[engine];
+
+	/* fuc engine doesn't need an object, our ramht code does.. */
+	cctx->mem->engine = 5;
+	cctx->mem->class  = class;
+	return nouveau_ramht_insert(chan, handle, cctx->mem);
+}

+static void
+nv98_crypt_tlb_flush(struct drm_device *dev, int engine)
+{
+	nv50_vm_flush_engine(dev, 0x0a);
+}
+
+static int
+nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
+{
 	nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
 	return 0;
 }
@@ -45,34 +117,100 @@ nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
 static int
 nv98_crypt_init(struct drm_device *dev, int engine)
 {
+	int i;
+
+	/* reset! */
 	nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
 	nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
+
+	/* wait for exit interrupt to signal */
+	nv_wait(dev, 0x087008, 0x00000010, 0x00000010);
+	nv_wr32(dev, 0x087004, 0x00000010);
+
+	/* upload microcode code and data segments */
+	nv_wr32(dev, 0x087ff8, 0x00100000);
+	for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
+		nv_wr32(dev, 0x087ff4, nv98_pcrypt_code[i]);
+
+	nv_wr32(dev, 0x087ff8, 0x00000000);
+	for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
+		nv_wr32(dev, 0x087ff4, nv98_pcrypt_data[i]);
+
+	/* start it running */
+	nv_wr32(dev, 0x08710c, 0x00000000);
+	nv_wr32(dev, 0x087104, 0x00000000); /* ENTRY */
+	nv_wr32(dev, 0x087100, 0x00000002); /* TRIGGER */
 	return 0;
 }

+static struct nouveau_enum nv98_crypt_isr_error_name[] = {
+	{ 0x0000, "ILLEGAL_MTHD" },
+	{ 0x0001, "INVALID_BITFIELD" },
+	{ 0x0002, "INVALID_ENUM" },
+	{ 0x0003, "QUERY" },
+	{}
+};
+
+static void
+nv98_crypt_isr(struct drm_device *dev)
+{
+	u32 disp = nv_rd32(dev, 0x08701c);
+	u32 stat = nv_rd32(dev, 0x087008) & disp & ~(disp >> 16);
+	u32 inst = nv_rd32(dev, 0x087050) & 0x3fffffff;
+	u32 ssta = nv_rd32(dev, 0x087040) & 0x0000ffff;
+	u32 addr = nv_rd32(dev, 0x087040) >> 16;
+	u32 mthd = (addr & 0x07ff) << 2;
+	u32 subc = (addr & 0x3800) >> 11;
+	u32 data = nv_rd32(dev, 0x087044);
+	int chid = nv50_graph_isr_chid(dev, inst);
+
+	if (stat & 0x00000040) {
+		NV_INFO(dev, "PCRYPT: DISPATCH_ERROR [");
+		nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
+		printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
+			chid, inst, subc, mthd, data);
+		nv_wr32(dev, 0x087004, 0x00000040);
+		stat &= ~0x00000040;
+	}
+
+	if (stat) {
+		NV_INFO(dev, "PCRYPT: unhandled intr 0x%08x\n", stat);
+		nv_wr32(dev, 0x087004, stat);
+	}
+
+	nv50_fb_vm_trap(dev, 1);
+}
+
 static void
 nv98_crypt_destroy(struct drm_device *dev, int engine)
 {
-	struct nv98_crypt_engine *pcrypt = nv_engine(dev, engine);
+	struct nv98_crypt_priv *priv = nv_engine(dev, engine);

+	nouveau_irq_unregister(dev, 14);
 	NVOBJ_ENGINE_DEL(dev, CRYPT);
-
-	kfree(pcrypt);
+	kfree(priv);
 }

 int
 nv98_crypt_create(struct drm_device *dev)
 {
-	struct nv98_crypt_engine *pcrypt;
+	struct nv98_crypt_priv *priv;

-	pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
-	if (!pcrypt)
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
 		return -ENOMEM;

-	pcrypt->base.destroy = nv98_crypt_destroy;
-	pcrypt->base.init = nv98_crypt_init;
-	pcrypt->base.fini = nv98_crypt_fini;
+	priv->base.destroy = nv98_crypt_destroy;
+	priv->base.init = nv98_crypt_init;
+	priv->base.fini = nv98_crypt_fini;
+	priv->base.context_new = nv98_crypt_context_new;
+	priv->base.context_del = nv98_crypt_context_del;
+	priv->base.object_new = nv98_crypt_object_new;
+	priv->base.tlb_flush = nv98_crypt_tlb_flush;
+
+	nouveau_irq_register(dev, 14, nv98_crypt_isr);

-	NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
+	NVOBJ_ENGINE_ADD(dev, CRYPT, &priv->base);
+	NVOBJ_CLASS(dev, 0x88b4, CRYPT);
 	return 0;
 }
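
The init path above shows the general falcon (fuc) microcode upload pattern: select a segment through the upload-address port, then stream words through the data port. Generalized from the code above — sketch_fuc_upload is a hypothetical helper; the ports shown (0x087ff8/0x087ff4) are PCRYPT's instance, and the 0x00100000 segment-select flag is used exactly as in nv98_crypt_init():

static void
sketch_fuc_upload(struct drm_device *dev,
		  const u32 *code, int code_len,
		  const u32 *data, int data_len)
{
	int i;

	/* select the code segment (auto-incrementing upload address) */
	nv_wr32(dev, 0x087ff8, 0x00100000);
	for (i = 0; i < code_len; i++)
		nv_wr32(dev, 0x087ff4, code[i]);

	/* then the data segment, starting back at offset zero */
	nv_wr32(dev, 0x087ff8, 0x00000000);
	for (i = 0; i < data_len; i++)
		nv_wr32(dev, 0x087ff4, data[i]);
}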
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.fuc b/drivers/gpu/drm/nouveau/nv98_crypt.fuc
new file mode 100644
index 000000000000..7393813044de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv98_crypt.fuc
@@ -0,0 +1,698 @@
1/*
2 * fuc microcode for nv98 pcrypt engine
3 * Copyright (C) 2010 Marcin Kościelnicki
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20.section #nv98_pcrypt_data
21
22ctx_dma:
23ctx_dma_query: .b32 0
24ctx_dma_src: .b32 0
25ctx_dma_dst: .b32 0
26.equ #dma_count 3
27ctx_query_address_high: .b32 0
28ctx_query_address_low: .b32 0
29ctx_query_counter: .b32 0
30ctx_cond_address_high: .b32 0
31ctx_cond_address_low: .b32 0
32ctx_cond_off: .b32 0
33ctx_src_address_high: .b32 0
34ctx_src_address_low: .b32 0
35ctx_dst_address_high: .b32 0
36ctx_dst_address_low: .b32 0
37ctx_mode: .b32 0
38.align 16
39ctx_key: .skip 16
40ctx_iv: .skip 16
41
42.align 0x80
43swap:
44.skip 32
45
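// dtable format (see #dtable_cmd): two b32 words per entry - a b16
// target (context offset or code address) packed with b16 flags
// (2 = plain context store, 1 = skipped while ctx_cond_off is set),
// then the complement of the data bits the method accepts; a stray
// bit raises INVALID_BITFIELD.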
46.align 8
47common_cmd_dtable:
48.b32 #ctx_query_address_high + 0x20000 ~0xff
49.b32 #ctx_query_address_low + 0x20000 ~0xfffffff0
50.b32 #ctx_query_counter + 0x20000 ~0xffffffff
51.b32 #cmd_query_get + 0x00000 ~1
52.b32 #ctx_cond_address_high + 0x20000 ~0xff
53.b32 #ctx_cond_address_low + 0x20000 ~0xfffffff0
54.b32 #cmd_cond_mode + 0x00000 ~7
55.b32 #cmd_wrcache_flush + 0x00000 ~0
56.equ #common_cmd_max 0x88
57
58
59.align 8
60engine_cmd_dtable:
61.b32 #ctx_key + 0x0 + 0x20000 ~0xffffffff
62.b32 #ctx_key + 0x4 + 0x20000 ~0xffffffff
63.b32 #ctx_key + 0x8 + 0x20000 ~0xffffffff
64.b32 #ctx_key + 0xc + 0x20000 ~0xffffffff
65.b32 #ctx_iv + 0x0 + 0x20000 ~0xffffffff
66.b32 #ctx_iv + 0x4 + 0x20000 ~0xffffffff
67.b32 #ctx_iv + 0x8 + 0x20000 ~0xffffffff
68.b32 #ctx_iv + 0xc + 0x20000 ~0xffffffff
69.b32 #ctx_src_address_high + 0x20000 ~0xff
70.b32 #ctx_src_address_low + 0x20000 ~0xfffffff0
71.b32 #ctx_dst_address_high + 0x20000 ~0xff
72.b32 #ctx_dst_address_low + 0x20000 ~0xfffffff0
73.b32 #crypt_cmd_mode + 0x00000 ~0xf
74.b32 #crypt_cmd_length + 0x10000 ~0x0ffffff0
75.equ #engine_cmd_max 0xce
76
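// one entry per crypt mode: the prep routine that queues the cipher
// script, paired with the DMA loop that pumps 16-byte blocks through
// it (in+out, out-only, or in-only).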
77.align 4
78crypt_dtable:
79.b16 #crypt_copy_prep #crypt_do_inout
80.b16 #crypt_store_prep #crypt_do_out
81.b16 #crypt_ecb_e_prep #crypt_do_inout
82.b16 #crypt_ecb_d_prep #crypt_do_inout
83.b16 #crypt_cbc_e_prep #crypt_do_inout
84.b16 #crypt_cbc_d_prep #crypt_do_inout
85.b16 #crypt_pcbc_e_prep #crypt_do_inout
86.b16 #crypt_pcbc_d_prep #crypt_do_inout
87.b16 #crypt_cfb_e_prep #crypt_do_inout
88.b16 #crypt_cfb_d_prep #crypt_do_inout
89.b16 #crypt_ofb_prep #crypt_do_inout
90.b16 #crypt_ctr_prep #crypt_do_inout
91.b16 #crypt_cbc_mac_prep #crypt_do_in
92.b16 #crypt_cmac_finish_complete_prep #crypt_do_in
93.b16 #crypt_cmac_finish_partial_prep #crypt_do_in
94
95.align 0x100
96
97.section #nv98_pcrypt_code
98
99 // $r0 is always set to 0 in our code - this allows some space savings.
100 clear b32 $r0
101
102 // set up the interrupt handler
103 mov $r1 #ih
104 mov $iv0 $r1
105
106 // init stack pointer
107 mov $sp $r0
108
109 // set interrupt dispatch - route timer, fifo, ctxswitch to i0, others to host
110 movw $r1 0xfff0
111 sethi $r1 0
112 mov $r2 0x400
113 iowr I[$r2 + 0x300] $r1
114
115 // enable the interrupts
116 or $r1 0xc
117 iowr I[$r2] $r1
118
119 // enable fifo access and context switching
120 mov $r1 3
121 mov $r2 0x1200
122 iowr I[$r2] $r1
123
124 // enable i0 delivery
125 bset $flags ie0
126
 127	// sleep forever, waking only for interrupts.
128 bset $flags $p0
129 spin:
130 sleep $p0
131 bra #spin
132
133// i0 handler
134ih:
135 // see which interrupts we got
136 iord $r1 I[$r0 + 0x200]
137
138 and $r2 $r1 0x8
139 cmpu b32 $r2 0
140 bra e #noctx
141
142 // context switch... prepare the regs for xfer
143 mov $r2 0x7700
144 mov $xtargets $r2
145 mov $xdbase $r0
146 // 128-byte context.
147 mov $r2 0
148 sethi $r2 0x50000
149
150 // read current channel
151 mov $r3 0x1400
152 iord $r4 I[$r3]
153 // if bit 30 set, it's active, so we have to unload it first.
154 shl b32 $r5 $r4 1
155 cmps b32 $r5 0
156 bra nc #ctxload
157
158 // unload the current channel - save the context
159 xdst $r0 $r2
160 xdwait
161 // and clear bit 30, then write back
162 bclr $r4 0x1e
163 iowr I[$r3] $r4
164 // tell PFIFO we unloaded
165 mov $r4 1
166 iowr I[$r3 + 0x200] $r4
167
168 bra #noctx
169
170 ctxload:
171 // no channel loaded - perhaps we're requested to load one
172 iord $r4 I[$r3 + 0x100]
173 shl b32 $r15 $r4 1
174 cmps b32 $r15 0
175 // if bit 30 of next channel not set, probably PFIFO is just
176 // killing a context. do a faux load, without the active bit.
177 bra nc #dummyload
178
179 // ok, do a real context load.
180 xdld $r0 $r2
181 xdwait
182 mov $r5 #ctx_dma
183 mov $r6 #dma_count - 1
184 ctxload_dma_loop:
185 ld b32 $r7 D[$r5 + $r6 * 4]
186 add b32 $r8 $r6 0x180
187 shl b32 $r8 8
188 iowr I[$r8] $r7
189 sub b32 $r6 1
190 bra nc #ctxload_dma_loop
191
192 dummyload:
193 // tell PFIFO we're done
194 mov $r5 2
195 iowr I[$r3 + 0x200] $r5
196
197 noctx:
198 and $r2 $r1 0x4
199 cmpu b32 $r2 0
200 bra e #nocmd
201
202 // incoming fifo command.
203 mov $r3 0x1900
204 iord $r2 I[$r3 + 0x100]
205 iord $r3 I[$r3]
206 // extract the method
207 and $r4 $r2 0x7ff
208 // shift the addr to proper position if we need to interrupt later
209 shl b32 $r2 0x10
210
211 // mthd 0 and 0x100 [NAME, NOP]: ignore
212 and $r5 $r4 0x7bf
213 cmpu b32 $r5 0
214 bra e #cmddone
215
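	// dispatch on method index: >= 0xc0 engine table, >= 0x80 common
	// table, 0x60..0x62 DMA objects, 0x50 PM_TRIGGER, else ILLEGAL_MTHD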
216 mov $r5 #engine_cmd_dtable - 0xc0 * 8
217 mov $r6 #engine_cmd_max
218 cmpu b32 $r4 0xc0
219 bra nc #dtable_cmd
220 mov $r5 #common_cmd_dtable - 0x80 * 8
221 mov $r6 #common_cmd_max
222 cmpu b32 $r4 0x80
223 bra nc #dtable_cmd
224 cmpu b32 $r4 0x60
225 bra nc #dma_cmd
226 cmpu b32 $r4 0x50
227 bra ne #illegal_mthd
228
229 // mthd 0x140: PM_TRIGGER
230 mov $r2 0x2200
231 clear b32 $r3
232 sethi $r3 0x20000
233 iowr I[$r2] $r3
234 bra #cmddone
235
236 dma_cmd:
237 // mthd 0x180...: DMA_*
238 cmpu b32 $r4 0x60+#dma_count
239 bra nc #illegal_mthd
240 shl b32 $r5 $r4 2
241 add b32 $r5 (#ctx_dma - 0x60 * 4) & 0xffff
242 bset $r3 0x1e
243 st b32 D[$r5] $r3
244 add b32 $r4 0x180 - 0x60
245 shl b32 $r4 8
246 iowr I[$r4] $r3
247 bra #cmddone
248
249 dtable_cmd:
250 cmpu b32 $r4 $r6
251 bra nc #illegal_mthd
252 shl b32 $r4 3
253 add b32 $r4 $r5
254 ld b32 $r5 D[$r4 + 4]
255 and $r5 $r3
256 cmpu b32 $r5 0
257 bra ne #invalid_bitfield
258 ld b16 $r5 D[$r4]
259 ld b16 $r6 D[$r4 + 2]
260 cmpu b32 $r6 2
261 bra e #cmd_setctx
262 ld b32 $r7 D[$r0 + #ctx_cond_off]
263 and $r6 $r7
264 cmpu b32 $r6 1
265 bra e #cmddone
266 call $r5
267 bra $p1 #dispatch_error
268 bra #cmddone
269
270 cmd_setctx:
271 st b32 D[$r5] $r3
272 bra #cmddone
273
274
275 invalid_bitfield:
276 or $r2 1
277 dispatch_error:
278 illegal_mthd:
279 mov $r4 0x1000
280 iowr I[$r4] $r2
281 iowr I[$r4 + 0x100] $r3
282 mov $r4 0x40
283 iowr I[$r0] $r4
284
285 im_loop:
286 iord $r4 I[$r0 + 0x200]
287 and $r4 0x40
288 cmpu b32 $r4 0
289 bra ne #im_loop
290
291 cmddone:
292 // remove the command from FIFO
293 mov $r3 0x1d00
294 mov $r4 1
295 iowr I[$r3] $r4
296
297 nocmd:
298 // ack the processed interrupts
299 and $r1 $r1 0xc
300 iowr I[$r0 + 0x100] $r1
301iret
302
303cmd_query_get:
304 // if bit 0 of param set, trigger interrupt afterwards.
305 setp $p1 $r3
306 or $r2 3
307
308 // read PTIMER, beware of races...
309 mov $r4 0xb00
310 ptimer_retry:
311 iord $r6 I[$r4 + 0x100]
312 iord $r5 I[$r4]
313 iord $r7 I[$r4 + 0x100]
314 cmpu b32 $r6 $r7
315 bra ne #ptimer_retry
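	// (the word at +0x100 is read before and after its partner; if it
	// changed in between, the sample straddled a carry - try again)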
316
317 // prepare the query structure
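	// (16-byte layout: query counter, zero, then the two PTIMER words)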
318 ld b32 $r4 D[$r0 + #ctx_query_counter]
319 st b32 D[$r0 + #swap + 0x0] $r4
320 st b32 D[$r0 + #swap + 0x4] $r0
321 st b32 D[$r0 + #swap + 0x8] $r5
322 st b32 D[$r0 + #swap + 0xc] $r6
323
324 // will use target 0, DMA_QUERY.
325 mov $xtargets $r0
326
327 ld b32 $r4 D[$r0 + #ctx_query_address_high]
328 shl b32 $r4 0x18
329 mov $xdbase $r4
330
331 ld b32 $r4 D[$r0 + #ctx_query_address_low]
332 mov $r5 #swap
333 sethi $r5 0x20000
334 xdst $r4 $r5
335 xdwait
336
337 ret
338
339cmd_cond_mode:
340 // if >= 5, INVALID_ENUM
341 bset $flags $p1
342 or $r2 2
343 cmpu b32 $r3 5
344 bra nc #return
345
346 // otherwise, no error.
347 bclr $flags $p1
348
349 // if < 2, no QUERY object is involved
350 cmpu b32 $r3 2
351 bra nc #cmd_cond_mode_queryful
352
353 xor $r3 1
354 st b32 D[$r0 + #ctx_cond_off] $r3
355 return:
356 ret
357
358 cmd_cond_mode_queryful:
359 // ok, will need to pull a QUERY object, prepare offsets
360 ld b32 $r4 D[$r0 + #ctx_cond_address_high]
361 ld b32 $r5 D[$r0 + #ctx_cond_address_low]
362 and $r6 $r5 0xff
363 shr b32 $r5 8
364 shl b32 $r4 0x18
365 or $r4 $r5
366 mov $xdbase $r4
367 mov $xtargets $r0
368
369 // pull the first one
370 mov $r5 #swap
371 sethi $r5 0x20000
372 xdld $r6 $r5
373
374 // if == 2, only a single QUERY is involved...
375 cmpu b32 $r3 2
376 bra ne #cmd_cond_mode_double
377
378 xdwait
379 ld b32 $r4 D[$r0 + #swap + 4]
380 cmpu b32 $r4 0
381 xbit $r4 $flags z
382 st b32 D[$r0 + #ctx_cond_off] $r4
383 ret
384
 385	// ok, we'll need to pull the second one too
386 cmd_cond_mode_double:
387 add b32 $r6 0x10
388 add b32 $r5 0x10
389 xdld $r6 $r5
390 xdwait
391
392 // compare COUNTERs
393 ld b32 $r5 D[$r0 + #swap + 0x00]
394 ld b32 $r6 D[$r0 + #swap + 0x10]
395 cmpu b32 $r5 $r6
396 xbit $r4 $flags z
397
 398	// compare RES values
399 ld b32 $r5 D[$r0 + #swap + 0x04]
400 ld b32 $r6 D[$r0 + #swap + 0x14]
401 cmpu b32 $r5 $r6
402 xbit $r5 $flags z
403 and $r4 $r5
404
405 // and negate or not, depending on mode
406 cmpu b32 $r3 3
407 xbit $r5 $flags z
408 xor $r4 $r5
409 st b32 D[$r0 + #ctx_cond_off] $r4
410 ret
411
412cmd_wrcache_flush:
413 bclr $flags $p1
414 mov $r2 0x2200
415 clear b32 $r3
416 sethi $r3 0x10000
417 iowr I[$r2] $r3
418 ret
419
420crypt_cmd_mode:
421 // if >= 0xf, INVALID_ENUM
422 bset $flags $p1
423 or $r2 2
424 cmpu b32 $r3 0xf
425 bra nc #crypt_cmd_mode_return
426
427 bclr $flags $p1
428 st b32 D[$r0 + #ctx_mode] $r3
429
430 crypt_cmd_mode_return:
431 ret
432
433crypt_cmd_length:
434 // nop if length == 0
435 cmpu b32 $r3 0
436 bra e #crypt_cmd_mode_return
437
438 // init key, IV
439 cxset 3
440 mov $r4 #ctx_key
441 sethi $r4 0x70000
442 xdst $r0 $r4
443 mov $r4 #ctx_iv
444 sethi $r4 0x60000
445 xdst $r0 $r4
446 xdwait
447 ckeyreg $c7
448
449 // prepare the targets
450 mov $r4 0x2100
451 mov $xtargets $r4
452
453 // prepare src address
454 ld b32 $r4 D[$r0 + #ctx_src_address_high]
455 ld b32 $r5 D[$r0 + #ctx_src_address_low]
456 shr b32 $r8 $r5 8
457 shl b32 $r4 0x18
458 or $r4 $r8
459 and $r5 $r5 0xff
460
461 // prepare dst address
462 ld b32 $r6 D[$r0 + #ctx_dst_address_high]
463 ld b32 $r7 D[$r0 + #ctx_dst_address_low]
464 shr b32 $r8 $r7 8
465 shl b32 $r6 0x18
466 or $r6 $r8
467 and $r7 $r7 0xff
468
469 // find the proper prep & do functions
470 ld b32 $r8 D[$r0 + #ctx_mode]
471 shl b32 $r8 2
472
473 // run prep
474 ld b16 $r9 D[$r8 + #crypt_dtable]
475 call $r9
476
477 // do it
478 ld b16 $r9 D[$r8 + #crypt_dtable + 2]
479 call $r9
480 cxset 1
481 xdwait
482 cxset 0x61
483 xdwait
484 xdwait
485
486 // update src address
487 shr b32 $r8 $r4 0x18
488 shl b32 $r9 $r4 8
489 add b32 $r9 $r5
490 adc b32 $r8 0
491 st b32 D[$r0 + #ctx_src_address_high] $r8
492 st b32 D[$r0 + #ctx_src_address_low] $r9
493
494 // update dst address
495 shr b32 $r8 $r6 0x18
496 shl b32 $r9 $r6 8
497 add b32 $r9 $r7
498 adc b32 $r8 0
499 st b32 D[$r0 + #ctx_dst_address_high] $r8
500 st b32 D[$r0 + #ctx_dst_address_low] $r9
501
502 // pull updated IV
503 cxset 2
504 mov $r4 #ctx_iv
505 sethi $r4 0x60000
506 xdld $r0 $r4
507 xdwait
508
509 ret
510
511
512crypt_copy_prep:
513 cs0begin 2
514 cxsin $c0
515 cxsout $c0
516 ret
517
518crypt_store_prep:
519 cs0begin 1
520 cxsout $c6
521 ret
522
523crypt_ecb_e_prep:
524 cs0begin 3
525 cxsin $c0
526 cenc $c0 $c0
527 cxsout $c0
528 ret
529
530crypt_ecb_d_prep:
531 ckexp $c7 $c7
532 cs0begin 3
533 cxsin $c0
534 cdec $c0 $c0
535 cxsout $c0
536 ret
537
538crypt_cbc_e_prep:
539 cs0begin 4
540 cxsin $c0
541 cxor $c6 $c0
542 cenc $c6 $c6
543 cxsout $c6
544 ret
545
546crypt_cbc_d_prep:
547 ckexp $c7 $c7
548 cs0begin 5
549 cmov $c2 $c6
550 cxsin $c6
551 cdec $c0 $c6
552 cxor $c0 $c2
553 cxsout $c0
554 ret
555
556crypt_pcbc_e_prep:
557 cs0begin 5
558 cxsin $c0
559 cxor $c6 $c0
560 cenc $c6 $c6
561 cxsout $c6
562 cxor $c6 $c0
563 ret
564
565crypt_pcbc_d_prep:
566 ckexp $c7 $c7
567 cs0begin 5
568 cxsin $c0
569 cdec $c1 $c0
570 cxor $c6 $c1
571 cxsout $c6
572 cxor $c6 $c0
573 ret
574
575crypt_cfb_e_prep:
576 cs0begin 4
577 cenc $c6 $c6
578 cxsin $c0
579 cxor $c6 $c0
580 cxsout $c6
581 ret
582
583crypt_cfb_d_prep:
584 cs0begin 4
585 cenc $c0 $c6
586 cxsin $c6
587 cxor $c0 $c6
588 cxsout $c0
589 ret
590
591crypt_ofb_prep:
592 cs0begin 4
593 cenc $c6 $c6
594 cxsin $c0
595 cxor $c0 $c6
596 cxsout $c0
597 ret
598
599crypt_ctr_prep:
600 cs0begin 5
601 cenc $c1 $c6
602 cadd $c6 1
603 cxsin $c0
604 cxor $c0 $c1
605 cxsout $c0
606 ret
607
608crypt_cbc_mac_prep:
609 cs0begin 3
610 cxsin $c0
611 cxor $c6 $c0
612 cenc $c6 $c6
613 ret
614
615crypt_cmac_finish_complete_prep:
616 cs0begin 7
617 cxsin $c0
618 cxor $c6 $c0
619 cxor $c0 $c0
620 cenc $c0 $c0
621 cprecmac $c0 $c0
622 cxor $c6 $c0
623 cenc $c6 $c6
624 ret
625
626crypt_cmac_finish_partial_prep:
627 cs0begin 8
628 cxsin $c0
629 cxor $c6 $c0
630 cxor $c0 $c0
631 cenc $c0 $c0
632 cprecmac $c0 $c0
633 cprecmac $c0 $c0
634 cxor $c6 $c0
635 cenc $c6 $c6
636 ret
637
638// TODO
639crypt_do_in:
640 add b32 $r3 $r5
641 mov $xdbase $r4
642 mov $r9 #swap
643 sethi $r9 0x20000
644 crypt_do_in_loop:
645 xdld $r5 $r9
646 xdwait
647 cxset 0x22
648 xdst $r0 $r9
649 cs0exec 1
650 xdwait
651 add b32 $r5 0x10
652 cmpu b32 $r5 $r3
653 bra ne #crypt_do_in_loop
654 cxset 1
655 xdwait
656 ret
657
658crypt_do_out:
659 add b32 $r3 $r7
660 mov $xdbase $r6
661 mov $r9 #swap
662 sethi $r9 0x20000
663 crypt_do_out_loop:
664 cs0exec 1
665 cxset 0x61
666 xdld $r7 $r9
667 xdst $r7 $r9
668 cxset 1
669 xdwait
670 add b32 $r7 0x10
671 cmpu b32 $r7 $r3
672 bra ne #crypt_do_out_loop
673 ret
674
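// in+out: each pass pulls one 16-byte block from the source object,
// runs one step of the prepared script over it, and pushes the result
// to the destination object.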
675crypt_do_inout:
676 add b32 $r3 $r5
677 mov $r9 #swap
678 sethi $r9 0x20000
679 crypt_do_inout_loop:
680 mov $xdbase $r4
681 xdld $r5 $r9
682 xdwait
683 cxset 0x21
684 xdst $r0 $r9
685 cs0exec 1
686 cxset 0x61
687 mov $xdbase $r6
688 xdld $r7 $r9
689 xdst $r7 $r9
690 cxset 1
691 xdwait
692 add b32 $r5 0x10
693 add b32 $r7 0x10
694 cmpu b32 $r5 $r3
695 bra ne #crypt_do_inout_loop
696 ret
697
698.align 0x100
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h b/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h
new file mode 100644
index 000000000000..38676c74e6e0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h
@@ -0,0 +1,584 @@
1uint32_t nv98_pcrypt_data[] = {
2/* 0x0000: ctx_dma */
3/* 0x0000: ctx_dma_query */
4 0x00000000,
5/* 0x0004: ctx_dma_src */
6 0x00000000,
7/* 0x0008: ctx_dma_dst */
8 0x00000000,
9/* 0x000c: ctx_query_address_high */
10 0x00000000,
11/* 0x0010: ctx_query_address_low */
12 0x00000000,
13/* 0x0014: ctx_query_counter */
14 0x00000000,
15/* 0x0018: ctx_cond_address_high */
16 0x00000000,
17/* 0x001c: ctx_cond_address_low */
18 0x00000000,
19/* 0x0020: ctx_cond_off */
20 0x00000000,
21/* 0x0024: ctx_src_address_high */
22 0x00000000,
23/* 0x0028: ctx_src_address_low */
24 0x00000000,
25/* 0x002c: ctx_dst_address_high */
26 0x00000000,
27/* 0x0030: ctx_dst_address_low */
28 0x00000000,
29/* 0x0034: ctx_mode */
30 0x00000000,
31 0x00000000,
32 0x00000000,
33/* 0x0040: ctx_key */
34 0x00000000,
35 0x00000000,
36 0x00000000,
37 0x00000000,
38/* 0x0050: ctx_iv */
39 0x00000000,
40 0x00000000,
41 0x00000000,
42 0x00000000,
43 0x00000000,
44 0x00000000,
45 0x00000000,
46 0x00000000,
47 0x00000000,
48 0x00000000,
49 0x00000000,
50 0x00000000,
51/* 0x0080: swap */
52 0x00000000,
53 0x00000000,
54 0x00000000,
55 0x00000000,
56 0x00000000,
57 0x00000000,
58 0x00000000,
59 0x00000000,
60/* 0x00a0: common_cmd_dtable */
61 0x0002000c,
62 0xffffff00,
63 0x00020010,
64 0x0000000f,
65 0x00020014,
66 0x00000000,
67 0x00000192,
68 0xfffffffe,
69 0x00020018,
70 0xffffff00,
71 0x0002001c,
72 0x0000000f,
73 0x000001d7,
74 0xfffffff8,
75 0x00000260,
76 0xffffffff,
77/* 0x00e0: engine_cmd_dtable */
78 0x00020040,
79 0x00000000,
80 0x00020044,
81 0x00000000,
82 0x00020048,
83 0x00000000,
84 0x0002004c,
85 0x00000000,
86 0x00020050,
87 0x00000000,
88 0x00020054,
89 0x00000000,
90 0x00020058,
91 0x00000000,
92 0x0002005c,
93 0x00000000,
94 0x00020024,
95 0xffffff00,
96 0x00020028,
97 0x0000000f,
98 0x0002002c,
99 0xffffff00,
100 0x00020030,
101 0x0000000f,
102 0x00000271,
103 0xfffffff0,
104 0x00010285,
105 0xf000000f,
106/* 0x0150: crypt_dtable */
107 0x04db0321,
108 0x04b1032f,
109 0x04db0339,
110 0x04db034b,
111 0x04db0361,
112 0x04db0377,
113 0x04db0395,
114 0x04db03af,
115 0x04db03cd,
116 0x04db03e3,
117 0x04db03f9,
118 0x04db040f,
119 0x04830429,
120 0x0483043b,
121 0x0483045d,
122 0x00000000,
123 0x00000000,
124 0x00000000,
125 0x00000000,
126 0x00000000,
127 0x00000000,
128 0x00000000,
129 0x00000000,
130 0x00000000,
131 0x00000000,
132 0x00000000,
133 0x00000000,
134 0x00000000,
135 0x00000000,
136 0x00000000,
137 0x00000000,
138 0x00000000,
139 0x00000000,
140 0x00000000,
141 0x00000000,
142 0x00000000,
143 0x00000000,
144 0x00000000,
145 0x00000000,
146 0x00000000,
147 0x00000000,
148 0x00000000,
149 0x00000000,
150 0x00000000,
151};
152
153uint32_t nv98_pcrypt_code[] = {
154 0x17f004bd,
155 0x0010fe35,
156 0xf10004fe,
157 0xf0fff017,
158 0x27f10013,
159 0x21d00400,
160 0x0c15f0c0,
161 0xf00021d0,
162 0x27f10317,
163 0x21d01200,
164 0x1031f400,
165/* 0x002f: spin */
166 0xf40031f4,
167 0x0ef40028,
168/* 0x0035: ih */
169 0x8001cffd,
170 0xb00812c4,
171 0x0bf40024,
172 0x0027f167,
173 0x002bfe77,
174 0xf00007fe,
175 0x23f00027,
176 0x0037f105,
177 0x0034cf14,
178 0xb0014594,
179 0x18f40055,
180 0x0602fa17,
181 0x4af003f8,
182 0x0034d01e,
183 0xd00147f0,
184 0x0ef48034,
185/* 0x0075: ctxload */
186 0x4034cf33,
187 0xb0014f94,
188 0x18f400f5,
189 0x0502fa21,
190 0x57f003f8,
191 0x0267f000,
192/* 0x008c: ctxload_dma_loop */
193 0xa07856bc,
194 0xb6018068,
195 0x87d00884,
196 0x0162b600,
197/* 0x009f: dummyload */
198 0xf0f018f4,
199 0x35d00257,
200/* 0x00a5: noctx */
201 0x0412c480,
202 0xf50024b0,
203 0xf100df0b,
204 0xcf190037,
205 0x33cf4032,
206 0xff24e400,
207 0x1024b607,
208 0x07bf45e4,
209 0xf50054b0,
210 0xf100b90b,
211 0xf1fae057,
212 0xb000ce67,
213 0x18f4c044,
214 0xa057f14d,
215 0x8867f1fc,
216 0x8044b000,
217 0xb03f18f4,
218 0x18f46044,
219 0x5044b019,
220 0xf1741bf4,
221 0xbd220027,
222 0x0233f034,
223 0xf50023d0,
224/* 0x0103: dma_cmd */
225 0xb000810e,
226 0x18f46344,
227 0x0245945e,
228 0xfe8050b7,
229 0x801e39f0,
230 0x40b70053,
231 0x44b60120,
232 0x0043d008,
233/* 0x0123: dtable_cmd */
234 0xb8600ef4,
235 0x18f40446,
236 0x0344b63e,
237 0x980045bb,
238 0x53fd0145,
239 0x0054b004,
240 0x58291bf4,
241 0x46580045,
242 0x0264b001,
243 0x98170bf4,
244 0x67fd0807,
245 0x0164b004,
246 0xf9300bf4,
247 0x0f01f455,
248/* 0x015b: cmd_setctx */
249 0x80280ef4,
250 0x0ef40053,
251/* 0x0161: invalid_bitfield */
252 0x0125f022,
253/* 0x0164: dispatch_error */
254/* 0x0164: illegal_mthd */
255 0x100047f1,
256 0xd00042d0,
257 0x47f04043,
258 0x0004d040,
259/* 0x0174: im_loop */
260 0xf08004cf,
261 0x44b04044,
262 0xf71bf400,
263/* 0x0180: cmddone */
264 0x1d0037f1,
265 0xd00147f0,
266/* 0x018a: nocmd */
267 0x11c40034,
268 0x4001d00c,
269/* 0x0192: cmd_query_get */
270 0x38f201f8,
271 0x0325f001,
272 0x0b0047f1,
273/* 0x019c: ptimer_retry */
274 0xcf4046cf,
275 0x47cf0045,
276 0x0467b840,
277 0x98f41bf4,
278 0x04800504,
279 0x21008020,
280 0x80220580,
281 0x0bfe2306,
282 0x03049800,
283 0xfe1844b6,
284 0x04980047,
285 0x8057f104,
286 0x0253f000,
287 0xf80645fa,
288/* 0x01d7: cmd_cond_mode */
289 0xf400f803,
290 0x25f00131,
291 0x0534b002,
292 0xf41218f4,
293 0x34b00132,
294 0x0b18f402,
295 0x800136f0,
296/* 0x01f2: return */
297 0x00f80803,
298/* 0x01f4: cmd_cond_mode_queryful */
299 0x98060498,
300 0x56c40705,
301 0x0855b6ff,
302 0xfd1844b6,
303 0x47fe0545,
304 0x000bfe00,
305 0x008057f1,
306 0xfa0253f0,
307 0x34b00565,
308 0x131bf402,
309 0x049803f8,
310 0x0044b021,
311 0x800b4cf0,
312 0x00f80804,
313/* 0x022c: cmd_cond_mode_double */
314 0xb61060b6,
315 0x65fa1050,
316 0x9803f805,
317 0x06982005,
318 0x0456b824,
319 0x980b4cf0,
320 0x06982105,
321 0x0456b825,
322 0xfd0b5cf0,
323 0x34b00445,
324 0x0b5cf003,
325 0x800645fd,
326 0x00f80804,
327/* 0x0260: cmd_wrcache_flush */
328 0xf10132f4,
329 0xbd220027,
330 0x0133f034,
331 0xf80023d0,
332/* 0x0271: crypt_cmd_mode */
333 0x0131f400,
334 0xb00225f0,
335 0x18f40f34,
336 0x0132f409,
337/* 0x0283: crypt_cmd_mode_return */
338 0xf80d0380,
339/* 0x0285: crypt_cmd_length */
340 0x0034b000,
341 0xf4fb0bf4,
342 0x47f0033c,
343 0x0743f040,
344 0xf00604fa,
345 0x43f05047,
346 0x0604fa06,
347 0x3cf503f8,
348 0x47f1c407,
349 0x4bfe2100,
350 0x09049800,
351 0x950a0598,
352 0x44b60858,
353 0x0548fd18,
354 0x98ff55c4,
355 0x07980b06,
356 0x0878950c,
357 0xfd1864b6,
358 0x77c40568,
359 0x0d0898ff,
360 0x580284b6,
361 0x95f9a889,
362 0xf9a98958,
363 0x013cf495,
364 0x3cf403f8,
365 0xf803f861,
366 0x18489503,
367 0xbb084994,
368 0x81b60095,
369 0x09088000,
370 0x950a0980,
371 0x69941868,
372 0x0097bb08,
373 0x800081b6,
374 0x09800b08,
375 0x023cf40c,
376 0xf05047f0,
377 0x04fa0643,
378 0xf803f805,
379/* 0x0321: crypt_copy_prep */
380 0x203cf500,
381 0x003cf594,
382 0x003cf588,
383/* 0x032f: crypt_store_prep */
384 0xf500f88c,
385 0xf594103c,
386 0xf88c063c,
387/* 0x0339: crypt_ecb_e_prep */
388 0x303cf500,
389 0x003cf594,
390 0x003cf588,
391 0x003cf5d0,
392/* 0x034b: crypt_ecb_d_prep */
393 0xf500f88c,
394 0xf5c8773c,
395 0xf594303c,
396 0xf588003c,
397 0xf5d4003c,
398 0xf88c003c,
399/* 0x0361: crypt_cbc_e_prep */
400 0x403cf500,
401 0x003cf594,
402 0x063cf588,
403 0x663cf5ac,
404 0x063cf5d0,
405/* 0x0377: crypt_cbc_d_prep */
406 0xf500f88c,
407 0xf5c8773c,
408 0xf594503c,
409 0xf584623c,
410 0xf588063c,
411 0xf5d4603c,
412 0xf5ac203c,
413 0xf88c003c,
414/* 0x0395: crypt_pcbc_e_prep */
415 0x503cf500,
416 0x003cf594,
417 0x063cf588,
418 0x663cf5ac,
419 0x063cf5d0,
420 0x063cf58c,
421/* 0x03af: crypt_pcbc_d_prep */
422 0xf500f8ac,
423 0xf5c8773c,
424 0xf594503c,
425 0xf588003c,
426 0xf5d4013c,
427 0xf5ac163c,
428 0xf58c063c,
429 0xf8ac063c,
430/* 0x03cd: crypt_cfb_e_prep */
431 0x403cf500,
432 0x663cf594,
433 0x003cf5d0,
434 0x063cf588,
435 0x063cf5ac,
436/* 0x03e3: crypt_cfb_d_prep */
437 0xf500f88c,
438 0xf594403c,
439 0xf5d0603c,
440 0xf588063c,
441 0xf5ac603c,
442 0xf88c003c,
443/* 0x03f9: crypt_ofb_prep */
444 0x403cf500,
445 0x663cf594,
446 0x003cf5d0,
447 0x603cf588,
448 0x003cf5ac,
449/* 0x040f: crypt_ctr_prep */
450 0xf500f88c,
451 0xf594503c,
452 0xf5d0613c,
453 0xf5b0163c,
454 0xf588003c,
455 0xf5ac103c,
456 0xf88c003c,
457/* 0x0429: crypt_cbc_mac_prep */
458 0x303cf500,
459 0x003cf594,
460 0x063cf588,
461 0x663cf5ac,
462/* 0x043b: crypt_cmac_finish_complete_prep */
463 0xf500f8d0,
464 0xf594703c,
465 0xf588003c,
466 0xf5ac063c,
467 0xf5ac003c,
468 0xf5d0003c,
469 0xf5bc003c,
470 0xf5ac063c,
471 0xf8d0663c,
472/* 0x045d: crypt_cmac_finish_partial_prep */
473 0x803cf500,
474 0x003cf594,
475 0x063cf588,
476 0x003cf5ac,
477 0x003cf5ac,
478 0x003cf5d0,
479 0x003cf5bc,
480 0x063cf5bc,
481 0x663cf5ac,
482/* 0x0483: crypt_do_in */
483 0xbb00f8d0,
484 0x47fe0035,
485 0x8097f100,
486 0x0293f000,
487/* 0x0490: crypt_do_in_loop */
488 0xf80559fa,
489 0x223cf403,
490 0xf50609fa,
491 0xf898103c,
492 0x1050b603,
493 0xf40453b8,
494 0x3cf4e91b,
495 0xf803f801,
496/* 0x04b1: crypt_do_out */
497 0x0037bb00,
498 0xf10067fe,
499 0xf0008097,
500/* 0x04be: crypt_do_out_loop */
501 0x3cf50293,
502 0x3cf49810,
503 0x0579fa61,
504 0xf40679fa,
505 0x03f8013c,
506 0xb81070b6,
507 0x1bf40473,
508/* 0x04db: crypt_do_inout */
509 0xbb00f8e8,
510 0x97f10035,
511 0x93f00080,
512/* 0x04e5: crypt_do_inout_loop */
513 0x0047fe02,
514 0xf80559fa,
515 0x213cf403,
516 0xf50609fa,
517 0xf498103c,
518 0x67fe613c,
519 0x0579fa00,
520 0xf40679fa,
521 0x03f8013c,
522 0xb61050b6,
523 0x53b81070,
524 0xd41bf404,
525 0x000000f8,
526 0x00000000,
527 0x00000000,
528 0x00000000,
529 0x00000000,
530 0x00000000,
531 0x00000000,
532 0x00000000,
533 0x00000000,
534 0x00000000,
535 0x00000000,
536 0x00000000,
537 0x00000000,
538 0x00000000,
539 0x00000000,
540 0x00000000,
541 0x00000000,
542 0x00000000,
543 0x00000000,
544 0x00000000,
545 0x00000000,
546 0x00000000,
547 0x00000000,
548 0x00000000,
549 0x00000000,
550 0x00000000,
551 0x00000000,
552 0x00000000,
553 0x00000000,
554 0x00000000,
555 0x00000000,
556 0x00000000,
557 0x00000000,
558 0x00000000,
559 0x00000000,
560 0x00000000,
561 0x00000000,
562 0x00000000,
563 0x00000000,
564 0x00000000,
565 0x00000000,
566 0x00000000,
567 0x00000000,
568 0x00000000,
569 0x00000000,
570 0x00000000,
571 0x00000000,
572 0x00000000,
573 0x00000000,
574 0x00000000,
575 0x00000000,
576 0x00000000,
577 0x00000000,
578 0x00000000,
579 0x00000000,
580 0x00000000,
581 0x00000000,
582 0x00000000,
583 0x00000000,
584};
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.c b/drivers/gpu/drm/nouveau/nva3_copy.c
index 8f356d58e409..0387dc7f4f42 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.c
+++ b/drivers/gpu/drm/nouveau/nva3_copy.c
@@ -79,29 +79,13 @@ static void
79nva3_copy_context_del(struct nouveau_channel *chan, int engine) 79nva3_copy_context_del(struct nouveau_channel *chan, int engine)
80{ 80{
81 struct nouveau_gpuobj *ctx = chan->engctx[engine]; 81 struct nouveau_gpuobj *ctx = chan->engctx[engine];
82 struct drm_device *dev = chan->dev; 82 int i;
83 u32 inst;
84
85 inst = (chan->ramin->vinst >> 12);
86 inst |= 0x40000000;
87
88 /* disable fifo access */
89 nv_wr32(dev, 0x104048, 0x00000000);
90 /* mark channel as unloaded if it's currently active */
91 if (nv_rd32(dev, 0x104050) == inst)
92 nv_mask(dev, 0x104050, 0x40000000, 0x00000000);
93 /* mark next channel as invalid if it's about to be loaded */
94 if (nv_rd32(dev, 0x104054) == inst)
95 nv_mask(dev, 0x104054, 0x40000000, 0x00000000);
96 /* restore fifo access */
97 nv_wr32(dev, 0x104048, 0x00000003);
98 83
99 for (inst = 0xc0; inst <= 0xd4; inst += 4) 84 for (i = 0xc0; i <= 0xd4; i += 4)
100 nv_wo32(chan->ramin, inst, 0x00000000); 85 nv_wo32(chan->ramin, i, 0x00000000);
101
102 nouveau_gpuobj_ref(NULL, &ctx);
103 86
104 atomic_dec(&chan->vm->engref[engine]); 87 atomic_dec(&chan->vm->engref[engine]);
88 nouveau_gpuobj_ref(NULL, &ctx);
105 chan->engctx[engine] = ctx; 89 chan->engctx[engine] = ctx;
106} 90}
107 91
@@ -143,13 +127,6 @@ static int
143nva3_copy_fini(struct drm_device *dev, int engine, bool suspend) 127nva3_copy_fini(struct drm_device *dev, int engine, bool suspend)
144{ 128{
145 nv_mask(dev, 0x104048, 0x00000003, 0x00000000); 129 nv_mask(dev, 0x104048, 0x00000003, 0x00000000);
146
147 /* trigger fuc context unload */
148 nv_wait(dev, 0x104008, 0x0000000c, 0x00000000);
149 nv_mask(dev, 0x104054, 0x40000000, 0x00000000);
150 nv_wr32(dev, 0x104000, 0x00000008);
151 nv_wait(dev, 0x104008, 0x00000008, 0x00000000);
152
153 nv_wr32(dev, 0x104014, 0xffffffff); 130 nv_wr32(dev, 0x104014, 0xffffffff);
154 return 0; 131 return 0;
155} 132}
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
index 9e636e6ef6d7..798829353fb6 100644
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -98,7 +98,9 @@ read_pll(struct drm_device *dev, int clk, u32 pll)
98 sclk = read_clk(dev, 0x10 + clk, false); 98 sclk = read_clk(dev, 0x10 + clk, false);
99 } 99 }
100 100
101 return sclk * N / (M * P); 101 if (M * P)
102 return sclk * N / (M * P);
103 return 0;
102} 104}
103 105
104struct creg { 106struct creg {
@@ -182,23 +184,26 @@ prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg)
182 const u32 src1 = 0x004160 + (clk * 4); 184 const u32 src1 = 0x004160 + (clk * 4);
183 const u32 ctrl = pll + 0; 185 const u32 ctrl = pll + 0;
184 const u32 coef = pll + 4; 186 const u32 coef = pll + 4;
185 u32 cntl;
186 187
187 if (!reg->clk && !reg->pll) { 188 if (!reg->clk && !reg->pll) {
188 NV_DEBUG(dev, "no clock for %02x\n", clk); 189 NV_DEBUG(dev, "no clock for %02x\n", clk);
189 return; 190 return;
190 } 191 }
191 192
192 cntl = nv_rd32(dev, ctrl) & 0xfffffff2;
193 if (reg->pll) { 193 if (reg->pll) {
194 nv_mask(dev, src0, 0x00000101, 0x00000101); 194 nv_mask(dev, src0, 0x00000101, 0x00000101);
195 nv_wr32(dev, coef, reg->pll); 195 nv_wr32(dev, coef, reg->pll);
196 nv_wr32(dev, ctrl, cntl | 0x00000015); 196 nv_mask(dev, ctrl, 0x00000015, 0x00000015);
197 nv_mask(dev, ctrl, 0x00000010, 0x00000000);
198 nv_wait(dev, ctrl, 0x00020000, 0x00020000);
199 nv_mask(dev, ctrl, 0x00000010, 0x00000010);
200 nv_mask(dev, ctrl, 0x00000008, 0x00000000);
197 nv_mask(dev, src1, 0x00000100, 0x00000000); 201 nv_mask(dev, src1, 0x00000100, 0x00000000);
198 nv_mask(dev, src1, 0x00000001, 0x00000000); 202 nv_mask(dev, src1, 0x00000001, 0x00000000);
199 } else { 203 } else {
200 nv_mask(dev, src1, 0x003f3141, 0x00000101 | reg->clk); 204 nv_mask(dev, src1, 0x003f3141, 0x00000101 | reg->clk);
201 nv_wr32(dev, ctrl, cntl | 0x0000001d); 205 nv_mask(dev, ctrl, 0x00000018, 0x00000018);
206 udelay(20);
202 nv_mask(dev, ctrl, 0x00000001, 0x00000000); 207 nv_mask(dev, ctrl, 0x00000001, 0x00000000);
203 nv_mask(dev, src0, 0x00000100, 0x00000000); 208 nv_mask(dev, src0, 0x00000100, 0x00000000);
204 nv_mask(dev, src0, 0x00000001, 0x00000000); 209 nv_mask(dev, src0, 0x00000001, 0x00000000);
@@ -230,17 +235,28 @@ nva3_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
230} 235}
231 236
232struct nva3_pm_state { 237struct nva3_pm_state {
238 struct nouveau_pm_level *perflvl;
239
233 struct creg nclk; 240 struct creg nclk;
234 struct creg sclk; 241 struct creg sclk;
235 struct creg mclk;
236 struct creg vdec; 242 struct creg vdec;
237 struct creg unka0; 243 struct creg unka0;
244
245 struct creg mclk;
246 u8 *rammap;
247 u8 rammap_ver;
248 u8 rammap_len;
249 u8 *ramcfg;
250 u8 ramcfg_len;
251 u32 r004018;
252 u32 r100760;
238}; 253};
239 254
240void * 255void *
241nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) 256nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
242{ 257{
243 struct nva3_pm_state *info; 258 struct nva3_pm_state *info;
259 u8 ramcfg_cnt;
244 int ret; 260 int ret;
245 261
246 info = kzalloc(sizeof(*info), GFP_KERNEL); 262 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -267,6 +283,20 @@ nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
267 if (ret < 0) 283 if (ret < 0)
268 goto out; 284 goto out;
269 285
286 info->rammap = nouveau_perf_rammap(dev, perflvl->memory,
287 &info->rammap_ver,
288 &info->rammap_len,
289 &ramcfg_cnt, &info->ramcfg_len);
290 if (info->rammap_ver != 0x10 || info->rammap_len < 5)
291 info->rammap = NULL;
292
293 info->ramcfg = nouveau_perf_ramcfg(dev, perflvl->memory,
294 &info->rammap_ver,
295 &info->ramcfg_len);
296 if (info->rammap_ver != 0x10)
297 info->ramcfg = NULL;
298
299 info->perflvl = perflvl;
270out: 300out:
271 if (ret < 0) { 301 if (ret < 0) {
272 kfree(info); 302 kfree(info);
@@ -287,6 +317,240 @@ nva3_pm_grcp_idle(void *data)
287 return false; 317 return false;
288} 318}
289 319
320static void
321mclk_precharge(struct nouveau_mem_exec_func *exec)
322{
323 nv_wr32(exec->dev, 0x1002d4, 0x00000001);
324}
325
326static void
327mclk_refresh(struct nouveau_mem_exec_func *exec)
328{
329 nv_wr32(exec->dev, 0x1002d0, 0x00000001);
330}
331
332static void
333mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
334{
335 nv_wr32(exec->dev, 0x100210, enable ? 0x80000000 : 0x00000000);
336}
337
338static void
339mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
340{
341 nv_wr32(exec->dev, 0x1002dc, enable ? 0x00000001 : 0x00000000);
342}
343
344static void
345mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
346{
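	/* presumably a posting read, flushing prior writes before the delay */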
347 volatile u32 post = nv_rd32(exec->dev, 0); (void)post;
348 udelay((nsec + 500) / 1000);
349}
350
351static u32
352mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
353{
354 if (mr <= 1)
355 return nv_rd32(exec->dev, 0x1002c0 + ((mr - 0) * 4));
356 if (mr <= 3)
357 return nv_rd32(exec->dev, 0x1002e0 + ((mr - 2) * 4));
358 return 0;
359}
360
361static void
362mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
363{
364 struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
365
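	/* MR0/1 live at 0x1002c0, MR2/3 at 0x1002e0; boards with a second
	 * memory rank mirror each write to the rank-B copy 8 bytes up */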
366 if (mr <= 1) {
367 if (dev_priv->vram_rank_B)
368 nv_wr32(exec->dev, 0x1002c8 + ((mr - 0) * 4), data);
369 nv_wr32(exec->dev, 0x1002c0 + ((mr - 0) * 4), data);
370 } else
371 if (mr <= 3) {
372 if (dev_priv->vram_rank_B)
373 nv_wr32(exec->dev, 0x1002e8 + ((mr - 2) * 4), data);
374 nv_wr32(exec->dev, 0x1002e0 + ((mr - 2) * 4), data);
375 }
376}
377
378static void
379mclk_clock_set(struct nouveau_mem_exec_func *exec)
380{
381 struct drm_device *dev = exec->dev;
382 struct nva3_pm_state *info = exec->priv;
383 u32 ctrl;
384
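	/* the sequence below appears to park MCLK on the bypass source
	 * (0x004000 bit 3) while the PLL coefficients are rewritten */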
385 ctrl = nv_rd32(dev, 0x004000);
386 if (!(ctrl & 0x00000008) && info->mclk.pll) {
387 nv_wr32(dev, 0x004000, (ctrl |= 0x00000008));
388 nv_mask(dev, 0x1110e0, 0x00088000, 0x00088000);
389 nv_wr32(dev, 0x004018, 0x00001000);
390 nv_wr32(dev, 0x004000, (ctrl &= ~0x00000001));
391 nv_wr32(dev, 0x004004, info->mclk.pll);
392 nv_wr32(dev, 0x004000, (ctrl |= 0x00000001));
393 udelay(64);
394 nv_wr32(dev, 0x004018, 0x00005000 | info->r004018);
395 udelay(20);
396 } else
397 if (!info->mclk.pll) {
398 nv_mask(dev, 0x004168, 0x003f3040, info->mclk.clk);
399 nv_wr32(dev, 0x004000, (ctrl |= 0x00000008));
400 nv_mask(dev, 0x1110e0, 0x00088000, 0x00088000);
401 nv_wr32(dev, 0x004018, 0x0000d000 | info->r004018);
402 }
403
404 if (info->rammap) {
405 if (info->ramcfg && (info->rammap[4] & 0x08)) {
406 u32 unk5a0 = (ROM16(info->ramcfg[5]) << 8) |
407 info->ramcfg[5];
408 u32 unk5a4 = ROM16(info->ramcfg[7]);
409 u32 unk804 = (info->ramcfg[9] & 0xf0) << 16 |
410 (info->ramcfg[3] & 0x0f) << 16 |
411 (info->ramcfg[9] & 0x0f) |
412 0x80000000;
413 nv_wr32(dev, 0x1005a0, unk5a0);
414 nv_wr32(dev, 0x1005a4, unk5a4);
415 nv_wr32(dev, 0x10f804, unk804);
416 nv_mask(dev, 0x10053c, 0x00001000, 0x00000000);
417 } else {
418 nv_mask(dev, 0x10053c, 0x00001000, 0x00001000);
419 nv_mask(dev, 0x10f804, 0x80000000, 0x00000000);
420 nv_mask(dev, 0x100760, 0x22222222, info->r100760);
421 nv_mask(dev, 0x1007a0, 0x22222222, info->r100760);
422 nv_mask(dev, 0x1007e0, 0x22222222, info->r100760);
423 }
424 }
425
426 if (info->mclk.pll) {
427 nv_mask(dev, 0x1110e0, 0x00088000, 0x00011000);
428 nv_wr32(dev, 0x004000, (ctrl &= ~0x00000008));
429 }
430}
431
432static void
433mclk_timing_set(struct nouveau_mem_exec_func *exec)
434{
435 struct drm_device *dev = exec->dev;
436 struct nva3_pm_state *info = exec->priv;
437 struct nouveau_pm_level *perflvl = info->perflvl;
438 int i;
439
440 for (i = 0; i < 9; i++)
441 nv_wr32(dev, 0x100220 + (i * 4), perflvl->timing.reg[i]);
442
443 if (info->ramcfg) {
444 u32 data = (info->ramcfg[2] & 0x08) ? 0x00000000 : 0x00001000;
445 nv_mask(dev, 0x100200, 0x00001000, data);
446 }
447
448 if (info->ramcfg) {
449 u32 unk714 = nv_rd32(dev, 0x100714) & ~0xf0000010;
450 u32 unk718 = nv_rd32(dev, 0x100718) & ~0x00000100;
451 u32 unk71c = nv_rd32(dev, 0x10071c) & ~0x00000100;
452 if ( (info->ramcfg[2] & 0x20))
453 unk714 |= 0xf0000000;
454 if (!(info->ramcfg[2] & 0x04))
455 unk714 |= 0x00000010;
456 nv_wr32(dev, 0x100714, unk714);
457
458 if (info->ramcfg[2] & 0x01)
459 unk71c |= 0x00000100;
460 nv_wr32(dev, 0x10071c, unk71c);
461
462 if (info->ramcfg[2] & 0x02)
463 unk718 |= 0x00000100;
464 nv_wr32(dev, 0x100718, unk718);
465
466 if (info->ramcfg[2] & 0x10)
467 nv_wr32(dev, 0x111100, 0x48000000); /*XXX*/
468 }
469}
470
471static void
472prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
473{
474 struct nouveau_mem_exec_func exec = {
475 .dev = dev,
476 .precharge = mclk_precharge,
477 .refresh = mclk_refresh,
478 .refresh_auto = mclk_refresh_auto,
479 .refresh_self = mclk_refresh_self,
480 .wait = mclk_wait,
481 .mrg = mclk_mrg,
482 .mrs = mclk_mrs,
483 .clock_set = mclk_clock_set,
484 .timing_set = mclk_timing_set,
485 .priv = info
486 };
487 u32 ctrl;
488
 489	/* XXX: where does the 750MHz threshold come from? */
490 if (info->perflvl->memory <= 750000) {
491 info->r004018 = 0x10000000;
492 info->r100760 = 0x22222222;
493 }
494
495 ctrl = nv_rd32(dev, 0x004000);
496 if (ctrl & 0x00000008) {
497 if (info->mclk.pll) {
498 nv_mask(dev, 0x004128, 0x00000101, 0x00000101);
499 nv_wr32(dev, 0x004004, info->mclk.pll);
500 nv_wr32(dev, 0x004000, (ctrl |= 0x00000001));
501 nv_wr32(dev, 0x004000, (ctrl &= 0xffffffef));
502 nv_wait(dev, 0x004000, 0x00020000, 0x00020000);
503 nv_wr32(dev, 0x004000, (ctrl |= 0x00000010));
504 nv_wr32(dev, 0x004018, 0x00005000 | info->r004018);
505 nv_wr32(dev, 0x004000, (ctrl |= 0x00000004));
506 }
507 } else {
508 u32 ssel = 0x00000101;
509 if (info->mclk.clk)
510 ssel |= info->mclk.clk;
511 else
512 ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
 513	nv_mask(dev, 0x004168, 0x003f3141, ssel);
514 }
515
516 if (info->ramcfg) {
517 if (info->ramcfg[2] & 0x10) {
518 nv_mask(dev, 0x111104, 0x00000600, 0x00000000);
519 } else {
520 nv_mask(dev, 0x111100, 0x40000000, 0x40000000);
521 nv_mask(dev, 0x111104, 0x00000180, 0x00000000);
522 }
523 }
524 if (info->rammap && !(info->rammap[4] & 0x02))
525 nv_mask(dev, 0x100200, 0x00000800, 0x00000000);
526 nv_wr32(dev, 0x611200, 0x00003300);
 527	if (info->ramcfg && !(info->ramcfg[2] & 0x10))
528 nv_wr32(dev, 0x111100, 0x4c020000); /*XXX*/
529
530 nouveau_mem_exec(&exec, info->perflvl);
531
532 nv_wr32(dev, 0x611200, 0x00003330);
533 if (info->rammap && (info->rammap[4] & 0x02))
534 nv_mask(dev, 0x100200, 0x00000800, 0x00000800);
535 if (info->ramcfg) {
536 if (info->ramcfg[2] & 0x10) {
537 nv_mask(dev, 0x111104, 0x00000180, 0x00000180);
538 nv_mask(dev, 0x111100, 0x40000000, 0x00000000);
539 } else {
540 nv_mask(dev, 0x111104, 0x00000600, 0x00000600);
541 }
542 }
543
544 if (info->mclk.pll) {
545 nv_mask(dev, 0x004168, 0x00000001, 0x00000000);
546 nv_mask(dev, 0x004168, 0x00000100, 0x00000000);
547 } else {
548 nv_mask(dev, 0x004000, 0x00000001, 0x00000000);
549 nv_mask(dev, 0x004128, 0x00000001, 0x00000000);
550 nv_mask(dev, 0x004128, 0x00000100, 0x00000000);
551 }
552}
553
290int 554int
291nva3_pm_clocks_set(struct drm_device *dev, void *pre_state) 555nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
292{ 556{
@@ -316,18 +580,8 @@ nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
316 prog_clk(dev, 0x20, &info->unka0); 580 prog_clk(dev, 0x20, &info->unka0);
317 prog_clk(dev, 0x21, &info->vdec); 581 prog_clk(dev, 0x21, &info->vdec);
318 582
319 if (info->mclk.clk || info->mclk.pll) { 583 if (info->mclk.clk || info->mclk.pll)
320 nv_wr32(dev, 0x100210, 0); 584 prog_mem(dev, info);
321 nv_wr32(dev, 0x1002dc, 1);
322 nv_wr32(dev, 0x004018, 0x00001000);
323 prog_pll(dev, 0x02, 0x004000, &info->mclk);
324 if (nv_rd32(dev, 0x4000) & 0x00000008)
325 nv_wr32(dev, 0x004018, 0x1000d000);
326 else
327 nv_wr32(dev, 0x004018, 0x10005000);
328 nv_wr32(dev, 0x1002dc, 0);
329 nv_wr32(dev, 0x100210, 0x80000000);
330 }
331 585
332 ret = 0; 586 ret = 0;
333 587
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index a495e48197ca..797159e7b7a6 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -43,22 +43,22 @@ nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
43 return ret; 43 return ret;
44 44
45 if (rect->rop != ROP_COPY) { 45 if (rect->rop != ROP_COPY) {
46 BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1); 46 BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
47 OUT_RING (chan, 1); 47 OUT_RING (chan, 1);
48 } 48 }
49 BEGIN_NVC0(chan, 2, NvSub2D, 0x0588, 1); 49 BEGIN_NVC0(chan, NvSub2D, 0x0588, 1);
50 if (info->fix.visual == FB_VISUAL_TRUECOLOR || 50 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
51 info->fix.visual == FB_VISUAL_DIRECTCOLOR) 51 info->fix.visual == FB_VISUAL_DIRECTCOLOR)
52 OUT_RING (chan, ((uint32_t *)info->pseudo_palette)[rect->color]); 52 OUT_RING (chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
53 else 53 else
54 OUT_RING (chan, rect->color); 54 OUT_RING (chan, rect->color);
55 BEGIN_NVC0(chan, 2, NvSub2D, 0x0600, 4); 55 BEGIN_NVC0(chan, NvSub2D, 0x0600, 4);
56 OUT_RING (chan, rect->dx); 56 OUT_RING (chan, rect->dx);
57 OUT_RING (chan, rect->dy); 57 OUT_RING (chan, rect->dy);
58 OUT_RING (chan, rect->dx + rect->width); 58 OUT_RING (chan, rect->dx + rect->width);
59 OUT_RING (chan, rect->dy + rect->height); 59 OUT_RING (chan, rect->dy + rect->height);
60 if (rect->rop != ROP_COPY) { 60 if (rect->rop != ROP_COPY) {
61 BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1); 61 BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
62 OUT_RING (chan, 3); 62 OUT_RING (chan, 3);
63 } 63 }
64 FIRE_RING(chan); 64 FIRE_RING(chan);
@@ -78,14 +78,14 @@ nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
78 if (ret) 78 if (ret)
79 return ret; 79 return ret;
80 80
81 BEGIN_NVC0(chan, 2, NvSub2D, 0x0110, 1); 81 BEGIN_NVC0(chan, NvSub2D, 0x0110, 1);
82 OUT_RING (chan, 0); 82 OUT_RING (chan, 0);
83 BEGIN_NVC0(chan, 2, NvSub2D, 0x08b0, 4); 83 BEGIN_NVC0(chan, NvSub2D, 0x08b0, 4);
84 OUT_RING (chan, region->dx); 84 OUT_RING (chan, region->dx);
85 OUT_RING (chan, region->dy); 85 OUT_RING (chan, region->dy);
86 OUT_RING (chan, region->width); 86 OUT_RING (chan, region->width);
87 OUT_RING (chan, region->height); 87 OUT_RING (chan, region->height);
88 BEGIN_NVC0(chan, 2, NvSub2D, 0x08d0, 4); 88 BEGIN_NVC0(chan, NvSub2D, 0x08d0, 4);
89 OUT_RING (chan, 0); 89 OUT_RING (chan, 0);
90 OUT_RING (chan, region->sx); 90 OUT_RING (chan, region->sx);
91 OUT_RING (chan, 0); 91 OUT_RING (chan, 0);
@@ -116,7 +116,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
116 width = ALIGN(image->width, 32); 116 width = ALIGN(image->width, 32);
117 dwords = (width * image->height) >> 5; 117 dwords = (width * image->height) >> 5;
118 118
119 BEGIN_NVC0(chan, 2, NvSub2D, 0x0814, 2); 119 BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
120 if (info->fix.visual == FB_VISUAL_TRUECOLOR || 120 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
121 info->fix.visual == FB_VISUAL_DIRECTCOLOR) { 121 info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
122 OUT_RING (chan, palette[image->bg_color] | mask); 122 OUT_RING (chan, palette[image->bg_color] | mask);
@@ -125,10 +125,10 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
125 OUT_RING (chan, image->bg_color); 125 OUT_RING (chan, image->bg_color);
126 OUT_RING (chan, image->fg_color); 126 OUT_RING (chan, image->fg_color);
127 } 127 }
128 BEGIN_NVC0(chan, 2, NvSub2D, 0x0838, 2); 128 BEGIN_NVC0(chan, NvSub2D, 0x0838, 2);
129 OUT_RING (chan, image->width); 129 OUT_RING (chan, image->width);
130 OUT_RING (chan, image->height); 130 OUT_RING (chan, image->height);
131 BEGIN_NVC0(chan, 2, NvSub2D, 0x0850, 4); 131 BEGIN_NVC0(chan, NvSub2D, 0x0850, 4);
132 OUT_RING (chan, 0); 132 OUT_RING (chan, 0);
133 OUT_RING (chan, image->dx); 133 OUT_RING (chan, image->dx);
134 OUT_RING (chan, 0); 134 OUT_RING (chan, 0);
@@ -143,7 +143,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
143 143
144 dwords -= push; 144 dwords -= push;
145 145
146 BEGIN_NVC0(chan, 6, NvSub2D, 0x0860, push); 146 BEGIN_NIC0(chan, NvSub2D, 0x0860, push);
147 OUT_RINGp(chan, data, push); 147 OUT_RINGp(chan, data, push);
148 data += push; 148 data += push;
149 } 149 }
@@ -200,47 +200,47 @@ nvc0_fbcon_accel_init(struct fb_info *info)
200 return ret; 200 return ret;
201 } 201 }
202 202
203 BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1); 203 BEGIN_NVC0(chan, NvSub2D, 0x0000, 1);
204 OUT_RING (chan, 0x0000902d); 204 OUT_RING (chan, 0x0000902d);
205 BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2); 205 BEGIN_NVC0(chan, NvSub2D, 0x0104, 2);
206 OUT_RING (chan, upper_32_bits(chan->notifier_vma.offset)); 206 OUT_RING (chan, upper_32_bits(chan->notifier_vma.offset));
207 OUT_RING (chan, lower_32_bits(chan->notifier_vma.offset)); 207 OUT_RING (chan, lower_32_bits(chan->notifier_vma.offset));
208 BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1); 208 BEGIN_NVC0(chan, NvSub2D, 0x0290, 1);
209 OUT_RING (chan, 0); 209 OUT_RING (chan, 0);
210 BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1); 210 BEGIN_NVC0(chan, NvSub2D, 0x0888, 1);
211 OUT_RING (chan, 1); 211 OUT_RING (chan, 1);
212 BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1); 212 BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
213 OUT_RING (chan, 3); 213 OUT_RING (chan, 3);
214 BEGIN_NVC0(chan, 2, NvSub2D, 0x02a0, 1); 214 BEGIN_NVC0(chan, NvSub2D, 0x02a0, 1);
215 OUT_RING (chan, 0x55); 215 OUT_RING (chan, 0x55);
216 BEGIN_NVC0(chan, 2, NvSub2D, 0x08c0, 4); 216 BEGIN_NVC0(chan, NvSub2D, 0x08c0, 4);
217 OUT_RING (chan, 0); 217 OUT_RING (chan, 0);
218 OUT_RING (chan, 1); 218 OUT_RING (chan, 1);
219 OUT_RING (chan, 0); 219 OUT_RING (chan, 0);
220 OUT_RING (chan, 1); 220 OUT_RING (chan, 1);
221 BEGIN_NVC0(chan, 2, NvSub2D, 0x0580, 2); 221 BEGIN_NVC0(chan, NvSub2D, 0x0580, 2);
222 OUT_RING (chan, 4); 222 OUT_RING (chan, 4);
223 OUT_RING (chan, format); 223 OUT_RING (chan, format);
224 BEGIN_NVC0(chan, 2, NvSub2D, 0x02e8, 2); 224 BEGIN_NVC0(chan, NvSub2D, 0x02e8, 2);
225 OUT_RING (chan, 2); 225 OUT_RING (chan, 2);
226 OUT_RING (chan, 1); 226 OUT_RING (chan, 1);
227 227
228 BEGIN_NVC0(chan, 2, NvSub2D, 0x0804, 1); 228 BEGIN_NVC0(chan, NvSub2D, 0x0804, 1);
229 OUT_RING (chan, format); 229 OUT_RING (chan, format);
230 BEGIN_NVC0(chan, 2, NvSub2D, 0x0800, 1); 230 BEGIN_NVC0(chan, NvSub2D, 0x0800, 1);
231 OUT_RING (chan, 1); 231 OUT_RING (chan, 1);
232 BEGIN_NVC0(chan, 2, NvSub2D, 0x0808, 3); 232 BEGIN_NVC0(chan, NvSub2D, 0x0808, 3);
233 OUT_RING (chan, 0); 233 OUT_RING (chan, 0);
234 OUT_RING (chan, 0); 234 OUT_RING (chan, 0);
235 OUT_RING (chan, 1); 235 OUT_RING (chan, 1);
236 BEGIN_NVC0(chan, 2, NvSub2D, 0x081c, 1); 236 BEGIN_NVC0(chan, NvSub2D, 0x081c, 1);
237 OUT_RING (chan, 1); 237 OUT_RING (chan, 1);
238 BEGIN_NVC0(chan, 2, NvSub2D, 0x0840, 4); 238 BEGIN_NVC0(chan, NvSub2D, 0x0840, 4);
239 OUT_RING (chan, 0); 239 OUT_RING (chan, 0);
240 OUT_RING (chan, 1); 240 OUT_RING (chan, 1);
241 OUT_RING (chan, 0); 241 OUT_RING (chan, 0);
242 OUT_RING (chan, 1); 242 OUT_RING (chan, 1);
243 BEGIN_NVC0(chan, 2, NvSub2D, 0x0200, 10); 243 BEGIN_NVC0(chan, NvSub2D, 0x0200, 10);
244 OUT_RING (chan, format); 244 OUT_RING (chan, format);
245 OUT_RING (chan, 1); 245 OUT_RING (chan, 1);
246 OUT_RING (chan, 0); 246 OUT_RING (chan, 0);
@@ -251,7 +251,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
251 OUT_RING (chan, info->var.yres_virtual); 251 OUT_RING (chan, info->var.yres_virtual);
252 OUT_RING (chan, upper_32_bits(fb->vma.offset)); 252 OUT_RING (chan, upper_32_bits(fb->vma.offset));
253 OUT_RING (chan, lower_32_bits(fb->vma.offset)); 253 OUT_RING (chan, lower_32_bits(fb->vma.offset));
254 BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10); 254 BEGIN_NVC0(chan, NvSub2D, 0x0230, 10);
255 OUT_RING (chan, format); 255 OUT_RING (chan, format);
256 OUT_RING (chan, 1); 256 OUT_RING (chan, 1);
257 OUT_RING (chan, 0); 257 OUT_RING (chan, 0);
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
new file mode 100644
index 000000000000..47ab388a606e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -0,0 +1,184 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_dma.h"
28#include "nouveau_fifo.h"
29#include "nouveau_ramht.h"
30#include "nouveau_fence.h"
31
32struct nvc0_fence_priv {
33 struct nouveau_fence_priv base;
34 struct nouveau_bo *bo;
35};
36
37struct nvc0_fence_chan {
38 struct nouveau_fence_chan base;
39 struct nouveau_vma vma;
40};
41
42static int
43nvc0_fence_emit(struct nouveau_fence *fence)
44{
45 struct nouveau_channel *chan = fence->channel;
46 struct nvc0_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
47 u64 addr = fctx->vma.offset + chan->id * 16;
48 int ret;
49
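	/* a fence is a semaphore write of the sequence number into this
	 * channel's 16-byte slot of the shared fence buffer */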
50 ret = RING_SPACE(chan, 5);
51 if (ret == 0) {
52 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
53 OUT_RING (chan, upper_32_bits(addr));
54 OUT_RING (chan, lower_32_bits(addr));
55 OUT_RING (chan, fence->sequence);
56 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
57 FIRE_RING (chan);
58 }
59
60 return ret;
61}
62
63static int
64nvc0_fence_sync(struct nouveau_fence *fence,
65 struct nouveau_channel *prev, struct nouveau_channel *chan)
66{
67 struct nvc0_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
68 u64 addr = fctx->vma.offset + prev->id * 16;
69 int ret;
70
71 ret = RING_SPACE(chan, 5);
72 if (ret == 0) {
73 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
74 OUT_RING (chan, upper_32_bits(addr));
75 OUT_RING (chan, lower_32_bits(addr));
76 OUT_RING (chan, fence->sequence);
77 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL |
78 NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
79 FIRE_RING (chan);
80 }
81
82 return ret;
83}
84
85static u32
86nvc0_fence_read(struct nouveau_channel *chan)
87{
88 struct nvc0_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
89 return nouveau_bo_rd32(priv->bo, chan->id * 16/4);
90}
91
92static void
93nvc0_fence_context_del(struct nouveau_channel *chan, int engine)
94{
95 struct nvc0_fence_priv *priv = nv_engine(chan->dev, engine);
96 struct nvc0_fence_chan *fctx = chan->engctx[engine];
97
98 nouveau_bo_vma_del(priv->bo, &fctx->vma);
99 nouveau_fence_context_del(&fctx->base);
100 chan->engctx[engine] = NULL;
101 kfree(fctx);
102}
103
104static int
105nvc0_fence_context_new(struct nouveau_channel *chan, int engine)
106{
107 struct nvc0_fence_priv *priv = nv_engine(chan->dev, engine);
108 struct nvc0_fence_chan *fctx;
109 int ret;
110
111 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
112 if (!fctx)
113 return -ENOMEM;
114
115 nouveau_fence_context_new(&fctx->base);
116
117 ret = nouveau_bo_vma_add(priv->bo, chan->vm, &fctx->vma);
118 if (ret)
119 nvc0_fence_context_del(chan, engine);
120
121 nouveau_bo_wr32(priv->bo, chan->id * 16/4, 0x00000000);
122 return ret;
123}
124
125static int
126nvc0_fence_fini(struct drm_device *dev, int engine, bool suspend)
127{
128 return 0;
129}
130
131static int
132nvc0_fence_init(struct drm_device *dev, int engine)
133{
134 return 0;
135}
136
137static void
138nvc0_fence_destroy(struct drm_device *dev, int engine)
139{
140 struct drm_nouveau_private *dev_priv = dev->dev_private;
141 struct nvc0_fence_priv *priv = nv_engine(dev, engine);
142
143 nouveau_bo_unmap(priv->bo);
144 nouveau_bo_ref(NULL, &priv->bo);
145 dev_priv->eng[engine] = NULL;
146 kfree(priv);
147}
148
149int
150nvc0_fence_create(struct drm_device *dev)
151{
152 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
153 struct drm_nouveau_private *dev_priv = dev->dev_private;
154 struct nvc0_fence_priv *priv;
155 int ret;
156
157 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
158 if (!priv)
159 return -ENOMEM;
160
161 priv->base.engine.destroy = nvc0_fence_destroy;
162 priv->base.engine.init = nvc0_fence_init;
163 priv->base.engine.fini = nvc0_fence_fini;
164 priv->base.engine.context_new = nvc0_fence_context_new;
165 priv->base.engine.context_del = nvc0_fence_context_del;
166 priv->base.emit = nvc0_fence_emit;
167 priv->base.sync = nvc0_fence_sync;
168 priv->base.read = nvc0_fence_read;
169 dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
170
171 ret = nouveau_bo_new(dev, 16 * pfifo->channels, 0, TTM_PL_FLAG_VRAM,
172 0, 0, NULL, &priv->bo);
173 if (ret == 0) {
174 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
175 if (ret == 0)
176 ret = nouveau_bo_map(priv->bo);
177 if (ret)
178 nouveau_bo_ref(NULL, &priv->bo);
179 }
180
181 if (ret)
182 nvc0_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
183 return ret;
184}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index 50d68a7a1379..7d85553d518c 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -26,10 +26,12 @@
26 26
27#include "nouveau_drv.h" 27#include "nouveau_drv.h"
28#include "nouveau_mm.h" 28#include "nouveau_mm.h"
29#include "nouveau_fifo.h"
29 30
30static void nvc0_fifo_isr(struct drm_device *); 31static void nvc0_fifo_isr(struct drm_device *);
31 32
32struct nvc0_fifo_priv { 33struct nvc0_fifo_priv {
34 struct nouveau_fifo_priv base;
33 struct nouveau_gpuobj *playlist[2]; 35 struct nouveau_gpuobj *playlist[2];
34 int cur_playlist; 36 int cur_playlist;
35 struct nouveau_vma user_vma; 37 struct nouveau_vma user_vma;
@@ -37,8 +39,8 @@ struct nvc0_fifo_priv {
37}; 39};
38 40
39struct nvc0_fifo_chan { 41struct nvc0_fifo_chan {
42 struct nouveau_fifo_chan base;
40 struct nouveau_gpuobj *user; 43 struct nouveau_gpuobj *user;
41 struct nouveau_gpuobj *ramfc;
42}; 44};
43 45
44static void 46static void
@@ -46,8 +48,7 @@ nvc0_fifo_playlist_update(struct drm_device *dev)
46{ 48{
47 struct drm_nouveau_private *dev_priv = dev->dev_private; 49 struct drm_nouveau_private *dev_priv = dev->dev_private;
48 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; 50 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
49 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 51 struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
50 struct nvc0_fifo_priv *priv = pfifo->priv;
51 struct nouveau_gpuobj *cur; 52 struct nouveau_gpuobj *cur;
52 int i, p; 53 int i, p;
53 54
@@ -69,59 +70,20 @@ nvc0_fifo_playlist_update(struct drm_device *dev)
69 NV_ERROR(dev, "PFIFO - playlist update failed\n"); 70 NV_ERROR(dev, "PFIFO - playlist update failed\n");
70} 71}
71 72
72void 73static int
73nvc0_fifo_disable(struct drm_device *dev) 74nvc0_fifo_context_new(struct nouveau_channel *chan, int engine)
74{
75}
76
77void
78nvc0_fifo_enable(struct drm_device *dev)
79{
80}
81
82bool
83nvc0_fifo_reassign(struct drm_device *dev, bool enable)
84{
85 return false;
86}
87
88bool
89nvc0_fifo_cache_pull(struct drm_device *dev, bool enable)
90{
91 return false;
92}
93
94int
95nvc0_fifo_channel_id(struct drm_device *dev)
96{
97 return 127;
98}
99
100int
101nvc0_fifo_create_context(struct nouveau_channel *chan)
102{ 75{
103 struct drm_device *dev = chan->dev; 76 struct drm_device *dev = chan->dev;
104 struct drm_nouveau_private *dev_priv = dev->dev_private; 77 struct drm_nouveau_private *dev_priv = dev->dev_private;
105 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; 78 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
106 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 79 struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
107 struct nvc0_fifo_priv *priv = pfifo->priv; 80 struct nvc0_fifo_chan *fctx;
108 struct nvc0_fifo_chan *fifoch;
109 u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4; 81 u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
110 int ret; 82 int ret, i;
111 83
112 chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL); 84 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
113 if (!chan->fifo_priv) 85 if (!fctx)
114 return -ENOMEM; 86 return -ENOMEM;
115 fifoch = chan->fifo_priv;
116
117 /* allocate vram for control regs, map into polling area */
118 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
119 NVOBJ_FLAG_ZERO_ALLOC, &fifoch->user);
120 if (ret)
121 goto error;
122
123 nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
124 *(struct nouveau_mem **)fifoch->user->node);
125 87
126 chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) + 88 chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
127 priv->user_vma.offset + (chan->id * 0x1000), 89 priv->user_vma.offset + (chan->id * 0x1000),
@@ -131,176 +93,77 @@ nvc0_fifo_create_context(struct nouveau_channel *chan)
 		goto error;
 	}
 
-	/* ramfc */
-	ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
-				      chan->ramin->vinst, 0x100,
-				      NVOBJ_FLAG_ZERO_ALLOC, &fifoch->ramfc);
+	/* allocate vram for control regs, map into polling area */
+	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
+				 NVOBJ_FLAG_ZERO_ALLOC, &fctx->user);
 	if (ret)
 		goto error;
 
-	nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(fifoch->user->vinst));
-	nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(fifoch->user->vinst));
-	nv_wo32(fifoch->ramfc, 0x10, 0x0000face);
-	nv_wo32(fifoch->ramfc, 0x30, 0xfffff902);
-	nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt));
-	nv_wo32(fifoch->ramfc, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
+	nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
+			  *(struct nouveau_mem **)fctx->user->node);
+
+	for (i = 0; i < 0x100; i += 4)
+		nv_wo32(chan->ramin, i, 0x00000000);
+	nv_wo32(chan->ramin, 0x08, lower_32_bits(fctx->user->vinst));
+	nv_wo32(chan->ramin, 0x0c, upper_32_bits(fctx->user->vinst));
+	nv_wo32(chan->ramin, 0x10, 0x0000face);
+	nv_wo32(chan->ramin, 0x30, 0xfffff902);
+	nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
+	nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
 			upper_32_bits(ib_virt));
-	nv_wo32(fifoch->ramfc, 0x54, 0x00000002);
-	nv_wo32(fifoch->ramfc, 0x84, 0x20400000);
-	nv_wo32(fifoch->ramfc, 0x94, 0x30000001);
-	nv_wo32(fifoch->ramfc, 0x9c, 0x00000100);
-	nv_wo32(fifoch->ramfc, 0xa4, 0x1f1f1f1f);
-	nv_wo32(fifoch->ramfc, 0xa8, 0x1f1f1f1f);
-	nv_wo32(fifoch->ramfc, 0xac, 0x0000001f);
-	nv_wo32(fifoch->ramfc, 0xb8, 0xf8000000);
-	nv_wo32(fifoch->ramfc, 0xf8, 0x10003080); /* 0x002310 */
-	nv_wo32(fifoch->ramfc, 0xfc, 0x10000010); /* 0x002350 */
+	nv_wo32(chan->ramin, 0x54, 0x00000002);
+	nv_wo32(chan->ramin, 0x84, 0x20400000);
+	nv_wo32(chan->ramin, 0x94, 0x30000001);
+	nv_wo32(chan->ramin, 0x9c, 0x00000100);
+	nv_wo32(chan->ramin, 0xa4, 0x1f1f1f1f);
+	nv_wo32(chan->ramin, 0xa8, 0x1f1f1f1f);
+	nv_wo32(chan->ramin, 0xac, 0x0000001f);
+	nv_wo32(chan->ramin, 0xb8, 0xf8000000);
+	nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
+	nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
 	pinstmem->flush(dev);
 
 	nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
 			(chan->ramin->vinst >> 12));
 	nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
 	nvc0_fifo_playlist_update(dev);
-	return 0;
 
 error:
-	pfifo->destroy_context(chan);
+	if (ret)
+		priv->base.base.context_del(chan, engine);
 	return ret;
 }
 
-void
-nvc0_fifo_destroy_context(struct nouveau_channel *chan)
+static void
+nvc0_fifo_context_del(struct nouveau_channel *chan, int engine)
 {
+	struct nvc0_fifo_chan *fctx = chan->engctx[engine];
 	struct drm_device *dev = chan->dev;
-	struct nvc0_fifo_chan *fifoch;
 
 	nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
 	nv_wr32(dev, 0x002634, chan->id);
 	if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
 		NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
-
 	nvc0_fifo_playlist_update(dev);
-
 	nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);
 
+	nouveau_gpuobj_ref(NULL, &fctx->user);
 	if (chan->user) {
 		iounmap(chan->user);
 		chan->user = NULL;
 	}
 
-	fifoch = chan->fifo_priv;
-	chan->fifo_priv = NULL;
-	if (!fifoch)
-		return;
-
-	nouveau_gpuobj_ref(NULL, &fifoch->ramfc);
-	nouveau_gpuobj_ref(NULL, &fifoch->user);
-	kfree(fifoch);
-}
-
-int
-nvc0_fifo_load_context(struct nouveau_channel *chan)
-{
-	return 0;
-}
-
-int
-nvc0_fifo_unload_context(struct drm_device *dev)
-{
-	int i;
-
-	for (i = 0; i < 128; i++) {
-		if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
-			continue;
-
-		nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
-		nv_wr32(dev, 0x002634, i);
-		if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
-			NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
-				i, nv_rd32(dev, 0x002634));
-			return -EBUSY;
-		}
-	}
-
-	return 0;
-}
-
-static void
-nvc0_fifo_destroy(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
-	struct nvc0_fifo_priv *priv;
-
-	priv = pfifo->priv;
-	if (!priv)
-		return;
-
-	nouveau_vm_put(&priv->user_vma);
-	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
-	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
-	kfree(priv);
-}
-
-void
-nvc0_fifo_takedown(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x002140, 0x00000000);
-	nvc0_fifo_destroy(dev);
+	chan->engctx[engine] = NULL;
+	kfree(fctx);
 }
 
 static int
-nvc0_fifo_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
-	struct nvc0_fifo_priv *priv;
-	int ret;
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-	pfifo->priv = priv;
-
-	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
-				 &priv->playlist[0]);
-	if (ret)
-		goto error;
-
-	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
-				 &priv->playlist[1]);
-	if (ret)
-		goto error;
-
-	ret = nouveau_vm_get(dev_priv->bar1_vm, pfifo->channels * 0x1000,
-			     12, NV_MEM_ACCESS_RW, &priv->user_vma);
-	if (ret)
-		goto error;
-
-	nouveau_irq_register(dev, 8, nvc0_fifo_isr);
-	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
-	return 0;
-
-error:
-	nvc0_fifo_destroy(dev);
-	return ret;
-}
-
-int
-nvc0_fifo_init(struct drm_device *dev)
+nvc0_fifo_init(struct drm_device *dev, int engine)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
 	struct nouveau_channel *chan;
-	struct nvc0_fifo_priv *priv;
-	int ret, i;
-
-	if (!pfifo->priv) {
-		ret = nvc0_fifo_create(dev);
-		if (ret)
-			return ret;
-	}
-	priv = pfifo->priv;
+	int i;
 
 	/* reset PFIFO, enable all available PSUBFIFO areas */
 	nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
@@ -338,7 +201,7 @@ nvc0_fifo_init(struct drm_device *dev)
 	/* restore PFIFO context table */
 	for (i = 0; i < 128; i++) {
 		chan = dev_priv->channels.ptr[i];
-		if (!chan || !chan->fifo_priv)
+		if (!chan || !chan->engctx[engine])
 			continue;
 
 		nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
@@ -350,6 +213,29 @@ nvc0_fifo_init(struct drm_device *dev)
 	return 0;
 }
 
+static int
+nvc0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+{
+	int i;
+
+	for (i = 0; i < 128; i++) {
+		if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
+			continue;
+
+		nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
+		nv_wr32(dev, 0x002634, i);
+		if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
+			NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
+				i, nv_rd32(dev, 0x002634));
+			return -EBUSY;
+		}
+	}
+
+	nv_wr32(dev, 0x002140, 0x00000000);
+	return 0;
+}
+
+
 struct nouveau_enum nvc0_fifo_fault_unit[] = {
 	{ 0x00, "PGRAPH" },
 	{ 0x03, "PEEPHOLE" },
@@ -439,13 +325,14 @@ nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
 static int
 nvc0_fifo_page_flip(struct drm_device *dev, u32 chid)
 {
+	struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = NULL;
 	unsigned long flags;
 	int ret = -EINVAL;
 
 	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels)) {
+	if (likely(chid >= 0 && chid < priv->base.channels)) {
 		chan = dev_priv->channels.ptr[chid];
 		if (likely(chan))
 			ret = nouveau_finish_page_flip(chan, NULL);
@@ -534,3 +421,56 @@ nvc0_fifo_isr(struct drm_device *dev)
 		nv_wr32(dev, 0x002140, 0);
 	}
 }
+
+static void
+nvc0_fifo_destroy(struct drm_device *dev, int engine)
+{
+	struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	nouveau_vm_put(&priv->user_vma);
+	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+
+	dev_priv->eng[engine] = NULL;
+	kfree(priv);
+}
+
+int
+nvc0_fifo_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvc0_fifo_priv *priv;
+	int ret;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->base.base.destroy = nvc0_fifo_destroy;
+	priv->base.base.init = nvc0_fifo_init;
+	priv->base.base.fini = nvc0_fifo_fini;
+	priv->base.base.context_new = nvc0_fifo_context_new;
+	priv->base.base.context_del = nvc0_fifo_context_del;
+	priv->base.channels = 128;
+	dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[0]);
+	if (ret)
+		goto error;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[1]);
+	if (ret)
+		goto error;
+
+	ret = nouveau_vm_get(dev_priv->bar1_vm, priv->base.channels * 0x1000,
+			     12, NV_MEM_ACCESS_RW, &priv->user_vma);
+	if (ret)
+		goto error;
+
+	nouveau_irq_register(dev, 8, nvc0_fifo_isr);
+error:
+	if (ret)
+		priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
+	return ret;
+}
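
The hunks above replace the old dedicated PFIFO hooks (create_context, destroy_context, load/unload) with the generic per-engine vtable that the rest of this series uses: a priv structure embeds a base engine object whose function pointers are filled in at create time and registered in dev_priv->eng[]. An illustrative, self-contained C sketch of that pattern follows; the names engine, my_fifo and eng_table are invented stand-ins for nouveau_exec_engine, nvc0_fifo_priv and dev_priv->eng[], not driver API.

#include <stdio.h>
#include <stdlib.h>

struct engine {                      /* role of the base engine vtable */
	int  (*init)(struct engine *);
	void (*destroy)(struct engine *);
};

struct my_fifo {                     /* role of nvc0_fifo_priv */
	struct engine base;          /* first member, so the cast below works */
	int channels;
};

static int my_fifo_init(struct engine *eng)
{
	struct my_fifo *fifo = (struct my_fifo *)eng;
	printf("fifo init, %d channels\n", fifo->channels);
	return 0;
}

static void my_fifo_destroy(struct engine *eng)
{
	free(eng);
}

int main(void)
{
	struct engine *eng_table[1] = { NULL };   /* like dev_priv->eng[] */
	struct my_fifo *fifo = calloc(1, sizeof(*fifo));

	fifo->base.init = my_fifo_init;           /* fill the vtable ... */
	fifo->base.destroy = my_fifo_destroy;
	fifo->channels = 128;
	eng_table[0] = &fifo->base;               /* ... and register it */

	eng_table[0]->init(eng_table[0]);         /* generic code calls hooks */
	eng_table[0]->destroy(eng_table[0]);
	return 0;
}

The payoff, visible throughout this merge, is that common code can iterate dev_priv->eng[] and call init/fini/context_new uniformly instead of special-casing the fifo.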
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 9066102d1159..2a01e6e47724 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -29,6 +29,7 @@
 
 #include "nouveau_drv.h"
 #include "nouveau_mm.h"
+#include "nouveau_fifo.h"
 
 #include "nvc0_graph.h"
 #include "nvc0_grhub.fuc.h"
@@ -620,13 +621,14 @@ nvc0_graph_init(struct drm_device *dev, int engine)
 int
 nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
 {
+	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan;
 	unsigned long flags;
 	int i;
 
 	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+	for (i = 0; i < pfifo->channels; i++) {
 		chan = dev_priv->channels.ptr[i];
 		if (!chan || !chan->ramin)
 			continue;
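
The loop bound now comes from the fifo engine object instead of the removed dev_priv->engine.fifo field. A minimal user-space model of the isr_chid lookup, with an array standing in for dev_priv->channels.ptr[] (the names here are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define CHANNELS 4

static int isr_chid(const uint64_t *ramin_vinst, uint64_t inst)
{
	int i;

	/* scan every channel slot; skip empty ones, stop on a match */
	for (i = 0; i < CHANNELS; i++) {
		if (ramin_vinst[i] && ramin_vinst[i] == inst)
			break;
	}
	return i;                 /* == CHANNELS when nothing matched */
}

int main(void)
{
	uint64_t ramin[CHANNELS] = { 0x1000, 0x2000, 0x3000, 0 };
	printf("chid %d\n", isr_chid(ramin, 0x2000));
	return 0;
}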
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
index ce65f81bb871..7c95c44e2887 100644
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_pm.c
@@ -164,7 +164,9 @@ struct nvc0_pm_clock {
 };
 
 struct nvc0_pm_state {
+	struct nouveau_pm_level *perflvl;
 	struct nvc0_pm_clock eng[16];
+	struct nvc0_pm_clock mem;
 };
 
 static u32
@@ -303,6 +305,48 @@ calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
 	return 0;
 }
 
+static int
+calc_mem(struct drm_device *dev, struct nvc0_pm_clock *info, u32 freq)
+{
+	struct pll_lims pll;
+	int N, M, P, ret;
+	u32 ctrl;
+
+	/* mclk pll input freq comes from another pll, make sure it's on */
+	ctrl = nv_rd32(dev, 0x132020);
+	if (!(ctrl & 0x00000001)) {
+		/* if not, program it to 567MHz.  nfi where this value comes
+		 * from - it looks like it's in the pll limits table for
+		 * 132000 but the binary driver ignores all my attempts to
+		 * change this value.
+		 */
+		nv_wr32(dev, 0x137320, 0x00000103);
+		nv_wr32(dev, 0x137330, 0x81200606);
+		nv_wait(dev, 0x132020, 0x00010000, 0x00010000);
+		nv_wr32(dev, 0x132024, 0x0001150f);
+		nv_mask(dev, 0x132020, 0x00000001, 0x00000001);
+		nv_wait(dev, 0x137390, 0x00020000, 0x00020000);
+		nv_mask(dev, 0x132020, 0x00000004, 0x00000004);
+	}
+
+	/* for the moment, until the clock tree is better understood, use
+	 * pll mode for all clock frequencies
+	 */
+	ret = get_pll_limits(dev, 0x132000, &pll);
+	if (ret == 0) {
+		pll.refclk = read_pll(dev, 0x132020);
+		if (pll.refclk) {
+			ret = nva3_calc_pll(dev, &pll, freq, &N, NULL, &M, &P);
+			if (ret > 0) {
+				info->coef = (P << 16) | (N << 8) | M;
+				return 0;
+			}
+		}
+	}
+
+	return -EINVAL;
+}
+
 void *
 nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
@@ -335,6 +379,15 @@ nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 		return ERR_PTR(ret);
 	}
 
+	if (perflvl->memory) {
+		ret = calc_mem(dev, &info->mem, perflvl->memory);
+		if (ret) {
+			kfree(info);
+			return ERR_PTR(ret);
+		}
+	}
+
+	info->perflvl = perflvl;
 	return info;
 }
 
@@ -375,12 +428,148 @@ prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
 	nv_mask(dev, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
 }
 
+static void
+mclk_precharge(struct nouveau_mem_exec_func *exec)
+{
+}
+
+static void
+mclk_refresh(struct nouveau_mem_exec_func *exec)
+{
+}
+
+static void
+mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
+{
+	nv_wr32(exec->dev, 0x10f210, enable ? 0x80000000 : 0x00000000);
+}
+
+static void
+mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
+{
+}
+
+static void
+mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
+{
+	udelay((nsec + 500) / 1000);
+}
+
+static u32
+mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
+{
+	struct drm_device *dev = exec->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	if (dev_priv->vram_type != NV_MEM_TYPE_GDDR5) {
+		if (mr <= 1)
+			return nv_rd32(dev, 0x10f300 + ((mr - 0) * 4));
+		return nv_rd32(dev, 0x10f320 + ((mr - 2) * 4));
+	} else {
+		if (mr == 0)
+			return nv_rd32(dev, 0x10f300 + (mr * 4));
+		else
+		if (mr <= 7)
+			return nv_rd32(dev, 0x10f32c + (mr * 4));
+		return nv_rd32(dev, 0x10f34c);
+	}
+}
+
+static void
+mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
+{
+	struct drm_device *dev = exec->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	if (dev_priv->vram_type != NV_MEM_TYPE_GDDR5) {
+		if (mr <= 1) {
+			nv_wr32(dev, 0x10f300 + ((mr - 0) * 4), data);
+			if (dev_priv->vram_rank_B)
+				nv_wr32(dev, 0x10f308 + ((mr - 0) * 4), data);
+		} else
+		if (mr <= 3) {
+			nv_wr32(dev, 0x10f320 + ((mr - 2) * 4), data);
+			if (dev_priv->vram_rank_B)
+				nv_wr32(dev, 0x10f328 + ((mr - 2) * 4), data);
+		}
+	} else {
+		if (mr == 0) nv_wr32(dev, 0x10f300 + (mr * 4), data);
+		else if (mr <= 7) nv_wr32(dev, 0x10f32c + (mr * 4), data);
+		else if (mr == 15) nv_wr32(dev, 0x10f34c, data);
+	}
+}
+
+static void
+mclk_clock_set(struct nouveau_mem_exec_func *exec)
+{
+	struct nvc0_pm_state *info = exec->priv;
+	struct drm_device *dev = exec->dev;
+	u32 ctrl = nv_rd32(dev, 0x132000);
+
+	nv_wr32(dev, 0x137360, 0x00000001);
+	nv_wr32(dev, 0x137370, 0x00000000);
+	nv_wr32(dev, 0x137380, 0x00000000);
+	if (ctrl & 0x00000001)
+		nv_wr32(dev, 0x132000, (ctrl &= ~0x00000001));
+
+	nv_wr32(dev, 0x132004, info->mem.coef);
+	nv_wr32(dev, 0x132000, (ctrl |= 0x00000001));
+	nv_wait(dev, 0x137390, 0x00000002, 0x00000002);
+	nv_wr32(dev, 0x132018, 0x00005000);
+
+	nv_wr32(dev, 0x137370, 0x00000001);
+	nv_wr32(dev, 0x137380, 0x00000001);
+	nv_wr32(dev, 0x137360, 0x00000000);
+}
+
+static void
+mclk_timing_set(struct nouveau_mem_exec_func *exec)
+{
+	struct nvc0_pm_state *info = exec->priv;
+	struct nouveau_pm_level *perflvl = info->perflvl;
+	int i;
+
+	for (i = 0; i < 5; i++)
+		nv_wr32(exec->dev, 0x10f290 + (i * 4), perflvl->timing.reg[i]);
+}
+
+static void
+prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_mem_exec_func exec = {
+		.dev = dev,
+		.precharge = mclk_precharge,
+		.refresh = mclk_refresh,
+		.refresh_auto = mclk_refresh_auto,
+		.refresh_self = mclk_refresh_self,
+		.wait = mclk_wait,
+		.mrg = mclk_mrg,
+		.mrs = mclk_mrs,
+		.clock_set = mclk_clock_set,
+		.timing_set = mclk_timing_set,
+		.priv = info
+	};
+
+	if (dev_priv->chipset < 0xd0)
+		nv_wr32(dev, 0x611200, 0x00003300);
+	else
+		nv_wr32(dev, 0x62c000, 0x03030000);
+
+	nouveau_mem_exec(&exec, info->perflvl);
+
+	if (dev_priv->chipset < 0xd0)
+		nv_wr32(dev, 0x611200, 0x00003300);
+	else
+		nv_wr32(dev, 0x62c000, 0x03030300);
+}
+
 int
 nvc0_pm_clocks_set(struct drm_device *dev, void *data)
 {
 	struct nvc0_pm_state *info = data;
 	int i;
 
+	if (info->mem.coef)
+		prog_mem(dev, info);
+
 	for (i = 0; i < 16; i++) {
 		if (!info->eng[i].freq)
 			continue;
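
calc_mem() above leaves the N/M/P search to nva3_calc_pll() and only packs the result as (P << 16) | (N << 8) | M before it is written to 0x132004. As a rough illustration of what such a search computes, here is a brute-force user-space sketch; the refclk * N / (M * P) relation and the parameter ranges are assumptions for demonstration, whereas the real driver takes both from the VBIOS pll limits table.

#include <stdio.h>

static unsigned calc_coef(unsigned refclk, unsigned target, unsigned *out)
{
	unsigned best_err = ~0u, coef = 0;
	unsigned N, M, P;

	/* exhaustively try small divider/multiplier combinations */
	for (P = 1; P <= 4; P++)
	for (M = 1; M <= 4; M++)
	for (N = 8; N <= 255; N++) {
		unsigned freq = refclk * N / (M * P);
		unsigned err = freq > target ? freq - target : target - freq;
		if (err < best_err) {
			best_err = err;
			coef = (P << 16) | (N << 8) | M;   /* same packing */
			*out = freq;
		}
	}
	return coef;
}

int main(void)
{
	unsigned freq, coef = calc_coef(567000, 1002000, &freq); /* kHz */
	printf("coef 0x%06x -> %u kHz\n", coef, freq);
	return 0;
}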
diff --git a/drivers/gpu/drm/nouveau/nvc0_software.c b/drivers/gpu/drm/nouveau/nvc0_software.c
new file mode 100644
index 000000000000..93e8c164fec6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_software.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
+#include "nouveau_software.h"
+
+#include "nv50_display.h"
+
+struct nvc0_software_priv {
+	struct nouveau_software_priv base;
+};
+
+struct nvc0_software_chan {
+	struct nouveau_software_chan base;
+	struct nouveau_vma dispc_vma[4];
+};
+
+u64
+nvc0_software_crtc(struct nouveau_channel *chan, int crtc)
+{
+	struct nvc0_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
+	return pch->dispc_vma[crtc].offset;
+}
+
+static int
+nvc0_software_context_new(struct nouveau_channel *chan, int engine)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvc0_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
+	struct nvc0_software_chan *pch;
+	int ret = 0, i;
+
+	pch = kzalloc(sizeof(*pch), GFP_KERNEL);
+	if (!pch)
+		return -ENOMEM;
+
+	nouveau_software_context_new(&pch->base);
+	chan->engctx[engine] = pch;
+
+	/* map display semaphore buffers into channel's vm */
+	for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
+		struct nouveau_bo *bo;
+		if (dev_priv->card_type >= NV_D0)
+			bo = nvd0_display_crtc_sema(dev, i);
+		else
+			bo = nv50_display(dev)->crtc[i].sem.bo;
+
+		ret = nouveau_bo_vma_add(bo, chan->vm, &pch->dispc_vma[i]);
+	}
+
+	if (ret)
+		psw->base.base.context_del(chan, engine);
+	return ret;
+}
+
+static void
+nvc0_software_context_del(struct nouveau_channel *chan, int engine)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvc0_software_chan *pch = chan->engctx[engine];
+	int i;
+
+	if (dev_priv->card_type >= NV_D0) {
+		for (i = 0; i < dev->mode_config.num_crtc; i++) {
+			struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
+			nouveau_bo_vma_del(bo, &pch->dispc_vma[i]);
+		}
+	} else
+	if (dev_priv->card_type >= NV_50) {
+		struct nv50_display *disp = nv50_display(dev);
+		for (i = 0; i < dev->mode_config.num_crtc; i++) {
+			struct nv50_display_crtc *dispc = &disp->crtc[i];
+			nouveau_bo_vma_del(dispc->sem.bo, &pch->dispc_vma[i]);
+		}
+	}
+
+	chan->engctx[engine] = NULL;
+	kfree(pch);
+}
+
+static int
+nvc0_software_object_new(struct nouveau_channel *chan, int engine,
+			 u32 handle, u16 class)
+{
+	return 0;
+}
+
+static int
+nvc0_software_init(struct drm_device *dev, int engine)
+{
+	return 0;
+}
+
+static int
+nvc0_software_fini(struct drm_device *dev, int engine, bool suspend)
+{
+	return 0;
+}
+
+static void
+nvc0_software_destroy(struct drm_device *dev, int engine)
+{
+	struct nvc0_software_priv *psw = nv_engine(dev, engine);
+
+	NVOBJ_ENGINE_DEL(dev, SW);
+	kfree(psw);
+}
+
+int
+nvc0_software_create(struct drm_device *dev)
+{
+	struct nvc0_software_priv *psw = kzalloc(sizeof(*psw), GFP_KERNEL);
+	if (!psw)
+		return -ENOMEM;
+
+	psw->base.base.destroy = nvc0_software_destroy;
+	psw->base.base.init = nvc0_software_init;
+	psw->base.base.fini = nvc0_software_fini;
+	psw->base.base.context_new = nvc0_software_context_new;
+	psw->base.base.context_del = nvc0_software_context_del;
+	psw->base.base.object_new = nvc0_software_object_new;
+	nouveau_software_create(&psw->base);
+
+	NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
+	NVOBJ_CLASS(dev, 0x906e, SW);
+	return 0;
+}
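
Note the bookkeeping convention this new file relies on: each channel keeps one opaque slot per engine in chan->engctx[], context_new publishes the slot before finishing setup, and the error path reuses context_del, so context_del must tolerate a partially constructed context. A small user-space sketch of that contract, with invented types standing in for the nouveau structures:

#include <stdlib.h>

enum { ENGINE_SW, ENGINE_NR };

struct channel {
	void *engctx[ENGINE_NR];     /* one opaque slot per engine */
};

struct sw_ctx {
	int vma_mapped;              /* stands in for dispc_vma[] state */
};

static void sw_context_del(struct channel *chan, int engine)
{
	struct sw_ctx *ctx = chan->engctx[engine];
	if (ctx && ctx->vma_mapped)
		ctx->vma_mapped = 0;         /* nouveau_bo_vma_del() here */
	chan->engctx[engine] = NULL;
	free(ctx);                           /* free(NULL) is harmless */
}

static int sw_context_new(struct channel *chan, int engine, int fail)
{
	struct sw_ctx *ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		return -1;
	chan->engctx[engine] = ctx;          /* publish before mapping */
	if (fail) {                          /* mapping failed part-way */
		sw_context_del(chan, engine); /* same cleanup as teardown */
		return -1;
	}
	ctx->vma_mapped = 1;                 /* nouveau_bo_vma_add() here */
	return 0;
}

int main(void)
{
	struct channel chan = { { 0 } };
	sw_context_new(&chan, ENGINE_SW, 0);
	sw_context_del(&chan, ENGINE_SW);
	return 0;
}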
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index 1f3a9b1240e8..c486d3ce3c2c 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -33,6 +33,7 @@
 #include "nouveau_crtc.h"
 #include "nouveau_dma.h"
 #include "nouveau_fb.h"
+#include "nouveau_software.h"
 #include "nv50_display.h"
 
 #define EVO_DMA_NR 9
@@ -284,8 +285,6 @@ nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	u32 *push;
 	int ret;
 
-	evo_sync(crtc->dev, EVO_MASTER);
-
 	swap_interval <<= 4;
 	if (swap_interval == 0)
 		swap_interval |= 0x100;
@@ -300,15 +299,16 @@ nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	if (ret)
 		return ret;
 
-	offset = chan->dispc_vma[nv_crtc->index].offset;
+
+	offset = nvc0_software_crtc(chan, nv_crtc->index);
 	offset += evo->sem.offset;
 
-	BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+	BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
 	OUT_RING  (chan, upper_32_bits(offset));
 	OUT_RING  (chan, lower_32_bits(offset));
 	OUT_RING  (chan, 0xf00d0000 | evo->sem.value);
 	OUT_RING  (chan, 0x1002);
-	BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+	BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
 	OUT_RING  (chan, upper_32_bits(offset));
 	OUT_RING  (chan, lower_32_bits(offset ^ 0x10));
 	OUT_RING  (chan, 0x74b1e000);
diff --git a/drivers/gpu/drm/nouveau/nve0_fifo.c b/drivers/gpu/drm/nouveau/nve0_fifo.c
new file mode 100644
index 000000000000..1855ecbd843b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nve0_fifo.c
@@ -0,0 +1,423 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nouveau_fifo.h"
+
+#define NVE0_FIFO_ENGINE_NUM 32
+
+static void nve0_fifo_isr(struct drm_device *);
+
+struct nve0_fifo_engine {
+	struct nouveau_gpuobj *playlist[2];
+	int cur_playlist;
+};
+
+struct nve0_fifo_priv {
+	struct nouveau_fifo_priv base;
+	struct nve0_fifo_engine engine[NVE0_FIFO_ENGINE_NUM];
+	struct {
+		struct nouveau_gpuobj *mem;
+		struct nouveau_vma bar;
+	} user;
+	int spoon_nr;
+};
+
+struct nve0_fifo_chan {
+	struct nouveau_fifo_chan base;
+	u32 engine;
+};
+
+static void
+nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+	struct nve0_fifo_engine *peng = &priv->engine[engine];
+	struct nouveau_gpuobj *cur;
+	u32 match = (engine << 16) | 0x00000001;
+	int ret, i, p;
+
+	cur = peng->playlist[peng->cur_playlist];
+	if (unlikely(cur == NULL)) {
+		ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 0x1000, 0, &cur);
+		if (ret) {
+			NV_ERROR(dev, "PFIFO: playlist alloc failed\n");
+			return;
+		}
+
+		peng->playlist[peng->cur_playlist] = cur;
+	}
+
+	peng->cur_playlist = !peng->cur_playlist;
+
+	for (i = 0, p = 0; i < priv->base.channels; i++) {
+		u32 ctrl = nv_rd32(dev, 0x800004 + (i * 8)) & 0x001f0001;
+		if (ctrl != match)
+			continue;
+		nv_wo32(cur, p + 0, i);
+		nv_wo32(cur, p + 4, 0x00000000);
+		p += 8;
+	}
+	pinstmem->flush(dev);
+
+	nv_wr32(dev, 0x002270, cur->vinst >> 12);
+	nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3));
+	if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
+		NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine);
+}
+
+static int
+nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	struct nve0_fifo_priv *priv = nv_engine(dev, engine);
+	struct nve0_fifo_chan *fctx;
+	u64 usermem = priv->user.mem->vinst + chan->id * 512;
+	u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
+	int ret = 0, i;
+
+	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	if (!fctx)
+		return -ENOMEM;
+
+	fctx->engine = 0; /* PGRAPH */
+
+	/* allocate vram for control regs, map into polling area */
+	chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
+				priv->user.bar.offset + (chan->id * 512), 512);
+	if (!chan->user) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	for (i = 0; i < 0x100; i += 4)
+		nv_wo32(chan->ramin, i, 0x00000000);
+	nv_wo32(chan->ramin, 0x08, lower_32_bits(usermem));
+	nv_wo32(chan->ramin, 0x0c, upper_32_bits(usermem));
+	nv_wo32(chan->ramin, 0x10, 0x0000face);
+	nv_wo32(chan->ramin, 0x30, 0xfffff902);
+	nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
+	nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
+				   upper_32_bits(ib_virt));
+	nv_wo32(chan->ramin, 0x84, 0x20400000);
+	nv_wo32(chan->ramin, 0x94, 0x30000001);
+	nv_wo32(chan->ramin, 0x9c, 0x00000100);
+	nv_wo32(chan->ramin, 0xac, 0x0000001f);
+	nv_wo32(chan->ramin, 0xe4, 0x00000000);
+	nv_wo32(chan->ramin, 0xe8, chan->id);
+	nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
+	nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
+	pinstmem->flush(dev);
+
+	nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 |
+						(chan->ramin->vinst >> 12));
+	nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
+	nve0_fifo_playlist_update(dev, fctx->engine);
+	nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
+
+error:
+	if (ret)
+		priv->base.base.context_del(chan, engine);
+	return ret;
+}
+
+static void
+nve0_fifo_context_del(struct nouveau_channel *chan, int engine)
+{
+	struct nve0_fifo_chan *fctx = chan->engctx[engine];
+	struct drm_device *dev = chan->dev;
+
+	nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
+	nv_wr32(dev, 0x002634, chan->id);
+	if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
+		NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
+	nve0_fifo_playlist_update(dev, fctx->engine);
+	nv_wr32(dev, 0x800000 + (chan->id * 8), 0x00000000);
+
+	if (chan->user) {
+		iounmap(chan->user);
+		chan->user = NULL;
+	}
+
+	chan->engctx[NVOBJ_ENGINE_FIFO] = NULL;
+	kfree(fctx);
+}
+
+static int
+nve0_fifo_init(struct drm_device *dev, int engine)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nve0_fifo_priv *priv = nv_engine(dev, engine);
+	struct nve0_fifo_chan *fctx;
+	int i;
+
+	/* reset PFIFO, enable all available PSUBFIFO areas */
+	nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
+	nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
+	nv_wr32(dev, 0x000204, 0xffffffff);
+
+	priv->spoon_nr = hweight32(nv_rd32(dev, 0x000204));
+	NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
+
+	/* PSUBFIFO[n] */
+	for (i = 0; i < priv->spoon_nr; i++) {
+		nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+		nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+		nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
+	}
+
+	nv_wr32(dev, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+
+	nv_wr32(dev, 0x002a00, 0xffffffff);
+	nv_wr32(dev, 0x002100, 0xffffffff);
+	nv_wr32(dev, 0x002140, 0xbfffffff);
+
+	/* restore PFIFO context table */
+	for (i = 0; i < priv->base.channels; i++) {
+		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+		if (!chan || !(fctx = chan->engctx[engine]))
+			continue;
+
+		nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 |
+						 (chan->ramin->vinst >> 12));
+		nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
+		nve0_fifo_playlist_update(dev, fctx->engine);
+		nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
+	}
+
+	return 0;
+}
+
+static int
+nve0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+{
+	struct nve0_fifo_priv *priv = nv_engine(dev, engine);
+	int i;
+
+	for (i = 0; i < priv->base.channels; i++) {
+		if (!(nv_rd32(dev, 0x800004 + (i * 8)) & 1))
+			continue;
+
+		nv_mask(dev, 0x800004 + (i * 8), 0x00000800, 0x00000800);
+		nv_wr32(dev, 0x002634, i);
+		if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
+			NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
+				i, nv_rd32(dev, 0x002634));
+			return -EBUSY;
+		}
+	}
+
+	nv_wr32(dev, 0x002140, 0x00000000);
+	return 0;
+}
+
+struct nouveau_enum nve0_fifo_fault_unit[] = {
+	{}
+};
+
+struct nouveau_enum nve0_fifo_fault_reason[] = {
+	{ 0x00, "PT_NOT_PRESENT" },
+	{ 0x01, "PT_TOO_SHORT" },
+	{ 0x02, "PAGE_NOT_PRESENT" },
+	{ 0x03, "VM_LIMIT_EXCEEDED" },
+	{ 0x04, "NO_CHANNEL" },
+	{ 0x05, "PAGE_SYSTEM_ONLY" },
+	{ 0x06, "PAGE_READ_ONLY" },
+	{ 0x0a, "COMPRESSED_SYSRAM" },
+	{ 0x0c, "INVALID_STORAGE_TYPE" },
+	{}
+};
+
+struct nouveau_enum nve0_fifo_fault_hubclient[] = {
+	{}
+};
+
+struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
+	{}
+};
+
+struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
+	{ 0x00200000, "ILLEGAL_MTHD" },
+	{ 0x00800000, "EMPTY_SUBC" },
+	{}
+};
+
+static void
+nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
+{
+	u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
+	u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
+	u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
+	u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
+	u32 client = (stat & 0x00001f00) >> 8;
+
+	NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
+		(stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
+	nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
+	printk("] from ");
+	nouveau_enum_print(nve0_fifo_fault_unit, unit);
+	if (stat & 0x00000040) {
+		printk("/");
+		nouveau_enum_print(nve0_fifo_fault_hubclient, client);
+	} else {
+		printk("/GPC%d/", (stat & 0x1f000000) >> 24);
+		nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
+	}
+	printk(" on channel 0x%010llx\n", (u64)inst << 12);
+}
+
+static void
+nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
+{
+	u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
+	u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
+	u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
+	u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
+	u32 subc = (addr & 0x00070000);
+	u32 mthd = (addr & 0x00003ffc);
+
+	NV_INFO(dev, "PSUBFIFO %d:", unit);
+	nouveau_bitfield_print(nve0_fifo_subfifo_intr, stat);
+	NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
+		unit, chid, subc, mthd, data);
+
+	nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
+	nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
+}
+
+static void
+nve0_fifo_isr(struct drm_device *dev)
+{
+	u32 stat = nv_rd32(dev, 0x002100);
+
+	if (stat & 0x00000100) {
+		NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
+		nv_wr32(dev, 0x002100, 0x00000100);
+		stat &= ~0x00000100;
+	}
+
+	if (stat & 0x10000000) {
+		u32 units = nv_rd32(dev, 0x00259c);
+		u32 u = units;
+
+		while (u) {
+			int i = ffs(u) - 1;
+			nve0_fifo_isr_vm_fault(dev, i);
+			u &= ~(1 << i);
+		}
+
+		nv_wr32(dev, 0x00259c, units);
+		stat &= ~0x10000000;
+	}
+
+	if (stat & 0x20000000) {
+		u32 units = nv_rd32(dev, 0x0025a0);
+		u32 u = units;
+
+		while (u) {
+			int i = ffs(u) - 1;
+			nve0_fifo_isr_subfifo_intr(dev, i);
+			u &= ~(1 << i);
+		}
+
+		nv_wr32(dev, 0x0025a0, units);
+		stat &= ~0x20000000;
+	}
+
+	if (stat & 0x40000000) {
+		NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
+		nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
+		stat &= ~0x40000000;
+	}
+
+	if (stat) {
+		NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
+		nv_wr32(dev, 0x002100, stat);
+		nv_wr32(dev, 0x002140, 0);
+	}
+}
+
+static void
+nve0_fifo_destroy(struct drm_device *dev, int engine)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nve0_fifo_priv *priv = nv_engine(dev, engine);
+	int i;
+
+	nouveau_vm_put(&priv->user.bar);
+	nouveau_gpuobj_ref(NULL, &priv->user.mem);
+
+	for (i = 0; i < NVE0_FIFO_ENGINE_NUM; i++) {
+		nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
+		nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
+	}
+
+	dev_priv->eng[engine] = NULL;
+	kfree(priv);
+}
+
+int
+nve0_fifo_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nve0_fifo_priv *priv;
+	int ret;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->base.base.destroy = nve0_fifo_destroy;
+	priv->base.base.init = nve0_fifo_init;
+	priv->base.base.fini = nve0_fifo_fini;
+	priv->base.base.context_new = nve0_fifo_context_new;
+	priv->base.base.context_del = nve0_fifo_context_del;
+	priv->base.channels = 4096;
+	dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+	ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 512, 0x1000,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
+	if (ret)
+		goto error;
+
+	ret = nouveau_vm_get(dev_priv->bar1_vm, priv->user.mem->size,
+			     12, NV_MEM_ACCESS_RW, &priv->user.bar);
+	if (ret)
+		goto error;
+
+	nouveau_vm_map(&priv->user.bar, *(struct nouveau_mem **)priv->user.mem->node);
+
+	nouveau_irq_register(dev, 8, nve0_fifo_isr);
+error:
+	if (ret)
+		priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
+	return ret;
+}
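
nve0_fifo_playlist_update() above is the core of per-engine scheduling on Kepler: it scans every channel control word, collects the channels bound to the requested engine into 8-byte playlist entries, and submits the entry count as (engine << 20) | (p >> 3). A user-space model of that loop, with a plain array simulating the 0x800004 control registers (names and sizes here are illustrative):

#include <stdio.h>
#include <stdint.h>

#define CHANNELS 8

static unsigned build_playlist(const uint32_t *ctrl, unsigned engine,
			       uint32_t *playlist /* 2 words per entry */)
{
	uint32_t match = (engine << 16) | 0x00000001;
	unsigned i, p = 0;             /* p counts bytes, as in the driver */

	for (i = 0; i < CHANNELS; i++) {
		if ((ctrl[i] & 0x001f0001) != match)
			continue;
		playlist[p / 4 + 0] = i;           /* channel id */
		playlist[p / 4 + 1] = 0x00000000;
		p += 8;
	}
	return p;
}

int main(void)
{
	uint32_t ctrl[CHANNELS] = { 0x00010001, 0, 0x00010001, 0x00020001 };
	uint32_t playlist[2 * CHANNELS];
	unsigned engine = 1, p = build_playlist(ctrl, engine, playlist);

	printf("0x002274 <- 0x%08x (%u entries)\n",
	       (engine << 20) | (p >> 3), p >> 3);
	return 0;
}

Double-buffering (cur_playlist flipping between two gpuobjs) lets the driver build the next playlist while the hardware may still be reading the previous one.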
diff --git a/drivers/gpu/drm/nouveau/nve0_graph.c b/drivers/gpu/drm/nouveau/nve0_graph.c
new file mode 100644
index 000000000000..8a8051b68f10
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nve0_graph.c
@@ -0,0 +1,831 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/firmware.h>
26#include <linux/module.h>
27
28#include "drmP.h"
29
30#include "nouveau_drv.h"
31#include "nouveau_mm.h"
32#include "nouveau_fifo.h"
33
34#include "nve0_graph.h"
35
36static void
37nve0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
38{
39 NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
40 nv_rd32(dev, base + 0x400));
41 NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
42 nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
43 nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
44 NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
45 nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
46 nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
47}
48
49static void
50nve0_graph_ctxctl_debug(struct drm_device *dev)
51{
52 u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
53 u32 gpc;
54
55 nve0_graph_ctxctl_debug_unit(dev, 0x409000);
56 for (gpc = 0; gpc < gpcnr; gpc++)
57 nve0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
58}
59
60static int
61nve0_graph_load_context(struct nouveau_channel *chan)
62{
63 struct drm_device *dev = chan->dev;
64
65 nv_wr32(dev, 0x409840, 0x00000030);
66 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
67 nv_wr32(dev, 0x409504, 0x00000003);
68 if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
69 NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
70
71 return 0;
72}
73
74static int
75nve0_graph_unload_context_to(struct drm_device *dev, u64 chan)
76{
77 nv_wr32(dev, 0x409840, 0x00000003);
78 nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
79 nv_wr32(dev, 0x409504, 0x00000009);
80 if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
81 NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
82 return -EBUSY;
83 }
84
85 return 0;
86}
87
88static int
89nve0_graph_construct_context(struct nouveau_channel *chan)
90{
91 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
92 struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
93 struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
94 struct drm_device *dev = chan->dev;
95 int ret, i;
96 u32 *ctx;
97
98 ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
99 if (!ctx)
100 return -ENOMEM;
101
102 nve0_graph_load_context(chan);
103
104 nv_wo32(grch->grctx, 0x1c, 1);
105 nv_wo32(grch->grctx, 0x20, 0);
106 nv_wo32(grch->grctx, 0x28, 0);
107 nv_wo32(grch->grctx, 0x2c, 0);
108 dev_priv->engine.instmem.flush(dev);
109
110 ret = nve0_grctx_generate(chan);
111 if (ret)
112 goto err;
113
114 ret = nve0_graph_unload_context_to(dev, chan->ramin->vinst);
115 if (ret)
116 goto err;
117
118 for (i = 0; i < priv->grctx_size; i += 4)
119 ctx[i / 4] = nv_ro32(grch->grctx, i);
120
121 priv->grctx_vals = ctx;
122 return 0;
123
124err:
125 kfree(ctx);
126 return ret;
127}
128
129static int
130nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
131{
132 struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
133 struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
134 struct drm_device *dev = chan->dev;
135 u32 magic[GPC_MAX][2];
136 u16 offset = 0x0000;
137 int gpc;
138 int ret;
139
140 ret = nouveau_gpuobj_new(dev, chan, 0x3000, 256, NVOBJ_FLAG_VM,
141 &grch->unk408004);
142 if (ret)
143 return ret;
144
145 ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
146 &grch->unk40800c);
147 if (ret)
148 return ret;
149
150 ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
151 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
152 &grch->unk418810);
153 if (ret)
154 return ret;
155
156 ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
157 &grch->mmio);
158 if (ret)
159 return ret;
160
161#define mmio(r,v) do { \
162 nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 0, (r)); \
163 nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 4, (v)); \
164 grch->mmio_nr++; \
165} while (0)
166 mmio(0x40800c, grch->unk40800c->linst >> 8);
167 mmio(0x408010, 0x80000000);
168 mmio(0x419004, grch->unk40800c->linst >> 8);
169 mmio(0x419008, 0x00000000);
170 mmio(0x4064cc, 0x80000000);
171 mmio(0x408004, grch->unk408004->linst >> 8);
172 mmio(0x408008, 0x80000030);
173 mmio(0x418808, grch->unk408004->linst >> 8);
174 mmio(0x41880c, 0x80000030);
175 mmio(0x4064c8, 0x01800600);
176 mmio(0x418810, 0x80000000 | grch->unk418810->linst >> 12);
177 mmio(0x419848, 0x10000000 | grch->unk418810->linst >> 12);
178 mmio(0x405830, 0x02180648);
179 mmio(0x4064c4, 0x0192ffff);
180
181 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
182 u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
183 u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
184 magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
185 magic[gpc][1] = 0x00000000 | (magic1 << 16);
186 offset += 0x0324 * priv->tpc_nr[gpc];
187 }
188
189 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
190 mmio(GPC_UNIT(gpc, 0x30c0), magic[gpc][0]);
191 mmio(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset);
192 offset += 0x07ff * priv->tpc_nr[gpc];
193 }
194
195 mmio(0x17e91c, 0x06060609);
196 mmio(0x17e920, 0x00090a05);
197#undef mmio
198 return 0;
199}
200
201static int
202nve0_graph_context_new(struct nouveau_channel *chan, int engine)
203{
204 struct drm_device *dev = chan->dev;
205 struct drm_nouveau_private *dev_priv = dev->dev_private;
206 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
207 struct nve0_graph_priv *priv = nv_engine(dev, engine);
208 struct nve0_graph_chan *grch;
209 struct nouveau_gpuobj *grctx;
210 int ret, i;
211
212 grch = kzalloc(sizeof(*grch), GFP_KERNEL);
213 if (!grch)
214 return -ENOMEM;
215 chan->engctx[NVOBJ_ENGINE_GR] = grch;
216
217 ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
218 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
219 &grch->grctx);
220 if (ret)
221 goto error;
222 grctx = grch->grctx;
223
224 ret = nve0_graph_create_context_mmio_list(chan);
225 if (ret)
226 goto error;
227
228 nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
229 nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
230 pinstmem->flush(dev);
231
232 if (!priv->grctx_vals) {
233 ret = nve0_graph_construct_context(chan);
234 if (ret)
235 goto error;
236 }
237
238 for (i = 0; i < priv->grctx_size; i += 4)
239 nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
240 nv_wo32(grctx, 0xf4, 0);
241 nv_wo32(grctx, 0xf8, 0);
242 nv_wo32(grctx, 0x10, grch->mmio_nr);
243 nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
244 nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
245 nv_wo32(grctx, 0x1c, 1);
246 nv_wo32(grctx, 0x20, 0);
247 nv_wo32(grctx, 0x28, 0);
248 nv_wo32(grctx, 0x2c, 0);
249
250 pinstmem->flush(dev);
251 return 0;
252
253error:
254 priv->base.context_del(chan, engine);
255 return ret;
256}
257
258static void
259nve0_graph_context_del(struct nouveau_channel *chan, int engine)
260{
261 struct nve0_graph_chan *grch = chan->engctx[engine];
262
263 nouveau_gpuobj_ref(NULL, &grch->mmio);
264 nouveau_gpuobj_ref(NULL, &grch->unk418810);
265 nouveau_gpuobj_ref(NULL, &grch->unk40800c);
266 nouveau_gpuobj_ref(NULL, &grch->unk408004);
267 nouveau_gpuobj_ref(NULL, &grch->grctx);
268 chan->engctx[engine] = NULL;
269}
270
271static int
272nve0_graph_object_new(struct nouveau_channel *chan, int engine,
273 u32 handle, u16 class)
274{
275 return 0;
276}
277
278static int
279nve0_graph_fini(struct drm_device *dev, int engine, bool suspend)
280{
281 return 0;
282}
283
284static void
285nve0_graph_init_obj418880(struct drm_device *dev)
286{
287 struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
288 int i;
289
290 nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
291 nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
292 for (i = 0; i < 4; i++)
293 nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
294 nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
295 nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
296}
297
298static void
299nve0_graph_init_regs(struct drm_device *dev)
300{
301 nv_wr32(dev, 0x400080, 0x003083c2);
302 nv_wr32(dev, 0x400088, 0x0001ffe7);
303 nv_wr32(dev, 0x40008c, 0x00000000);
304 nv_wr32(dev, 0x400090, 0x00000030);
305 nv_wr32(dev, 0x40013c, 0x003901f7);
306 nv_wr32(dev, 0x400140, 0x00000100);
307 nv_wr32(dev, 0x400144, 0x00000000);
308 nv_wr32(dev, 0x400148, 0x00000110);
309 nv_wr32(dev, 0x400138, 0x00000000);
310 nv_wr32(dev, 0x400130, 0x00000000);
311 nv_wr32(dev, 0x400134, 0x00000000);
312 nv_wr32(dev, 0x400124, 0x00000002);
313}
314
315static void
316nve0_graph_init_units(struct drm_device *dev)
317{
318 nv_wr32(dev, 0x409ffc, 0x00000000);
319 nv_wr32(dev, 0x409c14, 0x00003e3e);
320 nv_wr32(dev, 0x409c24, 0x000f0000);
321
322 nv_wr32(dev, 0x404000, 0xc0000000);
323 nv_wr32(dev, 0x404600, 0xc0000000);
324 nv_wr32(dev, 0x408030, 0xc0000000);
325 nv_wr32(dev, 0x404490, 0xc0000000);
326 nv_wr32(dev, 0x406018, 0xc0000000);
327 nv_wr32(dev, 0x407020, 0xc0000000);
328 nv_wr32(dev, 0x405840, 0xc0000000);
329 nv_wr32(dev, 0x405844, 0x00ffffff);
330
331 nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
332 nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
333
334}
335
336static void
337nve0_graph_init_gpc_0(struct drm_device *dev)
338{
339 struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
340 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
341 u32 data[TPC_MAX / 8];
342 u8 tpcnr[GPC_MAX];
343 int i, gpc, tpc;
344
345 nv_wr32(dev, GPC_UNIT(0, 0x3018), 0x00000001);
346
347 memset(data, 0x00, sizeof(data));
348 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
349 for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
350 do {
351 gpc = (gpc + 1) % priv->gpc_nr;
352 } while (!tpcnr[gpc]);
353 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
354
355 data[i / 8] |= tpc << ((i % 8) * 4);
356 }
357
358 nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
359 nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
360 nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
361 nv_wr32(dev, GPC_BCAST(0x098c), data[3]);
362
363 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
364 nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
365 priv->tpc_nr[gpc]);
366 nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
367 nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
368 }
369
370 nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
371 nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
372}
373
374static void
375nve0_graph_init_gpc_1(struct drm_device *dev)
376{
377 struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
378 int gpc, tpc;
379
380 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
381 nv_wr32(dev, GPC_UNIT(gpc, 0x3038), 0xc0000000);
382 nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
383 nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
384 nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
385 nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
386 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
387 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
388 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
389 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
390 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
391 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
392 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
393 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
394 }
395 nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
396 nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
397 }
398}
399
400static void
401nve0_graph_init_rop(struct drm_device *dev)
402{
403 struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
404 int rop;
405
406 for (rop = 0; rop < priv->rop_nr; rop++) {
407 nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
408 nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
409 nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
410 nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
411 }
412}
413
414static void
415nve0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
416 struct nve0_graph_fuc *code, struct nve0_graph_fuc *data)
417{
418 int i;
419
420 nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
421 for (i = 0; i < data->size / 4; i++)
422 nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);
423
424 nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
425 for (i = 0; i < code->size / 4; i++) {
426 if ((i & 0x3f) == 0)
427 nv_wr32(dev, fuc_base + 0x0188, i >> 6);
428 nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
429 }
430}
431
432static int
433nve0_graph_init_ctxctl(struct drm_device *dev)
434{
435 struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
436 u32 r000260;
437
438 /* load fuc microcode */
439 r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
440 nve0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
441 nve0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
442 nv_wr32(dev, 0x000260, r000260);
443
444 /* start both of them running */
445 nv_wr32(dev, 0x409840, 0xffffffff);
446 nv_wr32(dev, 0x41a10c, 0x00000000);
447 nv_wr32(dev, 0x40910c, 0x00000000);
448 nv_wr32(dev, 0x41a100, 0x00000002);
449 nv_wr32(dev, 0x409100, 0x00000002);
450 if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
451 NV_INFO(dev, "0x409800 wait failed\n");
452
453 nv_wr32(dev, 0x409840, 0xffffffff);
454 nv_wr32(dev, 0x409500, 0x7fffffff);
455 nv_wr32(dev, 0x409504, 0x00000021);
456
457 nv_wr32(dev, 0x409840, 0xffffffff);
458 nv_wr32(dev, 0x409500, 0x00000000);
459 nv_wr32(dev, 0x409504, 0x00000010);
460 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
461 NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
462 return -EBUSY;
463 }
464 priv->grctx_size = nv_rd32(dev, 0x409800);
465
466 nv_wr32(dev, 0x409840, 0xffffffff);
467 nv_wr32(dev, 0x409500, 0x00000000);
468 nv_wr32(dev, 0x409504, 0x00000016);
469 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
470 NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
471 return -EBUSY;
472 }
473
474 nv_wr32(dev, 0x409840, 0xffffffff);
475 nv_wr32(dev, 0x409500, 0x00000000);
476 nv_wr32(dev, 0x409504, 0x00000025);
477 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
478 NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
479 return -EBUSY;
480 }
481
482 nv_wr32(dev, 0x409800, 0x00000000);
483 nv_wr32(dev, 0x409500, 0x00000001);
484 nv_wr32(dev, 0x409504, 0x00000030);
485 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
486 NV_ERROR(dev, "fuc09 req 0x30 timeout\n");
487 return -EBUSY;
488 }
489
490 nv_wr32(dev, 0x409810, 0xb00095c8);
491 nv_wr32(dev, 0x409800, 0x00000000);
492 nv_wr32(dev, 0x409500, 0x00000001);
493 nv_wr32(dev, 0x409504, 0x00000031);
494 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
495 NV_ERROR(dev, "fuc09 req 0x31 timeout\n");
496 return -EBUSY;
497 }
498
499 nv_wr32(dev, 0x409810, 0x00080420);
500 nv_wr32(dev, 0x409800, 0x00000000);
501 nv_wr32(dev, 0x409500, 0x00000001);
502 nv_wr32(dev, 0x409504, 0x00000032);
503 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
504 NV_ERROR(dev, "fuc09 req 0x32 timeout\n");
505 return -EBUSY;
506 }
507
508 nv_wr32(dev, 0x409614, 0x00000070);
509 nv_wr32(dev, 0x409614, 0x00000770);
510 nv_wr32(dev, 0x40802c, 0x00000001);
511 return 0;
512}
513
514static int
515nve0_graph_init(struct drm_device *dev, int engine)
516{
517 int ret;
518
519 nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
520 nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
521
522 nve0_graph_init_obj418880(dev);
523 nve0_graph_init_regs(dev);
524 nve0_graph_init_gpc_0(dev);
525
526 nv_wr32(dev, 0x400500, 0x00010001);
527 nv_wr32(dev, 0x400100, 0xffffffff);
528 nv_wr32(dev, 0x40013c, 0xffffffff);
529
530 nve0_graph_init_units(dev);
531 nve0_graph_init_gpc_1(dev);
532 nve0_graph_init_rop(dev);
533
534 nv_wr32(dev, 0x400108, 0xffffffff);
535 nv_wr32(dev, 0x400138, 0xffffffff);
536 nv_wr32(dev, 0x400118, 0xffffffff);
537 nv_wr32(dev, 0x400130, 0xffffffff);
538 nv_wr32(dev, 0x40011c, 0xffffffff);
539 nv_wr32(dev, 0x400134, 0xffffffff);
540 nv_wr32(dev, 0x400054, 0x34ce3464);
541
542 ret = nve0_graph_init_ctxctl(dev);
543 if (ret)
544 return ret;
545
546 return 0;
547}
548
549int
550nve0_graph_isr_chid(struct drm_device *dev, u64 inst)
551{
552 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
553 struct drm_nouveau_private *dev_priv = dev->dev_private;
554 struct nouveau_channel *chan;
555 unsigned long flags;
556 int i;
557
558 spin_lock_irqsave(&dev_priv->channels.lock, flags);
559 for (i = 0; i < pfifo->channels; i++) {
560 chan = dev_priv->channels.ptr[i];
561 if (!chan || !chan->ramin)
562 continue;
563
564 if (inst == chan->ramin->vinst)
565 break;
566 }
567 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
568 return i;
569}
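/*
 * Editorial note: the scan above resolves the instance address reported
 * by PGRAPH back to a channel id.  If nothing matches, the loop runs to
 * completion and i == pfifo->channels is returned, which simply shows up
 * as an out-of-range chid in the error messages below.
 */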
570
571static void
572nve0_graph_ctxctl_isr(struct drm_device *dev)
573{
574 u32 ustat = nv_rd32(dev, 0x409c18);
575
576 if (ustat & 0x00000001)
577 NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
578 if (ustat & 0x00080000)
579 NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
580 if (ustat & ~0x00080001)
581 NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
582
583 nve0_graph_ctxctl_debug(dev);
584 nv_wr32(dev, 0x409c20, ustat);
585}
586
587static void
588nve0_graph_trap_isr(struct drm_device *dev, int chid)
589{
590 struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
591 u32 trap = nv_rd32(dev, 0x400108);
592 int rop;
593
594 if (trap & 0x00000001) {
595 u32 stat = nv_rd32(dev, 0x404000);
596 NV_INFO(dev, "PGRAPH: DISPATCH ch %d 0x%08x\n", chid, stat);
597 nv_wr32(dev, 0x404000, 0xc0000000);
598 nv_wr32(dev, 0x400108, 0x00000001);
599 trap &= ~0x00000001;
600 }
601
602 if (trap & 0x00000010) {
603 u32 stat = nv_rd32(dev, 0x405840);
604 NV_INFO(dev, "PGRAPH: SHADER ch %d 0x%08x\n", chid, stat);
605 nv_wr32(dev, 0x405840, 0xc0000000);
606 nv_wr32(dev, 0x400108, 0x00000010);
607 trap &= ~0x00000010;
608 }
609
610 if (trap & 0x02000000) {
611 for (rop = 0; rop < priv->rop_nr; rop++) {
612 u32 statz = nv_rd32(dev, ROP_UNIT(rop, 0x070));
613 u32 statc = nv_rd32(dev, ROP_UNIT(rop, 0x144));
614 NV_INFO(dev, "PGRAPH: ROP%d ch %d 0x%08x 0x%08x\n",
615 rop, chid, statz, statc);
616 nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
617 nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
618 }
619 nv_wr32(dev, 0x400108, 0x02000000);
620 trap &= ~0x02000000;
621 }
622
623 if (trap) {
624 NV_INFO(dev, "PGRAPH: TRAP ch %d 0x%08x\n", chid, trap);
625 nv_wr32(dev, 0x400108, trap);
626 }
627}
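/*
 * Editorial note: each trap source handled above is reset by writing
 * 0xc0000000 to the unit's own status register and then acknowledged by
 * writing its bit back to 0x400108; anything left over is reported and
 * acked wholesale by the final write.
 */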
628
629static void
630nve0_graph_isr(struct drm_device *dev)
631{
632 u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
633 u32 chid = nve0_graph_isr_chid(dev, inst);
634 u32 stat = nv_rd32(dev, 0x400100);
635 u32 addr = nv_rd32(dev, 0x400704);
636 u32 mthd = (addr & 0x00003ffc);
637 u32 subc = (addr & 0x00070000) >> 16;
638 u32 data = nv_rd32(dev, 0x400708);
639 u32 code = nv_rd32(dev, 0x400110);
640 u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
641
642 if (stat & 0x00000010) {
643 if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
644 NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
645 "subc %d class 0x%04x mthd 0x%04x "
646 "data 0x%08x\n",
647 chid, inst, subc, class, mthd, data);
648 }
649 nv_wr32(dev, 0x400100, 0x00000010);
650 stat &= ~0x00000010;
651 }
652
653 if (stat & 0x00000020) {
654 NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
655 "class 0x%04x mthd 0x%04x data 0x%08x\n",
656 chid, inst, subc, class, mthd, data);
657 nv_wr32(dev, 0x400100, 0x00000020);
658 stat &= ~0x00000020;
659 }
660
661 if (stat & 0x00100000) {
662 NV_INFO(dev, "PGRAPH: DATA_ERROR [");
663 nouveau_enum_print(nv50_data_error_names, code);
664 printk("] ch %d [0x%010llx] subc %d class 0x%04x "
665 "mthd 0x%04x data 0x%08x\n",
666 chid, inst, subc, class, mthd, data);
667 nv_wr32(dev, 0x400100, 0x00100000);
668 stat &= ~0x00100000;
669 }
670
671 if (stat & 0x00200000) {
672 nve0_graph_trap_isr(dev, chid);
673 nv_wr32(dev, 0x400100, 0x00200000);
674 stat &= ~0x00200000;
675 }
676
677 if (stat & 0x00080000) {
678 nve0_graph_ctxctl_isr(dev);
679 nv_wr32(dev, 0x400100, 0x00080000);
680 stat &= ~0x00080000;
681 }
682
683 if (stat) {
684 NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
685 nv_wr32(dev, 0x400100, stat);
686 }
687
688 nv_wr32(dev, 0x400500, 0x00010001);
689}
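/*
 * Editorial note: stat bits decoded above -- 0x00000010 ILLEGAL_MTHD,
 * 0x00000020 ILLEGAL_CLASS, 0x00100000 DATA_ERROR, 0x00200000 TRAP,
 * 0x00080000 CTXCTL.  The trailing 0x400500 write appears to re-enable
 * PGRAPH fifo access once the interrupt has been serviced.
 */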
690
691static int
692nve0_graph_create_fw(struct drm_device *dev, const char *fwname,
693 struct nve0_graph_fuc *fuc)
694{
695 struct drm_nouveau_private *dev_priv = dev->dev_private;
696 const struct firmware *fw;
697 char f[32];
698 int ret;
699
700 snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
701 ret = request_firmware(&fw, f, &dev->pdev->dev);
702 if (ret)
703 return ret;
704
705 fuc->size = fw->size;
706 fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
707 release_firmware(fw);
708 return (fuc->data != NULL) ? 0 : -ENOMEM;
709}
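/*
 * Editorial note: for chipset 0xe4 the snprintf() above yields e.g.
 * "nouveau/nve4_fuc409c", so the blob-extracted ucode has to be placed
 * under that name in the firmware search path.
 */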
710
711static void
712nve0_graph_destroy_fw(struct nve0_graph_fuc *fuc)
713{
714 /* kfree() tolerates NULL, so no check is needed */
715 kfree(fuc->data);
716 fuc->data = NULL;
717
718}
719
720static void
721nve0_graph_destroy(struct drm_device *dev, int engine)
722{
723 struct nve0_graph_priv *priv = nv_engine(dev, engine);
724
725 nve0_graph_destroy_fw(&priv->fuc409c);
726 nve0_graph_destroy_fw(&priv->fuc409d);
727 nve0_graph_destroy_fw(&priv->fuc41ac);
728 nve0_graph_destroy_fw(&priv->fuc41ad);
729
730 nouveau_irq_unregister(dev, 12);
731
732 nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
733 nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
734
735 /* kfree() tolerates NULL */
736 kfree(priv->grctx_vals);
737
738 NVOBJ_ENGINE_DEL(dev, GR);
739 kfree(priv);
740}
741
742int
743nve0_graph_create(struct drm_device *dev)
744{
745 struct drm_nouveau_private *dev_priv = dev->dev_private;
746 struct nve0_graph_priv *priv;
747 int ret, gpc, i;
748 u32 kepler;
749
750 kepler = nve0_graph_class(dev);
751 if (!kepler) {
752 NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
753 return 0; /* non-fatal: continue without PGRAPH acceleration */
754 }
755
756 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
757 if (!priv)
758 return -ENOMEM;
759
760 priv->base.destroy = nve0_graph_destroy;
761 priv->base.init = nve0_graph_init;
762 priv->base.fini = nve0_graph_fini;
763 priv->base.context_new = nve0_graph_context_new;
764 priv->base.context_del = nve0_graph_context_del;
765 priv->base.object_new = nve0_graph_object_new;
766
767 NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
768 nouveau_irq_register(dev, 12, nve0_graph_isr);
769
770 NV_INFO(dev, "PGRAPH: using external firmware\n");
771 if (nve0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
772 nve0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
773 nve0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
774 nve0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
775 ret = 0; /* missing ucode is non-fatal; tear down and run unaccelerated */
776 goto error;
777 }
778
779 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
780 if (ret)
781 goto error;
782
783 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
784 if (ret)
785 goto error;
786
787 for (i = 0; i < 0x1000; i += 4) {
788 nv_wo32(priv->unk4188b4, i, 0x00000010);
789 nv_wo32(priv->unk4188b8, i, 0x00000010);
790 }
791
792 priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
793 priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
794 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
795 priv->tpc_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
796 priv->tpc_total += priv->tpc_nr[gpc];
797 }
798
799 switch (dev_priv->chipset) {
800 case 0xe4:
801 if (priv->tpc_total == 8)
802 priv->magic_not_rop_nr = 3;
803 else
804 if (priv->tpc_total == 7)
805 priv->magic_not_rop_nr = 1;
806 break;
807 case 0xe7:
808 priv->magic_not_rop_nr = 1;
809 break;
810 default:
811 break;
812 }
813
814 if (!priv->magic_not_rop_nr) {
815 NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
816 priv->tpc_nr[0], priv->tpc_nr[1], priv->tpc_nr[2],
817 priv->tpc_nr[3], priv->rop_nr);
818 priv->magic_not_rop_nr = 0x00;
819 }
820
821 NVOBJ_CLASS(dev, 0xa097, GR); /* subc 0: 3D */
822 NVOBJ_CLASS(dev, 0xa0c0, GR); /* subc 1: COMPUTE */
823 NVOBJ_CLASS(dev, 0xa040, GR); /* subc 2: P2MF */
824 NVOBJ_CLASS(dev, 0x902d, GR); /* subc 3: 2D */
825 NVOBJ_CLASS(dev, 0xa0b5, GR); /* subc 4: COPY */
826 return 0;
827
828error:
829 nve0_graph_destroy(dev, NVOBJ_ENGINE_GR);
830 return ret;
831}
diff --git a/drivers/gpu/drm/nouveau/nve0_graph.h b/drivers/gpu/drm/nouveau/nve0_graph.h
new file mode 100644
index 000000000000..2ba70449ba01
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nve0_graph.h
@@ -0,0 +1,89 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NVE0_GRAPH_H__
26#define __NVE0_GRAPH_H__
27
28#define GPC_MAX 4
29#define TPC_MAX 32
30
31#define ROP_BCAST(r) (0x408800 + (r))
32#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r))
33#define GPC_BCAST(r) (0x418000 + (r))
34#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r))
35#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
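/* Editorial example: GPC_UNIT(1, 0x2608) = 0x500000 + 1 * 0x8000 + 0x2608
 * = 0x50a608 -- the per-GPC TPC count register read by nve0_graph_create(). */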
36
37struct nve0_graph_fuc {
38 u32 *data;
39 u32 size;
40};
41
42struct nve0_graph_priv {
43 struct nouveau_exec_engine base;
44
45 struct nve0_graph_fuc fuc409c;
46 struct nve0_graph_fuc fuc409d;
47 struct nve0_graph_fuc fuc41ac;
48 struct nve0_graph_fuc fuc41ad;
49
50 u8 gpc_nr;
51 u8 rop_nr;
52 u8 tpc_nr[GPC_MAX];
53 u8 tpc_total;
54
55 u32 grctx_size;
56 u32 *grctx_vals;
57 struct nouveau_gpuobj *unk4188b4;
58 struct nouveau_gpuobj *unk4188b8;
59
60 u8 magic_not_rop_nr;
61};
62
63struct nve0_graph_chan {
64 struct nouveau_gpuobj *grctx;
65 struct nouveau_gpuobj *unk408004; /* 0x418810 too */
66 struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
67 struct nouveau_gpuobj *unk418810; /* 0x419848 too */
68 struct nouveau_gpuobj *mmio;
69 int mmio_nr;
70};
71
72int nve0_grctx_generate(struct nouveau_channel *);
73
74/* nve0_graph.c uses this also to determine supported chipsets */
75static inline u32
76nve0_graph_class(struct drm_device *dev)
77{
78 struct drm_nouveau_private *dev_priv = dev->dev_private;
79
80 switch (dev_priv->chipset) {
81 case 0xe4:
82 case 0xe7:
83 return 0xa097;
84 default:
85 return 0;
86 }
87}
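/*
 * Editorial note: a non-zero return is the Kepler 3D class (0xa097);
 * nve0_graph_create() treats a zero return as "unsupported chipset" and
 * skips PGRAPH setup entirely.
 */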
88
89#endif
diff --git a/drivers/gpu/drm/nouveau/nve0_grctx.c b/drivers/gpu/drm/nouveau/nve0_grctx.c
new file mode 100644
index 000000000000..d8cb360e92c1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nve0_grctx.c
@@ -0,0 +1,2777 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_mm.h"
28#include "nve0_graph.h"
29
30static void
31nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
32{
33 nv_wr32(dev, 0x400204, data);
34 nv_wr32(dev, 0x400200, icmd);
35 while (nv_rd32(dev, 0x400700) & 0x00000002) {}
36}
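/*
 * Editorial note: nv_icmd() stages the payload in 0x400204, writes the
 * command index to 0x400200, then spins while 0x400700 bit 1 signals the
 * unit busy.  The caller brackets the whole sequence with 0x400208 =
 * 0x80000000 / 0x00000000 to enter and leave icmd upload mode.
 */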
37
38static void
39nve0_grctx_generate_icmd(struct drm_device *dev)
40{
41 nv_wr32(dev, 0x400208, 0x80000000);
42 nv_icmd(dev, 0x001000, 0x00000004);
43 nv_icmd(dev, 0x000039, 0x00000000);
44 nv_icmd(dev, 0x00003a, 0x00000000);
45 nv_icmd(dev, 0x00003b, 0x00000000);
46 nv_icmd(dev, 0x0000a9, 0x0000ffff);
47 nv_icmd(dev, 0x000038, 0x0fac6881);
48 nv_icmd(dev, 0x00003d, 0x00000001);
49 nv_icmd(dev, 0x0000e8, 0x00000400);
50 nv_icmd(dev, 0x0000e9, 0x00000400);
51 nv_icmd(dev, 0x0000ea, 0x00000400);
52 nv_icmd(dev, 0x0000eb, 0x00000400);
53 nv_icmd(dev, 0x0000ec, 0x00000400);
54 nv_icmd(dev, 0x0000ed, 0x00000400);
55 nv_icmd(dev, 0x0000ee, 0x00000400);
56 nv_icmd(dev, 0x0000ef, 0x00000400);
57 nv_icmd(dev, 0x000078, 0x00000300);
58 nv_icmd(dev, 0x000079, 0x00000300);
59 nv_icmd(dev, 0x00007a, 0x00000300);
60 nv_icmd(dev, 0x00007b, 0x00000300);
61 nv_icmd(dev, 0x00007c, 0x00000300);
62 nv_icmd(dev, 0x00007d, 0x00000300);
63 nv_icmd(dev, 0x00007e, 0x00000300);
64 nv_icmd(dev, 0x00007f, 0x00000300);
65 nv_icmd(dev, 0x000050, 0x00000011);
66 nv_icmd(dev, 0x000058, 0x00000008);
67 nv_icmd(dev, 0x000059, 0x00000008);
68 nv_icmd(dev, 0x00005a, 0x00000008);
69 nv_icmd(dev, 0x00005b, 0x00000008);
70 nv_icmd(dev, 0x00005c, 0x00000008);
71 nv_icmd(dev, 0x00005d, 0x00000008);
72 nv_icmd(dev, 0x00005e, 0x00000008);
73 nv_icmd(dev, 0x00005f, 0x00000008);
74 nv_icmd(dev, 0x000208, 0x00000001);
75 nv_icmd(dev, 0x000209, 0x00000001);
76 nv_icmd(dev, 0x00020a, 0x00000001);
77 nv_icmd(dev, 0x00020b, 0x00000001);
78 nv_icmd(dev, 0x00020c, 0x00000001);
79 nv_icmd(dev, 0x00020d, 0x00000001);
80 nv_icmd(dev, 0x00020e, 0x00000001);
81 nv_icmd(dev, 0x00020f, 0x00000001);
82 nv_icmd(dev, 0x000081, 0x00000001);
83 nv_icmd(dev, 0x000085, 0x00000004);
84 nv_icmd(dev, 0x000088, 0x00000400);
85 nv_icmd(dev, 0x000090, 0x00000300);
86 nv_icmd(dev, 0x000098, 0x00001001);
87 nv_icmd(dev, 0x0000e3, 0x00000001);
88 nv_icmd(dev, 0x0000da, 0x00000001);
89 nv_icmd(dev, 0x0000f8, 0x00000003);
90 nv_icmd(dev, 0x0000fa, 0x00000001);
91 nv_icmd(dev, 0x00009f, 0x0000ffff);
92 nv_icmd(dev, 0x0000a0, 0x0000ffff);
93 nv_icmd(dev, 0x0000a1, 0x0000ffff);
94 nv_icmd(dev, 0x0000a2, 0x0000ffff);
95 nv_icmd(dev, 0x0000b1, 0x00000001);
96 nv_icmd(dev, 0x0000ad, 0x0000013e);
97 nv_icmd(dev, 0x0000e1, 0x00000010);
98 nv_icmd(dev, 0x000290, 0x00000000);
99 nv_icmd(dev, 0x000291, 0x00000000);
100 nv_icmd(dev, 0x000292, 0x00000000);
101 nv_icmd(dev, 0x000293, 0x00000000);
102 nv_icmd(dev, 0x000294, 0x00000000);
103 nv_icmd(dev, 0x000295, 0x00000000);
104 nv_icmd(dev, 0x000296, 0x00000000);
105 nv_icmd(dev, 0x000297, 0x00000000);
106 nv_icmd(dev, 0x000298, 0x00000000);
107 nv_icmd(dev, 0x000299, 0x00000000);
108 nv_icmd(dev, 0x00029a, 0x00000000);
109 nv_icmd(dev, 0x00029b, 0x00000000);
110 nv_icmd(dev, 0x00029c, 0x00000000);
111 nv_icmd(dev, 0x00029d, 0x00000000);
112 nv_icmd(dev, 0x00029e, 0x00000000);
113 nv_icmd(dev, 0x00029f, 0x00000000);
114 nv_icmd(dev, 0x0003b0, 0x00000000);
115 nv_icmd(dev, 0x0003b1, 0x00000000);
116 nv_icmd(dev, 0x0003b2, 0x00000000);
117 nv_icmd(dev, 0x0003b3, 0x00000000);
118 nv_icmd(dev, 0x0003b4, 0x00000000);
119 nv_icmd(dev, 0x0003b5, 0x00000000);
120 nv_icmd(dev, 0x0003b6, 0x00000000);
121 nv_icmd(dev, 0x0003b7, 0x00000000);
122 nv_icmd(dev, 0x0003b8, 0x00000000);
123 nv_icmd(dev, 0x0003b9, 0x00000000);
124 nv_icmd(dev, 0x0003ba, 0x00000000);
125 nv_icmd(dev, 0x0003bb, 0x00000000);
126 nv_icmd(dev, 0x0003bc, 0x00000000);
127 nv_icmd(dev, 0x0003bd, 0x00000000);
128 nv_icmd(dev, 0x0003be, 0x00000000);
129 nv_icmd(dev, 0x0003bf, 0x00000000);
130 nv_icmd(dev, 0x0002a0, 0x00000000);
131 nv_icmd(dev, 0x0002a1, 0x00000000);
132 nv_icmd(dev, 0x0002a2, 0x00000000);
133 nv_icmd(dev, 0x0002a3, 0x00000000);
134 nv_icmd(dev, 0x0002a4, 0x00000000);
135 nv_icmd(dev, 0x0002a5, 0x00000000);
136 nv_icmd(dev, 0x0002a6, 0x00000000);
137 nv_icmd(dev, 0x0002a7, 0x00000000);
138 nv_icmd(dev, 0x0002a8, 0x00000000);
139 nv_icmd(dev, 0x0002a9, 0x00000000);
140 nv_icmd(dev, 0x0002aa, 0x00000000);
141 nv_icmd(dev, 0x0002ab, 0x00000000);
142 nv_icmd(dev, 0x0002ac, 0x00000000);
143 nv_icmd(dev, 0x0002ad, 0x00000000);
144 nv_icmd(dev, 0x0002ae, 0x00000000);
145 nv_icmd(dev, 0x0002af, 0x00000000);
146 nv_icmd(dev, 0x000420, 0x00000000);
147 nv_icmd(dev, 0x000421, 0x00000000);
148 nv_icmd(dev, 0x000422, 0x00000000);
149 nv_icmd(dev, 0x000423, 0x00000000);
150 nv_icmd(dev, 0x000424, 0x00000000);
151 nv_icmd(dev, 0x000425, 0x00000000);
152 nv_icmd(dev, 0x000426, 0x00000000);
153 nv_icmd(dev, 0x000427, 0x00000000);
154 nv_icmd(dev, 0x000428, 0x00000000);
155 nv_icmd(dev, 0x000429, 0x00000000);
156 nv_icmd(dev, 0x00042a, 0x00000000);
157 nv_icmd(dev, 0x00042b, 0x00000000);
158 nv_icmd(dev, 0x00042c, 0x00000000);
159 nv_icmd(dev, 0x00042d, 0x00000000);
160 nv_icmd(dev, 0x00042e, 0x00000000);
161 nv_icmd(dev, 0x00042f, 0x00000000);
162 nv_icmd(dev, 0x0002b0, 0x00000000);
163 nv_icmd(dev, 0x0002b1, 0x00000000);
164 nv_icmd(dev, 0x0002b2, 0x00000000);
165 nv_icmd(dev, 0x0002b3, 0x00000000);
166 nv_icmd(dev, 0x0002b4, 0x00000000);
167 nv_icmd(dev, 0x0002b5, 0x00000000);
168 nv_icmd(dev, 0x0002b6, 0x00000000);
169 nv_icmd(dev, 0x0002b7, 0x00000000);
170 nv_icmd(dev, 0x0002b8, 0x00000000);
171 nv_icmd(dev, 0x0002b9, 0x00000000);
172 nv_icmd(dev, 0x0002ba, 0x00000000);
173 nv_icmd(dev, 0x0002bb, 0x00000000);
174 nv_icmd(dev, 0x0002bc, 0x00000000);
175 nv_icmd(dev, 0x0002bd, 0x00000000);
176 nv_icmd(dev, 0x0002be, 0x00000000);
177 nv_icmd(dev, 0x0002bf, 0x00000000);
178 nv_icmd(dev, 0x000430, 0x00000000);
179 nv_icmd(dev, 0x000431, 0x00000000);
180 nv_icmd(dev, 0x000432, 0x00000000);
181 nv_icmd(dev, 0x000433, 0x00000000);
182 nv_icmd(dev, 0x000434, 0x00000000);
183 nv_icmd(dev, 0x000435, 0x00000000);
184 nv_icmd(dev, 0x000436, 0x00000000);
185 nv_icmd(dev, 0x000437, 0x00000000);
186 nv_icmd(dev, 0x000438, 0x00000000);
187 nv_icmd(dev, 0x000439, 0x00000000);
188 nv_icmd(dev, 0x00043a, 0x00000000);
189 nv_icmd(dev, 0x00043b, 0x00000000);
190 nv_icmd(dev, 0x00043c, 0x00000000);
191 nv_icmd(dev, 0x00043d, 0x00000000);
192 nv_icmd(dev, 0x00043e, 0x00000000);
193 nv_icmd(dev, 0x00043f, 0x00000000);
194 nv_icmd(dev, 0x0002c0, 0x00000000);
195 nv_icmd(dev, 0x0002c1, 0x00000000);
196 nv_icmd(dev, 0x0002c2, 0x00000000);
197 nv_icmd(dev, 0x0002c3, 0x00000000);
198 nv_icmd(dev, 0x0002c4, 0x00000000);
199 nv_icmd(dev, 0x0002c5, 0x00000000);
200 nv_icmd(dev, 0x0002c6, 0x00000000);
201 nv_icmd(dev, 0x0002c7, 0x00000000);
202 nv_icmd(dev, 0x0002c8, 0x00000000);
203 nv_icmd(dev, 0x0002c9, 0x00000000);
204 nv_icmd(dev, 0x0002ca, 0x00000000);
205 nv_icmd(dev, 0x0002cb, 0x00000000);
206 nv_icmd(dev, 0x0002cc, 0x00000000);
207 nv_icmd(dev, 0x0002cd, 0x00000000);
208 nv_icmd(dev, 0x0002ce, 0x00000000);
209 nv_icmd(dev, 0x0002cf, 0x00000000);
210 nv_icmd(dev, 0x0004d0, 0x00000000);
211 nv_icmd(dev, 0x0004d1, 0x00000000);
212 nv_icmd(dev, 0x0004d2, 0x00000000);
213 nv_icmd(dev, 0x0004d3, 0x00000000);
214 nv_icmd(dev, 0x0004d4, 0x00000000);
215 nv_icmd(dev, 0x0004d5, 0x00000000);
216 nv_icmd(dev, 0x0004d6, 0x00000000);
217 nv_icmd(dev, 0x0004d7, 0x00000000);
218 nv_icmd(dev, 0x0004d8, 0x00000000);
219 nv_icmd(dev, 0x0004d9, 0x00000000);
220 nv_icmd(dev, 0x0004da, 0x00000000);
221 nv_icmd(dev, 0x0004db, 0x00000000);
222 nv_icmd(dev, 0x0004dc, 0x00000000);
223 nv_icmd(dev, 0x0004dd, 0x00000000);
224 nv_icmd(dev, 0x0004de, 0x00000000);
225 nv_icmd(dev, 0x0004df, 0x00000000);
226 nv_icmd(dev, 0x000720, 0x00000000);
227 nv_icmd(dev, 0x000721, 0x00000000);
228 nv_icmd(dev, 0x000722, 0x00000000);
229 nv_icmd(dev, 0x000723, 0x00000000);
230 nv_icmd(dev, 0x000724, 0x00000000);
231 nv_icmd(dev, 0x000725, 0x00000000);
232 nv_icmd(dev, 0x000726, 0x00000000);
233 nv_icmd(dev, 0x000727, 0x00000000);
234 nv_icmd(dev, 0x000728, 0x00000000);
235 nv_icmd(dev, 0x000729, 0x00000000);
236 nv_icmd(dev, 0x00072a, 0x00000000);
237 nv_icmd(dev, 0x00072b, 0x00000000);
238 nv_icmd(dev, 0x00072c, 0x00000000);
239 nv_icmd(dev, 0x00072d, 0x00000000);
240 nv_icmd(dev, 0x00072e, 0x00000000);
241 nv_icmd(dev, 0x00072f, 0x00000000);
242 nv_icmd(dev, 0x0008c0, 0x00000000);
243 nv_icmd(dev, 0x0008c1, 0x00000000);
244 nv_icmd(dev, 0x0008c2, 0x00000000);
245 nv_icmd(dev, 0x0008c3, 0x00000000);
246 nv_icmd(dev, 0x0008c4, 0x00000000);
247 nv_icmd(dev, 0x0008c5, 0x00000000);
248 nv_icmd(dev, 0x0008c6, 0x00000000);
249 nv_icmd(dev, 0x0008c7, 0x00000000);
250 nv_icmd(dev, 0x0008c8, 0x00000000);
251 nv_icmd(dev, 0x0008c9, 0x00000000);
252 nv_icmd(dev, 0x0008ca, 0x00000000);
253 nv_icmd(dev, 0x0008cb, 0x00000000);
254 nv_icmd(dev, 0x0008cc, 0x00000000);
255 nv_icmd(dev, 0x0008cd, 0x00000000);
256 nv_icmd(dev, 0x0008ce, 0x00000000);
257 nv_icmd(dev, 0x0008cf, 0x00000000);
258 nv_icmd(dev, 0x000890, 0x00000000);
259 nv_icmd(dev, 0x000891, 0x00000000);
260 nv_icmd(dev, 0x000892, 0x00000000);
261 nv_icmd(dev, 0x000893, 0x00000000);
262 nv_icmd(dev, 0x000894, 0x00000000);
263 nv_icmd(dev, 0x000895, 0x00000000);
264 nv_icmd(dev, 0x000896, 0x00000000);
265 nv_icmd(dev, 0x000897, 0x00000000);
266 nv_icmd(dev, 0x000898, 0x00000000);
267 nv_icmd(dev, 0x000899, 0x00000000);
268 nv_icmd(dev, 0x00089a, 0x00000000);
269 nv_icmd(dev, 0x00089b, 0x00000000);
270 nv_icmd(dev, 0x00089c, 0x00000000);
271 nv_icmd(dev, 0x00089d, 0x00000000);
272 nv_icmd(dev, 0x00089e, 0x00000000);
273 nv_icmd(dev, 0x00089f, 0x00000000);
274 nv_icmd(dev, 0x0008e0, 0x00000000);
275 nv_icmd(dev, 0x0008e1, 0x00000000);
276 nv_icmd(dev, 0x0008e2, 0x00000000);
277 nv_icmd(dev, 0x0008e3, 0x00000000);
278 nv_icmd(dev, 0x0008e4, 0x00000000);
279 nv_icmd(dev, 0x0008e5, 0x00000000);
280 nv_icmd(dev, 0x0008e6, 0x00000000);
281 nv_icmd(dev, 0x0008e7, 0x00000000);
282 nv_icmd(dev, 0x0008e8, 0x00000000);
283 nv_icmd(dev, 0x0008e9, 0x00000000);
284 nv_icmd(dev, 0x0008ea, 0x00000000);
285 nv_icmd(dev, 0x0008eb, 0x00000000);
286 nv_icmd(dev, 0x0008ec, 0x00000000);
287 nv_icmd(dev, 0x0008ed, 0x00000000);
288 nv_icmd(dev, 0x0008ee, 0x00000000);
289 nv_icmd(dev, 0x0008ef, 0x00000000);
290 nv_icmd(dev, 0x0008a0, 0x00000000);
291 nv_icmd(dev, 0x0008a1, 0x00000000);
292 nv_icmd(dev, 0x0008a2, 0x00000000);
293 nv_icmd(dev, 0x0008a3, 0x00000000);
294 nv_icmd(dev, 0x0008a4, 0x00000000);
295 nv_icmd(dev, 0x0008a5, 0x00000000);
296 nv_icmd(dev, 0x0008a6, 0x00000000);
297 nv_icmd(dev, 0x0008a7, 0x00000000);
298 nv_icmd(dev, 0x0008a8, 0x00000000);
299 nv_icmd(dev, 0x0008a9, 0x00000000);
300 nv_icmd(dev, 0x0008aa, 0x00000000);
301 nv_icmd(dev, 0x0008ab, 0x00000000);
302 nv_icmd(dev, 0x0008ac, 0x00000000);
303 nv_icmd(dev, 0x0008ad, 0x00000000);
304 nv_icmd(dev, 0x0008ae, 0x00000000);
305 nv_icmd(dev, 0x0008af, 0x00000000);
306 nv_icmd(dev, 0x0008f0, 0x00000000);
307 nv_icmd(dev, 0x0008f1, 0x00000000);
308 nv_icmd(dev, 0x0008f2, 0x00000000);
309 nv_icmd(dev, 0x0008f3, 0x00000000);
310 nv_icmd(dev, 0x0008f4, 0x00000000);
311 nv_icmd(dev, 0x0008f5, 0x00000000);
312 nv_icmd(dev, 0x0008f6, 0x00000000);
313 nv_icmd(dev, 0x0008f7, 0x00000000);
314 nv_icmd(dev, 0x0008f8, 0x00000000);
315 nv_icmd(dev, 0x0008f9, 0x00000000);
316 nv_icmd(dev, 0x0008fa, 0x00000000);
317 nv_icmd(dev, 0x0008fb, 0x00000000);
318 nv_icmd(dev, 0x0008fc, 0x00000000);
319 nv_icmd(dev, 0x0008fd, 0x00000000);
320 nv_icmd(dev, 0x0008fe, 0x00000000);
321 nv_icmd(dev, 0x0008ff, 0x00000000);
322 nv_icmd(dev, 0x00094c, 0x000000ff);
323 nv_icmd(dev, 0x00094d, 0xffffffff);
324 nv_icmd(dev, 0x00094e, 0x00000002);
325 nv_icmd(dev, 0x0002ec, 0x00000001);
326 nv_icmd(dev, 0x000303, 0x00000001);
327 nv_icmd(dev, 0x0002e6, 0x00000001);
328 nv_icmd(dev, 0x000466, 0x00000052);
329 nv_icmd(dev, 0x000301, 0x3f800000);
330 nv_icmd(dev, 0x000304, 0x30201000);
331 nv_icmd(dev, 0x000305, 0x70605040);
332 nv_icmd(dev, 0x000306, 0xb8a89888);
333 nv_icmd(dev, 0x000307, 0xf8e8d8c8);
334 nv_icmd(dev, 0x00030a, 0x00ffff00);
335 nv_icmd(dev, 0x00030b, 0x0000001a);
336 nv_icmd(dev, 0x00030c, 0x00000001);
337 nv_icmd(dev, 0x000318, 0x00000001);
338 nv_icmd(dev, 0x000340, 0x00000000);
339 nv_icmd(dev, 0x000375, 0x00000001);
340 nv_icmd(dev, 0x00037d, 0x00000006);
341 nv_icmd(dev, 0x0003a0, 0x00000002);
342 nv_icmd(dev, 0x0003aa, 0x00000001);
343 nv_icmd(dev, 0x0003a9, 0x00000001);
344 nv_icmd(dev, 0x000380, 0x00000001);
345 nv_icmd(dev, 0x000383, 0x00000011);
346 nv_icmd(dev, 0x000360, 0x00000040);
347 nv_icmd(dev, 0x000366, 0x00000000);
348 nv_icmd(dev, 0x000367, 0x00000000);
349 nv_icmd(dev, 0x000368, 0x00000fff);
350 nv_icmd(dev, 0x000370, 0x00000000);
351 nv_icmd(dev, 0x000371, 0x00000000);
352 nv_icmd(dev, 0x000372, 0x000fffff);
353 nv_icmd(dev, 0x00037a, 0x00000012);
354 nv_icmd(dev, 0x000619, 0x00000003);
355 nv_icmd(dev, 0x000811, 0x00000003);
356 nv_icmd(dev, 0x000812, 0x00000004);
357 nv_icmd(dev, 0x000813, 0x00000006);
358 nv_icmd(dev, 0x000814, 0x00000008);
359 nv_icmd(dev, 0x000815, 0x0000000b);
360 nv_icmd(dev, 0x000800, 0x00000001);
361 nv_icmd(dev, 0x000801, 0x00000001);
362 nv_icmd(dev, 0x000802, 0x00000001);
363 nv_icmd(dev, 0x000803, 0x00000001);
364 nv_icmd(dev, 0x000804, 0x00000001);
365 nv_icmd(dev, 0x000805, 0x00000001);
366 nv_icmd(dev, 0x000632, 0x00000001);
367 nv_icmd(dev, 0x000633, 0x00000002);
368 nv_icmd(dev, 0x000634, 0x00000003);
369 nv_icmd(dev, 0x000635, 0x00000004);
370 nv_icmd(dev, 0x000654, 0x3f800000);
371 nv_icmd(dev, 0x000657, 0x3f800000);
372 nv_icmd(dev, 0x000655, 0x3f800000);
373 nv_icmd(dev, 0x000656, 0x3f800000);
374 nv_icmd(dev, 0x0006cd, 0x3f800000);
375 nv_icmd(dev, 0x0007f5, 0x3f800000);
376 nv_icmd(dev, 0x0007dc, 0x39291909);
377 nv_icmd(dev, 0x0007dd, 0x79695949);
378 nv_icmd(dev, 0x0007de, 0xb9a99989);
379 nv_icmd(dev, 0x0007df, 0xf9e9d9c9);
380 nv_icmd(dev, 0x0007e8, 0x00003210);
381 nv_icmd(dev, 0x0007e9, 0x00007654);
382 nv_icmd(dev, 0x0007ea, 0x00000098);
383 nv_icmd(dev, 0x0007ec, 0x39291909);
384 nv_icmd(dev, 0x0007ed, 0x79695949);
385 nv_icmd(dev, 0x0007ee, 0xb9a99989);
386 nv_icmd(dev, 0x0007ef, 0xf9e9d9c9);
387 nv_icmd(dev, 0x0007f0, 0x00003210);
388 nv_icmd(dev, 0x0007f1, 0x00007654);
389 nv_icmd(dev, 0x0007f2, 0x00000098);
390 nv_icmd(dev, 0x0005a5, 0x00000001);
391 nv_icmd(dev, 0x000980, 0x00000000);
392 nv_icmd(dev, 0x000981, 0x00000000);
393 nv_icmd(dev, 0x000982, 0x00000000);
394 nv_icmd(dev, 0x000983, 0x00000000);
395 nv_icmd(dev, 0x000984, 0x00000000);
396 nv_icmd(dev, 0x000985, 0x00000000);
397 nv_icmd(dev, 0x000986, 0x00000000);
398 nv_icmd(dev, 0x000987, 0x00000000);
399 nv_icmd(dev, 0x000988, 0x00000000);
400 nv_icmd(dev, 0x000989, 0x00000000);
401 nv_icmd(dev, 0x00098a, 0x00000000);
402 nv_icmd(dev, 0x00098b, 0x00000000);
403 nv_icmd(dev, 0x00098c, 0x00000000);
404 nv_icmd(dev, 0x00098d, 0x00000000);
405 nv_icmd(dev, 0x00098e, 0x00000000);
406 nv_icmd(dev, 0x00098f, 0x00000000);
407 nv_icmd(dev, 0x000990, 0x00000000);
408 nv_icmd(dev, 0x000991, 0x00000000);
409 nv_icmd(dev, 0x000992, 0x00000000);
410 nv_icmd(dev, 0x000993, 0x00000000);
411 nv_icmd(dev, 0x000994, 0x00000000);
412 nv_icmd(dev, 0x000995, 0x00000000);
413 nv_icmd(dev, 0x000996, 0x00000000);
414 nv_icmd(dev, 0x000997, 0x00000000);
415 nv_icmd(dev, 0x000998, 0x00000000);
416 nv_icmd(dev, 0x000999, 0x00000000);
417 nv_icmd(dev, 0x00099a, 0x00000000);
418 nv_icmd(dev, 0x00099b, 0x00000000);
419 nv_icmd(dev, 0x00099c, 0x00000000);
420 nv_icmd(dev, 0x00099d, 0x00000000);
421 nv_icmd(dev, 0x00099e, 0x00000000);
422 nv_icmd(dev, 0x00099f, 0x00000000);
423 nv_icmd(dev, 0x0009a0, 0x00000000);
424 nv_icmd(dev, 0x0009a1, 0x00000000);
425 nv_icmd(dev, 0x0009a2, 0x00000000);
426 nv_icmd(dev, 0x0009a3, 0x00000000);
427 nv_icmd(dev, 0x0009a4, 0x00000000);
428 nv_icmd(dev, 0x0009a5, 0x00000000);
429 nv_icmd(dev, 0x0009a6, 0x00000000);
430 nv_icmd(dev, 0x0009a7, 0x00000000);
431 nv_icmd(dev, 0x0009a8, 0x00000000);
432 nv_icmd(dev, 0x0009a9, 0x00000000);
433 nv_icmd(dev, 0x0009aa, 0x00000000);
434 nv_icmd(dev, 0x0009ab, 0x00000000);
435 nv_icmd(dev, 0x0009ac, 0x00000000);
436 nv_icmd(dev, 0x0009ad, 0x00000000);
437 nv_icmd(dev, 0x0009ae, 0x00000000);
438 nv_icmd(dev, 0x0009af, 0x00000000);
439 nv_icmd(dev, 0x0009b0, 0x00000000);
440 nv_icmd(dev, 0x0009b1, 0x00000000);
441 nv_icmd(dev, 0x0009b2, 0x00000000);
442 nv_icmd(dev, 0x0009b3, 0x00000000);
443 nv_icmd(dev, 0x0009b4, 0x00000000);
444 nv_icmd(dev, 0x0009b5, 0x00000000);
445 nv_icmd(dev, 0x0009b6, 0x00000000);
446 nv_icmd(dev, 0x0009b7, 0x00000000);
447 nv_icmd(dev, 0x0009b8, 0x00000000);
448 nv_icmd(dev, 0x0009b9, 0x00000000);
449 nv_icmd(dev, 0x0009ba, 0x00000000);
450 nv_icmd(dev, 0x0009bb, 0x00000000);
451 nv_icmd(dev, 0x0009bc, 0x00000000);
452 nv_icmd(dev, 0x0009bd, 0x00000000);
453 nv_icmd(dev, 0x0009be, 0x00000000);
454 nv_icmd(dev, 0x0009bf, 0x00000000);
455 nv_icmd(dev, 0x0009c0, 0x00000000);
456 nv_icmd(dev, 0x0009c1, 0x00000000);
457 nv_icmd(dev, 0x0009c2, 0x00000000);
458 nv_icmd(dev, 0x0009c3, 0x00000000);
459 nv_icmd(dev, 0x0009c4, 0x00000000);
460 nv_icmd(dev, 0x0009c5, 0x00000000);
461 nv_icmd(dev, 0x0009c6, 0x00000000);
462 nv_icmd(dev, 0x0009c7, 0x00000000);
463 nv_icmd(dev, 0x0009c8, 0x00000000);
464 nv_icmd(dev, 0x0009c9, 0x00000000);
465 nv_icmd(dev, 0x0009ca, 0x00000000);
466 nv_icmd(dev, 0x0009cb, 0x00000000);
467 nv_icmd(dev, 0x0009cc, 0x00000000);
468 nv_icmd(dev, 0x0009cd, 0x00000000);
469 nv_icmd(dev, 0x0009ce, 0x00000000);
470 nv_icmd(dev, 0x0009cf, 0x00000000);
471 nv_icmd(dev, 0x0009d0, 0x00000000);
472 nv_icmd(dev, 0x0009d1, 0x00000000);
473 nv_icmd(dev, 0x0009d2, 0x00000000);
474 nv_icmd(dev, 0x0009d3, 0x00000000);
475 nv_icmd(dev, 0x0009d4, 0x00000000);
476 nv_icmd(dev, 0x0009d5, 0x00000000);
477 nv_icmd(dev, 0x0009d6, 0x00000000);
478 nv_icmd(dev, 0x0009d7, 0x00000000);
479 nv_icmd(dev, 0x0009d8, 0x00000000);
480 nv_icmd(dev, 0x0009d9, 0x00000000);
481 nv_icmd(dev, 0x0009da, 0x00000000);
482 nv_icmd(dev, 0x0009db, 0x00000000);
483 nv_icmd(dev, 0x0009dc, 0x00000000);
484 nv_icmd(dev, 0x0009dd, 0x00000000);
485 nv_icmd(dev, 0x0009de, 0x00000000);
486 nv_icmd(dev, 0x0009df, 0x00000000);
487 nv_icmd(dev, 0x0009e0, 0x00000000);
488 nv_icmd(dev, 0x0009e1, 0x00000000);
489 nv_icmd(dev, 0x0009e2, 0x00000000);
490 nv_icmd(dev, 0x0009e3, 0x00000000);
491 nv_icmd(dev, 0x0009e4, 0x00000000);
492 nv_icmd(dev, 0x0009e5, 0x00000000);
493 nv_icmd(dev, 0x0009e6, 0x00000000);
494 nv_icmd(dev, 0x0009e7, 0x00000000);
495 nv_icmd(dev, 0x0009e8, 0x00000000);
496 nv_icmd(dev, 0x0009e9, 0x00000000);
497 nv_icmd(dev, 0x0009ea, 0x00000000);
498 nv_icmd(dev, 0x0009eb, 0x00000000);
499 nv_icmd(dev, 0x0009ec, 0x00000000);
500 nv_icmd(dev, 0x0009ed, 0x00000000);
501 nv_icmd(dev, 0x0009ee, 0x00000000);
502 nv_icmd(dev, 0x0009ef, 0x00000000);
503 nv_icmd(dev, 0x0009f0, 0x00000000);
504 nv_icmd(dev, 0x0009f1, 0x00000000);
505 nv_icmd(dev, 0x0009f2, 0x00000000);
506 nv_icmd(dev, 0x0009f3, 0x00000000);
507 nv_icmd(dev, 0x0009f4, 0x00000000);
508 nv_icmd(dev, 0x0009f5, 0x00000000);
509 nv_icmd(dev, 0x0009f6, 0x00000000);
510 nv_icmd(dev, 0x0009f7, 0x00000000);
511 nv_icmd(dev, 0x0009f8, 0x00000000);
512 nv_icmd(dev, 0x0009f9, 0x00000000);
513 nv_icmd(dev, 0x0009fa, 0x00000000);
514 nv_icmd(dev, 0x0009fb, 0x00000000);
515 nv_icmd(dev, 0x0009fc, 0x00000000);
516 nv_icmd(dev, 0x0009fd, 0x00000000);
517 nv_icmd(dev, 0x0009fe, 0x00000000);
518 nv_icmd(dev, 0x0009ff, 0x00000000);
519 nv_icmd(dev, 0x000468, 0x00000004);
520 nv_icmd(dev, 0x00046c, 0x00000001);
521 nv_icmd(dev, 0x000470, 0x00000000);
522 nv_icmd(dev, 0x000471, 0x00000000);
523 nv_icmd(dev, 0x000472, 0x00000000);
524 nv_icmd(dev, 0x000473, 0x00000000);
525 nv_icmd(dev, 0x000474, 0x00000000);
526 nv_icmd(dev, 0x000475, 0x00000000);
527 nv_icmd(dev, 0x000476, 0x00000000);
528 nv_icmd(dev, 0x000477, 0x00000000);
529 nv_icmd(dev, 0x000478, 0x00000000);
530 nv_icmd(dev, 0x000479, 0x00000000);
531 nv_icmd(dev, 0x00047a, 0x00000000);
532 nv_icmd(dev, 0x00047b, 0x00000000);
533 nv_icmd(dev, 0x00047c, 0x00000000);
534 nv_icmd(dev, 0x00047d, 0x00000000);
535 nv_icmd(dev, 0x00047e, 0x00000000);
536 nv_icmd(dev, 0x00047f, 0x00000000);
537 nv_icmd(dev, 0x000480, 0x00000000);
538 nv_icmd(dev, 0x000481, 0x00000000);
539 nv_icmd(dev, 0x000482, 0x00000000);
540 nv_icmd(dev, 0x000483, 0x00000000);
541 nv_icmd(dev, 0x000484, 0x00000000);
542 nv_icmd(dev, 0x000485, 0x00000000);
543 nv_icmd(dev, 0x000486, 0x00000000);
544 nv_icmd(dev, 0x000487, 0x00000000);
545 nv_icmd(dev, 0x000488, 0x00000000);
546 nv_icmd(dev, 0x000489, 0x00000000);
547 nv_icmd(dev, 0x00048a, 0x00000000);
548 nv_icmd(dev, 0x00048b, 0x00000000);
549 nv_icmd(dev, 0x00048c, 0x00000000);
550 nv_icmd(dev, 0x00048d, 0x00000000);
551 nv_icmd(dev, 0x00048e, 0x00000000);
552 nv_icmd(dev, 0x00048f, 0x00000000);
553 nv_icmd(dev, 0x000490, 0x00000000);
554 nv_icmd(dev, 0x000491, 0x00000000);
555 nv_icmd(dev, 0x000492, 0x00000000);
556 nv_icmd(dev, 0x000493, 0x00000000);
557 nv_icmd(dev, 0x000494, 0x00000000);
558 nv_icmd(dev, 0x000495, 0x00000000);
559 nv_icmd(dev, 0x000496, 0x00000000);
560 nv_icmd(dev, 0x000497, 0x00000000);
561 nv_icmd(dev, 0x000498, 0x00000000);
562 nv_icmd(dev, 0x000499, 0x00000000);
563 nv_icmd(dev, 0x00049a, 0x00000000);
564 nv_icmd(dev, 0x00049b, 0x00000000);
565 nv_icmd(dev, 0x00049c, 0x00000000);
566 nv_icmd(dev, 0x00049d, 0x00000000);
567 nv_icmd(dev, 0x00049e, 0x00000000);
568 nv_icmd(dev, 0x00049f, 0x00000000);
569 nv_icmd(dev, 0x0004a0, 0x00000000);
570 nv_icmd(dev, 0x0004a1, 0x00000000);
571 nv_icmd(dev, 0x0004a2, 0x00000000);
572 nv_icmd(dev, 0x0004a3, 0x00000000);
573 nv_icmd(dev, 0x0004a4, 0x00000000);
574 nv_icmd(dev, 0x0004a5, 0x00000000);
575 nv_icmd(dev, 0x0004a6, 0x00000000);
576 nv_icmd(dev, 0x0004a7, 0x00000000);
577 nv_icmd(dev, 0x0004a8, 0x00000000);
578 nv_icmd(dev, 0x0004a9, 0x00000000);
579 nv_icmd(dev, 0x0004aa, 0x00000000);
580 nv_icmd(dev, 0x0004ab, 0x00000000);
581 nv_icmd(dev, 0x0004ac, 0x00000000);
582 nv_icmd(dev, 0x0004ad, 0x00000000);
583 nv_icmd(dev, 0x0004ae, 0x00000000);
584 nv_icmd(dev, 0x0004af, 0x00000000);
585 nv_icmd(dev, 0x0004b0, 0x00000000);
586 nv_icmd(dev, 0x0004b1, 0x00000000);
587 nv_icmd(dev, 0x0004b2, 0x00000000);
588 nv_icmd(dev, 0x0004b3, 0x00000000);
589 nv_icmd(dev, 0x0004b4, 0x00000000);
590 nv_icmd(dev, 0x0004b5, 0x00000000);
591 nv_icmd(dev, 0x0004b6, 0x00000000);
592 nv_icmd(dev, 0x0004b7, 0x00000000);
593 nv_icmd(dev, 0x0004b8, 0x00000000);
594 nv_icmd(dev, 0x0004b9, 0x00000000);
595 nv_icmd(dev, 0x0004ba, 0x00000000);
596 nv_icmd(dev, 0x0004bb, 0x00000000);
597 nv_icmd(dev, 0x0004bc, 0x00000000);
598 nv_icmd(dev, 0x0004bd, 0x00000000);
599 nv_icmd(dev, 0x0004be, 0x00000000);
600 nv_icmd(dev, 0x0004bf, 0x00000000);
601 nv_icmd(dev, 0x0004c0, 0x00000000);
602 nv_icmd(dev, 0x0004c1, 0x00000000);
603 nv_icmd(dev, 0x0004c2, 0x00000000);
604 nv_icmd(dev, 0x0004c3, 0x00000000);
605 nv_icmd(dev, 0x0004c4, 0x00000000);
606 nv_icmd(dev, 0x0004c5, 0x00000000);
607 nv_icmd(dev, 0x0004c6, 0x00000000);
608 nv_icmd(dev, 0x0004c7, 0x00000000);
609 nv_icmd(dev, 0x0004c8, 0x00000000);
610 nv_icmd(dev, 0x0004c9, 0x00000000);
611 nv_icmd(dev, 0x0004ca, 0x00000000);
612 nv_icmd(dev, 0x0004cb, 0x00000000);
613 nv_icmd(dev, 0x0004cc, 0x00000000);
614 nv_icmd(dev, 0x0004cd, 0x00000000);
615 nv_icmd(dev, 0x0004ce, 0x00000000);
616 nv_icmd(dev, 0x0004cf, 0x00000000);
617 nv_icmd(dev, 0x000510, 0x3f800000);
618 nv_icmd(dev, 0x000511, 0x3f800000);
619 nv_icmd(dev, 0x000512, 0x3f800000);
620 nv_icmd(dev, 0x000513, 0x3f800000);
621 nv_icmd(dev, 0x000514, 0x3f800000);
622 nv_icmd(dev, 0x000515, 0x3f800000);
623 nv_icmd(dev, 0x000516, 0x3f800000);
624 nv_icmd(dev, 0x000517, 0x3f800000);
625 nv_icmd(dev, 0x000518, 0x3f800000);
626 nv_icmd(dev, 0x000519, 0x3f800000);
627 nv_icmd(dev, 0x00051a, 0x3f800000);
628 nv_icmd(dev, 0x00051b, 0x3f800000);
629 nv_icmd(dev, 0x00051c, 0x3f800000);
630 nv_icmd(dev, 0x00051d, 0x3f800000);
631 nv_icmd(dev, 0x00051e, 0x3f800000);
632 nv_icmd(dev, 0x00051f, 0x3f800000);
633 nv_icmd(dev, 0x000520, 0x000002b6);
634 nv_icmd(dev, 0x000529, 0x00000001);
635 nv_icmd(dev, 0x000530, 0xffff0000);
636 nv_icmd(dev, 0x000531, 0xffff0000);
637 nv_icmd(dev, 0x000532, 0xffff0000);
638 nv_icmd(dev, 0x000533, 0xffff0000);
639 nv_icmd(dev, 0x000534, 0xffff0000);
640 nv_icmd(dev, 0x000535, 0xffff0000);
641 nv_icmd(dev, 0x000536, 0xffff0000);
642 nv_icmd(dev, 0x000537, 0xffff0000);
643 nv_icmd(dev, 0x000538, 0xffff0000);
644 nv_icmd(dev, 0x000539, 0xffff0000);
645 nv_icmd(dev, 0x00053a, 0xffff0000);
646 nv_icmd(dev, 0x00053b, 0xffff0000);
647 nv_icmd(dev, 0x00053c, 0xffff0000);
648 nv_icmd(dev, 0x00053d, 0xffff0000);
649 nv_icmd(dev, 0x00053e, 0xffff0000);
650 nv_icmd(dev, 0x00053f, 0xffff0000);
651 nv_icmd(dev, 0x000585, 0x0000003f);
652 nv_icmd(dev, 0x000576, 0x00000003);
653 nv_icmd(dev, 0x00057b, 0x00000059);
654 nv_icmd(dev, 0x000586, 0x00000040);
655 nv_icmd(dev, 0x000582, 0x00000080);
656 nv_icmd(dev, 0x000583, 0x00000080);
657 nv_icmd(dev, 0x0005c2, 0x00000001);
658 nv_icmd(dev, 0x000638, 0x00000001);
659 nv_icmd(dev, 0x000639, 0x00000001);
660 nv_icmd(dev, 0x00063a, 0x00000002);
661 nv_icmd(dev, 0x00063b, 0x00000001);
662 nv_icmd(dev, 0x00063c, 0x00000001);
663 nv_icmd(dev, 0x00063d, 0x00000002);
664 nv_icmd(dev, 0x00063e, 0x00000001);
665 nv_icmd(dev, 0x0008b8, 0x00000001);
666 nv_icmd(dev, 0x0008b9, 0x00000001);
667 nv_icmd(dev, 0x0008ba, 0x00000001);
668 nv_icmd(dev, 0x0008bb, 0x00000001);
669 nv_icmd(dev, 0x0008bc, 0x00000001);
670 nv_icmd(dev, 0x0008bd, 0x00000001);
671 nv_icmd(dev, 0x0008be, 0x00000001);
672 nv_icmd(dev, 0x0008bf, 0x00000001);
673 nv_icmd(dev, 0x000900, 0x00000001);
674 nv_icmd(dev, 0x000901, 0x00000001);
675 nv_icmd(dev, 0x000902, 0x00000001);
676 nv_icmd(dev, 0x000903, 0x00000001);
677 nv_icmd(dev, 0x000904, 0x00000001);
678 nv_icmd(dev, 0x000905, 0x00000001);
679 nv_icmd(dev, 0x000906, 0x00000001);
680 nv_icmd(dev, 0x000907, 0x00000001);
681 nv_icmd(dev, 0x000908, 0x00000002);
682 nv_icmd(dev, 0x000909, 0x00000002);
683 nv_icmd(dev, 0x00090a, 0x00000002);
684 nv_icmd(dev, 0x00090b, 0x00000002);
685 nv_icmd(dev, 0x00090c, 0x00000002);
686 nv_icmd(dev, 0x00090d, 0x00000002);
687 nv_icmd(dev, 0x00090e, 0x00000002);
688 nv_icmd(dev, 0x00090f, 0x00000002);
689 nv_icmd(dev, 0x000910, 0x00000001);
690 nv_icmd(dev, 0x000911, 0x00000001);
691 nv_icmd(dev, 0x000912, 0x00000001);
692 nv_icmd(dev, 0x000913, 0x00000001);
693 nv_icmd(dev, 0x000914, 0x00000001);
694 nv_icmd(dev, 0x000915, 0x00000001);
695 nv_icmd(dev, 0x000916, 0x00000001);
696 nv_icmd(dev, 0x000917, 0x00000001);
697 nv_icmd(dev, 0x000918, 0x00000001);
698 nv_icmd(dev, 0x000919, 0x00000001);
699 nv_icmd(dev, 0x00091a, 0x00000001);
700 nv_icmd(dev, 0x00091b, 0x00000001);
701 nv_icmd(dev, 0x00091c, 0x00000001);
702 nv_icmd(dev, 0x00091d, 0x00000001);
703 nv_icmd(dev, 0x00091e, 0x00000001);
704 nv_icmd(dev, 0x00091f, 0x00000001);
705 nv_icmd(dev, 0x000920, 0x00000002);
706 nv_icmd(dev, 0x000921, 0x00000002);
707 nv_icmd(dev, 0x000922, 0x00000002);
708 nv_icmd(dev, 0x000923, 0x00000002);
709 nv_icmd(dev, 0x000924, 0x00000002);
710 nv_icmd(dev, 0x000925, 0x00000002);
711 nv_icmd(dev, 0x000926, 0x00000002);
712 nv_icmd(dev, 0x000927, 0x00000002);
713 nv_icmd(dev, 0x000928, 0x00000001);
714 nv_icmd(dev, 0x000929, 0x00000001);
715 nv_icmd(dev, 0x00092a, 0x00000001);
716 nv_icmd(dev, 0x00092b, 0x00000001);
717 nv_icmd(dev, 0x00092c, 0x00000001);
718 nv_icmd(dev, 0x00092d, 0x00000001);
719 nv_icmd(dev, 0x00092e, 0x00000001);
720 nv_icmd(dev, 0x00092f, 0x00000001);
721 nv_icmd(dev, 0x000648, 0x00000001);
722 nv_icmd(dev, 0x000649, 0x00000001);
723 nv_icmd(dev, 0x00064a, 0x00000001);
724 nv_icmd(dev, 0x00064b, 0x00000001);
725 nv_icmd(dev, 0x00064c, 0x00000001);
726 nv_icmd(dev, 0x00064d, 0x00000001);
727 nv_icmd(dev, 0x00064e, 0x00000001);
728 nv_icmd(dev, 0x00064f, 0x00000001);
729 nv_icmd(dev, 0x000650, 0x00000001);
730 nv_icmd(dev, 0x000658, 0x0000000f);
731 nv_icmd(dev, 0x0007ff, 0x0000000a);
732 nv_icmd(dev, 0x00066a, 0x40000000);
733 nv_icmd(dev, 0x00066b, 0x10000000);
734 nv_icmd(dev, 0x00066c, 0xffff0000);
735 nv_icmd(dev, 0x00066d, 0xffff0000);
736 nv_icmd(dev, 0x0007af, 0x00000008);
737 nv_icmd(dev, 0x0007b0, 0x00000008);
738 nv_icmd(dev, 0x0007f6, 0x00000001);
739 nv_icmd(dev, 0x0006b2, 0x00000055);
740 nv_icmd(dev, 0x0007ad, 0x00000003);
741 nv_icmd(dev, 0x000937, 0x00000001);
742 nv_icmd(dev, 0x000971, 0x00000008);
743 nv_icmd(dev, 0x000972, 0x00000040);
744 nv_icmd(dev, 0x000973, 0x0000012c);
745 nv_icmd(dev, 0x00097c, 0x00000040);
746 nv_icmd(dev, 0x000979, 0x00000003);
747 nv_icmd(dev, 0x000975, 0x00000020);
748 nv_icmd(dev, 0x000976, 0x00000001);
749 nv_icmd(dev, 0x000977, 0x00000020);
750 nv_icmd(dev, 0x000978, 0x00000001);
751 nv_icmd(dev, 0x000957, 0x00000003);
752 nv_icmd(dev, 0x00095e, 0x20164010);
753 nv_icmd(dev, 0x00095f, 0x00000020);
754 nv_icmd(dev, 0x00097d, 0x00000020);
755 nv_icmd(dev, 0x000683, 0x00000006);
756 nv_icmd(dev, 0x000685, 0x003fffff);
757 nv_icmd(dev, 0x000687, 0x003fffff);
758 nv_icmd(dev, 0x0006a0, 0x00000005);
759 nv_icmd(dev, 0x000840, 0x00400008);
760 nv_icmd(dev, 0x000841, 0x08000080);
761 nv_icmd(dev, 0x000842, 0x00400008);
762 nv_icmd(dev, 0x000843, 0x08000080);
763 nv_icmd(dev, 0x000818, 0x00000000);
764 nv_icmd(dev, 0x000819, 0x00000000);
765 nv_icmd(dev, 0x00081a, 0x00000000);
766 nv_icmd(dev, 0x00081b, 0x00000000);
767 nv_icmd(dev, 0x00081c, 0x00000000);
768 nv_icmd(dev, 0x00081d, 0x00000000);
769 nv_icmd(dev, 0x00081e, 0x00000000);
770 nv_icmd(dev, 0x00081f, 0x00000000);
771 nv_icmd(dev, 0x000848, 0x00000000);
772 nv_icmd(dev, 0x000849, 0x00000000);
773 nv_icmd(dev, 0x00084a, 0x00000000);
774 nv_icmd(dev, 0x00084b, 0x00000000);
775 nv_icmd(dev, 0x00084c, 0x00000000);
776 nv_icmd(dev, 0x00084d, 0x00000000);
777 nv_icmd(dev, 0x00084e, 0x00000000);
778 nv_icmd(dev, 0x00084f, 0x00000000);
779 nv_icmd(dev, 0x000850, 0x00000000);
780 nv_icmd(dev, 0x000851, 0x00000000);
781 nv_icmd(dev, 0x000852, 0x00000000);
782 nv_icmd(dev, 0x000853, 0x00000000);
783 nv_icmd(dev, 0x000854, 0x00000000);
784 nv_icmd(dev, 0x000855, 0x00000000);
785 nv_icmd(dev, 0x000856, 0x00000000);
786 nv_icmd(dev, 0x000857, 0x00000000);
787 nv_icmd(dev, 0x000738, 0x00000000);
788 nv_icmd(dev, 0x0006aa, 0x00000001);
789 nv_icmd(dev, 0x0006ab, 0x00000002);
790 nv_icmd(dev, 0x0006ac, 0x00000080);
791 nv_icmd(dev, 0x0006ad, 0x00000100);
792 nv_icmd(dev, 0x0006ae, 0x00000100);
793 nv_icmd(dev, 0x0006b1, 0x00000011);
794 nv_icmd(dev, 0x0006bb, 0x000000cf);
795 nv_icmd(dev, 0x0006ce, 0x2a712488);
796 nv_icmd(dev, 0x000739, 0x4085c000);
797 nv_icmd(dev, 0x00073a, 0x00000080);
798 nv_icmd(dev, 0x000786, 0x80000100);
799 nv_icmd(dev, 0x00073c, 0x00010100);
800 nv_icmd(dev, 0x00073d, 0x02800000);
801 nv_icmd(dev, 0x000787, 0x000000cf);
802 nv_icmd(dev, 0x00078c, 0x00000008);
803 nv_icmd(dev, 0x000792, 0x00000001);
804 nv_icmd(dev, 0x000794, 0x00000001);
805 nv_icmd(dev, 0x000795, 0x00000001);
806 nv_icmd(dev, 0x000796, 0x00000001);
807 nv_icmd(dev, 0x000797, 0x000000cf);
808 nv_icmd(dev, 0x000836, 0x00000001);
809 nv_icmd(dev, 0x00079a, 0x00000002);
810 nv_icmd(dev, 0x000833, 0x04444480);
811 nv_icmd(dev, 0x0007a1, 0x00000001);
812 nv_icmd(dev, 0x0007a3, 0x00000001);
813 nv_icmd(dev, 0x0007a4, 0x00000001);
814 nv_icmd(dev, 0x0007a5, 0x00000001);
815 nv_icmd(dev, 0x000831, 0x00000004);
816 nv_icmd(dev, 0x000b07, 0x00000002);
817 nv_icmd(dev, 0x000b08, 0x00000100);
818 nv_icmd(dev, 0x000b09, 0x00000100);
819 nv_icmd(dev, 0x000b0a, 0x00000001);
820 nv_icmd(dev, 0x000a04, 0x000000ff);
821 nv_icmd(dev, 0x000a0b, 0x00000040);
822 nv_icmd(dev, 0x00097f, 0x00000100);
823 nv_icmd(dev, 0x000a02, 0x00000001);
824 nv_icmd(dev, 0x000809, 0x00000007);
825 nv_icmd(dev, 0x00c221, 0x00000040);
826 nv_icmd(dev, 0x00c1b0, 0x0000000f);
827 nv_icmd(dev, 0x00c1b1, 0x0000000f);
828 nv_icmd(dev, 0x00c1b2, 0x0000000f);
829 nv_icmd(dev, 0x00c1b3, 0x0000000f);
830 nv_icmd(dev, 0x00c1b4, 0x0000000f);
831 nv_icmd(dev, 0x00c1b5, 0x0000000f);
832 nv_icmd(dev, 0x00c1b6, 0x0000000f);
833 nv_icmd(dev, 0x00c1b7, 0x0000000f);
834 nv_icmd(dev, 0x00c1b8, 0x0fac6881);
835 nv_icmd(dev, 0x00c1b9, 0x00fac688);
836 nv_icmd(dev, 0x00c401, 0x00000001);
837 nv_icmd(dev, 0x00c402, 0x00010001);
838 nv_icmd(dev, 0x00c403, 0x00000001);
839 nv_icmd(dev, 0x00c404, 0x00000001);
840 nv_icmd(dev, 0x00c40e, 0x00000020);
841 nv_icmd(dev, 0x00c500, 0x00000003);
842 nv_icmd(dev, 0x01e100, 0x00000001);
843 nv_icmd(dev, 0x001000, 0x00000002);
844 nv_icmd(dev, 0x0006aa, 0x00000001);
845 nv_icmd(dev, 0x0006ad, 0x00000100);
846 nv_icmd(dev, 0x0006ae, 0x00000100);
847 nv_icmd(dev, 0x0006b1, 0x00000011);
848 nv_icmd(dev, 0x00078c, 0x00000008);
849 nv_icmd(dev, 0x000792, 0x00000001);
850 nv_icmd(dev, 0x000794, 0x00000001);
851 nv_icmd(dev, 0x000795, 0x00000001);
852 nv_icmd(dev, 0x000796, 0x00000001);
853 nv_icmd(dev, 0x000797, 0x000000cf);
854 nv_icmd(dev, 0x00079a, 0x00000002);
855 nv_icmd(dev, 0x000833, 0x04444480);
856 nv_icmd(dev, 0x0007a1, 0x00000001);
857 nv_icmd(dev, 0x0007a3, 0x00000001);
858 nv_icmd(dev, 0x0007a4, 0x00000001);
859 nv_icmd(dev, 0x0007a5, 0x00000001);
860 nv_icmd(dev, 0x000831, 0x00000004);
861 nv_icmd(dev, 0x01e100, 0x00000001);
862 nv_icmd(dev, 0x001000, 0x00000008);
863 nv_icmd(dev, 0x000039, 0x00000000);
864 nv_icmd(dev, 0x00003a, 0x00000000);
865 nv_icmd(dev, 0x00003b, 0x00000000);
866 nv_icmd(dev, 0x000380, 0x00000001);
867 nv_icmd(dev, 0x000366, 0x00000000);
868 nv_icmd(dev, 0x000367, 0x00000000);
869 nv_icmd(dev, 0x000368, 0x00000fff);
870 nv_icmd(dev, 0x000370, 0x00000000);
871 nv_icmd(dev, 0x000371, 0x00000000);
872 nv_icmd(dev, 0x000372, 0x000fffff);
873 nv_icmd(dev, 0x000813, 0x00000006);
874 nv_icmd(dev, 0x000814, 0x00000008);
875 nv_icmd(dev, 0x000957, 0x00000003);
876 nv_icmd(dev, 0x000818, 0x00000000);
877 nv_icmd(dev, 0x000819, 0x00000000);
878 nv_icmd(dev, 0x00081a, 0x00000000);
879 nv_icmd(dev, 0x00081b, 0x00000000);
880 nv_icmd(dev, 0x00081c, 0x00000000);
881 nv_icmd(dev, 0x00081d, 0x00000000);
882 nv_icmd(dev, 0x00081e, 0x00000000);
883 nv_icmd(dev, 0x00081f, 0x00000000);
884 nv_icmd(dev, 0x000848, 0x00000000);
885 nv_icmd(dev, 0x000849, 0x00000000);
886 nv_icmd(dev, 0x00084a, 0x00000000);
887 nv_icmd(dev, 0x00084b, 0x00000000);
888 nv_icmd(dev, 0x00084c, 0x00000000);
889 nv_icmd(dev, 0x00084d, 0x00000000);
890 nv_icmd(dev, 0x00084e, 0x00000000);
891 nv_icmd(dev, 0x00084f, 0x00000000);
892 nv_icmd(dev, 0x000850, 0x00000000);
893 nv_icmd(dev, 0x000851, 0x00000000);
894 nv_icmd(dev, 0x000852, 0x00000000);
895 nv_icmd(dev, 0x000853, 0x00000000);
896 nv_icmd(dev, 0x000854, 0x00000000);
897 nv_icmd(dev, 0x000855, 0x00000000);
898 nv_icmd(dev, 0x000856, 0x00000000);
899 nv_icmd(dev, 0x000857, 0x00000000);
900 nv_icmd(dev, 0x000738, 0x00000000);
901 nv_icmd(dev, 0x000b07, 0x00000002);
902 nv_icmd(dev, 0x000b08, 0x00000100);
903 nv_icmd(dev, 0x000b09, 0x00000100);
904 nv_icmd(dev, 0x000b0a, 0x00000001);
905 nv_icmd(dev, 0x000a04, 0x000000ff);
906 nv_icmd(dev, 0x00097f, 0x00000100);
907 nv_icmd(dev, 0x000a02, 0x00000001);
908 nv_icmd(dev, 0x000809, 0x00000007);
909 nv_icmd(dev, 0x00c221, 0x00000040);
910 nv_icmd(dev, 0x00c401, 0x00000001);
911 nv_icmd(dev, 0x00c402, 0x00010001);
912 nv_icmd(dev, 0x00c403, 0x00000001);
913 nv_icmd(dev, 0x00c404, 0x00000001);
914 nv_icmd(dev, 0x00c40e, 0x00000020);
915 nv_icmd(dev, 0x00c500, 0x00000003);
916 nv_icmd(dev, 0x01e100, 0x00000001);
917 nv_icmd(dev, 0x001000, 0x00000001);
918 nv_icmd(dev, 0x000b07, 0x00000002);
919 nv_icmd(dev, 0x000b08, 0x00000100);
920 nv_icmd(dev, 0x000b09, 0x00000100);
921 nv_icmd(dev, 0x000b0a, 0x00000001);
922 nv_icmd(dev, 0x01e100, 0x00000001);
923 nv_wr32(dev, 0x400208, 0x00000000);
924}
925
926static void
927nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
928{
929 nv_wr32(dev, 0x40448c, data);
930 nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class);
931}
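/*
 * Editorial note: nv_mthd() injects a single method call straight into
 * PGRAPH -- data goes to 0x40448c, and the 0x404488 write packs the
 * trigger (bit 31), the method offset (bits 14 and up) and the object
 * class (low bits) into one command word.
 */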
932
933static void
934nve0_grctx_generate_a097(struct drm_device *dev)
935{
936 nv_mthd(dev, 0xa097, 0x0800, 0x00000000);
937 nv_mthd(dev, 0xa097, 0x0840, 0x00000000);
938 nv_mthd(dev, 0xa097, 0x0880, 0x00000000);
939 nv_mthd(dev, 0xa097, 0x08c0, 0x00000000);
940 nv_mthd(dev, 0xa097, 0x0900, 0x00000000);
941 nv_mthd(dev, 0xa097, 0x0940, 0x00000000);
942 nv_mthd(dev, 0xa097, 0x0980, 0x00000000);
943 nv_mthd(dev, 0xa097, 0x09c0, 0x00000000);
944 nv_mthd(dev, 0xa097, 0x0804, 0x00000000);
945 nv_mthd(dev, 0xa097, 0x0844, 0x00000000);
946 nv_mthd(dev, 0xa097, 0x0884, 0x00000000);
947 nv_mthd(dev, 0xa097, 0x08c4, 0x00000000);
948 nv_mthd(dev, 0xa097, 0x0904, 0x00000000);
949 nv_mthd(dev, 0xa097, 0x0944, 0x00000000);
950 nv_mthd(dev, 0xa097, 0x0984, 0x00000000);
951 nv_mthd(dev, 0xa097, 0x09c4, 0x00000000);
952 nv_mthd(dev, 0xa097, 0x0808, 0x00000400);
953 nv_mthd(dev, 0xa097, 0x0848, 0x00000400);
954 nv_mthd(dev, 0xa097, 0x0888, 0x00000400);
955 nv_mthd(dev, 0xa097, 0x08c8, 0x00000400);
956 nv_mthd(dev, 0xa097, 0x0908, 0x00000400);
957 nv_mthd(dev, 0xa097, 0x0948, 0x00000400);
958 nv_mthd(dev, 0xa097, 0x0988, 0x00000400);
959 nv_mthd(dev, 0xa097, 0x09c8, 0x00000400);
960 nv_mthd(dev, 0xa097, 0x080c, 0x00000300);
961 nv_mthd(dev, 0xa097, 0x084c, 0x00000300);
962 nv_mthd(dev, 0xa097, 0x088c, 0x00000300);
963 nv_mthd(dev, 0xa097, 0x08cc, 0x00000300);
964 nv_mthd(dev, 0xa097, 0x090c, 0x00000300);
965 nv_mthd(dev, 0xa097, 0x094c, 0x00000300);
966 nv_mthd(dev, 0xa097, 0x098c, 0x00000300);
967 nv_mthd(dev, 0xa097, 0x09cc, 0x00000300);
968 nv_mthd(dev, 0xa097, 0x0810, 0x000000cf);
969 nv_mthd(dev, 0xa097, 0x0850, 0x00000000);
970 nv_mthd(dev, 0xa097, 0x0890, 0x00000000);
971 nv_mthd(dev, 0xa097, 0x08d0, 0x00000000);
972 nv_mthd(dev, 0xa097, 0x0910, 0x00000000);
973 nv_mthd(dev, 0xa097, 0x0950, 0x00000000);
974 nv_mthd(dev, 0xa097, 0x0990, 0x00000000);
975 nv_mthd(dev, 0xa097, 0x09d0, 0x00000000);
976 nv_mthd(dev, 0xa097, 0x0814, 0x00000040);
977 nv_mthd(dev, 0xa097, 0x0854, 0x00000040);
978 nv_mthd(dev, 0xa097, 0x0894, 0x00000040);
979 nv_mthd(dev, 0xa097, 0x08d4, 0x00000040);
980 nv_mthd(dev, 0xa097, 0x0914, 0x00000040);
981 nv_mthd(dev, 0xa097, 0x0954, 0x00000040);
982 nv_mthd(dev, 0xa097, 0x0994, 0x00000040);
983 nv_mthd(dev, 0xa097, 0x09d4, 0x00000040);
984 nv_mthd(dev, 0xa097, 0x0818, 0x00000001);
985 nv_mthd(dev, 0xa097, 0x0858, 0x00000001);
986 nv_mthd(dev, 0xa097, 0x0898, 0x00000001);
987 nv_mthd(dev, 0xa097, 0x08d8, 0x00000001);
988 nv_mthd(dev, 0xa097, 0x0918, 0x00000001);
989 nv_mthd(dev, 0xa097, 0x0958, 0x00000001);
990 nv_mthd(dev, 0xa097, 0x0998, 0x00000001);
991 nv_mthd(dev, 0xa097, 0x09d8, 0x00000001);
992 nv_mthd(dev, 0xa097, 0x081c, 0x00000000);
993 nv_mthd(dev, 0xa097, 0x085c, 0x00000000);
994 nv_mthd(dev, 0xa097, 0x089c, 0x00000000);
995 nv_mthd(dev, 0xa097, 0x08dc, 0x00000000);
996 nv_mthd(dev, 0xa097, 0x091c, 0x00000000);
997 nv_mthd(dev, 0xa097, 0x095c, 0x00000000);
998 nv_mthd(dev, 0xa097, 0x099c, 0x00000000);
999 nv_mthd(dev, 0xa097, 0x09dc, 0x00000000);
1000 nv_mthd(dev, 0xa097, 0x0820, 0x00000000);
1001 nv_mthd(dev, 0xa097, 0x0860, 0x00000000);
1002 nv_mthd(dev, 0xa097, 0x08a0, 0x00000000);
1003 nv_mthd(dev, 0xa097, 0x08e0, 0x00000000);
1004 nv_mthd(dev, 0xa097, 0x0920, 0x00000000);
1005 nv_mthd(dev, 0xa097, 0x0960, 0x00000000);
1006 nv_mthd(dev, 0xa097, 0x09a0, 0x00000000);
1007 nv_mthd(dev, 0xa097, 0x09e0, 0x00000000);
1008 nv_mthd(dev, 0xa097, 0x1c00, 0x00000000);
1009 nv_mthd(dev, 0xa097, 0x1c10, 0x00000000);
1010 nv_mthd(dev, 0xa097, 0x1c20, 0x00000000);
1011 nv_mthd(dev, 0xa097, 0x1c30, 0x00000000);
1012 nv_mthd(dev, 0xa097, 0x1c40, 0x00000000);
1013 nv_mthd(dev, 0xa097, 0x1c50, 0x00000000);
1014 nv_mthd(dev, 0xa097, 0x1c60, 0x00000000);
1015 nv_mthd(dev, 0xa097, 0x1c70, 0x00000000);
1016 nv_mthd(dev, 0xa097, 0x1c80, 0x00000000);
1017 nv_mthd(dev, 0xa097, 0x1c90, 0x00000000);
1018 nv_mthd(dev, 0xa097, 0x1ca0, 0x00000000);
1019 nv_mthd(dev, 0xa097, 0x1cb0, 0x00000000);
1020 nv_mthd(dev, 0xa097, 0x1cc0, 0x00000000);
1021 nv_mthd(dev, 0xa097, 0x1cd0, 0x00000000);
1022 nv_mthd(dev, 0xa097, 0x1ce0, 0x00000000);
1023 nv_mthd(dev, 0xa097, 0x1cf0, 0x00000000);
1024 nv_mthd(dev, 0xa097, 0x1c04, 0x00000000);
1025 nv_mthd(dev, 0xa097, 0x1c14, 0x00000000);
1026 nv_mthd(dev, 0xa097, 0x1c24, 0x00000000);
1027 nv_mthd(dev, 0xa097, 0x1c34, 0x00000000);
1028 nv_mthd(dev, 0xa097, 0x1c44, 0x00000000);
1029 nv_mthd(dev, 0xa097, 0x1c54, 0x00000000);
1030 nv_mthd(dev, 0xa097, 0x1c64, 0x00000000);
1031 nv_mthd(dev, 0xa097, 0x1c74, 0x00000000);
1032 nv_mthd(dev, 0xa097, 0x1c84, 0x00000000);
1033 nv_mthd(dev, 0xa097, 0x1c94, 0x00000000);
1034 nv_mthd(dev, 0xa097, 0x1ca4, 0x00000000);
1035 nv_mthd(dev, 0xa097, 0x1cb4, 0x00000000);
1036 nv_mthd(dev, 0xa097, 0x1cc4, 0x00000000);
1037 nv_mthd(dev, 0xa097, 0x1cd4, 0x00000000);
1038 nv_mthd(dev, 0xa097, 0x1ce4, 0x00000000);
1039 nv_mthd(dev, 0xa097, 0x1cf4, 0x00000000);
1040 nv_mthd(dev, 0xa097, 0x1c08, 0x00000000);
1041 nv_mthd(dev, 0xa097, 0x1c18, 0x00000000);
1042 nv_mthd(dev, 0xa097, 0x1c28, 0x00000000);
1043 nv_mthd(dev, 0xa097, 0x1c38, 0x00000000);
1044 nv_mthd(dev, 0xa097, 0x1c48, 0x00000000);
1045 nv_mthd(dev, 0xa097, 0x1c58, 0x00000000);
1046 nv_mthd(dev, 0xa097, 0x1c68, 0x00000000);
1047 nv_mthd(dev, 0xa097, 0x1c78, 0x00000000);
1048 nv_mthd(dev, 0xa097, 0x1c88, 0x00000000);
1049 nv_mthd(dev, 0xa097, 0x1c98, 0x00000000);
1050 nv_mthd(dev, 0xa097, 0x1ca8, 0x00000000);
1051 nv_mthd(dev, 0xa097, 0x1cb8, 0x00000000);
1052 nv_mthd(dev, 0xa097, 0x1cc8, 0x00000000);
1053 nv_mthd(dev, 0xa097, 0x1cd8, 0x00000000);
1054 nv_mthd(dev, 0xa097, 0x1ce8, 0x00000000);
1055 nv_mthd(dev, 0xa097, 0x1cf8, 0x00000000);
1056 nv_mthd(dev, 0xa097, 0x1c0c, 0x00000000);
1057 nv_mthd(dev, 0xa097, 0x1c1c, 0x00000000);
1058 nv_mthd(dev, 0xa097, 0x1c2c, 0x00000000);
1059 nv_mthd(dev, 0xa097, 0x1c3c, 0x00000000);
1060 nv_mthd(dev, 0xa097, 0x1c4c, 0x00000000);
1061 nv_mthd(dev, 0xa097, 0x1c5c, 0x00000000);
1062 nv_mthd(dev, 0xa097, 0x1c6c, 0x00000000);
1063 nv_mthd(dev, 0xa097, 0x1c7c, 0x00000000);
1064 nv_mthd(dev, 0xa097, 0x1c8c, 0x00000000);
1065 nv_mthd(dev, 0xa097, 0x1c9c, 0x00000000);
1066 nv_mthd(dev, 0xa097, 0x1cac, 0x00000000);
1067 nv_mthd(dev, 0xa097, 0x1cbc, 0x00000000);
1068 nv_mthd(dev, 0xa097, 0x1ccc, 0x00000000);
1069 nv_mthd(dev, 0xa097, 0x1cdc, 0x00000000);
1070 nv_mthd(dev, 0xa097, 0x1cec, 0x00000000);
1071 nv_mthd(dev, 0xa097, 0x1cfc, 0x00000000);
1072 nv_mthd(dev, 0xa097, 0x1d00, 0x00000000);
1073 nv_mthd(dev, 0xa097, 0x1d10, 0x00000000);
1074 nv_mthd(dev, 0xa097, 0x1d20, 0x00000000);
1075 nv_mthd(dev, 0xa097, 0x1d30, 0x00000000);
1076 nv_mthd(dev, 0xa097, 0x1d40, 0x00000000);
1077 nv_mthd(dev, 0xa097, 0x1d50, 0x00000000);
1078 nv_mthd(dev, 0xa097, 0x1d60, 0x00000000);
1079 nv_mthd(dev, 0xa097, 0x1d70, 0x00000000);
1080 nv_mthd(dev, 0xa097, 0x1d80, 0x00000000);
1081 nv_mthd(dev, 0xa097, 0x1d90, 0x00000000);
1082 nv_mthd(dev, 0xa097, 0x1da0, 0x00000000);
1083 nv_mthd(dev, 0xa097, 0x1db0, 0x00000000);
1084 nv_mthd(dev, 0xa097, 0x1dc0, 0x00000000);
1085 nv_mthd(dev, 0xa097, 0x1dd0, 0x00000000);
1086 nv_mthd(dev, 0xa097, 0x1de0, 0x00000000);
1087 nv_mthd(dev, 0xa097, 0x1df0, 0x00000000);
1088 nv_mthd(dev, 0xa097, 0x1d04, 0x00000000);
1089 nv_mthd(dev, 0xa097, 0x1d14, 0x00000000);
1090 nv_mthd(dev, 0xa097, 0x1d24, 0x00000000);
1091 nv_mthd(dev, 0xa097, 0x1d34, 0x00000000);
1092 nv_mthd(dev, 0xa097, 0x1d44, 0x00000000);
1093 nv_mthd(dev, 0xa097, 0x1d54, 0x00000000);
1094 nv_mthd(dev, 0xa097, 0x1d64, 0x00000000);
1095 nv_mthd(dev, 0xa097, 0x1d74, 0x00000000);
1096 nv_mthd(dev, 0xa097, 0x1d84, 0x00000000);
1097 nv_mthd(dev, 0xa097, 0x1d94, 0x00000000);
1098 nv_mthd(dev, 0xa097, 0x1da4, 0x00000000);
1099 nv_mthd(dev, 0xa097, 0x1db4, 0x00000000);
1100 nv_mthd(dev, 0xa097, 0x1dc4, 0x00000000);
1101 nv_mthd(dev, 0xa097, 0x1dd4, 0x00000000);
1102 nv_mthd(dev, 0xa097, 0x1de4, 0x00000000);
1103 nv_mthd(dev, 0xa097, 0x1df4, 0x00000000);
1104 nv_mthd(dev, 0xa097, 0x1d08, 0x00000000);
1105 nv_mthd(dev, 0xa097, 0x1d18, 0x00000000);
1106 nv_mthd(dev, 0xa097, 0x1d28, 0x00000000);
1107 nv_mthd(dev, 0xa097, 0x1d38, 0x00000000);
1108 nv_mthd(dev, 0xa097, 0x1d48, 0x00000000);
1109 nv_mthd(dev, 0xa097, 0x1d58, 0x00000000);
1110 nv_mthd(dev, 0xa097, 0x1d68, 0x00000000);
1111 nv_mthd(dev, 0xa097, 0x1d78, 0x00000000);
1112 nv_mthd(dev, 0xa097, 0x1d88, 0x00000000);
1113 nv_mthd(dev, 0xa097, 0x1d98, 0x00000000);
1114 nv_mthd(dev, 0xa097, 0x1da8, 0x00000000);
1115 nv_mthd(dev, 0xa097, 0x1db8, 0x00000000);
1116 nv_mthd(dev, 0xa097, 0x1dc8, 0x00000000);
1117 nv_mthd(dev, 0xa097, 0x1dd8, 0x00000000);
1118 nv_mthd(dev, 0xa097, 0x1de8, 0x00000000);
1119 nv_mthd(dev, 0xa097, 0x1df8, 0x00000000);
1120 nv_mthd(dev, 0xa097, 0x1d0c, 0x00000000);
1121 nv_mthd(dev, 0xa097, 0x1d1c, 0x00000000);
1122 nv_mthd(dev, 0xa097, 0x1d2c, 0x00000000);
1123 nv_mthd(dev, 0xa097, 0x1d3c, 0x00000000);
1124 nv_mthd(dev, 0xa097, 0x1d4c, 0x00000000);
1125 nv_mthd(dev, 0xa097, 0x1d5c, 0x00000000);
1126 nv_mthd(dev, 0xa097, 0x1d6c, 0x00000000);
1127 nv_mthd(dev, 0xa097, 0x1d7c, 0x00000000);
1128 nv_mthd(dev, 0xa097, 0x1d8c, 0x00000000);
1129 nv_mthd(dev, 0xa097, 0x1d9c, 0x00000000);
1130 nv_mthd(dev, 0xa097, 0x1dac, 0x00000000);
1131 nv_mthd(dev, 0xa097, 0x1dbc, 0x00000000);
1132 nv_mthd(dev, 0xa097, 0x1dcc, 0x00000000);
1133 nv_mthd(dev, 0xa097, 0x1ddc, 0x00000000);
1134 nv_mthd(dev, 0xa097, 0x1dec, 0x00000000);
1135 nv_mthd(dev, 0xa097, 0x1dfc, 0x00000000);
1136 nv_mthd(dev, 0xa097, 0x1f00, 0x00000000);
1137 nv_mthd(dev, 0xa097, 0x1f08, 0x00000000);
1138 nv_mthd(dev, 0xa097, 0x1f10, 0x00000000);
1139 nv_mthd(dev, 0xa097, 0x1f18, 0x00000000);
1140 nv_mthd(dev, 0xa097, 0x1f20, 0x00000000);
1141 nv_mthd(dev, 0xa097, 0x1f28, 0x00000000);
1142 nv_mthd(dev, 0xa097, 0x1f30, 0x00000000);
1143 nv_mthd(dev, 0xa097, 0x1f38, 0x00000000);
1144 nv_mthd(dev, 0xa097, 0x1f40, 0x00000000);
1145 nv_mthd(dev, 0xa097, 0x1f48, 0x00000000);
1146 nv_mthd(dev, 0xa097, 0x1f50, 0x00000000);
1147 nv_mthd(dev, 0xa097, 0x1f58, 0x00000000);
1148 nv_mthd(dev, 0xa097, 0x1f60, 0x00000000);
1149 nv_mthd(dev, 0xa097, 0x1f68, 0x00000000);
1150 nv_mthd(dev, 0xa097, 0x1f70, 0x00000000);
1151 nv_mthd(dev, 0xa097, 0x1f78, 0x00000000);
1152 nv_mthd(dev, 0xa097, 0x1f04, 0x00000000);
1153 nv_mthd(dev, 0xa097, 0x1f0c, 0x00000000);
1154 nv_mthd(dev, 0xa097, 0x1f14, 0x00000000);
1155 nv_mthd(dev, 0xa097, 0x1f1c, 0x00000000);
1156 nv_mthd(dev, 0xa097, 0x1f24, 0x00000000);
1157 nv_mthd(dev, 0xa097, 0x1f2c, 0x00000000);
1158 nv_mthd(dev, 0xa097, 0x1f34, 0x00000000);
1159 nv_mthd(dev, 0xa097, 0x1f3c, 0x00000000);
1160 nv_mthd(dev, 0xa097, 0x1f44, 0x00000000);
1161 nv_mthd(dev, 0xa097, 0x1f4c, 0x00000000);
1162 nv_mthd(dev, 0xa097, 0x1f54, 0x00000000);
1163 nv_mthd(dev, 0xa097, 0x1f5c, 0x00000000);
1164 nv_mthd(dev, 0xa097, 0x1f64, 0x00000000);
1165 nv_mthd(dev, 0xa097, 0x1f6c, 0x00000000);
1166 nv_mthd(dev, 0xa097, 0x1f74, 0x00000000);
1167 nv_mthd(dev, 0xa097, 0x1f7c, 0x00000000);
1168 nv_mthd(dev, 0xa097, 0x1f80, 0x00000000);
1169 nv_mthd(dev, 0xa097, 0x1f88, 0x00000000);
1170 nv_mthd(dev, 0xa097, 0x1f90, 0x00000000);
1171 nv_mthd(dev, 0xa097, 0x1f98, 0x00000000);
1172 nv_mthd(dev, 0xa097, 0x1fa0, 0x00000000);
1173 nv_mthd(dev, 0xa097, 0x1fa8, 0x00000000);
1174 nv_mthd(dev, 0xa097, 0x1fb0, 0x00000000);
1175 nv_mthd(dev, 0xa097, 0x1fb8, 0x00000000);
1176 nv_mthd(dev, 0xa097, 0x1fc0, 0x00000000);
1177 nv_mthd(dev, 0xa097, 0x1fc8, 0x00000000);
1178 nv_mthd(dev, 0xa097, 0x1fd0, 0x00000000);
1179 nv_mthd(dev, 0xa097, 0x1fd8, 0x00000000);
1180 nv_mthd(dev, 0xa097, 0x1fe0, 0x00000000);
1181 nv_mthd(dev, 0xa097, 0x1fe8, 0x00000000);
1182 nv_mthd(dev, 0xa097, 0x1ff0, 0x00000000);
1183 nv_mthd(dev, 0xa097, 0x1ff8, 0x00000000);
1184 nv_mthd(dev, 0xa097, 0x1f84, 0x00000000);
1185 nv_mthd(dev, 0xa097, 0x1f8c, 0x00000000);
1186 nv_mthd(dev, 0xa097, 0x1f94, 0x00000000);
1187 nv_mthd(dev, 0xa097, 0x1f9c, 0x00000000);
1188 nv_mthd(dev, 0xa097, 0x1fa4, 0x00000000);
1189 nv_mthd(dev, 0xa097, 0x1fac, 0x00000000);
1190 nv_mthd(dev, 0xa097, 0x1fb4, 0x00000000);
1191 nv_mthd(dev, 0xa097, 0x1fbc, 0x00000000);
1192 nv_mthd(dev, 0xa097, 0x1fc4, 0x00000000);
1193 nv_mthd(dev, 0xa097, 0x1fcc, 0x00000000);
1194 nv_mthd(dev, 0xa097, 0x1fd4, 0x00000000);
1195 nv_mthd(dev, 0xa097, 0x1fdc, 0x00000000);
1196 nv_mthd(dev, 0xa097, 0x1fe4, 0x00000000);
1197 nv_mthd(dev, 0xa097, 0x1fec, 0x00000000);
1198 nv_mthd(dev, 0xa097, 0x1ff4, 0x00000000);
1199 nv_mthd(dev, 0xa097, 0x1ffc, 0x00000000);
1200 nv_mthd(dev, 0xa097, 0x2000, 0x00000000);
1201 nv_mthd(dev, 0xa097, 0x2040, 0x00000011);
1202 nv_mthd(dev, 0xa097, 0x2080, 0x00000020);
1203 nv_mthd(dev, 0xa097, 0x20c0, 0x00000030);
1204 nv_mthd(dev, 0xa097, 0x2100, 0x00000040);
1205 nv_mthd(dev, 0xa097, 0x2140, 0x00000051);
1206 nv_mthd(dev, 0xa097, 0x200c, 0x00000001);
1207 nv_mthd(dev, 0xa097, 0x204c, 0x00000001);
1208 nv_mthd(dev, 0xa097, 0x208c, 0x00000001);
1209 nv_mthd(dev, 0xa097, 0x20cc, 0x00000001);
1210 nv_mthd(dev, 0xa097, 0x210c, 0x00000001);
1211 nv_mthd(dev, 0xa097, 0x214c, 0x00000001);
1212 nv_mthd(dev, 0xa097, 0x2010, 0x00000000);
1213 nv_mthd(dev, 0xa097, 0x2050, 0x00000000);
1214 nv_mthd(dev, 0xa097, 0x2090, 0x00000001);
1215 nv_mthd(dev, 0xa097, 0x20d0, 0x00000002);
1216 nv_mthd(dev, 0xa097, 0x2110, 0x00000003);
1217 nv_mthd(dev, 0xa097, 0x2150, 0x00000004);
1218 nv_mthd(dev, 0xa097, 0x0380, 0x00000000);
1219 nv_mthd(dev, 0xa097, 0x03a0, 0x00000000);
1220 nv_mthd(dev, 0xa097, 0x03c0, 0x00000000);
1221 nv_mthd(dev, 0xa097, 0x03e0, 0x00000000);
1222 nv_mthd(dev, 0xa097, 0x0384, 0x00000000);
1223 nv_mthd(dev, 0xa097, 0x03a4, 0x00000000);
1224 nv_mthd(dev, 0xa097, 0x03c4, 0x00000000);
1225 nv_mthd(dev, 0xa097, 0x03e4, 0x00000000);
1226 nv_mthd(dev, 0xa097, 0x0388, 0x00000000);
1227 nv_mthd(dev, 0xa097, 0x03a8, 0x00000000);
1228 nv_mthd(dev, 0xa097, 0x03c8, 0x00000000);
1229 nv_mthd(dev, 0xa097, 0x03e8, 0x00000000);
1230 nv_mthd(dev, 0xa097, 0x038c, 0x00000000);
1231 nv_mthd(dev, 0xa097, 0x03ac, 0x00000000);
1232 nv_mthd(dev, 0xa097, 0x03cc, 0x00000000);
1233 nv_mthd(dev, 0xa097, 0x03ec, 0x00000000);
1234 nv_mthd(dev, 0xa097, 0x0700, 0x00000000);
1235 nv_mthd(dev, 0xa097, 0x0710, 0x00000000);
1236 nv_mthd(dev, 0xa097, 0x0720, 0x00000000);
1237 nv_mthd(dev, 0xa097, 0x0730, 0x00000000);
1238 nv_mthd(dev, 0xa097, 0x0704, 0x00000000);
1239 nv_mthd(dev, 0xa097, 0x0714, 0x00000000);
1240 nv_mthd(dev, 0xa097, 0x0724, 0x00000000);
1241 nv_mthd(dev, 0xa097, 0x0734, 0x00000000);
1242 nv_mthd(dev, 0xa097, 0x0708, 0x00000000);
1243 nv_mthd(dev, 0xa097, 0x0718, 0x00000000);
1244 nv_mthd(dev, 0xa097, 0x0728, 0x00000000);
1245 nv_mthd(dev, 0xa097, 0x0738, 0x00000000);
1246 nv_mthd(dev, 0xa097, 0x2800, 0x00000000);
1247 nv_mthd(dev, 0xa097, 0x2804, 0x00000000);
1248 nv_mthd(dev, 0xa097, 0x2808, 0x00000000);
1249 nv_mthd(dev, 0xa097, 0x280c, 0x00000000);
1250 nv_mthd(dev, 0xa097, 0x2810, 0x00000000);
1251 nv_mthd(dev, 0xa097, 0x2814, 0x00000000);
1252 nv_mthd(dev, 0xa097, 0x2818, 0x00000000);
1253 nv_mthd(dev, 0xa097, 0x281c, 0x00000000);
1254 nv_mthd(dev, 0xa097, 0x2820, 0x00000000);
1255 nv_mthd(dev, 0xa097, 0x2824, 0x00000000);
1256 nv_mthd(dev, 0xa097, 0x2828, 0x00000000);
1257 nv_mthd(dev, 0xa097, 0x282c, 0x00000000);
1258 nv_mthd(dev, 0xa097, 0x2830, 0x00000000);
1259 nv_mthd(dev, 0xa097, 0x2834, 0x00000000);
1260 nv_mthd(dev, 0xa097, 0x2838, 0x00000000);
1261 nv_mthd(dev, 0xa097, 0x283c, 0x00000000);
1262 nv_mthd(dev, 0xa097, 0x2840, 0x00000000);
1263 nv_mthd(dev, 0xa097, 0x2844, 0x00000000);
1264 nv_mthd(dev, 0xa097, 0x2848, 0x00000000);
1265 nv_mthd(dev, 0xa097, 0x284c, 0x00000000);
1266 nv_mthd(dev, 0xa097, 0x2850, 0x00000000);
1267 nv_mthd(dev, 0xa097, 0x2854, 0x00000000);
1268 nv_mthd(dev, 0xa097, 0x2858, 0x00000000);
1269 nv_mthd(dev, 0xa097, 0x285c, 0x00000000);
1270 nv_mthd(dev, 0xa097, 0x2860, 0x00000000);
1271 nv_mthd(dev, 0xa097, 0x2864, 0x00000000);
1272 nv_mthd(dev, 0xa097, 0x2868, 0x00000000);
1273 nv_mthd(dev, 0xa097, 0x286c, 0x00000000);
1274 nv_mthd(dev, 0xa097, 0x2870, 0x00000000);
1275 nv_mthd(dev, 0xa097, 0x2874, 0x00000000);
1276 nv_mthd(dev, 0xa097, 0x2878, 0x00000000);
1277 nv_mthd(dev, 0xa097, 0x287c, 0x00000000);
1278 nv_mthd(dev, 0xa097, 0x2880, 0x00000000);
1279 nv_mthd(dev, 0xa097, 0x2884, 0x00000000);
1280 nv_mthd(dev, 0xa097, 0x2888, 0x00000000);
1281 nv_mthd(dev, 0xa097, 0x288c, 0x00000000);
1282 nv_mthd(dev, 0xa097, 0x2890, 0x00000000);
1283 nv_mthd(dev, 0xa097, 0x2894, 0x00000000);
1284 nv_mthd(dev, 0xa097, 0x2898, 0x00000000);
1285 nv_mthd(dev, 0xa097, 0x289c, 0x00000000);
1286 nv_mthd(dev, 0xa097, 0x28a0, 0x00000000);
1287 nv_mthd(dev, 0xa097, 0x28a4, 0x00000000);
1288 nv_mthd(dev, 0xa097, 0x28a8, 0x00000000);
1289 nv_mthd(dev, 0xa097, 0x28ac, 0x00000000);
1290 nv_mthd(dev, 0xa097, 0x28b0, 0x00000000);
1291 nv_mthd(dev, 0xa097, 0x28b4, 0x00000000);
1292 nv_mthd(dev, 0xa097, 0x28b8, 0x00000000);
1293 nv_mthd(dev, 0xa097, 0x28bc, 0x00000000);
1294 nv_mthd(dev, 0xa097, 0x28c0, 0x00000000);
1295 nv_mthd(dev, 0xa097, 0x28c4, 0x00000000);
1296 nv_mthd(dev, 0xa097, 0x28c8, 0x00000000);
1297 nv_mthd(dev, 0xa097, 0x28cc, 0x00000000);
1298 nv_mthd(dev, 0xa097, 0x28d0, 0x00000000);
1299 nv_mthd(dev, 0xa097, 0x28d4, 0x00000000);
1300 nv_mthd(dev, 0xa097, 0x28d8, 0x00000000);
1301 nv_mthd(dev, 0xa097, 0x28dc, 0x00000000);
1302 nv_mthd(dev, 0xa097, 0x28e0, 0x00000000);
1303 nv_mthd(dev, 0xa097, 0x28e4, 0x00000000);
1304 nv_mthd(dev, 0xa097, 0x28e8, 0x00000000);
1305 nv_mthd(dev, 0xa097, 0x28ec, 0x00000000);
1306 nv_mthd(dev, 0xa097, 0x28f0, 0x00000000);
1307 nv_mthd(dev, 0xa097, 0x28f4, 0x00000000);
1308 nv_mthd(dev, 0xa097, 0x28f8, 0x00000000);
1309 nv_mthd(dev, 0xa097, 0x28fc, 0x00000000);
1310 nv_mthd(dev, 0xa097, 0x2900, 0x00000000);
1311 nv_mthd(dev, 0xa097, 0x2904, 0x00000000);
1312 nv_mthd(dev, 0xa097, 0x2908, 0x00000000);
1313 nv_mthd(dev, 0xa097, 0x290c, 0x00000000);
1314 nv_mthd(dev, 0xa097, 0x2910, 0x00000000);
1315 nv_mthd(dev, 0xa097, 0x2914, 0x00000000);
1316 nv_mthd(dev, 0xa097, 0x2918, 0x00000000);
1317 nv_mthd(dev, 0xa097, 0x291c, 0x00000000);
1318 nv_mthd(dev, 0xa097, 0x2920, 0x00000000);
1319 nv_mthd(dev, 0xa097, 0x2924, 0x00000000);
1320 nv_mthd(dev, 0xa097, 0x2928, 0x00000000);
1321 nv_mthd(dev, 0xa097, 0x292c, 0x00000000);
1322 nv_mthd(dev, 0xa097, 0x2930, 0x00000000);
1323 nv_mthd(dev, 0xa097, 0x2934, 0x00000000);
1324 nv_mthd(dev, 0xa097, 0x2938, 0x00000000);
1325 nv_mthd(dev, 0xa097, 0x293c, 0x00000000);
1326 nv_mthd(dev, 0xa097, 0x2940, 0x00000000);
1327 nv_mthd(dev, 0xa097, 0x2944, 0x00000000);
1328 nv_mthd(dev, 0xa097, 0x2948, 0x00000000);
1329 nv_mthd(dev, 0xa097, 0x294c, 0x00000000);
1330 nv_mthd(dev, 0xa097, 0x2950, 0x00000000);
1331 nv_mthd(dev, 0xa097, 0x2954, 0x00000000);
1332 nv_mthd(dev, 0xa097, 0x2958, 0x00000000);
1333 nv_mthd(dev, 0xa097, 0x295c, 0x00000000);
1334 nv_mthd(dev, 0xa097, 0x2960, 0x00000000);
1335 nv_mthd(dev, 0xa097, 0x2964, 0x00000000);
1336 nv_mthd(dev, 0xa097, 0x2968, 0x00000000);
1337 nv_mthd(dev, 0xa097, 0x296c, 0x00000000);
1338 nv_mthd(dev, 0xa097, 0x2970, 0x00000000);
1339 nv_mthd(dev, 0xa097, 0x2974, 0x00000000);
1340 nv_mthd(dev, 0xa097, 0x2978, 0x00000000);
1341 nv_mthd(dev, 0xa097, 0x297c, 0x00000000);
1342 nv_mthd(dev, 0xa097, 0x2980, 0x00000000);
1343 nv_mthd(dev, 0xa097, 0x2984, 0x00000000);
1344 nv_mthd(dev, 0xa097, 0x2988, 0x00000000);
1345 nv_mthd(dev, 0xa097, 0x298c, 0x00000000);
1346 nv_mthd(dev, 0xa097, 0x2990, 0x00000000);
1347 nv_mthd(dev, 0xa097, 0x2994, 0x00000000);
1348 nv_mthd(dev, 0xa097, 0x2998, 0x00000000);
1349 nv_mthd(dev, 0xa097, 0x299c, 0x00000000);
1350 nv_mthd(dev, 0xa097, 0x29a0, 0x00000000);
1351 nv_mthd(dev, 0xa097, 0x29a4, 0x00000000);
1352 nv_mthd(dev, 0xa097, 0x29a8, 0x00000000);
1353 nv_mthd(dev, 0xa097, 0x29ac, 0x00000000);
1354 nv_mthd(dev, 0xa097, 0x29b0, 0x00000000);
1355 nv_mthd(dev, 0xa097, 0x29b4, 0x00000000);
1356 nv_mthd(dev, 0xa097, 0x29b8, 0x00000000);
1357 nv_mthd(dev, 0xa097, 0x29bc, 0x00000000);
1358 nv_mthd(dev, 0xa097, 0x29c0, 0x00000000);
1359 nv_mthd(dev, 0xa097, 0x29c4, 0x00000000);
1360 nv_mthd(dev, 0xa097, 0x29c8, 0x00000000);
1361 nv_mthd(dev, 0xa097, 0x29cc, 0x00000000);
1362 nv_mthd(dev, 0xa097, 0x29d0, 0x00000000);
1363 nv_mthd(dev, 0xa097, 0x29d4, 0x00000000);
1364 nv_mthd(dev, 0xa097, 0x29d8, 0x00000000);
1365 nv_mthd(dev, 0xa097, 0x29dc, 0x00000000);
1366 nv_mthd(dev, 0xa097, 0x29e0, 0x00000000);
1367 nv_mthd(dev, 0xa097, 0x29e4, 0x00000000);
1368 nv_mthd(dev, 0xa097, 0x29e8, 0x00000000);
1369 nv_mthd(dev, 0xa097, 0x29ec, 0x00000000);
1370 nv_mthd(dev, 0xa097, 0x29f0, 0x00000000);
1371 nv_mthd(dev, 0xa097, 0x29f4, 0x00000000);
1372 nv_mthd(dev, 0xa097, 0x29f8, 0x00000000);
1373 nv_mthd(dev, 0xa097, 0x29fc, 0x00000000);
1374 nv_mthd(dev, 0xa097, 0x0a00, 0x00000000);
1375 nv_mthd(dev, 0xa097, 0x0a20, 0x00000000);
1376 nv_mthd(dev, 0xa097, 0x0a40, 0x00000000);
1377 nv_mthd(dev, 0xa097, 0x0a60, 0x00000000);
1378 nv_mthd(dev, 0xa097, 0x0a80, 0x00000000);
1379 nv_mthd(dev, 0xa097, 0x0aa0, 0x00000000);
1380 nv_mthd(dev, 0xa097, 0x0ac0, 0x00000000);
1381 nv_mthd(dev, 0xa097, 0x0ae0, 0x00000000);
1382 nv_mthd(dev, 0xa097, 0x0b00, 0x00000000);
1383 nv_mthd(dev, 0xa097, 0x0b20, 0x00000000);
1384 nv_mthd(dev, 0xa097, 0x0b40, 0x00000000);
1385 nv_mthd(dev, 0xa097, 0x0b60, 0x00000000);
1386 nv_mthd(dev, 0xa097, 0x0b80, 0x00000000);
1387 nv_mthd(dev, 0xa097, 0x0ba0, 0x00000000);
1388 nv_mthd(dev, 0xa097, 0x0bc0, 0x00000000);
1389 nv_mthd(dev, 0xa097, 0x0be0, 0x00000000);
1390 nv_mthd(dev, 0xa097, 0x0a04, 0x00000000);
1391 nv_mthd(dev, 0xa097, 0x0a24, 0x00000000);
1392 nv_mthd(dev, 0xa097, 0x0a44, 0x00000000);
1393 nv_mthd(dev, 0xa097, 0x0a64, 0x00000000);
1394 nv_mthd(dev, 0xa097, 0x0a84, 0x00000000);
1395 nv_mthd(dev, 0xa097, 0x0aa4, 0x00000000);
1396 nv_mthd(dev, 0xa097, 0x0ac4, 0x00000000);
1397 nv_mthd(dev, 0xa097, 0x0ae4, 0x00000000);
1398 nv_mthd(dev, 0xa097, 0x0b04, 0x00000000);
1399 nv_mthd(dev, 0xa097, 0x0b24, 0x00000000);
1400 nv_mthd(dev, 0xa097, 0x0b44, 0x00000000);
1401 nv_mthd(dev, 0xa097, 0x0b64, 0x00000000);
1402 nv_mthd(dev, 0xa097, 0x0b84, 0x00000000);
1403 nv_mthd(dev, 0xa097, 0x0ba4, 0x00000000);
1404 nv_mthd(dev, 0xa097, 0x0bc4, 0x00000000);
1405 nv_mthd(dev, 0xa097, 0x0be4, 0x00000000);
1406 nv_mthd(dev, 0xa097, 0x0a08, 0x00000000);
1407 nv_mthd(dev, 0xa097, 0x0a28, 0x00000000);
1408 nv_mthd(dev, 0xa097, 0x0a48, 0x00000000);
1409 nv_mthd(dev, 0xa097, 0x0a68, 0x00000000);
1410 nv_mthd(dev, 0xa097, 0x0a88, 0x00000000);
1411 nv_mthd(dev, 0xa097, 0x0aa8, 0x00000000);
1412 nv_mthd(dev, 0xa097, 0x0ac8, 0x00000000);
1413 nv_mthd(dev, 0xa097, 0x0ae8, 0x00000000);
1414 nv_mthd(dev, 0xa097, 0x0b08, 0x00000000);
1415 nv_mthd(dev, 0xa097, 0x0b28, 0x00000000);
1416 nv_mthd(dev, 0xa097, 0x0b48, 0x00000000);
1417 nv_mthd(dev, 0xa097, 0x0b68, 0x00000000);
1418 nv_mthd(dev, 0xa097, 0x0b88, 0x00000000);
1419 nv_mthd(dev, 0xa097, 0x0ba8, 0x00000000);
1420 nv_mthd(dev, 0xa097, 0x0bc8, 0x00000000);
1421 nv_mthd(dev, 0xa097, 0x0be8, 0x00000000);
1422 nv_mthd(dev, 0xa097, 0x0a0c, 0x00000000);
1423 nv_mthd(dev, 0xa097, 0x0a2c, 0x00000000);
1424 nv_mthd(dev, 0xa097, 0x0a4c, 0x00000000);
1425 nv_mthd(dev, 0xa097, 0x0a6c, 0x00000000);
1426 nv_mthd(dev, 0xa097, 0x0a8c, 0x00000000);
1427 nv_mthd(dev, 0xa097, 0x0aac, 0x00000000);
1428 nv_mthd(dev, 0xa097, 0x0acc, 0x00000000);
1429 nv_mthd(dev, 0xa097, 0x0aec, 0x00000000);
1430 nv_mthd(dev, 0xa097, 0x0b0c, 0x00000000);
1431 nv_mthd(dev, 0xa097, 0x0b2c, 0x00000000);
1432 nv_mthd(dev, 0xa097, 0x0b4c, 0x00000000);
1433 nv_mthd(dev, 0xa097, 0x0b6c, 0x00000000);
1434 nv_mthd(dev, 0xa097, 0x0b8c, 0x00000000);
1435 nv_mthd(dev, 0xa097, 0x0bac, 0x00000000);
1436 nv_mthd(dev, 0xa097, 0x0bcc, 0x00000000);
1437 nv_mthd(dev, 0xa097, 0x0bec, 0x00000000);
1438 nv_mthd(dev, 0xa097, 0x0a10, 0x00000000);
1439 nv_mthd(dev, 0xa097, 0x0a30, 0x00000000);
1440 nv_mthd(dev, 0xa097, 0x0a50, 0x00000000);
1441 nv_mthd(dev, 0xa097, 0x0a70, 0x00000000);
1442 nv_mthd(dev, 0xa097, 0x0a90, 0x00000000);
1443 nv_mthd(dev, 0xa097, 0x0ab0, 0x00000000);
1444 nv_mthd(dev, 0xa097, 0x0ad0, 0x00000000);
1445 nv_mthd(dev, 0xa097, 0x0af0, 0x00000000);
1446 nv_mthd(dev, 0xa097, 0x0b10, 0x00000000);
1447 nv_mthd(dev, 0xa097, 0x0b30, 0x00000000);
1448 nv_mthd(dev, 0xa097, 0x0b50, 0x00000000);
1449 nv_mthd(dev, 0xa097, 0x0b70, 0x00000000);
1450 nv_mthd(dev, 0xa097, 0x0b90, 0x00000000);
1451 nv_mthd(dev, 0xa097, 0x0bb0, 0x00000000);
1452 nv_mthd(dev, 0xa097, 0x0bd0, 0x00000000);
1453 nv_mthd(dev, 0xa097, 0x0bf0, 0x00000000);
1454 nv_mthd(dev, 0xa097, 0x0a14, 0x00000000);
1455 nv_mthd(dev, 0xa097, 0x0a34, 0x00000000);
1456 nv_mthd(dev, 0xa097, 0x0a54, 0x00000000);
1457 nv_mthd(dev, 0xa097, 0x0a74, 0x00000000);
1458 nv_mthd(dev, 0xa097, 0x0a94, 0x00000000);
1459 nv_mthd(dev, 0xa097, 0x0ab4, 0x00000000);
1460 nv_mthd(dev, 0xa097, 0x0ad4, 0x00000000);
1461 nv_mthd(dev, 0xa097, 0x0af4, 0x00000000);
1462 nv_mthd(dev, 0xa097, 0x0b14, 0x00000000);
1463 nv_mthd(dev, 0xa097, 0x0b34, 0x00000000);
1464 nv_mthd(dev, 0xa097, 0x0b54, 0x00000000);
1465 nv_mthd(dev, 0xa097, 0x0b74, 0x00000000);
1466 nv_mthd(dev, 0xa097, 0x0b94, 0x00000000);
1467 nv_mthd(dev, 0xa097, 0x0bb4, 0x00000000);
1468 nv_mthd(dev, 0xa097, 0x0bd4, 0x00000000);
1469 nv_mthd(dev, 0xa097, 0x0bf4, 0x00000000);
1470 nv_mthd(dev, 0xa097, 0x0c00, 0x00000000);
1471 nv_mthd(dev, 0xa097, 0x0c10, 0x00000000);
1472 nv_mthd(dev, 0xa097, 0x0c20, 0x00000000);
1473 nv_mthd(dev, 0xa097, 0x0c30, 0x00000000);
1474 nv_mthd(dev, 0xa097, 0x0c40, 0x00000000);
1475 nv_mthd(dev, 0xa097, 0x0c50, 0x00000000);
1476 nv_mthd(dev, 0xa097, 0x0c60, 0x00000000);
1477 nv_mthd(dev, 0xa097, 0x0c70, 0x00000000);
1478 nv_mthd(dev, 0xa097, 0x0c80, 0x00000000);
1479 nv_mthd(dev, 0xa097, 0x0c90, 0x00000000);
1480 nv_mthd(dev, 0xa097, 0x0ca0, 0x00000000);
1481 nv_mthd(dev, 0xa097, 0x0cb0, 0x00000000);
1482 nv_mthd(dev, 0xa097, 0x0cc0, 0x00000000);
1483 nv_mthd(dev, 0xa097, 0x0cd0, 0x00000000);
1484 nv_mthd(dev, 0xa097, 0x0ce0, 0x00000000);
1485 nv_mthd(dev, 0xa097, 0x0cf0, 0x00000000);
1486 nv_mthd(dev, 0xa097, 0x0c04, 0x00000000);
1487 nv_mthd(dev, 0xa097, 0x0c14, 0x00000000);
1488 nv_mthd(dev, 0xa097, 0x0c24, 0x00000000);
1489 nv_mthd(dev, 0xa097, 0x0c34, 0x00000000);
1490 nv_mthd(dev, 0xa097, 0x0c44, 0x00000000);
1491 nv_mthd(dev, 0xa097, 0x0c54, 0x00000000);
1492 nv_mthd(dev, 0xa097, 0x0c64, 0x00000000);
1493 nv_mthd(dev, 0xa097, 0x0c74, 0x00000000);
1494 nv_mthd(dev, 0xa097, 0x0c84, 0x00000000);
1495 nv_mthd(dev, 0xa097, 0x0c94, 0x00000000);
1496 nv_mthd(dev, 0xa097, 0x0ca4, 0x00000000);
1497 nv_mthd(dev, 0xa097, 0x0cb4, 0x00000000);
1498 nv_mthd(dev, 0xa097, 0x0cc4, 0x00000000);
1499 nv_mthd(dev, 0xa097, 0x0cd4, 0x00000000);
1500 nv_mthd(dev, 0xa097, 0x0ce4, 0x00000000);
1501 nv_mthd(dev, 0xa097, 0x0cf4, 0x00000000);
1502 nv_mthd(dev, 0xa097, 0x0c08, 0x00000000);
1503 nv_mthd(dev, 0xa097, 0x0c18, 0x00000000);
1504 nv_mthd(dev, 0xa097, 0x0c28, 0x00000000);
1505 nv_mthd(dev, 0xa097, 0x0c38, 0x00000000);
1506 nv_mthd(dev, 0xa097, 0x0c48, 0x00000000);
1507 nv_mthd(dev, 0xa097, 0x0c58, 0x00000000);
1508 nv_mthd(dev, 0xa097, 0x0c68, 0x00000000);
1509 nv_mthd(dev, 0xa097, 0x0c78, 0x00000000);
1510 nv_mthd(dev, 0xa097, 0x0c88, 0x00000000);
1511 nv_mthd(dev, 0xa097, 0x0c98, 0x00000000);
1512 nv_mthd(dev, 0xa097, 0x0ca8, 0x00000000);
1513 nv_mthd(dev, 0xa097, 0x0cb8, 0x00000000);
1514 nv_mthd(dev, 0xa097, 0x0cc8, 0x00000000);
1515 nv_mthd(dev, 0xa097, 0x0cd8, 0x00000000);
1516 nv_mthd(dev, 0xa097, 0x0ce8, 0x00000000);
1517 nv_mthd(dev, 0xa097, 0x0cf8, 0x00000000);
1518 nv_mthd(dev, 0xa097, 0x0c0c, 0x3f800000);
1519 nv_mthd(dev, 0xa097, 0x0c1c, 0x3f800000);
1520 nv_mthd(dev, 0xa097, 0x0c2c, 0x3f800000);
1521 nv_mthd(dev, 0xa097, 0x0c3c, 0x3f800000);
1522 nv_mthd(dev, 0xa097, 0x0c4c, 0x3f800000);
1523 nv_mthd(dev, 0xa097, 0x0c5c, 0x3f800000);
1524 nv_mthd(dev, 0xa097, 0x0c6c, 0x3f800000);
1525 nv_mthd(dev, 0xa097, 0x0c7c, 0x3f800000);
1526 nv_mthd(dev, 0xa097, 0x0c8c, 0x3f800000);
1527 nv_mthd(dev, 0xa097, 0x0c9c, 0x3f800000);
1528 nv_mthd(dev, 0xa097, 0x0cac, 0x3f800000);
1529 nv_mthd(dev, 0xa097, 0x0cbc, 0x3f800000);
1530 nv_mthd(dev, 0xa097, 0x0ccc, 0x3f800000);
1531 nv_mthd(dev, 0xa097, 0x0cdc, 0x3f800000);
1532 nv_mthd(dev, 0xa097, 0x0cec, 0x3f800000);
1533 nv_mthd(dev, 0xa097, 0x0cfc, 0x3f800000);
1534 nv_mthd(dev, 0xa097, 0x0d00, 0xffff0000);
1535 nv_mthd(dev, 0xa097, 0x0d08, 0xffff0000);
1536 nv_mthd(dev, 0xa097, 0x0d10, 0xffff0000);
1537 nv_mthd(dev, 0xa097, 0x0d18, 0xffff0000);
1538 nv_mthd(dev, 0xa097, 0x0d20, 0xffff0000);
1539 nv_mthd(dev, 0xa097, 0x0d28, 0xffff0000);
1540 nv_mthd(dev, 0xa097, 0x0d30, 0xffff0000);
1541 nv_mthd(dev, 0xa097, 0x0d38, 0xffff0000);
1542 nv_mthd(dev, 0xa097, 0x0d04, 0xffff0000);
1543 nv_mthd(dev, 0xa097, 0x0d0c, 0xffff0000);
1544 nv_mthd(dev, 0xa097, 0x0d14, 0xffff0000);
1545 nv_mthd(dev, 0xa097, 0x0d1c, 0xffff0000);
1546 nv_mthd(dev, 0xa097, 0x0d24, 0xffff0000);
1547 nv_mthd(dev, 0xa097, 0x0d2c, 0xffff0000);
1548 nv_mthd(dev, 0xa097, 0x0d34, 0xffff0000);
1549 nv_mthd(dev, 0xa097, 0x0d3c, 0xffff0000);
1550 nv_mthd(dev, 0xa097, 0x0e00, 0x00000000);
1551 nv_mthd(dev, 0xa097, 0x0e10, 0x00000000);
1552 nv_mthd(dev, 0xa097, 0x0e20, 0x00000000);
1553 nv_mthd(dev, 0xa097, 0x0e30, 0x00000000);
1554 nv_mthd(dev, 0xa097, 0x0e40, 0x00000000);
1555 nv_mthd(dev, 0xa097, 0x0e50, 0x00000000);
1556 nv_mthd(dev, 0xa097, 0x0e60, 0x00000000);
1557 nv_mthd(dev, 0xa097, 0x0e70, 0x00000000);
1558 nv_mthd(dev, 0xa097, 0x0e80, 0x00000000);
1559 nv_mthd(dev, 0xa097, 0x0e90, 0x00000000);
1560 nv_mthd(dev, 0xa097, 0x0ea0, 0x00000000);
1561 nv_mthd(dev, 0xa097, 0x0eb0, 0x00000000);
1562 nv_mthd(dev, 0xa097, 0x0ec0, 0x00000000);
1563 nv_mthd(dev, 0xa097, 0x0ed0, 0x00000000);
1564 nv_mthd(dev, 0xa097, 0x0ee0, 0x00000000);
1565 nv_mthd(dev, 0xa097, 0x0ef0, 0x00000000);
1566 nv_mthd(dev, 0xa097, 0x0e04, 0xffff0000);
1567 nv_mthd(dev, 0xa097, 0x0e14, 0xffff0000);
1568 nv_mthd(dev, 0xa097, 0x0e24, 0xffff0000);
1569 nv_mthd(dev, 0xa097, 0x0e34, 0xffff0000);
1570 nv_mthd(dev, 0xa097, 0x0e44, 0xffff0000);
1571 nv_mthd(dev, 0xa097, 0x0e54, 0xffff0000);
1572 nv_mthd(dev, 0xa097, 0x0e64, 0xffff0000);
1573 nv_mthd(dev, 0xa097, 0x0e74, 0xffff0000);
1574 nv_mthd(dev, 0xa097, 0x0e84, 0xffff0000);
1575 nv_mthd(dev, 0xa097, 0x0e94, 0xffff0000);
1576 nv_mthd(dev, 0xa097, 0x0ea4, 0xffff0000);
1577 nv_mthd(dev, 0xa097, 0x0eb4, 0xffff0000);
1578 nv_mthd(dev, 0xa097, 0x0ec4, 0xffff0000);
1579 nv_mthd(dev, 0xa097, 0x0ed4, 0xffff0000);
1580 nv_mthd(dev, 0xa097, 0x0ee4, 0xffff0000);
1581 nv_mthd(dev, 0xa097, 0x0ef4, 0xffff0000);
1582 nv_mthd(dev, 0xa097, 0x0e08, 0xffff0000);
1583 nv_mthd(dev, 0xa097, 0x0e18, 0xffff0000);
1584 nv_mthd(dev, 0xa097, 0x0e28, 0xffff0000);
1585 nv_mthd(dev, 0xa097, 0x0e38, 0xffff0000);
1586 nv_mthd(dev, 0xa097, 0x0e48, 0xffff0000);
1587 nv_mthd(dev, 0xa097, 0x0e58, 0xffff0000);
1588 nv_mthd(dev, 0xa097, 0x0e68, 0xffff0000);
1589 nv_mthd(dev, 0xa097, 0x0e78, 0xffff0000);
1590 nv_mthd(dev, 0xa097, 0x0e88, 0xffff0000);
1591 nv_mthd(dev, 0xa097, 0x0e98, 0xffff0000);
1592 nv_mthd(dev, 0xa097, 0x0ea8, 0xffff0000);
1593 nv_mthd(dev, 0xa097, 0x0eb8, 0xffff0000);
1594 nv_mthd(dev, 0xa097, 0x0ec8, 0xffff0000);
1595 nv_mthd(dev, 0xa097, 0x0ed8, 0xffff0000);
1596 nv_mthd(dev, 0xa097, 0x0ee8, 0xffff0000);
1597 nv_mthd(dev, 0xa097, 0x0ef8, 0xffff0000);
1598 nv_mthd(dev, 0xa097, 0x0d40, 0x00000000);
1599 nv_mthd(dev, 0xa097, 0x0d48, 0x00000000);
1600 nv_mthd(dev, 0xa097, 0x0d50, 0x00000000);
1601 nv_mthd(dev, 0xa097, 0x0d58, 0x00000000);
1602 nv_mthd(dev, 0xa097, 0x0d44, 0x00000000);
1603 nv_mthd(dev, 0xa097, 0x0d4c, 0x00000000);
1604 nv_mthd(dev, 0xa097, 0x0d54, 0x00000000);
1605 nv_mthd(dev, 0xa097, 0x0d5c, 0x00000000);
1606 nv_mthd(dev, 0xa097, 0x1e00, 0x00000001);
1607 nv_mthd(dev, 0xa097, 0x1e20, 0x00000001);
1608 nv_mthd(dev, 0xa097, 0x1e40, 0x00000001);
1609 nv_mthd(dev, 0xa097, 0x1e60, 0x00000001);
1610 nv_mthd(dev, 0xa097, 0x1e80, 0x00000001);
1611 nv_mthd(dev, 0xa097, 0x1ea0, 0x00000001);
1612 nv_mthd(dev, 0xa097, 0x1ec0, 0x00000001);
1613 nv_mthd(dev, 0xa097, 0x1ee0, 0x00000001);
1614 nv_mthd(dev, 0xa097, 0x1e04, 0x00000001);
1615 nv_mthd(dev, 0xa097, 0x1e24, 0x00000001);
1616 nv_mthd(dev, 0xa097, 0x1e44, 0x00000001);
1617 nv_mthd(dev, 0xa097, 0x1e64, 0x00000001);
1618 nv_mthd(dev, 0xa097, 0x1e84, 0x00000001);
1619 nv_mthd(dev, 0xa097, 0x1ea4, 0x00000001);
1620 nv_mthd(dev, 0xa097, 0x1ec4, 0x00000001);
1621 nv_mthd(dev, 0xa097, 0x1ee4, 0x00000001);
1622 nv_mthd(dev, 0xa097, 0x1e08, 0x00000002);
1623 nv_mthd(dev, 0xa097, 0x1e28, 0x00000002);
1624 nv_mthd(dev, 0xa097, 0x1e48, 0x00000002);
1625 nv_mthd(dev, 0xa097, 0x1e68, 0x00000002);
1626 nv_mthd(dev, 0xa097, 0x1e88, 0x00000002);
1627 nv_mthd(dev, 0xa097, 0x1ea8, 0x00000002);
1628 nv_mthd(dev, 0xa097, 0x1ec8, 0x00000002);
1629 nv_mthd(dev, 0xa097, 0x1ee8, 0x00000002);
1630 nv_mthd(dev, 0xa097, 0x1e0c, 0x00000001);
1631 nv_mthd(dev, 0xa097, 0x1e2c, 0x00000001);
1632 nv_mthd(dev, 0xa097, 0x1e4c, 0x00000001);
1633 nv_mthd(dev, 0xa097, 0x1e6c, 0x00000001);
1634 nv_mthd(dev, 0xa097, 0x1e8c, 0x00000001);
1635 nv_mthd(dev, 0xa097, 0x1eac, 0x00000001);
1636 nv_mthd(dev, 0xa097, 0x1ecc, 0x00000001);
1637 nv_mthd(dev, 0xa097, 0x1eec, 0x00000001);
1638 nv_mthd(dev, 0xa097, 0x1e10, 0x00000001);
1639 nv_mthd(dev, 0xa097, 0x1e30, 0x00000001);
1640 nv_mthd(dev, 0xa097, 0x1e50, 0x00000001);
1641 nv_mthd(dev, 0xa097, 0x1e70, 0x00000001);
1642 nv_mthd(dev, 0xa097, 0x1e90, 0x00000001);
1643 nv_mthd(dev, 0xa097, 0x1eb0, 0x00000001);
1644 nv_mthd(dev, 0xa097, 0x1ed0, 0x00000001);
1645 nv_mthd(dev, 0xa097, 0x1ef0, 0x00000001);
1646 nv_mthd(dev, 0xa097, 0x1e14, 0x00000002);
1647 nv_mthd(dev, 0xa097, 0x1e34, 0x00000002);
1648 nv_mthd(dev, 0xa097, 0x1e54, 0x00000002);
1649 nv_mthd(dev, 0xa097, 0x1e74, 0x00000002);
1650 nv_mthd(dev, 0xa097, 0x1e94, 0x00000002);
1651 nv_mthd(dev, 0xa097, 0x1eb4, 0x00000002);
1652 nv_mthd(dev, 0xa097, 0x1ed4, 0x00000002);
1653 nv_mthd(dev, 0xa097, 0x1ef4, 0x00000002);
1654 nv_mthd(dev, 0xa097, 0x1e18, 0x00000001);
1655 nv_mthd(dev, 0xa097, 0x1e38, 0x00000001);
1656 nv_mthd(dev, 0xa097, 0x1e58, 0x00000001);
1657 nv_mthd(dev, 0xa097, 0x1e78, 0x00000001);
1658 nv_mthd(dev, 0xa097, 0x1e98, 0x00000001);
1659 nv_mthd(dev, 0xa097, 0x1eb8, 0x00000001);
1660 nv_mthd(dev, 0xa097, 0x1ed8, 0x00000001);
1661 nv_mthd(dev, 0xa097, 0x1ef8, 0x00000001);
1662 nv_mthd(dev, 0xa097, 0x3400, 0x00000000);
1663 nv_mthd(dev, 0xa097, 0x3404, 0x00000000);
1664 nv_mthd(dev, 0xa097, 0x3408, 0x00000000);
1665 nv_mthd(dev, 0xa097, 0x340c, 0x00000000);
1666 nv_mthd(dev, 0xa097, 0x3410, 0x00000000);
1667 nv_mthd(dev, 0xa097, 0x3414, 0x00000000);
1668 nv_mthd(dev, 0xa097, 0x3418, 0x00000000);
1669 nv_mthd(dev, 0xa097, 0x341c, 0x00000000);
1670 nv_mthd(dev, 0xa097, 0x3420, 0x00000000);
1671 nv_mthd(dev, 0xa097, 0x3424, 0x00000000);
1672 nv_mthd(dev, 0xa097, 0x3428, 0x00000000);
1673 nv_mthd(dev, 0xa097, 0x342c, 0x00000000);
1674 nv_mthd(dev, 0xa097, 0x3430, 0x00000000);
1675 nv_mthd(dev, 0xa097, 0x3434, 0x00000000);
1676 nv_mthd(dev, 0xa097, 0x3438, 0x00000000);
1677 nv_mthd(dev, 0xa097, 0x343c, 0x00000000);
1678 nv_mthd(dev, 0xa097, 0x3440, 0x00000000);
1679 nv_mthd(dev, 0xa097, 0x3444, 0x00000000);
1680 nv_mthd(dev, 0xa097, 0x3448, 0x00000000);
1681 nv_mthd(dev, 0xa097, 0x344c, 0x00000000);
1682 nv_mthd(dev, 0xa097, 0x3450, 0x00000000);
1683 nv_mthd(dev, 0xa097, 0x3454, 0x00000000);
1684 nv_mthd(dev, 0xa097, 0x3458, 0x00000000);
1685 nv_mthd(dev, 0xa097, 0x345c, 0x00000000);
1686 nv_mthd(dev, 0xa097, 0x3460, 0x00000000);
1687 nv_mthd(dev, 0xa097, 0x3464, 0x00000000);
1688 nv_mthd(dev, 0xa097, 0x3468, 0x00000000);
1689 nv_mthd(dev, 0xa097, 0x346c, 0x00000000);
1690 nv_mthd(dev, 0xa097, 0x3470, 0x00000000);
1691 nv_mthd(dev, 0xa097, 0x3474, 0x00000000);
1692 nv_mthd(dev, 0xa097, 0x3478, 0x00000000);
1693 nv_mthd(dev, 0xa097, 0x347c, 0x00000000);
1694 nv_mthd(dev, 0xa097, 0x3480, 0x00000000);
1695 nv_mthd(dev, 0xa097, 0x3484, 0x00000000);
1696 nv_mthd(dev, 0xa097, 0x3488, 0x00000000);
1697 nv_mthd(dev, 0xa097, 0x348c, 0x00000000);
1698 nv_mthd(dev, 0xa097, 0x3490, 0x00000000);
1699 nv_mthd(dev, 0xa097, 0x3494, 0x00000000);
1700 nv_mthd(dev, 0xa097, 0x3498, 0x00000000);
1701 nv_mthd(dev, 0xa097, 0x349c, 0x00000000);
1702 nv_mthd(dev, 0xa097, 0x34a0, 0x00000000);
1703 nv_mthd(dev, 0xa097, 0x34a4, 0x00000000);
1704 nv_mthd(dev, 0xa097, 0x34a8, 0x00000000);
1705 nv_mthd(dev, 0xa097, 0x34ac, 0x00000000);
1706 nv_mthd(dev, 0xa097, 0x34b0, 0x00000000);
1707 nv_mthd(dev, 0xa097, 0x34b4, 0x00000000);
1708 nv_mthd(dev, 0xa097, 0x34b8, 0x00000000);
1709 nv_mthd(dev, 0xa097, 0x34bc, 0x00000000);
1710 nv_mthd(dev, 0xa097, 0x34c0, 0x00000000);
1711 nv_mthd(dev, 0xa097, 0x34c4, 0x00000000);
1712 nv_mthd(dev, 0xa097, 0x34c8, 0x00000000);
1713 nv_mthd(dev, 0xa097, 0x34cc, 0x00000000);
1714 nv_mthd(dev, 0xa097, 0x34d0, 0x00000000);
1715 nv_mthd(dev, 0xa097, 0x34d4, 0x00000000);
1716 nv_mthd(dev, 0xa097, 0x34d8, 0x00000000);
1717 nv_mthd(dev, 0xa097, 0x34dc, 0x00000000);
1718 nv_mthd(dev, 0xa097, 0x34e0, 0x00000000);
1719 nv_mthd(dev, 0xa097, 0x34e4, 0x00000000);
1720 nv_mthd(dev, 0xa097, 0x34e8, 0x00000000);
1721 nv_mthd(dev, 0xa097, 0x34ec, 0x00000000);
1722 nv_mthd(dev, 0xa097, 0x34f0, 0x00000000);
1723 nv_mthd(dev, 0xa097, 0x34f4, 0x00000000);
1724 nv_mthd(dev, 0xa097, 0x34f8, 0x00000000);
1725 nv_mthd(dev, 0xa097, 0x34fc, 0x00000000);
1726 nv_mthd(dev, 0xa097, 0x3500, 0x00000000);
1727 nv_mthd(dev, 0xa097, 0x3504, 0x00000000);
1728 nv_mthd(dev, 0xa097, 0x3508, 0x00000000);
1729 nv_mthd(dev, 0xa097, 0x350c, 0x00000000);
1730 nv_mthd(dev, 0xa097, 0x3510, 0x00000000);
1731 nv_mthd(dev, 0xa097, 0x3514, 0x00000000);
1732 nv_mthd(dev, 0xa097, 0x3518, 0x00000000);
1733 nv_mthd(dev, 0xa097, 0x351c, 0x00000000);
1734 nv_mthd(dev, 0xa097, 0x3520, 0x00000000);
1735 nv_mthd(dev, 0xa097, 0x3524, 0x00000000);
1736 nv_mthd(dev, 0xa097, 0x3528, 0x00000000);
1737 nv_mthd(dev, 0xa097, 0x352c, 0x00000000);
1738 nv_mthd(dev, 0xa097, 0x3530, 0x00000000);
1739 nv_mthd(dev, 0xa097, 0x3534, 0x00000000);
1740 nv_mthd(dev, 0xa097, 0x3538, 0x00000000);
1741 nv_mthd(dev, 0xa097, 0x353c, 0x00000000);
1742 nv_mthd(dev, 0xa097, 0x3540, 0x00000000);
1743 nv_mthd(dev, 0xa097, 0x3544, 0x00000000);
1744 nv_mthd(dev, 0xa097, 0x3548, 0x00000000);
1745 nv_mthd(dev, 0xa097, 0x354c, 0x00000000);
1746 nv_mthd(dev, 0xa097, 0x3550, 0x00000000);
1747 nv_mthd(dev, 0xa097, 0x3554, 0x00000000);
1748 nv_mthd(dev, 0xa097, 0x3558, 0x00000000);
1749 nv_mthd(dev, 0xa097, 0x355c, 0x00000000);
1750 nv_mthd(dev, 0xa097, 0x3560, 0x00000000);
1751 nv_mthd(dev, 0xa097, 0x3564, 0x00000000);
1752 nv_mthd(dev, 0xa097, 0x3568, 0x00000000);
1753 nv_mthd(dev, 0xa097, 0x356c, 0x00000000);
1754 nv_mthd(dev, 0xa097, 0x3570, 0x00000000);
1755 nv_mthd(dev, 0xa097, 0x3574, 0x00000000);
1756 nv_mthd(dev, 0xa097, 0x3578, 0x00000000);
1757 nv_mthd(dev, 0xa097, 0x357c, 0x00000000);
1758 nv_mthd(dev, 0xa097, 0x3580, 0x00000000);
1759 nv_mthd(dev, 0xa097, 0x3584, 0x00000000);
1760 nv_mthd(dev, 0xa097, 0x3588, 0x00000000);
1761 nv_mthd(dev, 0xa097, 0x358c, 0x00000000);
1762 nv_mthd(dev, 0xa097, 0x3590, 0x00000000);
1763 nv_mthd(dev, 0xa097, 0x3594, 0x00000000);
1764 nv_mthd(dev, 0xa097, 0x3598, 0x00000000);
1765 nv_mthd(dev, 0xa097, 0x359c, 0x00000000);
1766 nv_mthd(dev, 0xa097, 0x35a0, 0x00000000);
1767 nv_mthd(dev, 0xa097, 0x35a4, 0x00000000);
1768 nv_mthd(dev, 0xa097, 0x35a8, 0x00000000);
1769 nv_mthd(dev, 0xa097, 0x35ac, 0x00000000);
1770 nv_mthd(dev, 0xa097, 0x35b0, 0x00000000);
1771 nv_mthd(dev, 0xa097, 0x35b4, 0x00000000);
1772 nv_mthd(dev, 0xa097, 0x35b8, 0x00000000);
1773 nv_mthd(dev, 0xa097, 0x35bc, 0x00000000);
1774 nv_mthd(dev, 0xa097, 0x35c0, 0x00000000);
1775 nv_mthd(dev, 0xa097, 0x35c4, 0x00000000);
1776 nv_mthd(dev, 0xa097, 0x35c8, 0x00000000);
1777 nv_mthd(dev, 0xa097, 0x35cc, 0x00000000);
1778 nv_mthd(dev, 0xa097, 0x35d0, 0x00000000);
1779 nv_mthd(dev, 0xa097, 0x35d4, 0x00000000);
1780 nv_mthd(dev, 0xa097, 0x35d8, 0x00000000);
1781 nv_mthd(dev, 0xa097, 0x35dc, 0x00000000);
1782 nv_mthd(dev, 0xa097, 0x35e0, 0x00000000);
1783 nv_mthd(dev, 0xa097, 0x35e4, 0x00000000);
1784 nv_mthd(dev, 0xa097, 0x35e8, 0x00000000);
1785 nv_mthd(dev, 0xa097, 0x35ec, 0x00000000);
1786 nv_mthd(dev, 0xa097, 0x35f0, 0x00000000);
1787 nv_mthd(dev, 0xa097, 0x35f4, 0x00000000);
1788 nv_mthd(dev, 0xa097, 0x35f8, 0x00000000);
1789 nv_mthd(dev, 0xa097, 0x35fc, 0x00000000);
1790 nv_mthd(dev, 0xa097, 0x030c, 0x00000001);
1791 nv_mthd(dev, 0xa097, 0x1944, 0x00000000);
1792 nv_mthd(dev, 0xa097, 0x1514, 0x00000000);
1793 nv_mthd(dev, 0xa097, 0x0d68, 0x0000ffff);
1794 nv_mthd(dev, 0xa097, 0x121c, 0x0fac6881);
1795 nv_mthd(dev, 0xa097, 0x0fac, 0x00000001);
1796 nv_mthd(dev, 0xa097, 0x1538, 0x00000001);
1797 nv_mthd(dev, 0xa097, 0x0fe0, 0x00000000);
1798 nv_mthd(dev, 0xa097, 0x0fe4, 0x00000000);
1799 nv_mthd(dev, 0xa097, 0x0fe8, 0x00000014);
1800 nv_mthd(dev, 0xa097, 0x0fec, 0x00000040);
1801 nv_mthd(dev, 0xa097, 0x0ff0, 0x00000000);
1802 nv_mthd(dev, 0xa097, 0x179c, 0x00000000);
1803 nv_mthd(dev, 0xa097, 0x1228, 0x00000400);
1804 nv_mthd(dev, 0xa097, 0x122c, 0x00000300);
1805 nv_mthd(dev, 0xa097, 0x1230, 0x00010001);
1806 nv_mthd(dev, 0xa097, 0x07f8, 0x00000000);
1807 nv_mthd(dev, 0xa097, 0x15b4, 0x00000001);
1808 nv_mthd(dev, 0xa097, 0x15cc, 0x00000000);
1809 nv_mthd(dev, 0xa097, 0x1534, 0x00000000);
1810 nv_mthd(dev, 0xa097, 0x0fb0, 0x00000000);
1811 nv_mthd(dev, 0xa097, 0x15d0, 0x00000000);
1812 nv_mthd(dev, 0xa097, 0x153c, 0x00000000);
1813 nv_mthd(dev, 0xa097, 0x16b4, 0x00000003);
1814 nv_mthd(dev, 0xa097, 0x0fbc, 0x0000ffff);
1815 nv_mthd(dev, 0xa097, 0x0fc0, 0x0000ffff);
1816 nv_mthd(dev, 0xa097, 0x0fc4, 0x0000ffff);
1817 nv_mthd(dev, 0xa097, 0x0fc8, 0x0000ffff);
1818 nv_mthd(dev, 0xa097, 0x0df8, 0x00000000);
1819 nv_mthd(dev, 0xa097, 0x0dfc, 0x00000000);
1820 nv_mthd(dev, 0xa097, 0x1948, 0x00000000);
1821 nv_mthd(dev, 0xa097, 0x1970, 0x00000001);
1822 nv_mthd(dev, 0xa097, 0x161c, 0x000009f0);
1823 nv_mthd(dev, 0xa097, 0x0dcc, 0x00000010);
1824 nv_mthd(dev, 0xa097, 0x163c, 0x00000000);
1825 nv_mthd(dev, 0xa097, 0x15e4, 0x00000000);
1826 nv_mthd(dev, 0xa097, 0x1160, 0x25e00040);
1827 nv_mthd(dev, 0xa097, 0x1164, 0x25e00040);
1828 nv_mthd(dev, 0xa097, 0x1168, 0x25e00040);
1829 nv_mthd(dev, 0xa097, 0x116c, 0x25e00040);
1830 nv_mthd(dev, 0xa097, 0x1170, 0x25e00040);
1831 nv_mthd(dev, 0xa097, 0x1174, 0x25e00040);
1832 nv_mthd(dev, 0xa097, 0x1178, 0x25e00040);
1833 nv_mthd(dev, 0xa097, 0x117c, 0x25e00040);
1834 nv_mthd(dev, 0xa097, 0x1180, 0x25e00040);
1835 nv_mthd(dev, 0xa097, 0x1184, 0x25e00040);
1836 nv_mthd(dev, 0xa097, 0x1188, 0x25e00040);
1837 nv_mthd(dev, 0xa097, 0x118c, 0x25e00040);
1838 nv_mthd(dev, 0xa097, 0x1190, 0x25e00040);
1839 nv_mthd(dev, 0xa097, 0x1194, 0x25e00040);
1840 nv_mthd(dev, 0xa097, 0x1198, 0x25e00040);
1841 nv_mthd(dev, 0xa097, 0x119c, 0x25e00040);
1842 nv_mthd(dev, 0xa097, 0x11a0, 0x25e00040);
1843 nv_mthd(dev, 0xa097, 0x11a4, 0x25e00040);
1844 nv_mthd(dev, 0xa097, 0x11a8, 0x25e00040);
1845 nv_mthd(dev, 0xa097, 0x11ac, 0x25e00040);
1846 nv_mthd(dev, 0xa097, 0x11b0, 0x25e00040);
1847 nv_mthd(dev, 0xa097, 0x11b4, 0x25e00040);
1848 nv_mthd(dev, 0xa097, 0x11b8, 0x25e00040);
1849 nv_mthd(dev, 0xa097, 0x11bc, 0x25e00040);
1850 nv_mthd(dev, 0xa097, 0x11c0, 0x25e00040);
1851 nv_mthd(dev, 0xa097, 0x11c4, 0x25e00040);
1852 nv_mthd(dev, 0xa097, 0x11c8, 0x25e00040);
1853 nv_mthd(dev, 0xa097, 0x11cc, 0x25e00040);
1854 nv_mthd(dev, 0xa097, 0x11d0, 0x25e00040);
1855 nv_mthd(dev, 0xa097, 0x11d4, 0x25e00040);
1856 nv_mthd(dev, 0xa097, 0x11d8, 0x25e00040);
1857 nv_mthd(dev, 0xa097, 0x11dc, 0x25e00040);
1858 nv_mthd(dev, 0xa097, 0x1880, 0x00000000);
1859 nv_mthd(dev, 0xa097, 0x1884, 0x00000000);
1860 nv_mthd(dev, 0xa097, 0x1888, 0x00000000);
1861 nv_mthd(dev, 0xa097, 0x188c, 0x00000000);
1862 nv_mthd(dev, 0xa097, 0x1890, 0x00000000);
1863 nv_mthd(dev, 0xa097, 0x1894, 0x00000000);
1864 nv_mthd(dev, 0xa097, 0x1898, 0x00000000);
1865 nv_mthd(dev, 0xa097, 0x189c, 0x00000000);
1866 nv_mthd(dev, 0xa097, 0x18a0, 0x00000000);
1867 nv_mthd(dev, 0xa097, 0x18a4, 0x00000000);
1868 nv_mthd(dev, 0xa097, 0x18a8, 0x00000000);
1869 nv_mthd(dev, 0xa097, 0x18ac, 0x00000000);
1870 nv_mthd(dev, 0xa097, 0x18b0, 0x00000000);
1871 nv_mthd(dev, 0xa097, 0x18b4, 0x00000000);
1872 nv_mthd(dev, 0xa097, 0x18b8, 0x00000000);
1873 nv_mthd(dev, 0xa097, 0x18bc, 0x00000000);
1874 nv_mthd(dev, 0xa097, 0x18c0, 0x00000000);
1875 nv_mthd(dev, 0xa097, 0x18c4, 0x00000000);
1876 nv_mthd(dev, 0xa097, 0x18c8, 0x00000000);
1877 nv_mthd(dev, 0xa097, 0x18cc, 0x00000000);
1878 nv_mthd(dev, 0xa097, 0x18d0, 0x00000000);
1879 nv_mthd(dev, 0xa097, 0x18d4, 0x00000000);
1880 nv_mthd(dev, 0xa097, 0x18d8, 0x00000000);
1881 nv_mthd(dev, 0xa097, 0x18dc, 0x00000000);
1882 nv_mthd(dev, 0xa097, 0x18e0, 0x00000000);
1883 nv_mthd(dev, 0xa097, 0x18e4, 0x00000000);
1884 nv_mthd(dev, 0xa097, 0x18e8, 0x00000000);
1885 nv_mthd(dev, 0xa097, 0x18ec, 0x00000000);
1886 nv_mthd(dev, 0xa097, 0x18f0, 0x00000000);
1887 nv_mthd(dev, 0xa097, 0x18f4, 0x00000000);
1888 nv_mthd(dev, 0xa097, 0x18f8, 0x00000000);
1889 nv_mthd(dev, 0xa097, 0x18fc, 0x00000000);
1890 nv_mthd(dev, 0xa097, 0x0f84, 0x00000000);
1891 nv_mthd(dev, 0xa097, 0x0f88, 0x00000000);
1892 nv_mthd(dev, 0xa097, 0x17c8, 0x00000000);
1893 nv_mthd(dev, 0xa097, 0x17cc, 0x00000000);
1894 nv_mthd(dev, 0xa097, 0x17d0, 0x000000ff);
1895 nv_mthd(dev, 0xa097, 0x17d4, 0xffffffff);
1896 nv_mthd(dev, 0xa097, 0x17d8, 0x00000002);
1897 nv_mthd(dev, 0xa097, 0x17dc, 0x00000000);
1898 nv_mthd(dev, 0xa097, 0x15f4, 0x00000000);
1899 nv_mthd(dev, 0xa097, 0x15f8, 0x00000000);
1900 nv_mthd(dev, 0xa097, 0x1434, 0x00000000);
1901 nv_mthd(dev, 0xa097, 0x1438, 0x00000000);
1902 nv_mthd(dev, 0xa097, 0x0d74, 0x00000000);
1903 nv_mthd(dev, 0xa097, 0x0dec, 0x00000001);
1904 nv_mthd(dev, 0xa097, 0x13a4, 0x00000000);
1905 nv_mthd(dev, 0xa097, 0x1318, 0x00000001);
1906 nv_mthd(dev, 0xa097, 0x1644, 0x00000000);
1907 nv_mthd(dev, 0xa097, 0x0748, 0x00000000);
1908 nv_mthd(dev, 0xa097, 0x0de8, 0x00000000);
1909 nv_mthd(dev, 0xa097, 0x1648, 0x00000000);
1910 nv_mthd(dev, 0xa097, 0x12a4, 0x00000000);
1911 nv_mthd(dev, 0xa097, 0x1120, 0x00000000);
1912 nv_mthd(dev, 0xa097, 0x1124, 0x00000000);
1913 nv_mthd(dev, 0xa097, 0x1128, 0x00000000);
1914 nv_mthd(dev, 0xa097, 0x112c, 0x00000000);
1915 nv_mthd(dev, 0xa097, 0x1118, 0x00000000);
1916 nv_mthd(dev, 0xa097, 0x164c, 0x00000000);
1917 nv_mthd(dev, 0xa097, 0x1658, 0x00000000);
1918 nv_mthd(dev, 0xa097, 0x1910, 0x00000290);
1919 nv_mthd(dev, 0xa097, 0x1518, 0x00000000);
1920 nv_mthd(dev, 0xa097, 0x165c, 0x00000001);
1921 nv_mthd(dev, 0xa097, 0x1520, 0x00000000);
1922 nv_mthd(dev, 0xa097, 0x1604, 0x00000000);
1923 nv_mthd(dev, 0xa097, 0x1570, 0x00000000);
1924 nv_mthd(dev, 0xa097, 0x13b0, 0x3f800000);
1925 nv_mthd(dev, 0xa097, 0x13b4, 0x3f800000);
1926 nv_mthd(dev, 0xa097, 0x020c, 0x00000000);
1927 nv_mthd(dev, 0xa097, 0x1670, 0x30201000);
1928 nv_mthd(dev, 0xa097, 0x1674, 0x70605040);
1929 nv_mthd(dev, 0xa097, 0x1678, 0xb8a89888);
1930 nv_mthd(dev, 0xa097, 0x167c, 0xf8e8d8c8);
1931 nv_mthd(dev, 0xa097, 0x166c, 0x00000000);
1932 nv_mthd(dev, 0xa097, 0x1680, 0x00ffff00);
1933 nv_mthd(dev, 0xa097, 0x12d0, 0x00000003);
1934 nv_mthd(dev, 0xa097, 0x12d4, 0x00000002);
1935 nv_mthd(dev, 0xa097, 0x1684, 0x00000000);
1936 nv_mthd(dev, 0xa097, 0x1688, 0x00000000);
1937 nv_mthd(dev, 0xa097, 0x0dac, 0x00001b02);
1938 nv_mthd(dev, 0xa097, 0x0db0, 0x00001b02);
1939 nv_mthd(dev, 0xa097, 0x0db4, 0x00000000);
1940 nv_mthd(dev, 0xa097, 0x168c, 0x00000000);
1941 nv_mthd(dev, 0xa097, 0x15bc, 0x00000000);
1942 nv_mthd(dev, 0xa097, 0x156c, 0x00000000);
1943 nv_mthd(dev, 0xa097, 0x187c, 0x00000000);
1944 nv_mthd(dev, 0xa097, 0x1110, 0x00000001);
1945 nv_mthd(dev, 0xa097, 0x0dc0, 0x00000000);
1946 nv_mthd(dev, 0xa097, 0x0dc4, 0x00000000);
1947 nv_mthd(dev, 0xa097, 0x0dc8, 0x00000000);
1948 nv_mthd(dev, 0xa097, 0x1234, 0x00000000);
1949 nv_mthd(dev, 0xa097, 0x1690, 0x00000000);
1950 nv_mthd(dev, 0xa097, 0x12ac, 0x00000001);
1951 nv_mthd(dev, 0xa097, 0x0790, 0x00000000);
1952 nv_mthd(dev, 0xa097, 0x0794, 0x00000000);
1953 nv_mthd(dev, 0xa097, 0x0798, 0x00000000);
1954 nv_mthd(dev, 0xa097, 0x079c, 0x00000000);
1955 nv_mthd(dev, 0xa097, 0x07a0, 0x00000000);
1956 nv_mthd(dev, 0xa097, 0x077c, 0x00000000);
1957 nv_mthd(dev, 0xa097, 0x1000, 0x00000010);
1958 nv_mthd(dev, 0xa097, 0x10fc, 0x00000000);
1959 nv_mthd(dev, 0xa097, 0x1290, 0x00000000);
1960 nv_mthd(dev, 0xa097, 0x0218, 0x00000010);
1961 nv_mthd(dev, 0xa097, 0x12d8, 0x00000000);
1962 nv_mthd(dev, 0xa097, 0x12dc, 0x00000010);
1963 nv_mthd(dev, 0xa097, 0x0d94, 0x00000001);
1964 nv_mthd(dev, 0xa097, 0x155c, 0x00000000);
1965 nv_mthd(dev, 0xa097, 0x1560, 0x00000000);
1966 nv_mthd(dev, 0xa097, 0x1564, 0x00000fff);
1967 nv_mthd(dev, 0xa097, 0x1574, 0x00000000);
1968 nv_mthd(dev, 0xa097, 0x1578, 0x00000000);
1969 nv_mthd(dev, 0xa097, 0x157c, 0x000fffff);
1970 nv_mthd(dev, 0xa097, 0x1354, 0x00000000);
1971 nv_mthd(dev, 0xa097, 0x1610, 0x00000012);
1972 nv_mthd(dev, 0xa097, 0x1608, 0x00000000);
1973 nv_mthd(dev, 0xa097, 0x160c, 0x00000000);
1974 nv_mthd(dev, 0xa097, 0x260c, 0x00000000);
1975 nv_mthd(dev, 0xa097, 0x07ac, 0x00000000);
1976 nv_mthd(dev, 0xa097, 0x162c, 0x00000003);
1977 nv_mthd(dev, 0xa097, 0x0210, 0x00000000);
1978 nv_mthd(dev, 0xa097, 0x0320, 0x00000000);
1979 nv_mthd(dev, 0xa097, 0x0324, 0x3f800000);
1980 nv_mthd(dev, 0xa097, 0x0328, 0x3f800000);
1981 nv_mthd(dev, 0xa097, 0x032c, 0x3f800000);
1982 nv_mthd(dev, 0xa097, 0x0330, 0x3f800000);
1983 nv_mthd(dev, 0xa097, 0x0334, 0x3f800000);
1984 nv_mthd(dev, 0xa097, 0x0338, 0x3f800000);
1985 nv_mthd(dev, 0xa097, 0x0750, 0x00000000);
1986 nv_mthd(dev, 0xa097, 0x0760, 0x39291909);
1987 nv_mthd(dev, 0xa097, 0x0764, 0x79695949);
1988 nv_mthd(dev, 0xa097, 0x0768, 0xb9a99989);
1989 nv_mthd(dev, 0xa097, 0x076c, 0xf9e9d9c9);
1990 nv_mthd(dev, 0xa097, 0x0770, 0x30201000);
1991 nv_mthd(dev, 0xa097, 0x0774, 0x70605040);
1992 nv_mthd(dev, 0xa097, 0x0778, 0x00009080);
1993 nv_mthd(dev, 0xa097, 0x0780, 0x39291909);
1994 nv_mthd(dev, 0xa097, 0x0784, 0x79695949);
1995 nv_mthd(dev, 0xa097, 0x0788, 0xb9a99989);
1996 nv_mthd(dev, 0xa097, 0x078c, 0xf9e9d9c9);
1997 nv_mthd(dev, 0xa097, 0x07d0, 0x30201000);
1998 nv_mthd(dev, 0xa097, 0x07d4, 0x70605040);
1999 nv_mthd(dev, 0xa097, 0x07d8, 0x00009080);
2000 nv_mthd(dev, 0xa097, 0x037c, 0x00000001);
2001 nv_mthd(dev, 0xa097, 0x0740, 0x00000000);
2002 nv_mthd(dev, 0xa097, 0x0744, 0x00000000);
2003 nv_mthd(dev, 0xa097, 0x2600, 0x00000000);
2004 nv_mthd(dev, 0xa097, 0x1918, 0x00000000);
2005 nv_mthd(dev, 0xa097, 0x191c, 0x00000900);
2006 nv_mthd(dev, 0xa097, 0x1920, 0x00000405);
2007 nv_mthd(dev, 0xa097, 0x1308, 0x00000001);
2008 nv_mthd(dev, 0xa097, 0x1924, 0x00000000);
2009 nv_mthd(dev, 0xa097, 0x13ac, 0x00000000);
2010 nv_mthd(dev, 0xa097, 0x192c, 0x00000001);
2011 nv_mthd(dev, 0xa097, 0x193c, 0x00002c1c);
2012 nv_mthd(dev, 0xa097, 0x0d7c, 0x00000000);
2013 nv_mthd(dev, 0xa097, 0x0f8c, 0x00000000);
2014 nv_mthd(dev, 0xa097, 0x02c0, 0x00000001);
2015 nv_mthd(dev, 0xa097, 0x1510, 0x00000000);
2016 nv_mthd(dev, 0xa097, 0x1940, 0x00000000);
2017 nv_mthd(dev, 0xa097, 0x0ff4, 0x00000000);
2018 nv_mthd(dev, 0xa097, 0x0ff8, 0x00000000);
2019 nv_mthd(dev, 0xa097, 0x194c, 0x00000000);
2020 nv_mthd(dev, 0xa097, 0x1950, 0x00000000);
2021 nv_mthd(dev, 0xa097, 0x1968, 0x00000000);
2022 nv_mthd(dev, 0xa097, 0x1590, 0x0000003f);
2023 nv_mthd(dev, 0xa097, 0x07e8, 0x00000000);
2024 nv_mthd(dev, 0xa097, 0x07ec, 0x00000000);
2025 nv_mthd(dev, 0xa097, 0x07f0, 0x00000000);
2026 nv_mthd(dev, 0xa097, 0x07f4, 0x00000000);
2027 nv_mthd(dev, 0xa097, 0x196c, 0x00000011);
2028 nv_mthd(dev, 0xa097, 0x02e4, 0x0000b001);
2029 nv_mthd(dev, 0xa097, 0x036c, 0x00000000);
2030 nv_mthd(dev, 0xa097, 0x0370, 0x00000000);
2031 nv_mthd(dev, 0xa097, 0x197c, 0x00000000);
2032 nv_mthd(dev, 0xa097, 0x0fcc, 0x00000000);
2033 nv_mthd(dev, 0xa097, 0x0fd0, 0x00000000);
2034 nv_mthd(dev, 0xa097, 0x02d8, 0x00000040);
2035 nv_mthd(dev, 0xa097, 0x1980, 0x00000080);
2036 nv_mthd(dev, 0xa097, 0x1504, 0x00000080);
2037 nv_mthd(dev, 0xa097, 0x1984, 0x00000000);
2038 nv_mthd(dev, 0xa097, 0x0300, 0x00000001);
2039 nv_mthd(dev, 0xa097, 0x13a8, 0x00000000);
2040 nv_mthd(dev, 0xa097, 0x12ec, 0x00000000);
2041 nv_mthd(dev, 0xa097, 0x1310, 0x00000000);
2042 nv_mthd(dev, 0xa097, 0x1314, 0x00000001);
2043 nv_mthd(dev, 0xa097, 0x1380, 0x00000000);
2044 nv_mthd(dev, 0xa097, 0x1384, 0x00000001);
2045 nv_mthd(dev, 0xa097, 0x1388, 0x00000001);
2046 nv_mthd(dev, 0xa097, 0x138c, 0x00000001);
2047 nv_mthd(dev, 0xa097, 0x1390, 0x00000001);
2048 nv_mthd(dev, 0xa097, 0x1394, 0x00000000);
2049 nv_mthd(dev, 0xa097, 0x139c, 0x00000000);
2050 nv_mthd(dev, 0xa097, 0x1398, 0x00000000);
2051 nv_mthd(dev, 0xa097, 0x1594, 0x00000000);
2052 nv_mthd(dev, 0xa097, 0x1598, 0x00000001);
2053 nv_mthd(dev, 0xa097, 0x159c, 0x00000001);
2054 nv_mthd(dev, 0xa097, 0x15a0, 0x00000001);
2055 nv_mthd(dev, 0xa097, 0x15a4, 0x00000001);
2056 nv_mthd(dev, 0xa097, 0x0f54, 0x00000000);
2057 nv_mthd(dev, 0xa097, 0x0f58, 0x00000000);
2058 nv_mthd(dev, 0xa097, 0x0f5c, 0x00000000);
2059 nv_mthd(dev, 0xa097, 0x19bc, 0x00000000);
2060 nv_mthd(dev, 0xa097, 0x0f9c, 0x00000000);
2061 nv_mthd(dev, 0xa097, 0x0fa0, 0x00000000);
2062 nv_mthd(dev, 0xa097, 0x12cc, 0x00000000);
2063 nv_mthd(dev, 0xa097, 0x12e8, 0x00000000);
2064 nv_mthd(dev, 0xa097, 0x130c, 0x00000001);
2065 nv_mthd(dev, 0xa097, 0x1360, 0x00000000);
2066 nv_mthd(dev, 0xa097, 0x1364, 0x00000000);
2067 nv_mthd(dev, 0xa097, 0x1368, 0x00000000);
2068 nv_mthd(dev, 0xa097, 0x136c, 0x00000000);
2069 nv_mthd(dev, 0xa097, 0x1370, 0x00000000);
2070 nv_mthd(dev, 0xa097, 0x1374, 0x00000000);
2071 nv_mthd(dev, 0xa097, 0x1378, 0x00000000);
2072 nv_mthd(dev, 0xa097, 0x137c, 0x00000000);
2073 nv_mthd(dev, 0xa097, 0x133c, 0x00000001);
2074 nv_mthd(dev, 0xa097, 0x1340, 0x00000001);
2075 nv_mthd(dev, 0xa097, 0x1344, 0x00000002);
2076 nv_mthd(dev, 0xa097, 0x1348, 0x00000001);
2077 nv_mthd(dev, 0xa097, 0x134c, 0x00000001);
2078 nv_mthd(dev, 0xa097, 0x1350, 0x00000002);
2079 nv_mthd(dev, 0xa097, 0x1358, 0x00000001);
2080 nv_mthd(dev, 0xa097, 0x12e4, 0x00000000);
2081 nv_mthd(dev, 0xa097, 0x131c, 0x00000000);
2082 nv_mthd(dev, 0xa097, 0x1320, 0x00000000);
2083 nv_mthd(dev, 0xa097, 0x1324, 0x00000000);
2084 nv_mthd(dev, 0xa097, 0x1328, 0x00000000);
2085 nv_mthd(dev, 0xa097, 0x19c0, 0x00000000);
2086 nv_mthd(dev, 0xa097, 0x1140, 0x00000000);
2087 nv_mthd(dev, 0xa097, 0x19c4, 0x00000000);
2088 nv_mthd(dev, 0xa097, 0x19c8, 0x00001500);
2089 nv_mthd(dev, 0xa097, 0x135c, 0x00000000);
2090 nv_mthd(dev, 0xa097, 0x0f90, 0x00000000);
2091 nv_mthd(dev, 0xa097, 0x19e0, 0x00000001);
2092 nv_mthd(dev, 0xa097, 0x19e4, 0x00000001);
2093 nv_mthd(dev, 0xa097, 0x19e8, 0x00000001);
2094 nv_mthd(dev, 0xa097, 0x19ec, 0x00000001);
2095 nv_mthd(dev, 0xa097, 0x19f0, 0x00000001);
2096 nv_mthd(dev, 0xa097, 0x19f4, 0x00000001);
2097 nv_mthd(dev, 0xa097, 0x19f8, 0x00000001);
2098 nv_mthd(dev, 0xa097, 0x19fc, 0x00000001);
2099 nv_mthd(dev, 0xa097, 0x19cc, 0x00000001);
2100 nv_mthd(dev, 0xa097, 0x15b8, 0x00000000);
2101 nv_mthd(dev, 0xa097, 0x1a00, 0x00001111);
2102 nv_mthd(dev, 0xa097, 0x1a04, 0x00000000);
2103 nv_mthd(dev, 0xa097, 0x1a08, 0x00000000);
2104 nv_mthd(dev, 0xa097, 0x1a0c, 0x00000000);
2105 nv_mthd(dev, 0xa097, 0x1a10, 0x00000000);
2106 nv_mthd(dev, 0xa097, 0x1a14, 0x00000000);
2107 nv_mthd(dev, 0xa097, 0x1a18, 0x00000000);
2108 nv_mthd(dev, 0xa097, 0x1a1c, 0x00000000);
2109 nv_mthd(dev, 0xa097, 0x0d6c, 0xffff0000);
2110 nv_mthd(dev, 0xa097, 0x0d70, 0xffff0000);
2111 nv_mthd(dev, 0xa097, 0x10f8, 0x00001010);
2112 nv_mthd(dev, 0xa097, 0x0d80, 0x00000000);
2113 nv_mthd(dev, 0xa097, 0x0d84, 0x00000000);
2114 nv_mthd(dev, 0xa097, 0x0d88, 0x00000000);
2115 nv_mthd(dev, 0xa097, 0x0d8c, 0x00000000);
2116 nv_mthd(dev, 0xa097, 0x0d90, 0x00000000);
2117 nv_mthd(dev, 0xa097, 0x0da0, 0x00000000);
2118 nv_mthd(dev, 0xa097, 0x07a4, 0x00000000);
2119 nv_mthd(dev, 0xa097, 0x07a8, 0x00000000);
2120 nv_mthd(dev, 0xa097, 0x1508, 0x80000000);
2121 nv_mthd(dev, 0xa097, 0x150c, 0x40000000);
2122 nv_mthd(dev, 0xa097, 0x1668, 0x00000000);
2123 nv_mthd(dev, 0xa097, 0x0318, 0x00000008);
2124 nv_mthd(dev, 0xa097, 0x031c, 0x00000008);
2125 nv_mthd(dev, 0xa097, 0x0d9c, 0x00000001);
2126 nv_mthd(dev, 0xa097, 0x0374, 0x00000000);
2127 nv_mthd(dev, 0xa097, 0x0378, 0x00000020);
2128 nv_mthd(dev, 0xa097, 0x07dc, 0x00000000);
2129 nv_mthd(dev, 0xa097, 0x074c, 0x00000055);
2130 nv_mthd(dev, 0xa097, 0x1420, 0x00000003);
2131 nv_mthd(dev, 0xa097, 0x17bc, 0x00000000);
2132 nv_mthd(dev, 0xa097, 0x17c0, 0x00000000);
2133 nv_mthd(dev, 0xa097, 0x17c4, 0x00000001);
2134 nv_mthd(dev, 0xa097, 0x1008, 0x00000008);
2135 nv_mthd(dev, 0xa097, 0x100c, 0x00000040);
2136 nv_mthd(dev, 0xa097, 0x1010, 0x0000012c);
2137 nv_mthd(dev, 0xa097, 0x0d60, 0x00000040);
2138 nv_mthd(dev, 0xa097, 0x075c, 0x00000003);
2139 nv_mthd(dev, 0xa097, 0x1018, 0x00000020);
2140 nv_mthd(dev, 0xa097, 0x101c, 0x00000001);
2141 nv_mthd(dev, 0xa097, 0x1020, 0x00000020);
2142 nv_mthd(dev, 0xa097, 0x1024, 0x00000001);
2143 nv_mthd(dev, 0xa097, 0x1444, 0x00000000);
2144 nv_mthd(dev, 0xa097, 0x1448, 0x00000000);
2145 nv_mthd(dev, 0xa097, 0x144c, 0x00000000);
2146 nv_mthd(dev, 0xa097, 0x0360, 0x20164010);
2147 nv_mthd(dev, 0xa097, 0x0364, 0x00000020);
2148 nv_mthd(dev, 0xa097, 0x0368, 0x00000000);
2149 nv_mthd(dev, 0xa097, 0x0de4, 0x00000000);
2150 nv_mthd(dev, 0xa097, 0x0204, 0x00000006);
2151 nv_mthd(dev, 0xa097, 0x0208, 0x00000000);
2152 nv_mthd(dev, 0xa097, 0x02cc, 0x003fffff);
2153 nv_mthd(dev, 0xa097, 0x02d0, 0x003fffff);
2154 nv_mthd(dev, 0xa097, 0x1220, 0x00000005);
2155 nv_mthd(dev, 0xa097, 0x0fdc, 0x00000000);
2156 nv_mthd(dev, 0xa097, 0x0f98, 0x00400008);
2157 nv_mthd(dev, 0xa097, 0x1284, 0x08000080);
2158 nv_mthd(dev, 0xa097, 0x1450, 0x00400008);
2159 nv_mthd(dev, 0xa097, 0x1454, 0x08000080);
2160 nv_mthd(dev, 0xa097, 0x0214, 0x00000000);
2161}
2162
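/*
 * The unrolled nv_mthd() runs above replay the "golden" method state for
 * the Kepler 3D class (0xa097) into the graph context.  A minimal sketch
 * of a table-driven equivalent follows; "struct nve0_mthd",
 * "nve0_a097_mthds" and "nve0_grctx_replay_mthds" are illustrative names
 * only, while nv_mthd() itself is the helper used throughout this file.
 */
struct nve0_mthd {
	u32 mthd;
	u32 data;
};

static const struct nve0_mthd nve0_a097_mthds[] = {
	{ 0x2000, 0x00000000 },
	{ 0x2040, 0x00000011 },
	{ 0x2080, 0x00000020 },
	/* ... remaining (method, data) pairs from the sequence above ... */
};

static void
nve0_grctx_replay_mthds(struct drm_device *dev, u32 grclass,
			const struct nve0_mthd *m, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		nv_mthd(dev, grclass, m[i].mthd, m[i].data);
}

/* Golden method state for the 2D class (0x902d) follows. */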
2163static void
2164nve0_grctx_generate_902d(struct drm_device *dev)
2165{
2166 nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
2167 nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
2168 nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
2169 nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
2170 nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
2171 nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
2172 nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
2173 nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
2174 nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
2175 nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
2176 nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
2177 nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
2178 nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
2179 nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
2180 nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
2181 nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
2182 nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
2183 nv_mthd(dev, 0x902d, 0x3410, 0x00000000);
2184}
2185
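/* Reset defaults for unidentified PGRAPH units (hence the "unkNNxx"
 * naming); unk40xx covers the 0x4040xx..0x4042xx registers. */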
2186static void
2187nve0_graph_generate_unk40xx(struct drm_device *dev)
2188{
2189 nv_wr32(dev, 0x404010, 0x0);
2190 nv_wr32(dev, 0x404014, 0x0);
2191 nv_wr32(dev, 0x404018, 0x0);
2192 nv_wr32(dev, 0x40401c, 0x0);
2193 nv_wr32(dev, 0x404020, 0x0);
2194 nv_wr32(dev, 0x404024, 0xe000);
2195 nv_wr32(dev, 0x404028, 0x0);
2196 nv_wr32(dev, 0x4040a8, 0x0);
2197 nv_wr32(dev, 0x4040ac, 0x0);
2198 nv_wr32(dev, 0x4040b0, 0x0);
2199 nv_wr32(dev, 0x4040b4, 0x0);
2200 nv_wr32(dev, 0x4040b8, 0x0);
2201 nv_wr32(dev, 0x4040bc, 0x0);
2202 nv_wr32(dev, 0x4040c0, 0x0);
2203 nv_wr32(dev, 0x4040c4, 0x0);
2204 nv_wr32(dev, 0x4040c8, 0xf800008f);
2205 nv_wr32(dev, 0x4040d0, 0x0);
2206 nv_wr32(dev, 0x4040d4, 0x0);
2207 nv_wr32(dev, 0x4040d8, 0x0);
2208 nv_wr32(dev, 0x4040dc, 0x0);
2209 nv_wr32(dev, 0x4040e0, 0x0);
2210 nv_wr32(dev, 0x4040e4, 0x0);
2211 nv_wr32(dev, 0x4040e8, 0x1000);
2212 nv_wr32(dev, 0x4040f8, 0x0);
2213 nv_wr32(dev, 0x404130, 0x0);
2214 nv_wr32(dev, 0x404134, 0x0);
2215 nv_wr32(dev, 0x404138, 0x20000040);
2216 nv_wr32(dev, 0x404150, 0x2e);
2217 nv_wr32(dev, 0x404154, 0x400);
2218 nv_wr32(dev, 0x404158, 0x200);
2219 nv_wr32(dev, 0x404164, 0x55);
2220 nv_wr32(dev, 0x4041a0, 0x0);
2221 nv_wr32(dev, 0x4041a4, 0x0);
2222 nv_wr32(dev, 0x4041a8, 0x0);
2223 nv_wr32(dev, 0x4041ac, 0x0);
2224 nv_wr32(dev, 0x404200, 0x0);
2225 nv_wr32(dev, 0x404204, 0x0);
2226 nv_wr32(dev, 0x404208, 0x0);
2227 nv_wr32(dev, 0x40420c, 0x0);
2228}
2229
2230static void
2231nve0_graph_generate_unk44xx(struct drm_device *dev)
2232{
2233 nv_wr32(dev, 0x404404, 0x0);
2234 nv_wr32(dev, 0x404408, 0x0);
2235 nv_wr32(dev, 0x40440c, 0x0);
2236 nv_wr32(dev, 0x404410, 0x0);
2237 nv_wr32(dev, 0x404414, 0x0);
2238 nv_wr32(dev, 0x404418, 0x0);
2239 nv_wr32(dev, 0x40441c, 0x0);
2240 nv_wr32(dev, 0x404420, 0x0);
2241 nv_wr32(dev, 0x404424, 0x0);
2242 nv_wr32(dev, 0x404428, 0x0);
2243 nv_wr32(dev, 0x40442c, 0x0);
2244 nv_wr32(dev, 0x404430, 0x0);
2245 nv_wr32(dev, 0x404434, 0x0);
2246 nv_wr32(dev, 0x404438, 0x0);
2247 nv_wr32(dev, 0x404460, 0x0);
2248 nv_wr32(dev, 0x404464, 0x0);
2249 nv_wr32(dev, 0x404468, 0xffffff);
2250 nv_wr32(dev, 0x40446c, 0x0);
2251 nv_wr32(dev, 0x404480, 0x1);
2252 nv_wr32(dev, 0x404498, 0x1);
2253}
2254
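/*
 * Most writes in these unkNNxx blocks zero contiguous register ranges
 * (0x404404..0x404438 above, for instance).  A hypothetical helper for
 * such runs; the name is illustrative, nv_wr32() is the accessor already
 * used here.
 */
static void
nve0_graph_zero_range(struct drm_device *dev, u32 base, u32 limit)
{
	u32 reg;

	for (reg = base; reg <= limit; reg += 4)
		nv_wr32(dev, reg, 0x00000000);
}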
2255static void
2256nve0_graph_generate_unk46xx(struct drm_device *dev)
2257{
2258 nv_wr32(dev, 0x404604, 0x14);
2259 nv_wr32(dev, 0x404608, 0x0);
2260 nv_wr32(dev, 0x40460c, 0x3fff);
2261 nv_wr32(dev, 0x404610, 0x100);
2262 nv_wr32(dev, 0x404618, 0x0);
2263 nv_wr32(dev, 0x40461c, 0x0);
2264 nv_wr32(dev, 0x404620, 0x0);
2265 nv_wr32(dev, 0x404624, 0x0);
2266 nv_wr32(dev, 0x40462c, 0x0);
2267 nv_wr32(dev, 0x404630, 0x0);
2268 nv_wr32(dev, 0x404640, 0x0);
2269 nv_wr32(dev, 0x404654, 0x0);
2270 nv_wr32(dev, 0x404660, 0x0);
2271 nv_wr32(dev, 0x404678, 0x0);
2272 nv_wr32(dev, 0x40467c, 0x2);
2273 nv_wr32(dev, 0x404680, 0x0);
2274 nv_wr32(dev, 0x404684, 0x0);
2275 nv_wr32(dev, 0x404688, 0x0);
2276 nv_wr32(dev, 0x40468c, 0x0);
2277 nv_wr32(dev, 0x404690, 0x0);
2278 nv_wr32(dev, 0x404694, 0x0);
2279 nv_wr32(dev, 0x404698, 0x0);
2280 nv_wr32(dev, 0x40469c, 0x0);
2281 nv_wr32(dev, 0x4046a0, 0x7f0080);
2282 nv_wr32(dev, 0x4046a4, 0x0);
2283 nv_wr32(dev, 0x4046a8, 0x0);
2284 nv_wr32(dev, 0x4046ac, 0x0);
2285 nv_wr32(dev, 0x4046b0, 0x0);
2286 nv_wr32(dev, 0x4046b4, 0x0);
2287 nv_wr32(dev, 0x4046b8, 0x0);
2288 nv_wr32(dev, 0x4046bc, 0x0);
2289 nv_wr32(dev, 0x4046c0, 0x0);
2290 nv_wr32(dev, 0x4046c8, 0x0);
2291 nv_wr32(dev, 0x4046cc, 0x0);
2292 nv_wr32(dev, 0x4046d0, 0x0);
2293}
2294
2295static void
2296nve0_graph_generate_unk47xx(struct drm_device *dev)
2297{
2298 nv_wr32(dev, 0x404700, 0x0);
2299 nv_wr32(dev, 0x404704, 0x0);
2300 nv_wr32(dev, 0x404708, 0x0);
2301 nv_wr32(dev, 0x404718, 0x0);
2302 nv_wr32(dev, 0x40471c, 0x0);
2303 nv_wr32(dev, 0x404720, 0x0);
2304 nv_wr32(dev, 0x404724, 0x0);
2305 nv_wr32(dev, 0x404728, 0x0);
2306 nv_wr32(dev, 0x40472c, 0x0);
2307 nv_wr32(dev, 0x404730, 0x0);
2308 nv_wr32(dev, 0x404734, 0x100);
2309 nv_wr32(dev, 0x404738, 0x0);
2310 nv_wr32(dev, 0x40473c, 0x0);
2311 nv_wr32(dev, 0x404744, 0x0);
2312 nv_wr32(dev, 0x404748, 0x0);
2313 nv_wr32(dev, 0x404754, 0x0);
2314}
2315
2316static void
2317nve0_graph_generate_unk58xx(struct drm_device *dev)
2318{
2319 nv_wr32(dev, 0x405800, 0xf8000bf);
2320 nv_wr32(dev, 0x405830, 0x2180648);
2321 nv_wr32(dev, 0x405834, 0x8000000);
2322 nv_wr32(dev, 0x405838, 0x0);
2323 nv_wr32(dev, 0x405854, 0x0);
2324 nv_wr32(dev, 0x405870, 0x1);
2325 nv_wr32(dev, 0x405874, 0x1);
2326 nv_wr32(dev, 0x405878, 0x1);
2327 nv_wr32(dev, 0x40587c, 0x1);
2328 nv_wr32(dev, 0x405a00, 0x0);
2329 nv_wr32(dev, 0x405a04, 0x0);
2330 nv_wr32(dev, 0x405a18, 0x0);
2331 nv_wr32(dev, 0x405b00, 0x0);
2332 nv_wr32(dev, 0x405b10, 0x1000);
2333}
2334
2335static void
2336nve0_graph_generate_unk60xx(struct drm_device *dev)
2337{
2338 nv_wr32(dev, 0x406020, 0x4103c1);
2339 nv_wr32(dev, 0x406028, 0x1);
2340 nv_wr32(dev, 0x40602c, 0x1);
2341 nv_wr32(dev, 0x406030, 0x1);
2342 nv_wr32(dev, 0x406034, 0x1);
2343}
2344
2345static void
2346nve0_graph_generate_unk64xx(struct drm_device *dev)
2347{
2348 nv_wr32(dev, 0x4064a8, 0x0);
2349 nv_wr32(dev, 0x4064ac, 0x3fff);
2350 nv_wr32(dev, 0x4064b4, 0x0);
2351 nv_wr32(dev, 0x4064b8, 0x0);
2352 nv_wr32(dev, 0x4064c0, 0x801a00f0);
2353 nv_wr32(dev, 0x4064c4, 0x192ffff);
2354 nv_wr32(dev, 0x4064c8, 0x1800600);
2355 nv_wr32(dev, 0x4064cc, 0x0);
2356 nv_wr32(dev, 0x4064d0, 0x0);
2357 nv_wr32(dev, 0x4064d4, 0x0);
2358 nv_wr32(dev, 0x4064d8, 0x0);
2359 nv_wr32(dev, 0x4064dc, 0x0);
2360 nv_wr32(dev, 0x4064e0, 0x0);
2361 nv_wr32(dev, 0x4064e4, 0x0);
2362 nv_wr32(dev, 0x4064e8, 0x0);
2363 nv_wr32(dev, 0x4064ec, 0x0);
2364 nv_wr32(dev, 0x4064fc, 0x22a);
2365}
2366
2367static void
2368nve0_graph_generate_unk70xx(struct drm_device *dev)
2369{
2370 nv_wr32(dev, 0x407040, 0x0);
2371}
2372
2373static void
2374nve0_graph_generate_unk78xx(struct drm_device *dev)
2375{
2376 nv_wr32(dev, 0x407804, 0x23);
2377 nv_wr32(dev, 0x40780c, 0xa418820);
2378 nv_wr32(dev, 0x407810, 0x62080e6);
2379 nv_wr32(dev, 0x407814, 0x20398a4);
2380 nv_wr32(dev, 0x407818, 0xe629062);
2381 nv_wr32(dev, 0x40781c, 0xa418820);
2382 nv_wr32(dev, 0x407820, 0xe6);
2383 nv_wr32(dev, 0x4078bc, 0x103);
2384}
2385
2386static void
2387nve0_graph_generate_unk80xx(struct drm_device *dev)
2388{
2389 nv_wr32(dev, 0x408000, 0x0);
2390 nv_wr32(dev, 0x408004, 0x0);
2391 nv_wr32(dev, 0x408008, 0x30);
2392 nv_wr32(dev, 0x40800c, 0x0);
2393 nv_wr32(dev, 0x408010, 0x0);
2394 nv_wr32(dev, 0x408014, 0x69);
2395 nv_wr32(dev, 0x408018, 0xe100e100);
2396 nv_wr32(dev, 0x408064, 0x0);
2397}
2398
static void
nve0_graph_generate_unk88xx(struct drm_device *dev)
{
        nv_wr32(dev, 0x408800, 0x2802a3c);
        nv_wr32(dev, 0x408804, 0x40);
        nv_wr32(dev, 0x408808, 0x1043e005);
        nv_wr32(dev, 0x408840, 0xb);
        nv_wr32(dev, 0x408900, 0x3080b801);
        nv_wr32(dev, 0x408904, 0x62000001);
        nv_wr32(dev, 0x408908, 0xc8102f);
        nv_wr32(dev, 0x408980, 0x11d);
}

static void
nve0_graph_generate_gpc(struct drm_device *dev)
{
        nv_wr32(dev, 0x418380, 0x16);
        nv_wr32(dev, 0x418400, 0x38004e00);
        nv_wr32(dev, 0x418404, 0x71e0ffff);
        nv_wr32(dev, 0x41840c, 0x1008);
        nv_wr32(dev, 0x418410, 0xfff0fff);
        nv_wr32(dev, 0x418414, 0x2200fff);
        nv_wr32(dev, 0x418450, 0x0);
        nv_wr32(dev, 0x418454, 0x0);
        nv_wr32(dev, 0x418458, 0x0);
        nv_wr32(dev, 0x41845c, 0x0);
        nv_wr32(dev, 0x418460, 0x0);
        nv_wr32(dev, 0x418464, 0x0);
        nv_wr32(dev, 0x418468, 0x1);
        nv_wr32(dev, 0x41846c, 0x0);
        nv_wr32(dev, 0x418470, 0x0);
        nv_wr32(dev, 0x418600, 0x1f);
        nv_wr32(dev, 0x418684, 0xf);
        nv_wr32(dev, 0x418700, 0x2);
        nv_wr32(dev, 0x418704, 0x80);
        nv_wr32(dev, 0x418708, 0x0);
        nv_wr32(dev, 0x41870c, 0x0);
        nv_wr32(dev, 0x418710, 0x0);
        nv_wr32(dev, 0x418800, 0x7006860a);
        nv_wr32(dev, 0x418808, 0x0);
        nv_wr32(dev, 0x41880c, 0x0);
        nv_wr32(dev, 0x418810, 0x0);
        nv_wr32(dev, 0x418828, 0x44);
        nv_wr32(dev, 0x418830, 0x10000001);
        nv_wr32(dev, 0x4188d8, 0x8);
        nv_wr32(dev, 0x4188e0, 0x1000000);
        nv_wr32(dev, 0x4188e8, 0x0);
        nv_wr32(dev, 0x4188ec, 0x0);
        nv_wr32(dev, 0x4188f0, 0x0);
        nv_wr32(dev, 0x4188f4, 0x0);
        nv_wr32(dev, 0x4188f8, 0x0);
        nv_wr32(dev, 0x4188fc, 0x20100018);
        nv_wr32(dev, 0x41891c, 0xff00ff);
        nv_wr32(dev, 0x418924, 0x0);
        nv_wr32(dev, 0x418928, 0xffff00);
        nv_wr32(dev, 0x41892c, 0xff00);
        nv_wr32(dev, 0x418a00, 0x0);
        nv_wr32(dev, 0x418a04, 0x0);
        nv_wr32(dev, 0x418a08, 0x0);
        nv_wr32(dev, 0x418a0c, 0x10000);
        nv_wr32(dev, 0x418a10, 0x0);
        nv_wr32(dev, 0x418a14, 0x0);
        nv_wr32(dev, 0x418a18, 0x0);
        nv_wr32(dev, 0x418a20, 0x0);
        nv_wr32(dev, 0x418a24, 0x0);
        nv_wr32(dev, 0x418a28, 0x0);
        nv_wr32(dev, 0x418a2c, 0x10000);
        nv_wr32(dev, 0x418a30, 0x0);
        nv_wr32(dev, 0x418a34, 0x0);
        nv_wr32(dev, 0x418a38, 0x0);
        nv_wr32(dev, 0x418a40, 0x0);
        nv_wr32(dev, 0x418a44, 0x0);
        nv_wr32(dev, 0x418a48, 0x0);
        nv_wr32(dev, 0x418a4c, 0x10000);
        nv_wr32(dev, 0x418a50, 0x0);
        nv_wr32(dev, 0x418a54, 0x0);
        nv_wr32(dev, 0x418a58, 0x0);
        nv_wr32(dev, 0x418a60, 0x0);
        nv_wr32(dev, 0x418a64, 0x0);
        nv_wr32(dev, 0x418a68, 0x0);
        nv_wr32(dev, 0x418a6c, 0x10000);
        nv_wr32(dev, 0x418a70, 0x0);
        nv_wr32(dev, 0x418a74, 0x0);
        nv_wr32(dev, 0x418a78, 0x0);
        nv_wr32(dev, 0x418a80, 0x0);
        nv_wr32(dev, 0x418a84, 0x0);
        nv_wr32(dev, 0x418a88, 0x0);
        nv_wr32(dev, 0x418a8c, 0x10000);
        nv_wr32(dev, 0x418a90, 0x0);
        nv_wr32(dev, 0x418a94, 0x0);
        nv_wr32(dev, 0x418a98, 0x0);
        nv_wr32(dev, 0x418aa0, 0x0);
        nv_wr32(dev, 0x418aa4, 0x0);
        nv_wr32(dev, 0x418aa8, 0x0);
        nv_wr32(dev, 0x418aac, 0x10000);
        nv_wr32(dev, 0x418ab0, 0x0);
        nv_wr32(dev, 0x418ab4, 0x0);
        nv_wr32(dev, 0x418ab8, 0x0);
        nv_wr32(dev, 0x418ac0, 0x0);
        nv_wr32(dev, 0x418ac4, 0x0);
        nv_wr32(dev, 0x418ac8, 0x0);
        nv_wr32(dev, 0x418acc, 0x10000);
        nv_wr32(dev, 0x418ad0, 0x0);
        nv_wr32(dev, 0x418ad4, 0x0);
        nv_wr32(dev, 0x418ad8, 0x0);
        nv_wr32(dev, 0x418ae0, 0x0);
        nv_wr32(dev, 0x418ae4, 0x0);
        nv_wr32(dev, 0x418ae8, 0x0);
        nv_wr32(dev, 0x418aec, 0x10000);
        nv_wr32(dev, 0x418af0, 0x0);
        nv_wr32(dev, 0x418af4, 0x0);
        nv_wr32(dev, 0x418af8, 0x0);
        nv_wr32(dev, 0x418b00, 0x6);
        nv_wr32(dev, 0x418b08, 0xa418820);
        nv_wr32(dev, 0x418b0c, 0x62080e6);
        nv_wr32(dev, 0x418b10, 0x20398a4);
        nv_wr32(dev, 0x418b14, 0xe629062);
        nv_wr32(dev, 0x418b18, 0xa418820);
        nv_wr32(dev, 0x418b1c, 0xe6);
        nv_wr32(dev, 0x418bb8, 0x103);
        nv_wr32(dev, 0x418c08, 0x1);
        nv_wr32(dev, 0x418c10, 0x0);
        nv_wr32(dev, 0x418c14, 0x0);
        nv_wr32(dev, 0x418c18, 0x0);
        nv_wr32(dev, 0x418c1c, 0x0);
        nv_wr32(dev, 0x418c20, 0x0);
        nv_wr32(dev, 0x418c24, 0x0);
        nv_wr32(dev, 0x418c28, 0x0);
        nv_wr32(dev, 0x418c2c, 0x0);
        nv_wr32(dev, 0x418c40, 0xffffffff);
        nv_wr32(dev, 0x418c6c, 0x1);
        nv_wr32(dev, 0x418c80, 0x20200004);
        nv_wr32(dev, 0x418c8c, 0x1);
        nv_wr32(dev, 0x419000, 0x780);
        nv_wr32(dev, 0x419004, 0x0);
        nv_wr32(dev, 0x419008, 0x0);
        nv_wr32(dev, 0x419014, 0x4);
}

static void
nve0_graph_generate_tpc(struct drm_device *dev)
{
        nv_wr32(dev, 0x419848, 0x0);
        nv_wr32(dev, 0x419864, 0x129);
        nv_wr32(dev, 0x419888, 0x0);
        nv_wr32(dev, 0x419a00, 0xf0);
        nv_wr32(dev, 0x419a04, 0x1);
        nv_wr32(dev, 0x419a08, 0x21);
        nv_wr32(dev, 0x419a0c, 0x20000);
        nv_wr32(dev, 0x419a10, 0x0);
        nv_wr32(dev, 0x419a14, 0x200);
        nv_wr32(dev, 0x419a1c, 0xc000);
        nv_wr32(dev, 0x419a20, 0x800);
        nv_wr32(dev, 0x419a30, 0x1);
        nv_wr32(dev, 0x419ac4, 0x37f440);
        nv_wr32(dev, 0x419c00, 0xa);
        nv_wr32(dev, 0x419c04, 0x80000006);
        nv_wr32(dev, 0x419c08, 0x2);
        nv_wr32(dev, 0x419c20, 0x0);
        nv_wr32(dev, 0x419c24, 0x84210);
        nv_wr32(dev, 0x419c28, 0x3efbefbe);
        nv_wr32(dev, 0x419ce8, 0x0);
        nv_wr32(dev, 0x419cf4, 0x3203);
        nv_wr32(dev, 0x419e04, 0x0);
        nv_wr32(dev, 0x419e08, 0x0);
        nv_wr32(dev, 0x419e0c, 0x0);
        nv_wr32(dev, 0x419e10, 0x402);
        nv_wr32(dev, 0x419e44, 0x13eff2);
        nv_wr32(dev, 0x419e48, 0x0);
        nv_wr32(dev, 0x419e4c, 0x7f);
        nv_wr32(dev, 0x419e50, 0x0);
        nv_wr32(dev, 0x419e54, 0x0);
        nv_wr32(dev, 0x419e58, 0x0);
        nv_wr32(dev, 0x419e5c, 0x0);
        nv_wr32(dev, 0x419e60, 0x0);
        nv_wr32(dev, 0x419e64, 0x0);
        nv_wr32(dev, 0x419e68, 0x0);
        nv_wr32(dev, 0x419e6c, 0x0);
        nv_wr32(dev, 0x419e70, 0x0);
        nv_wr32(dev, 0x419e74, 0x0);
        nv_wr32(dev, 0x419e78, 0x0);
        nv_wr32(dev, 0x419e7c, 0x0);
        nv_wr32(dev, 0x419e80, 0x0);
        nv_wr32(dev, 0x419e84, 0x0);
        nv_wr32(dev, 0x419e88, 0x0);
        nv_wr32(dev, 0x419e8c, 0x0);
        nv_wr32(dev, 0x419e90, 0x0);
        nv_wr32(dev, 0x419e94, 0x0);
        nv_wr32(dev, 0x419e98, 0x0);
        nv_wr32(dev, 0x419eac, 0x1fcf);
        nv_wr32(dev, 0x419eb0, 0xd3f);
        nv_wr32(dev, 0x419ec8, 0x1304f);
        nv_wr32(dev, 0x419f30, 0x0);
        nv_wr32(dev, 0x419f34, 0x0);
        nv_wr32(dev, 0x419f38, 0x0);
        nv_wr32(dev, 0x419f3c, 0x0);
        nv_wr32(dev, 0x419f40, 0x0);
        nv_wr32(dev, 0x419f44, 0x0);
        nv_wr32(dev, 0x419f48, 0x0);
        nv_wr32(dev, 0x419f4c, 0x0);
        nv_wr32(dev, 0x419f58, 0x0);
        nv_wr32(dev, 0x419f78, 0xb);
}

static void
nve0_graph_generate_tpcunk(struct drm_device *dev)
{
        nv_wr32(dev, 0x41be24, 0x6);
        nv_wr32(dev, 0x41bec0, 0x12180000);
        nv_wr32(dev, 0x41bec4, 0x37f7f);
        nv_wr32(dev, 0x41bee4, 0x6480430);
        nv_wr32(dev, 0x41bf00, 0xa418820);
        nv_wr32(dev, 0x41bf04, 0x62080e6);
        nv_wr32(dev, 0x41bf08, 0x20398a4);
        nv_wr32(dev, 0x41bf0c, 0xe629062);
        nv_wr32(dev, 0x41bf10, 0xa418820);
        nv_wr32(dev, 0x41bf14, 0xe6);
        nv_wr32(dev, 0x41bfd0, 0x900103);
        nv_wr32(dev, 0x41bfe0, 0x400001);
        nv_wr32(dev, 0x41bfe4, 0x0);
}

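/*
 * Build the initial ("golden") context image for a channel: write
 * default state into each PGRAPH unit, replay the per-channel mmio
 * list, derive the TPC/GPC layout words, then dump the default method
 * state.  Most register meanings on Kepler are still unknown, hence
 * the unk* naming throughout.
 */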
int
nve0_grctx_generate(struct nouveau_channel *chan)
{
        struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
        struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
        struct drm_device *dev = chan->dev;
        u32 data[6] = {}, data2[2] = {}, tmp;
        u32 tpc_set = 0, tpc_mask = 0;
        u8 tpcnr[GPC_MAX], a, b;
        u8 shift, ntpcv;
        int i, gpc, tpc, id;

        nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
        nv_wr32(dev, 0x400204, 0x00000000);
        nv_wr32(dev, 0x400208, 0x00000000);

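        /* default state for each PGRAPH subunit; the values are opaque
         * (presumably traced from the binary driver) and are simply
         * replayed verbatim */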
        nve0_graph_generate_unk40xx(dev);
        nve0_graph_generate_unk44xx(dev);
        nve0_graph_generate_unk46xx(dev);
        nve0_graph_generate_unk47xx(dev);
        nve0_graph_generate_unk58xx(dev);
        nve0_graph_generate_unk60xx(dev);
        nve0_graph_generate_unk64xx(dev);
        nve0_graph_generate_unk70xx(dev);
        nve0_graph_generate_unk78xx(dev);
        nve0_graph_generate_unk80xx(dev);
        nve0_graph_generate_unk88xx(dev);
        nve0_graph_generate_gpc(dev);
        nve0_graph_generate_tpc(dev);
        nve0_graph_generate_tpcunk(dev);

        nv_wr32(dev, 0x404154, 0x0);

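        /* replay the per-channel mmio list: (register, value) word
         * pairs read back from the channel's mmio object */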
        for (i = 0; i < grch->mmio_nr * 8; i += 8) {
                u32 reg = nv_ro32(grch->mmio, i + 0);
                u32 val = nv_ro32(grch->mmio, i + 4);
                nv_wr32(dev, reg, val);
        }

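        /* re-assert a few defaults; several of these repeat values the
         * unk* helpers above already wrote, so the per-channel list
         * presumably clobbers them */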
        nv_wr32(dev, 0x418c6c, 0x1);
        nv_wr32(dev, 0x41980c, 0x10);
        nv_wr32(dev, 0x41be08, 0x4);
        nv_wr32(dev, 0x4064c0, 0x801a00f0);
        nv_wr32(dev, 0x405800, 0xf8000bf);
        nv_wr32(dev, 0x419c00, 0xa);

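        /* assign a linear ID to every present TPC, walking TPC-major so
         * consecutive IDs land on different GPCs, and tell each GPC its
         * TPC count */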
        for (tpc = 0, id = 0; tpc < 4; tpc++) {
                for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
                        if (tpc < priv->tpc_nr[gpc]) {
                                nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x0698), id);
                                nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x04e8), id);
                                nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
                                nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x0088), id++);
                        }

                        nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
                        nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
                }
        }

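        /* pack the per-GPC TPC counts into one word, four bits per GPC */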
        tmp = 0;
        for (i = 0; i < priv->gpc_nr; i++)
                tmp |= priv->tpc_nr[i] << (i * 4);
        nv_wr32(dev, 0x406028, tmp);
        nv_wr32(dev, 0x405870, tmp);

        nv_wr32(dev, 0x40602c, 0x0);
        nv_wr32(dev, 0x405874, 0x0);
        nv_wr32(dev, 0x406030, 0x0);
        nv_wr32(dev, 0x405878, 0x0);
        nv_wr32(dev, 0x406034, 0x0);
        nv_wr32(dev, 0x40587c, 0x0);

        /* calculate first set of magics */
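        /*
         * Round-robin the TPCs across the GPCs, recording for each
         * global TPC index the GPC it landed on: six 5-bit GPC fields
         * per 32-bit word, unused slots filled with 7 below.
         */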
        memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));

        gpc = -1;
        for (tpc = 0; tpc < priv->tpc_total; tpc++) {
                do {
                        gpc = (gpc + 1) % priv->gpc_nr;
                } while (!tpcnr[gpc]);
                tpcnr[gpc]--;

                data[tpc / 6] |= gpc << ((tpc % 6) * 5);
        }

        for (; tpc < 32; tpc++)
                data[tpc / 6] |= 7 << ((tpc % 6) * 5);

        /* and the second... */
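        /*
         * Normalise tpc_total into [16,31] by shifting left until bit 4
         * is set, keeping the shift count (e.g. tpc_total = 8 gives
         * ntpcv = 16, shift = 1).  data2 then packs ntpcv, the shift,
         * and the remainders 2^5..2^11 mod ntpcv into 5-bit fields.
         */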
        shift = 0;
        ntpcv = priv->tpc_total;
        while (!(ntpcv & (1 << 4))) {
                ntpcv <<= 1;
                shift++;
        }

        data2[0] = ntpcv << 16;
        data2[0] |= shift << 21;
        data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
        data2[0] |= priv->tpc_total << 8;
        data2[0] |= priv->magic_not_rop_nr;
        for (i = 1; i < 7; i++)
                data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);

        /* and write it all to the various parts of PGRAPH */
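        /* the same layout words go to three blocks; the offsets match
         * the unk78xx, gpc and tpcunk dumps above */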
        nv_wr32(dev, 0x418bb8, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
        for (i = 0; i < 6; i++)
                nv_wr32(dev, 0x418b08 + (i * 4), data[i]);

        nv_wr32(dev, 0x41bfd0, data2[0]);
        nv_wr32(dev, 0x41bfe4, data2[1]);
        for (i = 0; i < 6; i++)
                nv_wr32(dev, 0x41bf00 + (i * 4), data[i]);

        nv_wr32(dev, 0x4078bc, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
        for (i = 0; i < 6; i++)
                nv_wr32(dev, 0x40780c + (i * 4), data[i]);

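        /* reset the per-GPC TPC counters and build a mask of all
         * present TPCs, eight bits per GPC */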
        memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
        for (gpc = 0; gpc < priv->gpc_nr; gpc++)
                tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);

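        /*
         * Fill the 32 mapping slots (screen tiles?): advance to the
         * next TPC, round-robin over the GPCs, whenever
         * i * (tpc_total - 1) / 32 increases, and write the cumulative
         * TPC set and its complement within tpc_mask for each slot.
         */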
        for (i = 0, gpc = -1, b = -1; i < 32; i++) {
                a = (i * (priv->tpc_total - 1)) / 32;
                if (a != b) {
                        b = a;
                        do {
                                gpc = (gpc + 1) % priv->gpc_nr;
                        } while (!tpcnr[gpc]);
                        tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;

                        tpc_set |= 1 << ((gpc * 8) + tpc);
                }

                nv_wr32(dev, 0x406800 + (i * 0x20), tpc_set);
                nv_wr32(dev, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask);
        }

        for (i = 0; i < 8; i++)
                nv_wr32(dev, 0x4064d0 + (i * 0x04), 0x00000000);

        nv_wr32(dev, 0x405b00, 0x201);
        nv_wr32(dev, 0x408850, 0x2);
        nv_wr32(dev, 0x408958, 0x2);
        nv_wr32(dev, 0x419f78, 0xa);

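        /* dump the default method state: the ICMD sequence plus, going
         * by the function names, the initial state of the 0xa097 (3D)
         * and 0x902d (2D) classes */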
        nve0_grctx_generate_icmd(dev);
        nve0_grctx_generate_a097(dev);
        nve0_grctx_generate_902d(dev);

        nv_mask(dev, 0x000260, 0x00000001, 0x00000001);
        nv_wr32(dev, 0x418800, 0x7026860a); //XXX
        nv_wr32(dev, 0x41be10, 0x00bb8bc7); //XXX
        return 0;
}