-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_bo.c        129
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_channel.c     3
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_dma.h         6
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_drv.h         6
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_object.c      1
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_state.c      63
-rw-r--r--   drivers/gpu/drm/nouveau/nv50_fbcon.c          3
-rw-r--r--   drivers/gpu/drm/nouveau/nve0_graph.c          2
8 files changed, 119 insertions, 94 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 6d66314d16bd..6e78b1aaa74d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -36,6 +36,7 @@
 #include "nouveau_mm.h"
 #include "nouveau_vm.h"
 #include "nouveau_fence.h"
+#include "nouveau_ramht.h"
 
 #include <linux/log2.h>
 #include <linux/slab.h>
@@ -511,6 +512,17 @@ nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 }
 
 static int
+nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+	int ret = RING_SPACE(chan, 2);
+	if (ret == 0) {
+		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
+		OUT_RING (chan, handle);
+	}
+	return ret;
+}
+
+static int
 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
@@ -528,17 +540,17 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 
-		BEGIN_NVC0(chan, NvSubM2MF, 0x0238, 2);
+		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
 		OUT_RING (chan, upper_32_bits(dst_offset));
 		OUT_RING (chan, lower_32_bits(dst_offset));
-		BEGIN_NVC0(chan, NvSubM2MF, 0x030c, 6);
+		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
 		OUT_RING (chan, upper_32_bits(src_offset));
 		OUT_RING (chan, lower_32_bits(src_offset));
 		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
 		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
 		OUT_RING (chan, PAGE_SIZE); /* line_length */
 		OUT_RING (chan, line_count);
-		BEGIN_NVC0(chan, NvSubM2MF, 0x0300, 1);
+		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
 		OUT_RING (chan, 0x00100110);
 
 		page_count -= line_count;
@@ -550,6 +562,28 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 }
 
 static int
+nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+	int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
+					 &chan->m2mf_ntfy);
+	if (ret == 0) {
+		ret = RING_SPACE(chan, 6);
+		if (ret == 0) {
+			BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+			OUT_RING (chan, handle);
+			BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
+			OUT_RING (chan, NvNotify0);
+			OUT_RING (chan, NvDmaFB);
+			OUT_RING (chan, NvDmaFB);
+		} else {
+			nouveau_ramht_remove(chan, NvNotify0);
+		}
+	}
+
+	return ret;
+}
+
+static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
@@ -573,7 +607,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 
-		BEGIN_NV04(chan, NvSubM2MF, 0x0200, 7);
+		BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
 		OUT_RING (chan, 0);
 		OUT_RING (chan, 0);
 		OUT_RING (chan, stride);
@@ -586,7 +620,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 
-		BEGIN_NV04(chan, NvSubM2MF, 0x0200, 1);
+		BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
 		OUT_RING (chan, 1);
 	}
 	if (old_mem->mem_type == TTM_PL_VRAM &&
@@ -595,7 +629,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 
-		BEGIN_NV04(chan, NvSubM2MF, 0x021c, 7);
+		BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
 		OUT_RING (chan, 0);
 		OUT_RING (chan, 0);
 		OUT_RING (chan, stride);
@@ -608,7 +642,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 
-		BEGIN_NV04(chan, NvSubM2MF, 0x021c, 1);
+		BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
 		OUT_RING (chan, 1);
 	}
 
@@ -616,10 +650,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 
-		BEGIN_NV04(chan, NvSubM2MF, 0x0238, 2);
+		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
 		OUT_RING (chan, upper_32_bits(src_offset));
 		OUT_RING (chan, upper_32_bits(dst_offset));
-		BEGIN_NV04(chan, NvSubM2MF, 0x030c, 8);
+		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
 		OUT_RING (chan, lower_32_bits(src_offset));
 		OUT_RING (chan, lower_32_bits(dst_offset));
 		OUT_RING (chan, stride);
@@ -628,7 +662,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		OUT_RING (chan, height);
 		OUT_RING (chan, 0x00000101);
 		OUT_RING (chan, 0x00000000);
-		BEGIN_NV04(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
 		OUT_RING (chan, 0);
 
 		length -= amount;
@@ -639,6 +673,24 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 	return 0;
 }
 
+static int
+nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+	int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
+					 &chan->m2mf_ntfy);
+	if (ret == 0) {
+		ret = RING_SPACE(chan, 4);
+		if (ret == 0) {
+			BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+			OUT_RING (chan, handle);
+			BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
+			OUT_RING (chan, NvNotify0);
+		}
+	}
+
+	return ret;
+}
+
 static inline uint32_t
 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
 		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
@@ -661,7 +713,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 	if (ret)
 		return ret;
 
-	BEGIN_NV04(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
+	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
 	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
 	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
 
@@ -673,7 +725,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 
-		BEGIN_NV04(chan, NvSubM2MF,
+		BEGIN_NV04(chan, NvSubCopy,
 			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
 		OUT_RING (chan, src_offset);
 		OUT_RING (chan, dst_offset);
@@ -683,7 +735,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		OUT_RING (chan, line_count);
 		OUT_RING (chan, 0x00000101);
 		OUT_RING (chan, 0x00000000);
-		BEGIN_NV04(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
 		OUT_RING (chan, 0);
 
 		page_count -= line_count;
@@ -743,16 +795,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		goto out;
 	}
 
-	if (dev_priv->card_type < NV_50)
-		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
-	else
-	if (dev_priv->card_type < NV_C0)
-		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
-	else
-	if (dev_priv->card_type < NV_E0)
-		ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
-	else
-		ret = nve0_bo_move_copy(chan, bo, &bo->mem, new_mem);
+	ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem);
 	if (ret == 0) {
 		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
 						    no_wait_reserve,
@@ -764,6 +807,42 @@ out:
 	return ret;
 }
 
+void
+nouveau_bo_move_init(struct nouveau_channel *chan)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	static const struct {
+		const char *name;
+		u32 oclass;
+		int (*exec)(struct nouveau_channel *,
+			    struct ttm_buffer_object *,
+			    struct ttm_mem_reg *, struct ttm_mem_reg *);
+		int (*init)(struct nouveau_channel *, u32 handle);
+	} _methods[] = {
+		{ "COPY", 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
+		{ "M2MF", 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
+		{ "M2MF", 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
+		{ "M2MF", 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
+		{}
+	}, *mthd = _methods;
+	const char *name = "CPU";
+	int ret;
+
+	do {
+		ret = nouveau_gpuobj_gr_new(chan, mthd->oclass, mthd->oclass);
+		if (ret == 0) {
+			ret = mthd->init(chan, mthd->oclass);
+			if (ret == 0) {
+				dev_priv->ttm.move = mthd->exec;
+				name = mthd->name;
+				break;
+			}
+		}
+	} while ((++mthd)->exec);
+
+	NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name);
+}
+
 static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 		      bool no_wait_reserve, bool no_wait_gpu,
@@ -920,8 +999,8 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 		goto out;
 	}
 
-	/* Software copy if the card isn't up and running yet. */
-	if (!dev_priv->channel) {
+	/* CPU copy if we have no accelerated method available */
+	if (!dev_priv->ttm.move) {
 		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 		goto out;
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 9420538d2374..629d8a2df5bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -355,7 +355,7 @@ nouveau_channel_ref(struct nouveau_channel *chan,
 	*pchan = chan;
 }
 
-void
+int
 nouveau_channel_idle(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
@@ -370,6 +370,7 @@ nouveau_channel_idle(struct nouveau_channel *chan)
 
 	if (ret)
 		NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+	return ret;
 }
 
 /* cleans up all the fifos from file_priv */
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index b8838592d4e9..8db68be9544f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -48,13 +48,12 @@ void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
 
 /* Hardcoded object assignments to subchannels (subchannel id). */
 enum {
-	NvSubM2MF	= 0,
+	NvSubCtxSurf2D	= 0,
 	NvSubSw		= 1,
-	NvSubCtxSurf2D	= 2,
+	NvSubImageBlit	= 2,
 	NvSub2D		= 3,
 	NvSubGdiRect	= 3,
 	NvSubCopy	= 4,
-	NvSubImageBlit	= 4
 };
 
 /* Object handles. */
@@ -74,6 +73,7 @@ enum {
 	NvSema		= 0x8000000f,
 	NvEvoSema0	= 0x80000010,
 	NvEvoSema1	= 0x80000011,
+	NvNotify1	= 0x80000012,
 
 	/* G80+ display objects */
 	NvEvoVRAM	= 0x01000000,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1ede35491f54..634d222c93de 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -693,6 +693,9 @@ struct drm_nouveau_private {
 		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
 		atomic_t validate_sequence;
+		int (*move)(struct nouveau_channel *,
+			    struct ttm_buffer_object *,
+			    struct ttm_mem_reg *, struct ttm_mem_reg *);
 	} ttm;
 
 	struct {
@@ -930,7 +933,7 @@ extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
 extern void nouveau_channel_put(struct nouveau_channel **);
 extern void nouveau_channel_ref(struct nouveau_channel *chan,
 				struct nouveau_channel **pchan);
-extern void nouveau_channel_idle(struct nouveau_channel *chan);
+extern int nouveau_channel_idle(struct nouveau_channel *chan);
 
 /* nouveau_object.c */
 #define NVOBJ_ENGINE_ADD(d, e, p) do { \
@@ -1322,6 +1325,7 @@ extern int nv04_crtc_create(struct drm_device *, int index);
 
 /* nouveau_bo.c */
 extern struct ttm_bo_driver nouveau_bo_driver;
+extern void nouveau_bo_move_init(struct nouveau_channel *);
 extern int nouveau_bo_new(struct drm_device *, int size, int align,
 			  uint32_t flags, uint32_t tile_mode,
 			  uint32_t tile_flags,
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index d7e56ce410b0..a3f961d98992 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -508,7 +508,6 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
 		return eng->object_new(chan, oc->engine, handle, class);
 	}
 
-	NV_ERROR(dev, "illegal object class: 0x%x\n", class);
 	return -EINVAL;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index e4e73a13a2b2..9b4c900a3f30 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -509,73 +509,16 @@ nouveau_card_channel_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan;
-	int ret, oclass;
+	int ret;
 
 	ret = nouveau_channel_alloc(dev, &chan, NULL, NvDmaFB, NvDmaTT);
 	dev_priv->channel = chan;
 	if (ret)
 		return ret;
-
 	mutex_unlock(&dev_priv->channel->mutex);
 
-	if (dev_priv->card_type <= NV_50) {
-		if (dev_priv->card_type < NV_50)
-			oclass = 0x0039;
-		else
-			oclass = 0x5039;
-
-		ret = nouveau_gpuobj_gr_new(chan, NvM2MF, oclass);
-		if (ret)
-			goto error;
-
-		ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
-					     &chan->m2mf_ntfy);
-		if (ret)
-			goto error;
-
-		ret = RING_SPACE(chan, 6);
-		if (ret)
-			goto error;
-
-		BEGIN_NV04(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
-		OUT_RING (chan, NvM2MF);
-		BEGIN_NV04(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
-		OUT_RING (chan, NvNotify0);
-		OUT_RING (chan, chan->vram_handle);
-		OUT_RING (chan, chan->gart_handle);
-	} else
-	if (dev_priv->card_type <= NV_D0) {
-		ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
-		if (ret)
-			goto error;
-
-		ret = RING_SPACE(chan, 2);
-		if (ret)
-			goto error;
-
-		BEGIN_NVC0(chan, NvSubM2MF, 0x0000, 1);
-		OUT_RING (chan, 0x00009039);
-	} else
-	if (dev_priv->card_type <= NV_E0) {
-		/* not used, but created to get a graph context */
-		ret = nouveau_gpuobj_gr_new(chan, 0xa040, 0xa040);
-		if (ret)
-			goto error;
-
-		/* bind strange copy engine to subchannel 4 (fixed...) */
-		ret = RING_SPACE(chan, 2);
-		if (ret)
-			goto error;
-
-		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
-		OUT_RING (chan, 0x0000a0b5);
-	}
-
-	FIRE_RING (chan);
-error:
-	if (ret)
-		nouveau_card_channel_fini(dev);
-	return ret;
+	nouveau_bo_move_init(chan);
+	return 0;
 }
 
 static const struct vga_switcheroo_client_ops nouveau_switcheroo_ops = {
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 61747e0d1180..e3c8b05dcae4 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -201,8 +201,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
 
 	BEGIN_NV04(chan, NvSub2D, 0x0000, 1);
 	OUT_RING(chan, Nv2D);
-	BEGIN_NV04(chan, NvSub2D, 0x0180, 4);
-	OUT_RING(chan, NvNotify0);
+	BEGIN_NV04(chan, NvSub2D, 0x0184, 3);
 	OUT_RING(chan, chan->vram_handle);
 	OUT_RING(chan, chan->vram_handle);
 	OUT_RING(chan, chan->vram_handle);
diff --git a/drivers/gpu/drm/nouveau/nve0_graph.c b/drivers/gpu/drm/nouveau/nve0_graph.c
index 41356274ca61..8a8051b68f10 100644
--- a/drivers/gpu/drm/nouveau/nve0_graph.c
+++ b/drivers/gpu/drm/nouveau/nve0_graph.c
@@ -822,7 +822,7 @@ nve0_graph_create(struct drm_device *dev)
 	NVOBJ_CLASS(dev, 0xa0c0, GR); /* subc 1: COMPUTE */
 	NVOBJ_CLASS(dev, 0xa040, GR); /* subc 2: P2MF */
 	NVOBJ_CLASS(dev, 0x902d, GR); /* subc 3: 2D */
-	//NVOBJ_CLASS(dev, 0xa0b5, GR); /* subc 4: COPY */
+	NVOBJ_CLASS(dev, 0xa0b5, GR); /* subc 4: COPY */
 	return 0;
 
 error: