author	Ben Skeggs <bskeggs@redhat.com>	2016-11-04 03:20:36 -0400
committer	Ben Skeggs <bskeggs@redhat.com>	2016-11-06 23:05:01 -0500
commit	e1ef6b42d9014079e5ab71acd8669d39808c3c73 (patch)
tree	654642467925a33cfe0be71f420e2fb68a05e565 /drivers/gpu
parent	c2d926aacc7eeaf39edbd0ab8d5fba33acbb168d (diff)
drm/nouveau/kms/nv50: remove code to support non-atomic page flips
Made completely unreachable (and broken) by atomic commits.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
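For context on why the removed paths are unreachable: with atomic modesetting, the legacy DRM_IOCTL_MODE_PAGE_FLIP request is serviced by the DRM atomic helpers rather than by driver-private flip code such as nv50_display_flip_next(). Below is a minimal, hedged sketch, not taken from this patch (example_crtc_funcs is an illustrative name), of how a CRTC's legacy hooks are typically routed through those helpers on an atomic driver.

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>

/*
 * Illustrative only: once .page_flip points at drm_atomic_helper_page_flip(),
 * a userspace flip request becomes an atomic commit on the primary plane,
 * so no driver-specific flip routine is ever invoked for this CRTC.
 */
static const struct drm_crtc_funcs example_crtc_funcs = {
	.reset			= drm_atomic_helper_crtc_reset,
	.destroy		= drm_crtc_cleanup,
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
};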
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_display.c	62
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_fence.h	2
-rw-r--r--	drivers/gpu/drm/nouveau/nv10_fence.c	3
-rw-r--r--	drivers/gpu/drm/nouveau/nv10_fence.h	1
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_display.c	198
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_display.h	7
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_fence.c	20
-rw-r--r--	drivers/gpu/drm/nouveau/nv84_fence.c	22
8 files changed, 27 insertions(+), 288 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 9e16deffba86..62d75308c57c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -835,10 +835,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
 	if (ret)
 		goto fail;
 
-	if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI)
-		BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
-	else
-		BEGIN_NVC0(chan, FermiSw, NV_SW_PAGE_FLIP, 1);
+	BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
 	OUT_RING  (chan, 0x00000000);
 	FIRE_RING (chan);
 
@@ -867,6 +864,8 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	struct nouveau_channel *chan;
 	struct nouveau_cli *cli;
 	struct nouveau_fence *fence;
+	struct nv04_display *dispnv04 = nv04_display(dev);
+	int head = nouveau_crtc(crtc)->index;
 	int ret;
 
 	chan = drm->channel;
@@ -913,32 +912,23 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	drm_crtc_vblank_get(crtc);
 
 	/* Emit a page flip */
-	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
+	if (swap_interval) {
+		ret = RING_SPACE(chan, 8);
 		if (ret)
 			goto fail_unreserve;
-	} else {
-		struct nv04_display *dispnv04 = nv04_display(dev);
-		int head = nouveau_crtc(crtc)->index;
-
-		if (swap_interval) {
-			ret = RING_SPACE(chan, 8);
-			if (ret)
-				goto fail_unreserve;
-
-			BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
-			OUT_RING  (chan, 0);
-			BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
-			OUT_RING  (chan, head);
-			BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
-			OUT_RING  (chan, 0);
-			BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
-			OUT_RING  (chan, 0);
-		}
 
-		nouveau_bo_ref(new_bo, &dispnv04->image[head]);
+		BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
+		OUT_RING  (chan, 0);
+		BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
+		OUT_RING  (chan, head);
+		BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
+		OUT_RING  (chan, 0);
+		BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
+		OUT_RING  (chan, 0);
 	}
 
+	nouveau_bo_ref(new_bo, &dispnv04->image[head]);
+
 	ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
 	if (ret)
 		goto fail_unreserve;
@@ -986,16 +976,8 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
 
 	s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
 	if (s->event) {
-		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
-			drm_crtc_arm_vblank_event(s->crtc, s->event);
-		} else {
-			drm_crtc_send_vblank_event(s->crtc, s->event);
-
-			/* Give up ownership of vblank for page-flipped crtc */
-			drm_crtc_vblank_put(s->crtc);
-		}
-	}
-	else {
+		drm_crtc_arm_vblank_event(s->crtc, s->event);
+	} else {
 		/* Give up ownership of vblank for page-flipped crtc */
 		drm_crtc_vblank_put(s->crtc);
 	}
@@ -1017,12 +999,10 @@ nouveau_flip_complete(struct nvif_notify *notify)
 	struct nouveau_page_flip_state state;
 
 	if (!nouveau_finish_page_flip(chan, &state)) {
-		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
-			nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc),
-					 state.offset + state.crtc->y *
-					 state.pitch + state.crtc->x *
-					 state.bpp / 8);
-		}
+		nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc),
+				 state.offset + state.crtc->y *
+				 state.pitch + state.crtc->x *
+				 state.bpp / 8);
 	}
 
 	return NVIF_NOTIFY_KEEP;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 41f3c019e534..ccdce1b4eec4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -92,7 +92,6 @@ struct nv84_fence_chan {
 	struct nouveau_fence_chan base;
 	struct nvkm_vma vma;
 	struct nvkm_vma vma_gart;
-	struct nvkm_vma dispc_vma[4];
 };
 
 struct nv84_fence_priv {
@@ -102,7 +101,6 @@ struct nv84_fence_priv {
 	u32 *suspend;
 };
 
-u64 nv84_fence_crtc(struct nouveau_channel *, int);
 int nv84_fence_context_new(struct nouveau_channel *);
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index bcf3a213d5d8..2998bde29211 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -57,10 +57,7 @@ void
 nv10_fence_context_del(struct nouveau_channel *chan)
 {
 	struct nv10_fence_chan *fctx = chan->fence;
-	int i;
 	nouveau_fence_context_del(&fctx->base);
-	for (i = 0; i < ARRAY_SIZE(fctx->head); i++)
-		nvif_object_fini(&fctx->head[i]);
 	nvif_object_fini(&fctx->sema);
 	chan->fence = NULL;
 	nouveau_fence_context_free(&fctx->base);
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.h b/drivers/gpu/drm/nouveau/nv10_fence.h
index a87259f3983a..b7a508585304 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.h
+++ b/drivers/gpu/drm/nouveau/nv10_fence.h
@@ -7,7 +7,6 @@
 struct nv10_fence_chan {
 	struct nouveau_fence_chan base;
 	struct nvif_object sema;
-	struct nvif_object head[4];
 };
 
 struct nv10_fence_priv {
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 8d48ee475149..4b195cc32ec4 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -658,11 +658,8 @@ nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
 
 struct nv50_head {
 	struct nouveau_crtc base;
-	struct nouveau_bo *image;
 	struct nv50_ovly ovly;
 	struct nv50_oimm oimm;
-
-	struct nv50_base *_base;
 };
 
 #define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
@@ -740,40 +737,6 @@ evo_kick(u32 *push, void *evoc)
 	*((p)++) = _d;						       \
 } while(0)
 
-static bool
-evo_sync_wait(void *data)
-{
-	if (nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000)
-		return true;
-	usleep_range(1, 2);
-	return false;
-}
-
-static int
-evo_sync(struct drm_device *dev)
-{
-	struct nvif_device *device = &nouveau_drm(dev)->device;
-	struct nv50_disp *disp = nv50_disp(dev);
-	struct nv50_mast *mast = nv50_mast(dev);
-	u32 *push = evo_wait(mast, 8);
-	if (push) {
-		nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
-		evo_mthd(push, 0x0084, 1);
-		evo_data(push, 0x80000000 | EVO_MAST_NTFY);
-		evo_mthd(push, 0x0080, 2);
-		evo_data(push, 0x00000000);
-		evo_data(push, 0x00000000);
-		evo_kick(push, mast);
-		if (nvif_msec(device, 2000,
-			if (evo_sync_wait(disp->sync))
-				break;
-		) >= 0)
-			return 0;
-	}
-
-	return -EBUSY;
-}
-
 /******************************************************************************
  * Plane
  *****************************************************************************/
@@ -789,8 +752,6 @@ struct nv50_wndw {
 	u16 ntfy;
 	u16 sema;
 	u32 data;
-
-	struct nv50_wndw_atom asy;
 };
 
 struct nv50_wndw_func {
@@ -1582,151 +1543,6 @@ nv50_base_new(struct nouveau_drm *drm, struct nv50_head *head,
 }
 
 /******************************************************************************
- * Page flipping channel
- *****************************************************************************/
-struct nouveau_bo *
-nv50_display_crtc_sema(struct drm_device *dev, int crtc)
-{
-	return nv50_disp(dev)->sync;
-}
-
-struct nv50_display_flip {
-	struct nv50_disp *disp;
-	struct nv50_base *base;
-};
-
-static bool
-nv50_display_flip_wait(void *data)
-{
-	struct nv50_display_flip *flip = data;
-	if (nouveau_bo_rd32(flip->disp->sync, flip->base->wndw.sema / 4) ==
-	    flip->base->wndw.data)
-		return true;
-	usleep_range(1, 2);
-	return false;
-}
-
-void
-nv50_display_flip_stop(struct drm_crtc *crtc)
-{
-	struct nvif_device *device = &nouveau_drm(crtc->dev)->device;
-	struct nv50_base *base = nv50_head(crtc)->_base;
-	struct nv50_wndw *wndw = &base->wndw;
-	struct nv50_wndw_atom *asyw = &wndw->asy;
-	struct nv50_display_flip flip = {
-		.disp = nv50_disp(crtc->dev),
-		.base = base,
-	};
-
-	asyw->state.crtc = NULL;
-	asyw->state.fb = NULL;
-	nv50_wndw_atomic_check(&wndw->plane, &asyw->state);
-	nv50_wndw_flush_clr(wndw, 0, true, asyw);
-
-	nvif_msec(device, 2000,
-		if (nv50_display_flip_wait(&flip))
-			break;
-	);
-}
-
-int
-nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-		       struct nouveau_channel *chan, u32 swap_interval)
-{
-	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct nv50_head *head = nv50_head(crtc);
-	struct nv50_base *base = nv50_head(crtc)->_base;
-	struct nv50_wndw *wndw = &base->wndw;
-	struct nv50_wndw_atom *asyw = &wndw->asy;
-	int ret;
-
-	if (crtc->primary->fb->width != fb->width ||
-	    crtc->primary->fb->height != fb->height)
-		return -EINVAL;
-
-	if (chan == NULL)
-		evo_sync(crtc->dev);
-
-	if (chan && chan->user.oclass < G82_CHANNEL_GPFIFO) {
-		ret = RING_SPACE(chan, 8);
-		if (ret)
-			return ret;
-
-		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
-		OUT_RING  (chan, NvEvoSema0 + nv_crtc->index);
-		OUT_RING  (chan, base->wndw.sema ^ 0x10);
-		BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
-		OUT_RING  (chan, base->wndw.data + 1);
-		BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
-		OUT_RING  (chan, base->wndw.sema);
-		OUT_RING  (chan, base->wndw.data);
-	} else
-	if (chan && chan->user.oclass < FERMI_CHANNEL_GPFIFO) {
-		u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + base->wndw.sema;
-		ret = RING_SPACE(chan, 12);
-		if (ret)
-			return ret;
-
-		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
-		OUT_RING  (chan, chan->vram.handle);
-		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING  (chan, upper_32_bits(addr ^ 0x10));
-		OUT_RING  (chan, lower_32_bits(addr ^ 0x10));
-		OUT_RING  (chan, base->wndw.data + 1);
-		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
-		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING  (chan, upper_32_bits(addr));
-		OUT_RING  (chan, lower_32_bits(addr));
-		OUT_RING  (chan, base->wndw.data);
-		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
-	} else
-	if (chan) {
-		u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + base->wndw.sema;
-		ret = RING_SPACE(chan, 10);
-		if (ret)
-			return ret;
-
-		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING  (chan, upper_32_bits(addr ^ 0x10));
-		OUT_RING  (chan, lower_32_bits(addr ^ 0x10));
-		OUT_RING  (chan, base->wndw.data + 1);
-		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG |
-				 NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
-		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING  (chan, upper_32_bits(addr));
-		OUT_RING  (chan, lower_32_bits(addr));
-		OUT_RING  (chan, base->wndw.data);
-		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL |
-				 NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
-	}
-
-	if (chan) {
-		base->wndw.sema ^= 0x10;
-		base->wndw.data++;
-		FIRE_RING (chan);
-	}
-
-	/* queue the flip */
-	asyw->state.crtc = &head->base.base;
-	asyw->state.fb = fb;
-	asyw->interval = swap_interval;
-	asyw->image.handle = nv_fb->r_handle;
-	asyw->image.offset = nv_fb->nvbo->bo.offset;
-	asyw->sema.handle = base->chan.base.sync.handle;
-	asyw->sema.offset = base->wndw.sema;
-	asyw->sema.acquire = base->wndw.data++;
-	asyw->sema.release = base->wndw.data;
-	nv50_wndw_atomic_check(&wndw->plane, &asyw->state);
-	asyw->set.sema = true;
-	nv50_wndw_flush_set(wndw, 0, asyw);
-	nv50_wndw_wait_armed(wndw, asyw);
-
-	nouveau_bo_ref(nv_fb->nvbo, &head->image);
-	return 0;
-}
-
-/******************************************************************************
  * Head
  *****************************************************************************/
 static void
@@ -2610,8 +2426,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
 	}
 
 	crtc = &head->base.base;
-	head->_base = base;
-
 	drm_crtc_init_with_planes(dev, crtc, &base->wndw.plane,
 				  &curs->wndw.plane, &nv50_crtc_func,
 				  "head-%d", head->base.index);
@@ -4061,7 +3875,6 @@ nv50_display_fini(struct drm_device *dev)
 int
 nv50_display_init(struct drm_device *dev)
 {
-	struct nv50_disp *disp = nv50_disp(dev);
 	struct drm_encoder *encoder;
 	struct drm_plane *plane;
 	struct drm_crtc *crtc;
@@ -4071,13 +3884,6 @@ nv50_display_init(struct drm_device *dev)
 	if (!push)
 		return -EBUSY;
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct nv50_wndw *wndw = &nv50_head(crtc)->_base->wndw;
-
-		nv50_crtc_lut_load(crtc);
-		nouveau_bo_wr32(disp->sync, wndw->sema / 4, wndw->data);
-	}
-
 	evo_mthd(push, 0x0088, 1);
 	evo_data(push, nv50_mast(dev)->base.sync.handle);
 	evo_kick(push, nv50_mast(dev));
@@ -4094,6 +3900,10 @@ nv50_display_init(struct drm_device *dev)
 		}
 	}
 
+	drm_for_each_crtc(crtc, dev) {
+		nv50_crtc_lut_load(crtc);
+	}
+
 	drm_for_each_plane(plane, dev) {
 		struct nv50_wndw *wndw = nv50_wndw(plane);
 		if (plane->funcs != &nv50_wndw)
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 70da347aa8c5..918187cee84b 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -35,11 +35,4 @@ int nv50_display_create(struct drm_device *);
 void nv50_display_destroy(struct drm_device *);
 int nv50_display_init(struct drm_device *);
 void nv50_display_fini(struct drm_device *);
-
-void nv50_display_flip_stop(struct drm_crtc *);
-int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
-			   struct nouveau_channel *, u32 swap_interval);
-
-struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head);
-
 #endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 8c5295414578..f68c7054fd53 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -35,13 +35,12 @@
 static int
 nv50_fence_context_new(struct nouveau_channel *chan)
 {
-	struct drm_device *dev = chan->drm->dev;
 	struct nv10_fence_priv *priv = chan->drm->fence;
 	struct nv10_fence_chan *fctx;
 	struct ttm_mem_reg *mem = &priv->bo->bo.mem;
 	u32 start = mem->start * PAGE_SIZE;
 	u32 limit = start + mem->size - 1;
-	int ret, i;
+	int ret;
 
 	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
 	if (!fctx)
@@ -60,23 +59,6 @@ nv50_fence_context_new(struct nouveau_channel *chan)
 					.limit = limit,
 			       }, sizeof(struct nv_dma_v0),
 			       &fctx->sema);
-
-	/* dma objects for display sync channel semaphore blocks */
-	for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
-		struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
-		u32 start = bo->bo.mem.start * PAGE_SIZE;
-		u32 limit = start + bo->bo.mem.size - 1;
-
-		ret = nvif_object_init(&chan->user, NvEvoSema0 + i,
-				       NV_DMA_IN_MEMORY, &(struct nv_dma_v0) {
-						.target = NV_DMA_V0_TARGET_VRAM,
-						.access = NV_DMA_V0_ACCESS_RDWR,
-						.start = start,
-						.limit = limit,
-				       }, sizeof(struct nv_dma_v0),
-				       &fctx->head[i]);
-	}
-
 	if (ret)
 		nv10_fence_context_del(chan);
 	return ret;
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 23ef04b4e0b2..52b87ae83e7b 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -28,13 +28,6 @@
 
 #include "nv50_display.h"
 
-u64
-nv84_fence_crtc(struct nouveau_channel *chan, int crtc)
-{
-	struct nv84_fence_chan *fctx = chan->fence;
-	return fctx->dispc_vma[crtc].offset;
-}
-
 static int
 nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
 {
@@ -110,15 +103,8 @@ nv84_fence_read(struct nouveau_channel *chan)
 static void
 nv84_fence_context_del(struct nouveau_channel *chan)
 {
-	struct drm_device *dev = chan->drm->dev;
 	struct nv84_fence_priv *priv = chan->drm->fence;
 	struct nv84_fence_chan *fctx = chan->fence;
-	int i;
-
-	for (i = 0; i < dev->mode_config.num_crtc; i++) {
-		struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
-		nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
-	}
 
 	nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
 	nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
@@ -134,7 +120,7 @@ nv84_fence_context_new(struct nouveau_channel *chan)
 	struct nouveau_cli *cli = (void *)chan->user.client;
 	struct nv84_fence_priv *priv = chan->drm->fence;
 	struct nv84_fence_chan *fctx;
-	int ret, i;
+	int ret;
 
 	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
 	if (!fctx)
@@ -154,12 +140,6 @@ nv84_fence_context_new(struct nouveau_channel *chan)
 					 &fctx->vma_gart);
 	}
 
-	/* map display semaphore buffers into channel's vm */
-	for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
-		struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
-		ret = nouveau_bo_vma_add(bo, cli->vm, &fctx->dispc_vma[i]);
-	}
-
 	if (ret)
 		nv84_fence_context_del(chan);
 	return ret;