author		Ben Skeggs <bskeggs@redhat.com>	2013-02-13 18:28:37 -0500
committer	Ben Skeggs <bskeggs@redhat.com>	2013-02-20 01:00:51 -0500
commit		a34caf78f26bda63869471cb3f46f354f4658758 (patch)
tree		dc9b1ce9d37ea3d2f0c6a34f7e4c90cc5be3a8f0	/drivers/gpu/drm/nouveau/nv84_fence.c
parent		fa531bc8b4278010fd11819c089f6679890addee (diff)
drm/nv84/fence: access fences with full virtual address, not offset
Allows most of the code to be shared between nv84/nvc0 implementations, and
paves the way for doing emit/sync on non-VRAM buffers (multi-gpu, dma-buf).

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
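In short, the semaphore address emitted into the push buffer is no longer a bare
offset into the per-channel NvSema DMA object; it is the full virtual address of
the channel's 16-byte fence slot in the shared fence buffer object, mapped into
the channel's VM. A minimal sketch of that addressing change (illustrative
helpers only, not code added by this patch; names mirror the diff below):

	#include <linux/types.h>

	/* Before: offset relative to the NvSema DMA object. */
	static inline u64 fence_addr_old(u32 chid)
	{
		return (u64)chid * 16;
	}

	/* After: full VM address = fence BO mapping + per-channel 16-byte slot,
	 * where vma_offset comes from nouveau_bo_vma_add() on the fence BO. */
	static inline u64 fence_addr_new(u64 vma_offset, u32 chid)
	{
		return vma_offset + (u64)chid * 16;
	}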
Diffstat (limited to 'drivers/gpu/drm/nouveau/nv84_fence.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nv84_fence.c	| 153
1 file changed, 99 insertions(+), 54 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index e64e8154a5af..58c2401b18ff 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -23,6 +23,7 @@
  */
 
 #include <core/object.h>
+#include <core/client.h>
 #include <core/class.h>
 
 #include <engine/fifo.h>
@@ -33,80 +34,96 @@
 
 #include "nv50_display.h"
 
-struct nv84_fence_chan {
-	struct nouveau_fence_chan base;
-};
-
-struct nv84_fence_priv {
-	struct nouveau_fence_priv base;
-	struct nouveau_gpuobj *mem;
-};
+u64
+nv84_fence_crtc(struct nouveau_channel *chan, int crtc)
+{
+	struct nv84_fence_chan *fctx = chan->fence;
+	return fctx->dispc_vma[crtc].offset;
+}
 
 static int
 nv84_fence_emit(struct nouveau_fence *fence)
 {
 	struct nouveau_channel *chan = fence->channel;
+	struct nv84_fence_chan *fctx = chan->fence;
 	struct nouveau_fifo_chan *fifo = (void *)chan->object;
-	int ret = RING_SPACE(chan, 8);
+	u64 addr = fctx->vma.offset + fifo->chid * 16;
+	int ret;
+
+	ret = RING_SPACE(chan, 8);
 	if (ret == 0) {
 		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
-		OUT_RING  (chan, NvSema);
+		OUT_RING  (chan, chan->vram);
 		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
-		OUT_RING  (chan, upper_32_bits(fifo->chid * 16));
-		OUT_RING  (chan, lower_32_bits(fifo->chid * 16));
+		OUT_RING  (chan, upper_32_bits(addr));
+		OUT_RING  (chan, lower_32_bits(addr));
 		OUT_RING  (chan, fence->sequence);
 		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
 		OUT_RING  (chan, 0x00000000);
 		FIRE_RING (chan);
 	}
+
 	return ret;
 }
 
-
 static int
 nv84_fence_sync(struct nouveau_fence *fence,
 		struct nouveau_channel *prev, struct nouveau_channel *chan)
 {
+	struct nv84_fence_chan *fctx = chan->fence;
 	struct nouveau_fifo_chan *fifo = (void *)prev->object;
-	int ret = RING_SPACE(chan, 7);
+	u64 addr = fctx->vma.offset + fifo->chid * 16;
+	int ret;
+
+	ret = RING_SPACE(chan, 7);
 	if (ret == 0) {
 		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
-		OUT_RING  (chan, NvSema);
+		OUT_RING  (chan, chan->vram);
 		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING  (chan, upper_32_bits(fifo->chid * 16));
-		OUT_RING  (chan, lower_32_bits(fifo->chid * 16));
+		OUT_RING  (chan, upper_32_bits(addr));
+		OUT_RING  (chan, lower_32_bits(addr));
 		OUT_RING  (chan, fence->sequence);
 		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
 		FIRE_RING (chan);
 	}
+
 	return ret;
 }
 
-static u32
+u32
 nv84_fence_read(struct nouveau_channel *chan)
 {
 	struct nouveau_fifo_chan *fifo = (void *)chan->object;
 	struct nv84_fence_priv *priv = chan->drm->fence;
-	return nv_ro32(priv->mem, fifo->chid * 16);
+	return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4);
 }
 
-static void
+void
 nv84_fence_context_del(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->drm->dev;
+	struct nv84_fence_priv *priv = chan->drm->fence;
 	struct nv84_fence_chan *fctx = chan->fence;
+	int i;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+		nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
+	}
+
+	nouveau_bo_vma_del(priv->bo, &fctx->vma);
 	nouveau_fence_context_del(&fctx->base);
 	chan->fence = NULL;
 	kfree(fctx);
 }
 
-static int
+int
 nv84_fence_context_new(struct nouveau_channel *chan)
 {
-	struct drm_device *dev = chan->drm->dev;
 	struct nouveau_fifo_chan *fifo = (void *)chan->object;
+	struct nouveau_client *client = nouveau_client(fifo);
 	struct nv84_fence_priv *priv = chan->drm->fence;
 	struct nv84_fence_chan *fctx;
-	struct nouveau_object *object;
 	int ret, i;
 
 	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -115,43 +132,59 @@ nv84_fence_context_new(struct nouveau_channel *chan)
 
 	nouveau_fence_context_new(&fctx->base);
 
-	ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
-				 NvSema, 0x0002,
-				 &(struct nv_dma_class) {
-					.flags = NV_DMA_TARGET_VRAM |
-						 NV_DMA_ACCESS_RDWR,
-					.start = priv->mem->addr,
-					.limit = priv->mem->addr +
-						 priv->mem->size - 1,
-				 }, sizeof(struct nv_dma_class),
-				 &object);
-
-	/* dma objects for display sync channel semaphore blocks */
-	for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
-		struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+	ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
+	if (ret)
+		nv84_fence_context_del(chan);
 
-		ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
-					 NvEvoSema0 + i, 0x003d,
-					 &(struct nv_dma_class) {
-						.flags = NV_DMA_TARGET_VRAM |
-							 NV_DMA_ACCESS_RDWR,
-						.start = bo->bo.offset,
-						.limit = bo->bo.offset + 0xfff,
-					 }, sizeof(struct nv_dma_class),
-					 &object);
+	/* map display semaphore buffers into channel's vm */
+	for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
+		struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
+		ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
 	}
 
-	if (ret)
-		nv84_fence_context_del(chan);
-	nv_wo32(priv->mem, fifo->chid * 16, 0x00000000);
+	nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);
 	return ret;
 }
 
-static void
+bool
+nv84_fence_suspend(struct nouveau_drm *drm)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+	struct nv84_fence_priv *priv = drm->fence;
+	int i;
+
+	priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32));
+	if (priv->suspend) {
+		for (i = 0; i <= pfifo->max; i++)
+			priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4);
+	}
+
+	return priv->suspend != NULL;
+}
+
+void
+nv84_fence_resume(struct nouveau_drm *drm)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+	struct nv84_fence_priv *priv = drm->fence;
+	int i;
+
+	if (priv->suspend) {
+		for (i = 0; i <= pfifo->max; i++)
+			nouveau_bo_wr32(priv->bo, i*4, priv->suspend[i]);
+		vfree(priv->suspend);
+		priv->suspend = NULL;
+	}
+}
+
+void
 nv84_fence_destroy(struct nouveau_drm *drm)
 {
 	struct nv84_fence_priv *priv = drm->fence;
-	nouveau_gpuobj_ref(NULL, &priv->mem);
+	nouveau_bo_unmap(priv->bo);
+	if (priv->bo)
+		nouveau_bo_unpin(priv->bo);
+	nouveau_bo_ref(NULL, &priv->bo);
 	drm->fence = NULL;
 	kfree(priv);
 }
@@ -161,7 +194,6 @@ nv84_fence_create(struct nouveau_drm *drm)
 {
 	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
 	struct nv84_fence_priv *priv;
-	u32 chan = pfifo->max + 1;
 	int ret;
 
 	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -169,6 +201,8 @@ nv84_fence_create(struct nouveau_drm *drm)
 		return -ENOMEM;
 
 	priv->base.dtor = nv84_fence_destroy;
+	priv->base.suspend = nv84_fence_suspend;
+	priv->base.resume = nv84_fence_resume;
 	priv->base.context_new = nv84_fence_context_new;
 	priv->base.context_del = nv84_fence_context_del;
 	priv->base.emit = nv84_fence_emit;
@@ -178,8 +212,19 @@ nv84_fence_create(struct nouveau_drm *drm)
 	init_waitqueue_head(&priv->base.waiting);
 	priv->base.uevent = true;
 
-	ret = nouveau_gpuobj_new(drm->device, NULL, chan * 16, 0x1000, 0,
-				 &priv->mem);
+	ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
+			     TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
+	if (ret == 0) {
+		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+		if (ret == 0) {
+			ret = nouveau_bo_map(priv->bo);
+			if (ret)
+				nouveau_bo_unpin(priv->bo);
+		}
+		if (ret)
+			nouveau_bo_ref(NULL, &priv->bo);
+	}
+
 	if (ret)
 		nv84_fence_destroy(drm);
 	return ret;