author    Ben Skeggs <bskeggs@redhat.com>  2010-02-11 01:37:26 -0500
committer Ben Skeggs <bskeggs@redhat.com>  2010-02-25 00:08:29 -0500
commit    9a391ad8a2cdd7e5be9b6aabb56f4a46683ba377 (patch)
tree      2dafd2b541dff055e88406cfb48679530b12192e /drivers/gpu/drm/nouveau/nouveau_dma.c
parent    ff9e5279b14dc024599cc705ee199dadb94e90a3 (diff)
drm/nv50: switch to indirect push buffer controls
PFIFO on G80 and up has a new mode where the main ring buffer is simply a ring of pointers to indirect buffers containing the actual command/data packets. In order to implement index buffers in the 3D driver, we need to be able to submit data-only push buffers directly after a command packet header, which is only possible with this new command submission method.

This commit doesn't yet make index buffers possible; some userspace interface changes are still required. It does, however, allow the hardware-side support to be tested and debugged in the meantime.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
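For illustration, here is a minimal sketch of the two-word entry format that the new nv50_dma_push() (in the diff below) writes into the indirect-buffer ring. The layout shown (buffer offset in the first dword, length in dwords shifted to bit 10 in the second) is read directly off this patch, not off hardware documentation, and the struct and function names are hypothetical, not part of the driver:

	#include <stdint.h>

	/* Hypothetical stand-in for one IB ring entry, mirroring the two
	 * nouveau_bo_wr32() writes in nv50_dma_push(). */
	struct nv50_ib_entry {
		uint32_t offset;	/* GPU offset of the data-only push buffer */
		uint32_t length;	/* size in dwords, placed at bit 10 and up */
	};

	static struct nv50_ib_entry
	pack_ib_entry(uint64_t offset, int dwords)
	{
		struct nv50_ib_entry e;

		e.offset = (uint32_t)offset;	/* low 32 bits, matching the single wr32 */
		e.length = (uint32_t)dwords << 10;
		return e;
	}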
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_dma.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 108
1 file changed, 106 insertions(+), 2 deletions(-)
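One detail worth noting before the diff: nv50_dma_push_wait() recomputes the number of free ring entries from the hardware GET pointer (register 0x88) and the software PUT index. A standalone sketch of that wrap computation, with hypothetical names (the ring holds ib_max + 1 entries):

	/* Free entries between GET and PUT in a ring of `entries` slots;
	 * mirrors the computation at the bottom of nv50_dma_push_wait().
	 * e.g. ring_free(10, 4090, 4096) == 16. */
	int ring_free(int get, int put, int entries)
	{
		int free = get - put;
		if (free <= 0)		/* GET wrapped behind (or equals) PUT */
			free += entries;
		return free;
	}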
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 50d9e67745af..b9c80bb17250 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -32,7 +32,22 @@
 void
 nouveau_dma_pre_init(struct nouveau_channel *chan)
 {
-	chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct nouveau_bo *pushbuf = chan->pushbuf_bo;
+
+	if (dev_priv->card_type == NV_50) {
+		const int ib_size = pushbuf->bo.mem.size / 2;
+
+		chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
+		chan->dma.ib_max = (ib_size / 8) - 1;
+		chan->dma.ib_put = 0;
+		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
+
+		chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2;
+	} else {
+		chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2;
+	}
+
 	chan->dma.put = 0;
 	chan->dma.cur = chan->dma.put;
 	chan->dma.free = chan->dma.max - chan->dma.cur;
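As a worked example of the NV_50 split in the hunk above, assume a hypothetical 64 KiB push buffer (the actual size is chosen elsewhere in the driver): the lower half keeps serving command data while the upper half becomes the indirect-buffer ring, and ib_max doubles as the power-of-two wrap mask used by nv50_dma_push() in the next hunk:

	#include <stdio.h>

	int main(void)
	{
		unsigned size = 64 * 1024;		/* hypothetical pushbuf->bo.mem.size */
		unsigned ib_size = size / 2;		/* upper half becomes the IB ring */
		unsigned ib_base = (size - ib_size) >> 2;	/* 8192: ring starts at this dword */
		unsigned ib_max = (ib_size / 8) - 1;	/* 4095: 4096 8-byte entries, minus one */
		unsigned dma_max = (size - ib_size) >> 2;	/* 8192 dwords left for commands */

		printf("ib_base=%u ib_max=%u dma_max=%u\n", ib_base, ib_max, dma_max);
		return 0;
	}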
@@ -162,12 +177,101 @@ READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
 	return (val - chan->pushbuf_base) >> 2;
 }
 
+void
+nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
+	      int delta, int dwords)
+{
+	struct nouveau_bo *pb = chan->pushbuf_bo;
+	uint64_t offset = (bo->bo.mem.mm_node->start << PAGE_SHIFT) + delta;
+	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
+
+	BUG_ON(chan->dma.ib_free < 1);
+	nouveau_bo_wr32(pb, ip++, offset);
+	nouveau_bo_wr32(pb, ip++, dwords << 10);
+
+	chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
+	nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
+	chan->dma.ib_free--;
+}
+
+static int
+nv50_dma_push_wait(struct nouveau_channel *chan, int count)
+{
+	uint32_t cnt = 0, prev_get = 0;
+
+	while (chan->dma.ib_free < count) {
+		uint32_t get = nvchan_rd32(chan, 0x88);
+		if (get != prev_get) {
+			prev_get = get;
+			cnt = 0;
+		}
+
+		if ((++cnt & 0xff) == 0) {
+			DRM_UDELAY(1);
+			if (cnt > 100000)
+				return -EBUSY;
+		}
+
+		chan->dma.ib_free = get - chan->dma.ib_put;
+		if (chan->dma.ib_free <= 0)
+			chan->dma.ib_free += chan->dma.ib_max + 1;
+	}
+
+	return 0;
+}
+
+static int
+nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
+{
+	uint32_t cnt = 0, prev_get = 0;
+	int ret;
+
+	ret = nv50_dma_push_wait(chan, slots + 1);
+	if (unlikely(ret))
+		return ret;
+
+	while (chan->dma.free < count) {
+		int get = READ_GET(chan, &prev_get, &cnt);
+		if (unlikely(get < 0)) {
+			if (get == -EINVAL)
+				continue;
+
+			return get;
+		}
+
+		if (get <= chan->dma.cur) {
+			chan->dma.free = chan->dma.max - chan->dma.cur;
+			if (chan->dma.free >= count)
+				break;
+
+			FIRE_RING(chan);
+			do {
+				get = READ_GET(chan, &prev_get, &cnt);
+				if (unlikely(get < 0)) {
+					if (get == -EINVAL)
+						continue;
+					return get;
+				}
+			} while (get == 0);
+			chan->dma.cur = 0;
+			chan->dma.put = 0;
+		}
+
+		chan->dma.free = get - chan->dma.cur - 1;
+	}
+
+	return 0;
+}
+
 int
-nouveau_dma_wait(struct nouveau_channel *chan, int size)
+nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
 {
 	uint32_t prev_get = 0, cnt = 0;
 	int get;
 
+	if (chan->dma.ib_max)
+		return nv50_dma_wait(chan, slots, size);
+
 	while (chan->dma.free < size) {
 		get = READ_GET(chan, &prev_get, &cnt);
 		if (unlikely(get == -EBUSY))