Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_dma.c')
 drivers/gpu/drm/nouveau/nouveau_dma.c | 56 ++++++++++++++++----------------------------------------
 1 file changed, 16 insertions(+), 40 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index efd08232340..40f91e1e584 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -24,41 +24,16 @@
  *
  */
 
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_dma.h"
-#include <core/ramht.h>
-
-void
-nouveau_dma_init(struct nouveau_channel *chan)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nouveau_bo *pushbuf = chan->pushbuf_bo;
-
-	if (dev_priv->card_type >= NV_50) {
-		const int ib_size = pushbuf->bo.mem.size / 2;
-
-		chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
-		chan->dma.ib_max = (ib_size / 8) - 1;
-		chan->dma.ib_put = 0;
-		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
+#include <core/client.h>
 
-		chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2;
-	} else {
-		chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2;
-	}
-
-	chan->dma.put = 0;
-	chan->dma.cur = chan->dma.put;
-	chan->dma.free = chan->dma.max - chan->dma.cur;
-}
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
 
 void
 OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
 {
 	bool is_iomem;
-	u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem);
+	u32 *mem = ttm_kmap_obj_virtual(&chan->push.buffer->kmap, &is_iomem);
 	mem = &mem[chan->dma.cur];
 	if (is_iomem)
 		memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
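
The deleted nouveau_dma_init() is worth a second look, since the layout it set up does not go away, its job just moves out of this file in this patch: on NV_50 and later the push buffer is split in half, the lower half holding command dwords and the upper half holding the indirect-buffer (IB) ring, where each IB entry occupies two dwords (8 bytes). A self-contained sketch of that arithmetic, assuming a hypothetical 64 KiB push buffer; offsets are dword-granular, hence the >> 2:

#include <stdio.h>

int main(void)
{
	unsigned int size = 64 * 1024;		/* pushbuf->bo.mem.size */
	unsigned int ib_size = size / 2;	/* upper half: the IB ring */

	unsigned int ib_base = (size - ib_size) >> 2;	/* IB start, in dwords */
	unsigned int ib_max = (ib_size / 8) - 1;	/* 8-byte IB entries */
	unsigned int dma_max = (size - ib_size) >> 2;	/* command space, dwords */

	printf("ib_base=%u ib_max=%u max=%u\n", ib_base, ib_max, dma_max);
	return 0;	/* prints: ib_base=8192 ib_max=4095 max=8192 */
}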
@@ -79,9 +54,9 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
 {
 	uint64_t val;
 
-	val = nvchan_rd32(chan, chan->user_get);
+	val = nv_ro32(chan->object, chan->user_get);
 	if (chan->user_get_hi)
-		val |= (uint64_t)nvchan_rd32(chan, chan->user_get_hi) << 32;
+		val |= (uint64_t)nv_ro32(chan->object, chan->user_get_hi) << 32;
 
 	/* reset counter as long as GET is still advancing, this is
 	 * to avoid misdetecting a GPU lockup if the GPU happens to
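
The nvchan_rd32() to nv_ro32() switch is purely the new core object API; the logic is unchanged: the channel's GET pointer is read as one or two 32-bit registers and, where a high word exists (user_get_hi), assembled into a 64-bit push buffer address. The same composition in isolation, with a hypothetical read32() standing in for nv_ro32(chan->object, ...):

#include <stdint.h>

extern uint32_t read32(uint32_t reg);	/* hypothetical, ~ nv_ro32() */

/* Assemble the 64-bit GET pointer from its low and (optional) high
 * 32-bit halves, mirroring READ_GET() above.
 */
static uint64_t read_get64(uint32_t get_lo, uint32_t get_hi, int has_hi)
{
	uint64_t val = read32(get_lo);

	if (has_hi)
		val |= (uint64_t)read32(get_hi) << 32;
	return val;
}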
@@ -93,32 +68,33 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
 	}
 
 	if ((++*timeout & 0xff) == 0) {
-		DRM_UDELAY(1);
+		udelay(1);
 		if (*timeout > 100000)
 			return -EBUSY;
 	}
 
-	if (val < chan->pushbuf_base ||
-	    val > chan->pushbuf_base + (chan->dma.max << 2))
+	if (val < chan->push.vma.offset ||
+	    val > chan->push.vma.offset + (chan->dma.max << 2))
 		return -EINVAL;
 
-	return (val - chan->pushbuf_base) >> 2;
+	return (val - chan->push.vma.offset) >> 2;
 }
 
 void
 nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
 	      int delta, int length)
 {
-	struct nouveau_bo *pb = chan->pushbuf_bo;
+	struct nouveau_bo *pb = chan->push.buffer;
 	struct nouveau_vma *vma;
 	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
 	u64 offset;
 
-	vma = nouveau_bo_vma_find(bo, chan->vm);
+	vma = nouveau_bo_vma_find(bo, nv_client(chan->cli)->vm);
 	BUG_ON(!vma);
 	offset = vma->offset + delta;
 
 	BUG_ON(chan->dma.ib_free < 1);
+
 	nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
 	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
 
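
nv50_dma_push() fills one IB slot per submission: two dwords, the first carrying the low 32 bits of the buffer's GPU virtual address (vma->offset + delta), the second carrying the remaining address bits with the command length, in dwords, ORed in from bit 8 up. A sketch of that packing as a hypothetical helper, not driver API; it assumes, as the OR does, that the virtual address fits below bit 40 so the length never collides with the high address byte:

#include <stdint.h>

/* Pack one two-dword IB entry the way nv50_dma_push() does above. */
static void pack_ib_entry(uint32_t ib[2], uint64_t offset, int length)
{
	ib[0] = (uint32_t)offset;			/* lower_32_bits()  */
	ib[1] = (uint32_t)(offset >> 32) | length << 8;	/* upper | len << 8 */
}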
@@ -128,7 +104,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
 	/* Flush writes. */
 	nouveau_bo_rd32(pb, 0);
 
-	nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
+	nv_wo32(chan->object, 0x8c, chan->dma.ib_put);
 	chan->dma.ib_free--;
 }
 
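
The read back of the push buffer before writing ib_put is an ordering idiom: the read forces the preceding nouveau_bo_wr32() stores to land (for instance, past a write-combining mapping) before the doorbell write at 0x8c tells the GPU a new IB entry exists. The pattern in miniature, with hypothetical stand-ins for the accessors:

extern unsigned int bo_rd32(int idx);			/* ~ nouveau_bo_rd32() */
extern void chan_wr32(unsigned int r, unsigned int v);	/* ~ nv_wo32()         */

static void ring_doorbell(unsigned int ib_put)
{
	(void)bo_rd32(0);	/* flush: the IB-entry writes land first */
	chan_wr32(0x8c, ib_put);	/* then the GPU may consume them  */
}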
@@ -138,7 +114,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
 	uint32_t cnt = 0, prev_get = 0;
 
 	while (chan->dma.ib_free < count) {
-		uint32_t get = nvchan_rd32(chan, 0x88);
+		uint32_t get = nv_ro32(chan->object, 0x88);
 		if (get != prev_get) {
 			prev_get = get;
 			cnt = 0;
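
nv50_dma_push_wait() applies the same progress-aware timeout idea as READ_GET(): the stall counter only accumulates while the GPU's IB GET pointer (register 0x88) is not moving, and any advance resets it, so a slow but live GPU is never misread as hung. The shape of that loop, abstracted behind hypothetical poll_get()/free_slots() helpers; the driver's exact backoff and limit differ slightly:

#include <errno.h>
#include <stdint.h>

extern uint32_t poll_get(void);	/* ~ nv_ro32(chan->object, 0x88) */
extern int free_slots(void);	/* ~ chan->dma.ib_free           */

static int wait_for_slots(int count)
{
	uint32_t cnt = 0, prev_get = 0;

	while (free_slots() < count) {
		uint32_t get = poll_get();
		if (get != prev_get) {	/* progress: restart the clock */
			prev_get = get;
			cnt = 0;
		}
		if (++cnt > 100000)	/* no movement for too long */
			return -EBUSY;
	}
	return 0;
}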
@@ -249,7 +225,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
 		 * instruct the GPU to jump back to the start right
 		 * after processing the currently pending commands.
 		 */
-		OUT_RING(chan, chan->pushbuf_base | 0x20000000);
+		OUT_RING(chan, chan->push.vma.offset | 0x20000000);
 
 		/* wait for GET to depart from the skips area.
 		 * prevents writing GET==PUT and causing a race
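
The OUT_RING() above is how the linear part of the push buffer wraps: as the comment notes, a dword with bit 29 set (0x20000000) acts as a jump command on these chips, so emitting the buffer's start address with that bit set sends the GPU back to the top once the pending commands drain. A sketch, with emit() as a hypothetical stand-in for OUT_RING():

#include <stdint.h>

extern void emit(uint32_t dword);	/* hypothetical, ~ OUT_RING() */

static void wrap_to_start(uint32_t pushbuf_offset, unsigned int *cur)
{
	emit(pushbuf_offset | 0x20000000);	/* GPU: jump to buffer start  */
	*cur = 0;				/* CPU resumes writing at top */
}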