Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_dma.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_dma.c	353
1 file changed, 353 insertions, 0 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
new file mode 100644
index 000000000000..65c441a1999f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -0,0 +1,353 @@
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

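/*
 * Overview: a channel's push buffer is a ring of 32-bit command words.
 * The CPU appends commands at PUT (chan->dma.cur/put, counted in dwords)
 * and the GPU consumes them at GET (read back via READ_GET() below).  On
 * NV50 the buffer is split in half, the upper half holding an indirect
 * buffer (IB) of 64-bit entries that point PFIFO at spans of commands.
 */
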
void
nouveau_dma_pre_init(struct nouveau_channel *chan)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct nouveau_bo *pushbuf = chan->pushbuf_bo;

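        /*
         * NV50: the lower half of the push buffer holds command dwords,
         * the upper half the IB ring of 8-byte entries.  Worked example
         * (illustrative): a 64KiB push buffer gives ib_size = 32KiB, so
         * ib_base = 8192 (dword offset of the IB), ib_max = 4095 usable
         * entries, and dma.max = 8192 dwords of command space.
         */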
        if (dev_priv->card_type == NV_50) {
                const int ib_size = pushbuf->bo.mem.size / 2;

                chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
                chan->dma.ib_max = (ib_size / 8) - 1;
                chan->dma.ib_put = 0;
                chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;

                chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2;
        } else {
                chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2;
        }

        chan->dma.put = 0;
        chan->dma.cur = chan->dma.put;
        chan->dma.free = chan->dma.max - chan->dma.cur;
}

int
nouveau_dma_init(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *m2mf = NULL;
        struct nouveau_gpuobj *nvsw = NULL;
        int ret, i;

        /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
        ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
                                    0x0039 : 0x5039, &m2mf);
        if (ret)
                return ret;

        ret = nouveau_gpuobj_ref_add(dev, chan, NvM2MF, m2mf, NULL);
        if (ret)
                return ret;

        /* Create an NV_SW object for various sync purposes */
        ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw);
        if (ret)
                return ret;

        ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL);
        if (ret)
                return ret;

        /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
        ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
        if (ret)
                return ret;

        /* Map push buffer */
        ret = nouveau_bo_map(chan->pushbuf_bo);
        if (ret)
                return ret;

        /* Map M2MF notifier object - fbcon. */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                ret = nouveau_bo_map(chan->notifier_bo);
                if (ret)
                        return ret;
        }

        /* Insert NOPs for NOUVEAU_DMA_SKIPS */
        ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
        if (ret)
                return ret;

        for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
                OUT_RING(chan, 0);

        /* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
        ret = RING_SPACE(chan, 4);
        if (ret)
                return ret;
        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
        OUT_RING(chan, NvM2MF);
        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
        OUT_RING(chan, NvNotify0);

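        /*
         * Note: each BEGIN_RING() above emits a single method header word
         * ahead of its data; in this driver's nouveau_dma.h it packs the
         * dword count, subchannel and method as
         * (size << 18) | (subc << 13) | mthd, so each header/data pair
         * binds an object handle to a subchannel's method.
         */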
        /* Initialise NV_SW */
        ret = RING_SPACE(chan, 2);
        if (ret)
                return ret;
        BEGIN_RING(chan, NvSubSw, 0, 1);
        OUT_RING(chan, NvSw);

        /* Sit back and pray the channel works.. */
        FIRE_RING(chan);

        return 0;
}

void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
{
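        /*
         * The push buffer may live in VRAM and be mapped through the BAR,
         * in which case ttm flags the kmap as iomem and memcpy_toio() is
         * required rather than a plain memcpy().
         */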
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem);
        mem = &mem[chan->dma.cur];
        if (is_iomem)
                memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
        else
                memcpy(mem, data, nr_dwords * 4);
        chan->dma.cur += nr_dwords;
}

/* Fetch and adjust GPU GET pointer
 *
 * Returns:
 *  value >= 0, the adjusted GET pointer
 *  -EINVAL if GET pointer currently outside main push buffer
 *  -EBUSY if timeout exceeded
 */
static inline int
READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
{
        uint32_t val;

        val = nvchan_rd32(chan, chan->user_get);

        /* reset counter as long as GET is still advancing, this is
         * to avoid misdetecting a GPU lockup if the GPU happens to
         * just be processing an operation that takes a long time
         */
        if (val != *prev_get) {
                *prev_get = val;
                *timeout = 0;
        }

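        /* only back off every 256 polls; with the 100000-poll limit this
         * gives up after roughly 400us of accumulated udelay() plus
         * register-read overhead (illustrative figure, not a tuned value)
         */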
        if ((++*timeout & 0xff) == 0) {
                DRM_UDELAY(1);
                if (*timeout > 100000)
                        return -EBUSY;
        }

        if (val < chan->pushbuf_base ||
            val > chan->pushbuf_base + (chan->dma.max << 2))
                return -EINVAL;

        return (val - chan->pushbuf_base) >> 2;
}

void
nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
              int delta, int length)
{
        struct nouveau_bo *pb = chan->pushbuf_bo;
        uint64_t offset = bo->bo.offset + delta;
        int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;

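        /*
         * Each IB entry is two dwords: the low 32 bits of the buffer's GPU
         * address, then the upper address bits with the byte length shifted
         * into the size field (length is a multiple of 4, so the dword
         * count effectively starts at bit 10).  Writing the new ib_put to
         * channel offset 0x8c below is what hands the entry to PFIFO.
         */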
        BUG_ON(chan->dma.ib_free < 1);
        nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
        nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);

        chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;

        DRM_MEMORYBARRIER();
        /* Flush writes. */
        nouveau_bo_rd32(pb, 0);

        nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
        chan->dma.ib_free--;
}

static int
nv50_dma_push_wait(struct nouveau_channel *chan, int count)
{
        uint32_t cnt = 0, prev_get = 0;

        while (chan->dma.ib_free < count) {
                uint32_t get = nvchan_rd32(chan, 0x88);
                if (get != prev_get) {
                        prev_get = get;
                        cnt = 0;
                }

                if ((++cnt & 0xff) == 0) {
                        DRM_UDELAY(1);
                        if (cnt > 100000)
                                return -EBUSY;
                }

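                /* free IB slots, modulo the ring size (ib_max + 1 entries);
                 * e.g. with ib_max = 4095, get = 10 and ib_put = 4090 this
                 * yields 10 - 4090 + 4096 = 16 free entries
                 */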
                chan->dma.ib_free = get - chan->dma.ib_put;
                if (chan->dma.ib_free <= 0)
                        chan->dma.ib_free += chan->dma.ib_max + 1;
        }

        return 0;
}

static int
nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
{
        uint32_t cnt = 0, prev_get = 0;
        int ret;

        ret = nv50_dma_push_wait(chan, slots + 1);
        if (unlikely(ret))
                return ret;

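        /* the extra IB slot reserved above ("+ 1") covers the FIRE_RING()
         * in the loop below, which consumes an entry of its own whenever
         * we wrap the command buffer back to the start
         */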
        while (chan->dma.free < count) {
                int get = READ_GET(chan, &prev_get, &cnt);
                if (unlikely(get < 0)) {
                        if (get == -EINVAL)
                                continue;

                        return get;
                }

                if (get <= chan->dma.cur) {
                        chan->dma.free = chan->dma.max - chan->dma.cur;
                        if (chan->dma.free >= count)
                                break;

                        FIRE_RING(chan);
                        do {
                                get = READ_GET(chan, &prev_get, &cnt);
                                if (unlikely(get < 0)) {
                                        if (get == -EINVAL)
                                                continue;
                                        return get;
                                }
                        } while (get == 0);
                        chan->dma.cur = 0;
                        chan->dma.put = 0;
                }

                chan->dma.free = get - chan->dma.cur - 1;
        }

        return 0;
}

int
nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
{
        uint32_t prev_get = 0, cnt = 0;
        int get;

        if (chan->dma.ib_max)
                return nv50_dma_wait(chan, slots, size);

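        /* pre-NV50 path: PUT chases GET around a single ring.  Worked
         * example (illustrative): with max = 16382, cur = 16000 and
         * get = 100, only 382 dwords remain before the buffer's end, so
         * we emit a jump back to the start instead of waiting there.
         */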
        while (chan->dma.free < size) {
                get = READ_GET(chan, &prev_get, &cnt);
                if (unlikely(get == -EBUSY))
                        return -EBUSY;

                /* loop until we have a usable GET pointer.  the value
                 * we read from the GPU may be outside the main ring if
                 * PFIFO is processing a buffer called from the main ring,
                 * discard these values until something sensible is seen.
                 *
                 * the other case we discard GET is while the GPU is fetching
                 * from the SKIPS area, so the code below doesn't have to deal
                 * with some fun corner cases.
                 */
                if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
                        continue;

                if (get <= chan->dma.cur) {
                        /* engine is fetching behind us, or is completely
                         * idle (GET == PUT) so we have free space up until
                         * the end of the push buffer
                         *
                         * we can only hit that path once per call due to
                         * looping back to the beginning of the push buffer,
                         * we'll hit the fetching-ahead-of-us path from that
                         * point on.
                         *
                         * the *one* exception to that rule is if we read
                         * GET==PUT, in which case the below conditional will
                         * always succeed and break us out of the wait loop.
                         */
                        chan->dma.free = chan->dma.max - chan->dma.cur;
                        if (chan->dma.free >= size)
                                break;

                        /* not enough space left at the end of the push buffer,
                         * instruct the GPU to jump back to the start right
                         * after processing the currently pending commands.
                         */
                        OUT_RING(chan, chan->pushbuf_base | 0x20000000);

                        /* wait for GET to depart from the skips area.
                         * prevents writing GET==PUT and causing a race
                         * condition that causes us to think the GPU is
                         * idle when it's not.
                         */
                        do {
                                get = READ_GET(chan, &prev_get, &cnt);
                                if (unlikely(get == -EBUSY))
                                        return -EBUSY;
                                if (unlikely(get == -EINVAL))
                                        continue;
                        } while (get <= NOUVEAU_DMA_SKIPS);
                        WRITE_PUT(NOUVEAU_DMA_SKIPS);

                        /* we're now submitting commands at the start of
                         * the push buffer.
                         */
                        chan->dma.cur =
                                chan->dma.put = NOUVEAU_DMA_SKIPS;
                }

                /* engine fetching ahead of us, we have space up until the
                 * current GET pointer.  the "- 1" is to ensure there's
                 * space left to emit a jump back to the beginning of the
                 * push buffer if we require it.  we can never get GET == PUT
                 * here, so this is safe.
                 */
                chan->dma.free = get - chan->dma.cur - 1;
        }

        return 0;
}
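/*
 * Typical caller pattern (illustrative), using the handles and macros set
 * up above and in nouveau_dma.h:
 *
 *      ret = RING_SPACE(chan, 2);
 *      if (ret)
 *              return ret;
 *      BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
 *      OUT_RING(chan, NvNotify0);
 *      FIRE_RING(chan);
 *
 * RING_SPACE() waits (via nouveau_dma_wait()) until the dwords fit,
 * BEGIN_RING()/OUT_RING() stage them, and FIRE_RING() updates PUT (or
 * pushes an IB entry on NV50) so PFIFO begins fetching.
 */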