Diffstat (limited to 'drivers/char/drm/mga_dma.c')
-rw-r--r--  drivers/char/drm/mga_dma.c | 1162 -
1 file changed, 0 insertions(+), 1162 deletions(-)
diff --git a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c
deleted file mode 100644
index c1d12dbfa8d8..000000000000
--- a/drivers/char/drm/mga_dma.c
+++ /dev/null
@@ -1,1162 +0,0 @@
1 | /* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*- | ||
2 | * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com | ||
3 | * | ||
4 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. | ||
5 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | ||
6 | * All Rights Reserved. | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
25 | * DEALINGS IN THE SOFTWARE. | ||
26 | */ | ||
27 | |||
28 | /** | ||
29 | * \file mga_dma.c | ||
30 | * DMA support for MGA G200 / G400. | ||
31 | * | ||
32 | * \author Rickard E. (Rik) Faith <faith@valinux.com> | ||
33 | * \author Jeff Hartmann <jhartmann@valinux.com> | ||
34 | * \author Keith Whitwell <keith@tungstengraphics.com> | ||
35 | * \author Gareth Hughes <gareth@valinux.com> | ||
36 | */ | ||
37 | |||
38 | #include "drmP.h" | ||
39 | #include "drm.h" | ||
40 | #include "drm_sarea.h" | ||
41 | #include "mga_drm.h" | ||
42 | #include "mga_drv.h" | ||
43 | |||
44 | #define MGA_DEFAULT_USEC_TIMEOUT 10000 | ||
45 | #define MGA_FREELIST_DEBUG 0 | ||
46 | |||
47 | #define MINIMAL_CLEANUP 0 | ||
48 | #define FULL_CLEANUP 1 | ||
49 | static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup); | ||
50 | |||
51 | /* ================================================================ | ||
52 | * Engine control | ||
53 | */ | ||
54 | |||
55 | int mga_do_wait_for_idle(drm_mga_private_t * dev_priv) | ||
56 | { | ||
57 | u32 status = 0; | ||
58 | int i; | ||
59 | DRM_DEBUG("\n"); | ||
60 | |||
61 | for (i = 0; i < dev_priv->usec_timeout; i++) { | ||
62 | status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK; | ||
63 | if (status == MGA_ENDPRDMASTS) { | ||
64 | MGA_WRITE8(MGA_CRTC_INDEX, 0); | ||
65 | return 0; | ||
66 | } | ||
67 | DRM_UDELAY(1); | ||
68 | } | ||
69 | |||
70 | #if MGA_DMA_DEBUG | ||
71 | DRM_ERROR("failed!\n"); | ||
72 | DRM_INFO(" status=0x%08x\n", status); | ||
73 | #endif | ||
74 | return -EBUSY; | ||
75 | } | ||
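The timeout define above and the loop in mga_do_wait_for_idle() implement a simple bounded busy-wait on the engine status register. A minimal user-space sketch of the same pattern, assuming made-up stand-ins (read_status(), the mask/idle values and the dummy behaviour are not driver symbols):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define ENGINE_IDLE_MASK  0x00010000u   /* stands in for MGA_ENGINE_IDLE_MASK */
#define ENGINE_IDLE_VALUE 0x00010000u   /* stands in for MGA_ENDPRDMASTS */

/* Dummy status source: pretends the engine goes idle after a few polls. */
static uint32_t read_status(void)
{
        static int polls;
        return (++polls > 3) ? ENGINE_IDLE_VALUE : 0;
}

static int wait_for_idle(unsigned int timeout_usec)
{
        unsigned int i;

        for (i = 0; i < timeout_usec; i++) {
                if ((read_status() & ENGINE_IDLE_MASK) == ENGINE_IDLE_VALUE)
                        return 0;       /* idle: safe to touch the DMA stream */
                usleep(1);              /* loosely corresponds to DRM_UDELAY(1) */
        }
        return -1;                      /* caller maps this to -EBUSY */
}

int main(void)
{
        printf("wait_for_idle -> %d\n", wait_for_idle(10000));
        return 0;
}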
76 | |||
77 | static int mga_do_dma_reset(drm_mga_private_t * dev_priv) | ||
78 | { | ||
79 | drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; | ||
80 | drm_mga_primary_buffer_t *primary = &dev_priv->prim; | ||
81 | |||
82 | DRM_DEBUG("\n"); | ||
83 | |||
84 | /* The primary DMA stream should look like new right about now. | ||
85 | */ | ||
86 | primary->tail = 0; | ||
87 | primary->space = primary->size; | ||
88 | primary->last_flush = 0; | ||
89 | |||
90 | sarea_priv->last_wrap = 0; | ||
91 | |||
92 | /* FIXME: Reset counters, buffer ages etc... | ||
93 | */ | ||
94 | |||
95 | /* FIXME: What else do we need to reinitialize? WARP stuff? | ||
96 | */ | ||
97 | |||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | /* ================================================================ | ||
102 | * Primary DMA stream | ||
103 | */ | ||
104 | |||
105 | void mga_do_dma_flush(drm_mga_private_t * dev_priv) | ||
106 | { | ||
107 | drm_mga_primary_buffer_t *primary = &dev_priv->prim; | ||
108 | u32 head, tail; | ||
109 | u32 status = 0; | ||
110 | int i; | ||
111 | DMA_LOCALS; | ||
112 | DRM_DEBUG("\n"); | ||
113 | |||
114 | /* We need to wait so that we can do a safe flush */ | ||
115 | for (i = 0; i < dev_priv->usec_timeout; i++) { | ||
116 | status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK; | ||
117 | if (status == MGA_ENDPRDMASTS) | ||
118 | break; | ||
119 | DRM_UDELAY(1); | ||
120 | } | ||
121 | |||
122 | if (primary->tail == primary->last_flush) { | ||
123 | DRM_DEBUG(" bailing out...\n"); | ||
124 | return; | ||
125 | } | ||
126 | |||
127 | tail = primary->tail + dev_priv->primary->offset; | ||
128 | |||
129 | /* We need to pad the stream between flushes, as the card | ||
130 | * actually (partially?) reads the first of these commands. | ||
131 | * See page 4-16 in the G400 manual, middle of the page or so. | ||
132 | */ | ||
133 | BEGIN_DMA(1); | ||
134 | |||
135 | DMA_BLOCK(MGA_DMAPAD, 0x00000000, | ||
136 | MGA_DMAPAD, 0x00000000, | ||
137 | MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000); | ||
138 | |||
139 | ADVANCE_DMA(); | ||
140 | |||
141 | primary->last_flush = primary->tail; | ||
142 | |||
143 | head = MGA_READ(MGA_PRIMADDRESS); | ||
144 | |||
145 | if (head <= tail) { | ||
146 | primary->space = primary->size - primary->tail; | ||
147 | } else { | ||
148 | primary->space = head - tail; | ||
149 | } | ||
150 | |||
151 | DRM_DEBUG(" head = 0x%06lx\n", head - dev_priv->primary->offset); | ||
152 | DRM_DEBUG(" tail = 0x%06lx\n", tail - dev_priv->primary->offset); | ||
153 | DRM_DEBUG(" space = 0x%06x\n", primary->space); | ||
154 | |||
155 | mga_flush_write_combine(); | ||
156 | MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access); | ||
157 | |||
158 | DRM_DEBUG("done.\n"); | ||
159 | } | ||
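The head/tail bookkeeping above treats the primary buffer as a ring that the hardware read pointer (PRIMADDRESS) chases behind the software write pointer. A small self-contained sketch of the free-space calculation, under the assumption that head and tail are both byte offsets into the ring rather than bus addresses:

#include <stdint.h>
#include <stdio.h>

/* When head is at or behind tail, only the bytes between tail and the end
 * of the buffer are immediately usable (the wrap is handled separately by
 * mga_do_dma_wrap_start()); otherwise the gap head - tail is free. */
static uint32_t ring_space(uint32_t head, uint32_t tail, uint32_t size)
{
        return (head <= tail) ? (size - tail) : (head - tail);
}

int main(void)
{
        printf("%u\n", ring_space(0x1000, 0x4000, 0x10000)); /* head behind tail */
        printf("%u\n", ring_space(0x8000, 0x4000, 0x10000)); /* head ahead of tail */
        return 0;
}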
160 | |||
161 | void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv) | ||
162 | { | ||
163 | drm_mga_primary_buffer_t *primary = &dev_priv->prim; | ||
164 | u32 head, tail; | ||
165 | DMA_LOCALS; | ||
166 | DRM_DEBUG("\n"); | ||
167 | |||
168 | BEGIN_DMA_WRAP(); | ||
169 | |||
170 | DMA_BLOCK(MGA_DMAPAD, 0x00000000, | ||
171 | MGA_DMAPAD, 0x00000000, | ||
172 | MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000); | ||
173 | |||
174 | ADVANCE_DMA(); | ||
175 | |||
176 | tail = primary->tail + dev_priv->primary->offset; | ||
177 | |||
178 | primary->tail = 0; | ||
179 | primary->last_flush = 0; | ||
180 | primary->last_wrap++; | ||
181 | |||
182 | head = MGA_READ(MGA_PRIMADDRESS); | ||
183 | |||
184 | if (head == dev_priv->primary->offset) { | ||
185 | primary->space = primary->size; | ||
186 | } else { | ||
187 | primary->space = head - dev_priv->primary->offset; | ||
188 | } | ||
189 | |||
190 | DRM_DEBUG(" head = 0x%06lx\n", head - dev_priv->primary->offset); | ||
191 | DRM_DEBUG(" tail = 0x%06x\n", primary->tail); | ||
192 | DRM_DEBUG(" wrap = %d\n", primary->last_wrap); | ||
193 | DRM_DEBUG(" space = 0x%06x\n", primary->space); | ||
194 | |||
195 | mga_flush_write_combine(); | ||
196 | MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access); | ||
197 | |||
198 | set_bit(0, &primary->wrapped); | ||
199 | DRM_DEBUG("done.\n"); | ||
200 | } | ||
201 | |||
202 | void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv) | ||
203 | { | ||
204 | drm_mga_primary_buffer_t *primary = &dev_priv->prim; | ||
205 | drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; | ||
206 | u32 head = dev_priv->primary->offset; | ||
207 | DRM_DEBUG("\n"); | ||
208 | |||
209 | sarea_priv->last_wrap++; | ||
210 | DRM_DEBUG(" wrap = %d\n", sarea_priv->last_wrap); | ||
211 | |||
212 | mga_flush_write_combine(); | ||
213 | MGA_WRITE(MGA_PRIMADDRESS, head | MGA_DMA_GENERAL); | ||
214 | |||
215 | clear_bit(0, &primary->wrapped); | ||
216 | DRM_DEBUG("done.\n"); | ||
217 | } | ||
218 | |||
219 | /* ================================================================ | ||
220 | * Freelist management | ||
221 | */ | ||
222 | |||
223 | #define MGA_BUFFER_USED ~0 | ||
224 | #define MGA_BUFFER_FREE 0 | ||
225 | |||
226 | #if MGA_FREELIST_DEBUG | ||
227 | static void mga_freelist_print(struct drm_device * dev) | ||
228 | { | ||
229 | drm_mga_private_t *dev_priv = dev->dev_private; | ||
230 | drm_mga_freelist_t *entry; | ||
231 | |||
232 | DRM_INFO("\n"); | ||
233 | DRM_INFO("current dispatch: last=0x%x done=0x%x\n", | ||
234 | dev_priv->sarea_priv->last_dispatch, | ||
235 | (unsigned int)(MGA_READ(MGA_PRIMADDRESS) - | ||
236 | dev_priv->primary->offset)); | ||
237 | DRM_INFO("current freelist:\n"); | ||
238 | |||
239 | for (entry = dev_priv->head->next; entry; entry = entry->next) { | ||
240 | DRM_INFO(" %p idx=%2d age=0x%x 0x%06lx\n", | ||
241 | entry, entry->buf->idx, entry->age.head, | ||
242 | entry->age.head - dev_priv->primary->offset); | ||
243 | } | ||
244 | DRM_INFO("\n"); | ||
245 | } | ||
246 | #endif | ||
247 | |||
248 | static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_priv) | ||
249 | { | ||
250 | struct drm_device_dma *dma = dev->dma; | ||
251 | struct drm_buf *buf; | ||
252 | drm_mga_buf_priv_t *buf_priv; | ||
253 | drm_mga_freelist_t *entry; | ||
254 | int i; | ||
255 | DRM_DEBUG("count=%d\n", dma->buf_count); | ||
256 | |||
257 | dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); | ||
258 | if (dev_priv->head == NULL) | ||
259 | return -ENOMEM; | ||
260 | |||
261 | memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t)); | ||
262 | SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0); | ||
263 | |||
264 | for (i = 0; i < dma->buf_count; i++) { | ||
265 | buf = dma->buflist[i]; | ||
266 | buf_priv = buf->dev_private; | ||
267 | |||
268 | entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); | ||
269 | if (entry == NULL) | ||
270 | return -ENOMEM; | ||
271 | |||
272 | memset(entry, 0, sizeof(drm_mga_freelist_t)); | ||
273 | |||
274 | entry->next = dev_priv->head->next; | ||
275 | entry->prev = dev_priv->head; | ||
276 | SET_AGE(&entry->age, MGA_BUFFER_FREE, 0); | ||
277 | entry->buf = buf; | ||
278 | |||
279 | if (dev_priv->head->next != NULL) | ||
280 | dev_priv->head->next->prev = entry; | ||
281 | if (entry->next == NULL) | ||
282 | dev_priv->tail = entry; | ||
283 | |||
284 | buf_priv->list_entry = entry; | ||
285 | buf_priv->discard = 0; | ||
286 | buf_priv->dispatched = 0; | ||
287 | |||
288 | dev_priv->head->next = entry; | ||
289 | } | ||
290 | |||
291 | return 0; | ||
292 | } | ||
293 | |||
294 | static void mga_freelist_cleanup(struct drm_device * dev) | ||
295 | { | ||
296 | drm_mga_private_t *dev_priv = dev->dev_private; | ||
297 | drm_mga_freelist_t *entry; | ||
298 | drm_mga_freelist_t *next; | ||
299 | DRM_DEBUG("\n"); | ||
300 | |||
301 | entry = dev_priv->head; | ||
302 | while (entry) { | ||
303 | next = entry->next; | ||
304 | drm_free(entry, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); | ||
305 | entry = next; | ||
306 | } | ||
307 | |||
308 | dev_priv->head = dev_priv->tail = NULL; | ||
309 | } | ||
310 | |||
311 | #if 0 | ||
312 | /* FIXME: Still needed? | ||
313 | */ | ||
314 | static void mga_freelist_reset(struct drm_device * dev) | ||
315 | { | ||
316 | struct drm_device_dma *dma = dev->dma; | ||
317 | struct drm_buf *buf; | ||
318 | drm_mga_buf_priv_t *buf_priv; | ||
319 | int i; | ||
320 | |||
321 | for (i = 0; i < dma->buf_count; i++) { | ||
322 | buf = dma->buflist[i]; | ||
323 | buf_priv = buf->dev_private; | ||
324 | SET_AGE(&buf_priv->list_entry->age, MGA_BUFFER_FREE, 0); | ||
325 | } | ||
326 | } | ||
327 | #endif | ||
328 | |||
329 | static struct drm_buf *mga_freelist_get(struct drm_device * dev) | ||
330 | { | ||
331 | drm_mga_private_t *dev_priv = dev->dev_private; | ||
332 | drm_mga_freelist_t *next; | ||
333 | drm_mga_freelist_t *prev; | ||
334 | drm_mga_freelist_t *tail = dev_priv->tail; | ||
335 | u32 head, wrap; | ||
336 | DRM_DEBUG("\n"); | ||
337 | |||
338 | head = MGA_READ(MGA_PRIMADDRESS); | ||
339 | wrap = dev_priv->sarea_priv->last_wrap; | ||
340 | |||
341 | DRM_DEBUG(" tail=0x%06lx %d\n", | ||
342 | tail->age.head ? | ||
343 | tail->age.head - dev_priv->primary->offset : 0, | ||
344 | tail->age.wrap); | ||
345 | DRM_DEBUG(" head=0x%06lx %d\n", | ||
346 | head - dev_priv->primary->offset, wrap); | ||
347 | |||
348 | if (TEST_AGE(&tail->age, head, wrap)) { | ||
349 | prev = dev_priv->tail->prev; | ||
350 | next = dev_priv->tail; | ||
351 | prev->next = NULL; | ||
352 | next->prev = next->next = NULL; | ||
353 | dev_priv->tail = prev; | ||
354 | SET_AGE(&next->age, MGA_BUFFER_USED, 0); | ||
355 | return next->buf; | ||
356 | } | ||
357 | |||
358 | DRM_DEBUG("returning NULL!\n"); | ||
359 | return NULL; | ||
360 | } | ||
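mga_freelist_get() hands a buffer back out only once the hardware has consumed it, by comparing the buffer's recorded (head, wrap) age against the current DMA position. An illustrative stand-alone version of that age test; the driver's real TEST_AGE()/SET_AGE() helpers are defined in its headers, and the values in main() are invented:

#include <stdint.h>
#include <stdio.h>

struct dma_age { uint32_t head; uint32_t wrap; };

/* A buffer queued at position age->head during wrap age->wrap is retired
 * once the hardware has moved on to a later wrap, or is in the same wrap
 * but has read past that position. */
static int buffer_retired(const struct dma_age *age,
                          uint32_t hw_head, uint32_t hw_wrap)
{
        return age->wrap < hw_wrap ||
               (age->wrap == hw_wrap && age->head < hw_head);
}

int main(void)
{
        struct dma_age a = { .head = 0x2000, .wrap = 1 };

        printf("%d\n", buffer_retired(&a, 0x1000, 1)); /* 0: hardware not there yet */
        printf("%d\n", buffer_retired(&a, 0x3000, 1)); /* 1: same wrap, head passed */
        printf("%d\n", buffer_retired(&a, 0x0100, 2)); /* 1: a later wrap */
        return 0;
}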
361 | |||
362 | int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf) | ||
363 | { | ||
364 | drm_mga_private_t *dev_priv = dev->dev_private; | ||
365 | drm_mga_buf_priv_t *buf_priv = buf->dev_private; | ||
366 | drm_mga_freelist_t *head, *entry, *prev; | ||
367 | |||
368 | DRM_DEBUG("age=0x%06lx wrap=%d\n", | ||
369 | buf_priv->list_entry->age.head - | ||
370 | dev_priv->primary->offset, buf_priv->list_entry->age.wrap); | ||
371 | |||
372 | entry = buf_priv->list_entry; | ||
373 | head = dev_priv->head; | ||
374 | |||
375 | if (buf_priv->list_entry->age.head == MGA_BUFFER_USED) { | ||
376 | SET_AGE(&entry->age, MGA_BUFFER_FREE, 0); | ||
377 | prev = dev_priv->tail; | ||
378 | prev->next = entry; | ||
379 | entry->prev = prev; | ||
380 | entry->next = NULL; | ||
381 | } else { | ||
382 | prev = head->next; | ||
383 | head->next = entry; | ||
384 | prev->prev = entry; | ||
385 | entry->prev = head; | ||
386 | entry->next = prev; | ||
387 | } | ||
388 | |||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | /* ================================================================ | ||
393 | * DMA initialization, cleanup | ||
394 | */ | ||
395 | |||
396 | int mga_driver_load(struct drm_device * dev, unsigned long flags) | ||
397 | { | ||
398 | drm_mga_private_t *dev_priv; | ||
399 | |||
400 | dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); | ||
401 | if (!dev_priv) | ||
402 | return -ENOMEM; | ||
403 | |||
404 | dev->dev_private = (void *)dev_priv; | ||
405 | memset(dev_priv, 0, sizeof(drm_mga_private_t)); | ||
406 | |||
407 | dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT; | ||
408 | dev_priv->chipset = flags; | ||
409 | |||
410 | dev_priv->mmio_base = drm_get_resource_start(dev, 1); | ||
411 | dev_priv->mmio_size = drm_get_resource_len(dev, 1); | ||
412 | |||
413 | dev->counters += 3; | ||
414 | dev->types[6] = _DRM_STAT_IRQ; | ||
415 | dev->types[7] = _DRM_STAT_PRIMARY; | ||
416 | dev->types[8] = _DRM_STAT_SECONDARY; | ||
417 | |||
418 | return 0; | ||
419 | } | ||
420 | |||
421 | #if __OS_HAS_AGP | ||
422 | /** | ||
423 | * Bootstrap the driver for AGP DMA. | ||
424 | * | ||
425 | * \todo | ||
426 | * Investigate whether there is any benefit to storing the WARP microcode in | ||
427 | * AGP memory. If not, the microcode may as well always be put in PCI | ||
428 | * memory. | ||
429 | * | ||
430 | * \todo | ||
431 | * This routine needs to set dma_bs->agp_mode to the mode actually configured | ||
432 | * in the hardware. Looking just at the Linux AGP driver code, I don't see | ||
433 | * an easy way to determine this. | ||
434 | * | ||
435 | * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap | ||
436 | */ | ||
437 | static int mga_do_agp_dma_bootstrap(struct drm_device * dev, | ||
438 | drm_mga_dma_bootstrap_t * dma_bs) | ||
439 | { | ||
440 | drm_mga_private_t *const dev_priv = | ||
441 | (drm_mga_private_t *) dev->dev_private; | ||
442 | unsigned int warp_size = mga_warp_microcode_size(dev_priv); | ||
443 | int err; | ||
444 | unsigned offset; | ||
445 | const unsigned secondary_size = dma_bs->secondary_bin_count | ||
446 | * dma_bs->secondary_bin_size; | ||
447 | const unsigned agp_size = (dma_bs->agp_size << 20); | ||
448 | struct drm_buf_desc req; | ||
449 | struct drm_agp_mode mode; | ||
450 | struct drm_agp_info info; | ||
451 | struct drm_agp_buffer agp_req; | ||
452 | struct drm_agp_binding bind_req; | ||
453 | |||
454 | /* Acquire AGP. */ | ||
455 | err = drm_agp_acquire(dev); | ||
456 | if (err) { | ||
457 | DRM_ERROR("Unable to acquire AGP: %d\n", err); | ||
458 | return err; | ||
459 | } | ||
460 | |||
461 | err = drm_agp_info(dev, &info); | ||
462 | if (err) { | ||
463 | DRM_ERROR("Unable to get AGP info: %d\n", err); | ||
464 | return err; | ||
465 | } | ||
466 | |||
467 | mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode; | ||
468 | err = drm_agp_enable(dev, mode); | ||
469 | if (err) { | ||
470 | DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode); | ||
471 | return err; | ||
472 | } | ||
473 | |||
474 | /* In addition to the usual AGP mode configuration, the G200 AGP cards | ||
475 | * need to have the AGP mode "manually" set. | ||
476 | */ | ||
477 | |||
478 | if (dev_priv->chipset == MGA_CARD_TYPE_G200) { | ||
479 | if (mode.mode & 0x02) { | ||
480 | MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE); | ||
481 | } else { | ||
482 | MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE); | ||
483 | } | ||
484 | } | ||
485 | |||
486 | /* Allocate and bind AGP memory. */ | ||
487 | agp_req.size = agp_size; | ||
488 | agp_req.type = 0; | ||
489 | err = drm_agp_alloc(dev, &agp_req); | ||
490 | if (err) { | ||
491 | dev_priv->agp_size = 0; | ||
492 | DRM_ERROR("Unable to allocate %uMB AGP memory\n", | ||
493 | dma_bs->agp_size); | ||
494 | return err; | ||
495 | } | ||
496 | |||
497 | dev_priv->agp_size = agp_size; | ||
498 | dev_priv->agp_handle = agp_req.handle; | ||
499 | |||
500 | bind_req.handle = agp_req.handle; | ||
501 | bind_req.offset = 0; | ||
502 | err = drm_agp_bind(dev, &bind_req); | ||
503 | if (err) { | ||
504 | DRM_ERROR("Unable to bind AGP memory: %d\n", err); | ||
505 | return err; | ||
506 | } | ||
507 | |||
508 | /* Make drm_addbufs happy by not trying to create a mapping for less | ||
509 | * than a page. | ||
510 | */ | ||
511 | if (warp_size < PAGE_SIZE) | ||
512 | warp_size = PAGE_SIZE; | ||
513 | |||
514 | offset = 0; | ||
515 | err = drm_addmap(dev, offset, warp_size, | ||
516 | _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp); | ||
517 | if (err) { | ||
518 | DRM_ERROR("Unable to map WARP microcode: %d\n", err); | ||
519 | return err; | ||
520 | } | ||
521 | |||
522 | offset += warp_size; | ||
523 | err = drm_addmap(dev, offset, dma_bs->primary_size, | ||
524 | _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary); | ||
525 | if (err) { | ||
526 | DRM_ERROR("Unable to map primary DMA region: %d\n", err); | ||
527 | return err; | ||
528 | } | ||
529 | |||
530 | offset += dma_bs->primary_size; | ||
531 | err = drm_addmap(dev, offset, secondary_size, | ||
532 | _DRM_AGP, 0, &dev->agp_buffer_map); | ||
533 | if (err) { | ||
534 | DRM_ERROR("Unable to map secondary DMA region: %d\n", err); | ||
535 | return err; | ||
536 | } | ||
537 | |||
538 | (void)memset(&req, 0, sizeof(req)); | ||
539 | req.count = dma_bs->secondary_bin_count; | ||
540 | req.size = dma_bs->secondary_bin_size; | ||
541 | req.flags = _DRM_AGP_BUFFER; | ||
542 | req.agp_start = offset; | ||
543 | |||
544 | err = drm_addbufs_agp(dev, &req); | ||
545 | if (err) { | ||
546 | DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err); | ||
547 | return err; | ||
548 | } | ||
549 | |||
550 | { | ||
551 | struct drm_map_list *_entry; | ||
552 | unsigned long agp_token = 0; | ||
553 | |||
554 | list_for_each_entry(_entry, &dev->maplist, head) { | ||
555 | if (_entry->map == dev->agp_buffer_map) | ||
556 | agp_token = _entry->user_token; | ||
557 | } | ||
558 | if (!agp_token) | ||
559 | return -EFAULT; | ||
560 | |||
561 | dev->agp_buffer_token = agp_token; | ||
562 | } | ||
563 | |||
564 | offset += secondary_size; | ||
565 | err = drm_addmap(dev, offset, agp_size - offset, | ||
566 | _DRM_AGP, 0, &dev_priv->agp_textures); | ||
567 | if (err) { | ||
568 | DRM_ERROR("Unable to map AGP texture region: %d\n", err); | ||
569 | return err; | ||
570 | } | ||
571 | |||
572 | drm_core_ioremap(dev_priv->warp, dev); | ||
573 | drm_core_ioremap(dev_priv->primary, dev); | ||
574 | drm_core_ioremap(dev->agp_buffer_map, dev); | ||
575 | |||
576 | if (!dev_priv->warp->handle || | ||
577 | !dev_priv->primary->handle || !dev->agp_buffer_map->handle) { | ||
578 | DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n", | ||
579 | dev_priv->warp->handle, dev_priv->primary->handle, | ||
580 | dev->agp_buffer_map->handle); | ||
581 | return -ENOMEM; | ||
582 | } | ||
583 | |||
584 | dev_priv->dma_access = MGA_PAGPXFER; | ||
585 | dev_priv->wagp_enable = MGA_WAGP_ENABLE; | ||
586 | |||
587 | DRM_INFO("Initialized card for AGP DMA.\n"); | ||
588 | return 0; | ||
589 | } | ||
590 | #else | ||
591 | static int mga_do_agp_dma_bootstrap(struct drm_device * dev, | ||
592 | drm_mga_dma_bootstrap_t * dma_bs) | ||
593 | { | ||
594 | return -EINVAL; | ||
595 | } | ||
596 | #endif | ||
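As the comments in mga_do_agp_dma_bootstrap() describe, a single AGP allocation is carved into consecutive regions: WARP microcode (padded up to a page so drm_addbufs is happy), the primary DMA stream, the secondary DMA buffers, and whatever remains for textures. A rough, runnable sketch of that offset arithmetic, using invented sizes rather than the driver's defaults:

#include <stdio.h>

#define PAGE_SZ 4096u

int main(void)
{
        unsigned agp_size  = 32u << 20;      /* 32 MB aperture (example)      */
        unsigned warp_size = 1024u;          /* microcode, padded below       */
        unsigned primary   = 1u << 20;       /* primary DMA stream (example)  */
        unsigned secondary = 128u * 65536u;  /* bin_count * bin_size (example)*/
        unsigned offset    = 0;

        if (warp_size < PAGE_SZ)
                warp_size = PAGE_SZ;         /* never map less than a page    */

        printf("warp      @ 0x%08x (%u bytes)\n", offset, warp_size);
        offset += warp_size;
        printf("primary   @ 0x%08x (%u bytes)\n", offset, primary);
        offset += primary;
        printf("secondary @ 0x%08x (%u bytes)\n", offset, secondary);
        offset += secondary;
        printf("textures  @ 0x%08x (%u bytes)\n", offset, agp_size - offset);
        return 0;
}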
597 | |||
598 | /** | ||
599 | * Bootstrap the driver for PCI DMA. | ||
600 | * | ||
601 | * \todo | ||
602 | * The algorithm for decreasing the size of the primary DMA buffer could be | ||
603 | * better. The size should be rounded up to the nearest page size, then | ||
604 | * decrease the request size by a single page each pass through the loop. | ||
605 | * | ||
606 | * \todo | ||
607 | * Determine whether the maximum address passed to drm_pci_alloc is correct. | ||
608 | * The same goes for drm_addbufs_pci. | ||
609 | * | ||
610 | * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap | ||
611 | */ | ||
612 | static int mga_do_pci_dma_bootstrap(struct drm_device * dev, | ||
613 | drm_mga_dma_bootstrap_t * dma_bs) | ||
614 | { | ||
615 | drm_mga_private_t *const dev_priv = | ||
616 | (drm_mga_private_t *) dev->dev_private; | ||
617 | unsigned int warp_size = mga_warp_microcode_size(dev_priv); | ||
618 | unsigned int primary_size; | ||
619 | unsigned int bin_count; | ||
620 | int err; | ||
621 | struct drm_buf_desc req; | ||
622 | |||
623 | if (dev->dma == NULL) { | ||
624 | DRM_ERROR("dev->dma is NULL\n"); | ||
625 | return -EFAULT; | ||
626 | } | ||
627 | |||
628 | /* Make drm_addbufs happy by not trying to create a mapping for less | ||
629 | * than a page. | ||
630 | */ | ||
631 | if (warp_size < PAGE_SIZE) | ||
632 | warp_size = PAGE_SIZE; | ||
633 | |||
634 | /* The proper alignment is 0x100 for this mapping */ | ||
635 | err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT, | ||
636 | _DRM_READ_ONLY, &dev_priv->warp); | ||
637 | if (err != 0) { | ||
638 | DRM_ERROR("Unable to create mapping for WARP microcode: %d\n", | ||
639 | err); | ||
640 | return err; | ||
641 | } | ||
642 | |||
643 | /* Other than the bottom two bits being used to encode other | ||
644 | * information, there don't appear to be any restrictions on the | ||
645 | * alignment of the primary or secondary DMA buffers. | ||
646 | */ | ||
647 | |||
648 | for (primary_size = dma_bs->primary_size; primary_size != 0; | ||
649 | primary_size >>= 1) { | ||
650 | /* The proper alignment for this mapping is 0x04 */ | ||
651 | err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT, | ||
652 | _DRM_READ_ONLY, &dev_priv->primary); | ||
653 | if (!err) | ||
654 | break; | ||
655 | } | ||
656 | |||
657 | if (err != 0) { | ||
658 | DRM_ERROR("Unable to allocate primary DMA region: %d\n", err); | ||
659 | return -ENOMEM; | ||
660 | } | ||
661 | |||
662 | if (dev_priv->primary->size != dma_bs->primary_size) { | ||
663 | DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n", | ||
664 | dma_bs->primary_size, | ||
665 | (unsigned)dev_priv->primary->size); | ||
666 | dma_bs->primary_size = dev_priv->primary->size; | ||
667 | } | ||
668 | |||
669 | for (bin_count = dma_bs->secondary_bin_count; bin_count > 0; | ||
670 | bin_count--) { | ||
671 | (void)memset(&req, 0, sizeof(req)); | ||
672 | req.count = bin_count; | ||
673 | req.size = dma_bs->secondary_bin_size; | ||
674 | |||
675 | err = drm_addbufs_pci(dev, &req); | ||
676 | if (!err) { | ||
677 | break; | ||
678 | } | ||
679 | } | ||
680 | |||
681 | if (bin_count == 0) { | ||
682 | DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err); | ||
683 | return err; | ||
684 | } | ||
685 | |||
686 | if (bin_count != dma_bs->secondary_bin_count) { | ||
687 | DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u " | ||
688 | "to %u.\n", dma_bs->secondary_bin_count, bin_count); | ||
689 | |||
690 | dma_bs->secondary_bin_count = bin_count; | ||
691 | } | ||
692 | |||
693 | dev_priv->dma_access = 0; | ||
694 | dev_priv->wagp_enable = 0; | ||
695 | |||
696 | dma_bs->agp_mode = 0; | ||
697 | |||
698 | DRM_INFO("Initialized card for PCI DMA.\n"); | ||
699 | return 0; | ||
700 | } | ||
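The \todo in the comment block above suggests an alternative to halving the primary-buffer request on every failed allocation: round the request up to a page boundary, then back off one page per attempt. A hypothetical user-space sketch of that strategy, where try_alloc() merely stands in for the drm_addmap() call and the available-memory limit is invented:

#include <stdio.h>

#define PAGE_SZ 4096u

static int try_alloc(unsigned size)
{
        return size <= 24u * PAGE_SZ;   /* pretend only 24 pages are available */
}

int main(void)
{
        unsigned want = 100000u;        /* requested primary buffer size */
        unsigned size = (want + PAGE_SZ - 1) & ~(PAGE_SZ - 1);

        for (; size != 0; size -= PAGE_SZ) {
                if (try_alloc(size)) {
                        printf("allocated %u bytes\n", size);
                        return 0;
                }
        }
        printf("allocation failed\n");
        return 1;
}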
701 | |||
702 | static int mga_do_dma_bootstrap(struct drm_device * dev, | ||
703 | drm_mga_dma_bootstrap_t * dma_bs) | ||
704 | { | ||
705 | const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev); | ||
706 | int err; | ||
707 | drm_mga_private_t *const dev_priv = | ||
708 | (drm_mga_private_t *) dev->dev_private; | ||
709 | |||
710 | dev_priv->used_new_dma_init = 1; | ||
711 | |||
712 | /* The first steps are the same for both PCI and AGP based DMA. Map | ||
713 | * the card's MMIO registers and map a status page. | ||
714 | */ | ||
715 | err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size, | ||
716 | _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio); | ||
717 | if (err) { | ||
718 | DRM_ERROR("Unable to map MMIO region: %d\n", err); | ||
719 | return err; | ||
720 | } | ||
721 | |||
722 | err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, | ||
723 | _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL, | ||
724 | &dev_priv->status); | ||
725 | if (err) { | ||
726 | DRM_ERROR("Unable to map status region: %d\n", err); | ||
727 | return err; | ||
728 | } | ||
729 | |||
730 | /* The DMA initialization procedure is slightly different for PCI and | ||
731 | * AGP cards. AGP cards just allocate a large block of AGP memory and | ||
732 | * carve off portions of it for internal uses. The remaining memory | ||
733 | * is returned to user-mode to be used for AGP textures. | ||
734 | */ | ||
735 | if (is_agp) { | ||
736 | err = mga_do_agp_dma_bootstrap(dev, dma_bs); | ||
737 | } | ||
738 | |||
739 | /* If we attempted to initialize the card for AGP DMA but failed, | ||
740 | * clean up any mess that may have been created. | ||
741 | */ | ||
742 | |||
743 | if (err) { | ||
744 | mga_do_cleanup_dma(dev, MINIMAL_CLEANUP); | ||
745 | } | ||
746 | |||
747 | /* Not only do we want to try to initialize PCI cards for PCI DMA, | ||
748 | * but we also try to initialize AGP cards that could not be | ||
749 | * initialized for AGP DMA. This covers the case where we have an AGP | ||
750 | * card in a system with an unsupported AGP chipset. In that case the | ||
751 | * card will be detected as AGP, but we won't be able to allocate any | ||
752 | * AGP memory, etc. | ||
753 | */ | ||
754 | |||
755 | if (!is_agp || err) { | ||
756 | err = mga_do_pci_dma_bootstrap(dev, dma_bs); | ||
757 | } | ||
758 | |||
759 | return err; | ||
760 | } | ||
761 | |||
762 | int mga_dma_bootstrap(struct drm_device *dev, void *data, | ||
763 | struct drm_file *file_priv) | ||
764 | { | ||
765 | drm_mga_dma_bootstrap_t *bootstrap = data; | ||
766 | int err; | ||
767 | static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 }; | ||
768 | const drm_mga_private_t *const dev_priv = | ||
769 | (drm_mga_private_t *) dev->dev_private; | ||
770 | |||
771 | err = mga_do_dma_bootstrap(dev, bootstrap); | ||
772 | if (err) { | ||
773 | mga_do_cleanup_dma(dev, FULL_CLEANUP); | ||
774 | return err; | ||
775 | } | ||
776 | |||
777 | if (dev_priv->agp_textures != NULL) { | ||
778 | bootstrap->texture_handle = dev_priv->agp_textures->offset; | ||
779 | bootstrap->texture_size = dev_priv->agp_textures->size; | ||
780 | } else { | ||
781 | bootstrap->texture_handle = 0; | ||
782 | bootstrap->texture_size = 0; | ||
783 | } | ||
784 | |||
785 | bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07]; | ||
786 | |||
787 | return err; | ||
788 | } | ||
789 | |||
790 | static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init) | ||
791 | { | ||
792 | drm_mga_private_t *dev_priv; | ||
793 | int ret; | ||
794 | DRM_DEBUG("\n"); | ||
795 | |||
796 | dev_priv = dev->dev_private; | ||
797 | |||
798 | if (init->sgram) { | ||
799 | dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK; | ||
800 | } else { | ||
801 | dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR; | ||
802 | } | ||
803 | dev_priv->maccess = init->maccess; | ||
804 | |||
805 | dev_priv->fb_cpp = init->fb_cpp; | ||
806 | dev_priv->front_offset = init->front_offset; | ||
807 | dev_priv->front_pitch = init->front_pitch; | ||
808 | dev_priv->back_offset = init->back_offset; | ||
809 | dev_priv->back_pitch = init->back_pitch; | ||
810 | |||
811 | dev_priv->depth_cpp = init->depth_cpp; | ||
812 | dev_priv->depth_offset = init->depth_offset; | ||
813 | dev_priv->depth_pitch = init->depth_pitch; | ||
814 | |||
815 | /* FIXME: Need to support AGP textures... | ||
816 | */ | ||
817 | dev_priv->texture_offset = init->texture_offset[0]; | ||
818 | dev_priv->texture_size = init->texture_size[0]; | ||
819 | |||
820 | dev_priv->sarea = drm_getsarea(dev); | ||
821 | if (!dev_priv->sarea) { | ||
822 | DRM_ERROR("failed to find sarea!\n"); | ||
823 | return -EINVAL; | ||
824 | } | ||
825 | |||
826 | if (!dev_priv->used_new_dma_init) { | ||
827 | |||
828 | dev_priv->dma_access = MGA_PAGPXFER; | ||
829 | dev_priv->wagp_enable = MGA_WAGP_ENABLE; | ||
830 | |||
831 | dev_priv->status = drm_core_findmap(dev, init->status_offset); | ||
832 | if (!dev_priv->status) { | ||
833 | DRM_ERROR("failed to find status page!\n"); | ||
834 | return -EINVAL; | ||
835 | } | ||
836 | dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); | ||
837 | if (!dev_priv->mmio) { | ||
838 | DRM_ERROR("failed to find mmio region!\n"); | ||
839 | return -EINVAL; | ||
840 | } | ||
841 | dev_priv->warp = drm_core_findmap(dev, init->warp_offset); | ||
842 | if (!dev_priv->warp) { | ||
843 | DRM_ERROR("failed to find warp microcode region!\n"); | ||
844 | return -EINVAL; | ||
845 | } | ||
846 | dev_priv->primary = drm_core_findmap(dev, init->primary_offset); | ||
847 | if (!dev_priv->primary) { | ||
848 | DRM_ERROR("failed to find primary dma region!\n"); | ||
849 | return -EINVAL; | ||
850 | } | ||
851 | dev->agp_buffer_token = init->buffers_offset; | ||
852 | dev->agp_buffer_map = | ||
853 | drm_core_findmap(dev, init->buffers_offset); | ||
854 | if (!dev->agp_buffer_map) { | ||
855 | DRM_ERROR("failed to find dma buffer region!\n"); | ||
856 | return -EINVAL; | ||
857 | } | ||
858 | |||
859 | drm_core_ioremap(dev_priv->warp, dev); | ||
860 | drm_core_ioremap(dev_priv->primary, dev); | ||
861 | drm_core_ioremap(dev->agp_buffer_map, dev); | ||
862 | } | ||
863 | |||
864 | dev_priv->sarea_priv = | ||
865 | (drm_mga_sarea_t *) ((u8 *) dev_priv->sarea->handle + | ||
866 | init->sarea_priv_offset); | ||
867 | |||
868 | if (!dev_priv->warp->handle || | ||
869 | !dev_priv->primary->handle || | ||
870 | ((dev_priv->dma_access != 0) && | ||
871 | ((dev->agp_buffer_map == NULL) || | ||
872 | (dev->agp_buffer_map->handle == NULL)))) { | ||
873 | DRM_ERROR("failed to ioremap agp regions!\n"); | ||
874 | return -ENOMEM; | ||
875 | } | ||
876 | |||
877 | ret = mga_warp_install_microcode(dev_priv); | ||
878 | if (ret < 0) { | ||
879 | DRM_ERROR("failed to install WARP ucode!: %d\n", ret); | ||
880 | return ret; | ||
881 | } | ||
882 | |||
883 | ret = mga_warp_init(dev_priv); | ||
884 | if (ret < 0) { | ||
885 | DRM_ERROR("failed to init WARP engine!: %d\n", ret); | ||
886 | return ret; | ||
887 | } | ||
888 | |||
889 | dev_priv->prim.status = (u32 *) dev_priv->status->handle; | ||
890 | |||
891 | mga_do_wait_for_idle(dev_priv); | ||
892 | |||
893 | /* Init the primary DMA registers. | ||
894 | */ | ||
895 | MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL); | ||
896 | #if 0 | ||
897 | MGA_WRITE(MGA_PRIMPTR, virt_to_bus((void *)dev_priv->prim.status) | MGA_PRIMPTREN0 | /* Soft trap, SECEND, SETUPEND */ | ||
898 | MGA_PRIMPTREN1); /* DWGSYNC */ | ||
899 | #endif | ||
900 | |||
901 | dev_priv->prim.start = (u8 *) dev_priv->primary->handle; | ||
902 | dev_priv->prim.end = ((u8 *) dev_priv->primary->handle | ||
903 | + dev_priv->primary->size); | ||
904 | dev_priv->prim.size = dev_priv->primary->size; | ||
905 | |||
906 | dev_priv->prim.tail = 0; | ||
907 | dev_priv->prim.space = dev_priv->prim.size; | ||
908 | dev_priv->prim.wrapped = 0; | ||
909 | |||
910 | dev_priv->prim.last_flush = 0; | ||
911 | dev_priv->prim.last_wrap = 0; | ||
912 | |||
913 | dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE; | ||
914 | |||
915 | dev_priv->prim.status[0] = dev_priv->primary->offset; | ||
916 | dev_priv->prim.status[1] = 0; | ||
917 | |||
918 | dev_priv->sarea_priv->last_wrap = 0; | ||
919 | dev_priv->sarea_priv->last_frame.head = 0; | ||
920 | dev_priv->sarea_priv->last_frame.wrap = 0; | ||
921 | |||
922 | if (mga_freelist_init(dev, dev_priv) < 0) { | ||
923 | DRM_ERROR("could not initialize freelist\n"); | ||
924 | return -ENOMEM; | ||
925 | } | ||
926 | |||
927 | return 0; | ||
928 | } | ||
929 | |||
930 | static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup) | ||
931 | { | ||
932 | int err = 0; | ||
933 | DRM_DEBUG("\n"); | ||
934 | |||
935 | /* Make sure interrupts are disabled here, because the uninstall ioctl | ||
936 | * may not have been called from userspace; once dev_private | ||
937 | * is freed, it is too late. | ||
938 | */ | ||
939 | if (dev->irq_enabled) | ||
940 | drm_irq_uninstall(dev); | ||
941 | |||
942 | if (dev->dev_private) { | ||
943 | drm_mga_private_t *dev_priv = dev->dev_private; | ||
944 | |||
945 | if ((dev_priv->warp != NULL) | ||
946 | && (dev_priv->warp->type != _DRM_CONSISTENT)) | ||
947 | drm_core_ioremapfree(dev_priv->warp, dev); | ||
948 | |||
949 | if ((dev_priv->primary != NULL) | ||
950 | && (dev_priv->primary->type != _DRM_CONSISTENT)) | ||
951 | drm_core_ioremapfree(dev_priv->primary, dev); | ||
952 | |||
953 | if (dev->agp_buffer_map != NULL) | ||
954 | drm_core_ioremapfree(dev->agp_buffer_map, dev); | ||
955 | |||
956 | if (dev_priv->used_new_dma_init) { | ||
957 | #if __OS_HAS_AGP | ||
958 | if (dev_priv->agp_handle != 0) { | ||
959 | struct drm_agp_binding unbind_req; | ||
960 | struct drm_agp_buffer free_req; | ||
961 | |||
962 | unbind_req.handle = dev_priv->agp_handle; | ||
963 | drm_agp_unbind(dev, &unbind_req); | ||
964 | |||
965 | free_req.handle = dev_priv->agp_handle; | ||
966 | drm_agp_free(dev, &free_req); | ||
967 | |||
968 | dev_priv->agp_textures = NULL; | ||
969 | dev_priv->agp_size = 0; | ||
970 | dev_priv->agp_handle = 0; | ||
971 | } | ||
972 | |||
973 | if ((dev->agp != NULL) && dev->agp->acquired) { | ||
974 | err = drm_agp_release(dev); | ||
975 | } | ||
976 | #endif | ||
977 | } | ||
978 | |||
979 | dev_priv->warp = NULL; | ||
980 | dev_priv->primary = NULL; | ||
981 | dev_priv->sarea = NULL; | ||
982 | dev_priv->sarea_priv = NULL; | ||
983 | dev->agp_buffer_map = NULL; | ||
984 | |||
985 | if (full_cleanup) { | ||
986 | dev_priv->mmio = NULL; | ||
987 | dev_priv->status = NULL; | ||
988 | dev_priv->used_new_dma_init = 0; | ||
989 | } | ||
990 | |||
991 | memset(&dev_priv->prim, 0, sizeof(dev_priv->prim)); | ||
992 | dev_priv->warp_pipe = 0; | ||
993 | memset(dev_priv->warp_pipe_phys, 0, | ||
994 | sizeof(dev_priv->warp_pipe_phys)); | ||
995 | |||
996 | if (dev_priv->head != NULL) { | ||
997 | mga_freelist_cleanup(dev); | ||
998 | } | ||
999 | } | ||
1000 | |||
1001 | return err; | ||
1002 | } | ||
1003 | |||
1004 | int mga_dma_init(struct drm_device *dev, void *data, | ||
1005 | struct drm_file *file_priv) | ||
1006 | { | ||
1007 | drm_mga_init_t *init = data; | ||
1008 | int err; | ||
1009 | |||
1010 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1011 | |||
1012 | switch (init->func) { | ||
1013 | case MGA_INIT_DMA: | ||
1014 | err = mga_do_init_dma(dev, init); | ||
1015 | if (err) { | ||
1016 | (void)mga_do_cleanup_dma(dev, FULL_CLEANUP); | ||
1017 | } | ||
1018 | return err; | ||
1019 | case MGA_CLEANUP_DMA: | ||
1020 | return mga_do_cleanup_dma(dev, FULL_CLEANUP); | ||
1021 | } | ||
1022 | |||
1023 | return -EINVAL; | ||
1024 | } | ||
1025 | |||
1026 | /* ================================================================ | ||
1027 | * Primary DMA stream management | ||
1028 | */ | ||
1029 | |||
1030 | int mga_dma_flush(struct drm_device *dev, void *data, | ||
1031 | struct drm_file *file_priv) | ||
1032 | { | ||
1033 | drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; | ||
1034 | struct drm_lock *lock = data; | ||
1035 | |||
1036 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1037 | |||
1038 | DRM_DEBUG("%s%s%s\n", | ||
1039 | (lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "", | ||
1040 | (lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "", | ||
1041 | (lock->flags & _DRM_LOCK_QUIESCENT) ? "idle, " : ""); | ||
1042 | |||
1043 | WRAP_WAIT_WITH_RETURN(dev_priv); | ||
1044 | |||
1045 | if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) { | ||
1046 | mga_do_dma_flush(dev_priv); | ||
1047 | } | ||
1048 | |||
1049 | if (lock->flags & _DRM_LOCK_QUIESCENT) { | ||
1050 | #if MGA_DMA_DEBUG | ||
1051 | int ret = mga_do_wait_for_idle(dev_priv); | ||
1052 | if (ret < 0) | ||
1053 | DRM_INFO("-EBUSY\n"); | ||
1054 | return ret; | ||
1055 | #else | ||
1056 | return mga_do_wait_for_idle(dev_priv); | ||
1057 | #endif | ||
1058 | } else { | ||
1059 | return 0; | ||
1060 | } | ||
1061 | } | ||
1062 | |||
1063 | int mga_dma_reset(struct drm_device *dev, void *data, | ||
1064 | struct drm_file *file_priv) | ||
1065 | { | ||
1066 | drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; | ||
1067 | |||
1068 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1069 | |||
1070 | return mga_do_dma_reset(dev_priv); | ||
1071 | } | ||
1072 | |||
1073 | /* ================================================================ | ||
1074 | * DMA buffer management | ||
1075 | */ | ||
1076 | |||
1077 | static int mga_dma_get_buffers(struct drm_device * dev, | ||
1078 | struct drm_file *file_priv, struct drm_dma * d) | ||
1079 | { | ||
1080 | struct drm_buf *buf; | ||
1081 | int i; | ||
1082 | |||
1083 | for (i = d->granted_count; i < d->request_count; i++) { | ||
1084 | buf = mga_freelist_get(dev); | ||
1085 | if (!buf) | ||
1086 | return -EAGAIN; | ||
1087 | |||
1088 | buf->file_priv = file_priv; | ||
1089 | |||
1090 | if (DRM_COPY_TO_USER(&d->request_indices[i], | ||
1091 | &buf->idx, sizeof(buf->idx))) | ||
1092 | return -EFAULT; | ||
1093 | if (DRM_COPY_TO_USER(&d->request_sizes[i], | ||
1094 | &buf->total, sizeof(buf->total))) | ||
1095 | return -EFAULT; | ||
1096 | |||
1097 | d->granted_count++; | ||
1098 | } | ||
1099 | return 0; | ||
1100 | } | ||
1101 | |||
1102 | int mga_dma_buffers(struct drm_device *dev, void *data, | ||
1103 | struct drm_file *file_priv) | ||
1104 | { | ||
1105 | struct drm_device_dma *dma = dev->dma; | ||
1106 | drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; | ||
1107 | struct drm_dma *d = data; | ||
1108 | int ret = 0; | ||
1109 | |||
1110 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1111 | |||
1112 | /* Please don't send us buffers. | ||
1113 | */ | ||
1114 | if (d->send_count != 0) { | ||
1115 | DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", | ||
1116 | DRM_CURRENTPID, d->send_count); | ||
1117 | return -EINVAL; | ||
1118 | } | ||
1119 | |||
1120 | /* We'll send you buffers. | ||
1121 | */ | ||
1122 | if (d->request_count < 0 || d->request_count > dma->buf_count) { | ||
1123 | DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", | ||
1124 | DRM_CURRENTPID, d->request_count, dma->buf_count); | ||
1125 | return -EINVAL; | ||
1126 | } | ||
1127 | |||
1128 | WRAP_TEST_WITH_RETURN(dev_priv); | ||
1129 | |||
1130 | d->granted_count = 0; | ||
1131 | |||
1132 | if (d->request_count) { | ||
1133 | ret = mga_dma_get_buffers(dev, file_priv, d); | ||
1134 | } | ||
1135 | |||
1136 | return ret; | ||
1137 | } | ||
1138 | |||
1139 | /** | ||
1140 | * Called just before the module is unloaded. | ||
1141 | */ | ||
1142 | int mga_driver_unload(struct drm_device * dev) | ||
1143 | { | ||
1144 | drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER); | ||
1145 | dev->dev_private = NULL; | ||
1146 | |||
1147 | return 0; | ||
1148 | } | ||
1149 | |||
1150 | /** | ||
1151 | * Called when the last opener of the device is closed. | ||
1152 | */ | ||
1153 | void mga_driver_lastclose(struct drm_device * dev) | ||
1154 | { | ||
1155 | mga_do_cleanup_dma(dev, FULL_CLEANUP); | ||
1156 | } | ||
1157 | |||
1158 | int mga_driver_dma_quiescent(struct drm_device * dev) | ||
1159 | { | ||
1160 | drm_mga_private_t *dev_priv = dev->dev_private; | ||
1161 | return mga_do_wait_for_idle(dev_priv); | ||
1162 | } | ||