path: root/drivers/gpu/drm/savage/savage_bci.c
author     Dave Airlie <airlied@redhat.com>  2008-05-28 20:09:59 -0400
committer  Dave Airlie <airlied@redhat.com>  2008-07-13 20:45:01 -0400
commit     c0e09200dc0813972442e550a5905a132768e56c (patch)
tree       d38e635a30ff8b0a2b98b9d7f97cab1501f8209e /drivers/gpu/drm/savage/savage_bci.c
parent     bce7f793daec3e65ec5c5705d2457b81fe7b5725 (diff)
drm: reorganise drm tree to be more future proof.
With the coming of kernel-based modesetting and the memory manager stuff,
the everything-in-one-directory approach was getting very ugly and
starting to be unmanageable. This restructures the drm along the lines of
other kernel components. It creates a drivers/gpu/drm directory and moves
the hw drivers into subdirectories. It moves the includes into an
include/drm, and sets up the unifdef for the userspace headers we should
be exporting.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/savage/savage_bci.c')
-rw-r--r--  drivers/gpu/drm/savage/savage_bci.c  1095
1 file changed, 1095 insertions, 0 deletions
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
new file mode 100644
index 000000000000..d465b2f9c1cd
--- /dev/null
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -0,0 +1,1095 @@
/* savage_bci.c -- BCI support for Savage
 *
 * Copyright 2004 Felix Kuehling
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "savage_drm.h"
#include "savage_drv.h"

/* Need a long timeout: shadow status updates can take a while,
 * and so can waiting for events when the queue is full. */
#define SAVAGE_DEFAULT_USEC_TIMEOUT    1000000  /* 1s */
#define SAVAGE_EVENT_USEC_TIMEOUT      5000000  /* 5s */
#define SAVAGE_FREELIST_DEBUG          0

static int savage_do_cleanup_bci(struct drm_device *dev);

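/* FIFO waiting comes in three flavors below: one that polls a shadow
 * status word that the hardware keeps up to date in system memory, and
 * two that poll the Savage3D-series or Savage4-series status register
 * directly. All of them spin until at least n free entries are
 * guaranteed in the COB + BCI FIFO, or time out with -EBUSY.
 */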
static int
savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
{
        uint32_t mask = dev_priv->status_used_mask;
        uint32_t threshold = dev_priv->bci_threshold_hi;
        uint32_t status;
        int i;

#if SAVAGE_BCI_DEBUG
        if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
                DRM_ERROR("Trying to emit %d words "
                          "(more than guaranteed space in COB)\n", n);
#endif

        for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
                DRM_MEMORYBARRIER();
                status = dev_priv->status_ptr[0];
                if ((status & mask) < threshold)
                        return 0;
                DRM_UDELAY(1);
        }

#if SAVAGE_BCI_DEBUG
        DRM_ERROR("failed!\n");
        DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold);
#endif
        return -EBUSY;
}


static int
savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n)
{
        uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
        uint32_t status;
        int i;

        for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
                status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
                if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
                        return 0;
                DRM_UDELAY(1);
        }

#if SAVAGE_BCI_DEBUG
        DRM_ERROR("failed!\n");
        DRM_INFO(" status=0x%08x\n", status);
#endif
        return -EBUSY;
}

static int
savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n)
{
        uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
        uint32_t status;
        int i;

        for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
                status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
                if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
                        return 0;
                DRM_UDELAY(1);
        }

#if SAVAGE_BCI_DEBUG
        DRM_ERROR("failed!\n");
        DRM_INFO(" status=0x%08x\n", status);
#endif
        return -EBUSY;
}

/*
 * Waiting for events.
 *
 * The BIOS resets the event tag to 0 on mode changes. Therefore we
 * never emit 0 to the event tag. If we find a 0 event tag we know the
 * BIOS stomped on it and return success assuming that the BIOS waited
 * for engine idle.
 *
 * Note: if the Xserver uses the event tag it has to follow the same
 * rule. Otherwise there may be glitches every 2^16 events.
 */
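/* The comparison below treats the 16-bit event tags as points on a
 * circle: tag e counts as reached when the hardware tag equals e or is
 * at most 2^15 - 1 steps ahead of it, i.e. when
 * ((status - e) & 0xffff) <= 0x7fff.
 */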
static int
savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e)
{
        uint32_t status;
        int i;

        for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
                DRM_MEMORYBARRIER();
                status = dev_priv->status_ptr[1];
                if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
                    (status & 0xffff) == 0)
                        return 0;
                DRM_UDELAY(1);
        }

#if SAVAGE_BCI_DEBUG
        DRM_ERROR("failed!\n");
        DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
#endif

        return -EBUSY;
}

static int
savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e)
{
        uint32_t status;
        int i;

        for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
                status = SAVAGE_READ(SAVAGE_STATUS_WORD1);
                if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
                    (status & 0xffff) == 0)
                        return 0;
                DRM_UDELAY(1);
        }

#if SAVAGE_BCI_DEBUG
        DRM_ERROR("failed!\n");
        DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
#endif

        return -EBUSY;
}

uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
                               unsigned int flags)
{
        uint16_t count;
        BCI_LOCALS;

        if (dev_priv->status_ptr) {
                /* coordinate with Xserver */
                count = dev_priv->status_ptr[1023];
                if (count < dev_priv->event_counter)
                        dev_priv->event_wrap++;
        } else {
                count = dev_priv->event_counter;
        }
        count = (count + 1) & 0xffff;
        if (count == 0) {
                count++;        /* See the comment above savage_wait_event_*. */
                dev_priv->event_wrap++;
        }
        dev_priv->event_counter = count;
        if (dev_priv->status_ptr)
                dev_priv->status_ptr[1023] = (uint32_t) count;

        if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
                unsigned int wait_cmd = BCI_CMD_WAIT;
                if ((flags & SAVAGE_WAIT_2D))
                        wait_cmd |= BCI_CMD_WAIT_2D;
                if ((flags & SAVAGE_WAIT_3D))
                        wait_cmd |= BCI_CMD_WAIT_3D;
                BEGIN_BCI(2);
                BCI_WRITE(wait_cmd);
        } else {
                BEGIN_BCI(1);
        }
        BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t) count);

        return count;
}

/*
 * Freelist management
 */
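/* The freelist is a doubly-linked list with head and tail sentinels
 * embedded in dev_priv. Each buffer carries an age (event tag plus
 * wrap count); a buffer at the tail may be handed out again once the
 * hardware's event counter has passed its age.
 */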
static int savage_freelist_init(struct drm_device * dev)
{
        drm_savage_private_t *dev_priv = dev->dev_private;
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf *buf;
        drm_savage_buf_priv_t *entry;
        int i;
        DRM_DEBUG("count=%d\n", dma->buf_count);

        dev_priv->head.next = &dev_priv->tail;
        dev_priv->head.prev = NULL;
        dev_priv->head.buf = NULL;

        dev_priv->tail.next = NULL;
        dev_priv->tail.prev = &dev_priv->head;
        dev_priv->tail.buf = NULL;

        for (i = 0; i < dma->buf_count; i++) {
                buf = dma->buflist[i];
                entry = buf->dev_private;

                SET_AGE(&entry->age, 0, 0);
                entry->buf = buf;

                entry->next = dev_priv->head.next;
                entry->prev = &dev_priv->head;
                dev_priv->head.next->prev = entry;
                dev_priv->head.next = entry;
        }

        return 0;
}

static struct drm_buf *savage_freelist_get(struct drm_device * dev)
{
        drm_savage_private_t *dev_priv = dev->dev_private;
        drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
        uint16_t event;
        unsigned int wrap;
        DRM_DEBUG("\n");

        UPDATE_EVENT_COUNTER();
        if (dev_priv->status_ptr)
                event = dev_priv->status_ptr[1] & 0xffff;
        else
                event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
        wrap = dev_priv->event_wrap;
        if (event > dev_priv->event_counter)
                wrap--;         /* hardware hasn't passed the last wrap yet */

        DRM_DEBUG(" tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
        DRM_DEBUG(" head=0x%04x %d\n", event, wrap);

        if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
                drm_savage_buf_priv_t *next = tail->next;
                drm_savage_buf_priv_t *prev = tail->prev;
                prev->next = next;
                next->prev = prev;
                tail->next = tail->prev = NULL;
                return tail->buf;
        }

        DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
        return NULL;
}

void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf)
{
        drm_savage_private_t *dev_priv = dev->dev_private;
        drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;

        DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap);

        if (entry->next != NULL || entry->prev != NULL) {
                DRM_ERROR("entry already on freelist.\n");
                return;
        }

        prev = &dev_priv->head;
        next = prev->next;
        prev->next = entry;
        next->prev = entry;
        entry->prev = prev;
        entry->next = next;
}

/*
 * Command DMA
 */
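/* Command DMA treats the cmd_dma map as a ring of fixed-size pages of
 * SAVAGE_DMA_PAGE_SIZE dwords each. savage_dma_alloc hands out space
 * page by page, savage_dma_flush points the BCI at the pending range,
 * and pages are aged with event tags so they are only reused once the
 * hardware is done reading them.
 */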
static int savage_dma_init(drm_savage_private_t * dev_priv)
{
        unsigned int i;

        dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
            (SAVAGE_DMA_PAGE_SIZE * 4);
        dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
                                        dev_priv->nr_dma_pages, DRM_MEM_DRIVER);
        if (dev_priv->dma_pages == NULL)
                return -ENOMEM;

        for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
                SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
                dev_priv->dma_pages[i].used = 0;
                dev_priv->dma_pages[i].flushed = 0;
        }
        SET_AGE(&dev_priv->last_dma_age, 0, 0);

        dev_priv->first_dma_page = 0;
        dev_priv->current_dma_page = 0;

        return 0;
}

void savage_dma_reset(drm_savage_private_t * dev_priv)
{
        uint16_t event;
        unsigned int wrap, i;
        event = savage_bci_emit_event(dev_priv, 0);
        wrap = dev_priv->event_wrap;
        for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
                SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
                dev_priv->dma_pages[i].used = 0;
                dev_priv->dma_pages[i].flushed = 0;
        }
        SET_AGE(&dev_priv->last_dma_age, event, wrap);
        dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}

void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page)
{
        uint16_t event;
        unsigned int wrap;

        /* Faked DMA buffer pages don't age. */
        if (dev_priv->cmd_dma == &dev_priv->fake_dma)
                return;

        UPDATE_EVENT_COUNTER();
        if (dev_priv->status_ptr)
                event = dev_priv->status_ptr[1] & 0xffff;
        else
                event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
        wrap = dev_priv->event_wrap;
        if (event > dev_priv->event_counter)
                wrap--;         /* hardware hasn't passed the last wrap yet */

        if (dev_priv->dma_pages[page].age.wrap > wrap ||
            (dev_priv->dma_pages[page].age.wrap == wrap &&
             dev_priv->dma_pages[page].age.event > event)) {
                if (dev_priv->wait_evnt(dev_priv,
                                        dev_priv->dma_pages[page].age.event)
                    < 0)
                        DRM_ERROR("wait_evnt failed!\n");
        }
}

uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, unsigned int n)
{
        unsigned int cur = dev_priv->current_dma_page;
        unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
            dev_priv->dma_pages[cur].used;
        unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) /
            SAVAGE_DMA_PAGE_SIZE;
        uint32_t *dma_ptr;
        unsigned int i;

        DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
                  cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);

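        /* If the request still fits before the end of the page array,
         * keep filling from the current page onwards; otherwise flush
         * what is pending and wrap around to page 0.
         */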
        if (cur + nr_pages < dev_priv->nr_dma_pages) {
                dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
                    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
                if (n < rest)
                        rest = n;
                dev_priv->dma_pages[cur].used += rest;
                n -= rest;
                cur++;
        } else {
                dev_priv->dma_flush(dev_priv);
                nr_pages =
                    (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE;
                for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
                        dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
                        dev_priv->dma_pages[i].used = 0;
                        dev_priv->dma_pages[i].flushed = 0;
                }
                dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle;
                dev_priv->first_dma_page = cur = 0;
        }
        for (i = cur; nr_pages > 0; ++i, --nr_pages) {
#if SAVAGE_DMA_DEBUG
                if (dev_priv->dma_pages[i].used) {
                        DRM_ERROR("unflushed page %u: used=%u\n",
                                  i, dev_priv->dma_pages[i].used);
                }
#endif
                if (n > SAVAGE_DMA_PAGE_SIZE)
                        dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
                else
                        dev_priv->dma_pages[i].used = n;
                n -= SAVAGE_DMA_PAGE_SIZE;
        }
        dev_priv->current_dma_page = --i;

        DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
                  i, dev_priv->dma_pages[i].used, n);

        savage_dma_wait(dev_priv, dev_priv->current_dma_page);

        return dma_ptr;
}

static void savage_dma_flush(drm_savage_private_t * dev_priv)
{
        unsigned int first = dev_priv->first_dma_page;
        unsigned int cur = dev_priv->current_dma_page;
        uint16_t event;
        unsigned int wrap, pad, align, len, i;
        unsigned long phys_addr;
        BCI_LOCALS;

        if (first == cur &&
            dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
                return;

        /* pad length to multiples of 2 entries
         * align start of next DMA block to multiples of 8 entries */
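        /* -x & (n - 1), with n a power of two, is the number of entries
         * needed to round x up to the next multiple of n; that is all
         * the two lines below do. */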
        pad = -dev_priv->dma_pages[cur].used & 1;
        align = -(dev_priv->dma_pages[cur].used + pad) & 7;

        DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
                  "pad=%u, align=%u\n",
                  first, cur, dev_priv->dma_pages[first].flushed,
                  dev_priv->dma_pages[cur].used, pad, align);

        /* pad with noops */
        if (pad) {
                uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
                    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
                dev_priv->dma_pages[cur].used += pad;
                while (pad != 0) {
                        *dma_ptr++ = BCI_CMD_WAIT;
                        pad--;
                }
        }

        DRM_MEMORYBARRIER();

        /* do flush ... */
        phys_addr = dev_priv->cmd_dma->offset +
            (first * SAVAGE_DMA_PAGE_SIZE +
             dev_priv->dma_pages[first].flushed) * 4;
        len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
            dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed;

        DRM_DEBUG("phys_addr=%lx, len=%u\n",
                  phys_addr | dev_priv->dma_type, len);

        BEGIN_BCI(3);
        BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
        BCI_WRITE(phys_addr | dev_priv->dma_type);
        BCI_DMA(len);

        /* fix alignment of the start of the next block */
        dev_priv->dma_pages[cur].used += align;

        /* age DMA pages */
        event = savage_bci_emit_event(dev_priv, 0);
        wrap = dev_priv->event_wrap;
        for (i = first; i < cur; ++i) {
                SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
                dev_priv->dma_pages[i].used = 0;
                dev_priv->dma_pages[i].flushed = 0;
        }
        /* age the current page only when it's full */
        if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
                SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
                dev_priv->dma_pages[cur].used = 0;
                dev_priv->dma_pages[cur].flushed = 0;
                /* advance to next page */
                cur++;
                if (cur == dev_priv->nr_dma_pages)
                        cur = 0;
                dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
        } else {
                dev_priv->first_dma_page = cur;
                dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used;
        }
        SET_AGE(&dev_priv->last_dma_age, event, wrap);

        DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
                  dev_priv->dma_pages[cur].used,
                  dev_priv->dma_pages[cur].flushed);
}

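/* "Faked" command DMA: with no real DMA region available, commands are
 * buffered in system memory and replayed by the CPU, one BCI_WRITE per
 * dword, when the buffer is flushed.
 */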
static void savage_fake_dma_flush(drm_savage_private_t * dev_priv)
{
        unsigned int i, j;
        BCI_LOCALS;

        if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
            dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
                return;

        DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
                  dev_priv->first_dma_page, dev_priv->current_dma_page,
                  dev_priv->dma_pages[dev_priv->current_dma_page].used);

        for (i = dev_priv->first_dma_page;
             i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
             ++i) {
                uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
                    i * SAVAGE_DMA_PAGE_SIZE;
#if SAVAGE_DMA_DEBUG
                /* Sanity check: all pages except the last one must be full. */
                if (i < dev_priv->current_dma_page &&
                    dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
                        DRM_ERROR("partial DMA page %u: used=%u",
                                  i, dev_priv->dma_pages[i].used);
                }
#endif
                BEGIN_BCI(dev_priv->dma_pages[i].used);
                for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
                        BCI_WRITE(dma_ptr[j]);
                }
                dev_priv->dma_pages[i].used = 0;
        }

        /* reset to first page */
        dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}

int savage_driver_load(struct drm_device *dev, unsigned long chipset)
{
        drm_savage_private_t *dev_priv;

        dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
        if (dev_priv == NULL)
                return -ENOMEM;

        memset(dev_priv, 0, sizeof(drm_savage_private_t));
        dev->dev_private = (void *)dev_priv;

        dev_priv->chipset = (enum savage_family)chipset;

        return 0;
}

/*
 * Initialize mappings. On Savage4 and SavageIX the alignment
 * and size of the aperture is not suitable for automatic MTRR setup
 * in drm_addmap. Therefore we add them manually before the maps are
 * initialized, and tear them down on last close.
 */
int savage_driver_firstopen(struct drm_device *dev)
{
        drm_savage_private_t *dev_priv = dev->dev_private;
        unsigned long mmio_base, fb_base, fb_size, aperture_base;
        /* fb_rsrc and aper_rsrc aren't really used currently, but still exist
         * in case we decide we need information on the BAR for BSD in the
         * future.
         */
        unsigned int fb_rsrc, aper_rsrc;
        int ret = 0;

        dev_priv->mtrr[0].handle = -1;
        dev_priv->mtrr[1].handle = -1;
        dev_priv->mtrr[2].handle = -1;
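        /* On the Savage3D series everything lives in one 128 MB BAR:
         * frame buffer at the start, MMIO registers right behind it,
         * and the aperture higher up. MTRR ranges must be power-of-two
         * sized and aligned, so covering the write-combining parts
         * while skipping the MMIO hole takes three separate ranges.
         */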
        if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
                fb_rsrc = 0;
                fb_base = drm_get_resource_start(dev, 0);
                fb_size = SAVAGE_FB_SIZE_S3;
                mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
                aper_rsrc = 0;
                aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
                /* this should always be true */
                if (drm_get_resource_len(dev, 0) == 0x08000000) {
                        /* Don't make MMIO write-combining! We need 3
                         * MTRRs. */
                        dev_priv->mtrr[0].base = fb_base;
                        dev_priv->mtrr[0].size = 0x01000000;
                        dev_priv->mtrr[0].handle =
                            drm_mtrr_add(dev_priv->mtrr[0].base,
                                         dev_priv->mtrr[0].size, DRM_MTRR_WC);
                        dev_priv->mtrr[1].base = fb_base + 0x02000000;
                        dev_priv->mtrr[1].size = 0x02000000;
                        dev_priv->mtrr[1].handle =
                            drm_mtrr_add(dev_priv->mtrr[1].base,
                                         dev_priv->mtrr[1].size, DRM_MTRR_WC);
                        dev_priv->mtrr[2].base = fb_base + 0x04000000;
                        dev_priv->mtrr[2].size = 0x04000000;
                        dev_priv->mtrr[2].handle =
                            drm_mtrr_add(dev_priv->mtrr[2].base,
                                         dev_priv->mtrr[2].size, DRM_MTRR_WC);
                } else {
                        DRM_ERROR("strange pci_resource_len %08lx\n",
                                  drm_get_resource_len(dev, 0));
                }
        } else if (dev_priv->chipset != S3_SUPERSAVAGE &&
                   dev_priv->chipset != S3_SAVAGE2000) {
                mmio_base = drm_get_resource_start(dev, 0);
                fb_rsrc = 1;
                fb_base = drm_get_resource_start(dev, 1);
                fb_size = SAVAGE_FB_SIZE_S4;
                aper_rsrc = 1;
                aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
                /* this should always be true */
                if (drm_get_resource_len(dev, 1) == 0x08000000) {
                        /* Can use one MTRR to cover both fb and
                         * aperture. */
                        dev_priv->mtrr[0].base = fb_base;
                        dev_priv->mtrr[0].size = 0x08000000;
                        dev_priv->mtrr[0].handle =
                            drm_mtrr_add(dev_priv->mtrr[0].base,
                                         dev_priv->mtrr[0].size, DRM_MTRR_WC);
                } else {
                        DRM_ERROR("strange pci_resource_len %08lx\n",
                                  drm_get_resource_len(dev, 1));
                }
        } else {
                mmio_base = drm_get_resource_start(dev, 0);
                fb_rsrc = 1;
                fb_base = drm_get_resource_start(dev, 1);
                fb_size = drm_get_resource_len(dev, 1);
                aper_rsrc = 2;
                aperture_base = drm_get_resource_start(dev, 2);
                /* Automatic MTRR setup will do the right thing. */
        }

        ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
                         _DRM_READ_ONLY, &dev_priv->mmio);
        if (ret)
                return ret;

        ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
                         _DRM_WRITE_COMBINING, &dev_priv->fb);
        if (ret)
                return ret;

        ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
                         _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
                         &dev_priv->aperture);
        if (ret)
                return ret;

        return ret;
}

/*
 * Delete MTRRs and free device-private data.
 */
void savage_driver_lastclose(struct drm_device *dev)
{
        drm_savage_private_t *dev_priv = dev->dev_private;
        int i;

        for (i = 0; i < 3; ++i)
                if (dev_priv->mtrr[i].handle >= 0)
                        drm_mtrr_del(dev_priv->mtrr[i].handle,
                                     dev_priv->mtrr[i].base,
                                     dev_priv->mtrr[i].size, DRM_MTRR_WC);
}

int savage_driver_unload(struct drm_device *dev)
{
        drm_savage_private_t *dev_priv = dev->dev_private;

        drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER);

        return 0;
}

static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
{
        drm_savage_private_t *dev_priv = dev->dev_private;

        if (init->fb_bpp != 16 && init->fb_bpp != 32) {
                DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
                return -EINVAL;
        }
        if (init->depth_bpp != 16 && init->depth_bpp != 32) {
                DRM_ERROR("invalid depth buffer bpp %d!\n", init->depth_bpp);
                return -EINVAL;
        }
        if (init->dma_type != SAVAGE_DMA_AGP &&
            init->dma_type != SAVAGE_DMA_PCI) {
                DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
                return -EINVAL;
        }

        dev_priv->cob_size = init->cob_size;
        dev_priv->bci_threshold_lo = init->bci_threshold_lo;
        dev_priv->bci_threshold_hi = init->bci_threshold_hi;
        dev_priv->dma_type = init->dma_type;

        dev_priv->fb_bpp = init->fb_bpp;
        dev_priv->front_offset = init->front_offset;
        dev_priv->front_pitch = init->front_pitch;
        dev_priv->back_offset = init->back_offset;
        dev_priv->back_pitch = init->back_pitch;
        dev_priv->depth_bpp = init->depth_bpp;
        dev_priv->depth_offset = init->depth_offset;
        dev_priv->depth_pitch = init->depth_pitch;

        dev_priv->texture_offset = init->texture_offset;
        dev_priv->texture_size = init->texture_size;

        dev_priv->sarea = drm_getsarea(dev);
        if (!dev_priv->sarea) {
                DRM_ERROR("could not find sarea!\n");
                savage_do_cleanup_bci(dev);
                return -EINVAL;
        }
        if (init->status_offset != 0) {
                dev_priv->status = drm_core_findmap(dev, init->status_offset);
                if (!dev_priv->status) {
                        DRM_ERROR("could not find shadow status region!\n");
                        savage_do_cleanup_bci(dev);
                        return -EINVAL;
                }
        } else {
                dev_priv->status = NULL;
        }
        if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
                dev->agp_buffer_token = init->buffers_offset;
                dev->agp_buffer_map = drm_core_findmap(dev,
                                                       init->buffers_offset);
                if (!dev->agp_buffer_map) {
                        DRM_ERROR("could not find DMA buffer region!\n");
                        savage_do_cleanup_bci(dev);
                        return -EINVAL;
                }
                drm_core_ioremap(dev->agp_buffer_map, dev);
                if (!dev->agp_buffer_map->handle) {
                        DRM_ERROR("failed to ioremap DMA buffer region!\n");
                        savage_do_cleanup_bci(dev);
                        return -ENOMEM;
                }
        }
        if (init->agp_textures_offset) {
                dev_priv->agp_textures =
                    drm_core_findmap(dev, init->agp_textures_offset);
                if (!dev_priv->agp_textures) {
                        DRM_ERROR("could not find agp texture region!\n");
                        savage_do_cleanup_bci(dev);
                        return -EINVAL;
                }
        } else {
                dev_priv->agp_textures = NULL;
        }

        if (init->cmd_dma_offset) {
                if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
                        DRM_ERROR("command DMA not supported on "
                                  "Savage3D/MX/IX.\n");
                        savage_do_cleanup_bci(dev);
                        return -EINVAL;
                }
                if (dev->dma && dev->dma->buflist) {
                        DRM_ERROR("command and vertex DMA not supported "
                                  "at the same time.\n");
                        savage_do_cleanup_bci(dev);
                        return -EINVAL;
                }
                dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
                if (!dev_priv->cmd_dma) {
                        DRM_ERROR("could not find command DMA region!\n");
                        savage_do_cleanup_bci(dev);
                        return -EINVAL;
                }
                if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
                        if (dev_priv->cmd_dma->type != _DRM_AGP) {
                                DRM_ERROR("AGP command DMA region is not a "
                                          "_DRM_AGP map!\n");
                                savage_do_cleanup_bci(dev);
                                return -EINVAL;
                        }
                        drm_core_ioremap(dev_priv->cmd_dma, dev);
                        if (!dev_priv->cmd_dma->handle) {
                                DRM_ERROR("failed to ioremap command "
                                          "DMA region!\n");
                                savage_do_cleanup_bci(dev);
                                return -ENOMEM;
                        }
                } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
                        DRM_ERROR("PCI command DMA region is not a "
                                  "_DRM_CONSISTENT map!\n");
                        savage_do_cleanup_bci(dev);
                        return -EINVAL;
                }
        } else {
                dev_priv->cmd_dma = NULL;
        }

        dev_priv->dma_flush = savage_dma_flush;
        if (!dev_priv->cmd_dma) {
                DRM_DEBUG("falling back to faked command DMA.\n");
                dev_priv->fake_dma.offset = 0;
                dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
                dev_priv->fake_dma.type = _DRM_SHM;
                dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE,
                                                      DRM_MEM_DRIVER);
                if (!dev_priv->fake_dma.handle) {
                        DRM_ERROR("could not allocate faked DMA buffer!\n");
                        savage_do_cleanup_bci(dev);
                        return -ENOMEM;
                }
                dev_priv->cmd_dma = &dev_priv->fake_dma;
                dev_priv->dma_flush = savage_fake_dma_flush;
        }

        dev_priv->sarea_priv =
            (drm_savage_sarea_t *) ((uint8_t *) dev_priv->sarea->handle +
                                    init->sarea_priv_offset);

        /* setup bitmap descriptors */
        {
                unsigned int color_tile_format;
                unsigned int depth_tile_format;
                unsigned int front_stride, back_stride, depth_stride;
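                /* A bitmap descriptor packs the stride in pixels, the
                 * bits per pixel and the tiling format into a single
                 * register value for each of the three buffers.
                 */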
                if (dev_priv->chipset <= S3_SAVAGE4) {
                        color_tile_format = dev_priv->fb_bpp == 16 ?
                            SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
                        depth_tile_format = dev_priv->depth_bpp == 16 ?
                            SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
                } else {
                        color_tile_format = SAVAGE_BD_TILE_DEST;
                        depth_tile_format = SAVAGE_BD_TILE_DEST;
                }
                front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
                back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
                depth_stride =
                    dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);

                dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
                    (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
                    (color_tile_format << SAVAGE_BD_TILE_SHIFT);

                dev_priv->back_bd = back_stride | SAVAGE_BD_BW_DISABLE |
                    (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
                    (color_tile_format << SAVAGE_BD_TILE_SHIFT);

                dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
                    (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
                    (depth_tile_format << SAVAGE_BD_TILE_SHIFT);
        }

        /* setup status and bci ptr */
        dev_priv->event_counter = 0;
        dev_priv->event_wrap = 0;
        dev_priv->bci_ptr = (volatile uint32_t *)
            ((uint8_t *) dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
        if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
                dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
        } else {
                dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
        }
        if (dev_priv->status != NULL) {
                dev_priv->status_ptr =
                    (volatile uint32_t *)dev_priv->status->handle;
                dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
                dev_priv->wait_evnt = savage_bci_wait_event_shadow;
                dev_priv->status_ptr[1023] = dev_priv->event_counter;
        } else {
                dev_priv->status_ptr = NULL;
                if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
                        dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
                } else {
                        dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
                }
                dev_priv->wait_evnt = savage_bci_wait_event_reg;
        }

        /* cliprect functions */
        if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
                dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
        else
                dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;

        if (savage_freelist_init(dev) < 0) {
                DRM_ERROR("could not initialize freelist\n");
                savage_do_cleanup_bci(dev);
                return -ENOMEM;
        }

        if (savage_dma_init(dev_priv) < 0) {
                DRM_ERROR("could not initialize command DMA\n");
                savage_do_cleanup_bci(dev);
                return -ENOMEM;
        }

        return 0;
}

static int savage_do_cleanup_bci(struct drm_device * dev)
{
        drm_savage_private_t *dev_priv = dev->dev_private;

        if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
                if (dev_priv->fake_dma.handle)
                        drm_free(dev_priv->fake_dma.handle,
                                 SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER);
        } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
                   dev_priv->cmd_dma->type == _DRM_AGP &&
                   dev_priv->dma_type == SAVAGE_DMA_AGP)
                drm_core_ioremapfree(dev_priv->cmd_dma, dev);

        if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
            dev->agp_buffer_map && dev->agp_buffer_map->handle) {
                drm_core_ioremapfree(dev->agp_buffer_map, dev);
                /* make sure the next instance (which may be running
                 * in PCI mode) doesn't try to use an old
                 * agp_buffer_map. */
                dev->agp_buffer_map = NULL;
        }

        if (dev_priv->dma_pages)
                drm_free(dev_priv->dma_pages,
                         sizeof(drm_savage_dma_page_t) * dev_priv->nr_dma_pages,
                         DRM_MEM_DRIVER);

        return 0;
}

static int savage_bci_init(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        drm_savage_init_t *init = data;

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        switch (init->func) {
        case SAVAGE_INIT_BCI:
                return savage_do_init_bci(dev, init);
        case SAVAGE_CLEANUP_BCI:
                return savage_do_cleanup_bci(dev);
        }

        return -EINVAL;
}

static int savage_bci_event_emit(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        drm_savage_private_t *dev_priv = dev->dev_private;
        drm_savage_event_emit_t *event = data;

        DRM_DEBUG("\n");

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        event->count = savage_bci_emit_event(dev_priv, event->flags);
        event->count |= dev_priv->event_wrap << 16;

        return 0;
}

static int savage_bci_event_wait(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        drm_savage_private_t *dev_priv = dev->dev_private;
        drm_savage_event_wait_t *event = data;
        unsigned int event_e, hw_e;
        unsigned int event_w, hw_w;

        DRM_DEBUG("\n");

        UPDATE_EVENT_COUNTER();
        if (dev_priv->status_ptr)
                hw_e = dev_priv->status_ptr[1] & 0xffff;
        else
                hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
        hw_w = dev_priv->event_wrap;
        if (hw_e > dev_priv->event_counter)
                hw_w--;         /* hardware hasn't passed the last wrap yet */

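        /* event->count packs the wrap count in the high 16 bits and the
         * event tag in the low 16 bits, as written by
         * savage_bci_event_emit above.
         */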
        event_e = event->count & 0xffff;
        event_w = event->count >> 16;

        /* Don't need to wait if
         * - event counter wrapped since the event was emitted or
         * - the hardware has advanced up to or over the event to wait for.
         */
        if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))
                return 0;
        else
                return dev_priv->wait_evnt(dev_priv, event_e);
}

/*
 * DMA buffer management
 */

static int savage_bci_get_buffers(struct drm_device *dev,
                                  struct drm_file *file_priv,
                                  struct drm_dma *d)
{
        struct drm_buf *buf;
        int i;

        for (i = d->granted_count; i < d->request_count; i++) {
                buf = savage_freelist_get(dev);
                if (!buf)
                        return -EAGAIN;

                buf->file_priv = file_priv;

                if (DRM_COPY_TO_USER(&d->request_indices[i],
                                     &buf->idx, sizeof(buf->idx)))
                        return -EFAULT;
                if (DRM_COPY_TO_USER(&d->request_sizes[i],
                                     &buf->total, sizeof(buf->total)))
                        return -EFAULT;

                d->granted_count++;
        }
        return 0;
}

int savage_bci_buffers(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_device_dma *dma = dev->dma;
        struct drm_dma *d = data;
        int ret = 0;

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        /* Please don't send us buffers.
         */
        if (d->send_count != 0) {
                DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
                          DRM_CURRENTPID, d->send_count);
                return -EINVAL;
        }

        /* We'll send you buffers.
         */
        if (d->request_count < 0 || d->request_count > dma->buf_count) {
                DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
                          DRM_CURRENTPID, d->request_count, dma->buf_count);
                return -EINVAL;
        }

        d->granted_count = 0;

        if (d->request_count) {
                ret = savage_bci_get_buffers(dev, file_priv, d);
        }

        return ret;
}

void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
{
        struct drm_device_dma *dma = dev->dma;
        drm_savage_private_t *dev_priv = dev->dev_private;
        int i;

        if (!dma)
                return;
        if (!dev_priv)
                return;
        if (!dma->buflist)
                return;

        /*i830_flush_queue(dev); */

        for (i = 0; i < dma->buf_count; i++) {
                struct drm_buf *buf = dma->buflist[i];
                drm_savage_buf_priv_t *buf_priv = buf->dev_private;

                if (buf->file_priv == file_priv && buf_priv &&
                    buf_priv->next == NULL && buf_priv->prev == NULL) {
                        uint16_t event;
                        DRM_DEBUG("reclaimed from client\n");
                        event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
                        SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
                        savage_freelist_put(dev, buf);
                }
        }

        drm_core_reclaim_buffers(dev, file_priv);
}

struct drm_ioctl_desc savage_ioctls[] = {
        DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
};

int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);