Diffstat (limited to 'drivers/gpu/drm/savage')
-rw-r--r--   drivers/gpu/drm/savage/Makefile          9
-rw-r--r--   drivers/gpu/drm/savage/savage_bci.c   1095
-rw-r--r--   drivers/gpu/drm/savage/savage_drv.c     88
-rw-r--r--   drivers/gpu/drm/savage/savage_drv.h    575
-rw-r--r--   drivers/gpu/drm/savage/savage_state.c 1163
5 files changed, 2930 insertions, 0 deletions
diff --git a/drivers/gpu/drm/savage/Makefile b/drivers/gpu/drm/savage/Makefile
new file mode 100644
index 000000000000..d8f84ac7bb26
--- /dev/null
+++ b/drivers/gpu/drm/savage/Makefile
@@ -0,0 +1,9 @@
1 | # | ||
2 | # Makefile for the drm device driver. This driver provides support for the | ||
3 | # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. | ||
4 | |||
5 | ccflags-y = -Iinclude/drm | ||
6 | savage-y := savage_drv.o savage_bci.o savage_state.o | ||
7 | |||
8 | obj-$(CONFIG_DRM_SAVAGE) += savage.o | ||
9 | |||
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
new file mode 100644
index 000000000000..d465b2f9c1cd
--- /dev/null
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -0,0 +1,1095 @@
1 | /* savage_bci.c -- BCI support for Savage | ||
2 | * | ||
3 | * Copyright 2004 Felix Kuehling | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sub license, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial portions | ||
15 | * of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
20 | * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR | ||
21 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF | ||
22 | * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | */ | ||
25 | #include "drmP.h" | ||
26 | #include "savage_drm.h" | ||
27 | #include "savage_drv.h" | ||
28 | |||
29 | /* Need a long timeout because shadow status updates can take a while | ||
30 | * and so can waiting for events when the queue is full. */ | ||
31 | #define SAVAGE_DEFAULT_USEC_TIMEOUT 1000000 /* 1s */ | ||
32 | #define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */ | ||
33 | #define SAVAGE_FREELIST_DEBUG 0 | ||
34 | |||
35 | static int savage_do_cleanup_bci(struct drm_device *dev); | ||
36 | |||
37 | static int | ||
38 | savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n) | ||
39 | { | ||
40 | uint32_t mask = dev_priv->status_used_mask; | ||
41 | uint32_t threshold = dev_priv->bci_threshold_hi; | ||
42 | uint32_t status; | ||
43 | int i; | ||
44 | |||
45 | #if SAVAGE_BCI_DEBUG | ||
46 | if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold) | ||
47 | DRM_ERROR("Trying to emit %d words " | ||
48 | "(more than guaranteed space in COB)\n", n); | ||
49 | #endif | ||
50 | |||
51 | for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) { | ||
52 | DRM_MEMORYBARRIER(); | ||
53 | status = dev_priv->status_ptr[0]; | ||
54 | if ((status & mask) < threshold) | ||
55 | return 0; | ||
56 | DRM_UDELAY(1); | ||
57 | } | ||
58 | |||
59 | #if SAVAGE_BCI_DEBUG | ||
60 | DRM_ERROR("failed!\n"); | ||
61 | DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold); | ||
62 | #endif | ||
63 | return -EBUSY; | ||
64 | } | ||
65 | |||
66 | static int | ||
67 | savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n) | ||
68 | { | ||
69 | uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n; | ||
70 | uint32_t status; | ||
71 | int i; | ||
72 | |||
73 | for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) { | ||
74 | status = SAVAGE_READ(SAVAGE_STATUS_WORD0); | ||
75 | if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed) | ||
76 | return 0; | ||
77 | DRM_UDELAY(1); | ||
78 | } | ||
79 | |||
80 | #if SAVAGE_BCI_DEBUG | ||
81 | DRM_ERROR("failed!\n"); | ||
82 | DRM_INFO(" status=0x%08x\n", status); | ||
83 | #endif | ||
84 | return -EBUSY; | ||
85 | } | ||
86 | |||
87 | static int | ||
88 | savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n) | ||
89 | { | ||
90 | uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n; | ||
91 | uint32_t status; | ||
92 | int i; | ||
93 | |||
94 | for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) { | ||
95 | status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0); | ||
96 | if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed) | ||
97 | return 0; | ||
98 | DRM_UDELAY(1); | ||
99 | } | ||
100 | |||
101 | #if SAVAGE_BCI_DEBUG | ||
102 | DRM_ERROR("failed!\n"); | ||
103 | DRM_INFO(" status=0x%08x\n", status); | ||
104 | #endif | ||
105 | return -EBUSY; | ||
106 | } | ||
107 | |||
108 | /* | ||
109 | * Waiting for events. | ||
110 | * | ||
111 | * The BIOS resets the event tag to 0 on mode changes. Therefore we | ||
112 | * never emit 0 to the event tag. If we find a 0 event tag we know the | ||
113 | * BIOS stomped on it and return success assuming that the BIOS waited | ||
114 | * for engine idle. | ||
115 | * | ||
116 | * Note: if the Xserver uses the event tag it has to follow the same | ||
117 | * rule. Otherwise there may be glitches every 2^16 events. | ||
118 | */ | ||
119 | static int | ||
120 | savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e) | ||
121 | { | ||
122 | uint32_t status; | ||
123 | int i; | ||
124 | |||
125 | for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) { | ||
126 | DRM_MEMORYBARRIER(); | ||
127 | status = dev_priv->status_ptr[1]; | ||
128 | if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff || | ||
129 | (status & 0xffff) == 0) | ||
130 | return 0; | ||
131 | DRM_UDELAY(1); | ||
132 | } | ||
133 | |||
134 | #if SAVAGE_BCI_DEBUG | ||
135 | DRM_ERROR("failed!\n"); | ||
136 | DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); | ||
137 | #endif | ||
138 | |||
139 | return -EBUSY; | ||
140 | } | ||
141 | |||
142 | static int | ||
143 | savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e) | ||
144 | { | ||
145 | uint32_t status; | ||
146 | int i; | ||
147 | |||
148 | for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) { | ||
149 | status = SAVAGE_READ(SAVAGE_STATUS_WORD1); | ||
150 | if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff || | ||
151 | (status & 0xffff) == 0) | ||
152 | return 0; | ||
153 | DRM_UDELAY(1); | ||
154 | } | ||
155 | |||
156 | #if SAVAGE_BCI_DEBUG | ||
157 | DRM_ERROR("failed!\n"); | ||
158 | DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); | ||
159 | #endif | ||
160 | |||
161 | return -EBUSY; | ||
162 | } | ||
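
The two wait functions above both rely on the wrap-safe 16-bit comparison described in the comment before them: the hardware tag is considered to have reached the awaited event when the unsigned 16-bit difference between tag and event is at most 0x7fff. A minimal standalone sketch of that comparison follows; it is illustrative only, not part of the driver, and the helper name is made up.

/* event_passed() mirrors the test in savage_bci_wait_event_shadow/_reg:
 * tag has reached e iff (tag - e) mod 2^16 lies in [0, 0x7fff]. */
#include <stdint.h>
#include <stdio.h>

static int event_passed(uint16_t tag, uint16_t e)
{
	return (uint16_t)(tag - e) <= 0x7fff;
}

int main(void)
{
	printf("%d\n", event_passed(0x0005, 0xfffe)); /* 1: counter wrapped past e */
	printf("%d\n", event_passed(0x1234, 0x1234)); /* 1: exactly at e */
	printf("%d\n", event_passed(0x0001, 0x4000)); /* 0: e not reached yet */
	return 0;
}
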
163 | |||
164 | uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv, | ||
165 | unsigned int flags) | ||
166 | { | ||
167 | uint16_t count; | ||
168 | BCI_LOCALS; | ||
169 | |||
170 | if (dev_priv->status_ptr) { | ||
171 | /* coordinate with Xserver */ | ||
172 | count = dev_priv->status_ptr[1023]; | ||
173 | if (count < dev_priv->event_counter) | ||
174 | dev_priv->event_wrap++; | ||
175 | } else { | ||
176 | count = dev_priv->event_counter; | ||
177 | } | ||
178 | count = (count + 1) & 0xffff; | ||
179 | if (count == 0) { | ||
180 | count++; /* See the comment above savage_wait_event_*. */ | ||
181 | dev_priv->event_wrap++; | ||
182 | } | ||
183 | dev_priv->event_counter = count; | ||
184 | if (dev_priv->status_ptr) | ||
185 | dev_priv->status_ptr[1023] = (uint32_t) count; | ||
186 | |||
187 | if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) { | ||
188 | unsigned int wait_cmd = BCI_CMD_WAIT; | ||
189 | if ((flags & SAVAGE_WAIT_2D)) | ||
190 | wait_cmd |= BCI_CMD_WAIT_2D; | ||
191 | if ((flags & SAVAGE_WAIT_3D)) | ||
192 | wait_cmd |= BCI_CMD_WAIT_3D; | ||
193 | BEGIN_BCI(2); | ||
194 | BCI_WRITE(wait_cmd); | ||
195 | } else { | ||
196 | BEGIN_BCI(1); | ||
197 | } | ||
198 | BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t) count); | ||
199 | |||
200 | return count; | ||
201 | } | ||
202 | |||
203 | /* | ||
204 | * Freelist management | ||
205 | */ | ||
206 | static int savage_freelist_init(struct drm_device * dev) | ||
207 | { | ||
208 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
209 | struct drm_device_dma *dma = dev->dma; | ||
210 | struct drm_buf *buf; | ||
211 | drm_savage_buf_priv_t *entry; | ||
212 | int i; | ||
213 | DRM_DEBUG("count=%d\n", dma->buf_count); | ||
214 | |||
215 | dev_priv->head.next = &dev_priv->tail; | ||
216 | dev_priv->head.prev = NULL; | ||
217 | dev_priv->head.buf = NULL; | ||
218 | |||
219 | dev_priv->tail.next = NULL; | ||
220 | dev_priv->tail.prev = &dev_priv->head; | ||
221 | dev_priv->tail.buf = NULL; | ||
222 | |||
223 | for (i = 0; i < dma->buf_count; i++) { | ||
224 | buf = dma->buflist[i]; | ||
225 | entry = buf->dev_private; | ||
226 | |||
227 | SET_AGE(&entry->age, 0, 0); | ||
228 | entry->buf = buf; | ||
229 | |||
230 | entry->next = dev_priv->head.next; | ||
231 | entry->prev = &dev_priv->head; | ||
232 | dev_priv->head.next->prev = entry; | ||
233 | dev_priv->head.next = entry; | ||
234 | } | ||
235 | |||
236 | return 0; | ||
237 | } | ||
238 | |||
239 | static struct drm_buf *savage_freelist_get(struct drm_device * dev) | ||
240 | { | ||
241 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
242 | drm_savage_buf_priv_t *tail = dev_priv->tail.prev; | ||
243 | uint16_t event; | ||
244 | unsigned int wrap; | ||
245 | DRM_DEBUG("\n"); | ||
246 | |||
247 | UPDATE_EVENT_COUNTER(); | ||
248 | if (dev_priv->status_ptr) | ||
249 | event = dev_priv->status_ptr[1] & 0xffff; | ||
250 | else | ||
251 | event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; | ||
252 | wrap = dev_priv->event_wrap; | ||
253 | if (event > dev_priv->event_counter) | ||
254 | wrap--; /* hardware hasn't passed the last wrap yet */ | ||
255 | |||
256 | DRM_DEBUG(" tail=0x%04x %d\n", tail->age.event, tail->age.wrap); | ||
257 | DRM_DEBUG(" head=0x%04x %d\n", event, wrap); | ||
258 | |||
259 | if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) { | ||
260 | drm_savage_buf_priv_t *next = tail->next; | ||
261 | drm_savage_buf_priv_t *prev = tail->prev; | ||
262 | prev->next = next; | ||
263 | next->prev = prev; | ||
264 | tail->next = tail->prev = NULL; | ||
265 | return tail->buf; | ||
266 | } | ||
267 | |||
268 | DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf); | ||
269 | return NULL; | ||
270 | } | ||
271 | |||
272 | void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf) | ||
273 | { | ||
274 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
275 | drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next; | ||
276 | |||
277 | DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap); | ||
278 | |||
279 | if (entry->next != NULL || entry->prev != NULL) { | ||
280 | DRM_ERROR("entry already on freelist.\n"); | ||
281 | return; | ||
282 | } | ||
283 | |||
284 | prev = &dev_priv->head; | ||
285 | next = prev->next; | ||
286 | prev->next = entry; | ||
287 | next->prev = entry; | ||
288 | entry->prev = prev; | ||
289 | entry->next = next; | ||
290 | } | ||
291 | |||
292 | /* | ||
293 | * Command DMA | ||
294 | */ | ||
295 | static int savage_dma_init(drm_savage_private_t * dev_priv) | ||
296 | { | ||
297 | unsigned int i; | ||
298 | |||
299 | dev_priv->nr_dma_pages = dev_priv->cmd_dma->size / | ||
300 | (SAVAGE_DMA_PAGE_SIZE * 4); | ||
301 | dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) * | ||
302 | dev_priv->nr_dma_pages, DRM_MEM_DRIVER); | ||
303 | if (dev_priv->dma_pages == NULL) | ||
304 | return -ENOMEM; | ||
305 | |||
306 | for (i = 0; i < dev_priv->nr_dma_pages; ++i) { | ||
307 | SET_AGE(&dev_priv->dma_pages[i].age, 0, 0); | ||
308 | dev_priv->dma_pages[i].used = 0; | ||
309 | dev_priv->dma_pages[i].flushed = 0; | ||
310 | } | ||
311 | SET_AGE(&dev_priv->last_dma_age, 0, 0); | ||
312 | |||
313 | dev_priv->first_dma_page = 0; | ||
314 | dev_priv->current_dma_page = 0; | ||
315 | |||
316 | return 0; | ||
317 | } | ||
318 | |||
319 | void savage_dma_reset(drm_savage_private_t * dev_priv) | ||
320 | { | ||
321 | uint16_t event; | ||
322 | unsigned int wrap, i; | ||
323 | event = savage_bci_emit_event(dev_priv, 0); | ||
324 | wrap = dev_priv->event_wrap; | ||
325 | for (i = 0; i < dev_priv->nr_dma_pages; ++i) { | ||
326 | SET_AGE(&dev_priv->dma_pages[i].age, event, wrap); | ||
327 | dev_priv->dma_pages[i].used = 0; | ||
328 | dev_priv->dma_pages[i].flushed = 0; | ||
329 | } | ||
330 | SET_AGE(&dev_priv->last_dma_age, event, wrap); | ||
331 | dev_priv->first_dma_page = dev_priv->current_dma_page = 0; | ||
332 | } | ||
333 | |||
334 | void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page) | ||
335 | { | ||
336 | uint16_t event; | ||
337 | unsigned int wrap; | ||
338 | |||
339 | /* Faked DMA buffer pages don't age. */ | ||
340 | if (dev_priv->cmd_dma == &dev_priv->fake_dma) | ||
341 | return; | ||
342 | |||
343 | UPDATE_EVENT_COUNTER(); | ||
344 | if (dev_priv->status_ptr) | ||
345 | event = dev_priv->status_ptr[1] & 0xffff; | ||
346 | else | ||
347 | event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; | ||
348 | wrap = dev_priv->event_wrap; | ||
349 | if (event > dev_priv->event_counter) | ||
350 | wrap--; /* hardware hasn't passed the last wrap yet */ | ||
351 | |||
352 | if (dev_priv->dma_pages[page].age.wrap > wrap || | ||
353 | (dev_priv->dma_pages[page].age.wrap == wrap && | ||
354 | dev_priv->dma_pages[page].age.event > event)) { | ||
355 | if (dev_priv->wait_evnt(dev_priv, | ||
356 | dev_priv->dma_pages[page].age.event) | ||
357 | < 0) | ||
358 | DRM_ERROR("wait_evnt failed!\n"); | ||
359 | } | ||
360 | } | ||
361 | |||
362 | uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, unsigned int n) | ||
363 | { | ||
364 | unsigned int cur = dev_priv->current_dma_page; | ||
365 | unsigned int rest = SAVAGE_DMA_PAGE_SIZE - | ||
366 | dev_priv->dma_pages[cur].used; | ||
367 | unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) / | ||
368 | SAVAGE_DMA_PAGE_SIZE; | ||
369 | uint32_t *dma_ptr; | ||
370 | unsigned int i; | ||
371 | |||
372 | DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n", | ||
373 | cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages); | ||
374 | |||
375 | if (cur + nr_pages < dev_priv->nr_dma_pages) { | ||
376 | dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle + | ||
377 | cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used; | ||
378 | if (n < rest) | ||
379 | rest = n; | ||
380 | dev_priv->dma_pages[cur].used += rest; | ||
381 | n -= rest; | ||
382 | cur++; | ||
383 | } else { | ||
384 | dev_priv->dma_flush(dev_priv); | ||
385 | nr_pages = | ||
386 | (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE; | ||
387 | for (i = cur; i < dev_priv->nr_dma_pages; ++i) { | ||
388 | dev_priv->dma_pages[i].age = dev_priv->last_dma_age; | ||
389 | dev_priv->dma_pages[i].used = 0; | ||
390 | dev_priv->dma_pages[i].flushed = 0; | ||
391 | } | ||
392 | dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle; | ||
393 | dev_priv->first_dma_page = cur = 0; | ||
394 | } | ||
395 | for (i = cur; nr_pages > 0; ++i, --nr_pages) { | ||
396 | #if SAVAGE_DMA_DEBUG | ||
397 | if (dev_priv->dma_pages[i].used) { | ||
398 | DRM_ERROR("unflushed page %u: used=%u\n", | ||
399 | i, dev_priv->dma_pages[i].used); | ||
400 | } | ||
401 | #endif | ||
402 | if (n > SAVAGE_DMA_PAGE_SIZE) | ||
403 | dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE; | ||
404 | else | ||
405 | dev_priv->dma_pages[i].used = n; | ||
406 | n -= SAVAGE_DMA_PAGE_SIZE; | ||
407 | } | ||
408 | dev_priv->current_dma_page = --i; | ||
409 | |||
410 | DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n", | ||
411 | i, dev_priv->dma_pages[i].used, n); | ||
412 | |||
413 | savage_dma_wait(dev_priv, dev_priv->current_dma_page); | ||
414 | |||
415 | return dma_ptr; | ||
416 | } | ||
417 | |||
418 | static void savage_dma_flush(drm_savage_private_t * dev_priv) | ||
419 | { | ||
420 | unsigned int first = dev_priv->first_dma_page; | ||
421 | unsigned int cur = dev_priv->current_dma_page; | ||
422 | uint16_t event; | ||
423 | unsigned int wrap, pad, align, len, i; | ||
424 | unsigned long phys_addr; | ||
425 | BCI_LOCALS; | ||
426 | |||
427 | if (first == cur && | ||
428 | dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed) | ||
429 | return; | ||
430 | |||
431 | /* pad length to multiples of 2 entries | ||
432 | * align start of next DMA block to multiples of 8 entries */ | ||
433 | pad = -dev_priv->dma_pages[cur].used & 1; | ||
434 | align = -(dev_priv->dma_pages[cur].used + pad) & 7; | ||
435 | |||
436 | DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, " | ||
437 | "pad=%u, align=%u\n", | ||
438 | first, cur, dev_priv->dma_pages[first].flushed, | ||
439 | dev_priv->dma_pages[cur].used, pad, align); | ||
440 | |||
441 | /* pad with noops */ | ||
442 | if (pad) { | ||
443 | uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle + | ||
444 | cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used; | ||
445 | dev_priv->dma_pages[cur].used += pad; | ||
446 | while (pad != 0) { | ||
447 | *dma_ptr++ = BCI_CMD_WAIT; | ||
448 | pad--; | ||
449 | } | ||
450 | } | ||
451 | |||
452 | DRM_MEMORYBARRIER(); | ||
453 | |||
454 | /* do flush ... */ | ||
455 | phys_addr = dev_priv->cmd_dma->offset + | ||
456 | (first * SAVAGE_DMA_PAGE_SIZE + | ||
457 | dev_priv->dma_pages[first].flushed) * 4; | ||
458 | len = (cur - first) * SAVAGE_DMA_PAGE_SIZE + | ||
459 | dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed; | ||
460 | |||
461 | DRM_DEBUG("phys_addr=%lx, len=%u\n", | ||
462 | phys_addr | dev_priv->dma_type, len); | ||
463 | |||
464 | BEGIN_BCI(3); | ||
465 | BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1); | ||
466 | BCI_WRITE(phys_addr | dev_priv->dma_type); | ||
467 | BCI_DMA(len); | ||
468 | |||
469 | /* fix alignment of the start of the next block */ | ||
470 | dev_priv->dma_pages[cur].used += align; | ||
471 | |||
472 | /* age DMA pages */ | ||
473 | event = savage_bci_emit_event(dev_priv, 0); | ||
474 | wrap = dev_priv->event_wrap; | ||
475 | for (i = first; i < cur; ++i) { | ||
476 | SET_AGE(&dev_priv->dma_pages[i].age, event, wrap); | ||
477 | dev_priv->dma_pages[i].used = 0; | ||
478 | dev_priv->dma_pages[i].flushed = 0; | ||
479 | } | ||
480 | /* age the current page only when it's full */ | ||
481 | if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) { | ||
482 | SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap); | ||
483 | dev_priv->dma_pages[cur].used = 0; | ||
484 | dev_priv->dma_pages[cur].flushed = 0; | ||
485 | /* advance to next page */ | ||
486 | cur++; | ||
487 | if (cur == dev_priv->nr_dma_pages) | ||
488 | cur = 0; | ||
489 | dev_priv->first_dma_page = dev_priv->current_dma_page = cur; | ||
490 | } else { | ||
491 | dev_priv->first_dma_page = cur; | ||
492 | dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used; | ||
493 | } | ||
494 | SET_AGE(&dev_priv->last_dma_age, event, wrap); | ||
495 | |||
496 | DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur, | ||
497 | dev_priv->dma_pages[cur].used, | ||
498 | dev_priv->dma_pages[cur].flushed); | ||
499 | } | ||
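
The pad and align values computed near the top of savage_dma_flush() use the usual two's-complement rounding trick: negating an unsigned count and masking gives the distance up to the next multiple of a power of two. A small standalone sketch (not driver code) of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int used = 21;                  /* example fill level of the current page */
	unsigned int pad = -used & 1;            /* 1: pad 21 entries up to 22 (multiple of 2) */
	unsigned int align = -(used + pad) & 7;  /* 2: align 22 up to 24 (multiple of 8) */

	printf("used=%u pad=%u align=%u, next block starts at %u\n",
	       used, pad, align, used + pad + align);
	return 0;
}
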
500 | |||
501 | static void savage_fake_dma_flush(drm_savage_private_t * dev_priv) | ||
502 | { | ||
503 | unsigned int i, j; | ||
504 | BCI_LOCALS; | ||
505 | |||
506 | if (dev_priv->first_dma_page == dev_priv->current_dma_page && | ||
507 | dev_priv->dma_pages[dev_priv->current_dma_page].used == 0) | ||
508 | return; | ||
509 | |||
510 | DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n", | ||
511 | dev_priv->first_dma_page, dev_priv->current_dma_page, | ||
512 | dev_priv->dma_pages[dev_priv->current_dma_page].used); | ||
513 | |||
514 | for (i = dev_priv->first_dma_page; | ||
515 | i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used; | ||
516 | ++i) { | ||
517 | uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle + | ||
518 | i * SAVAGE_DMA_PAGE_SIZE; | ||
519 | #if SAVAGE_DMA_DEBUG | ||
520 | /* Sanity check: all pages except the last one must be full. */ | ||
521 | if (i < dev_priv->current_dma_page && | ||
522 | dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) { | ||
523 | DRM_ERROR("partial DMA page %u: used=%u", | ||
524 | i, dev_priv->dma_pages[i].used); | ||
525 | } | ||
526 | #endif | ||
527 | BEGIN_BCI(dev_priv->dma_pages[i].used); | ||
528 | for (j = 0; j < dev_priv->dma_pages[i].used; ++j) { | ||
529 | BCI_WRITE(dma_ptr[j]); | ||
530 | } | ||
531 | dev_priv->dma_pages[i].used = 0; | ||
532 | } | ||
533 | |||
534 | /* reset to first page */ | ||
535 | dev_priv->first_dma_page = dev_priv->current_dma_page = 0; | ||
536 | } | ||
537 | |||
538 | int savage_driver_load(struct drm_device *dev, unsigned long chipset) | ||
539 | { | ||
540 | drm_savage_private_t *dev_priv; | ||
541 | |||
542 | dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER); | ||
543 | if (dev_priv == NULL) | ||
544 | return -ENOMEM; | ||
545 | |||
546 | memset(dev_priv, 0, sizeof(drm_savage_private_t)); | ||
547 | dev->dev_private = (void *)dev_priv; | ||
548 | |||
549 | dev_priv->chipset = (enum savage_family)chipset; | ||
550 | |||
551 | return 0; | ||
552 | } | ||
553 | |||
554 | |||
555 | /* | ||
556 | * Initialize mappings. On Savage4 and SavageIX the alignment | ||
557 | * and size of the aperture are not suitable for automatic MTRR setup | ||
558 | * in drm_addmap. Therefore we add them manually before the maps are | ||
559 | * initialized, and tear them down on last close. | ||
560 | */ | ||
561 | int savage_driver_firstopen(struct drm_device *dev) | ||
562 | { | ||
563 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
564 | unsigned long mmio_base, fb_base, fb_size, aperture_base; | ||
565 | /* fb_rsrc and aper_rsrc aren't really used currently, but still exist | ||
566 | * in case we decide we need information on the BAR for BSD in the | ||
567 | * future. | ||
568 | */ | ||
569 | unsigned int fb_rsrc, aper_rsrc; | ||
570 | int ret = 0; | ||
571 | |||
572 | dev_priv->mtrr[0].handle = -1; | ||
573 | dev_priv->mtrr[1].handle = -1; | ||
574 | dev_priv->mtrr[2].handle = -1; | ||
575 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
576 | fb_rsrc = 0; | ||
577 | fb_base = drm_get_resource_start(dev, 0); | ||
578 | fb_size = SAVAGE_FB_SIZE_S3; | ||
579 | mmio_base = fb_base + SAVAGE_FB_SIZE_S3; | ||
580 | aper_rsrc = 0; | ||
581 | aperture_base = fb_base + SAVAGE_APERTURE_OFFSET; | ||
582 | /* this should always be true */ | ||
583 | if (drm_get_resource_len(dev, 0) == 0x08000000) { | ||
584 | /* Don't make MMIO write-combining! We need 3 | ||
585 | * MTRRs. */ | ||
586 | dev_priv->mtrr[0].base = fb_base; | ||
587 | dev_priv->mtrr[0].size = 0x01000000; | ||
588 | dev_priv->mtrr[0].handle = | ||
589 | drm_mtrr_add(dev_priv->mtrr[0].base, | ||
590 | dev_priv->mtrr[0].size, DRM_MTRR_WC); | ||
591 | dev_priv->mtrr[1].base = fb_base + 0x02000000; | ||
592 | dev_priv->mtrr[1].size = 0x02000000; | ||
593 | dev_priv->mtrr[1].handle = | ||
594 | drm_mtrr_add(dev_priv->mtrr[1].base, | ||
595 | dev_priv->mtrr[1].size, DRM_MTRR_WC); | ||
596 | dev_priv->mtrr[2].base = fb_base + 0x04000000; | ||
597 | dev_priv->mtrr[2].size = 0x04000000; | ||
598 | dev_priv->mtrr[2].handle = | ||
599 | drm_mtrr_add(dev_priv->mtrr[2].base, | ||
600 | dev_priv->mtrr[2].size, DRM_MTRR_WC); | ||
601 | } else { | ||
602 | DRM_ERROR("strange pci_resource_len %08lx\n", | ||
603 | drm_get_resource_len(dev, 0)); | ||
604 | } | ||
605 | } else if (dev_priv->chipset != S3_SUPERSAVAGE && | ||
606 | dev_priv->chipset != S3_SAVAGE2000) { | ||
607 | mmio_base = drm_get_resource_start(dev, 0); | ||
608 | fb_rsrc = 1; | ||
609 | fb_base = drm_get_resource_start(dev, 1); | ||
610 | fb_size = SAVAGE_FB_SIZE_S4; | ||
611 | aper_rsrc = 1; | ||
612 | aperture_base = fb_base + SAVAGE_APERTURE_OFFSET; | ||
613 | /* this should always be true */ | ||
614 | if (drm_get_resource_len(dev, 1) == 0x08000000) { | ||
615 | /* Can use one MTRR to cover both fb and | ||
616 | * aperture. */ | ||
617 | dev_priv->mtrr[0].base = fb_base; | ||
618 | dev_priv->mtrr[0].size = 0x08000000; | ||
619 | dev_priv->mtrr[0].handle = | ||
620 | drm_mtrr_add(dev_priv->mtrr[0].base, | ||
621 | dev_priv->mtrr[0].size, DRM_MTRR_WC); | ||
622 | } else { | ||
623 | DRM_ERROR("strange pci_resource_len %08lx\n", | ||
624 | drm_get_resource_len(dev, 1)); | ||
625 | } | ||
626 | } else { | ||
627 | mmio_base = drm_get_resource_start(dev, 0); | ||
628 | fb_rsrc = 1; | ||
629 | fb_base = drm_get_resource_start(dev, 1); | ||
630 | fb_size = drm_get_resource_len(dev, 1); | ||
631 | aper_rsrc = 2; | ||
632 | aperture_base = drm_get_resource_start(dev, 2); | ||
633 | /* Automatic MTRR setup will do the right thing. */ | ||
634 | } | ||
635 | |||
636 | ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS, | ||
637 | _DRM_READ_ONLY, &dev_priv->mmio); | ||
638 | if (ret) | ||
639 | return ret; | ||
640 | |||
641 | ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER, | ||
642 | _DRM_WRITE_COMBINING, &dev_priv->fb); | ||
643 | if (ret) | ||
644 | return ret; | ||
645 | |||
646 | ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE, | ||
647 | _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, | ||
648 | &dev_priv->aperture); | ||
649 | if (ret) | ||
650 | return ret; | ||
651 | |||
652 | return ret; | ||
653 | } | ||
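
For the Savage3D/MX/IX branch above, three MTRRs are needed because MTRR ranges must be power-of-two sized and aligned, and the MMIO block in the middle of the 128MB BAR must not be write-combined. The layout implied by the code, summarized here as a sketch (offsets relative to fb_base, an interpretation rather than documentation):

/* 0x00000000 - 0x00ffffff   16MB  framebuffer        mtrr[0], write-combining
 * 0x01000000 - 0x01ffffff   16MB  MMIO registers     no MTRR (left uncached)
 * 0x02000000 - 0x03ffffff   32MB  aperture (start)   mtrr[1], write-combining
 * 0x04000000 - 0x07ffffff   64MB  aperture remainder
 *                                 plus BAR tail      mtrr[2], write-combining */

Together the three write-combined ranges cover everything in the BAR except the 16MB MMIO window, which is why a single larger MTRR cannot be used on these chips.
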
654 | |||
655 | /* | ||
656 | * Delete MTRRs and free device-private data. | ||
657 | */ | ||
658 | void savage_driver_lastclose(struct drm_device *dev) | ||
659 | { | ||
660 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
661 | int i; | ||
662 | |||
663 | for (i = 0; i < 3; ++i) | ||
664 | if (dev_priv->mtrr[i].handle >= 0) | ||
665 | drm_mtrr_del(dev_priv->mtrr[i].handle, | ||
666 | dev_priv->mtrr[i].base, | ||
667 | dev_priv->mtrr[i].size, DRM_MTRR_WC); | ||
668 | } | ||
669 | |||
670 | int savage_driver_unload(struct drm_device *dev) | ||
671 | { | ||
672 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
673 | |||
674 | drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER); | ||
675 | |||
676 | return 0; | ||
677 | } | ||
678 | |||
679 | static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init) | ||
680 | { | ||
681 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
682 | |||
683 | if (init->fb_bpp != 16 && init->fb_bpp != 32) { | ||
684 | DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp); | ||
685 | return -EINVAL; | ||
686 | } | ||
687 | if (init->depth_bpp != 16 && init->depth_bpp != 32) { | ||
688 | DRM_ERROR("invalid depth buffer bpp %d!\n", init->depth_bpp); | ||
689 | return -EINVAL; | ||
690 | } | ||
691 | if (init->dma_type != SAVAGE_DMA_AGP && | ||
692 | init->dma_type != SAVAGE_DMA_PCI) { | ||
693 | DRM_ERROR("invalid dma memory type %d!\n", init->dma_type); | ||
694 | return -EINVAL; | ||
695 | } | ||
696 | |||
697 | dev_priv->cob_size = init->cob_size; | ||
698 | dev_priv->bci_threshold_lo = init->bci_threshold_lo; | ||
699 | dev_priv->bci_threshold_hi = init->bci_threshold_hi; | ||
700 | dev_priv->dma_type = init->dma_type; | ||
701 | |||
702 | dev_priv->fb_bpp = init->fb_bpp; | ||
703 | dev_priv->front_offset = init->front_offset; | ||
704 | dev_priv->front_pitch = init->front_pitch; | ||
705 | dev_priv->back_offset = init->back_offset; | ||
706 | dev_priv->back_pitch = init->back_pitch; | ||
707 | dev_priv->depth_bpp = init->depth_bpp; | ||
708 | dev_priv->depth_offset = init->depth_offset; | ||
709 | dev_priv->depth_pitch = init->depth_pitch; | ||
710 | |||
711 | dev_priv->texture_offset = init->texture_offset; | ||
712 | dev_priv->texture_size = init->texture_size; | ||
713 | |||
714 | dev_priv->sarea = drm_getsarea(dev); | ||
715 | if (!dev_priv->sarea) { | ||
716 | DRM_ERROR("could not find sarea!\n"); | ||
717 | savage_do_cleanup_bci(dev); | ||
718 | return -EINVAL; | ||
719 | } | ||
720 | if (init->status_offset != 0) { | ||
721 | dev_priv->status = drm_core_findmap(dev, init->status_offset); | ||
722 | if (!dev_priv->status) { | ||
723 | DRM_ERROR("could not find shadow status region!\n"); | ||
724 | savage_do_cleanup_bci(dev); | ||
725 | return -EINVAL; | ||
726 | } | ||
727 | } else { | ||
728 | dev_priv->status = NULL; | ||
729 | } | ||
730 | if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) { | ||
731 | dev->agp_buffer_token = init->buffers_offset; | ||
732 | dev->agp_buffer_map = drm_core_findmap(dev, | ||
733 | init->buffers_offset); | ||
734 | if (!dev->agp_buffer_map) { | ||
735 | DRM_ERROR("could not find DMA buffer region!\n"); | ||
736 | savage_do_cleanup_bci(dev); | ||
737 | return -EINVAL; | ||
738 | } | ||
739 | drm_core_ioremap(dev->agp_buffer_map, dev); | ||
740 | if (!dev->agp_buffer_map) { | ||
741 | DRM_ERROR("failed to ioremap DMA buffer region!\n"); | ||
742 | savage_do_cleanup_bci(dev); | ||
743 | return -ENOMEM; | ||
744 | } | ||
745 | } | ||
746 | if (init->agp_textures_offset) { | ||
747 | dev_priv->agp_textures = | ||
748 | drm_core_findmap(dev, init->agp_textures_offset); | ||
749 | if (!dev_priv->agp_textures) { | ||
750 | DRM_ERROR("could not find agp texture region!\n"); | ||
751 | savage_do_cleanup_bci(dev); | ||
752 | return -EINVAL; | ||
753 | } | ||
754 | } else { | ||
755 | dev_priv->agp_textures = NULL; | ||
756 | } | ||
757 | |||
758 | if (init->cmd_dma_offset) { | ||
759 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
760 | DRM_ERROR("command DMA not supported on " | ||
761 | "Savage3D/MX/IX.\n"); | ||
762 | savage_do_cleanup_bci(dev); | ||
763 | return -EINVAL; | ||
764 | } | ||
765 | if (dev->dma && dev->dma->buflist) { | ||
766 | DRM_ERROR("command and vertex DMA not supported " | ||
767 | "at the same time.\n"); | ||
768 | savage_do_cleanup_bci(dev); | ||
769 | return -EINVAL; | ||
770 | } | ||
771 | dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset); | ||
772 | if (!dev_priv->cmd_dma) { | ||
773 | DRM_ERROR("could not find command DMA region!\n"); | ||
774 | savage_do_cleanup_bci(dev); | ||
775 | return -EINVAL; | ||
776 | } | ||
777 | if (dev_priv->dma_type == SAVAGE_DMA_AGP) { | ||
778 | if (dev_priv->cmd_dma->type != _DRM_AGP) { | ||
779 | DRM_ERROR("AGP command DMA region is not a " | ||
780 | "_DRM_AGP map!\n"); | ||
781 | savage_do_cleanup_bci(dev); | ||
782 | return -EINVAL; | ||
783 | } | ||
784 | drm_core_ioremap(dev_priv->cmd_dma, dev); | ||
785 | if (!dev_priv->cmd_dma->handle) { | ||
786 | DRM_ERROR("failed to ioremap command " | ||
787 | "DMA region!\n"); | ||
788 | savage_do_cleanup_bci(dev); | ||
789 | return -ENOMEM; | ||
790 | } | ||
791 | } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) { | ||
792 | DRM_ERROR("PCI command DMA region is not a " | ||
793 | "_DRM_CONSISTENT map!\n"); | ||
794 | savage_do_cleanup_bci(dev); | ||
795 | return -EINVAL; | ||
796 | } | ||
797 | } else { | ||
798 | dev_priv->cmd_dma = NULL; | ||
799 | } | ||
800 | |||
801 | dev_priv->dma_flush = savage_dma_flush; | ||
802 | if (!dev_priv->cmd_dma) { | ||
803 | DRM_DEBUG("falling back to faked command DMA.\n"); | ||
804 | dev_priv->fake_dma.offset = 0; | ||
805 | dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE; | ||
806 | dev_priv->fake_dma.type = _DRM_SHM; | ||
807 | dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE, | ||
808 | DRM_MEM_DRIVER); | ||
809 | if (!dev_priv->fake_dma.handle) { | ||
810 | DRM_ERROR("could not allocate faked DMA buffer!\n"); | ||
811 | savage_do_cleanup_bci(dev); | ||
812 | return -ENOMEM; | ||
813 | } | ||
814 | dev_priv->cmd_dma = &dev_priv->fake_dma; | ||
815 | dev_priv->dma_flush = savage_fake_dma_flush; | ||
816 | } | ||
817 | |||
818 | dev_priv->sarea_priv = | ||
819 | (drm_savage_sarea_t *) ((uint8_t *) dev_priv->sarea->handle + | ||
820 | init->sarea_priv_offset); | ||
821 | |||
822 | /* setup bitmap descriptors */ | ||
823 | { | ||
824 | unsigned int color_tile_format; | ||
825 | unsigned int depth_tile_format; | ||
826 | unsigned int front_stride, back_stride, depth_stride; | ||
827 | if (dev_priv->chipset <= S3_SAVAGE4) { | ||
828 | color_tile_format = dev_priv->fb_bpp == 16 ? | ||
829 | SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP; | ||
830 | depth_tile_format = dev_priv->depth_bpp == 16 ? | ||
831 | SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP; | ||
832 | } else { | ||
833 | color_tile_format = SAVAGE_BD_TILE_DEST; | ||
834 | depth_tile_format = SAVAGE_BD_TILE_DEST; | ||
835 | } | ||
836 | front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8); | ||
837 | back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8); | ||
838 | depth_stride = | ||
839 | dev_priv->depth_pitch / (dev_priv->depth_bpp / 8); | ||
840 | |||
841 | dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE | | ||
842 | (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) | | ||
843 | (color_tile_format << SAVAGE_BD_TILE_SHIFT); | ||
844 | |||
845 | dev_priv->back_bd = back_stride | SAVAGE_BD_BW_DISABLE | | ||
846 | (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) | | ||
847 | (color_tile_format << SAVAGE_BD_TILE_SHIFT); | ||
848 | |||
849 | dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE | | ||
850 | (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) | | ||
851 | (depth_tile_format << SAVAGE_BD_TILE_SHIFT); | ||
852 | } | ||
853 | |||
854 | /* setup status and bci ptr */ | ||
855 | dev_priv->event_counter = 0; | ||
856 | dev_priv->event_wrap = 0; | ||
857 | dev_priv->bci_ptr = (volatile uint32_t *) | ||
858 | ((uint8_t *) dev_priv->mmio->handle + SAVAGE_BCI_OFFSET); | ||
859 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
860 | dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D; | ||
861 | } else { | ||
862 | dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4; | ||
863 | } | ||
864 | if (dev_priv->status != NULL) { | ||
865 | dev_priv->status_ptr = | ||
866 | (volatile uint32_t *)dev_priv->status->handle; | ||
867 | dev_priv->wait_fifo = savage_bci_wait_fifo_shadow; | ||
868 | dev_priv->wait_evnt = savage_bci_wait_event_shadow; | ||
869 | dev_priv->status_ptr[1023] = dev_priv->event_counter; | ||
870 | } else { | ||
871 | dev_priv->status_ptr = NULL; | ||
872 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
873 | dev_priv->wait_fifo = savage_bci_wait_fifo_s3d; | ||
874 | } else { | ||
875 | dev_priv->wait_fifo = savage_bci_wait_fifo_s4; | ||
876 | } | ||
877 | dev_priv->wait_evnt = savage_bci_wait_event_reg; | ||
878 | } | ||
879 | |||
880 | /* cliprect functions */ | ||
881 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) | ||
882 | dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d; | ||
883 | else | ||
884 | dev_priv->emit_clip_rect = savage_emit_clip_rect_s4; | ||
885 | |||
886 | if (savage_freelist_init(dev) < 0) { | ||
887 | DRM_ERROR("could not initialize freelist\n"); | ||
888 | savage_do_cleanup_bci(dev); | ||
889 | return -ENOMEM; | ||
890 | } | ||
891 | |||
892 | if (savage_dma_init(dev_priv) < 0) { | ||
893 | DRM_ERROR("could not initialize command DMA\n"); | ||
894 | savage_do_cleanup_bci(dev); | ||
895 | return -ENOMEM; | ||
896 | } | ||
897 | |||
898 | return 0; | ||
899 | } | ||
900 | |||
901 | static int savage_do_cleanup_bci(struct drm_device * dev) | ||
902 | { | ||
903 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
904 | |||
905 | if (dev_priv->cmd_dma == &dev_priv->fake_dma) { | ||
906 | if (dev_priv->fake_dma.handle) | ||
907 | drm_free(dev_priv->fake_dma.handle, | ||
908 | SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER); | ||
909 | } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle && | ||
910 | dev_priv->cmd_dma->type == _DRM_AGP && | ||
911 | dev_priv->dma_type == SAVAGE_DMA_AGP) | ||
912 | drm_core_ioremapfree(dev_priv->cmd_dma, dev); | ||
913 | |||
914 | if (dev_priv->dma_type == SAVAGE_DMA_AGP && | ||
915 | dev->agp_buffer_map && dev->agp_buffer_map->handle) { | ||
916 | drm_core_ioremapfree(dev->agp_buffer_map, dev); | ||
917 | /* make sure the next instance (which may be running | ||
918 | * in PCI mode) doesn't try to use an old | ||
919 | * agp_buffer_map. */ | ||
920 | dev->agp_buffer_map = NULL; | ||
921 | } | ||
922 | |||
923 | if (dev_priv->dma_pages) | ||
924 | drm_free(dev_priv->dma_pages, | ||
925 | sizeof(drm_savage_dma_page_t) * dev_priv->nr_dma_pages, | ||
926 | DRM_MEM_DRIVER); | ||
927 | |||
928 | return 0; | ||
929 | } | ||
930 | |||
931 | static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
932 | { | ||
933 | drm_savage_init_t *init = data; | ||
934 | |||
935 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
936 | |||
937 | switch (init->func) { | ||
938 | case SAVAGE_INIT_BCI: | ||
939 | return savage_do_init_bci(dev, init); | ||
940 | case SAVAGE_CLEANUP_BCI: | ||
941 | return savage_do_cleanup_bci(dev); | ||
942 | } | ||
943 | |||
944 | return -EINVAL; | ||
945 | } | ||
946 | |||
947 | static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
948 | { | ||
949 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
950 | drm_savage_event_emit_t *event = data; | ||
951 | |||
952 | DRM_DEBUG("\n"); | ||
953 | |||
954 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
955 | |||
956 | event->count = savage_bci_emit_event(dev_priv, event->flags); | ||
957 | event->count |= dev_priv->event_wrap << 16; | ||
958 | |||
959 | return 0; | ||
960 | } | ||
961 | |||
962 | static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
963 | { | ||
964 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
965 | drm_savage_event_wait_t *event = data; | ||
966 | unsigned int event_e, hw_e; | ||
967 | unsigned int event_w, hw_w; | ||
968 | |||
969 | DRM_DEBUG("\n"); | ||
970 | |||
971 | UPDATE_EVENT_COUNTER(); | ||
972 | if (dev_priv->status_ptr) | ||
973 | hw_e = dev_priv->status_ptr[1] & 0xffff; | ||
974 | else | ||
975 | hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; | ||
976 | hw_w = dev_priv->event_wrap; | ||
977 | if (hw_e > dev_priv->event_counter) | ||
978 | hw_w--; /* hardware hasn't passed the last wrap yet */ | ||
979 | |||
980 | event_e = event->count & 0xffff; | ||
981 | event_w = event->count >> 16; | ||
982 | |||
983 | /* Don't need to wait if | ||
984 | * - event counter wrapped since the event was emitted or | ||
985 | * - the hardware has advanced up to or over the event to wait for. | ||
986 | */ | ||
987 | if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e)) | ||
988 | return 0; | ||
989 | else | ||
990 | return dev_priv->wait_evnt(dev_priv, event_e); | ||
991 | } | ||
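
The comment in savage_bci_event_wait() above describes a lexicographic comparison on (wrap, event) pairs: waiting is skipped once the hardware pair is at or beyond the emitted pair. A standalone sketch of that test (illustrative only; the helper name is not from the driver):

#include <stdio.h>

static int already_passed(unsigned int event_w, unsigned int event_e,
			  unsigned int hw_w, unsigned int hw_e)
{
	return event_w < hw_w || (event_w == hw_w && event_e <= hw_e);
}

int main(void)
{
	printf("%d\n", already_passed(2, 0x0100, 3, 0x0005)); /* 1: hardware wrapped past it */
	printf("%d\n", already_passed(3, 0x0100, 3, 0x0100)); /* 1: exactly reached */
	printf("%d\n", already_passed(3, 0x0200, 3, 0x0100)); /* 0: still has to wait */
	return 0;
}
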
992 | |||
993 | /* | ||
994 | * DMA buffer management | ||
995 | */ | ||
996 | |||
997 | static int savage_bci_get_buffers(struct drm_device *dev, | ||
998 | struct drm_file *file_priv, | ||
999 | struct drm_dma *d) | ||
1000 | { | ||
1001 | struct drm_buf *buf; | ||
1002 | int i; | ||
1003 | |||
1004 | for (i = d->granted_count; i < d->request_count; i++) { | ||
1005 | buf = savage_freelist_get(dev); | ||
1006 | if (!buf) | ||
1007 | return -EAGAIN; | ||
1008 | |||
1009 | buf->file_priv = file_priv; | ||
1010 | |||
1011 | if (DRM_COPY_TO_USER(&d->request_indices[i], | ||
1012 | &buf->idx, sizeof(buf->idx))) | ||
1013 | return -EFAULT; | ||
1014 | if (DRM_COPY_TO_USER(&d->request_sizes[i], | ||
1015 | &buf->total, sizeof(buf->total))) | ||
1016 | return -EFAULT; | ||
1017 | |||
1018 | d->granted_count++; | ||
1019 | } | ||
1020 | return 0; | ||
1021 | } | ||
1022 | |||
1023 | int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
1024 | { | ||
1025 | struct drm_device_dma *dma = dev->dma; | ||
1026 | struct drm_dma *d = data; | ||
1027 | int ret = 0; | ||
1028 | |||
1029 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1030 | |||
1031 | /* Please don't send us buffers. | ||
1032 | */ | ||
1033 | if (d->send_count != 0) { | ||
1034 | DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", | ||
1035 | DRM_CURRENTPID, d->send_count); | ||
1036 | return -EINVAL; | ||
1037 | } | ||
1038 | |||
1039 | /* We'll send you buffers. | ||
1040 | */ | ||
1041 | if (d->request_count < 0 || d->request_count > dma->buf_count) { | ||
1042 | DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", | ||
1043 | DRM_CURRENTPID, d->request_count, dma->buf_count); | ||
1044 | return -EINVAL; | ||
1045 | } | ||
1046 | |||
1047 | d->granted_count = 0; | ||
1048 | |||
1049 | if (d->request_count) { | ||
1050 | ret = savage_bci_get_buffers(dev, file_priv, d); | ||
1051 | } | ||
1052 | |||
1053 | return ret; | ||
1054 | } | ||
1055 | |||
1056 | void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) | ||
1057 | { | ||
1058 | struct drm_device_dma *dma = dev->dma; | ||
1059 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
1060 | int i; | ||
1061 | |||
1062 | if (!dma) | ||
1063 | return; | ||
1064 | if (!dev_priv) | ||
1065 | return; | ||
1066 | if (!dma->buflist) | ||
1067 | return; | ||
1068 | |||
1069 | /*i830_flush_queue(dev); */ | ||
1070 | |||
1071 | for (i = 0; i < dma->buf_count; i++) { | ||
1072 | struct drm_buf *buf = dma->buflist[i]; | ||
1073 | drm_savage_buf_priv_t *buf_priv = buf->dev_private; | ||
1074 | |||
1075 | if (buf->file_priv == file_priv && buf_priv && | ||
1076 | buf_priv->next == NULL && buf_priv->prev == NULL) { | ||
1077 | uint16_t event; | ||
1078 | DRM_DEBUG("reclaimed from client\n"); | ||
1079 | event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D); | ||
1080 | SET_AGE(&buf_priv->age, event, dev_priv->event_wrap); | ||
1081 | savage_freelist_put(dev, buf); | ||
1082 | } | ||
1083 | } | ||
1084 | |||
1085 | drm_core_reclaim_buffers(dev, file_priv); | ||
1086 | } | ||
1087 | |||
1088 | struct drm_ioctl_desc savage_ioctls[] = { | ||
1089 | DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | ||
1090 | DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH), | ||
1091 | DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH), | ||
1092 | DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH), | ||
1093 | }; | ||
1094 | |||
1095 | int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls); | ||
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
new file mode 100644
index 000000000000..eee52aa92a7c
--- /dev/null
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -0,0 +1,88 @@
1 | /* savage_drv.c -- Savage driver for Linux | ||
2 | * | ||
3 | * Copyright 2004 Felix Kuehling | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sub license, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial portions | ||
15 | * of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
20 | * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR | ||
21 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF | ||
22 | * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | */ | ||
25 | |||
26 | #include "drmP.h" | ||
27 | #include "savage_drm.h" | ||
28 | #include "savage_drv.h" | ||
29 | |||
30 | #include "drm_pciids.h" | ||
31 | |||
32 | static struct pci_device_id pciidlist[] = { | ||
33 | savage_PCI_IDS | ||
34 | }; | ||
35 | |||
36 | static struct drm_driver driver = { | ||
37 | .driver_features = | ||
38 | DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA, | ||
39 | .dev_priv_size = sizeof(drm_savage_buf_priv_t), | ||
40 | .load = savage_driver_load, | ||
41 | .firstopen = savage_driver_firstopen, | ||
42 | .lastclose = savage_driver_lastclose, | ||
43 | .unload = savage_driver_unload, | ||
44 | .reclaim_buffers = savage_reclaim_buffers, | ||
45 | .get_map_ofs = drm_core_get_map_ofs, | ||
46 | .get_reg_ofs = drm_core_get_reg_ofs, | ||
47 | .ioctls = savage_ioctls, | ||
48 | .dma_ioctl = savage_bci_buffers, | ||
49 | .fops = { | ||
50 | .owner = THIS_MODULE, | ||
51 | .open = drm_open, | ||
52 | .release = drm_release, | ||
53 | .ioctl = drm_ioctl, | ||
54 | .mmap = drm_mmap, | ||
55 | .poll = drm_poll, | ||
56 | .fasync = drm_fasync, | ||
57 | }, | ||
58 | |||
59 | .pci_driver = { | ||
60 | .name = DRIVER_NAME, | ||
61 | .id_table = pciidlist, | ||
62 | }, | ||
63 | |||
64 | .name = DRIVER_NAME, | ||
65 | .desc = DRIVER_DESC, | ||
66 | .date = DRIVER_DATE, | ||
67 | .major = DRIVER_MAJOR, | ||
68 | .minor = DRIVER_MINOR, | ||
69 | .patchlevel = DRIVER_PATCHLEVEL, | ||
70 | }; | ||
71 | |||
72 | static int __init savage_init(void) | ||
73 | { | ||
74 | driver.num_ioctls = savage_max_ioctl; | ||
75 | return drm_init(&driver); | ||
76 | } | ||
77 | |||
78 | static void __exit savage_exit(void) | ||
79 | { | ||
80 | drm_exit(&driver); | ||
81 | } | ||
82 | |||
83 | module_init(savage_init); | ||
84 | module_exit(savage_exit); | ||
85 | |||
86 | MODULE_AUTHOR(DRIVER_AUTHOR); | ||
87 | MODULE_DESCRIPTION(DRIVER_DESC); | ||
88 | MODULE_LICENSE("GPL and additional rights"); | ||
diff --git a/drivers/gpu/drm/savage/savage_drv.h b/drivers/gpu/drm/savage/savage_drv.h
new file mode 100644
index 000000000000..df2aac6636f7
--- /dev/null
+++ b/drivers/gpu/drm/savage/savage_drv.h
@@ -0,0 +1,575 @@
1 | /* savage_drv.h -- Private header for the savage driver */ | ||
2 | /* | ||
3 | * Copyright 2004 Felix Kuehling | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sub license, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial portions | ||
15 | * of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
20 | * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR | ||
21 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF | ||
22 | * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | */ | ||
25 | |||
26 | #ifndef __SAVAGE_DRV_H__ | ||
27 | #define __SAVAGE_DRV_H__ | ||
28 | |||
29 | #define DRIVER_AUTHOR "Felix Kuehling" | ||
30 | |||
31 | #define DRIVER_NAME "savage" | ||
32 | #define DRIVER_DESC "Savage3D/MX/IX, Savage4, SuperSavage, Twister, ProSavage[DDR]" | ||
33 | #define DRIVER_DATE "20050313" | ||
34 | |||
35 | #define DRIVER_MAJOR 2 | ||
36 | #define DRIVER_MINOR 4 | ||
37 | #define DRIVER_PATCHLEVEL 1 | ||
38 | /* Interface history: | ||
39 | * | ||
40 | * 1.x The DRM driver from the VIA/S3 code drop, basically a dummy | ||
41 | * 2.0 The first real DRM | ||
42 | * 2.1 Scissors registers managed by the DRM, 3D operations clipped by | ||
43 | * cliprects of the cmdbuf ioctl | ||
44 | * 2.2 Implemented SAVAGE_CMD_DMA_IDX and SAVAGE_CMD_VB_IDX | ||
45 | * 2.3 Event counters used by BCI_EVENT_EMIT/WAIT ioctls are now 32 bits | ||
46 | * wide and thus very long lived (unlikely to ever wrap). The size | ||
47 | * in the struct was 32 bits before, but only 16 bits were used | ||
48 | * 2.4 Implemented command DMA. Now drm_savage_init_t.cmd_dma_offset is | ||
49 | * actually used | ||
50 | */ | ||
51 | |||
52 | typedef struct drm_savage_age { | ||
53 | uint16_t event; | ||
54 | unsigned int wrap; | ||
55 | } drm_savage_age_t; | ||
56 | |||
57 | typedef struct drm_savage_buf_priv { | ||
58 | struct drm_savage_buf_priv *next; | ||
59 | struct drm_savage_buf_priv *prev; | ||
60 | drm_savage_age_t age; | ||
61 | struct drm_buf *buf; | ||
62 | } drm_savage_buf_priv_t; | ||
63 | |||
64 | typedef struct drm_savage_dma_page { | ||
65 | drm_savage_age_t age; | ||
66 | unsigned int used, flushed; | ||
67 | } drm_savage_dma_page_t; | ||
68 | #define SAVAGE_DMA_PAGE_SIZE 1024 /* in dwords */ | ||
69 | /* Fake DMA buffer size in bytes. 4 pages. Allows a maximum command | ||
70 | * size of 16kbytes or 4k entries. Minimum requirement would be | ||
71 | * 10kbytes for 255 40-byte vertices in one drawing command. */ | ||
72 | #define SAVAGE_FAKE_DMA_SIZE (SAVAGE_DMA_PAGE_SIZE*4*4) | ||
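
The sizes quoted in the comment above follow directly from the defines: a DMA page is 1024 dwords, so four pages of four-byte entries give 16384 bytes (4096 entries), comfortably above the roughly 10200 bytes needed for 255 vertices of 40 bytes each. A tiny standalone check (not driver code, local macro names only mirror the defines above):

#include <stdio.h>

#define DMA_PAGE_SIZE 1024                      /* dwords, mirrors SAVAGE_DMA_PAGE_SIZE */
#define FAKE_DMA_SIZE (DMA_PAGE_SIZE * 4 * 4)   /* bytes, mirrors SAVAGE_FAKE_DMA_SIZE  */

int main(void)
{
	printf("fake DMA buffer: %d bytes = %d dwords\n", FAKE_DMA_SIZE, FAKE_DMA_SIZE / 4);
	printf("255 vertices * 40 bytes = %d bytes\n", 255 * 40);
	return 0;
}
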
73 | |||
74 | /* interesting bits of hardware state that are saved in dev_priv */ | ||
75 | typedef union { | ||
76 | struct drm_savage_common_state { | ||
77 | uint32_t vbaddr; | ||
78 | } common; | ||
79 | struct { | ||
80 | unsigned char pad[sizeof(struct drm_savage_common_state)]; | ||
81 | uint32_t texctrl, texaddr; | ||
82 | uint32_t scstart, new_scstart; | ||
83 | uint32_t scend, new_scend; | ||
84 | } s3d; | ||
85 | struct { | ||
86 | unsigned char pad[sizeof(struct drm_savage_common_state)]; | ||
87 | uint32_t texdescr, texaddr0, texaddr1; | ||
88 | uint32_t drawctrl0, new_drawctrl0; | ||
89 | uint32_t drawctrl1, new_drawctrl1; | ||
90 | } s4; | ||
91 | } drm_savage_state_t; | ||
92 | |||
93 | /* these chip tags should match the ones in the 2D driver in savage_regs.h. */ | ||
94 | enum savage_family { | ||
95 | S3_UNKNOWN = 0, | ||
96 | S3_SAVAGE3D, | ||
97 | S3_SAVAGE_MX, | ||
98 | S3_SAVAGE4, | ||
99 | S3_PROSAVAGE, | ||
100 | S3_TWISTER, | ||
101 | S3_PROSAVAGEDDR, | ||
102 | S3_SUPERSAVAGE, | ||
103 | S3_SAVAGE2000, | ||
104 | S3_LAST | ||
105 | }; | ||
106 | |||
107 | extern struct drm_ioctl_desc savage_ioctls[]; | ||
108 | extern int savage_max_ioctl; | ||
109 | |||
110 | #define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX)) | ||
111 | |||
112 | #define S3_SAVAGE4_SERIES(chip) ((chip==S3_SAVAGE4) \ | ||
113 | || (chip==S3_PROSAVAGE) \ | ||
114 | || (chip==S3_TWISTER) \ | ||
115 | || (chip==S3_PROSAVAGEDDR)) | ||
116 | |||
117 | #define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE)) | ||
118 | |||
119 | #define S3_SAVAGE_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000)) | ||
120 | |||
121 | #define S3_MOBILE_TWISTER_SERIES(chip) ((chip==S3_TWISTER) \ | ||
122 | ||(chip==S3_PROSAVAGEDDR)) | ||
123 | |||
124 | /* flags */ | ||
125 | #define SAVAGE_IS_AGP 1 | ||
126 | |||
127 | typedef struct drm_savage_private { | ||
128 | drm_savage_sarea_t *sarea_priv; | ||
129 | |||
130 | drm_savage_buf_priv_t head, tail; | ||
131 | |||
132 | /* who am I? */ | ||
133 | enum savage_family chipset; | ||
134 | |||
135 | unsigned int cob_size; | ||
136 | unsigned int bci_threshold_lo, bci_threshold_hi; | ||
137 | unsigned int dma_type; | ||
138 | |||
139 | /* frame buffer layout */ | ||
140 | unsigned int fb_bpp; | ||
141 | unsigned int front_offset, front_pitch; | ||
142 | unsigned int back_offset, back_pitch; | ||
143 | unsigned int depth_bpp; | ||
144 | unsigned int depth_offset, depth_pitch; | ||
145 | |||
146 | /* bitmap descriptors for swap and clear */ | ||
147 | unsigned int front_bd, back_bd, depth_bd; | ||
148 | |||
149 | /* local textures */ | ||
150 | unsigned int texture_offset; | ||
151 | unsigned int texture_size; | ||
152 | |||
153 | /* memory regions in physical memory */ | ||
154 | drm_local_map_t *sarea; | ||
155 | drm_local_map_t *mmio; | ||
156 | drm_local_map_t *fb; | ||
157 | drm_local_map_t *aperture; | ||
158 | drm_local_map_t *status; | ||
159 | drm_local_map_t *agp_textures; | ||
160 | drm_local_map_t *cmd_dma; | ||
161 | drm_local_map_t fake_dma; | ||
162 | |||
163 | struct { | ||
164 | int handle; | ||
165 | unsigned long base, size; | ||
166 | } mtrr[3]; | ||
167 | |||
168 | /* BCI and status-related stuff */ | ||
169 | volatile uint32_t *status_ptr, *bci_ptr; | ||
170 | uint32_t status_used_mask; | ||
171 | uint16_t event_counter; | ||
172 | unsigned int event_wrap; | ||
173 | |||
174 | /* Savage4 command DMA */ | ||
175 | drm_savage_dma_page_t *dma_pages; | ||
176 | unsigned int nr_dma_pages, first_dma_page, current_dma_page; | ||
177 | drm_savage_age_t last_dma_age; | ||
178 | |||
179 | /* saved hw state for global/local check on S3D */ | ||
180 | uint32_t hw_draw_ctrl, hw_zbuf_ctrl; | ||
181 | /* and for scissors (global, so don't emit if not changed) */ | ||
182 | uint32_t hw_scissors_start, hw_scissors_end; | ||
183 | |||
184 | drm_savage_state_t state; | ||
185 | |||
186 | /* after emitting a wait cmd Savage3D needs 63 nops before next DMA */ | ||
187 | unsigned int waiting; | ||
188 | |||
189 | /* config/hardware-dependent function pointers */ | ||
190 | int (*wait_fifo) (struct drm_savage_private * dev_priv, unsigned int n); | ||
191 | int (*wait_evnt) (struct drm_savage_private * dev_priv, uint16_t e); | ||
192 | /* Err, there is a macro wait_event in include/linux/wait.h. | ||
193 | * Avoid unwanted macro expansion. */ | ||
194 | void (*emit_clip_rect) (struct drm_savage_private * dev_priv, | ||
195 | const struct drm_clip_rect * pbox); | ||
196 | void (*dma_flush) (struct drm_savage_private * dev_priv); | ||
197 | } drm_savage_private_t; | ||
198 | |||
199 | /* ioctls */ | ||
200 | extern int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv); | ||
201 | extern int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv); | ||
202 | |||
203 | /* BCI functions */ | ||
204 | extern uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv, | ||
205 | unsigned int flags); | ||
206 | extern void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf); | ||
207 | extern void savage_dma_reset(drm_savage_private_t * dev_priv); | ||
208 | extern void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page); | ||
209 | extern uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, | ||
210 | unsigned int n); | ||
211 | extern int savage_driver_load(struct drm_device *dev, unsigned long chipset); | ||
212 | extern int savage_driver_firstopen(struct drm_device *dev); | ||
213 | extern void savage_driver_lastclose(struct drm_device *dev); | ||
214 | extern int savage_driver_unload(struct drm_device *dev); | ||
215 | extern void savage_reclaim_buffers(struct drm_device *dev, | ||
216 | struct drm_file *file_priv); | ||
217 | |||
218 | /* state functions */ | ||
219 | extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, | ||
220 | const struct drm_clip_rect * pbox); | ||
221 | extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv, | ||
222 | const struct drm_clip_rect * pbox); | ||
223 | |||
224 | #define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */ | ||
225 | #define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */ | ||
226 | #define SAVAGE_MMIO_SIZE 0x00080000 /* 512kB */ | ||
227 | #define SAVAGE_APERTURE_OFFSET 0x02000000 /* 32MB */ | ||
228 | #define SAVAGE_APERTURE_SIZE 0x05000000 /* 5 tiled surfaces, 16MB each */ | ||
229 | |||
230 | #define SAVAGE_BCI_OFFSET 0x00010000 /* offset of the BCI region | ||
231 | * inside the MMIO region */ | ||
232 | #define SAVAGE_BCI_FIFO_SIZE 32 /* number of entries in on-chip | ||
233 | * BCI FIFO */ | ||
234 | |||
235 | /* | ||
236 | * MMIO registers | ||
237 | */ | ||
238 | #define SAVAGE_STATUS_WORD0 0x48C00 | ||
239 | #define SAVAGE_STATUS_WORD1 0x48C04 | ||
240 | #define SAVAGE_ALT_STATUS_WORD0 0x48C60 | ||
241 | |||
242 | #define SAVAGE_FIFO_USED_MASK_S3D 0x0001ffff | ||
243 | #define SAVAGE_FIFO_USED_MASK_S4 0x001fffff | ||
244 | |||
245 | /* Copied from savage_bci.h in the 2D driver with some renaming. */ | ||
246 | |||
247 | /* Bitmap descriptors */ | ||
248 | #define SAVAGE_BD_STRIDE_SHIFT 0 | ||
249 | #define SAVAGE_BD_BPP_SHIFT 16 | ||
250 | #define SAVAGE_BD_TILE_SHIFT 24 | ||
251 | #define SAVAGE_BD_BW_DISABLE (1<<28) | ||
252 | /* common: */ | ||
253 | #define SAVAGE_BD_TILE_LINEAR 0 | ||
254 | /* savage4, MX, IX, 3D */ | ||
255 | #define SAVAGE_BD_TILE_16BPP 2 | ||
256 | #define SAVAGE_BD_TILE_32BPP 3 | ||
257 | /* twister, prosavage, DDR, supersavage, 2000 */ | ||
258 | #define SAVAGE_BD_TILE_DEST 1 | ||
259 | #define SAVAGE_BD_TILE_TEXTURE 2 | ||
260 | /* GBD - BCI enable */ | ||
261 | /* savage4, MX, IX, 3D */ | ||
262 | #define SAVAGE_GBD_BCI_ENABLE 8 | ||
263 | /* twister, prosavage, DDR, supersavage, 2000 */ | ||
264 | #define SAVAGE_GBD_BCI_ENABLE_TWISTER 0 | ||
265 | |||
266 | #define SAVAGE_GBD_BIG_ENDIAN 4 | ||
267 | #define SAVAGE_GBD_LITTLE_ENDIAN 0 | ||
268 | #define SAVAGE_GBD_64 1 | ||
269 | |||
270 | /* Global Bitmap Descriptor */ | ||
271 | #define SAVAGE_BCI_GLB_BD_LOW 0x8168 | ||
272 | #define SAVAGE_BCI_GLB_BD_HIGH 0x816C | ||
273 | |||
274 | /* | ||
275 | * BCI registers | ||
276 | */ | ||
277 | /* Savage4/Twister/ProSavage 3D registers */ | ||
278 | #define SAVAGE_DRAWLOCALCTRL_S4 0x1e | ||
279 | #define SAVAGE_TEXPALADDR_S4 0x1f | ||
280 | #define SAVAGE_TEXCTRL0_S4 0x20 | ||
281 | #define SAVAGE_TEXCTRL1_S4 0x21 | ||
282 | #define SAVAGE_TEXADDR0_S4 0x22 | ||
283 | #define SAVAGE_TEXADDR1_S4 0x23 | ||
284 | #define SAVAGE_TEXBLEND0_S4 0x24 | ||
285 | #define SAVAGE_TEXBLEND1_S4 0x25 | ||
286 | #define SAVAGE_TEXXPRCLR_S4 0x26 /* never used */ | ||
287 | #define SAVAGE_TEXDESCR_S4 0x27 | ||
288 | #define SAVAGE_FOGTABLE_S4 0x28 | ||
289 | #define SAVAGE_FOGCTRL_S4 0x30 | ||
290 | #define SAVAGE_STENCILCTRL_S4 0x31 | ||
291 | #define SAVAGE_ZBUFCTRL_S4 0x32 | ||
292 | #define SAVAGE_ZBUFOFF_S4 0x33 | ||
293 | #define SAVAGE_DESTCTRL_S4 0x34 | ||
294 | #define SAVAGE_DRAWCTRL0_S4 0x35 | ||
295 | #define SAVAGE_DRAWCTRL1_S4 0x36 | ||
296 | #define SAVAGE_ZWATERMARK_S4 0x37 | ||
297 | #define SAVAGE_DESTTEXRWWATERMARK_S4 0x38 | ||
298 | #define SAVAGE_TEXBLENDCOLOR_S4 0x39 | ||
299 | /* Savage3D/MX/IX 3D registers */ | ||
300 | #define SAVAGE_TEXPALADDR_S3D 0x18 | ||
301 | #define SAVAGE_TEXXPRCLR_S3D 0x19 /* never used */ | ||
302 | #define SAVAGE_TEXADDR_S3D 0x1A | ||
303 | #define SAVAGE_TEXDESCR_S3D 0x1B | ||
304 | #define SAVAGE_TEXCTRL_S3D 0x1C | ||
305 | #define SAVAGE_FOGTABLE_S3D 0x20 | ||
306 | #define SAVAGE_FOGCTRL_S3D 0x30 | ||
307 | #define SAVAGE_DRAWCTRL_S3D 0x31 | ||
308 | #define SAVAGE_ZBUFCTRL_S3D 0x32 | ||
309 | #define SAVAGE_ZBUFOFF_S3D 0x33 | ||
310 | #define SAVAGE_DESTCTRL_S3D 0x34 | ||
311 | #define SAVAGE_SCSTART_S3D 0x35 | ||
312 | #define SAVAGE_SCEND_S3D 0x36 | ||
313 | #define SAVAGE_ZWATERMARK_S3D 0x37 | ||
314 | #define SAVAGE_DESTTEXRWWATERMARK_S3D 0x38 | ||
315 | /* common stuff */ | ||
316 | #define SAVAGE_VERTBUFADDR 0x3e | ||
317 | #define SAVAGE_BITPLANEWTMASK 0xd7 | ||
318 | #define SAVAGE_DMABUFADDR 0x51 | ||
319 | |||
320 | /* texture enable bits (needed for tex addr checking) */ | ||
321 | #define SAVAGE_TEXCTRL_TEXEN_MASK 0x00010000 /* S3D */ | ||
322 | #define SAVAGE_TEXDESCR_TEX0EN_MASK 0x02000000 /* S4 */ | ||
323 | #define SAVAGE_TEXDESCR_TEX1EN_MASK 0x04000000 /* S4 */ | ||
324 | |||
325 | /* Global fields in Savage4/Twister/ProSavage 3D registers: | ||
326 | * | ||
327 | * All texture registers and DrawLocalCtrl are local. All other | ||
328 | * registers are global. */ | ||
329 | |||
330 | /* Global fields in Savage3D/MX/IX 3D registers: | ||
331 | * | ||
332 | * All texture registers are local. DrawCtrl and ZBufCtrl are | ||
333 | * partially local. All other registers are global. | ||
334 | * | ||
335 | * DrawCtrl global fields: cullMode, alphaTestCmpFunc, alphaTestEn, alphaRefVal | ||
336 | * ZBufCtrl global fields: zCmpFunc, zBufEn | ||
337 | */ | ||
338 | #define SAVAGE_DRAWCTRL_S3D_GLOBAL 0x03f3c00c | ||
339 | #define SAVAGE_ZBUFCTRL_S3D_GLOBAL 0x00000027 | ||
340 | |||
341 | /* Masks for scissor bits (drawCtrl[01] on s4, scissorStart/End on s3d) | ||
342 | */ | ||
343 | #define SAVAGE_SCISSOR_MASK_S4 0x00fff7ff | ||
344 | #define SAVAGE_SCISSOR_MASK_S3D 0x07ff07ff | ||
345 | |||
346 | /* | ||
347 | * BCI commands | ||
348 | */ | ||
349 | #define BCI_CMD_NOP 0x40000000 | ||
350 | #define BCI_CMD_RECT 0x48000000 | ||
351 | #define BCI_CMD_RECT_XP 0x01000000 | ||
352 | #define BCI_CMD_RECT_YP 0x02000000 | ||
353 | #define BCI_CMD_SCANLINE 0x50000000 | ||
354 | #define BCI_CMD_LINE 0x5C000000 | ||
355 | #define BCI_CMD_LINE_LAST_PIXEL 0x58000000 | ||
356 | #define BCI_CMD_BYTE_TEXT 0x63000000 | ||
357 | #define BCI_CMD_NT_BYTE_TEXT 0x67000000 | ||
358 | #define BCI_CMD_BIT_TEXT 0x6C000000 | ||
359 | #define BCI_CMD_GET_ROP(cmd) (((cmd) >> 16) & 0xFF) | ||
360 | #define BCI_CMD_SET_ROP(cmd, rop) ((cmd) |= (((rop) & 0xFF) << 16)) | ||
361 | #define BCI_CMD_SEND_COLOR 0x00008000 | ||
362 | |||
363 | #define BCI_CMD_CLIP_NONE 0x00000000 | ||
364 | #define BCI_CMD_CLIP_CURRENT 0x00002000 | ||
365 | #define BCI_CMD_CLIP_LR 0x00004000 | ||
366 | #define BCI_CMD_CLIP_NEW 0x00006000 | ||
367 | |||
368 | #define BCI_CMD_DEST_GBD 0x00000000 | ||
369 | #define BCI_CMD_DEST_PBD 0x00000800 | ||
370 | #define BCI_CMD_DEST_PBD_NEW 0x00000C00 | ||
371 | #define BCI_CMD_DEST_SBD 0x00001000 | ||
372 | #define BCI_CMD_DEST_SBD_NEW 0x00001400 | ||
373 | |||
374 | #define BCI_CMD_SRC_TRANSPARENT 0x00000200 | ||
375 | #define BCI_CMD_SRC_SOLID 0x00000000 | ||
376 | #define BCI_CMD_SRC_GBD 0x00000020 | ||
377 | #define BCI_CMD_SRC_COLOR 0x00000040 | ||
378 | #define BCI_CMD_SRC_MONO 0x00000060 | ||
379 | #define BCI_CMD_SRC_PBD_COLOR 0x00000080 | ||
380 | #define BCI_CMD_SRC_PBD_MONO 0x000000A0 | ||
381 | #define BCI_CMD_SRC_PBD_COLOR_NEW 0x000000C0 | ||
382 | #define BCI_CMD_SRC_PBD_MONO_NEW 0x000000E0 | ||
383 | #define BCI_CMD_SRC_SBD_COLOR 0x00000100 | ||
384 | #define BCI_CMD_SRC_SBD_MONO 0x00000120 | ||
385 | #define BCI_CMD_SRC_SBD_COLOR_NEW 0x00000140 | ||
386 | #define BCI_CMD_SRC_SBD_MONO_NEW 0x00000160 | ||
387 | |||
388 | #define BCI_CMD_PAT_TRANSPARENT 0x00000010 | ||
389 | #define BCI_CMD_PAT_NONE 0x00000000 | ||
390 | #define BCI_CMD_PAT_COLOR 0x00000002 | ||
391 | #define BCI_CMD_PAT_MONO 0x00000003 | ||
392 | #define BCI_CMD_PAT_PBD_COLOR 0x00000004 | ||
393 | #define BCI_CMD_PAT_PBD_MONO 0x00000005 | ||
394 | #define BCI_CMD_PAT_PBD_COLOR_NEW 0x00000006 | ||
395 | #define BCI_CMD_PAT_PBD_MONO_NEW 0x00000007 | ||
396 | #define BCI_CMD_PAT_SBD_COLOR 0x00000008 | ||
397 | #define BCI_CMD_PAT_SBD_MONO 0x00000009 | ||
398 | #define BCI_CMD_PAT_SBD_COLOR_NEW 0x0000000A | ||
399 | #define BCI_CMD_PAT_SBD_MONO_NEW 0x0000000B | ||
400 | |||
401 | #define BCI_BD_BW_DISABLE 0x10000000 | ||
402 | #define BCI_BD_TILE_MASK 0x03000000 | ||
403 | #define BCI_BD_TILE_NONE 0x00000000 | ||
404 | #define BCI_BD_TILE_16 0x02000000 | ||
405 | #define BCI_BD_TILE_32 0x03000000 | ||
406 | #define BCI_BD_GET_BPP(bd) (((bd) >> 16) & 0xFF) | ||
407 | #define BCI_BD_SET_BPP(bd, bpp) ((bd) |= (((bpp) & 0xFF) << 16)) | ||
408 | #define BCI_BD_GET_STRIDE(bd) ((bd) & 0xFFFF) | ||
409 | #define BCI_BD_SET_STRIDE(bd, st) ((bd) |= ((st) & 0xFFFF)) | ||
410 | |||
411 | #define BCI_CMD_SET_REGISTER 0x96000000 | ||
412 | |||
413 | #define BCI_CMD_WAIT 0xC0000000 | ||
414 | #define BCI_CMD_WAIT_3D 0x00010000 | ||
415 | #define BCI_CMD_WAIT_2D 0x00020000 | ||
416 | |||
417 | #define BCI_CMD_UPDATE_EVENT_TAG 0x98000000 | ||
418 | |||
419 | #define BCI_CMD_DRAW_PRIM 0x80000000 | ||
420 | #define BCI_CMD_DRAW_INDEXED_PRIM 0x88000000 | ||
421 | #define BCI_CMD_DRAW_CONT 0x01000000 | ||
422 | #define BCI_CMD_DRAW_TRILIST 0x00000000 | ||
423 | #define BCI_CMD_DRAW_TRISTRIP 0x02000000 | ||
424 | #define BCI_CMD_DRAW_TRIFAN 0x04000000 | ||
425 | #define BCI_CMD_DRAW_SKIPFLAGS 0x000000ff | ||
426 | #define BCI_CMD_DRAW_NO_Z 0x00000001 | ||
427 | #define BCI_CMD_DRAW_NO_W 0x00000002 | ||
428 | #define BCI_CMD_DRAW_NO_CD 0x00000004 | ||
429 | #define BCI_CMD_DRAW_NO_CS 0x00000008 | ||
430 | #define BCI_CMD_DRAW_NO_U0 0x00000010 | ||
431 | #define BCI_CMD_DRAW_NO_V0 0x00000020 | ||
432 | #define BCI_CMD_DRAW_NO_UV0 0x00000030 | ||
433 | #define BCI_CMD_DRAW_NO_U1 0x00000040 | ||
434 | #define BCI_CMD_DRAW_NO_V1 0x00000080 | ||
435 | #define BCI_CMD_DRAW_NO_UV1 0x000000c0 | ||
436 | |||
437 | #define BCI_CMD_DMA 0xa8000000 | ||
438 | |||
439 | #define BCI_W_H(w, h) ((((h) << 16) | (w)) & 0x0FFF0FFF) | ||
440 | #define BCI_X_Y(x, y) ((((y) << 16) | (x)) & 0x0FFF0FFF) | ||
441 | #define BCI_X_W(x, w) ((((w) << 16) | (x)) & 0x0FFF0FFF) | ||
442 | #define BCI_CLIP_LR(l, r) ((((r) << 16) | (l)) & 0x0FFF0FFF) | ||
443 | #define BCI_CLIP_TL(t, l) ((((t) << 16) | (l)) & 0x0FFF0FFF) | ||
444 | #define BCI_CLIP_BR(b, r) ((((b) << 16) | (r)) & 0x0FFF0FFF) | ||
445 | |||
446 | #define BCI_LINE_X_Y(x, y) (((y) << 16) | ((x) & 0xFFFF)) | ||
447 | #define BCI_LINE_STEPS(diag, axi) (((axi) << 16) | ((diag) & 0xFFFF)) | ||
448 | #define BCI_LINE_MISC(maj, ym, xp, yp, err) \ | ||
449 | (((maj) & 0x1FFF) | \ | ||
450 | ((ym) ? 1<<13 : 0) | \ | ||
451 | ((xp) ? 1<<14 : 0) | \ | ||
452 | ((yp) ? 1<<15 : 0) | \ | ||
453 | ((err) << 16)) | ||
454 | |||
455 | /* | ||
456 | * common commands | ||
457 | */ | ||
458 | #define BCI_SET_REGISTERS( first, n ) \ | ||
459 | BCI_WRITE(BCI_CMD_SET_REGISTER | \ | ||
460 | ((uint32_t)(n) & 0xff) << 16 | \ | ||
461 | ((uint32_t)(first) & 0xffff)) | ||
462 | #define DMA_SET_REGISTERS( first, n ) \ | ||
463 | DMA_WRITE(BCI_CMD_SET_REGISTER | \ | ||
464 | ((uint32_t)(n) & 0xff) << 16 | \ | ||
465 | ((uint32_t)(first) & 0xffff)) | ||
466 | |||
467 | #define BCI_DRAW_PRIMITIVE(n, type, skip) \ | ||
468 | BCI_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \ | ||
469 | ((n) << 16)) | ||
470 | #define DMA_DRAW_PRIMITIVE(n, type, skip) \ | ||
471 | DMA_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \ | ||
472 | ((n) << 16)) | ||
473 | |||
474 | #define BCI_DRAW_INDICES_S3D(n, type, i0) \ | ||
475 | BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \ | ||
476 | ((n) << 16) | (i0)) | ||
477 | |||
478 | #define BCI_DRAW_INDICES_S4(n, type, skip) \ | ||
479 | BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \ | ||
480 | (skip) | ((n) << 16)) | ||
481 | |||
482 | #define BCI_DMA(n) \ | ||
483 | BCI_WRITE(BCI_CMD_DMA | (((n) >> 1) - 1)) | ||
484 | |||
485 | /* | ||
486 | * access to MMIO | ||
487 | */ | ||
488 | #define SAVAGE_READ(reg) DRM_READ32( dev_priv->mmio, (reg) ) | ||
489 | #define SAVAGE_WRITE(reg) DRM_WRITE32( dev_priv->mmio, (reg) ) | ||
490 | |||
491 | /* | ||
492 | * access to the burst command interface (BCI) | ||
493 | */ | ||
494 | #define SAVAGE_BCI_DEBUG 1 | ||
495 | |||
496 | #define BCI_LOCALS volatile uint32_t *bci_ptr; | ||
497 | |||
498 | #define BEGIN_BCI( n ) do { \ | ||
499 | dev_priv->wait_fifo(dev_priv, (n)); \ | ||
500 | bci_ptr = dev_priv->bci_ptr; \ | ||
501 | } while(0) | ||
502 | |||
503 | #define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val) | ||
504 | |||
505 | /* | ||
506 | * command DMA support | ||
507 | */ | ||
508 | #define SAVAGE_DMA_DEBUG 1 | ||
509 | |||
510 | #define DMA_LOCALS uint32_t *dma_ptr; | ||
511 | |||
512 | #define BEGIN_DMA( n ) do { \ | ||
513 | unsigned int cur = dev_priv->current_dma_page; \ | ||
514 | unsigned int rest = SAVAGE_DMA_PAGE_SIZE - \ | ||
515 | dev_priv->dma_pages[cur].used; \ | ||
516 | if ((n) > rest) { \ | ||
517 | dma_ptr = savage_dma_alloc(dev_priv, (n)); \ | ||
518 | } else { /* fast path for small allocations */ \ | ||
519 | dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + \ | ||
520 | cur * SAVAGE_DMA_PAGE_SIZE + \ | ||
521 | dev_priv->dma_pages[cur].used; \ | ||
522 | if (dev_priv->dma_pages[cur].used == 0) \ | ||
523 | savage_dma_wait(dev_priv, cur); \ | ||
524 | dev_priv->dma_pages[cur].used += (n); \ | ||
525 | } \ | ||
526 | } while(0) | ||
527 | |||
528 | #define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val) | ||
529 | |||
530 | #define DMA_COPY(src, n) do { \ | ||
531 | memcpy(dma_ptr, (src), (n)*4); \ | ||
532 | dma_ptr += n; \ | ||
533 | } while(0) | ||
534 | |||
535 | #if SAVAGE_DMA_DEBUG | ||
536 | #define DMA_COMMIT() do { \ | ||
537 | unsigned int cur = dev_priv->current_dma_page; \ | ||
538 | uint32_t *expected = (uint32_t *)dev_priv->cmd_dma->handle + \ | ||
539 | cur * SAVAGE_DMA_PAGE_SIZE + \ | ||
540 | dev_priv->dma_pages[cur].used; \ | ||
541 | if (dma_ptr != expected) { \ | ||
542 | DRM_ERROR("DMA allocation and use don't match: " \ | ||
543 | "%p != %p\n", expected, dma_ptr); \ | ||
544 | savage_dma_reset(dev_priv); \ | ||
545 | } \ | ||
546 | } while(0) | ||
547 | #else | ||
548 | #define DMA_COMMIT() do {/* nothing */} while(0) | ||
549 | #endif | ||
550 | |||
551 | #define DMA_FLUSH() dev_priv->dma_flush(dev_priv) | ||
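Command DMA has the same shape: BEGIN_DMA() reserves space in the current faked-DMA page (falling back to savage_dma_alloc() when the page is too full), DMA_WRITE()/DMA_COPY() fill it, and DMA_COMMIT() checks that exactly the reserved number of words was written when SAVAGE_DMA_DEBUG is enabled. A hedged sketch, assuming a valid dev_priv (the helper name and register choice are illustrative only):

/* Illustrative only: program two consecutive BCI registers through the
 * command-DMA path, the same pattern savage_emit_clip_rect_s3d() uses
 * for the scissor registers. */
static void example_dma_set_scissors(drm_savage_private_t *dev_priv,
				     uint32_t scstart, uint32_t scend)
{
	DMA_LOCALS;

	BEGIN_DMA(3);			/* 1 command word + 2 payload words */
	DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
	DMA_WRITE(scstart);
	DMA_WRITE(scend);
	DMA_COMMIT();			/* allocation and usage must match */
}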
552 | |||
553 | /* Buffer aging via event tag | ||
554 | */ | ||
555 | |||
556 | #define UPDATE_EVENT_COUNTER( ) do { \ | ||
557 | if (dev_priv->status_ptr) { \ | ||
558 | uint16_t count; \ | ||
559 | /* coordinate with Xserver */ \ | ||
560 | count = dev_priv->status_ptr[1023]; \ | ||
561 | if (count < dev_priv->event_counter) \ | ||
562 | dev_priv->event_wrap++; \ | ||
563 | dev_priv->event_counter = count; \ | ||
564 | } \ | ||
565 | } while(0) | ||
566 | |||
567 | #define SET_AGE( age, e, w ) do { \ | ||
568 | (age)->event = e; \ | ||
569 | (age)->wrap = w; \ | ||
570 | } while(0) | ||
571 | |||
572 | #define TEST_AGE( age, e, w ) \ | ||
573 | ( (age)->wrap < (w) || ( (age)->wrap == (w) && (age)->event <= (e) ) ) | ||
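Taken together these macros implement buffer aging: a buffer is stamped with SET_AGE() when it is handed to the hardware, and it may be reused once the event counter, refreshed from the shadow status page by UPDATE_EVENT_COUNTER(), has caught up with that stamp. A sketch of the non-blocking test, assuming the age was stamped earlier via savage_bci_emit_event() and SET_AGE() as in savage_bci_cmdbuf() (the helper name is hypothetical):

/* Hypothetical helper: returns nonzero if the stamped buffer has been
 * retired by the hardware, without waiting. */
static int example_buffer_idle(drm_savage_private_t *dev_priv,
			       const drm_savage_age_t *age)
{
	UPDATE_EVENT_COUNTER();		/* refresh event_counter/event_wrap */
	return TEST_AGE(age, dev_priv->event_counter, dev_priv->event_wrap);
}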
574 | |||
575 | #endif /* __SAVAGE_DRV_H__ */ | ||
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c new file mode 100644 index 000000000000..5f6238fdf1fa --- /dev/null +++ b/drivers/gpu/drm/savage/savage_state.c | |||
@@ -0,0 +1,1163 @@ | |||
1 | /* savage_state.c -- State and drawing support for Savage | ||
2 | * | ||
3 | * Copyright 2004 Felix Kuehling | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sub license, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial portions | ||
15 | * of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
20 | * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR | ||
21 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF | ||
22 | * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | */ | ||
25 | #include "drmP.h" | ||
26 | #include "savage_drm.h" | ||
27 | #include "savage_drv.h" | ||
28 | |||
29 | void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, | ||
30 | const struct drm_clip_rect * pbox) | ||
31 | { | ||
32 | uint32_t scstart = dev_priv->state.s3d.new_scstart; | ||
33 | uint32_t scend = dev_priv->state.s3d.new_scend; | ||
34 | scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) | | ||
35 | ((uint32_t) pbox->x1 & 0x000007ff) | | ||
36 | (((uint32_t) pbox->y1 << 16) & 0x07ff0000); | ||
37 | scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) | | ||
38 | (((uint32_t) pbox->x2 - 1) & 0x000007ff) | | ||
39 | ((((uint32_t) pbox->y2 - 1) << 16) & 0x07ff0000); | ||
40 | if (scstart != dev_priv->state.s3d.scstart || | ||
41 | scend != dev_priv->state.s3d.scend) { | ||
42 | DMA_LOCALS; | ||
43 | BEGIN_DMA(4); | ||
44 | DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D); | ||
45 | DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2); | ||
46 | DMA_WRITE(scstart); | ||
47 | DMA_WRITE(scend); | ||
48 | dev_priv->state.s3d.scstart = scstart; | ||
49 | dev_priv->state.s3d.scend = scend; | ||
50 | dev_priv->waiting = 1; | ||
51 | DMA_COMMIT(); | ||
52 | } | ||
53 | } | ||
54 | |||
55 | void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv, | ||
56 | const struct drm_clip_rect * pbox) | ||
57 | { | ||
58 | uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0; | ||
59 | uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1; | ||
60 | drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) | | ||
61 | ((uint32_t) pbox->x1 & 0x000007ff) | | ||
62 | (((uint32_t) pbox->y1 << 12) & 0x00fff000); | ||
63 | drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) | | ||
64 | (((uint32_t) pbox->x2 - 1) & 0x000007ff) | | ||
65 | ((((uint32_t) pbox->y2 - 1) << 12) & 0x00fff000); | ||
66 | if (drawctrl0 != dev_priv->state.s4.drawctrl0 || | ||
67 | drawctrl1 != dev_priv->state.s4.drawctrl1) { | ||
68 | DMA_LOCALS; | ||
69 | BEGIN_DMA(4); | ||
70 | DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D); | ||
71 | DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2); | ||
72 | DMA_WRITE(drawctrl0); | ||
73 | DMA_WRITE(drawctrl1); | ||
74 | dev_priv->state.s4.drawctrl0 = drawctrl0; | ||
75 | dev_priv->state.s4.drawctrl1 = drawctrl1; | ||
76 | dev_priv->waiting = 1; | ||
77 | DMA_COMMIT(); | ||
78 | } | ||
79 | } | ||
80 | |||
81 | static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit, | ||
82 | uint32_t addr) | ||
83 | { | ||
84 | if ((addr & 6) != 2) { /* reserved bits */ | ||
85 | DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr); | ||
86 | return -EINVAL; | ||
87 | } | ||
88 | if (!(addr & 1)) { /* local */ | ||
89 | addr &= ~7; | ||
90 | if (addr < dev_priv->texture_offset || | ||
91 | addr >= dev_priv->texture_offset + dev_priv->texture_size) { | ||
92 | DRM_ERROR | ||
93 | ("bad texAddr%d %08x (local addr out of range)\n", | ||
94 | unit, addr); | ||
95 | return -EINVAL; | ||
96 | } | ||
97 | } else { /* AGP */ | ||
98 | if (!dev_priv->agp_textures) { | ||
99 | DRM_ERROR("bad texAddr%d %08x (AGP not available)\n", | ||
100 | unit, addr); | ||
101 | return -EINVAL; | ||
102 | } | ||
103 | addr &= ~7; | ||
104 | if (addr < dev_priv->agp_textures->offset || | ||
105 | addr >= (dev_priv->agp_textures->offset + | ||
106 | dev_priv->agp_textures->size)) { | ||
107 | DRM_ERROR | ||
108 | ("bad texAddr%d %08x (AGP addr out of range)\n", | ||
109 | unit, addr); | ||
110 | return -EINVAL; | ||
111 | } | ||
112 | } | ||
113 | return 0; | ||
114 | } | ||
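In other words, the low three bits of a client-supplied texture address are control bits: bits 2:1 are reserved and must be 01 (hence the (addr & 6) != 2 rejection), bit 0 selects AGP (1) versus local video memory (0), and all three are masked off before the range check. As a made-up example, addr = 0x00a00003 passes the reserved-bit check, is classified as an AGP address, and is range-checked as 0x00a00000 against agp_textures->offset/size.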
115 | |||
116 | #define SAVE_STATE(reg,where) \ | ||
117 | if(start <= reg && start+count > reg) \ | ||
118 | dev_priv->state.where = regs[reg - start] | ||
119 | #define SAVE_STATE_MASK(reg,where,mask) do { \ | ||
120 | if(start <= reg && start+count > reg) { \ | ||
121 | uint32_t tmp; \ | ||
122 | tmp = regs[reg - start]; \ | ||
123 | dev_priv->state.where = (tmp & (mask)) | \ | ||
124 | (dev_priv->state.where & ~(mask)); \ | ||
125 | } \ | ||
126 | } while (0) | ||
127 | |||
128 | static int savage_verify_state_s3d(drm_savage_private_t * dev_priv, | ||
129 | unsigned int start, unsigned int count, | ||
130 | const uint32_t *regs) | ||
131 | { | ||
132 | if (start < SAVAGE_TEXPALADDR_S3D || | ||
133 | start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { | ||
134 | DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", | ||
135 | start, start + count - 1); | ||
136 | return -EINVAL; | ||
137 | } | ||
138 | |||
139 | SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart, | ||
140 | ~SAVAGE_SCISSOR_MASK_S3D); | ||
141 | SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend, | ||
142 | ~SAVAGE_SCISSOR_MASK_S3D); | ||
143 | |||
144 | /* if any texture regs were changed ... */ | ||
145 | if (start <= SAVAGE_TEXCTRL_S3D && | ||
146 | start + count > SAVAGE_TEXPALADDR_S3D) { | ||
147 | /* ... check texture state */ | ||
148 | SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl); | ||
149 | SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr); | ||
150 | if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK) | ||
151 | return savage_verify_texaddr(dev_priv, 0, | ||
152 | dev_priv->state.s3d.texaddr); | ||
153 | } | ||
154 | |||
155 | return 0; | ||
156 | } | ||
157 | |||
158 | static int savage_verify_state_s4(drm_savage_private_t * dev_priv, | ||
159 | unsigned int start, unsigned int count, | ||
160 | const uint32_t *regs) | ||
161 | { | ||
162 | int ret = 0; | ||
163 | |||
164 | if (start < SAVAGE_DRAWLOCALCTRL_S4 || | ||
165 | start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) { | ||
166 | DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", | ||
167 | start, start + count - 1); | ||
168 | return -EINVAL; | ||
169 | } | ||
170 | |||
171 | SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0, | ||
172 | ~SAVAGE_SCISSOR_MASK_S4); | ||
173 | SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1, | ||
174 | ~SAVAGE_SCISSOR_MASK_S4); | ||
175 | |||
176 | /* if any texture regs were changed ... */ | ||
177 | if (start <= SAVAGE_TEXDESCR_S4 && | ||
178 | start + count > SAVAGE_TEXPALADDR_S4) { | ||
179 | /* ... check texture state */ | ||
180 | SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr); | ||
181 | SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0); | ||
182 | SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1); | ||
183 | if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK) | ||
184 | ret |= savage_verify_texaddr(dev_priv, 0, | ||
185 | dev_priv->state.s4.texaddr0); | ||
186 | if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK) | ||
187 | ret |= savage_verify_texaddr(dev_priv, 1, | ||
188 | dev_priv->state.s4.texaddr1); | ||
189 | } | ||
190 | |||
191 | return ret; | ||
192 | } | ||
193 | |||
194 | #undef SAVE_STATE | ||
195 | #undef SAVE_STATE_MASK | ||
196 | |||
197 | static int savage_dispatch_state(drm_savage_private_t * dev_priv, | ||
198 | const drm_savage_cmd_header_t * cmd_header, | ||
199 | const uint32_t *regs) | ||
200 | { | ||
201 | unsigned int count = cmd_header->state.count; | ||
202 | unsigned int start = cmd_header->state.start; | ||
203 | unsigned int count2 = 0; | ||
204 | unsigned int bci_size; | ||
205 | int ret; | ||
206 | DMA_LOCALS; | ||
207 | |||
208 | if (!count) | ||
209 | return 0; | ||
210 | |||
211 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
212 | ret = savage_verify_state_s3d(dev_priv, start, count, regs); | ||
213 | if (ret != 0) | ||
214 | return ret; | ||
215 | /* scissor regs are emitted in savage_dispatch_draw */ | ||
216 | if (start < SAVAGE_SCSTART_S3D) { | ||
217 | if (start + count > SAVAGE_SCEND_S3D + 1) | ||
218 | count2 = count - (SAVAGE_SCEND_S3D + 1 - start); | ||
219 | if (start + count > SAVAGE_SCSTART_S3D) | ||
220 | count = SAVAGE_SCSTART_S3D - start; | ||
221 | } else if (start <= SAVAGE_SCEND_S3D) { | ||
222 | if (start + count > SAVAGE_SCEND_S3D + 1) { | ||
223 | count -= SAVAGE_SCEND_S3D + 1 - start; | ||
224 | start = SAVAGE_SCEND_S3D + 1; | ||
225 | } else | ||
226 | return 0; | ||
227 | } | ||
228 | } else { | ||
229 | ret = savage_verify_state_s4(dev_priv, start, count, regs); | ||
230 | if (ret != 0) | ||
231 | return ret; | ||
232 | /* scissor regs are emitted in savage_dispatch_draw */ | ||
233 | if (start < SAVAGE_DRAWCTRL0_S4) { | ||
234 | if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) | ||
235 | count2 = count - | ||
236 | (SAVAGE_DRAWCTRL1_S4 + 1 - start); | ||
237 | if (start + count > SAVAGE_DRAWCTRL0_S4) | ||
238 | count = SAVAGE_DRAWCTRL0_S4 - start; | ||
239 | } else if (start <= SAVAGE_DRAWCTRL1_S4) { | ||
240 | if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) { | ||
241 | count -= SAVAGE_DRAWCTRL1_S4 + 1 - start; | ||
242 | start = SAVAGE_DRAWCTRL1_S4 + 1; | ||
243 | } else | ||
244 | return 0; | ||
245 | } | ||
246 | } | ||
247 | |||
248 | bci_size = count + (count + 254) / 255 + count2 + (count2 + 254) / 255; | ||
249 | |||
250 | if (cmd_header->state.global) { | ||
251 | BEGIN_DMA(bci_size + 1); | ||
252 | DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D); | ||
253 | dev_priv->waiting = 1; | ||
254 | } else { | ||
255 | BEGIN_DMA(bci_size); | ||
256 | } | ||
257 | |||
258 | do { | ||
259 | while (count > 0) { | ||
260 | unsigned int n = count < 255 ? count : 255; | ||
261 | DMA_SET_REGISTERS(start, n); | ||
262 | DMA_COPY(regs, n); | ||
263 | count -= n; | ||
264 | start += n; | ||
265 | regs += n; | ||
266 | } | ||
267 | start += 2; | ||
268 | regs += 2; | ||
269 | count = count2; | ||
270 | count2 = 0; | ||
271 | } while (count); | ||
272 | |||
273 | DMA_COMMIT(); | ||
274 | |||
275 | return 0; | ||
276 | } | ||
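A worked example of the scissor splitting above (values made up): an S3D state command with start = 0x31 and count = 8 would touch registers 0x31-0x38 and thus straddle SCSTART/SCEND (0x35/0x36). The code trims it to count = 4 (0x31-0x34) and count2 = 2 (0x37-0x38, reached by the start += 2 / regs += 2 step in the emit loop), and bci_size = 4 + 1 + 2 + 1 = 8: each run of up to 255 registers costs one SET_REGISTERS command word plus its register payload.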
277 | |||
278 | static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv, | ||
279 | const drm_savage_cmd_header_t * cmd_header, | ||
280 | const struct drm_buf * dmabuf) | ||
281 | { | ||
282 | unsigned char reorder = 0; | ||
283 | unsigned int prim = cmd_header->prim.prim; | ||
284 | unsigned int skip = cmd_header->prim.skip; | ||
285 | unsigned int n = cmd_header->prim.count; | ||
286 | unsigned int start = cmd_header->prim.start; | ||
287 | unsigned int i; | ||
288 | BCI_LOCALS; | ||
289 | |||
290 | if (!dmabuf) { | ||
291 | DRM_ERROR("called without dma buffers!\n"); | ||
292 | return -EINVAL; | ||
293 | } | ||
294 | |||
295 | if (!n) | ||
296 | return 0; | ||
297 | |||
298 | switch (prim) { | ||
299 | case SAVAGE_PRIM_TRILIST_201: | ||
300 | reorder = 1; | ||
301 | prim = SAVAGE_PRIM_TRILIST; | ||
302 | case SAVAGE_PRIM_TRILIST: | ||
303 | if (n % 3 != 0) { | ||
304 | DRM_ERROR("wrong number of vertices %u in TRILIST\n", | ||
305 | n); | ||
306 | return -EINVAL; | ||
307 | } | ||
308 | break; | ||
309 | case SAVAGE_PRIM_TRISTRIP: | ||
310 | case SAVAGE_PRIM_TRIFAN: | ||
311 | if (n < 3) { | ||
312 | DRM_ERROR | ||
313 | ("wrong number of vertices %u in TRIFAN/STRIP\n", | ||
314 | n); | ||
315 | return -EINVAL; | ||
316 | } | ||
317 | break; | ||
318 | default: | ||
319 | DRM_ERROR("invalid primitive type %u\n", prim); | ||
320 | return -EINVAL; | ||
321 | } | ||
322 | |||
323 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
324 | if (skip != 0) { | ||
325 | DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); | ||
326 | return -EINVAL; | ||
327 | } | ||
328 | } else { | ||
329 | unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - | ||
330 | (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) - | ||
331 | (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); | ||
332 | if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { | ||
333 | DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); | ||
334 | return -EINVAL; | ||
335 | } | ||
336 | if (reorder) { | ||
337 | DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); | ||
338 | return -EINVAL; | ||
339 | } | ||
340 | } | ||
341 | |||
342 | if (start + n > dmabuf->total / 32) { | ||
343 | DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", | ||
344 | start, start + n - 1, dmabuf->total / 32); | ||
345 | return -EINVAL; | ||
346 | } | ||
347 | |||
348 | /* Vertex DMA doesn't work with command DMA at the same time, | ||
349 | * so we use BCI_... to submit commands here. Flush buffered | ||
350 | * faked DMA first. */ | ||
351 | DMA_FLUSH(); | ||
352 | |||
353 | if (dmabuf->bus_address != dev_priv->state.common.vbaddr) { | ||
354 | BEGIN_BCI(2); | ||
355 | BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1); | ||
356 | BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type); | ||
357 | dev_priv->state.common.vbaddr = dmabuf->bus_address; | ||
358 | } | ||
359 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) { | ||
360 | /* Workaround for what looks like a hardware bug. If a | ||
361 | * WAIT_3D_IDLE was emitted some time before the | ||
362 | * indexed drawing command then the engine will lock | ||
363 | * up. There are two known workarounds: | ||
364 | * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */ | ||
365 | BEGIN_BCI(63); | ||
366 | for (i = 0; i < 63; ++i) | ||
367 | BCI_WRITE(BCI_CMD_WAIT); | ||
368 | dev_priv->waiting = 0; | ||
369 | } | ||
370 | |||
371 | prim <<= 25; | ||
372 | while (n != 0) { | ||
373 | /* Can emit up to 255 indices (85 triangles) at once. */ | ||
374 | unsigned int count = n > 255 ? 255 : n; | ||
375 | if (reorder) { | ||
376 | /* Need to reorder indices for correct flat | ||
377 | * shading while preserving the clock sense | ||
378 | * for correct culling. Only on Savage3D. */ | ||
379 | int reorder[3] = { -1, -1, -1 }; | ||
380 | reorder[start % 3] = 2; | ||
381 | |||
382 | BEGIN_BCI((count + 1 + 1) / 2); | ||
383 | BCI_DRAW_INDICES_S3D(count, prim, start + 2); | ||
384 | |||
385 | for (i = start + 1; i + 1 < start + count; i += 2) | ||
386 | BCI_WRITE((i + reorder[i % 3]) | | ||
387 | ((i + 1 + | ||
388 | reorder[(i + 1) % 3]) << 16)); | ||
389 | if (i < start + count) | ||
390 | BCI_WRITE(i + reorder[i % 3]); | ||
391 | } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
392 | BEGIN_BCI((count + 1 + 1) / 2); | ||
393 | BCI_DRAW_INDICES_S3D(count, prim, start); | ||
394 | |||
395 | for (i = start + 1; i + 1 < start + count; i += 2) | ||
396 | BCI_WRITE(i | ((i + 1) << 16)); | ||
397 | if (i < start + count) | ||
398 | BCI_WRITE(i); | ||
399 | } else { | ||
400 | BEGIN_BCI((count + 2 + 1) / 2); | ||
401 | BCI_DRAW_INDICES_S4(count, prim, skip); | ||
402 | |||
403 | for (i = start; i + 1 < start + count; i += 2) | ||
404 | BCI_WRITE(i | ((i + 1) << 16)); | ||
405 | if (i < start + count) | ||
406 | BCI_WRITE(i); | ||
407 | } | ||
408 | |||
409 | start += count; | ||
410 | n -= count; | ||
411 | |||
412 | prim |= BCI_CMD_DRAW_CONT; | ||
413 | } | ||
414 | |||
415 | return 0; | ||
416 | } | ||
417 | |||
418 | static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv, | ||
419 | const drm_savage_cmd_header_t * cmd_header, | ||
420 | const uint32_t *vtxbuf, unsigned int vb_size, | ||
421 | unsigned int vb_stride) | ||
422 | { | ||
423 | unsigned char reorder = 0; | ||
424 | unsigned int prim = cmd_header->prim.prim; | ||
425 | unsigned int skip = cmd_header->prim.skip; | ||
426 | unsigned int n = cmd_header->prim.count; | ||
427 | unsigned int start = cmd_header->prim.start; | ||
428 | unsigned int vtx_size; | ||
429 | unsigned int i; | ||
430 | DMA_LOCALS; | ||
431 | |||
432 | if (!n) | ||
433 | return 0; | ||
434 | |||
435 | switch (prim) { | ||
436 | case SAVAGE_PRIM_TRILIST_201: | ||
437 | reorder = 1; | ||
438 | prim = SAVAGE_PRIM_TRILIST; | ||
439 | case SAVAGE_PRIM_TRILIST: | ||
440 | if (n % 3 != 0) { | ||
441 | DRM_ERROR("wrong number of vertices %u in TRILIST\n", | ||
442 | n); | ||
443 | return -EINVAL; | ||
444 | } | ||
445 | break; | ||
446 | case SAVAGE_PRIM_TRISTRIP: | ||
447 | case SAVAGE_PRIM_TRIFAN: | ||
448 | if (n < 3) { | ||
449 | DRM_ERROR | ||
450 | ("wrong number of vertices %u in TRIFAN/STRIP\n", | ||
451 | n); | ||
452 | return -EINVAL; | ||
453 | } | ||
454 | break; | ||
455 | default: | ||
456 | DRM_ERROR("invalid primitive type %u\n", prim); | ||
457 | return -EINVAL; | ||
458 | } | ||
459 | |||
460 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
461 | if (skip > SAVAGE_SKIP_ALL_S3D) { | ||
462 | DRM_ERROR("invalid skip flags 0x%04x\n", skip); | ||
463 | return -EINVAL; | ||
464 | } | ||
465 | vtx_size = 8; /* full vertex */ | ||
466 | } else { | ||
467 | if (skip > SAVAGE_SKIP_ALL_S4) { | ||
468 | DRM_ERROR("invalid skip flags 0x%04x\n", skip); | ||
469 | return -EINVAL; | ||
470 | } | ||
471 | vtx_size = 10; /* full vertex */ | ||
472 | } | ||
473 | |||
474 | vtx_size -= (skip & 1) + (skip >> 1 & 1) + | ||
475 | (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) + | ||
476 | (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1); | ||
477 | |||
478 | if (vtx_size > vb_stride) { | ||
479 | DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", | ||
480 | vtx_size, vb_stride); | ||
481 | return -EINVAL; | ||
482 | } | ||
483 | |||
484 | if (start + n > vb_size / (vb_stride * 4)) { | ||
485 | DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", | ||
486 | start, start + n - 1, vb_size / (vb_stride * 4)); | ||
487 | return -EINVAL; | ||
488 | } | ||
489 | |||
490 | prim <<= 25; | ||
491 | while (n != 0) { | ||
492 | /* Can emit up to 255 vertices (85 triangles) at once. */ | ||
493 | unsigned int count = n > 255 ? 255 : n; | ||
494 | if (reorder) { | ||
495 | /* Need to reorder vertices for correct flat | ||
496 | * shading while preserving the clock sense | ||
497 | * for correct culling. Only on Savage3D. */ | ||
498 | int reorder[3] = { -1, -1, -1 }; | ||
499 | reorder[start % 3] = 2; | ||
500 | |||
501 | BEGIN_DMA(count * vtx_size + 1); | ||
502 | DMA_DRAW_PRIMITIVE(count, prim, skip); | ||
503 | |||
504 | for (i = start; i < start + count; ++i) { | ||
505 | unsigned int j = i + reorder[i % 3]; | ||
506 | DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); | ||
507 | } | ||
508 | |||
509 | DMA_COMMIT(); | ||
510 | } else { | ||
511 | BEGIN_DMA(count * vtx_size + 1); | ||
512 | DMA_DRAW_PRIMITIVE(count, prim, skip); | ||
513 | |||
514 | if (vb_stride == vtx_size) { | ||
515 | DMA_COPY(&vtxbuf[vb_stride * start], | ||
516 | vtx_size * count); | ||
517 | } else { | ||
518 | for (i = start; i < start + count; ++i) { | ||
519 | 					DMA_COPY(&vtxbuf[vb_stride * i], | ||
520 | vtx_size); | ||
521 | } | ||
522 | } | ||
523 | |||
524 | DMA_COMMIT(); | ||
525 | } | ||
526 | |||
527 | start += count; | ||
528 | n -= count; | ||
529 | |||
530 | prim |= BCI_CMD_DRAW_CONT; | ||
531 | } | ||
532 | |||
533 | return 0; | ||
534 | } | ||
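As a quick example of the skip handling above (values made up): skip = 0x03 on Savage4 hardware sets BCI_CMD_DRAW_NO_Z and BCI_CMD_DRAW_NO_W, so vtx_size drops from the full 10 words to 8, and each vertex copied by DMA_COPY() takes 8 dwords out of every vb_stride dwords of the client's vertex buffer.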
535 | |||
536 | static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv, | ||
537 | const drm_savage_cmd_header_t * cmd_header, | ||
538 | const uint16_t *idx, | ||
539 | const struct drm_buf * dmabuf) | ||
540 | { | ||
541 | unsigned char reorder = 0; | ||
542 | unsigned int prim = cmd_header->idx.prim; | ||
543 | unsigned int skip = cmd_header->idx.skip; | ||
544 | unsigned int n = cmd_header->idx.count; | ||
545 | unsigned int i; | ||
546 | BCI_LOCALS; | ||
547 | |||
548 | if (!dmabuf) { | ||
549 | DRM_ERROR("called without dma buffers!\n"); | ||
550 | return -EINVAL; | ||
551 | } | ||
552 | |||
553 | if (!n) | ||
554 | return 0; | ||
555 | |||
556 | switch (prim) { | ||
557 | case SAVAGE_PRIM_TRILIST_201: | ||
558 | reorder = 1; | ||
559 | prim = SAVAGE_PRIM_TRILIST; | ||
560 | case SAVAGE_PRIM_TRILIST: | ||
561 | if (n % 3 != 0) { | ||
562 | DRM_ERROR("wrong number of indices %u in TRILIST\n", n); | ||
563 | return -EINVAL; | ||
564 | } | ||
565 | break; | ||
566 | case SAVAGE_PRIM_TRISTRIP: | ||
567 | case SAVAGE_PRIM_TRIFAN: | ||
568 | if (n < 3) { | ||
569 | DRM_ERROR | ||
570 | ("wrong number of indices %u in TRIFAN/STRIP\n", n); | ||
571 | return -EINVAL; | ||
572 | } | ||
573 | break; | ||
574 | default: | ||
575 | DRM_ERROR("invalid primitive type %u\n", prim); | ||
576 | return -EINVAL; | ||
577 | } | ||
578 | |||
579 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
580 | if (skip != 0) { | ||
581 | DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); | ||
582 | return -EINVAL; | ||
583 | } | ||
584 | } else { | ||
585 | unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - | ||
586 | (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) - | ||
587 | (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); | ||
588 | if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { | ||
589 | DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); | ||
590 | return -EINVAL; | ||
591 | } | ||
592 | if (reorder) { | ||
593 | DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); | ||
594 | return -EINVAL; | ||
595 | } | ||
596 | } | ||
597 | |||
598 | /* Vertex DMA doesn't work with command DMA at the same time, | ||
599 | * so we use BCI_... to submit commands here. Flush buffered | ||
600 | * faked DMA first. */ | ||
601 | DMA_FLUSH(); | ||
602 | |||
603 | if (dmabuf->bus_address != dev_priv->state.common.vbaddr) { | ||
604 | BEGIN_BCI(2); | ||
605 | BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1); | ||
606 | BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type); | ||
607 | dev_priv->state.common.vbaddr = dmabuf->bus_address; | ||
608 | } | ||
609 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) { | ||
610 | /* Workaround for what looks like a hardware bug. If a | ||
611 | * WAIT_3D_IDLE was emitted some time before the | ||
612 | * indexed drawing command then the engine will lock | ||
613 | * up. There are two known workarounds: | ||
614 | * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */ | ||
615 | BEGIN_BCI(63); | ||
616 | for (i = 0; i < 63; ++i) | ||
617 | BCI_WRITE(BCI_CMD_WAIT); | ||
618 | dev_priv->waiting = 0; | ||
619 | } | ||
620 | |||
621 | prim <<= 25; | ||
622 | while (n != 0) { | ||
623 | /* Can emit up to 255 indices (85 triangles) at once. */ | ||
624 | unsigned int count = n > 255 ? 255 : n; | ||
625 | |||
626 | /* check indices */ | ||
627 | for (i = 0; i < count; ++i) { | ||
628 | if (idx[i] > dmabuf->total / 32) { | ||
629 | DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", | ||
630 | i, idx[i], dmabuf->total / 32); | ||
631 | return -EINVAL; | ||
632 | } | ||
633 | } | ||
634 | |||
635 | if (reorder) { | ||
636 | /* Need to reorder indices for correct flat | ||
637 | * shading while preserving the clock sense | ||
638 | * for correct culling. Only on Savage3D. */ | ||
639 | int reorder[3] = { 2, -1, -1 }; | ||
640 | |||
641 | BEGIN_BCI((count + 1 + 1) / 2); | ||
642 | BCI_DRAW_INDICES_S3D(count, prim, idx[2]); | ||
643 | |||
644 | for (i = 1; i + 1 < count; i += 2) | ||
645 | BCI_WRITE(idx[i + reorder[i % 3]] | | ||
646 | (idx[i + 1 + | ||
647 | reorder[(i + 1) % 3]] << 16)); | ||
648 | if (i < count) | ||
649 | BCI_WRITE(idx[i + reorder[i % 3]]); | ||
650 | } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
651 | BEGIN_BCI((count + 1 + 1) / 2); | ||
652 | BCI_DRAW_INDICES_S3D(count, prim, idx[0]); | ||
653 | |||
654 | for (i = 1; i + 1 < count; i += 2) | ||
655 | BCI_WRITE(idx[i] | (idx[i + 1] << 16)); | ||
656 | if (i < count) | ||
657 | BCI_WRITE(idx[i]); | ||
658 | } else { | ||
659 | BEGIN_BCI((count + 2 + 1) / 2); | ||
660 | BCI_DRAW_INDICES_S4(count, prim, skip); | ||
661 | |||
662 | for (i = 0; i + 1 < count; i += 2) | ||
663 | BCI_WRITE(idx[i] | (idx[i + 1] << 16)); | ||
664 | if (i < count) | ||
665 | BCI_WRITE(idx[i]); | ||
666 | } | ||
667 | |||
668 | idx += count; | ||
669 | n -= count; | ||
670 | |||
671 | prim |= BCI_CMD_DRAW_CONT; | ||
672 | } | ||
673 | |||
674 | return 0; | ||
675 | } | ||
676 | |||
677 | static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv, | ||
678 | const drm_savage_cmd_header_t * cmd_header, | ||
679 | const uint16_t *idx, | ||
680 | const uint32_t *vtxbuf, | ||
681 | unsigned int vb_size, unsigned int vb_stride) | ||
682 | { | ||
683 | unsigned char reorder = 0; | ||
684 | unsigned int prim = cmd_header->idx.prim; | ||
685 | unsigned int skip = cmd_header->idx.skip; | ||
686 | unsigned int n = cmd_header->idx.count; | ||
687 | unsigned int vtx_size; | ||
688 | unsigned int i; | ||
689 | DMA_LOCALS; | ||
690 | |||
691 | if (!n) | ||
692 | return 0; | ||
693 | |||
694 | switch (prim) { | ||
695 | case SAVAGE_PRIM_TRILIST_201: | ||
696 | reorder = 1; | ||
697 | prim = SAVAGE_PRIM_TRILIST; | ||
698 | case SAVAGE_PRIM_TRILIST: | ||
699 | if (n % 3 != 0) { | ||
700 | DRM_ERROR("wrong number of indices %u in TRILIST\n", n); | ||
701 | return -EINVAL; | ||
702 | } | ||
703 | break; | ||
704 | case SAVAGE_PRIM_TRISTRIP: | ||
705 | case SAVAGE_PRIM_TRIFAN: | ||
706 | if (n < 3) { | ||
707 | DRM_ERROR | ||
708 | ("wrong number of indices %u in TRIFAN/STRIP\n", n); | ||
709 | return -EINVAL; | ||
710 | } | ||
711 | break; | ||
712 | default: | ||
713 | DRM_ERROR("invalid primitive type %u\n", prim); | ||
714 | return -EINVAL; | ||
715 | } | ||
716 | |||
717 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
718 | if (skip > SAVAGE_SKIP_ALL_S3D) { | ||
719 | DRM_ERROR("invalid skip flags 0x%04x\n", skip); | ||
720 | return -EINVAL; | ||
721 | } | ||
722 | vtx_size = 8; /* full vertex */ | ||
723 | } else { | ||
724 | if (skip > SAVAGE_SKIP_ALL_S4) { | ||
725 | DRM_ERROR("invalid skip flags 0x%04x\n", skip); | ||
726 | return -EINVAL; | ||
727 | } | ||
728 | vtx_size = 10; /* full vertex */ | ||
729 | } | ||
730 | |||
731 | vtx_size -= (skip & 1) + (skip >> 1 & 1) + | ||
732 | (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) + | ||
733 | (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1); | ||
734 | |||
735 | if (vtx_size > vb_stride) { | ||
736 | DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", | ||
737 | vtx_size, vb_stride); | ||
738 | return -EINVAL; | ||
739 | } | ||
740 | |||
741 | prim <<= 25; | ||
742 | while (n != 0) { | ||
743 | /* Can emit up to 255 vertices (85 triangles) at once. */ | ||
744 | unsigned int count = n > 255 ? 255 : n; | ||
745 | |||
746 | /* Check indices */ | ||
747 | for (i = 0; i < count; ++i) { | ||
748 | if (idx[i] > vb_size / (vb_stride * 4)) { | ||
749 | DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", | ||
750 | i, idx[i], vb_size / (vb_stride * 4)); | ||
751 | return -EINVAL; | ||
752 | } | ||
753 | } | ||
754 | |||
755 | if (reorder) { | ||
756 | /* Need to reorder vertices for correct flat | ||
757 | * shading while preserving the clock sense | ||
758 | * for correct culling. Only on Savage3D. */ | ||
759 | int reorder[3] = { 2, -1, -1 }; | ||
760 | |||
761 | BEGIN_DMA(count * vtx_size + 1); | ||
762 | DMA_DRAW_PRIMITIVE(count, prim, skip); | ||
763 | |||
764 | for (i = 0; i < count; ++i) { | ||
765 | unsigned int j = idx[i + reorder[i % 3]]; | ||
766 | DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); | ||
767 | } | ||
768 | |||
769 | DMA_COMMIT(); | ||
770 | } else { | ||
771 | BEGIN_DMA(count * vtx_size + 1); | ||
772 | DMA_DRAW_PRIMITIVE(count, prim, skip); | ||
773 | |||
774 | for (i = 0; i < count; ++i) { | ||
775 | unsigned int j = idx[i]; | ||
776 | DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); | ||
777 | } | ||
778 | |||
779 | DMA_COMMIT(); | ||
780 | } | ||
781 | |||
782 | idx += count; | ||
783 | n -= count; | ||
784 | |||
785 | prim |= BCI_CMD_DRAW_CONT; | ||
786 | } | ||
787 | |||
788 | return 0; | ||
789 | } | ||
790 | |||
791 | static int savage_dispatch_clear(drm_savage_private_t * dev_priv, | ||
792 | const drm_savage_cmd_header_t * cmd_header, | ||
793 | const drm_savage_cmd_header_t *data, | ||
794 | unsigned int nbox, | ||
795 | const struct drm_clip_rect *boxes) | ||
796 | { | ||
797 | unsigned int flags = cmd_header->clear0.flags; | ||
798 | unsigned int clear_cmd; | ||
799 | unsigned int i, nbufs; | ||
800 | DMA_LOCALS; | ||
801 | |||
802 | if (nbox == 0) | ||
803 | return 0; | ||
804 | |||
805 | clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP | | ||
806 | BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW; | ||
807 | BCI_CMD_SET_ROP(clear_cmd, 0xCC); | ||
808 | |||
809 | nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) + | ||
810 | ((flags & SAVAGE_BACK) ? 1 : 0) + ((flags & SAVAGE_DEPTH) ? 1 : 0); | ||
811 | if (nbufs == 0) | ||
812 | return 0; | ||
813 | |||
814 | if (data->clear1.mask != 0xffffffff) { | ||
815 | /* set mask */ | ||
816 | BEGIN_DMA(2); | ||
817 | DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); | ||
818 | DMA_WRITE(data->clear1.mask); | ||
819 | DMA_COMMIT(); | ||
820 | } | ||
821 | for (i = 0; i < nbox; ++i) { | ||
822 | unsigned int x, y, w, h; | ||
823 | unsigned int buf; | ||
824 | x = boxes[i].x1, y = boxes[i].y1; | ||
825 | w = boxes[i].x2 - boxes[i].x1; | ||
826 | h = boxes[i].y2 - boxes[i].y1; | ||
827 | BEGIN_DMA(nbufs * 6); | ||
828 | for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) { | ||
829 | if (!(flags & buf)) | ||
830 | continue; | ||
831 | DMA_WRITE(clear_cmd); | ||
832 | switch (buf) { | ||
833 | case SAVAGE_FRONT: | ||
834 | DMA_WRITE(dev_priv->front_offset); | ||
835 | DMA_WRITE(dev_priv->front_bd); | ||
836 | break; | ||
837 | case SAVAGE_BACK: | ||
838 | DMA_WRITE(dev_priv->back_offset); | ||
839 | DMA_WRITE(dev_priv->back_bd); | ||
840 | break; | ||
841 | case SAVAGE_DEPTH: | ||
842 | DMA_WRITE(dev_priv->depth_offset); | ||
843 | DMA_WRITE(dev_priv->depth_bd); | ||
844 | break; | ||
845 | } | ||
846 | DMA_WRITE(data->clear1.value); | ||
847 | DMA_WRITE(BCI_X_Y(x, y)); | ||
848 | DMA_WRITE(BCI_W_H(w, h)); | ||
849 | } | ||
850 | DMA_COMMIT(); | ||
851 | } | ||
852 | if (data->clear1.mask != 0xffffffff) { | ||
853 | /* reset mask */ | ||
854 | BEGIN_DMA(2); | ||
855 | DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); | ||
856 | DMA_WRITE(0xffffffff); | ||
857 | DMA_COMMIT(); | ||
858 | } | ||
859 | |||
860 | return 0; | ||
861 | } | ||
862 | |||
863 | static int savage_dispatch_swap(drm_savage_private_t * dev_priv, | ||
864 | unsigned int nbox, const struct drm_clip_rect *boxes) | ||
865 | { | ||
866 | unsigned int swap_cmd; | ||
867 | unsigned int i; | ||
868 | DMA_LOCALS; | ||
869 | |||
870 | if (nbox == 0) | ||
871 | return 0; | ||
872 | |||
873 | swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP | | ||
874 | BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD; | ||
875 | BCI_CMD_SET_ROP(swap_cmd, 0xCC); | ||
876 | |||
877 | for (i = 0; i < nbox; ++i) { | ||
878 | BEGIN_DMA(6); | ||
879 | DMA_WRITE(swap_cmd); | ||
880 | DMA_WRITE(dev_priv->back_offset); | ||
881 | DMA_WRITE(dev_priv->back_bd); | ||
882 | DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1)); | ||
883 | DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1)); | ||
884 | DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1, | ||
885 | boxes[i].y2 - boxes[i].y1)); | ||
886 | DMA_COMMIT(); | ||
887 | } | ||
888 | |||
889 | return 0; | ||
890 | } | ||
891 | |||
892 | static int savage_dispatch_draw(drm_savage_private_t * dev_priv, | ||
893 | const drm_savage_cmd_header_t *start, | ||
894 | const drm_savage_cmd_header_t *end, | ||
895 | const struct drm_buf * dmabuf, | ||
896 | const unsigned int *vtxbuf, | ||
897 | unsigned int vb_size, unsigned int vb_stride, | ||
898 | unsigned int nbox, | ||
899 | const struct drm_clip_rect *boxes) | ||
900 | { | ||
901 | unsigned int i, j; | ||
902 | int ret; | ||
903 | |||
904 | for (i = 0; i < nbox; ++i) { | ||
905 | const drm_savage_cmd_header_t *cmdbuf; | ||
906 | dev_priv->emit_clip_rect(dev_priv, &boxes[i]); | ||
907 | |||
908 | cmdbuf = start; | ||
909 | while (cmdbuf < end) { | ||
910 | drm_savage_cmd_header_t cmd_header; | ||
911 | cmd_header = *cmdbuf; | ||
912 | cmdbuf++; | ||
913 | switch (cmd_header.cmd.cmd) { | ||
914 | case SAVAGE_CMD_DMA_PRIM: | ||
915 | ret = savage_dispatch_dma_prim( | ||
916 | dev_priv, &cmd_header, dmabuf); | ||
917 | break; | ||
918 | case SAVAGE_CMD_VB_PRIM: | ||
919 | ret = savage_dispatch_vb_prim( | ||
920 | dev_priv, &cmd_header, | ||
921 | vtxbuf, vb_size, vb_stride); | ||
922 | break; | ||
923 | case SAVAGE_CMD_DMA_IDX: | ||
924 | j = (cmd_header.idx.count + 3) / 4; | ||
925 | 				/* j was checked in savage_bci_cmdbuf */ | ||
926 | ret = savage_dispatch_dma_idx(dev_priv, | ||
927 | &cmd_header, (const uint16_t *)cmdbuf, | ||
928 | dmabuf); | ||
929 | cmdbuf += j; | ||
930 | break; | ||
931 | case SAVAGE_CMD_VB_IDX: | ||
932 | j = (cmd_header.idx.count + 3) / 4; | ||
933 | 				/* j was checked in savage_bci_cmdbuf */ | ||
934 | ret = savage_dispatch_vb_idx(dev_priv, | ||
935 | &cmd_header, (const uint16_t *)cmdbuf, | ||
936 | (const uint32_t *)vtxbuf, vb_size, | ||
937 | vb_stride); | ||
938 | cmdbuf += j; | ||
939 | break; | ||
940 | default: | ||
941 | /* What's the best return code? EFAULT? */ | ||
942 | DRM_ERROR("IMPLEMENTATION ERROR: " | ||
943 | "non-drawing-command %d\n", | ||
944 | cmd_header.cmd.cmd); | ||
945 | return -EINVAL; | ||
946 | } | ||
947 | |||
948 | if (ret != 0) | ||
949 | return ret; | ||
950 | } | ||
951 | } | ||
952 | |||
953 | return 0; | ||
954 | } | ||
955 | |||
956 | int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
957 | { | ||
958 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
959 | struct drm_device_dma *dma = dev->dma; | ||
960 | struct drm_buf *dmabuf; | ||
961 | drm_savage_cmdbuf_t *cmdbuf = data; | ||
962 | drm_savage_cmd_header_t *kcmd_addr = NULL; | ||
963 | drm_savage_cmd_header_t *first_draw_cmd; | ||
964 | unsigned int *kvb_addr = NULL; | ||
965 | struct drm_clip_rect *kbox_addr = NULL; | ||
966 | unsigned int i, j; | ||
967 | int ret = 0; | ||
968 | |||
969 | DRM_DEBUG("\n"); | ||
970 | |||
971 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
972 | |||
973 | if (dma && dma->buflist) { | ||
974 | 		if (cmdbuf->dma_idx >= dma->buf_count) { | ||
975 | DRM_ERROR | ||
976 | ("vertex buffer index %u out of range (0-%u)\n", | ||
977 | cmdbuf->dma_idx, dma->buf_count - 1); | ||
978 | return -EINVAL; | ||
979 | } | ||
980 | dmabuf = dma->buflist[cmdbuf->dma_idx]; | ||
981 | } else { | ||
982 | dmabuf = NULL; | ||
983 | } | ||
984 | |||
985 | /* Copy the user buffers into kernel temporary areas. This hasn't been | ||
986 | * a performance loss compared to VERIFYAREA_READ/ | ||
987 | * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct | ||
988 | * for locking on FreeBSD. | ||
989 | */ | ||
990 | if (cmdbuf->size) { | ||
991 | kcmd_addr = drm_alloc(cmdbuf->size * 8, DRM_MEM_DRIVER); | ||
992 | if (kcmd_addr == NULL) | ||
993 | return -ENOMEM; | ||
994 | |||
995 | if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr, | ||
996 | cmdbuf->size * 8)) | ||
997 | { | ||
998 | drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER); | ||
999 | return -EFAULT; | ||
1000 | } | ||
1001 | cmdbuf->cmd_addr = kcmd_addr; | ||
1002 | } | ||
1003 | if (cmdbuf->vb_size) { | ||
1004 | kvb_addr = drm_alloc(cmdbuf->vb_size, DRM_MEM_DRIVER); | ||
1005 | if (kvb_addr == NULL) { | ||
1006 | ret = -ENOMEM; | ||
1007 | goto done; | ||
1008 | } | ||
1009 | |||
1010 | if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf->vb_addr, | ||
1011 | cmdbuf->vb_size)) { | ||
1012 | ret = -EFAULT; | ||
1013 | goto done; | ||
1014 | } | ||
1015 | cmdbuf->vb_addr = kvb_addr; | ||
1016 | } | ||
1017 | if (cmdbuf->nbox) { | ||
1018 | kbox_addr = drm_alloc(cmdbuf->nbox * sizeof(struct drm_clip_rect), | ||
1019 | DRM_MEM_DRIVER); | ||
1020 | if (kbox_addr == NULL) { | ||
1021 | ret = -ENOMEM; | ||
1022 | goto done; | ||
1023 | } | ||
1024 | |||
1025 | if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr, | ||
1026 | cmdbuf->nbox * sizeof(struct drm_clip_rect))) { | ||
1027 | ret = -EFAULT; | ||
1028 | goto done; | ||
1029 | } | ||
1030 | cmdbuf->box_addr = kbox_addr; | ||
1031 | } | ||
1032 | |||
1033 | /* Make sure writes to DMA buffers are finished before sending | ||
1034 | * DMA commands to the graphics hardware. */ | ||
1035 | DRM_MEMORYBARRIER(); | ||
1036 | |||
1037 | /* Coming from user space. Don't know if the Xserver has | ||
1038 | * emitted wait commands. Assuming the worst. */ | ||
1039 | dev_priv->waiting = 1; | ||
1040 | |||
1041 | i = 0; | ||
1042 | first_draw_cmd = NULL; | ||
1043 | while (i < cmdbuf->size) { | ||
1044 | drm_savage_cmd_header_t cmd_header; | ||
1045 | cmd_header = *(drm_savage_cmd_header_t *)cmdbuf->cmd_addr; | ||
1046 | cmdbuf->cmd_addr++; | ||
1047 | i++; | ||
1048 | |||
1049 | /* Group drawing commands with same state to minimize | ||
1050 | * iterations over clip rects. */ | ||
1051 | j = 0; | ||
1052 | switch (cmd_header.cmd.cmd) { | ||
1053 | case SAVAGE_CMD_DMA_IDX: | ||
1054 | case SAVAGE_CMD_VB_IDX: | ||
1055 | j = (cmd_header.idx.count + 3) / 4; | ||
1056 | if (i + j > cmdbuf->size) { | ||
1057 | DRM_ERROR("indexed drawing command extends " | ||
1058 | "beyond end of command buffer\n"); | ||
1059 | DMA_FLUSH(); | ||
1060 | 				ret = -EINVAL; goto done; | ||
1061 | } | ||
1062 | /* fall through */ | ||
1063 | case SAVAGE_CMD_DMA_PRIM: | ||
1064 | case SAVAGE_CMD_VB_PRIM: | ||
1065 | if (!first_draw_cmd) | ||
1066 | first_draw_cmd = cmdbuf->cmd_addr - 1; | ||
1067 | cmdbuf->cmd_addr += j; | ||
1068 | i += j; | ||
1069 | break; | ||
1070 | default: | ||
1071 | if (first_draw_cmd) { | ||
1072 | ret = savage_dispatch_draw( | ||
1073 | dev_priv, first_draw_cmd, | ||
1074 | cmdbuf->cmd_addr - 1, | ||
1075 | dmabuf, cmdbuf->vb_addr, cmdbuf->vb_size, | ||
1076 | cmdbuf->vb_stride, | ||
1077 | cmdbuf->nbox, cmdbuf->box_addr); | ||
1078 | if (ret != 0) | ||
1079 | 					goto done; | ||
1080 | first_draw_cmd = NULL; | ||
1081 | } | ||
1082 | } | ||
1083 | if (first_draw_cmd) | ||
1084 | continue; | ||
1085 | |||
1086 | switch (cmd_header.cmd.cmd) { | ||
1087 | case SAVAGE_CMD_STATE: | ||
1088 | j = (cmd_header.state.count + 1) / 2; | ||
1089 | if (i + j > cmdbuf->size) { | ||
1090 | DRM_ERROR("command SAVAGE_CMD_STATE extends " | ||
1091 | "beyond end of command buffer\n"); | ||
1092 | DMA_FLUSH(); | ||
1093 | ret = -EINVAL; | ||
1094 | goto done; | ||
1095 | } | ||
1096 | ret = savage_dispatch_state(dev_priv, &cmd_header, | ||
1097 | (const uint32_t *)cmdbuf->cmd_addr); | ||
1098 | cmdbuf->cmd_addr += j; | ||
1099 | i += j; | ||
1100 | break; | ||
1101 | case SAVAGE_CMD_CLEAR: | ||
1102 | if (i + 1 > cmdbuf->size) { | ||
1103 | DRM_ERROR("command SAVAGE_CMD_CLEAR extends " | ||
1104 | "beyond end of command buffer\n"); | ||
1105 | DMA_FLUSH(); | ||
1106 | ret = -EINVAL; | ||
1107 | goto done; | ||
1108 | } | ||
1109 | ret = savage_dispatch_clear(dev_priv, &cmd_header, | ||
1110 | cmdbuf->cmd_addr, | ||
1111 | cmdbuf->nbox, | ||
1112 | cmdbuf->box_addr); | ||
1113 | cmdbuf->cmd_addr++; | ||
1114 | i++; | ||
1115 | break; | ||
1116 | case SAVAGE_CMD_SWAP: | ||
1117 | ret = savage_dispatch_swap(dev_priv, cmdbuf->nbox, | ||
1118 | cmdbuf->box_addr); | ||
1119 | break; | ||
1120 | default: | ||
1121 | DRM_ERROR("invalid command 0x%x\n", | ||
1122 | cmd_header.cmd.cmd); | ||
1123 | DMA_FLUSH(); | ||
1124 | ret = -EINVAL; | ||
1125 | goto done; | ||
1126 | } | ||
1127 | |||
1128 | if (ret != 0) { | ||
1129 | DMA_FLUSH(); | ||
1130 | goto done; | ||
1131 | } | ||
1132 | } | ||
1133 | |||
1134 | if (first_draw_cmd) { | ||
1135 | ret = savage_dispatch_draw ( | ||
1136 | dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf, | ||
1137 | cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride, | ||
1138 | cmdbuf->nbox, cmdbuf->box_addr); | ||
1139 | if (ret != 0) { | ||
1140 | DMA_FLUSH(); | ||
1141 | goto done; | ||
1142 | } | ||
1143 | } | ||
1144 | |||
1145 | DMA_FLUSH(); | ||
1146 | |||
1147 | if (dmabuf && cmdbuf->discard) { | ||
1148 | drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private; | ||
1149 | uint16_t event; | ||
1150 | event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D); | ||
1151 | SET_AGE(&buf_priv->age, event, dev_priv->event_wrap); | ||
1152 | savage_freelist_put(dev, dmabuf); | ||
1153 | } | ||
1154 | |||
1155 | done: | ||
1156 | /* If we didn't need to allocate them, these'll be NULL */ | ||
1157 | drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER); | ||
1158 | drm_free(kvb_addr, cmdbuf->vb_size, DRM_MEM_DRIVER); | ||
1159 | drm_free(kbox_addr, cmdbuf->nbox * sizeof(struct drm_clip_rect), | ||
1160 | DRM_MEM_DRIVER); | ||
1161 | |||
1162 | return ret; | ||
1163 | } | ||