Diffstat (limited to 'drivers/char/drm/via_verifier.c')
-rw-r--r--  drivers/char/drm/via_verifier.c | 1061
1 file changed, 1061 insertions, 0 deletions
diff --git a/drivers/char/drm/via_verifier.c b/drivers/char/drm/via_verifier.c
new file mode 100644
index 00000000000..07923b0c7a9
--- /dev/null
+++ b/drivers/char/drm/via_verifier.c
@@ -0,0 +1,1061 @@
1/*
2 * Copyright 2004 The Unichrome Project. All Rights Reserved.
3 * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Author: Thomas Hellstrom 2004, 2005.
25 * This code was written using docs obtained under NDA from VIA Inc.
26 *
27 * Don't run this code directly on an AGP buffer. Due to cache problems it will
28 * be very slow.
29 */
30
31
32#include "via_3d_reg.h"
33#include "drmP.h"
34#include "drm.h"
35#include "via_drm.h"
36#include "via_verifier.h"
37#include "via_drv.h"
38
39typedef enum{
40 state_command,
41 state_header2,
42 state_header1,
43 state_vheader5,
44 state_vheader6,
45 state_error
46} verifier_state_t;
47
48
49typedef enum{
50 no_check = 0,
51 check_for_header2,
52 check_for_header1,
53 check_for_header2_err,
54 check_for_header1_err,
55 check_for_fire,
56 check_z_buffer_addr0,
57 check_z_buffer_addr1,
58 check_z_buffer_addr_mode,
59 check_destination_addr0,
60 check_destination_addr1,
61 check_destination_addr_mode,
62 check_for_dummy,
63 check_for_dd,
64 check_texture_addr0,
65 check_texture_addr1,
66 check_texture_addr2,
67 check_texture_addr3,
68 check_texture_addr4,
69 check_texture_addr5,
70 check_texture_addr6,
71 check_texture_addr7,
72 check_texture_addr8,
73 check_texture_addr_mode,
74 check_for_vertex_count,
75 check_number_texunits,
76 forbidden_command
77} hazard_t;
78
79/*
80 * Associates each hazard above with a possible multi-command
81 * sequence. For example, an address that is split over multiple
82 * commands needs to be checked at the first command that does
83 * not include any part of the address.
84 */
85
86static drm_via_sequence_t seqs[] = {
87 no_sequence,
88 no_sequence,
89 no_sequence,
90 no_sequence,
91 no_sequence,
92 no_sequence,
93 z_address,
94 z_address,
95 z_address,
96 dest_address,
97 dest_address,
98 dest_address,
99 no_sequence,
100 no_sequence,
101 tex_address,
102 tex_address,
103 tex_address,
104 tex_address,
105 tex_address,
106 tex_address,
107 tex_address,
108 tex_address,
109 tex_address,
110 tex_address,
111 no_sequence,
    no_sequence,
    no_sequence
112};
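/*
 * Example of how the two tables above cooperate: check_z_buffer_addr0,
 * check_z_buffer_addr1 and check_z_buffer_addr_mode all map to z_address,
 * so the Z buffer address assembled from commands 0x10 and 0x11 is kept as
 * an unfinished z_address sequence until a command belonging to a different
 * (or no) sequence arrives, at which point finish_current_sequence()
 * completes it. The same pattern applies to dest_address and tex_address.
 */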
113
114typedef struct{
115 unsigned int code;
116 hazard_t hz;
117} hz_init_t;
118
119
120
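/*
 * The init tables below list, for each command index (the high byte of a
 * HALCYON_HEADER2 data dword), the hazard check that applies to it. They
 * are expanded by via_init_command_verifier() / setup_hazard_table() into
 * the 256-entry tables table1, table2 and table3; any index not listed
 * here ends up as forbidden_command. table1 covers HC_ParaType_NotTex
 * commands, table2 the texture parameter types, and table3 the
 * HC_SubType_TexGeneral subtype.
 */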
121static hz_init_t init_table1[] = {
122 {0xf2, check_for_header2_err},
123 {0xf0, check_for_header1_err},
124 {0xee, check_for_fire},
125 {0xcc, check_for_dummy},
126 {0xdd, check_for_dd},
127 {0x00, no_check},
128 {0x10, check_z_buffer_addr0},
129 {0x11, check_z_buffer_addr1},
130 {0x12, check_z_buffer_addr_mode},
131 {0x13, no_check},
132 {0x14, no_check},
133 {0x15, no_check},
134 {0x23, no_check},
135 {0x24, no_check},
136 {0x33, no_check},
137 {0x34, no_check},
138 {0x35, no_check},
139 {0x36, no_check},
140 {0x37, no_check},
141 {0x38, no_check},
142 {0x39, no_check},
143 {0x3A, no_check},
144 {0x3B, no_check},
145 {0x3C, no_check},
146 {0x3D, no_check},
147 {0x3E, no_check},
148 {0x40, check_destination_addr0},
149 {0x41, check_destination_addr1},
150 {0x42, check_destination_addr_mode},
151 {0x43, no_check},
152 {0x44, no_check},
153 {0x50, no_check},
154 {0x51, no_check},
155 {0x52, no_check},
156 {0x53, no_check},
157 {0x54, no_check},
158 {0x55, no_check},
159 {0x56, no_check},
160 {0x57, no_check},
161 {0x58, no_check},
162 {0x70, no_check},
163 {0x71, no_check},
164 {0x78, no_check},
165 {0x79, no_check},
166 {0x7A, no_check},
167 {0x7B, no_check},
168 {0x7C, no_check},
169 {0x7D, check_for_vertex_count}
170};
171
172
173
174static hz_init_t init_table2[] = {
175 {0xf2, check_for_header2_err},
176 {0xf0, check_for_header1_err},
177 {0xee, check_for_fire},
178 {0xcc, check_for_dummy},
179 {0x00, check_texture_addr0},
180 {0x01, check_texture_addr0},
181 {0x02, check_texture_addr0},
182 {0x03, check_texture_addr0},
183 {0x04, check_texture_addr0},
184 {0x05, check_texture_addr0},
185 {0x06, check_texture_addr0},
186 {0x07, check_texture_addr0},
187 {0x08, check_texture_addr0},
188 {0x09, check_texture_addr0},
189 {0x20, check_texture_addr1},
190 {0x21, check_texture_addr1},
191 {0x22, check_texture_addr1},
192 {0x23, check_texture_addr4},
193 {0x2B, check_texture_addr3},
194 {0x2C, check_texture_addr3},
195 {0x2D, check_texture_addr3},
196 {0x2E, check_texture_addr3},
197 {0x2F, check_texture_addr3},
198 {0x30, check_texture_addr3},
199 {0x31, check_texture_addr3},
200 {0x32, check_texture_addr3},
201 {0x33, check_texture_addr3},
202 {0x34, check_texture_addr3},
203 {0x4B, check_texture_addr5},
204 {0x4C, check_texture_addr6},
205 {0x51, check_texture_addr7},
206 {0x52, check_texture_addr8},
207 {0x77, check_texture_addr2},
208 {0x78, no_check},
209 {0x79, no_check},
210 {0x7A, no_check},
211 {0x7B, check_texture_addr_mode},
212 {0x7C, no_check},
213 {0x7D, no_check},
214 {0x7E, no_check},
215 {0x7F, no_check},
216 {0x80, no_check},
217 {0x81, no_check},
218 {0x82, no_check},
219 {0x83, no_check},
220 {0x85, no_check},
221 {0x86, no_check},
222 {0x87, no_check},
223 {0x88, no_check},
224 {0x89, no_check},
225 {0x8A, no_check},
226 {0x90, no_check},
227 {0x91, no_check},
228 {0x92, no_check},
229 {0x93, no_check}
230};
231
232static hz_init_t init_table3[] = {
233 {0xf2, check_for_header2_err},
234 {0xf0, check_for_header1_err},
235 {0xcc, check_for_dummy},
236 {0x00, check_number_texunits}
237};
238
239
240static hazard_t table1[256];
241static hazard_t table2[256];
242static hazard_t table3[256];
243
244
245
246static __inline__ int
247eat_words(const uint32_t **buf, const uint32_t *buf_end, unsigned num_words)
248{
249 if ((buf_end - *buf) >= num_words) {
250 *buf += num_words;
251 return 0;
252 }
253 DRM_ERROR("Illegal termination of DMA command buffer\n");
254 return 1;
255}
256
257
258/*
259 * Partially stolen from drm_memory.h
260 */
261
262static __inline__ drm_map_t *
263via_drm_lookup_agp_map (drm_via_state_t *seq, unsigned long offset, unsigned long size,
264 drm_device_t *dev)
265{
266 struct list_head *list;
267 drm_map_list_t *r_list;
268 drm_map_t *map = seq->map_cache;
269
270 if (map && map->offset <= offset && (offset + size) <= (map->offset + map->size)) {
271 return map;
272 }
273
274 list_for_each(list, &dev->maplist->head) {
275 r_list = (drm_map_list_t *) list;
276 map = r_list->map;
277 if (!map)
278 continue;
279 if (map->offset <= offset && (offset + size) <= (map->offset + map->size) &&
280 !(map->flags & _DRM_RESTRICTED) && (map->type == _DRM_AGP)) {
281 seq->map_cache = map;
282 return map;
283 }
284 }
285 return NULL;
286}
287
288
289/*
290 * Require that all AGP texture levels reside in the same AGP map, which should
291 * be mappable by the client. This is not a big restriction.
292 * FIXME: To actually enforce this security policy strictly, drm_rmmap
293 * would have to wait for DMA quiescence before removing an AGP map.
294 * In practice, the via_drm_lookup_agp_map call seems to take
295 * very little CPU time.
296 */
297
298
299static __inline__ int
300finish_current_sequence(drm_via_state_t *cur_seq)
301{
302 switch(cur_seq->unfinished) {
303 case z_address:
304 DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr);
305 break;
306 case dest_address:
307 DRM_DEBUG("Destination start address is 0x%x\n", cur_seq->d_addr);
308 break;
309 case tex_address:
310 if (cur_seq->agp_texture) {
311 unsigned start = cur_seq->tex_level_lo[cur_seq->texture];
312 unsigned end = cur_seq->tex_level_hi[cur_seq->texture];
313 unsigned long lo=~0, hi=0, tmp;
314 uint32_t *addr, *pitch, *height, tex;
315 unsigned i;
316
317 if (end > 9) end = 9;
318 if (start > 9) start = 9;
319
320 addr = &(cur_seq->t_addr[tex = cur_seq->texture][start]);
321 pitch = &(cur_seq->pitch[tex][start]);
322 height = &(cur_seq->height[tex][start]);
323
324 for (i=start; i<= end; ++i) {
325 tmp = *addr++;
326 if (tmp < lo) lo = tmp;
327 tmp += (*height++ << *pitch++);
328 if (tmp > hi) hi = tmp;
329 }
330
331 if (! via_drm_lookup_agp_map (cur_seq, lo, hi - lo, cur_seq->dev)) {
332 DRM_ERROR("AGP texture is not in allowed map\n");
333 return 2;
334 }
335 }
336 break;
337 default:
338 break;
339 }
340 cur_seq->unfinished = no_sequence;
341 return 0;
342}
343
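/*
 * Return value convention of investigate_hazard(), as relied upon by
 * via_check_header2():
 *  0 - the dword was accepted and fully handled here.
 *  1 - the dword is not part of the current HEADER2 block (it is a new
 *      header or a fire command); the caller backs up one dword and
 *      leaves the HEADER2 state.
 *  2 - a security or consistency violation; the caller flags an error.
 */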
344static __inline__ int
345investigate_hazard( uint32_t cmd, hazard_t hz, drm_via_state_t *cur_seq)
346{
347 register uint32_t tmp, *tmp_addr;
348
349 if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) {
350 int ret;
351 if ((ret = finish_current_sequence(cur_seq))) return ret;
352 }
353
354 switch(hz) {
355 case check_for_header2:
356 if (cmd == HALCYON_HEADER2) return 1;
357 return 0;
358 case check_for_header1:
359 if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1) return 1;
360 return 0;
361 case check_for_header2_err:
362 if (cmd == HALCYON_HEADER2) return 1;
363 DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n");
364 break;
365 case check_for_header1_err:
366 if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1) return 1;
367 DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n");
368 break;
369 case check_for_fire:
370 if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD) return 1;
371 DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n");
372 break;
373 case check_for_dummy:
374 if (HC_DUMMY == cmd) return 0;
375 DRM_ERROR("Illegal DMA HC_DUMMY command\n");
376 break;
377 case check_for_dd:
378 if (0xdddddddd == cmd) return 0;
379 DRM_ERROR("Illegal DMA 0xdddddddd command\n");
380 break;
381 case check_z_buffer_addr0:
382 cur_seq->unfinished = z_address;
383 cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) |
384 (cmd & 0x00FFFFFF);
385 return 0;
386 case check_z_buffer_addr1:
387 cur_seq->unfinished = z_address;
388 cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) |
389 ((cmd & 0xFF) << 24);
390 return 0;
391 case check_z_buffer_addr_mode:
392 cur_seq->unfinished = z_address;
393 if ((cmd & 0x0000C000) == 0) return 0;
394 DRM_ERROR("Attempt to place Z buffer in system memory\n");
395 return 2;
396 case check_destination_addr0:
397 cur_seq->unfinished = dest_address;
398 cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) |
399 (cmd & 0x00FFFFFF);
400 return 0;
401 case check_destination_addr1:
402 cur_seq->unfinished = dest_address;
403 cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) |
404 ((cmd & 0xFF) << 24);
405 return 0;
406 case check_destination_addr_mode:
407 cur_seq->unfinished = dest_address;
408 if ((cmd & 0x0000C000) == 0) return 0;
409 DRM_ERROR("Attempt to place 3D drawing buffer in system memory\n");
410 return 2;
411 case check_texture_addr0:
412 cur_seq->unfinished = tex_address;
413 tmp = (cmd >> 24);
414 tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
415 *tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF);
416 return 0;
417 case check_texture_addr1:
418 cur_seq->unfinished = tex_address;
419 tmp = ((cmd >> 24) - 0x20);
420 tmp += tmp << 1;
421 tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
422 *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
423 tmp_addr++;
424 *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16);
425 tmp_addr++;
426 *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8);
427 return 0;
428 case check_texture_addr2:
429 cur_seq->unfinished = tex_address;
430 cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F;
431 cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6;
432 return 0;
433 case check_texture_addr3:
434 cur_seq->unfinished = tex_address;
435 tmp = ((cmd >> 24) - 0x2B);
436 cur_seq->pitch[cur_seq->texture][tmp] = (cmd & 0x00F00000) >> 20;
437 if (!tmp && (cmd & 0x000FFFFF)) {
438 DRM_ERROR("Unimplemented texture level 0 pitch mode.\n");
439 return 2;
440 }
441 return 0;
442 case check_texture_addr4:
443 cur_seq->unfinished = tex_address;
444 tmp_addr = &cur_seq->t_addr[cur_seq->texture][9];
445 *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
446 return 0;
447 case check_texture_addr5:
448 case check_texture_addr6:
449 cur_seq->unfinished = tex_address;
450 /*
451 * Texture width. We don't care since we have the pitch.
452 */
453 return 0;
454 case check_texture_addr7:
455 cur_seq->unfinished = tex_address;
456 tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
457 tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20);
458 tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16);
459 tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12);
460 tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8);
461 tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4);
462 tmp_addr[0] = 1 << (cmd & 0x0000000F);
463 return 0;
464 case check_texture_addr8:
465 cur_seq->unfinished = tex_address;
466 tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
467 tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12);
468 tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8);
469 tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4);
470 tmp_addr[6] = 1 << (cmd & 0x0000000F);
471 return 0;
472 case check_texture_addr_mode:
473 cur_seq->unfinished = tex_address;
474 if ( 2 == (tmp = cmd & 0x00000003)) {
475 DRM_ERROR("Attempt to fetch texture from system memory.\n");
476 return 2;
477 }
478 cur_seq->agp_texture = (tmp == 3);
479 cur_seq->tex_palette_size[cur_seq->texture] =
480 (cmd >> 16) & 0x000000007;
481 return 0;
482 case check_for_vertex_count:
483 cur_seq->vertex_count = cmd & 0x0000FFFF;
484 return 0;
485 case check_number_texunits:
486 cur_seq->multitex = (cmd >> 3) & 1;
487 return 0;
488 default:
489 DRM_ERROR("Illegal DMA data: 0x%x\n", cmd);
490 return 2;
491 }
492 return 2;
493}
494
495
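/*
 * Rough layout of the vertex data checked below, as implied by the code:
 * a "B" command dword (HC_ACMD_HCmdB) describing the vertex format, a
 * vertex list "A" command dword (HC_ACMD_HCmdA) from which the expected
 * fire dword a_fire is derived, then dw_count dwords per vertex until the
 * fire command (which may appear twice in a row). For example, a bcmd
 * with bits 9-12 set and no texture bits gives dw_count = 4; each texture
 * coordinate set (bits 7 and 8) adds one dword, or two with multitexturing.
 */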
496static __inline__ int
497via_check_prim_list(uint32_t const **buffer, const uint32_t *buf_end,
498 drm_via_state_t *cur_seq)
499{
500 drm_via_private_t *dev_priv = (drm_via_private_t *) cur_seq->dev->dev_private;
501 uint32_t a_fire, bcmd , dw_count;
502 int ret = 0;
503 int have_fire;
504 const uint32_t *buf = *buffer;
505
506 while(buf < buf_end) {
507 have_fire = 0;
508 if ((buf_end - buf) < 2) {
509 DRM_ERROR("Unexpected termination of primitive list.\n");
510 ret = 1;
511 break;
512 }
513 if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB) break;
514 bcmd = *buf++;
515 if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) {
516 DRM_ERROR("Expected Vertex List A command, got 0x%x\n",
517 *buf);
518 ret = 1;
519 break;
520 }
521 a_fire = *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK | HC_HE3Fire_MASK;
522
523 /*
524 * How many dwords per vertex ?
525 */
526
527 if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) {
528 DRM_ERROR("Illegal B command vertex data for AGP.\n");
529 ret = 1;
530 break;
531 }
532
533 dw_count = 0;
534 if (bcmd & (1 << 7)) dw_count += (cur_seq->multitex) ? 2:1;
535 if (bcmd & (1 << 8)) dw_count += (cur_seq->multitex) ? 2:1;
536 if (bcmd & (1 << 9)) dw_count++;
537 if (bcmd & (1 << 10)) dw_count++;
538 if (bcmd & (1 << 11)) dw_count++;
539 if (bcmd & (1 << 12)) dw_count++;
540 if (bcmd & (1 << 13)) dw_count++;
541 if (bcmd & (1 << 14)) dw_count++;
542
543 while(buf < buf_end) {
544 if (*buf == a_fire) {
545 if (dev_priv->num_fire_offsets >= VIA_FIRE_BUF_SIZE) {
546 DRM_ERROR("Fire offset buffer full.\n");
547 ret = 1;
548 break;
549 }
550 dev_priv->fire_offsets[dev_priv->num_fire_offsets++] = buf;
551 have_fire = 1;
552 buf++;
553 if (buf < buf_end && *buf == a_fire)
554 buf++;
555 break;
556 }
557 if ((*buf == HALCYON_HEADER2) ||
558 ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) {
559 DRM_ERROR("Missing Vertex Fire command, "
560 "Stray Vertex Fire command or verifier "
561 "lost sync.\n");
562 ret = 1;
563 break;
564 }
565 if ((ret = eat_words(&buf, buf_end, dw_count)))
566 break;
567 }
568 if (buf >= buf_end && !have_fire) {
569 DRM_ERROR("Missing Vertex Fire command or verifier "
570 "lost sync.\n");
571 ret = 1;
572 break;
573 }
574 if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) {
575 DRM_ERROR("AGP Primitive list end misaligned.\n");
576 ret = 1;
577 break;
578 }
579 }
580 *buffer = buf;
581 return ret;
582}
583
584
585
586
587
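/*
 * A HALCYON_HEADER2 block, as handled below, looks like:
 *   dword 0:    HALCYON_HEADER2
 *   dword 1:    value for HC_REG_TRANS_SET; bits 16-31 select the parameter
 *               type and thus which hazard table (or special case) applies
 *   dword 2..n: data dwords whose high byte indexes the hazard table
 * The block ends at the end of the buffer or at the first dword that
 * investigate_hazard() identifies as a new header or fire command.
 */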
588static __inline__ verifier_state_t
589via_check_header2( uint32_t const **buffer, const uint32_t *buf_end,
590 drm_via_state_t *hc_state)
591{
592 uint32_t cmd;
593 int hz_mode;
594 hazard_t hz;
595 const uint32_t *buf = *buffer;
596 const hazard_t *hz_table;
597
598
599 if ((buf_end - buf) < 2) {
600 DRM_ERROR("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
601 return state_error;
602 }
603 buf++;
604 cmd = (*buf++ & 0xFFFF0000) >> 16;
605
606 switch(cmd) {
607 case HC_ParaType_CmdVdata:
608 if (via_check_prim_list(&buf, buf_end, hc_state ))
609 return state_error;
610 *buffer = buf;
611 return state_command;
612 case HC_ParaType_NotTex:
613 hz_table = table1;
614 break;
615 case HC_ParaType_Tex:
616 hc_state->texture = 0;
617 hz_table = table2;
618 break;
619 case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)):
620 hc_state->texture = 1;
621 hz_table = table2;
622 break;
623 case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)):
624 hz_table = table3;
625 break;
626 case HC_ParaType_Auto:
627 if (eat_words(&buf, buf_end, 2))
628 return state_error;
629 *buffer = buf;
630 return state_command;
631 case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)):
632 if (eat_words(&buf, buf_end, 32))
633 return state_error;
634 *buffer = buf;
635 return state_command;
636 case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)):
637 case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)):
638 DRM_ERROR("Texture palettes are rejected because of "
639 "lack of info on how to determine their size.\n");
640 return state_error;
641 case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)):
642 DRM_ERROR("Fog factor palettes are rejected because of "
643 "lack of info on how to determine their size.\n");
644 return state_error;
645 default:
646
647 /*
648 * There are some unimplemented HC_ParaTypes here that will
649 * need to be implemented if the Mesa driver is extended.
650 */
651
652 DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 "
653 "DMA subcommand: 0x%x. Previous dword: 0x%x\n",
654 cmd, *(buf -2));
655 *buffer = buf;
656 return state_error;
657 }
658
659 while(buf < buf_end) {
660 cmd = *buf++;
661 if ((hz = hz_table[cmd >> 24])) {
662 if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) {
663 if (hz_mode == 1) {
664 buf--;
665 break;
666 }
667 return state_error;
668 }
669 } else if (hc_state->unfinished &&
670 finish_current_sequence(hc_state)) {
671 return state_error;
672 }
673 }
674 if (hc_state->unfinished && finish_current_sequence(hc_state)) {
675 return state_error;
676 }
677 *buffer = buf;
678 return state_command;
679}
680
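/*
 * The parse functions below do not verify anything; they assume the buffer
 * has already passed via_verify_command_stream() and simply replay it to
 * the hardware through MMIO, using the fire offsets recorded by
 * via_check_prim_list() to know where each vertex burst ends.
 */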
681static __inline__ verifier_state_t
682via_parse_header2( drm_via_private_t *dev_priv, uint32_t const **buffer, const uint32_t *buf_end,
683 int *fire_count)
684{
685 uint32_t cmd;
686 const uint32_t *buf = *buffer;
687 const uint32_t *next_fire;
688 int burst = 0;
689
690 next_fire = dev_priv->fire_offsets[*fire_count];
691 buf++;
692 cmd = (*buf & 0xFFFF0000) >> 16;
693 VIA_WRITE(HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
694 switch(cmd) {
695 case HC_ParaType_CmdVdata:
696 while ((buf < buf_end) &&
697 (*fire_count < dev_priv->num_fire_offsets) &&
698 (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB ) {
699 while(buf <= next_fire) {
700 VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE + (burst & 63), *buf++);
701 burst += 4;
702 }
703 if ( ( buf < buf_end ) && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
704 buf++;
705
706 if (++(*fire_count) < dev_priv->num_fire_offsets)
707 next_fire = dev_priv->fire_offsets[*fire_count];
708 }
709 break;
710 default:
711 while(buf < buf_end) {
712
713 if ( *buf == HC_HEADER2 ||
714 (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 ||
715 (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 ||
716 (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6 ) break;
717
718 VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE + (burst & 63), *buf++);
719 burst +=4;
720 }
721 }
722 *buffer = buf;
723 return state_command;
724}
725
726
727
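/*
 * Summary of the register windows accepted below (byte offsets into the
 * MMIO aperture, as used by VIA_WRITE()): 0x000-0x3FF, 0xC00-0xCFF and
 * 0x1300-0x13FF are allowed; 0x400-0xBFF (3D and command burst area),
 * 0xD00-0x12FF (PCI DMA area) and everything above 0x13FF (VGA registers)
 * are rejected.
 */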
728static __inline__ int
729verify_mmio_address( uint32_t address)
730{
731 if ((address > 0x3FF) && (address < 0xC00 )) {
732 DRM_ERROR("Invalid VIDEO DMA command. "
733 "Attempt to access 3D- or command burst area.\n");
734 return 1;
735 } else if ((address > 0xCFF) && (address < 0x1300)) {
736 DRM_ERROR("Invalid VIDEO DMA command. "
737 "Attempt to access PCI DMA area.\n");
738 return 1;
739 } else if (address > 0x13FF ) {
740 DRM_ERROR("Invalid VIDEO DMA command. "
741 "Attempt to access VGA registers.\n");
742 return 1;
743 }
744 return 0;
745}
746
747static __inline__ int
748verify_video_tail( uint32_t const **buffer, const uint32_t *buf_end, uint32_t dwords)
749{
750 const uint32_t *buf = *buffer;
751
752 if (buf_end - buf < dwords) {
753 DRM_ERROR("Illegal termination of video command.\n");
754 return 1;
755 }
756 while (dwords--) {
757 if (*buf++) {
758 DRM_ERROR("Illegal video command tail.\n");
759 return 1;
760 }
761 }
762 *buffer = buf;
763 return 0;
764}
765
766
767static __inline__ verifier_state_t
768via_check_header1( uint32_t const **buffer, const uint32_t *buf_end )
769{
770 uint32_t cmd;
771 const uint32_t *buf = *buffer;
772 verifier_state_t ret = state_command;
773
774 while (buf < buf_end) {
775 cmd = *buf;
776 if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) &&
777 (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) {
778 if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
779 break;
780 DRM_ERROR("Invalid HALCYON_HEADER1 command. "
781 "Attempt to access 3D- or command burst area.\n");
782 ret = state_error;
783 break;
784 } else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) {
785 if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
786 break;
787 DRM_ERROR("Invalid HALCYON_HEADER1 command. "
788 "Attempt to access VGA registers.\n");
789 ret = state_error;
790 break;
791 } else {
792 buf += 2;
793 }
794 }
795 *buffer = buf;
796 return ret;
797}
798
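/*
 * HEADER1 blocks are sequences of (command, value) dword pairs, where the
 * command dword carries the register byte offset divided by four in its
 * low bits (via_parse_header1() writes *++buf to the register at
 * (cmd & ~HALCYON_HEADER1MASK) << 2). The check above accepts register
 * offsets 0x000-0x3FF and 0xC00-0xCFF and rejects the 3D/command burst
 * window as well as everything above 0xCFF.
 */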
799static __inline__ verifier_state_t
800via_parse_header1( drm_via_private_t *dev_priv, uint32_t const **buffer, const uint32_t *buf_end )
801{
802 register uint32_t cmd;
803 const uint32_t *buf = *buffer;
804
805 while (buf < buf_end) {
806 cmd = *buf;
807 if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1) break;
808 VIA_WRITE( (cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
809 buf++;
810 }
811 *buffer = buf;
812 return state_command;
813}
814
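/*
 * Video HEADER5 layout, as checked here and replayed by via_parse_vheader5():
 *   dword 0: VIA_VIDEO_HEADER5 | MMIO register address
 *   dword 1: number of data dwords
 *   dword 2: 0x00F50000
 *   dword 3: 0x00000000
 * followed by the data dwords, which are all written to the same register,
 * padded with zero dwords up to a multiple of four.
 */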
815static __inline__ verifier_state_t
816via_check_vheader5( uint32_t const **buffer, const uint32_t *buf_end )
817{
818 uint32_t data;
819 const uint32_t *buf = *buffer;
820
821 if (buf_end - buf < 4) {
822 DRM_ERROR("Illegal termination of video header5 command\n");
823 return state_error;
824 }
825
826 data = *buf++ & ~VIA_VIDEOMASK;
827 if (verify_mmio_address(data))
828 return state_error;
829
830 data = *buf++;
831 if (*buf++ != 0x00F50000) {
832 DRM_ERROR("Illegal header5 header data\n");
833 return state_error;
834 }
835 if (*buf++ != 0x00000000) {
836 DRM_ERROR("Illegal header5 header data\n");
837 return state_error;
838 }
839 if (eat_words(&buf, buf_end, data))
840 return state_error;
841 if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
842 return state_error;
843 *buffer = buf;
844 return state_command;
845
846}
847
848static __inline__ verifier_state_t
849via_parse_vheader5( drm_via_private_t *dev_priv, uint32_t const **buffer, const uint32_t *buf_end )
850{
851 uint32_t addr, count, i;
852 const uint32_t *buf = *buffer;
853
854 addr = *buf++ & ~VIA_VIDEOMASK;
855 i = count = *buf;
856 buf += 3;
857 while(i--) {
858 VIA_WRITE(addr, *buf++);
859 }
860 if (count & 3) buf += 4 - (count & 3);
861 *buffer = buf;
862 return state_command;
863}
864
865
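/*
 * Video HEADER6 layout, as checked here and replayed by via_parse_vheader6():
 *   dword 0: VIA_VIDEO_HEADER6 command
 *   dword 1: number of (address, value) pairs
 *   dword 2: 0x00F60000
 *   dword 3: 0x00000000
 * followed by the pairs themselves; each address must pass
 * verify_mmio_address(), and the pair area is padded with zero dwords up
 * to a multiple of four.
 */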
866static __inline__ verifier_state_t
867via_check_vheader6( uint32_t const **buffer, const uint32_t *buf_end )
868{
869 uint32_t data;
870 const uint32_t *buf = *buffer;
871 uint32_t i;
872
873
874 if (buf_end - buf < 4) {
875 DRM_ERROR("Illegal termination of video header6 command\n");
876 return state_error;
877 }
878 buf++;
879 data = *buf++;
880 if (*buf++ != 0x00F60000) {
881 DRM_ERROR("Illegal header6 header data\n");
882 return state_error;
883 }
884 if (*buf++ != 0x00000000) {
885 DRM_ERROR("Illegal header6 header data\n");
886 return state_error;
887 }
888 if ((buf_end - buf) < (data << 1)) {
889 DRM_ERROR("Illegal termination of video header6 command\n");
890 return state_error;
891 }
892 for (i=0; i<data; ++i) {
893 if (verify_mmio_address(*buf++))
894 return state_error;
895 buf++;
896 }
897 data <<= 1;
898 if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
899 return state_error;
900 *buffer = buf;
901 return state_command;
902}
903
904static __inline__ verifier_state_t
905via_parse_vheader6( drm_via_private_t *dev_priv, uint32_t const **buffer, const uint32_t *buf_end )
906{
907
908 uint32_t addr, count, i;
909 const uint32_t *buf = *buffer;
910
911 i = count = *++buf;
912 buf += 3;
913 while(i--) {
914 addr = *buf++;
915 VIA_WRITE(addr, *buf++);
916 }
917 count <<= 1;
918 if (count & 3) buf += 4 - (count & 3);
919 *buffer = buf;
920 return state_command;
921}
922
923
924
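/*
 * Top-level verifier: walks the buffer with a small state machine
 * (state_command picks the handler for the next header dword) and restores
 * the saved hc_state on error, so a rejected buffer cannot leave partially
 * updated sequence state behind. A hypothetical caller (the real callers
 * live elsewhere in the via driver) would run this on a system memory copy
 * of the user buffer, per the note at the top of this file, roughly:
 *
 *	if (via_verify_command_stream(buf, size, dev, is_agp))
 *		return DRM_ERR(EINVAL);
 *	...then hand the verified data to the hardware, for example through
 *	via_parse_command_stream(dev, buf, size) on the PCI path.
 */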
925int
926via_verify_command_stream(const uint32_t * buf, unsigned int size, drm_device_t *dev,
927 int agp)
928{
929
930 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
931 drm_via_state_t *hc_state = &dev_priv->hc_state;
932 drm_via_state_t saved_state = *hc_state;
933 uint32_t cmd;
934 const uint32_t *buf_end = buf + ( size >> 2 );
935 verifier_state_t state = state_command;
936 int pro_group_a = dev_priv->pro_group_a;
937
938 hc_state->dev = dev;
939 hc_state->unfinished = no_sequence;
940 hc_state->map_cache = NULL;
941 hc_state->agp = agp;
942 hc_state->buf_start = buf;
943 dev_priv->num_fire_offsets = 0;
944
945 while (buf < buf_end) {
946
947 switch (state) {
948 case state_header2:
949 state = via_check_header2( &buf, buf_end, hc_state );
950 break;
951 case state_header1:
952 state = via_check_header1( &buf, buf_end );
953 break;
954 case state_vheader5:
955 state = via_check_vheader5( &buf, buf_end );
956 break;
957 case state_vheader6:
958 state = via_check_vheader6( &buf, buf_end );
959 break;
960 case state_command:
961 if (HALCYON_HEADER2 == (cmd = *buf))
962 state = state_header2;
963 else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
964 state = state_header1;
965 else if (pro_group_a && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
966 state = state_vheader5;
967 else if (pro_group_a && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
968 state = state_vheader6;
969 else {
970 DRM_ERROR("Invalid or unimplemented DMA HEADER command: 0x%x\n",
971 cmd);
972 state = state_error;
973 }
974 break;
975 case state_error:
976 default:
977 *hc_state = saved_state;
978 return DRM_ERR(EINVAL);
979 }
980 }
981 if (state == state_error) {
982 *hc_state = saved_state;
983 return DRM_ERR(EINVAL);
984 }
985 return 0;
986}
987
988int
989via_parse_command_stream(drm_device_t *dev, const uint32_t * buf, unsigned int size)
990{
991
992 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
993 uint32_t cmd;
994 const uint32_t *buf_end = buf + ( size >> 2 );
995 verifier_state_t state = state_command;
996 int fire_count = 0;
997
998 while (buf < buf_end) {
999
1000 switch (state) {
1001 case state_header2:
1002 state = via_parse_header2( dev_priv, &buf, buf_end, &fire_count );
1003 break;
1004 case state_header1:
1005 state = via_parse_header1( dev_priv, &buf, buf_end );
1006 break;
1007 case state_vheader5:
1008 state = via_parse_vheader5( dev_priv, &buf, buf_end );
1009 break;
1010 case state_vheader6:
1011 state = via_parse_vheader6( dev_priv, &buf, buf_end );
1012 break;
1013 case state_command:
1014 if (HALCYON_HEADER2 == (cmd = *buf))
1015 state = state_header2;
1016 else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
1017 state = state_header1;
1018 else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
1019 state = state_vheader5;
1020 else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
1021 state = state_vheader6;
1022 else {
1023 DRM_ERROR("Invalid or unimplemented DMA HEADER command: 0x%x\n",
1024 cmd);
1025 state = state_error;
1026 }
1027 break;
1028 case state_error:
1029 default:
1030 return DRM_ERR(EINVAL);
1031 }
1032 }
1033 if (state == state_error) {
1034 return DRM_ERR(EINVAL);
1035 }
1036 return 0;
1037}
1038
1039
1040
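/*
 * setup_hazard_table() expands a compact init table into a full 256-entry
 * lookup table indexed by a command's high byte: every index defaults to
 * forbidden_command and only the listed entries are overridden. After
 * via_init_command_verifier() has run, for example, table1[0x10] is
 * check_z_buffer_addr0 while an unlisted index such as table1[0x16] stays
 * forbidden_command.
 */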
1041static void
1042setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
1043{
1044 int i;
1045
1046 for(i=0; i<256; ++i) {
1047 table[i] = forbidden_command;
1048 }
1049
1050 for(i=0; i<size; ++i) {
1051 table[init_table[i].code] = init_table[i].hz;
1052 }
1053}
1054
1055void
1056via_init_command_verifier( void )
1057{
1058 setup_hazard_table(init_table1, table1, sizeof(init_table1) / sizeof(hz_init_t));
1059 setup_hazard_table(init_table2, table2, sizeof(init_table2) / sizeof(hz_init_t));
1060 setup_hazard_table(init_table3, table3, sizeof(init_table3) / sizeof(hz_init_t));
1061}