author     Dave Airlie <airlied@starflyer.(none)>    2005-09-25 00:28:13 -0400
committer  Dave Airlie <airlied@linux.ie>            2005-09-25 00:28:13 -0400
commit     b5e89ed53ed8d24f83ba1941c07382af00ed238e
tree       747bae7a565f88a2e1d5974776eeb054a932c505  /drivers/char/drm/savage_state.c
parent     99a2657a29e2d623c3568cd86b27cac13fb63140

drm: lindent the drm directory.

I've been threatening this for a while, so no point hanging around. This
lindents the DRM code, which was always really bad in the tabbing department.
I've also fixed some misnamed files in comments and removed some trailing
whitespace.

Signed-off-by: Dave Airlie <airlied@linux.ie>
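
To make the nature of the change concrete, here is one representative
before/after pair taken from the first hunk of this diff. The tool only
normalizes whitespace (spaces around binary operators, after casts, around
pointer declarators) and re-wraps long lines; no logic is changed:

  before:  void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
  after:   void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,

  before:  DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D);
  after:   DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);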

Diffstat (limited to 'drivers/char/drm/savage_state.c')
 drivers/char/drm/savage_state.c (-rw-r--r--) | 483
 1 file changed, 250 insertions(+), 233 deletions(-)
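
For context, "lindent" refers to the kernel's scripts/Lindent helper, a thin
wrapper around GNU indent run with kernel-style settings (approximately
indent -npro -kr -i8 -ts8 -sob -l80 -ss -ncs; the exact option list is an
approximation based on kernel trees of this era). That is why the hunks below
consist almost entirely of whitespace and line-wrapping changes.
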
diff --git a/drivers/char/drm/savage_state.c b/drivers/char/drm/savage_state.c
index 475695a00083..39654cb5d570 100644
--- a/drivers/char/drm/savage_state.c
+++ b/drivers/char/drm/savage_state.c
@@ -26,48 +26,48 @@
26#include "savage_drm.h" 26#include "savage_drm.h"
27#include "savage_drv.h" 27#include "savage_drv.h"
28 28
29void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv, 29void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
30 drm_clip_rect_t *pbox) 30 drm_clip_rect_t * pbox)
31{ 31{
32 uint32_t scstart = dev_priv->state.s3d.new_scstart; 32 uint32_t scstart = dev_priv->state.s3d.new_scstart;
33 uint32_t scend = dev_priv->state.s3d.new_scend; 33 uint32_t scend = dev_priv->state.s3d.new_scend;
34 scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) | 34 scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) |
35 ((uint32_t)pbox->x1 & 0x000007ff) | 35 ((uint32_t) pbox->x1 & 0x000007ff) |
36 (((uint32_t)pbox->y1 << 16) & 0x07ff0000); 36 (((uint32_t) pbox->y1 << 16) & 0x07ff0000);
37 scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) | 37 scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) |
38 (((uint32_t)pbox->x2-1) & 0x000007ff) | 38 (((uint32_t) pbox->x2 - 1) & 0x000007ff) |
39 ((((uint32_t)pbox->y2-1) << 16) & 0x07ff0000); 39 ((((uint32_t) pbox->y2 - 1) << 16) & 0x07ff0000);
40 if (scstart != dev_priv->state.s3d.scstart || 40 if (scstart != dev_priv->state.s3d.scstart ||
41 scend != dev_priv->state.s3d.scend) { 41 scend != dev_priv->state.s3d.scend) {
42 DMA_LOCALS; 42 DMA_LOCALS;
43 BEGIN_DMA(4); 43 BEGIN_DMA(4);
44 DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D); 44 DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
45 DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2); 45 DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
46 DMA_WRITE(scstart); 46 DMA_WRITE(scstart);
47 DMA_WRITE(scend); 47 DMA_WRITE(scend);
48 dev_priv->state.s3d.scstart = scstart; 48 dev_priv->state.s3d.scstart = scstart;
49 dev_priv->state.s3d.scend = scend; 49 dev_priv->state.s3d.scend = scend;
50 dev_priv->waiting = 1; 50 dev_priv->waiting = 1;
51 DMA_COMMIT(); 51 DMA_COMMIT();
52 } 52 }
53} 53}
54 54
55void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv, 55void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
56 drm_clip_rect_t *pbox) 56 drm_clip_rect_t * pbox)
57{ 57{
58 uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0; 58 uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
59 uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1; 59 uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
60 drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) | 60 drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) |
61 ((uint32_t)pbox->x1 & 0x000007ff) | 61 ((uint32_t) pbox->x1 & 0x000007ff) |
62 (((uint32_t)pbox->y1 << 12) & 0x00fff000); 62 (((uint32_t) pbox->y1 << 12) & 0x00fff000);
63 drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) | 63 drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) |
64 (((uint32_t)pbox->x2-1) & 0x000007ff) | 64 (((uint32_t) pbox->x2 - 1) & 0x000007ff) |
65 ((((uint32_t)pbox->y2-1) << 12) & 0x00fff000); 65 ((((uint32_t) pbox->y2 - 1) << 12) & 0x00fff000);
66 if (drawctrl0 != dev_priv->state.s4.drawctrl0 || 66 if (drawctrl0 != dev_priv->state.s4.drawctrl0 ||
67 drawctrl1 != dev_priv->state.s4.drawctrl1) { 67 drawctrl1 != dev_priv->state.s4.drawctrl1) {
68 DMA_LOCALS; 68 DMA_LOCALS;
69 BEGIN_DMA(4); 69 BEGIN_DMA(4);
70 DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D); 70 DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
71 DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2); 71 DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);
72 DMA_WRITE(drawctrl0); 72 DMA_WRITE(drawctrl0);
73 DMA_WRITE(drawctrl1); 73 DMA_WRITE(drawctrl1);
@@ -78,22 +78,23 @@ void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
78 } 78 }
79} 79}
80 80
81static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit, 81static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit,
82 uint32_t addr) 82 uint32_t addr)
83{ 83{
84 if ((addr & 6) != 2) { /* reserved bits */ 84 if ((addr & 6) != 2) { /* reserved bits */
85 DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr); 85 DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
86 return DRM_ERR(EINVAL); 86 return DRM_ERR(EINVAL);
87 } 87 }
88 if (!(addr & 1)) { /* local */ 88 if (!(addr & 1)) { /* local */
89 addr &= ~7; 89 addr &= ~7;
90 if (addr < dev_priv->texture_offset || 90 if (addr < dev_priv->texture_offset ||
91 addr >= dev_priv->texture_offset+dev_priv->texture_size) { 91 addr >= dev_priv->texture_offset + dev_priv->texture_size) {
92 DRM_ERROR("bad texAddr%d %08x (local addr out of range)\n", 92 DRM_ERROR
93 unit, addr); 93 ("bad texAddr%d %08x (local addr out of range)\n",
94 unit, addr);
94 return DRM_ERR(EINVAL); 95 return DRM_ERR(EINVAL);
95 } 96 }
96 } else { /* AGP */ 97 } else { /* AGP */
97 if (!dev_priv->agp_textures) { 98 if (!dev_priv->agp_textures) {
98 DRM_ERROR("bad texAddr%d %08x (AGP not available)\n", 99 DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
99 unit, addr); 100 unit, addr);
@@ -103,8 +104,9 @@ static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
103 if (addr < dev_priv->agp_textures->offset || 104 if (addr < dev_priv->agp_textures->offset ||
104 addr >= (dev_priv->agp_textures->offset + 105 addr >= (dev_priv->agp_textures->offset +
105 dev_priv->agp_textures->size)) { 106 dev_priv->agp_textures->size)) {
106 DRM_ERROR("bad texAddr%d %08x (AGP addr out of range)\n", 107 DRM_ERROR
107 unit, addr); 108 ("bad texAddr%d %08x (AGP addr out of range)\n",
109 unit, addr);
108 return DRM_ERR(EINVAL); 110 return DRM_ERR(EINVAL);
109 } 111 }
110 } 112 }
@@ -122,14 +124,14 @@ static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
122 (dev_priv->state.where & ~(mask)); \ 124 (dev_priv->state.where & ~(mask)); \
123 } \ 125 } \
124} while (0) 126} while (0)
125static int savage_verify_state_s3d(drm_savage_private_t *dev_priv, 127static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
126 unsigned int start, unsigned int count, 128 unsigned int start, unsigned int count,
127 const uint32_t __user *regs) 129 const uint32_t __user * regs)
128{ 130{
129 if (start < SAVAGE_TEXPALADDR_S3D || 131 if (start < SAVAGE_TEXPALADDR_S3D ||
130 start+count-1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { 132 start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
131 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", 133 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
132 start, start+count-1); 134 start, start + count - 1);
133 return DRM_ERR(EINVAL); 135 return DRM_ERR(EINVAL);
134 } 136 }
135 137
@@ -140,28 +142,29 @@ static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,
140 142
141 /* if any texture regs were changed ... */ 143 /* if any texture regs were changed ... */
142 if (start <= SAVAGE_TEXCTRL_S3D && 144 if (start <= SAVAGE_TEXCTRL_S3D &&
143 start+count > SAVAGE_TEXPALADDR_S3D) { 145 start + count > SAVAGE_TEXPALADDR_S3D) {
144 /* ... check texture state */ 146 /* ... check texture state */
145 SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl); 147 SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
146 SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr); 148 SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
147 if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK) 149 if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
148 return savage_verify_texaddr( 150 return savage_verify_texaddr(dev_priv, 0,
149 dev_priv, 0, dev_priv->state.s3d.texaddr); 151 dev_priv->state.s3d.
152 texaddr);
150 } 153 }
151 154
152 return 0; 155 return 0;
153} 156}
154 157
155static int savage_verify_state_s4(drm_savage_private_t *dev_priv, 158static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
156 unsigned int start, unsigned int count, 159 unsigned int start, unsigned int count,
157 const uint32_t __user *regs) 160 const uint32_t __user * regs)
158{ 161{
159 int ret = 0; 162 int ret = 0;
160 163
161 if (start < SAVAGE_DRAWLOCALCTRL_S4 || 164 if (start < SAVAGE_DRAWLOCALCTRL_S4 ||
162 start+count-1 > SAVAGE_TEXBLENDCOLOR_S4) { 165 start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) {
163 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", 166 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
164 start, start+count-1); 167 start, start + count - 1);
165 return DRM_ERR(EINVAL); 168 return DRM_ERR(EINVAL);
166 } 169 }
167 170
@@ -171,28 +174,30 @@ static int savage_verify_state_s4(drm_savage_private_t *dev_priv,
171 ~SAVAGE_SCISSOR_MASK_S4); 174 ~SAVAGE_SCISSOR_MASK_S4);
172 175
173 /* if any texture regs were changed ... */ 176 /* if any texture regs were changed ... */
174 if (start <= SAVAGE_TEXDESCR_S4 && 177 if (start <= SAVAGE_TEXDESCR_S4 && start + count > SAVAGE_TEXPALADDR_S4) {
175 start+count > SAVAGE_TEXPALADDR_S4) {
176 /* ... check texture state */ 178 /* ... check texture state */
177 SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr); 179 SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
178 SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0); 180 SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
179 SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1); 181 SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
180 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK) 182 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
181 ret |= savage_verify_texaddr( 183 ret |=
182 dev_priv, 0, dev_priv->state.s4.texaddr0); 184 savage_verify_texaddr(dev_priv, 0,
185 dev_priv->state.s4.texaddr0);
183 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK) 186 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
184 ret |= savage_verify_texaddr( 187 ret |=
185 dev_priv, 1, dev_priv->state.s4.texaddr1); 188 savage_verify_texaddr(dev_priv, 1,
189 dev_priv->state.s4.texaddr1);
186 } 190 }
187 191
188 return ret; 192 return ret;
189} 193}
194
190#undef SAVE_STATE 195#undef SAVE_STATE
191#undef SAVE_STATE_MASK 196#undef SAVE_STATE_MASK
192 197
193static int savage_dispatch_state(drm_savage_private_t *dev_priv, 198static int savage_dispatch_state(drm_savage_private_t * dev_priv,
194 const drm_savage_cmd_header_t *cmd_header, 199 const drm_savage_cmd_header_t * cmd_header,
195 const uint32_t __user *regs) 200 const uint32_t __user * regs)
196{ 201{
197 unsigned int count = cmd_header->state.count; 202 unsigned int count = cmd_header->state.count;
198 unsigned int start = cmd_header->state.start; 203 unsigned int start = cmd_header->state.start;
@@ -204,7 +209,7 @@ static int savage_dispatch_state(drm_savage_private_t *dev_priv,
204 if (!count) 209 if (!count)
205 return 0; 210 return 0;
206 211
207 if (DRM_VERIFYAREA_READ(regs, count*4)) 212 if (DRM_VERIFYAREA_READ(regs, count * 4))
208 return DRM_ERR(EFAULT); 213 return DRM_ERR(EFAULT);
209 214
210 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 215 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
@@ -213,14 +218,14 @@ static int savage_dispatch_state(drm_savage_private_t *dev_priv,
213 return ret; 218 return ret;
214 /* scissor regs are emitted in savage_dispatch_draw */ 219 /* scissor regs are emitted in savage_dispatch_draw */
215 if (start < SAVAGE_SCSTART_S3D) { 220 if (start < SAVAGE_SCSTART_S3D) {
216 if (start+count > SAVAGE_SCEND_S3D+1) 221 if (start + count > SAVAGE_SCEND_S3D + 1)
217 count2 = count - (SAVAGE_SCEND_S3D+1 - start); 222 count2 = count - (SAVAGE_SCEND_S3D + 1 - start);
218 if (start+count > SAVAGE_SCSTART_S3D) 223 if (start + count > SAVAGE_SCSTART_S3D)
219 count = SAVAGE_SCSTART_S3D - start; 224 count = SAVAGE_SCSTART_S3D - start;
220 } else if (start <= SAVAGE_SCEND_S3D) { 225 } else if (start <= SAVAGE_SCEND_S3D) {
221 if (start+count > SAVAGE_SCEND_S3D+1) { 226 if (start + count > SAVAGE_SCEND_S3D + 1) {
222 count -= SAVAGE_SCEND_S3D+1 - start; 227 count -= SAVAGE_SCEND_S3D + 1 - start;
223 start = SAVAGE_SCEND_S3D+1; 228 start = SAVAGE_SCEND_S3D + 1;
224 } else 229 } else
225 return 0; 230 return 0;
226 } 231 }
@@ -230,23 +235,24 @@ static int savage_dispatch_state(drm_savage_private_t *dev_priv,
230 return ret; 235 return ret;
231 /* scissor regs are emitted in savage_dispatch_draw */ 236 /* scissor regs are emitted in savage_dispatch_draw */
232 if (start < SAVAGE_DRAWCTRL0_S4) { 237 if (start < SAVAGE_DRAWCTRL0_S4) {
233 if (start+count > SAVAGE_DRAWCTRL1_S4+1) 238 if (start + count > SAVAGE_DRAWCTRL1_S4 + 1)
234 count2 = count - (SAVAGE_DRAWCTRL1_S4+1 - start); 239 count2 =
235 if (start+count > SAVAGE_DRAWCTRL0_S4) 240 count - (SAVAGE_DRAWCTRL1_S4 + 1 - start);
241 if (start + count > SAVAGE_DRAWCTRL0_S4)
236 count = SAVAGE_DRAWCTRL0_S4 - start; 242 count = SAVAGE_DRAWCTRL0_S4 - start;
237 } else if (start <= SAVAGE_DRAWCTRL1_S4) { 243 } else if (start <= SAVAGE_DRAWCTRL1_S4) {
238 if (start+count > SAVAGE_DRAWCTRL1_S4+1) { 244 if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) {
239 count -= SAVAGE_DRAWCTRL1_S4+1 - start; 245 count -= SAVAGE_DRAWCTRL1_S4 + 1 - start;
240 start = SAVAGE_DRAWCTRL1_S4+1; 246 start = SAVAGE_DRAWCTRL1_S4 + 1;
241 } else 247 } else
242 return 0; 248 return 0;
243 } 249 }
244 } 250 }
245 251
246 bci_size = count + (count+254)/255 + count2 + (count2+254)/255; 252 bci_size = count + (count + 254) / 255 + count2 + (count2 + 254) / 255;
247 253
248 if (cmd_header->state.global) { 254 if (cmd_header->state.global) {
249 BEGIN_DMA(bci_size+1); 255 BEGIN_DMA(bci_size + 1);
250 DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D); 256 DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
251 dev_priv->waiting = 1; 257 dev_priv->waiting = 1;
252 } else { 258 } else {
@@ -273,9 +279,9 @@ static int savage_dispatch_state(drm_savage_private_t *dev_priv,
273 return 0; 279 return 0;
274} 280}
275 281
276static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv, 282static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
277 const drm_savage_cmd_header_t *cmd_header, 283 const drm_savage_cmd_header_t * cmd_header,
278 const drm_buf_t *dmabuf) 284 const drm_buf_t * dmabuf)
279{ 285{
280 unsigned char reorder = 0; 286 unsigned char reorder = 0;
281 unsigned int prim = cmd_header->prim.prim; 287 unsigned int prim = cmd_header->prim.prim;
@@ -286,8 +292,8 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
286 BCI_LOCALS; 292 BCI_LOCALS;
287 293
288 if (!dmabuf) { 294 if (!dmabuf) {
289 DRM_ERROR("called without dma buffers!\n"); 295 DRM_ERROR("called without dma buffers!\n");
290 return DRM_ERR(EINVAL); 296 return DRM_ERR(EINVAL);
291 } 297 }
292 298
293 if (!n) 299 if (!n)
@@ -307,8 +313,9 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
307 case SAVAGE_PRIM_TRISTRIP: 313 case SAVAGE_PRIM_TRISTRIP:
308 case SAVAGE_PRIM_TRIFAN: 314 case SAVAGE_PRIM_TRIFAN:
309 if (n < 3) { 315 if (n < 3) {
310 DRM_ERROR("wrong number of vertices %u in TRIFAN/STRIP\n", 316 DRM_ERROR
311 n); 317 ("wrong number of vertices %u in TRIFAN/STRIP\n",
318 n);
312 return DRM_ERR(EINVAL); 319 return DRM_ERR(EINVAL);
313 } 320 }
314 break; 321 break;
@@ -319,17 +326,15 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
319 326
320 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 327 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
321 if (skip != 0) { 328 if (skip != 0) {
322 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", 329 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
323 skip);
324 return DRM_ERR(EINVAL); 330 return DRM_ERR(EINVAL);
325 } 331 }
326 } else { 332 } else {
327 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - 333 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
328 (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) - 334 (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
329 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); 335 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
330 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { 336 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
331 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", 337 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
332 skip);
333 return DRM_ERR(EINVAL); 338 return DRM_ERR(EINVAL);
334 } 339 }
335 if (reorder) { 340 if (reorder) {
@@ -338,9 +343,9 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
338 } 343 }
339 } 344 }
340 345
341 if (start + n > dmabuf->total/32) { 346 if (start + n > dmabuf->total / 32) {
342 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", 347 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
343 start, start + n - 1, dmabuf->total/32); 348 start, start + n - 1, dmabuf->total / 32);
344 return DRM_ERR(EINVAL); 349 return DRM_ERR(EINVAL);
345 } 350 }
346 351
@@ -375,32 +380,33 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
375 /* Need to reorder indices for correct flat 380 /* Need to reorder indices for correct flat
376 * shading while preserving the clock sense 381 * shading while preserving the clock sense
377 * for correct culling. Only on Savage3D. */ 382 * for correct culling. Only on Savage3D. */
378 int reorder[3] = {-1, -1, -1}; 383 int reorder[3] = { -1, -1, -1 };
379 reorder[start%3] = 2; 384 reorder[start % 3] = 2;
380 385
381 BEGIN_BCI((count+1+1)/2); 386 BEGIN_BCI((count + 1 + 1) / 2);
382 BCI_DRAW_INDICES_S3D(count, prim, start+2); 387 BCI_DRAW_INDICES_S3D(count, prim, start + 2);
383 388
384 for (i = start+1; i+1 < start+count; i += 2) 389 for (i = start + 1; i + 1 < start + count; i += 2)
385 BCI_WRITE((i + reorder[i % 3]) | 390 BCI_WRITE((i + reorder[i % 3]) |
386 ((i+1 + reorder[(i+1) % 3]) << 16)); 391 ((i + 1 +
387 if (i < start+count) 392 reorder[(i + 1) % 3]) << 16));
388 BCI_WRITE(i + reorder[i%3]); 393 if (i < start + count)
394 BCI_WRITE(i + reorder[i % 3]);
389 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 395 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
390 BEGIN_BCI((count+1+1)/2); 396 BEGIN_BCI((count + 1 + 1) / 2);
391 BCI_DRAW_INDICES_S3D(count, prim, start); 397 BCI_DRAW_INDICES_S3D(count, prim, start);
392 398
393 for (i = start+1; i+1 < start+count; i += 2) 399 for (i = start + 1; i + 1 < start + count; i += 2)
394 BCI_WRITE(i | ((i+1) << 16)); 400 BCI_WRITE(i | ((i + 1) << 16));
395 if (i < start+count) 401 if (i < start + count)
396 BCI_WRITE(i); 402 BCI_WRITE(i);
397 } else { 403 } else {
398 BEGIN_BCI((count+2+1)/2); 404 BEGIN_BCI((count + 2 + 1) / 2);
399 BCI_DRAW_INDICES_S4(count, prim, skip); 405 BCI_DRAW_INDICES_S4(count, prim, skip);
400 406
401 for (i = start; i+1 < start+count; i += 2) 407 for (i = start; i + 1 < start + count; i += 2)
402 BCI_WRITE(i | ((i+1) << 16)); 408 BCI_WRITE(i | ((i + 1) << 16));
403 if (i < start+count) 409 if (i < start + count)
404 BCI_WRITE(i); 410 BCI_WRITE(i);
405 } 411 }
406 412
@@ -413,11 +419,10 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
413 return 0; 419 return 0;
414} 420}
415 421
416static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv, 422static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
417 const drm_savage_cmd_header_t *cmd_header, 423 const drm_savage_cmd_header_t * cmd_header,
418 const uint32_t __user *vtxbuf, 424 const uint32_t __user * vtxbuf,
419 unsigned int vb_size, 425 unsigned int vb_size, unsigned int vb_stride)
420 unsigned int vb_stride)
421{ 426{
422 unsigned char reorder = 0; 427 unsigned char reorder = 0;
423 unsigned int prim = cmd_header->prim.prim; 428 unsigned int prim = cmd_header->prim.prim;
@@ -445,8 +450,9 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
445 case SAVAGE_PRIM_TRISTRIP: 450 case SAVAGE_PRIM_TRISTRIP:
446 case SAVAGE_PRIM_TRIFAN: 451 case SAVAGE_PRIM_TRIFAN:
447 if (n < 3) { 452 if (n < 3) {
448 DRM_ERROR("wrong number of vertices %u in TRIFAN/STRIP\n", 453 DRM_ERROR
449 n); 454 ("wrong number of vertices %u in TRIFAN/STRIP\n",
455 n);
450 return DRM_ERR(EINVAL); 456 return DRM_ERR(EINVAL);
451 } 457 }
452 break; 458 break;
@@ -460,18 +466,18 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
460 DRM_ERROR("invalid skip flags 0x%04x\n", skip); 466 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
461 return DRM_ERR(EINVAL); 467 return DRM_ERR(EINVAL);
462 } 468 }
463 vtx_size = 8; /* full vertex */ 469 vtx_size = 8; /* full vertex */
464 } else { 470 } else {
465 if (skip > SAVAGE_SKIP_ALL_S4) { 471 if (skip > SAVAGE_SKIP_ALL_S4) {
466 DRM_ERROR("invalid skip flags 0x%04x\n", skip); 472 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
467 return DRM_ERR(EINVAL); 473 return DRM_ERR(EINVAL);
468 } 474 }
469 vtx_size = 10; /* full vertex */ 475 vtx_size = 10; /* full vertex */
470 } 476 }
471 477
472 vtx_size -= (skip & 1) + (skip >> 1 & 1) + 478 vtx_size -= (skip & 1) + (skip >> 1 & 1) +
473 (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) + 479 (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
474 (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1); 480 (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
475 481
476 if (vtx_size > vb_stride) { 482 if (vtx_size > vb_stride) {
477 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", 483 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
@@ -479,9 +485,9 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
479 return DRM_ERR(EINVAL); 485 return DRM_ERR(EINVAL);
480 } 486 }
481 487
482 if (start + n > vb_size / (vb_stride*4)) { 488 if (start + n > vb_size / (vb_stride * 4)) {
483 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", 489 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
484 start, start + n - 1, vb_size / (vb_stride*4)); 490 start, start + n - 1, vb_size / (vb_stride * 4));
485 return DRM_ERR(EINVAL); 491 return DRM_ERR(EINVAL);
486 } 492 }
487 493
@@ -493,31 +499,31 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
493 /* Need to reorder vertices for correct flat 499 /* Need to reorder vertices for correct flat
494 * shading while preserving the clock sense 500 * shading while preserving the clock sense
495 * for correct culling. Only on Savage3D. */ 501 * for correct culling. Only on Savage3D. */
496 int reorder[3] = {-1, -1, -1}; 502 int reorder[3] = { -1, -1, -1 };
497 reorder[start%3] = 2; 503 reorder[start % 3] = 2;
498 504
499 BEGIN_DMA(count*vtx_size+1); 505 BEGIN_DMA(count * vtx_size + 1);
500 DMA_DRAW_PRIMITIVE(count, prim, skip); 506 DMA_DRAW_PRIMITIVE(count, prim, skip);
501 507
502 for (i = start; i < start+count; ++i) { 508 for (i = start; i < start + count; ++i) {
503 unsigned int j = i + reorder[i % 3]; 509 unsigned int j = i + reorder[i % 3];
504 DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j], 510 DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j],
505 vtx_size); 511 vtx_size);
506 } 512 }
507 513
508 DMA_COMMIT(); 514 DMA_COMMIT();
509 } else { 515 } else {
510 BEGIN_DMA(count*vtx_size+1); 516 BEGIN_DMA(count * vtx_size + 1);
511 DMA_DRAW_PRIMITIVE(count, prim, skip); 517 DMA_DRAW_PRIMITIVE(count, prim, skip);
512 518
513 if (vb_stride == vtx_size) { 519 if (vb_stride == vtx_size) {
514 DMA_COPY_FROM_USER(&vtxbuf[vb_stride*start], 520 DMA_COPY_FROM_USER(&vtxbuf[vb_stride * start],
515 vtx_size*count); 521 vtx_size * count);
516 } else { 522 } else {
517 for (i = start; i < start+count; ++i) { 523 for (i = start; i < start + count; ++i) {
518 DMA_COPY_FROM_USER( 524 DMA_COPY_FROM_USER(&vtxbuf
519 &vtxbuf[vb_stride*i], 525 [vb_stride * i],
520 vtx_size); 526 vtx_size);
521 } 527 }
522 } 528 }
523 529
@@ -533,10 +539,10 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
533 return 0; 539 return 0;
534} 540}
535 541
536static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv, 542static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
537 const drm_savage_cmd_header_t *cmd_header, 543 const drm_savage_cmd_header_t * cmd_header,
538 const uint16_t __user *usr_idx, 544 const uint16_t __user * usr_idx,
539 const drm_buf_t *dmabuf) 545 const drm_buf_t * dmabuf)
540{ 546{
541 unsigned char reorder = 0; 547 unsigned char reorder = 0;
542 unsigned int prim = cmd_header->idx.prim; 548 unsigned int prim = cmd_header->idx.prim;
@@ -546,8 +552,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
546 BCI_LOCALS; 552 BCI_LOCALS;
547 553
548 if (!dmabuf) { 554 if (!dmabuf) {
549 DRM_ERROR("called without dma buffers!\n"); 555 DRM_ERROR("called without dma buffers!\n");
550 return DRM_ERR(EINVAL); 556 return DRM_ERR(EINVAL);
551 } 557 }
552 558
553 if (!n) 559 if (!n)
@@ -559,16 +565,15 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
559 prim = SAVAGE_PRIM_TRILIST; 565 prim = SAVAGE_PRIM_TRILIST;
560 case SAVAGE_PRIM_TRILIST: 566 case SAVAGE_PRIM_TRILIST:
561 if (n % 3 != 0) { 567 if (n % 3 != 0) {
562 DRM_ERROR("wrong number of indices %u in TRILIST\n", 568 DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
563 n);
564 return DRM_ERR(EINVAL); 569 return DRM_ERR(EINVAL);
565 } 570 }
566 break; 571 break;
567 case SAVAGE_PRIM_TRISTRIP: 572 case SAVAGE_PRIM_TRISTRIP:
568 case SAVAGE_PRIM_TRIFAN: 573 case SAVAGE_PRIM_TRIFAN:
569 if (n < 3) { 574 if (n < 3) {
570 DRM_ERROR("wrong number of indices %u in TRIFAN/STRIP\n", 575 DRM_ERROR
571 n); 576 ("wrong number of indices %u in TRIFAN/STRIP\n", n);
572 return DRM_ERR(EINVAL); 577 return DRM_ERR(EINVAL);
573 } 578 }
574 break; 579 break;
@@ -579,17 +584,15 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
579 584
580 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 585 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
581 if (skip != 0) { 586 if (skip != 0) {
582 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", 587 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
583 skip);
584 return DRM_ERR(EINVAL); 588 return DRM_ERR(EINVAL);
585 } 589 }
586 } else { 590 } else {
587 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - 591 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
588 (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) - 592 (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
589 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); 593 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
590 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { 594 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
591 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", 595 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
592 skip);
593 return DRM_ERR(EINVAL); 596 return DRM_ERR(EINVAL);
594 } 597 }
595 if (reorder) { 598 if (reorder) {
@@ -629,11 +632,11 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
629 uint16_t idx[255]; 632 uint16_t idx[255];
630 633
631 /* Copy and check indices */ 634 /* Copy and check indices */
632 DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count*2); 635 DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count * 2);
633 for (i = 0; i < count; ++i) { 636 for (i = 0; i < count; ++i) {
634 if (idx[i] > dmabuf->total/32) { 637 if (idx[i] > dmabuf->total / 32) {
635 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", 638 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
636 i, idx[i], dmabuf->total/32); 639 i, idx[i], dmabuf->total / 32);
637 return DRM_ERR(EINVAL); 640 return DRM_ERR(EINVAL);
638 } 641 }
639 } 642 }
@@ -642,30 +645,31 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
642 /* Need to reorder indices for correct flat 645 /* Need to reorder indices for correct flat
643 * shading while preserving the clock sense 646 * shading while preserving the clock sense
644 * for correct culling. Only on Savage3D. */ 647 * for correct culling. Only on Savage3D. */
645 int reorder[3] = {2, -1, -1}; 648 int reorder[3] = { 2, -1, -1 };
646 649
647 BEGIN_BCI((count+1+1)/2); 650 BEGIN_BCI((count + 1 + 1) / 2);
648 BCI_DRAW_INDICES_S3D(count, prim, idx[2]); 651 BCI_DRAW_INDICES_S3D(count, prim, idx[2]);
649 652
650 for (i = 1; i+1 < count; i += 2) 653 for (i = 1; i + 1 < count; i += 2)
651 BCI_WRITE(idx[i + reorder[i % 3]] | 654 BCI_WRITE(idx[i + reorder[i % 3]] |
652 (idx[i+1 + reorder[(i+1) % 3]] << 16)); 655 (idx[i + 1 + reorder[(i + 1) % 3]] <<
656 16));
653 if (i < count) 657 if (i < count)
654 BCI_WRITE(idx[i + reorder[i%3]]); 658 BCI_WRITE(idx[i + reorder[i % 3]]);
655 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 659 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
656 BEGIN_BCI((count+1+1)/2); 660 BEGIN_BCI((count + 1 + 1) / 2);
657 BCI_DRAW_INDICES_S3D(count, prim, idx[0]); 661 BCI_DRAW_INDICES_S3D(count, prim, idx[0]);
658 662
659 for (i = 1; i+1 < count; i += 2) 663 for (i = 1; i + 1 < count; i += 2)
660 BCI_WRITE(idx[i] | (idx[i+1] << 16)); 664 BCI_WRITE(idx[i] | (idx[i + 1] << 16));
661 if (i < count) 665 if (i < count)
662 BCI_WRITE(idx[i]); 666 BCI_WRITE(idx[i]);
663 } else { 667 } else {
664 BEGIN_BCI((count+2+1)/2); 668 BEGIN_BCI((count + 2 + 1) / 2);
665 BCI_DRAW_INDICES_S4(count, prim, skip); 669 BCI_DRAW_INDICES_S4(count, prim, skip);
666 670
667 for (i = 0; i+1 < count; i += 2) 671 for (i = 0; i + 1 < count; i += 2)
668 BCI_WRITE(idx[i] | (idx[i+1] << 16)); 672 BCI_WRITE(idx[i] | (idx[i + 1] << 16));
669 if (i < count) 673 if (i < count)
670 BCI_WRITE(idx[i]); 674 BCI_WRITE(idx[i]);
671 } 675 }
@@ -679,12 +683,11 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
679 return 0; 683 return 0;
680} 684}
681 685
682static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv, 686static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
683 const drm_savage_cmd_header_t *cmd_header, 687 const drm_savage_cmd_header_t * cmd_header,
684 const uint16_t __user *usr_idx, 688 const uint16_t __user * usr_idx,
685 const uint32_t __user *vtxbuf, 689 const uint32_t __user * vtxbuf,
686 unsigned int vb_size, 690 unsigned int vb_size, unsigned int vb_stride)
687 unsigned int vb_stride)
688{ 691{
689 unsigned char reorder = 0; 692 unsigned char reorder = 0;
690 unsigned int prim = cmd_header->idx.prim; 693 unsigned int prim = cmd_header->idx.prim;
@@ -703,16 +706,15 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
703 prim = SAVAGE_PRIM_TRILIST; 706 prim = SAVAGE_PRIM_TRILIST;
704 case SAVAGE_PRIM_TRILIST: 707 case SAVAGE_PRIM_TRILIST:
705 if (n % 3 != 0) { 708 if (n % 3 != 0) {
706 DRM_ERROR("wrong number of indices %u in TRILIST\n", 709 DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
707 n);
708 return DRM_ERR(EINVAL); 710 return DRM_ERR(EINVAL);
709 } 711 }
710 break; 712 break;
711 case SAVAGE_PRIM_TRISTRIP: 713 case SAVAGE_PRIM_TRISTRIP:
712 case SAVAGE_PRIM_TRIFAN: 714 case SAVAGE_PRIM_TRIFAN:
713 if (n < 3) { 715 if (n < 3) {
714 DRM_ERROR("wrong number of indices %u in TRIFAN/STRIP\n", 716 DRM_ERROR
715 n); 717 ("wrong number of indices %u in TRIFAN/STRIP\n", n);
716 return DRM_ERR(EINVAL); 718 return DRM_ERR(EINVAL);
717 } 719 }
718 break; 720 break;
@@ -726,18 +728,18 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
726 DRM_ERROR("invalid skip flags 0x%04x\n", skip); 728 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
727 return DRM_ERR(EINVAL); 729 return DRM_ERR(EINVAL);
728 } 730 }
729 vtx_size = 8; /* full vertex */ 731 vtx_size = 8; /* full vertex */
730 } else { 732 } else {
731 if (skip > SAVAGE_SKIP_ALL_S4) { 733 if (skip > SAVAGE_SKIP_ALL_S4) {
732 DRM_ERROR("invalid skip flags 0x%04x\n", skip); 734 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
733 return DRM_ERR(EINVAL); 735 return DRM_ERR(EINVAL);
734 } 736 }
735 vtx_size = 10; /* full vertex */ 737 vtx_size = 10; /* full vertex */
736 } 738 }
737 739
738 vtx_size -= (skip & 1) + (skip >> 1 & 1) + 740 vtx_size -= (skip & 1) + (skip >> 1 & 1) +
739 (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) + 741 (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
740 (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1); 742 (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
741 743
742 if (vtx_size > vb_stride) { 744 if (vtx_size > vb_stride) {
743 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", 745 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
@@ -753,11 +755,11 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
753 uint16_t idx[255]; 755 uint16_t idx[255];
754 756
755 /* Copy and check indices */ 757 /* Copy and check indices */
756 DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count*2); 758 DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count * 2);
757 for (i = 0; i < count; ++i) { 759 for (i = 0; i < count; ++i) {
758 if (idx[i] > vb_size / (vb_stride*4)) { 760 if (idx[i] > vb_size / (vb_stride * 4)) {
759 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", 761 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
760 i, idx[i], vb_size / (vb_stride*4)); 762 i, idx[i], vb_size / (vb_stride * 4));
761 return DRM_ERR(EINVAL); 763 return DRM_ERR(EINVAL);
762 } 764 }
763 } 765 }
@@ -766,25 +768,25 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
766 /* Need to reorder vertices for correct flat 768 /* Need to reorder vertices for correct flat
767 * shading while preserving the clock sense 769 * shading while preserving the clock sense
768 * for correct culling. Only on Savage3D. */ 770 * for correct culling. Only on Savage3D. */
769 int reorder[3] = {2, -1, -1}; 771 int reorder[3] = { 2, -1, -1 };
770 772
771 BEGIN_DMA(count*vtx_size+1); 773 BEGIN_DMA(count * vtx_size + 1);
772 DMA_DRAW_PRIMITIVE(count, prim, skip); 774 DMA_DRAW_PRIMITIVE(count, prim, skip);
773 775
774 for (i = 0; i < count; ++i) { 776 for (i = 0; i < count; ++i) {
775 unsigned int j = idx[i + reorder[i % 3]]; 777 unsigned int j = idx[i + reorder[i % 3]];
776 DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j], 778 DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j],
777 vtx_size); 779 vtx_size);
778 } 780 }
779 781
780 DMA_COMMIT(); 782 DMA_COMMIT();
781 } else { 783 } else {
782 BEGIN_DMA(count*vtx_size+1); 784 BEGIN_DMA(count * vtx_size + 1);
783 DMA_DRAW_PRIMITIVE(count, prim, skip); 785 DMA_DRAW_PRIMITIVE(count, prim, skip);
784 786
785 for (i = 0; i < count; ++i) { 787 for (i = 0; i < count; ++i) {
786 unsigned int j = idx[i]; 788 unsigned int j = idx[i];
787 DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j], 789 DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j],
788 vtx_size); 790 vtx_size);
789 } 791 }
790 792
@@ -800,11 +802,11 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
800 return 0; 802 return 0;
801} 803}
802 804
803static int savage_dispatch_clear(drm_savage_private_t *dev_priv, 805static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
804 const drm_savage_cmd_header_t *cmd_header, 806 const drm_savage_cmd_header_t * cmd_header,
805 const drm_savage_cmd_header_t __user *data, 807 const drm_savage_cmd_header_t __user * data,
806 unsigned int nbox, 808 unsigned int nbox,
807 const drm_clip_rect_t __user *usr_boxes) 809 const drm_clip_rect_t __user * usr_boxes)
808{ 810{
809 unsigned int flags = cmd_header->clear0.flags, mask, value; 811 unsigned int flags = cmd_header->clear0.flags, mask, value;
810 unsigned int clear_cmd; 812 unsigned int clear_cmd;
@@ -814,18 +816,17 @@ static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
814 if (nbox == 0) 816 if (nbox == 0)
815 return 0; 817 return 0;
816 818
817 DRM_GET_USER_UNCHECKED(mask, &((const drm_savage_cmd_header_t*)data) 819 DRM_GET_USER_UNCHECKED(mask, &((const drm_savage_cmd_header_t *)data)
818 ->clear1.mask); 820 ->clear1.mask);
819 DRM_GET_USER_UNCHECKED(value, &((const drm_savage_cmd_header_t*)data) 821 DRM_GET_USER_UNCHECKED(value, &((const drm_savage_cmd_header_t *)data)
820 ->clear1.value); 822 ->clear1.value);
821 823
822 clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP | 824 clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
823 BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW; 825 BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
824 BCI_CMD_SET_ROP(clear_cmd,0xCC); 826 BCI_CMD_SET_ROP(clear_cmd, 0xCC);
825 827
826 nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) + 828 nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) +
827 ((flags & SAVAGE_BACK) ? 1 : 0) + 829 ((flags & SAVAGE_BACK) ? 1 : 0) + ((flags & SAVAGE_DEPTH) ? 1 : 0);
828 ((flags & SAVAGE_DEPTH) ? 1 : 0);
829 if (nbufs == 0) 830 if (nbufs == 0)
830 return 0; 831 return 0;
831 832
@@ -844,12 +845,12 @@ static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
844 x = box.x1, y = box.y1; 845 x = box.x1, y = box.y1;
845 w = box.x2 - box.x1; 846 w = box.x2 - box.x1;
846 h = box.y2 - box.y1; 847 h = box.y2 - box.y1;
847 BEGIN_DMA(nbufs*6); 848 BEGIN_DMA(nbufs * 6);
848 for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) { 849 for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
849 if (!(flags & buf)) 850 if (!(flags & buf))
850 continue; 851 continue;
851 DMA_WRITE(clear_cmd); 852 DMA_WRITE(clear_cmd);
852 switch(buf) { 853 switch (buf) {
853 case SAVAGE_FRONT: 854 case SAVAGE_FRONT:
854 DMA_WRITE(dev_priv->front_offset); 855 DMA_WRITE(dev_priv->front_offset);
855 DMA_WRITE(dev_priv->front_bd); 856 DMA_WRITE(dev_priv->front_bd);
@@ -880,9 +881,9 @@ static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
880 return 0; 881 return 0;
881} 882}
882 883
883static int savage_dispatch_swap(drm_savage_private_t *dev_priv, 884static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
884 unsigned int nbox, 885 unsigned int nbox,
885 const drm_clip_rect_t __user *usr_boxes) 886 const drm_clip_rect_t __user * usr_boxes)
886{ 887{
887 unsigned int swap_cmd; 888 unsigned int swap_cmd;
888 unsigned int i; 889 unsigned int i;
@@ -892,8 +893,8 @@ static int savage_dispatch_swap(drm_savage_private_t *dev_priv,
892 return 0; 893 return 0;
893 894
894 swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP | 895 swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
895 BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD; 896 BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD;
896 BCI_CMD_SET_ROP(swap_cmd,0xCC); 897 BCI_CMD_SET_ROP(swap_cmd, 0xCC);
897 898
898 for (i = 0; i < nbox; ++i) { 899 for (i = 0; i < nbox; ++i) {
899 drm_clip_rect_t box; 900 drm_clip_rect_t box;
@@ -905,21 +906,21 @@ static int savage_dispatch_swap(drm_savage_private_t *dev_priv,
905 DMA_WRITE(dev_priv->back_bd); 906 DMA_WRITE(dev_priv->back_bd);
906 DMA_WRITE(BCI_X_Y(box.x1, box.y1)); 907 DMA_WRITE(BCI_X_Y(box.x1, box.y1));
907 DMA_WRITE(BCI_X_Y(box.x1, box.y1)); 908 DMA_WRITE(BCI_X_Y(box.x1, box.y1));
908 DMA_WRITE(BCI_W_H(box.x2-box.x1, box.y2-box.y1)); 909 DMA_WRITE(BCI_W_H(box.x2 - box.x1, box.y2 - box.y1));
909 DMA_COMMIT(); 910 DMA_COMMIT();
910 } 911 }
911 912
912 return 0; 913 return 0;
913} 914}
914 915
915static int savage_dispatch_draw(drm_savage_private_t *dev_priv, 916static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
916 const drm_savage_cmd_header_t __user *start, 917 const drm_savage_cmd_header_t __user * start,
917 const drm_savage_cmd_header_t __user *end, 918 const drm_savage_cmd_header_t __user * end,
918 const drm_buf_t *dmabuf, 919 const drm_buf_t * dmabuf,
919 const unsigned int __user *usr_vtxbuf, 920 const unsigned int __user * usr_vtxbuf,
920 unsigned int vb_size, unsigned int vb_stride, 921 unsigned int vb_size, unsigned int vb_stride,
921 unsigned int nbox, 922 unsigned int nbox,
922 const drm_clip_rect_t __user *usr_boxes) 923 const drm_clip_rect_t __user * usr_boxes)
923{ 924{
924 unsigned int i, j; 925 unsigned int i, j;
925 int ret; 926 int ret;
@@ -938,32 +939,42 @@ static int savage_dispatch_draw(drm_savage_private_t *dev_priv,
938 usr_cmdbuf++; 939 usr_cmdbuf++;
939 switch (cmd_header.cmd.cmd) { 940 switch (cmd_header.cmd.cmd) {
940 case SAVAGE_CMD_DMA_PRIM: 941 case SAVAGE_CMD_DMA_PRIM:
941 ret = savage_dispatch_dma_prim( 942 ret =
942 dev_priv, &cmd_header, dmabuf); 943 savage_dispatch_dma_prim(dev_priv,
944 &cmd_header,
945 dmabuf);
943 break; 946 break;
944 case SAVAGE_CMD_VB_PRIM: 947 case SAVAGE_CMD_VB_PRIM:
945 ret = savage_dispatch_vb_prim( 948 ret =
946 dev_priv, &cmd_header, 949 savage_dispatch_vb_prim(dev_priv,
947 (const uint32_t __user *)usr_vtxbuf, 950 &cmd_header,
948 vb_size, vb_stride); 951 (const uint32_t
952 __user *)
953 usr_vtxbuf, vb_size,
954 vb_stride);
949 break; 955 break;
950 case SAVAGE_CMD_DMA_IDX: 956 case SAVAGE_CMD_DMA_IDX:
951 j = (cmd_header.idx.count + 3) / 4; 957 j = (cmd_header.idx.count + 3) / 4;
952 /* j was check in savage_bci_cmdbuf */ 958 /* j was check in savage_bci_cmdbuf */
953 ret = savage_dispatch_dma_idx( 959 ret =
954 dev_priv, &cmd_header, 960 savage_dispatch_dma_idx(dev_priv,
955 (const uint16_t __user *)usr_cmdbuf, 961 &cmd_header,
956 dmabuf); 962 (const uint16_t
963 __user *)
964 usr_cmdbuf, dmabuf);
957 usr_cmdbuf += j; 965 usr_cmdbuf += j;
958 break; 966 break;
959 case SAVAGE_CMD_VB_IDX: 967 case SAVAGE_CMD_VB_IDX:
960 j = (cmd_header.idx.count + 3) / 4; 968 j = (cmd_header.idx.count + 3) / 4;
961 /* j was check in savage_bci_cmdbuf */ 969 /* j was check in savage_bci_cmdbuf */
962 ret = savage_dispatch_vb_idx( 970 ret =
963 dev_priv, &cmd_header, 971 savage_dispatch_vb_idx(dev_priv,
964 (const uint16_t __user *)usr_cmdbuf, 972 &cmd_header,
965 (const uint32_t __user *)usr_vtxbuf, 973 (const uint16_t
966 vb_size, vb_stride); 974 __user *)usr_cmdbuf,
975 (const uint32_t
976 __user *)usr_vtxbuf,
977 vb_size, vb_stride);
967 usr_cmdbuf += j; 978 usr_cmdbuf += j;
968 break; 979 break;
969 default: 980 default:
@@ -997,16 +1008,17 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
997 int ret = 0; 1008 int ret = 0;
998 1009
999 DRM_DEBUG("\n"); 1010 DRM_DEBUG("\n");
1000 1011
1001 LOCK_TEST_WITH_RETURN(dev, filp); 1012 LOCK_TEST_WITH_RETURN(dev, filp);
1002 1013
1003 DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_savage_cmdbuf_t __user *)data, 1014 DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_savage_cmdbuf_t __user *) data,
1004 sizeof(cmdbuf)); 1015 sizeof(cmdbuf));
1005 1016
1006 if (dma && dma->buflist) { 1017 if (dma && dma->buflist) {
1007 if (cmdbuf.dma_idx > dma->buf_count) { 1018 if (cmdbuf.dma_idx > dma->buf_count) {
1008 DRM_ERROR("vertex buffer index %u out of range (0-%u)\n", 1019 DRM_ERROR
1009 cmdbuf.dma_idx, dma->buf_count-1); 1020 ("vertex buffer index %u out of range (0-%u)\n",
1021 cmdbuf.dma_idx, dma->buf_count - 1);
1010 return DRM_ERR(EINVAL); 1022 return DRM_ERR(EINVAL);
1011 } 1023 }
1012 dmabuf = dma->buflist[cmdbuf.dma_idx]; 1024 dmabuf = dma->buflist[cmdbuf.dma_idx];
@@ -1014,14 +1026,14 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1014 dmabuf = NULL; 1026 dmabuf = NULL;
1015 } 1027 }
1016 1028
1017 usr_cmdbuf = (drm_savage_cmd_header_t __user *)cmdbuf.cmd_addr; 1029 usr_cmdbuf = (drm_savage_cmd_header_t __user *) cmdbuf.cmd_addr;
1018 usr_vtxbuf = (unsigned int __user *)cmdbuf.vb_addr; 1030 usr_vtxbuf = (unsigned int __user *)cmdbuf.vb_addr;
1019 usr_boxes = (drm_clip_rect_t __user *)cmdbuf.box_addr; 1031 usr_boxes = (drm_clip_rect_t __user *) cmdbuf.box_addr;
1020 if ((cmdbuf.size && DRM_VERIFYAREA_READ(usr_cmdbuf, cmdbuf.size*8)) || 1032 if ((cmdbuf.size && DRM_VERIFYAREA_READ(usr_cmdbuf, cmdbuf.size * 8)) ||
1021 (cmdbuf.vb_size && DRM_VERIFYAREA_READ( 1033 (cmdbuf.vb_size && DRM_VERIFYAREA_READ(usr_vtxbuf, cmdbuf.vb_size))
1022 usr_vtxbuf, cmdbuf.vb_size)) || 1034 || (cmdbuf.nbox
1023 (cmdbuf.nbox && DRM_VERIFYAREA_READ( 1035 && DRM_VERIFYAREA_READ(usr_boxes,
1024 usr_boxes, cmdbuf.nbox*sizeof(drm_clip_rect_t)))) 1036 cmdbuf.nbox * sizeof(drm_clip_rect_t))))
1025 return DRM_ERR(EFAULT); 1037 return DRM_ERR(EFAULT);
1026 1038
1027 /* Make sure writes to DMA buffers are finished before sending 1039 /* Make sure writes to DMA buffers are finished before sending
@@ -1058,17 +1070,21 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1058 case SAVAGE_CMD_DMA_PRIM: 1070 case SAVAGE_CMD_DMA_PRIM:
1059 case SAVAGE_CMD_VB_PRIM: 1071 case SAVAGE_CMD_VB_PRIM:
1060 if (!first_draw_cmd) 1072 if (!first_draw_cmd)
1061 first_draw_cmd = usr_cmdbuf-1; 1073 first_draw_cmd = usr_cmdbuf - 1;
1062 usr_cmdbuf += j; 1074 usr_cmdbuf += j;
1063 i += j; 1075 i += j;
1064 break; 1076 break;
1065 default: 1077 default:
1066 if (first_draw_cmd) { 1078 if (first_draw_cmd) {
1067 ret = savage_dispatch_draw ( 1079 ret =
1068 dev_priv, first_draw_cmd, usr_cmdbuf-1, 1080 savage_dispatch_draw(dev_priv,
1069 dmabuf, usr_vtxbuf, cmdbuf.vb_size, 1081 first_draw_cmd,
1070 cmdbuf.vb_stride, 1082 usr_cmdbuf - 1, dmabuf,
1071 cmdbuf.nbox, usr_boxes); 1083 usr_vtxbuf,
1084 cmdbuf.vb_size,
1085 cmdbuf.vb_stride,
1086 cmdbuf.nbox,
1087 usr_boxes);
1072 if (ret != 0) 1088 if (ret != 0)
1073 return ret; 1089 return ret;
1074 first_draw_cmd = NULL; 1090 first_draw_cmd = NULL;
@@ -1086,9 +1102,9 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1086 DMA_FLUSH(); 1102 DMA_FLUSH();
1087 return DRM_ERR(EINVAL); 1103 return DRM_ERR(EINVAL);
1088 } 1104 }
1089 ret = savage_dispatch_state( 1105 ret = savage_dispatch_state(dev_priv, &cmd_header,
1090 dev_priv, &cmd_header, 1106 (uint32_t __user *)
1091 (uint32_t __user *)usr_cmdbuf); 1107 usr_cmdbuf);
1092 usr_cmdbuf += j; 1108 usr_cmdbuf += j;
1093 i += j; 1109 i += j;
1094 break; 1110 break;
@@ -1122,10 +1138,11 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1122 } 1138 }
1123 1139
1124 if (first_draw_cmd) { 1140 if (first_draw_cmd) {
1125 ret = savage_dispatch_draw ( 1141 ret =
1126 dev_priv, first_draw_cmd, usr_cmdbuf, dmabuf, 1142 savage_dispatch_draw(dev_priv, first_draw_cmd, usr_cmdbuf,
1127 usr_vtxbuf, cmdbuf.vb_size, cmdbuf.vb_stride, 1143 dmabuf, usr_vtxbuf, cmdbuf.vb_size,
1128 cmdbuf.nbox, usr_boxes); 1144 cmdbuf.vb_stride, cmdbuf.nbox,
1145 usr_boxes);
1129 if (ret != 0) { 1146 if (ret != 0) {
1130 DMA_FLUSH(); 1147 DMA_FLUSH();
1131 return ret; 1148 return ret;