| author | Dave Airlie <airlied@starflyer.(none)> | 2006-01-02 00:11:44 -0500 |
| --- | --- | --- |
| committer | Dave Airlie <airlied@linux.ie> | 2006-01-02 00:11:44 -0500 |
| commit | 3528af1b189d0fbb4c7a3f121f46d9987b9af5b6 (patch) | |
| tree | 565940d5d0ac96c063f15cba5a049103e2993466 /drivers/char | |
| parent | 952d751a140e961f7ac67f743cf94d1a37c736e8 (diff) | |
drm: fix a LOR issue on FreeBSD for savage driver
Correct a LOR (lock order reversal) issue on FreeBSD by allocating temporary kernel space and doing a single checked DRM_COPY_FROM_USER rather than DRM_VERIFYAREA_READ followed by many DRM_COPY_FROM_USER_UNCHECKED calls. I don't like the look of the temporary space
allocation, but I like the simplification in the rest of the file. Tested
with glxgears, tuxracer, and q3 on a Savage4.
From: Eric Anholt <anholt@freebsd.org>
Signed-off-by: Dave Airlie <airlied@linux.ie>
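The patch applies the same idiom in every ioctl path: allocate a bounded kernel buffer, pull the user data across with one checked copy before any commands are emitted, and free the buffer on every exit. Below is a minimal sketch of that copy-once pattern, written against the old DRM helpers visible in the diff (drm_alloc, drm_free, DRM_COPY_FROM_USER, DRM_ERR); the helper function and its parameters are illustrative, not part of the driver.

```c
/*
 * Illustrative sketch only: the helper name and its signature are
 * hypothetical, but drm_alloc(), drm_free(), DRM_COPY_FROM_USER() and
 * DRM_ERR() are the old DRM osdep helpers used in the patch below.
 */
static int savage_copy_user_block(const void *user_ptr, size_t size,
				  void **out)
{
	void *tmp;

	if (size == 0) {
		*out = NULL;	/* nothing to copy; caller treats NULL as empty */
		return 0;
	}

	tmp = drm_alloc(size, DRM_MEM_DRIVER);
	if (tmp == NULL)
		return DRM_ERR(ENOMEM);

	/* One checked copy of the whole block, done up front, instead of
	 * VERIFYAREA_READ followed by many unchecked per-item reads. */
	if (DRM_COPY_FROM_USER(tmp, user_ptr, size)) {
		drm_free(tmp, size, DRM_MEM_DRIVER);
		return DRM_ERR(EFAULT);
	}

	*out = tmp;
	return 0;
}
```

Because the data is already in kernel memory when dispatch runs, the verify and emit code can use plain memcpy and direct reads (as in the new DMA_COPY and SAVE_STATE macros below) instead of unchecked user accesses.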
Diffstat (limited to 'drivers/char')
| -rw-r--r-- | drivers/char/drm/savage_drv.h | 23 |
| -rw-r--r-- | drivers/char/drm/savage_state.c | 324 |

2 files changed, 171 insertions(+), 176 deletions(-)
diff --git a/drivers/char/drm/savage_drv.h b/drivers/char/drm/savage_drv.h
index 2f73558fb9cf..dd46cb85439c 100644
--- a/drivers/char/drm/savage_drv.h
+++ b/drivers/char/drm/savage_drv.h
@@ -1,5 +1,5 @@ | |||
1 | /* savage_drv.h -- Private header for the savage driver | 1 | /* savage_drv.h -- Private header for the savage driver */ |
2 | * | 2 | /* |
3 | * Copyright 2004 Felix Kuehling | 3 | * Copyright 2004 Felix Kuehling |
4 | * All Rights Reserved. | 4 | * All Rights Reserved. |
5 | * | 5 | * |
@@ -192,7 +192,7 @@ typedef struct drm_savage_private { | |||
192 | /* Err, there is a macro wait_event in include/linux/wait.h. | 192 | /* Err, there is a macro wait_event in include/linux/wait.h. |
193 | * Avoid unwanted macro expansion. */ | 193 | * Avoid unwanted macro expansion. */ |
194 | void (*emit_clip_rect) (struct drm_savage_private * dev_priv, | 194 | void (*emit_clip_rect) (struct drm_savage_private * dev_priv, |
195 | drm_clip_rect_t * pbox); | 195 | const drm_clip_rect_t * pbox); |
196 | void (*dma_flush) (struct drm_savage_private * dev_priv); | 196 | void (*dma_flush) (struct drm_savage_private * dev_priv); |
197 | } drm_savage_private_t; | 197 | } drm_savage_private_t; |
198 | 198 | ||
@@ -217,9 +217,9 @@ extern void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp); | |||
217 | 217 | ||
218 | /* state functions */ | 218 | /* state functions */ |
219 | extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, | 219 | extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, |
220 | drm_clip_rect_t * pbox); | 220 | const drm_clip_rect_t * pbox); |
221 | extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv, | 221 | extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv, |
222 | drm_clip_rect_t * pbox); | 222 | const drm_clip_rect_t * pbox); |
223 | 223 | ||
224 | #define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */ | 224 | #define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */ |
225 | #define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */ | 225 | #define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */ |
@@ -502,15 +502,6 @@ extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv, | |||
502 | 502 | ||
503 | #define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val) | 503 | #define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val) |
504 | 504 | ||
505 | #define BCI_COPY_FROM_USER(src,n) do { \ | ||
506 | unsigned int i; \ | ||
507 | for (i = 0; i < n; ++i) { \ | ||
508 | uint32_t val; \ | ||
509 | DRM_GET_USER_UNCHECKED(val, &((uint32_t*)(src))[i]); \ | ||
510 | BCI_WRITE(val); \ | ||
511 | } \ | ||
512 | } while(0) | ||
513 | |||
514 | /* | 505 | /* |
515 | * command DMA support | 506 | * command DMA support |
516 | */ | 507 | */ |
@@ -536,8 +527,8 @@ extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv, | |||
536 | 527 | ||
537 | #define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val) | 528 | #define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val) |
538 | 529 | ||
539 | #define DMA_COPY_FROM_USER(src,n) do { \ | 530 | #define DMA_COPY(src, n) do { \ |
540 | DRM_COPY_FROM_USER_UNCHECKED(dma_ptr, (src), (n)*4); \ | 531 | memcpy(dma_ptr, (src), (n)*4); \ |
541 | dma_ptr += n; \ | 532 | dma_ptr += n; \ |
542 | } while(0) | 533 | } while(0) |
543 | 534 | ||
diff --git a/drivers/char/drm/savage_state.c b/drivers/char/drm/savage_state.c
index e87a5d59b99c..ef2581d16146 100644
--- a/drivers/char/drm/savage_state.c
+++ b/drivers/char/drm/savage_state.c
@@ -27,7 +27,7 @@ | |||
27 | #include "savage_drv.h" | 27 | #include "savage_drv.h" |
28 | 28 | ||
29 | void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, | 29 | void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, |
30 | drm_clip_rect_t * pbox) | 30 | const drm_clip_rect_t * pbox) |
31 | { | 31 | { |
32 | uint32_t scstart = dev_priv->state.s3d.new_scstart; | 32 | uint32_t scstart = dev_priv->state.s3d.new_scstart; |
33 | uint32_t scend = dev_priv->state.s3d.new_scend; | 33 | uint32_t scend = dev_priv->state.s3d.new_scend; |
@@ -53,7 +53,7 @@ void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, | |||
53 | } | 53 | } |
54 | 54 | ||
55 | void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv, | 55 | void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv, |
56 | drm_clip_rect_t * pbox) | 56 | const drm_clip_rect_t * pbox) |
57 | { | 57 | { |
58 | uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0; | 58 | uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0; |
59 | uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1; | 59 | uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1; |
@@ -115,18 +115,19 @@ static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit, | |||
115 | 115 | ||
116 | #define SAVE_STATE(reg,where) \ | 116 | #define SAVE_STATE(reg,where) \ |
117 | if(start <= reg && start+count > reg) \ | 117 | if(start <= reg && start+count > reg) \ |
118 | DRM_GET_USER_UNCHECKED(dev_priv->state.where, ®s[reg-start]) | 118 | dev_priv->state.where = regs[reg - start] |
119 | #define SAVE_STATE_MASK(reg,where,mask) do { \ | 119 | #define SAVE_STATE_MASK(reg,where,mask) do { \ |
120 | if(start <= reg && start+count > reg) { \ | 120 | if(start <= reg && start+count > reg) { \ |
121 | uint32_t tmp; \ | 121 | uint32_t tmp; \ |
122 | DRM_GET_USER_UNCHECKED(tmp, ®s[reg-start]); \ | 122 | tmp = regs[reg - start]; \ |
123 | dev_priv->state.where = (tmp & (mask)) | \ | 123 | dev_priv->state.where = (tmp & (mask)) | \ |
124 | (dev_priv->state.where & ~(mask)); \ | 124 | (dev_priv->state.where & ~(mask)); \ |
125 | } \ | 125 | } \ |
126 | } while (0) | 126 | } while (0) |
127 | |||
127 | static int savage_verify_state_s3d(drm_savage_private_t * dev_priv, | 128 | static int savage_verify_state_s3d(drm_savage_private_t * dev_priv, |
128 | unsigned int start, unsigned int count, | 129 | unsigned int start, unsigned int count, |
129 | const uint32_t __user * regs) | 130 | const uint32_t *regs) |
130 | { | 131 | { |
131 | if (start < SAVAGE_TEXPALADDR_S3D || | 132 | if (start < SAVAGE_TEXPALADDR_S3D || |
132 | start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { | 133 | start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { |
@@ -148,8 +149,7 @@ static int savage_verify_state_s3d(drm_savage_private_t * dev_priv, | |||
148 | SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr); | 149 | SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr); |
149 | if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK) | 150 | if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK) |
150 | return savage_verify_texaddr(dev_priv, 0, | 151 | return savage_verify_texaddr(dev_priv, 0, |
151 | dev_priv->state.s3d. | 152 | dev_priv->state.s3d.texaddr); |
152 | texaddr); | ||
153 | } | 153 | } |
154 | 154 | ||
155 | return 0; | 155 | return 0; |
@@ -157,7 +157,7 @@ static int savage_verify_state_s3d(drm_savage_private_t * dev_priv, | |||
157 | 157 | ||
158 | static int savage_verify_state_s4(drm_savage_private_t * dev_priv, | 158 | static int savage_verify_state_s4(drm_savage_private_t * dev_priv, |
159 | unsigned int start, unsigned int count, | 159 | unsigned int start, unsigned int count, |
160 | const uint32_t __user * regs) | 160 | const uint32_t *regs) |
161 | { | 161 | { |
162 | int ret = 0; | 162 | int ret = 0; |
163 | 163 | ||
@@ -174,19 +174,18 @@ static int savage_verify_state_s4(drm_savage_private_t * dev_priv, | |||
174 | ~SAVAGE_SCISSOR_MASK_S4); | 174 | ~SAVAGE_SCISSOR_MASK_S4); |
175 | 175 | ||
176 | /* if any texture regs were changed ... */ | 176 | /* if any texture regs were changed ... */ |
177 | if (start <= SAVAGE_TEXDESCR_S4 && start + count > SAVAGE_TEXPALADDR_S4) { | 177 | if (start <= SAVAGE_TEXDESCR_S4 && |
178 | start + count > SAVAGE_TEXPALADDR_S4) { | ||
178 | /* ... check texture state */ | 179 | /* ... check texture state */ |
179 | SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr); | 180 | SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr); |
180 | SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0); | 181 | SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0); |
181 | SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1); | 182 | SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1); |
182 | if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK) | 183 | if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK) |
183 | ret |= | 184 | ret |= savage_verify_texaddr(dev_priv, 0, |
184 | savage_verify_texaddr(dev_priv, 0, | 185 | dev_priv->state.s4.texaddr0); |
185 | dev_priv->state.s4.texaddr0); | ||
186 | if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK) | 186 | if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK) |
187 | ret |= | 187 | ret |= savage_verify_texaddr(dev_priv, 1, |
188 | savage_verify_texaddr(dev_priv, 1, | 188 | dev_priv->state.s4.texaddr1); |
189 | dev_priv->state.s4.texaddr1); | ||
190 | } | 189 | } |
191 | 190 | ||
192 | return ret; | 191 | return ret; |
@@ -197,7 +196,7 @@ static int savage_verify_state_s4(drm_savage_private_t * dev_priv, | |||
197 | 196 | ||
198 | static int savage_dispatch_state(drm_savage_private_t * dev_priv, | 197 | static int savage_dispatch_state(drm_savage_private_t * dev_priv, |
199 | const drm_savage_cmd_header_t * cmd_header, | 198 | const drm_savage_cmd_header_t * cmd_header, |
200 | const uint32_t __user * regs) | 199 | const uint32_t *regs) |
201 | { | 200 | { |
202 | unsigned int count = cmd_header->state.count; | 201 | unsigned int count = cmd_header->state.count; |
203 | unsigned int start = cmd_header->state.start; | 202 | unsigned int start = cmd_header->state.start; |
@@ -209,9 +208,6 @@ static int savage_dispatch_state(drm_savage_private_t * dev_priv, | |||
209 | if (!count) | 208 | if (!count) |
210 | return 0; | 209 | return 0; |
211 | 210 | ||
212 | if (DRM_VERIFYAREA_READ(regs, count * 4)) | ||
213 | return DRM_ERR(EFAULT); | ||
214 | |||
215 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | 211 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { |
216 | ret = savage_verify_state_s3d(dev_priv, start, count, regs); | 212 | ret = savage_verify_state_s3d(dev_priv, start, count, regs); |
217 | if (ret != 0) | 213 | if (ret != 0) |
@@ -236,8 +232,8 @@ static int savage_dispatch_state(drm_savage_private_t * dev_priv, | |||
236 | /* scissor regs are emitted in savage_dispatch_draw */ | 232 | /* scissor regs are emitted in savage_dispatch_draw */ |
237 | if (start < SAVAGE_DRAWCTRL0_S4) { | 233 | if (start < SAVAGE_DRAWCTRL0_S4) { |
238 | if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) | 234 | if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) |
239 | count2 = | 235 | count2 = count - |
240 | count - (SAVAGE_DRAWCTRL1_S4 + 1 - start); | 236 | (SAVAGE_DRAWCTRL1_S4 + 1 - start); |
241 | if (start + count > SAVAGE_DRAWCTRL0_S4) | 237 | if (start + count > SAVAGE_DRAWCTRL0_S4) |
242 | count = SAVAGE_DRAWCTRL0_S4 - start; | 238 | count = SAVAGE_DRAWCTRL0_S4 - start; |
243 | } else if (start <= SAVAGE_DRAWCTRL1_S4) { | 239 | } else if (start <= SAVAGE_DRAWCTRL1_S4) { |
@@ -263,7 +259,7 @@ static int savage_dispatch_state(drm_savage_private_t * dev_priv, | |||
263 | while (count > 0) { | 259 | while (count > 0) { |
264 | unsigned int n = count < 255 ? count : 255; | 260 | unsigned int n = count < 255 ? count : 255; |
265 | DMA_SET_REGISTERS(start, n); | 261 | DMA_SET_REGISTERS(start, n); |
266 | DMA_COPY_FROM_USER(regs, n); | 262 | DMA_COPY(regs, n); |
267 | count -= n; | 263 | count -= n; |
268 | start += n; | 264 | start += n; |
269 | regs += n; | 265 | regs += n; |
@@ -421,8 +417,8 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv, | |||
421 | 417 | ||
422 | static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv, | 418 | static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv, |
423 | const drm_savage_cmd_header_t * cmd_header, | 419 | const drm_savage_cmd_header_t * cmd_header, |
424 | const uint32_t __user * vtxbuf, | 420 | const uint32_t *vtxbuf, unsigned int vb_size, |
425 | unsigned int vb_size, unsigned int vb_stride) | 421 | unsigned int vb_stride) |
426 | { | 422 | { |
427 | unsigned char reorder = 0; | 423 | unsigned char reorder = 0; |
428 | unsigned int prim = cmd_header->prim.prim; | 424 | unsigned int prim = cmd_header->prim.prim; |
@@ -507,8 +503,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv, | |||
507 | 503 | ||
508 | for (i = start; i < start + count; ++i) { | 504 | for (i = start; i < start + count; ++i) { |
509 | unsigned int j = i + reorder[i % 3]; | 505 | unsigned int j = i + reorder[i % 3]; |
510 | DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j], | 506 | DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); |
511 | vtx_size); | ||
512 | } | 507 | } |
513 | 508 | ||
514 | DMA_COMMIT(); | 509 | DMA_COMMIT(); |
@@ -517,13 +512,12 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv, | |||
517 | DMA_DRAW_PRIMITIVE(count, prim, skip); | 512 | DMA_DRAW_PRIMITIVE(count, prim, skip); |
518 | 513 | ||
519 | if (vb_stride == vtx_size) { | 514 | if (vb_stride == vtx_size) { |
520 | DMA_COPY_FROM_USER(&vtxbuf[vb_stride * start], | 515 | DMA_COPY(&vtxbuf[vb_stride * start], |
521 | vtx_size * count); | 516 | vtx_size * count); |
522 | } else { | 517 | } else { |
523 | for (i = start; i < start + count; ++i) { | 518 | for (i = start; i < start + count; ++i) { |
524 | DMA_COPY_FROM_USER(&vtxbuf | 519 | DMA_COPY(&vtxbuf [vb_stride * i], |
525 | [vb_stride * i], | 520 | vtx_size); |
526 | vtx_size); | ||
527 | } | 521 | } |
528 | } | 522 | } |
529 | 523 | ||
@@ -541,7 +535,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv, | |||
541 | 535 | ||
542 | static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv, | 536 | static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv, |
543 | const drm_savage_cmd_header_t * cmd_header, | 537 | const drm_savage_cmd_header_t * cmd_header, |
544 | const uint16_t __user * usr_idx, | 538 | const uint16_t *idx, |
545 | const drm_buf_t * dmabuf) | 539 | const drm_buf_t * dmabuf) |
546 | { | 540 | { |
547 | unsigned char reorder = 0; | 541 | unsigned char reorder = 0; |
@@ -628,11 +622,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv, | |||
628 | while (n != 0) { | 622 | while (n != 0) { |
629 | /* Can emit up to 255 indices (85 triangles) at once. */ | 623 | /* Can emit up to 255 indices (85 triangles) at once. */ |
630 | unsigned int count = n > 255 ? 255 : n; | 624 | unsigned int count = n > 255 ? 255 : n; |
631 | /* Is it ok to allocate 510 bytes on the stack in an ioctl? */ | ||
632 | uint16_t idx[255]; | ||
633 | 625 | ||
634 | /* Copy and check indices */ | 626 | /* check indices */ |
635 | DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count * 2); | ||
636 | for (i = 0; i < count; ++i) { | 627 | for (i = 0; i < count; ++i) { |
637 | if (idx[i] > dmabuf->total / 32) { | 628 | if (idx[i] > dmabuf->total / 32) { |
638 | DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", | 629 | DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", |
@@ -652,8 +643,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv, | |||
652 | 643 | ||
653 | for (i = 1; i + 1 < count; i += 2) | 644 | for (i = 1; i + 1 < count; i += 2) |
654 | BCI_WRITE(idx[i + reorder[i % 3]] | | 645 | BCI_WRITE(idx[i + reorder[i % 3]] | |
655 | (idx[i + 1 + reorder[(i + 1) % 3]] << | 646 | (idx[i + 1 + |
656 | 16)); | 647 | reorder[(i + 1) % 3]] << 16)); |
657 | if (i < count) | 648 | if (i < count) |
658 | BCI_WRITE(idx[i + reorder[i % 3]]); | 649 | BCI_WRITE(idx[i + reorder[i % 3]]); |
659 | } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | 650 | } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { |
@@ -674,7 +665,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv, | |||
674 | BCI_WRITE(idx[i]); | 665 | BCI_WRITE(idx[i]); |
675 | } | 666 | } |
676 | 667 | ||
677 | usr_idx += count; | 668 | idx += count; |
678 | n -= count; | 669 | n -= count; |
679 | 670 | ||
680 | prim |= BCI_CMD_DRAW_CONT; | 671 | prim |= BCI_CMD_DRAW_CONT; |
@@ -685,8 +676,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv, | |||
685 | 676 | ||
686 | static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv, | 677 | static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv, |
687 | const drm_savage_cmd_header_t * cmd_header, | 678 | const drm_savage_cmd_header_t * cmd_header, |
688 | const uint16_t __user * usr_idx, | 679 | const uint16_t *idx, |
689 | const uint32_t __user * vtxbuf, | 680 | const uint32_t *vtxbuf, |
690 | unsigned int vb_size, unsigned int vb_stride) | 681 | unsigned int vb_size, unsigned int vb_stride) |
691 | { | 682 | { |
692 | unsigned char reorder = 0; | 683 | unsigned char reorder = 0; |
@@ -751,11 +742,8 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv, | |||
751 | while (n != 0) { | 742 | while (n != 0) { |
752 | /* Can emit up to 255 vertices (85 triangles) at once. */ | 743 | /* Can emit up to 255 vertices (85 triangles) at once. */ |
753 | unsigned int count = n > 255 ? 255 : n; | 744 | unsigned int count = n > 255 ? 255 : n; |
754 | /* Is it ok to allocate 510 bytes on the stack in an ioctl? */ | 745 | |
755 | uint16_t idx[255]; | 746 | /* Check indices */ |
756 | |||
757 | /* Copy and check indices */ | ||
758 | DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count * 2); | ||
759 | for (i = 0; i < count; ++i) { | 747 | for (i = 0; i < count; ++i) { |
760 | if (idx[i] > vb_size / (vb_stride * 4)) { | 748 | if (idx[i] > vb_size / (vb_stride * 4)) { |
761 | DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", | 749 | DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", |
@@ -775,8 +763,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv, | |||
775 | 763 | ||
776 | for (i = 0; i < count; ++i) { | 764 | for (i = 0; i < count; ++i) { |
777 | unsigned int j = idx[i + reorder[i % 3]]; | 765 | unsigned int j = idx[i + reorder[i % 3]]; |
778 | DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j], | 766 | DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); |
779 | vtx_size); | ||
780 | } | 767 | } |
781 | 768 | ||
782 | DMA_COMMIT(); | 769 | DMA_COMMIT(); |
@@ -786,14 +773,13 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv, | |||
786 | 773 | ||
787 | for (i = 0; i < count; ++i) { | 774 | for (i = 0; i < count; ++i) { |
788 | unsigned int j = idx[i]; | 775 | unsigned int j = idx[i]; |
789 | DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j], | 776 | DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); |
790 | vtx_size); | ||
791 | } | 777 | } |
792 | 778 | ||
793 | DMA_COMMIT(); | 779 | DMA_COMMIT(); |
794 | } | 780 | } |
795 | 781 | ||
796 | usr_idx += count; | 782 | idx += count; |
797 | n -= count; | 783 | n -= count; |
798 | 784 | ||
799 | prim |= BCI_CMD_DRAW_CONT; | 785 | prim |= BCI_CMD_DRAW_CONT; |
@@ -804,11 +790,11 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv, | |||
804 | 790 | ||
805 | static int savage_dispatch_clear(drm_savage_private_t * dev_priv, | 791 | static int savage_dispatch_clear(drm_savage_private_t * dev_priv, |
806 | const drm_savage_cmd_header_t * cmd_header, | 792 | const drm_savage_cmd_header_t * cmd_header, |
807 | const drm_savage_cmd_header_t __user * data, | 793 | const drm_savage_cmd_header_t *data, |
808 | unsigned int nbox, | 794 | unsigned int nbox, |
809 | const drm_clip_rect_t __user * usr_boxes) | 795 | const drm_clip_rect_t *boxes) |
810 | { | 796 | { |
811 | unsigned int flags = cmd_header->clear0.flags, mask, value; | 797 | unsigned int flags = cmd_header->clear0.flags; |
812 | unsigned int clear_cmd; | 798 | unsigned int clear_cmd; |
813 | unsigned int i, nbufs; | 799 | unsigned int i, nbufs; |
814 | DMA_LOCALS; | 800 | DMA_LOCALS; |
@@ -816,9 +802,6 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv, | |||
816 | if (nbox == 0) | 802 | if (nbox == 0) |
817 | return 0; | 803 | return 0; |
818 | 804 | ||
819 | DRM_GET_USER_UNCHECKED(mask, &data->clear1.mask); | ||
820 | DRM_GET_USER_UNCHECKED(value, &data->clear1.value); | ||
821 | |||
822 | clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP | | 805 | clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP | |
823 | BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW; | 806 | BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW; |
824 | BCI_CMD_SET_ROP(clear_cmd, 0xCC); | 807 | BCI_CMD_SET_ROP(clear_cmd, 0xCC); |
@@ -828,21 +811,19 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv, | |||
828 | if (nbufs == 0) | 811 | if (nbufs == 0) |
829 | return 0; | 812 | return 0; |
830 | 813 | ||
831 | if (mask != 0xffffffff) { | 814 | if (data->clear1.mask != 0xffffffff) { |
832 | /* set mask */ | 815 | /* set mask */ |
833 | BEGIN_DMA(2); | 816 | BEGIN_DMA(2); |
834 | DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); | 817 | DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); |
835 | DMA_WRITE(mask); | 818 | DMA_WRITE(data->clear1.mask); |
836 | DMA_COMMIT(); | 819 | DMA_COMMIT(); |
837 | } | 820 | } |
838 | for (i = 0; i < nbox; ++i) { | 821 | for (i = 0; i < nbox; ++i) { |
839 | drm_clip_rect_t box; | ||
840 | unsigned int x, y, w, h; | 822 | unsigned int x, y, w, h; |
841 | unsigned int buf; | 823 | unsigned int buf; |
842 | DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box)); | 824 | x = boxes[i].x1, y = boxes[i].y1; |
843 | x = box.x1, y = box.y1; | 825 | w = boxes[i].x2 - boxes[i].x1; |
844 | w = box.x2 - box.x1; | 826 | h = boxes[i].y2 - boxes[i].y1; |
845 | h = box.y2 - box.y1; | ||
846 | BEGIN_DMA(nbufs * 6); | 827 | BEGIN_DMA(nbufs * 6); |
847 | for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) { | 828 | for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) { |
848 | if (!(flags & buf)) | 829 | if (!(flags & buf)) |
@@ -862,13 +843,13 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv, | |||
862 | DMA_WRITE(dev_priv->depth_bd); | 843 | DMA_WRITE(dev_priv->depth_bd); |
863 | break; | 844 | break; |
864 | } | 845 | } |
865 | DMA_WRITE(value); | 846 | DMA_WRITE(data->clear1.value); |
866 | DMA_WRITE(BCI_X_Y(x, y)); | 847 | DMA_WRITE(BCI_X_Y(x, y)); |
867 | DMA_WRITE(BCI_W_H(w, h)); | 848 | DMA_WRITE(BCI_W_H(w, h)); |
868 | } | 849 | } |
869 | DMA_COMMIT(); | 850 | DMA_COMMIT(); |
870 | } | 851 | } |
871 | if (mask != 0xffffffff) { | 852 | if (data->clear1.mask != 0xffffffff) { |
872 | /* reset mask */ | 853 | /* reset mask */ |
873 | BEGIN_DMA(2); | 854 | BEGIN_DMA(2); |
874 | DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); | 855 | DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); |
@@ -880,8 +861,7 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv, | |||
880 | } | 861 | } |
881 | 862 | ||
882 | static int savage_dispatch_swap(drm_savage_private_t * dev_priv, | 863 | static int savage_dispatch_swap(drm_savage_private_t * dev_priv, |
883 | unsigned int nbox, | 864 | unsigned int nbox, const drm_clip_rect_t *boxes) |
884 | const drm_clip_rect_t __user * usr_boxes) | ||
885 | { | 865 | { |
886 | unsigned int swap_cmd; | 866 | unsigned int swap_cmd; |
887 | unsigned int i; | 867 | unsigned int i; |
@@ -895,16 +875,14 @@ static int savage_dispatch_swap(drm_savage_private_t * dev_priv, | |||
895 | BCI_CMD_SET_ROP(swap_cmd, 0xCC); | 875 | BCI_CMD_SET_ROP(swap_cmd, 0xCC); |
896 | 876 | ||
897 | for (i = 0; i < nbox; ++i) { | 877 | for (i = 0; i < nbox; ++i) { |
898 | drm_clip_rect_t box; | ||
899 | DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box)); | ||
900 | |||
901 | BEGIN_DMA(6); | 878 | BEGIN_DMA(6); |
902 | DMA_WRITE(swap_cmd); | 879 | DMA_WRITE(swap_cmd); |
903 | DMA_WRITE(dev_priv->back_offset); | 880 | DMA_WRITE(dev_priv->back_offset); |
904 | DMA_WRITE(dev_priv->back_bd); | 881 | DMA_WRITE(dev_priv->back_bd); |
905 | DMA_WRITE(BCI_X_Y(box.x1, box.y1)); | 882 | DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1)); |
906 | DMA_WRITE(BCI_X_Y(box.x1, box.y1)); | 883 | DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1)); |
907 | DMA_WRITE(BCI_W_H(box.x2 - box.x1, box.y2 - box.y1)); | 884 | DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1, |
885 | boxes[i].y2 - boxes[i].y1)); | ||
908 | DMA_COMMIT(); | 886 | DMA_COMMIT(); |
909 | } | 887 | } |
910 | 888 | ||
@@ -912,68 +890,52 @@ static int savage_dispatch_swap(drm_savage_private_t * dev_priv, | |||
912 | } | 890 | } |
913 | 891 | ||
914 | static int savage_dispatch_draw(drm_savage_private_t * dev_priv, | 892 | static int savage_dispatch_draw(drm_savage_private_t * dev_priv, |
915 | const drm_savage_cmd_header_t __user * start, | 893 | const drm_savage_cmd_header_t *start, |
916 | const drm_savage_cmd_header_t __user * end, | 894 | const drm_savage_cmd_header_t *end, |
917 | const drm_buf_t * dmabuf, | 895 | const drm_buf_t * dmabuf, |
918 | const unsigned int __user * usr_vtxbuf, | 896 | const unsigned int *vtxbuf, |
919 | unsigned int vb_size, unsigned int vb_stride, | 897 | unsigned int vb_size, unsigned int vb_stride, |
920 | unsigned int nbox, | 898 | unsigned int nbox, |
921 | const drm_clip_rect_t __user * usr_boxes) | 899 | const drm_clip_rect_t *boxes) |
922 | { | 900 | { |
923 | unsigned int i, j; | 901 | unsigned int i, j; |
924 | int ret; | 902 | int ret; |
925 | 903 | ||
926 | for (i = 0; i < nbox; ++i) { | 904 | for (i = 0; i < nbox; ++i) { |
927 | drm_clip_rect_t box; | 905 | const drm_savage_cmd_header_t *cmdbuf; |
928 | const drm_savage_cmd_header_t __user *usr_cmdbuf; | 906 | dev_priv->emit_clip_rect(dev_priv, &boxes[i]); |
929 | DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box)); | ||
930 | dev_priv->emit_clip_rect(dev_priv, &box); | ||
931 | 907 | ||
932 | usr_cmdbuf = start; | 908 | cmdbuf = start; |
933 | while (usr_cmdbuf < end) { | 909 | while (cmdbuf < end) { |
934 | drm_savage_cmd_header_t cmd_header; | 910 | drm_savage_cmd_header_t cmd_header; |
935 | DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf, | 911 | cmd_header = *cmdbuf; |
936 | sizeof(cmd_header)); | 912 | cmdbuf++; |
937 | usr_cmdbuf++; | ||
938 | switch (cmd_header.cmd.cmd) { | 913 | switch (cmd_header.cmd.cmd) { |
939 | case SAVAGE_CMD_DMA_PRIM: | 914 | case SAVAGE_CMD_DMA_PRIM: |
940 | ret = | 915 | ret = savage_dispatch_dma_prim( |
941 | savage_dispatch_dma_prim(dev_priv, | 916 | dev_priv, &cmd_header, dmabuf); |
942 | &cmd_header, | ||
943 | dmabuf); | ||
944 | break; | 917 | break; |
945 | case SAVAGE_CMD_VB_PRIM: | 918 | case SAVAGE_CMD_VB_PRIM: |
946 | ret = | 919 | ret = savage_dispatch_vb_prim( |
947 | savage_dispatch_vb_prim(dev_priv, | 920 | dev_priv, &cmd_header, |
948 | &cmd_header, | 921 | vtxbuf, vb_size, vb_stride); |
949 | (const uint32_t | ||
950 | __user *) | ||
951 | usr_vtxbuf, vb_size, | ||
952 | vb_stride); | ||
953 | break; | 922 | break; |
954 | case SAVAGE_CMD_DMA_IDX: | 923 | case SAVAGE_CMD_DMA_IDX: |
955 | j = (cmd_header.idx.count + 3) / 4; | 924 | j = (cmd_header.idx.count + 3) / 4; |
956 | /* j was check in savage_bci_cmdbuf */ | 925 | /* j was check in savage_bci_cmdbuf */ |
957 | ret = | 926 | ret = savage_dispatch_dma_idx(dev_priv, |
958 | savage_dispatch_dma_idx(dev_priv, | 927 | &cmd_header, (const uint16_t *)cmdbuf, |
959 | &cmd_header, | 928 | dmabuf); |
960 | (const uint16_t | 929 | cmdbuf += j; |
961 | __user *) | ||
962 | usr_cmdbuf, dmabuf); | ||
963 | usr_cmdbuf += j; | ||
964 | break; | 930 | break; |
965 | case SAVAGE_CMD_VB_IDX: | 931 | case SAVAGE_CMD_VB_IDX: |
966 | j = (cmd_header.idx.count + 3) / 4; | 932 | j = (cmd_header.idx.count + 3) / 4; |
967 | /* j was check in savage_bci_cmdbuf */ | 933 | /* j was check in savage_bci_cmdbuf */ |
968 | ret = | 934 | ret = savage_dispatch_vb_idx(dev_priv, |
969 | savage_dispatch_vb_idx(dev_priv, | 935 | &cmd_header, (const uint16_t *)cmdbuf, |
970 | &cmd_header, | 936 | (const uint32_t *)vtxbuf, vb_size, |
971 | (const uint16_t | 937 | vb_stride); |
972 | __user *)usr_cmdbuf, | 938 | cmdbuf += j; |
973 | (const uint32_t | ||
974 | __user *)usr_vtxbuf, | ||
975 | vb_size, vb_stride); | ||
976 | usr_cmdbuf += j; | ||
977 | break; | 939 | break; |
978 | default: | 940 | default: |
979 | /* What's the best return code? EFAULT? */ | 941 | /* What's the best return code? EFAULT? */ |
@@ -998,10 +960,10 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) | |||
998 | drm_device_dma_t *dma = dev->dma; | 960 | drm_device_dma_t *dma = dev->dma; |
999 | drm_buf_t *dmabuf; | 961 | drm_buf_t *dmabuf; |
1000 | drm_savage_cmdbuf_t cmdbuf; | 962 | drm_savage_cmdbuf_t cmdbuf; |
1001 | drm_savage_cmd_header_t __user *usr_cmdbuf; | 963 | drm_savage_cmd_header_t *kcmd_addr = NULL; |
1002 | drm_savage_cmd_header_t __user *first_draw_cmd; | 964 | drm_savage_cmd_header_t *first_draw_cmd; |
1003 | unsigned int __user *usr_vtxbuf; | 965 | unsigned int *kvb_addr = NULL; |
1004 | drm_clip_rect_t __user *usr_boxes; | 966 | drm_clip_rect_t *kbox_addr = NULL; |
1005 | unsigned int i, j; | 967 | unsigned int i, j; |
1006 | int ret = 0; | 968 | int ret = 0; |
1007 | 969 | ||
@@ -1024,15 +986,53 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) | |||
1024 | dmabuf = NULL; | 986 | dmabuf = NULL; |
1025 | } | 987 | } |
1026 | 988 | ||
1027 | usr_cmdbuf = (drm_savage_cmd_header_t __user *) cmdbuf.cmd_addr; | 989 | /* Copy the user buffers into kernel temporary areas. This hasn't been |
1028 | usr_vtxbuf = (unsigned int __user *)cmdbuf.vb_addr; | 990 | * a performance loss compared to VERIFYAREA_READ/ |
1029 | usr_boxes = (drm_clip_rect_t __user *) cmdbuf.box_addr; | 991 | * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct |
1030 | if ((cmdbuf.size && DRM_VERIFYAREA_READ(usr_cmdbuf, cmdbuf.size * 8)) || | 992 | * for locking on FreeBSD. |
1031 | (cmdbuf.vb_size && DRM_VERIFYAREA_READ(usr_vtxbuf, cmdbuf.vb_size)) | 993 | */ |
1032 | || (cmdbuf.nbox | 994 | if (cmdbuf.size) { |
1033 | && DRM_VERIFYAREA_READ(usr_boxes, | 995 | kcmd_addr = drm_alloc(cmdbuf.size * 8, DRM_MEM_DRIVER); |
1034 | cmdbuf.nbox * sizeof(drm_clip_rect_t)))) | 996 | if (kcmd_addr == NULL) |
1035 | return DRM_ERR(EFAULT); | 997 | return ENOMEM; |
998 | |||
999 | if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf.cmd_addr, | ||
1000 | cmdbuf.size * 8)) | ||
1001 | { | ||
1002 | drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER); | ||
1003 | return DRM_ERR(EFAULT); | ||
1004 | } | ||
1005 | cmdbuf.cmd_addr = kcmd_addr; | ||
1006 | } | ||
1007 | if (cmdbuf.vb_size) { | ||
1008 | kvb_addr = drm_alloc(cmdbuf.vb_size, DRM_MEM_DRIVER); | ||
1009 | if (kvb_addr == NULL) { | ||
1010 | ret = DRM_ERR(ENOMEM); | ||
1011 | goto done; | ||
1012 | } | ||
1013 | |||
1014 | if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf.vb_addr, | ||
1015 | cmdbuf.vb_size)) { | ||
1016 | ret = DRM_ERR(EFAULT); | ||
1017 | goto done; | ||
1018 | } | ||
1019 | cmdbuf.vb_addr = kvb_addr; | ||
1020 | } | ||
1021 | if (cmdbuf.nbox) { | ||
1022 | kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(drm_clip_rect_t), | ||
1023 | DRM_MEM_DRIVER); | ||
1024 | if (kbox_addr == NULL) { | ||
1025 | ret = DRM_ERR(ENOMEM); | ||
1026 | goto done; | ||
1027 | } | ||
1028 | |||
1029 | if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr, | ||
1030 | cmdbuf.nbox * sizeof(drm_clip_rect_t))) { | ||
1031 | ret = DRM_ERR(EFAULT); | ||
1032 | goto done; | ||
1033 | } | ||
1034 | cmdbuf.box_addr = kbox_addr; | ||
1035 | } | ||
1036 | 1036 | ||
1037 | /* Make sure writes to DMA buffers are finished before sending | 1037 | /* Make sure writes to DMA buffers are finished before sending |
1038 | * DMA commands to the graphics hardware. */ | 1038 | * DMA commands to the graphics hardware. */ |
@@ -1046,9 +1046,8 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) | |||
1046 | first_draw_cmd = NULL; | 1046 | first_draw_cmd = NULL; |
1047 | while (i < cmdbuf.size) { | 1047 | while (i < cmdbuf.size) { |
1048 | drm_savage_cmd_header_t cmd_header; | 1048 | drm_savage_cmd_header_t cmd_header; |
1049 | DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf, | 1049 | cmd_header = *(drm_savage_cmd_header_t *)cmdbuf.cmd_addr; |
1050 | sizeof(cmd_header)); | 1050 | cmdbuf.cmd_addr++; |
1051 | usr_cmdbuf++; | ||
1052 | i++; | 1051 | i++; |
1053 | 1052 | ||
1054 | /* Group drawing commands with same state to minimize | 1053 | /* Group drawing commands with same state to minimize |
@@ -1068,21 +1067,18 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) | |||
1068 | case SAVAGE_CMD_DMA_PRIM: | 1067 | case SAVAGE_CMD_DMA_PRIM: |
1069 | case SAVAGE_CMD_VB_PRIM: | 1068 | case SAVAGE_CMD_VB_PRIM: |
1070 | if (!first_draw_cmd) | 1069 | if (!first_draw_cmd) |
1071 | first_draw_cmd = usr_cmdbuf - 1; | 1070 | first_draw_cmd = cmdbuf.cmd_addr - 1; |
1072 | usr_cmdbuf += j; | 1071 | cmdbuf.cmd_addr += j; |
1073 | i += j; | 1072 | i += j; |
1074 | break; | 1073 | break; |
1075 | default: | 1074 | default: |
1076 | if (first_draw_cmd) { | 1075 | if (first_draw_cmd) { |
1077 | ret = | 1076 | ret = savage_dispatch_draw( |
1078 | savage_dispatch_draw(dev_priv, | 1077 | dev_priv, first_draw_cmd, |
1079 | first_draw_cmd, | 1078 | cmdbuf.cmd_addr - 1, |
1080 | usr_cmdbuf - 1, dmabuf, | 1079 | dmabuf, cmdbuf.vb_addr, cmdbuf.vb_size, |
1081 | usr_vtxbuf, | 1080 | cmdbuf.vb_stride, |
1082 | cmdbuf.vb_size, | 1081 | cmdbuf.nbox, cmdbuf.box_addr); |
1083 | cmdbuf.vb_stride, | ||
1084 | cmdbuf.nbox, | ||
1085 | usr_boxes); | ||
1086 | if (ret != 0) | 1082 | if (ret != 0) |
1087 | return ret; | 1083 | return ret; |
1088 | first_draw_cmd = NULL; | 1084 | first_draw_cmd = NULL; |
@@ -1098,12 +1094,12 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) | |||
1098 | DRM_ERROR("command SAVAGE_CMD_STATE extends " | 1094 | DRM_ERROR("command SAVAGE_CMD_STATE extends " |
1099 | "beyond end of command buffer\n"); | 1095 | "beyond end of command buffer\n"); |
1100 | DMA_FLUSH(); | 1096 | DMA_FLUSH(); |
1101 | return DRM_ERR(EINVAL); | 1097 | ret = DRM_ERR(EINVAL); |
1098 | goto done; | ||
1102 | } | 1099 | } |
1103 | ret = savage_dispatch_state(dev_priv, &cmd_header, | 1100 | ret = savage_dispatch_state(dev_priv, &cmd_header, |
1104 | (uint32_t __user *) | 1101 | (const uint32_t *)cmdbuf.cmd_addr); |
1105 | usr_cmdbuf); | 1102 | cmdbuf.cmd_addr += j; |
1106 | usr_cmdbuf += j; | ||
1107 | i += j; | 1103 | i += j; |
1108 | break; | 1104 | break; |
1109 | case SAVAGE_CMD_CLEAR: | 1105 | case SAVAGE_CMD_CLEAR: |
@@ -1111,39 +1107,40 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) | |||
1111 | DRM_ERROR("command SAVAGE_CMD_CLEAR extends " | 1107 | DRM_ERROR("command SAVAGE_CMD_CLEAR extends " |
1112 | "beyond end of command buffer\n"); | 1108 | "beyond end of command buffer\n"); |
1113 | DMA_FLUSH(); | 1109 | DMA_FLUSH(); |
1114 | return DRM_ERR(EINVAL); | 1110 | ret = DRM_ERR(EINVAL); |
1111 | goto done; | ||
1115 | } | 1112 | } |
1116 | ret = savage_dispatch_clear(dev_priv, &cmd_header, | 1113 | ret = savage_dispatch_clear(dev_priv, &cmd_header, |
1117 | usr_cmdbuf, | 1114 | cmdbuf.cmd_addr, |
1118 | cmdbuf.nbox, usr_boxes); | 1115 | cmdbuf.nbox, cmdbuf.box_addr); |
1119 | usr_cmdbuf++; | 1116 | cmdbuf.cmd_addr++; |
1120 | i++; | 1117 | i++; |
1121 | break; | 1118 | break; |
1122 | case SAVAGE_CMD_SWAP: | 1119 | case SAVAGE_CMD_SWAP: |
1123 | ret = savage_dispatch_swap(dev_priv, | 1120 | ret = savage_dispatch_swap(dev_priv, cmdbuf.nbox, |
1124 | cmdbuf.nbox, usr_boxes); | 1121 | cmdbuf.box_addr); |
1125 | break; | 1122 | break; |
1126 | default: | 1123 | default: |
1127 | DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd); | 1124 | DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd); |
1128 | DMA_FLUSH(); | 1125 | DMA_FLUSH(); |
1129 | return DRM_ERR(EINVAL); | 1126 | ret = DRM_ERR(EINVAL); |
1127 | goto done; | ||
1130 | } | 1128 | } |
1131 | 1129 | ||
1132 | if (ret != 0) { | 1130 | if (ret != 0) { |
1133 | DMA_FLUSH(); | 1131 | DMA_FLUSH(); |
1134 | return ret; | 1132 | goto done; |
1135 | } | 1133 | } |
1136 | } | 1134 | } |
1137 | 1135 | ||
1138 | if (first_draw_cmd) { | 1136 | if (first_draw_cmd) { |
1139 | ret = | 1137 | ret = savage_dispatch_draw ( |
1140 | savage_dispatch_draw(dev_priv, first_draw_cmd, usr_cmdbuf, | 1138 | dev_priv, first_draw_cmd, cmdbuf.cmd_addr, dmabuf, |
1141 | dmabuf, usr_vtxbuf, cmdbuf.vb_size, | 1139 | cmdbuf.vb_addr, cmdbuf.vb_size, cmdbuf.vb_stride, |
1142 | cmdbuf.vb_stride, cmdbuf.nbox, | 1140 | cmdbuf.nbox, cmdbuf.box_addr); |
1143 | usr_boxes); | ||
1144 | if (ret != 0) { | 1141 | if (ret != 0) { |
1145 | DMA_FLUSH(); | 1142 | DMA_FLUSH(); |
1146 | return ret; | 1143 | goto done; |
1147 | } | 1144 | } |
1148 | } | 1145 | } |
1149 | 1146 | ||
@@ -1157,5 +1154,12 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) | |||
1157 | savage_freelist_put(dev, dmabuf); | 1154 | savage_freelist_put(dev, dmabuf); |
1158 | } | 1155 | } |
1159 | 1156 | ||
1160 | return 0; | 1157 | done: |
1158 | /* If we didn't need to allocate them, these'll be NULL */ | ||
1159 | drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER); | ||
1160 | drm_free(kvb_addr, cmdbuf.vb_size, DRM_MEM_DRIVER); | ||
1161 | drm_free(kbox_addr, cmdbuf.nbox * sizeof(drm_clip_rect_t), | ||
1162 | DRM_MEM_DRIVER); | ||
1163 | |||
1164 | return ret; | ||
1161 | } | 1165 | } |