diff options
author | Eric Anholt <eric@anholt.net> | 2007-08-25 05:22:43 -0400 |
---|---|---|
committer | Dave Airlie <airlied@optimus.(none)> | 2007-10-14 20:38:19 -0400 |
commit | 20caafa6ecb2487d9b223aa33e7cc704f912a758 (patch) | |
tree | 7df033fdee81305dad0a67ceba79f51ead7c1b8b | |
parent | 23fd50450a34f2558070ceabb0bfebc1c9604af5 (diff) |
drm: Remove DRM_ERR OS macro.
This was used to make all ioctl handlers return -errno on linux and errno on
*BSD. Instead, just return -errno in shared code, and flip sign on return from
shared code to *BSD code.
Signed-off-by: Dave Airlie <airlied@linux.ie>
31 files changed, 474 insertions, 475 deletions
diff --git a/drivers/char/drm/drm_drawable.c b/drivers/char/drm/drm_drawable.c index d6cdba5644e2..2787c9a3ab44 100644 --- a/drivers/char/drm/drm_drawable.c +++ b/drivers/char/drm/drm_drawable.c | |||
@@ -130,7 +130,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) | |||
130 | 130 | ||
131 | if (update.num && !rects) { | 131 | if (update.num && !rects) { |
132 | DRM_ERROR("Failed to allocate cliprect memory\n"); | 132 | DRM_ERROR("Failed to allocate cliprect memory\n"); |
133 | err = DRM_ERR(ENOMEM); | 133 | err = -ENOMEM; |
134 | goto error; | 134 | goto error; |
135 | } | 135 | } |
136 | 136 | ||
@@ -140,7 +140,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) | |||
140 | update.num * | 140 | update.num * |
141 | sizeof(*rects))) { | 141 | sizeof(*rects))) { |
142 | DRM_ERROR("Failed to copy cliprects from userspace\n"); | 142 | DRM_ERROR("Failed to copy cliprects from userspace\n"); |
143 | err = DRM_ERR(EFAULT); | 143 | err = -EFAULT; |
144 | goto error; | 144 | goto error; |
145 | } | 145 | } |
146 | 146 | ||
@@ -161,7 +161,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) | |||
161 | break; | 161 | break; |
162 | default: | 162 | default: |
163 | DRM_ERROR("Invalid update type %d\n", update.type); | 163 | DRM_ERROR("Invalid update type %d\n", update.type); |
164 | return DRM_ERR(EINVAL); | 164 | return -EINVAL; |
165 | } | 165 | } |
166 | 166 | ||
167 | return 0; | 167 | return 0; |
diff --git a/drivers/char/drm/drm_ioctl.c b/drivers/char/drm/drm_ioctl.c index b195e102e737..4eecfb9226d4 100644 --- a/drivers/char/drm/drm_ioctl.c +++ b/drivers/char/drm/drm_ioctl.c | |||
@@ -123,7 +123,7 @@ int drm_setunique(struct inode *inode, struct file *filp, | |||
123 | */ | 123 | */ |
124 | ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func); | 124 | ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func); |
125 | if (ret != 3) | 125 | if (ret != 3) |
126 | return DRM_ERR(EINVAL); | 126 | return -EINVAL; |
127 | domain = bus >> 8; | 127 | domain = bus >> 8; |
128 | bus &= 0xff; | 128 | bus &= 0xff; |
129 | 129 | ||
diff --git a/drivers/char/drm/drm_lock.c b/drivers/char/drm/drm_lock.c index c0534b5a8b78..950e78dbcc3c 100644 --- a/drivers/char/drm/drm_lock.c +++ b/drivers/char/drm/drm_lock.c | |||
@@ -125,7 +125,7 @@ int drm_lock(struct inode *inode, struct file *filp, | |||
125 | if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) { | 125 | if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) { |
126 | if (dev->driver->dma_quiescent(dev)) { | 126 | if (dev->driver->dma_quiescent(dev)) { |
127 | DRM_DEBUG("%d waiting for DMA quiescent\n", lock.context); | 127 | DRM_DEBUG("%d waiting for DMA quiescent\n", lock.context); |
128 | return DRM_ERR(EBUSY); | 128 | return -EBUSY; |
129 | } | 129 | } |
130 | } | 130 | } |
131 | 131 | ||
diff --git a/drivers/char/drm/drm_os_linux.h b/drivers/char/drm/drm_os_linux.h index 0b8d3433386d..132c5580358b 100644 --- a/drivers/char/drm/drm_os_linux.h +++ b/drivers/char/drm/drm_os_linux.h | |||
@@ -10,7 +10,6 @@ | |||
10 | #define DRMFILE struct file * | 10 | #define DRMFILE struct file * |
11 | /** Ioctl arguments */ | 11 | /** Ioctl arguments */ |
12 | #define DRM_IOCTL_ARGS struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data | 12 | #define DRM_IOCTL_ARGS struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data |
13 | #define DRM_ERR(d) -(d) | ||
14 | /** Current process ID */ | 13 | /** Current process ID */ |
15 | #define DRM_CURRENTPID current->pid | 14 | #define DRM_CURRENTPID current->pid |
16 | #define DRM_SUSER(p) capable(CAP_SYS_ADMIN) | 15 | #define DRM_SUSER(p) capable(CAP_SYS_ADMIN) |
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c index cb449999d0ef..bfb35ab11809 100644 --- a/drivers/char/drm/i810_dma.c +++ b/drivers/char/drm/i810_dma.c | |||
@@ -380,7 +380,7 @@ static int i810_dma_initialize(struct drm_device * dev, | |||
380 | i810_dma_cleanup(dev); | 380 | i810_dma_cleanup(dev); |
381 | DRM_ERROR("can not ioremap virtual address for" | 381 | DRM_ERROR("can not ioremap virtual address for" |
382 | " ring buffer\n"); | 382 | " ring buffer\n"); |
383 | return DRM_ERR(ENOMEM); | 383 | return -ENOMEM; |
384 | } | 384 | } |
385 | 385 | ||
386 | dev_priv->ring.virtual_start = dev_priv->ring.map.handle; | 386 | dev_priv->ring.virtual_start = dev_priv->ring.map.handle; |
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c index dc20c1a7834e..0bb1cbf48109 100644 --- a/drivers/char/drm/i830_dma.c +++ b/drivers/char/drm/i830_dma.c | |||
@@ -389,7 +389,7 @@ static int i830_dma_initialize(struct drm_device * dev, | |||
389 | i830_dma_cleanup(dev); | 389 | i830_dma_cleanup(dev); |
390 | DRM_ERROR("can not ioremap virtual address for" | 390 | DRM_ERROR("can not ioremap virtual address for" |
391 | " ring buffer\n"); | 391 | " ring buffer\n"); |
392 | return DRM_ERR(ENOMEM); | 392 | return -ENOMEM; |
393 | } | 393 | } |
394 | 394 | ||
395 | dev_priv->ring.virtual_start = dev_priv->ring.map.handle; | 395 | dev_priv->ring.virtual_start = dev_priv->ring.map.handle; |
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c index 8e7d713a5a15..a7566ff1a3ea 100644 --- a/drivers/char/drm/i915_dma.c +++ b/drivers/char/drm/i915_dma.c | |||
@@ -70,7 +70,7 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller) | |||
70 | last_head = ring->head; | 70 | last_head = ring->head; |
71 | } | 71 | } |
72 | 72 | ||
73 | return DRM_ERR(EBUSY); | 73 | return -EBUSY; |
74 | } | 74 | } |
75 | 75 | ||
76 | void i915_kernel_lost_context(struct drm_device * dev) | 76 | void i915_kernel_lost_context(struct drm_device * dev) |
@@ -137,7 +137,7 @@ static int i915_initialize(struct drm_device * dev, | |||
137 | DRM_ERROR("can not find sarea!\n"); | 137 | DRM_ERROR("can not find sarea!\n"); |
138 | dev->dev_private = (void *)dev_priv; | 138 | dev->dev_private = (void *)dev_priv; |
139 | i915_dma_cleanup(dev); | 139 | i915_dma_cleanup(dev); |
140 | return DRM_ERR(EINVAL); | 140 | return -EINVAL; |
141 | } | 141 | } |
142 | 142 | ||
143 | dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset); | 143 | dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset); |
@@ -145,7 +145,7 @@ static int i915_initialize(struct drm_device * dev, | |||
145 | dev->dev_private = (void *)dev_priv; | 145 | dev->dev_private = (void *)dev_priv; |
146 | i915_dma_cleanup(dev); | 146 | i915_dma_cleanup(dev); |
147 | DRM_ERROR("can not find mmio map!\n"); | 147 | DRM_ERROR("can not find mmio map!\n"); |
148 | return DRM_ERR(EINVAL); | 148 | return -EINVAL; |
149 | } | 149 | } |
150 | 150 | ||
151 | dev_priv->sarea_priv = (drm_i915_sarea_t *) | 151 | dev_priv->sarea_priv = (drm_i915_sarea_t *) |
@@ -169,7 +169,7 @@ static int i915_initialize(struct drm_device * dev, | |||
169 | i915_dma_cleanup(dev); | 169 | i915_dma_cleanup(dev); |
170 | DRM_ERROR("can not ioremap virtual address for" | 170 | DRM_ERROR("can not ioremap virtual address for" |
171 | " ring buffer\n"); | 171 | " ring buffer\n"); |
172 | return DRM_ERR(ENOMEM); | 172 | return -ENOMEM; |
173 | } | 173 | } |
174 | 174 | ||
175 | dev_priv->ring.virtual_start = dev_priv->ring.map.handle; | 175 | dev_priv->ring.virtual_start = dev_priv->ring.map.handle; |
@@ -200,7 +200,7 @@ static int i915_initialize(struct drm_device * dev, | |||
200 | dev->dev_private = (void *)dev_priv; | 200 | dev->dev_private = (void *)dev_priv; |
201 | i915_dma_cleanup(dev); | 201 | i915_dma_cleanup(dev); |
202 | DRM_ERROR("Can not allocate hardware status page\n"); | 202 | DRM_ERROR("Can not allocate hardware status page\n"); |
203 | return DRM_ERR(ENOMEM); | 203 | return -ENOMEM; |
204 | } | 204 | } |
205 | dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; | 205 | dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; |
206 | dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; | 206 | dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; |
@@ -221,24 +221,24 @@ static int i915_dma_resume(struct drm_device * dev) | |||
221 | 221 | ||
222 | if (!dev_priv->sarea) { | 222 | if (!dev_priv->sarea) { |
223 | DRM_ERROR("can not find sarea!\n"); | 223 | DRM_ERROR("can not find sarea!\n"); |
224 | return DRM_ERR(EINVAL); | 224 | return -EINVAL; |
225 | } | 225 | } |
226 | 226 | ||
227 | if (!dev_priv->mmio_map) { | 227 | if (!dev_priv->mmio_map) { |
228 | DRM_ERROR("can not find mmio map!\n"); | 228 | DRM_ERROR("can not find mmio map!\n"); |
229 | return DRM_ERR(EINVAL); | 229 | return -EINVAL; |
230 | } | 230 | } |
231 | 231 | ||
232 | if (dev_priv->ring.map.handle == NULL) { | 232 | if (dev_priv->ring.map.handle == NULL) { |
233 | DRM_ERROR("can not ioremap virtual address for" | 233 | DRM_ERROR("can not ioremap virtual address for" |
234 | " ring buffer\n"); | 234 | " ring buffer\n"); |
235 | return DRM_ERR(ENOMEM); | 235 | return -ENOMEM; |
236 | } | 236 | } |
237 | 237 | ||
238 | /* Program Hardware Status Page */ | 238 | /* Program Hardware Status Page */ |
239 | if (!dev_priv->hw_status_page) { | 239 | if (!dev_priv->hw_status_page) { |
240 | DRM_ERROR("Can not find hardware status page\n"); | 240 | DRM_ERROR("Can not find hardware status page\n"); |
241 | return DRM_ERR(EINVAL); | 241 | return -EINVAL; |
242 | } | 242 | } |
243 | DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); | 243 | DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); |
244 | 244 | ||
@@ -266,7 +266,7 @@ static int i915_dma_init(DRM_IOCTL_ARGS) | |||
266 | dev_priv = drm_alloc(sizeof(drm_i915_private_t), | 266 | dev_priv = drm_alloc(sizeof(drm_i915_private_t), |
267 | DRM_MEM_DRIVER); | 267 | DRM_MEM_DRIVER); |
268 | if (dev_priv == NULL) | 268 | if (dev_priv == NULL) |
269 | return DRM_ERR(ENOMEM); | 269 | return -ENOMEM; |
270 | retcode = i915_initialize(dev, dev_priv, &init); | 270 | retcode = i915_initialize(dev, dev_priv, &init); |
271 | break; | 271 | break; |
272 | case I915_CLEANUP_DMA: | 272 | case I915_CLEANUP_DMA: |
@@ -276,7 +276,7 @@ static int i915_dma_init(DRM_IOCTL_ARGS) | |||
276 | retcode = i915_dma_resume(dev); | 276 | retcode = i915_dma_resume(dev); |
277 | break; | 277 | break; |
278 | default: | 278 | default: |
279 | retcode = DRM_ERR(EINVAL); | 279 | retcode = -EINVAL; |
280 | break; | 280 | break; |
281 | } | 281 | } |
282 | 282 | ||
@@ -366,7 +366,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor | |||
366 | RING_LOCALS; | 366 | RING_LOCALS; |
367 | 367 | ||
368 | if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8) | 368 | if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8) |
369 | return DRM_ERR(EINVAL); | 369 | return -EINVAL; |
370 | 370 | ||
371 | BEGIN_LP_RING((dwords+1)&~1); | 371 | BEGIN_LP_RING((dwords+1)&~1); |
372 | 372 | ||
@@ -374,17 +374,17 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor | |||
374 | int cmd, sz; | 374 | int cmd, sz; |
375 | 375 | ||
376 | if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) | 376 | if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) |
377 | return DRM_ERR(EINVAL); | 377 | return -EINVAL; |
378 | 378 | ||
379 | if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) | 379 | if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) |
380 | return DRM_ERR(EINVAL); | 380 | return -EINVAL; |
381 | 381 | ||
382 | OUT_RING(cmd); | 382 | OUT_RING(cmd); |
383 | 383 | ||
384 | while (++i, --sz) { | 384 | while (++i, --sz) { |
385 | if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], | 385 | if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], |
386 | sizeof(cmd))) { | 386 | sizeof(cmd))) { |
387 | return DRM_ERR(EINVAL); | 387 | return -EINVAL; |
388 | } | 388 | } |
389 | OUT_RING(cmd); | 389 | OUT_RING(cmd); |
390 | } | 390 | } |
@@ -407,13 +407,13 @@ static int i915_emit_box(struct drm_device * dev, | |||
407 | RING_LOCALS; | 407 | RING_LOCALS; |
408 | 408 | ||
409 | if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) { | 409 | if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) { |
410 | return DRM_ERR(EFAULT); | 410 | return -EFAULT; |
411 | } | 411 | } |
412 | 412 | ||
413 | if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { | 413 | if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { |
414 | DRM_ERROR("Bad box %d,%d..%d,%d\n", | 414 | DRM_ERROR("Bad box %d,%d..%d,%d\n", |
415 | box.x1, box.y1, box.x2, box.y2); | 415 | box.x1, box.y1, box.x2, box.y2); |
416 | return DRM_ERR(EINVAL); | 416 | return -EINVAL; |
417 | } | 417 | } |
418 | 418 | ||
419 | if (IS_I965G(dev)) { | 419 | if (IS_I965G(dev)) { |
@@ -467,7 +467,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev, | |||
467 | 467 | ||
468 | if (cmd->sz & 0x3) { | 468 | if (cmd->sz & 0x3) { |
469 | DRM_ERROR("alignment"); | 469 | DRM_ERROR("alignment"); |
470 | return DRM_ERR(EINVAL); | 470 | return -EINVAL; |
471 | } | 471 | } |
472 | 472 | ||
473 | i915_kernel_lost_context(dev); | 473 | i915_kernel_lost_context(dev); |
@@ -502,7 +502,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, | |||
502 | 502 | ||
503 | if ((batch->start | batch->used) & 0x7) { | 503 | if ((batch->start | batch->used) & 0x7) { |
504 | DRM_ERROR("alignment"); | 504 | DRM_ERROR("alignment"); |
505 | return DRM_ERR(EINVAL); | 505 | return -EINVAL; |
506 | } | 506 | } |
507 | 507 | ||
508 | i915_kernel_lost_context(dev); | 508 | i915_kernel_lost_context(dev); |
@@ -619,7 +619,7 @@ static int i915_batchbuffer(DRM_IOCTL_ARGS) | |||
619 | 619 | ||
620 | if (!dev_priv->allow_batchbuffer) { | 620 | if (!dev_priv->allow_batchbuffer) { |
621 | DRM_ERROR("Batchbuffer ioctl disabled\n"); | 621 | DRM_ERROR("Batchbuffer ioctl disabled\n"); |
622 | return DRM_ERR(EINVAL); | 622 | return -EINVAL; |
623 | } | 623 | } |
624 | 624 | ||
625 | DRM_COPY_FROM_USER_IOCTL(batch, (drm_i915_batchbuffer_t __user *) data, | 625 | DRM_COPY_FROM_USER_IOCTL(batch, (drm_i915_batchbuffer_t __user *) data, |
@@ -633,7 +633,7 @@ static int i915_batchbuffer(DRM_IOCTL_ARGS) | |||
633 | if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects, | 633 | if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects, |
634 | batch.num_cliprects * | 634 | batch.num_cliprects * |
635 | sizeof(struct drm_clip_rect))) | 635 | sizeof(struct drm_clip_rect))) |
636 | return DRM_ERR(EFAULT); | 636 | return -EFAULT; |
637 | 637 | ||
638 | ret = i915_dispatch_batchbuffer(dev, &batch); | 638 | ret = i915_dispatch_batchbuffer(dev, &batch); |
639 | 639 | ||
@@ -664,7 +664,7 @@ static int i915_cmdbuffer(DRM_IOCTL_ARGS) | |||
664 | cmdbuf.num_cliprects * | 664 | cmdbuf.num_cliprects * |
665 | sizeof(struct drm_clip_rect))) { | 665 | sizeof(struct drm_clip_rect))) { |
666 | DRM_ERROR("Fault accessing cliprects\n"); | 666 | DRM_ERROR("Fault accessing cliprects\n"); |
667 | return DRM_ERR(EFAULT); | 667 | return -EFAULT; |
668 | } | 668 | } |
669 | 669 | ||
670 | ret = i915_dispatch_cmdbuffer(dev, &cmdbuf); | 670 | ret = i915_dispatch_cmdbuffer(dev, &cmdbuf); |
@@ -697,7 +697,7 @@ static int i915_getparam(DRM_IOCTL_ARGS) | |||
697 | 697 | ||
698 | if (!dev_priv) { | 698 | if (!dev_priv) { |
699 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 699 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
700 | return DRM_ERR(EINVAL); | 700 | return -EINVAL; |
701 | } | 701 | } |
702 | 702 | ||
703 | DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_getparam_t __user *) data, | 703 | DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_getparam_t __user *) data, |
@@ -715,12 +715,12 @@ static int i915_getparam(DRM_IOCTL_ARGS) | |||
715 | break; | 715 | break; |
716 | default: | 716 | default: |
717 | DRM_ERROR("Unknown parameter %d\n", param.param); | 717 | DRM_ERROR("Unknown parameter %d\n", param.param); |
718 | return DRM_ERR(EINVAL); | 718 | return -EINVAL; |
719 | } | 719 | } |
720 | 720 | ||
721 | if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { | 721 | if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { |
722 | DRM_ERROR("DRM_COPY_TO_USER failed\n"); | 722 | DRM_ERROR("DRM_COPY_TO_USER failed\n"); |
723 | return DRM_ERR(EFAULT); | 723 | return -EFAULT; |
724 | } | 724 | } |
725 | 725 | ||
726 | return 0; | 726 | return 0; |
@@ -734,7 +734,7 @@ static int i915_setparam(DRM_IOCTL_ARGS) | |||
734 | 734 | ||
735 | if (!dev_priv) { | 735 | if (!dev_priv) { |
736 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 736 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
737 | return DRM_ERR(EINVAL); | 737 | return -EINVAL; |
738 | } | 738 | } |
739 | 739 | ||
740 | DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data, | 740 | DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data, |
@@ -753,7 +753,7 @@ static int i915_setparam(DRM_IOCTL_ARGS) | |||
753 | break; | 753 | break; |
754 | default: | 754 | default: |
755 | DRM_ERROR("unknown parameter %d\n", param.param); | 755 | DRM_ERROR("unknown parameter %d\n", param.param); |
756 | return DRM_ERR(EINVAL); | 756 | return -EINVAL; |
757 | } | 757 | } |
758 | 758 | ||
759 | return 0; | 759 | return 0; |
@@ -767,7 +767,7 @@ static int i915_set_status_page(DRM_IOCTL_ARGS) | |||
767 | 767 | ||
768 | if (!dev_priv) { | 768 | if (!dev_priv) { |
769 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 769 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
770 | return DRM_ERR(EINVAL); | 770 | return -EINVAL; |
771 | } | 771 | } |
772 | DRM_COPY_FROM_USER_IOCTL(hws, (drm_i915_hws_addr_t __user *) data, | 772 | DRM_COPY_FROM_USER_IOCTL(hws, (drm_i915_hws_addr_t __user *) data, |
773 | sizeof(hws)); | 773 | sizeof(hws)); |
@@ -788,7 +788,7 @@ static int i915_set_status_page(DRM_IOCTL_ARGS) | |||
788 | dev_priv->status_gfx_addr = 0; | 788 | dev_priv->status_gfx_addr = 0; |
789 | DRM_ERROR("can not ioremap virtual address for" | 789 | DRM_ERROR("can not ioremap virtual address for" |
790 | " G33 hw status page\n"); | 790 | " G33 hw status page\n"); |
791 | return DRM_ERR(ENOMEM); | 791 | return -ENOMEM; |
792 | } | 792 | } |
793 | dev_priv->hw_status_page = dev_priv->hws_map.handle; | 793 | dev_priv->hw_status_page = dev_priv->hws_map.handle; |
794 | 794 | ||
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c index bb8e9e9c8201..470e7aa81558 100644 --- a/drivers/char/drm/i915_irq.c +++ b/drivers/char/drm/i915_irq.c | |||
@@ -311,7 +311,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) | |||
311 | DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, | 311 | DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, |
312 | READ_BREADCRUMB(dev_priv) >= irq_nr); | 312 | READ_BREADCRUMB(dev_priv) >= irq_nr); |
313 | 313 | ||
314 | if (ret == DRM_ERR(EBUSY)) { | 314 | if (ret == -EBUSY) { |
315 | DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n", | 315 | DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n", |
316 | __FUNCTION__, | 316 | __FUNCTION__, |
317 | READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); | 317 | READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); |
@@ -330,7 +330,7 @@ static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequ | |||
330 | 330 | ||
331 | if (!dev_priv) { | 331 | if (!dev_priv) { |
332 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 332 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
333 | return DRM_ERR(EINVAL); | 333 | return -EINVAL; |
334 | } | 334 | } |
335 | 335 | ||
336 | DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, | 336 | DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, |
@@ -366,7 +366,7 @@ int i915_irq_emit(DRM_IOCTL_ARGS) | |||
366 | 366 | ||
367 | if (!dev_priv) { | 367 | if (!dev_priv) { |
368 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 368 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
369 | return DRM_ERR(EINVAL); | 369 | return -EINVAL; |
370 | } | 370 | } |
371 | 371 | ||
372 | DRM_COPY_FROM_USER_IOCTL(emit, (drm_i915_irq_emit_t __user *) data, | 372 | DRM_COPY_FROM_USER_IOCTL(emit, (drm_i915_irq_emit_t __user *) data, |
@@ -376,7 +376,7 @@ int i915_irq_emit(DRM_IOCTL_ARGS) | |||
376 | 376 | ||
377 | if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) { | 377 | if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) { |
378 | DRM_ERROR("copy_to_user\n"); | 378 | DRM_ERROR("copy_to_user\n"); |
379 | return DRM_ERR(EFAULT); | 379 | return -EFAULT; |
380 | } | 380 | } |
381 | 381 | ||
382 | return 0; | 382 | return 0; |
@@ -392,7 +392,7 @@ int i915_irq_wait(DRM_IOCTL_ARGS) | |||
392 | 392 | ||
393 | if (!dev_priv) { | 393 | if (!dev_priv) { |
394 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 394 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
395 | return DRM_ERR(EINVAL); | 395 | return -EINVAL; |
396 | } | 396 | } |
397 | 397 | ||
398 | DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_i915_irq_wait_t __user *) data, | 398 | DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_i915_irq_wait_t __user *) data, |
@@ -425,7 +425,7 @@ int i915_vblank_pipe_set(DRM_IOCTL_ARGS) | |||
425 | 425 | ||
426 | if (!dev_priv) { | 426 | if (!dev_priv) { |
427 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 427 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
428 | return DRM_ERR(EINVAL); | 428 | return -EINVAL; |
429 | } | 429 | } |
430 | 430 | ||
431 | DRM_COPY_FROM_USER_IOCTL(pipe, (drm_i915_vblank_pipe_t __user *) data, | 431 | DRM_COPY_FROM_USER_IOCTL(pipe, (drm_i915_vblank_pipe_t __user *) data, |
@@ -434,7 +434,7 @@ int i915_vblank_pipe_set(DRM_IOCTL_ARGS) | |||
434 | if (pipe.pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) { | 434 | if (pipe.pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) { |
435 | DRM_ERROR("%s called with invalid pipe 0x%x\n", | 435 | DRM_ERROR("%s called with invalid pipe 0x%x\n", |
436 | __FUNCTION__, pipe.pipe); | 436 | __FUNCTION__, pipe.pipe); |
437 | return DRM_ERR(EINVAL); | 437 | return -EINVAL; |
438 | } | 438 | } |
439 | 439 | ||
440 | dev_priv->vblank_pipe = pipe.pipe; | 440 | dev_priv->vblank_pipe = pipe.pipe; |
@@ -453,7 +453,7 @@ int i915_vblank_pipe_get(DRM_IOCTL_ARGS) | |||
453 | 453 | ||
454 | if (!dev_priv) { | 454 | if (!dev_priv) { |
455 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 455 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
456 | return DRM_ERR(EINVAL); | 456 | return -EINVAL; |
457 | } | 457 | } |
458 | 458 | ||
459 | flag = I915_READ(I915REG_INT_ENABLE_R); | 459 | flag = I915_READ(I915REG_INT_ENABLE_R); |
@@ -482,12 +482,12 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) | |||
482 | 482 | ||
483 | if (!dev_priv) { | 483 | if (!dev_priv) { |
484 | DRM_ERROR("%s called with no initialization\n", __func__); | 484 | DRM_ERROR("%s called with no initialization\n", __func__); |
485 | return DRM_ERR(EINVAL); | 485 | return -EINVAL; |
486 | } | 486 | } |
487 | 487 | ||
488 | if (dev_priv->sarea_priv->rotation) { | 488 | if (dev_priv->sarea_priv->rotation) { |
489 | DRM_DEBUG("Rotation not supported\n"); | 489 | DRM_DEBUG("Rotation not supported\n"); |
490 | return DRM_ERR(EINVAL); | 490 | return -EINVAL; |
491 | } | 491 | } |
492 | 492 | ||
493 | DRM_COPY_FROM_USER_IOCTL(swap, (drm_i915_vblank_swap_t __user *) data, | 493 | DRM_COPY_FROM_USER_IOCTL(swap, (drm_i915_vblank_swap_t __user *) data, |
@@ -496,7 +496,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) | |||
496 | if (swap.seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE | | 496 | if (swap.seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE | |
497 | _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) { | 497 | _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) { |
498 | DRM_ERROR("Invalid sequence type 0x%x\n", swap.seqtype); | 498 | DRM_ERROR("Invalid sequence type 0x%x\n", swap.seqtype); |
499 | return DRM_ERR(EINVAL); | 499 | return -EINVAL; |
500 | } | 500 | } |
501 | 501 | ||
502 | pipe = (swap.seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; | 502 | pipe = (swap.seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; |
@@ -505,7 +505,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) | |||
505 | 505 | ||
506 | if (!(dev_priv->vblank_pipe & (1 << pipe))) { | 506 | if (!(dev_priv->vblank_pipe & (1 << pipe))) { |
507 | DRM_ERROR("Invalid pipe %d\n", pipe); | 507 | DRM_ERROR("Invalid pipe %d\n", pipe); |
508 | return DRM_ERR(EINVAL); | 508 | return -EINVAL; |
509 | } | 509 | } |
510 | 510 | ||
511 | spin_lock_irqsave(&dev->drw_lock, irqflags); | 511 | spin_lock_irqsave(&dev->drw_lock, irqflags); |
@@ -513,7 +513,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) | |||
513 | if (!drm_get_drawable_info(dev, swap.drawable)) { | 513 | if (!drm_get_drawable_info(dev, swap.drawable)) { |
514 | spin_unlock_irqrestore(&dev->drw_lock, irqflags); | 514 | spin_unlock_irqrestore(&dev->drw_lock, irqflags); |
515 | DRM_DEBUG("Invalid drawable ID %d\n", swap.drawable); | 515 | DRM_DEBUG("Invalid drawable ID %d\n", swap.drawable); |
516 | return DRM_ERR(EINVAL); | 516 | return -EINVAL; |
517 | } | 517 | } |
518 | 518 | ||
519 | spin_unlock_irqrestore(&dev->drw_lock, irqflags); | 519 | spin_unlock_irqrestore(&dev->drw_lock, irqflags); |
@@ -528,7 +528,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) | |||
528 | swap.sequence = curseq + 1; | 528 | swap.sequence = curseq + 1; |
529 | } else { | 529 | } else { |
530 | DRM_DEBUG("Missed target sequence\n"); | 530 | DRM_DEBUG("Missed target sequence\n"); |
531 | return DRM_ERR(EINVAL); | 531 | return -EINVAL; |
532 | } | 532 | } |
533 | } | 533 | } |
534 | 534 | ||
@@ -550,14 +550,14 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) | |||
550 | 550 | ||
551 | if (dev_priv->swaps_pending >= 100) { | 551 | if (dev_priv->swaps_pending >= 100) { |
552 | DRM_DEBUG("Too many swaps queued\n"); | 552 | DRM_DEBUG("Too many swaps queued\n"); |
553 | return DRM_ERR(EBUSY); | 553 | return -EBUSY; |
554 | } | 554 | } |
555 | 555 | ||
556 | vbl_swap = drm_calloc(1, sizeof(vbl_swap), DRM_MEM_DRIVER); | 556 | vbl_swap = drm_calloc(1, sizeof(vbl_swap), DRM_MEM_DRIVER); |
557 | 557 | ||
558 | if (!vbl_swap) { | 558 | if (!vbl_swap) { |
559 | DRM_ERROR("Failed to allocate memory to queue swap\n"); | 559 | DRM_ERROR("Failed to allocate memory to queue swap\n"); |
560 | return DRM_ERR(ENOMEM); | 560 | return -ENOMEM; |
561 | } | 561 | } |
562 | 562 | ||
563 | DRM_DEBUG("\n"); | 563 | DRM_DEBUG("\n"); |
diff --git a/drivers/char/drm/i915_mem.c b/drivers/char/drm/i915_mem.c index 50b4bacef0e0..d3ffad61c6b8 100644 --- a/drivers/char/drm/i915_mem.c +++ b/drivers/char/drm/i915_mem.c | |||
@@ -276,7 +276,7 @@ int i915_mem_alloc(DRM_IOCTL_ARGS) | |||
276 | 276 | ||
277 | if (!dev_priv) { | 277 | if (!dev_priv) { |
278 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 278 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
279 | return DRM_ERR(EINVAL); | 279 | return -EINVAL; |
280 | } | 280 | } |
281 | 281 | ||
282 | DRM_COPY_FROM_USER_IOCTL(alloc, (drm_i915_mem_alloc_t __user *) data, | 282 | DRM_COPY_FROM_USER_IOCTL(alloc, (drm_i915_mem_alloc_t __user *) data, |
@@ -284,7 +284,7 @@ int i915_mem_alloc(DRM_IOCTL_ARGS) | |||
284 | 284 | ||
285 | heap = get_heap(dev_priv, alloc.region); | 285 | heap = get_heap(dev_priv, alloc.region); |
286 | if (!heap || !*heap) | 286 | if (!heap || !*heap) |
287 | return DRM_ERR(EFAULT); | 287 | return -EFAULT; |
288 | 288 | ||
289 | /* Make things easier on ourselves: all allocations at least | 289 | /* Make things easier on ourselves: all allocations at least |
290 | * 4k aligned. | 290 | * 4k aligned. |
@@ -295,13 +295,13 @@ int i915_mem_alloc(DRM_IOCTL_ARGS) | |||
295 | block = alloc_block(*heap, alloc.size, alloc.alignment, filp); | 295 | block = alloc_block(*heap, alloc.size, alloc.alignment, filp); |
296 | 296 | ||
297 | if (!block) | 297 | if (!block) |
298 | return DRM_ERR(ENOMEM); | 298 | return -ENOMEM; |
299 | 299 | ||
300 | mark_block(dev, block, 1); | 300 | mark_block(dev, block, 1); |
301 | 301 | ||
302 | if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) { | 302 | if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) { |
303 | DRM_ERROR("copy_to_user\n"); | 303 | DRM_ERROR("copy_to_user\n"); |
304 | return DRM_ERR(EFAULT); | 304 | return -EFAULT; |
305 | } | 305 | } |
306 | 306 | ||
307 | return 0; | 307 | return 0; |
@@ -316,7 +316,7 @@ int i915_mem_free(DRM_IOCTL_ARGS) | |||
316 | 316 | ||
317 | if (!dev_priv) { | 317 | if (!dev_priv) { |
318 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 318 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
319 | return DRM_ERR(EINVAL); | 319 | return -EINVAL; |
320 | } | 320 | } |
321 | 321 | ||
322 | DRM_COPY_FROM_USER_IOCTL(memfree, (drm_i915_mem_free_t __user *) data, | 322 | DRM_COPY_FROM_USER_IOCTL(memfree, (drm_i915_mem_free_t __user *) data, |
@@ -324,14 +324,14 @@ int i915_mem_free(DRM_IOCTL_ARGS) | |||
324 | 324 | ||
325 | heap = get_heap(dev_priv, memfree.region); | 325 | heap = get_heap(dev_priv, memfree.region); |
326 | if (!heap || !*heap) | 326 | if (!heap || !*heap) |
327 | return DRM_ERR(EFAULT); | 327 | return -EFAULT; |
328 | 328 | ||
329 | block = find_block(*heap, memfree.region_offset); | 329 | block = find_block(*heap, memfree.region_offset); |
330 | if (!block) | 330 | if (!block) |
331 | return DRM_ERR(EFAULT); | 331 | return -EFAULT; |
332 | 332 | ||
333 | if (block->filp != filp) | 333 | if (block->filp != filp) |
334 | return DRM_ERR(EPERM); | 334 | return -EPERM; |
335 | 335 | ||
336 | mark_block(dev, block, 0); | 336 | mark_block(dev, block, 0); |
337 | free_block(block); | 337 | free_block(block); |
@@ -347,7 +347,7 @@ int i915_mem_init_heap(DRM_IOCTL_ARGS) | |||
347 | 347 | ||
348 | if (!dev_priv) { | 348 | if (!dev_priv) { |
349 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 349 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
350 | return DRM_ERR(EINVAL); | 350 | return -EINVAL; |
351 | } | 351 | } |
352 | 352 | ||
353 | DRM_COPY_FROM_USER_IOCTL(initheap, | 353 | DRM_COPY_FROM_USER_IOCTL(initheap, |
@@ -356,11 +356,11 @@ int i915_mem_init_heap(DRM_IOCTL_ARGS) | |||
356 | 356 | ||
357 | heap = get_heap(dev_priv, initheap.region); | 357 | heap = get_heap(dev_priv, initheap.region); |
358 | if (!heap) | 358 | if (!heap) |
359 | return DRM_ERR(EFAULT); | 359 | return -EFAULT; |
360 | 360 | ||
361 | if (*heap) { | 361 | if (*heap) { |
362 | DRM_ERROR("heap already initialized?"); | 362 | DRM_ERROR("heap already initialized?"); |
363 | return DRM_ERR(EFAULT); | 363 | return -EFAULT; |
364 | } | 364 | } |
365 | 365 | ||
366 | return init_heap(heap, initheap.start, initheap.size); | 366 | return init_heap(heap, initheap.start, initheap.size); |
@@ -375,7 +375,7 @@ int i915_mem_destroy_heap( DRM_IOCTL_ARGS ) | |||
375 | 375 | ||
376 | if ( !dev_priv ) { | 376 | if ( !dev_priv ) { |
377 | DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); | 377 | DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); |
378 | return DRM_ERR(EINVAL); | 378 | return -EINVAL; |
379 | } | 379 | } |
380 | 380 | ||
381 | DRM_COPY_FROM_USER_IOCTL( destroyheap, (drm_i915_mem_destroy_heap_t *)data, | 381 | DRM_COPY_FROM_USER_IOCTL( destroyheap, (drm_i915_mem_destroy_heap_t *)data, |
@@ -384,12 +384,12 @@ int i915_mem_destroy_heap( DRM_IOCTL_ARGS ) | |||
384 | heap = get_heap( dev_priv, destroyheap.region ); | 384 | heap = get_heap( dev_priv, destroyheap.region ); |
385 | if (!heap) { | 385 | if (!heap) { |
386 | DRM_ERROR("get_heap failed"); | 386 | DRM_ERROR("get_heap failed"); |
387 | return DRM_ERR(EFAULT); | 387 | return -EFAULT; |
388 | } | 388 | } |
389 | 389 | ||
390 | if (!*heap) { | 390 | if (!*heap) { |
391 | DRM_ERROR("heap not initialized?"); | 391 | DRM_ERROR("heap not initialized?"); |
392 | return DRM_ERR(EFAULT); | 392 | return -EFAULT; |
393 | } | 393 | } |
394 | 394 | ||
395 | i915_mem_takedown( heap ); | 395 | i915_mem_takedown( heap ); |
diff --git a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c index 9c73a6e3861b..0f23760eb629 100644 --- a/drivers/char/drm/mga_dma.c +++ b/drivers/char/drm/mga_dma.c | |||
@@ -71,7 +71,7 @@ int mga_do_wait_for_idle(drm_mga_private_t * dev_priv) | |||
71 | DRM_ERROR("failed!\n"); | 71 | DRM_ERROR("failed!\n"); |
72 | DRM_INFO(" status=0x%08x\n", status); | 72 | DRM_INFO(" status=0x%08x\n", status); |
73 | #endif | 73 | #endif |
74 | return DRM_ERR(EBUSY); | 74 | return -EBUSY; |
75 | } | 75 | } |
76 | 76 | ||
77 | static int mga_do_dma_reset(drm_mga_private_t * dev_priv) | 77 | static int mga_do_dma_reset(drm_mga_private_t * dev_priv) |
@@ -256,7 +256,7 @@ static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_pr | |||
256 | 256 | ||
257 | dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); | 257 | dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); |
258 | if (dev_priv->head == NULL) | 258 | if (dev_priv->head == NULL) |
259 | return DRM_ERR(ENOMEM); | 259 | return -ENOMEM; |
260 | 260 | ||
261 | memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t)); | 261 | memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t)); |
262 | SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0); | 262 | SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0); |
@@ -267,7 +267,7 @@ static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_pr | |||
267 | 267 | ||
268 | entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); | 268 | entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); |
269 | if (entry == NULL) | 269 | if (entry == NULL) |
270 | return DRM_ERR(ENOMEM); | 270 | return -ENOMEM; |
271 | 271 | ||
272 | memset(entry, 0, sizeof(drm_mga_freelist_t)); | 272 | memset(entry, 0, sizeof(drm_mga_freelist_t)); |
273 | 273 | ||
@@ -399,7 +399,7 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags) | |||
399 | 399 | ||
400 | dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); | 400 | dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); |
401 | if (!dev_priv) | 401 | if (!dev_priv) |
402 | return DRM_ERR(ENOMEM); | 402 | return -ENOMEM; |
403 | 403 | ||
404 | dev->dev_private = (void *)dev_priv; | 404 | dev->dev_private = (void *)dev_priv; |
405 | memset(dev_priv, 0, sizeof(drm_mga_private_t)); | 405 | memset(dev_priv, 0, sizeof(drm_mga_private_t)); |
@@ -578,7 +578,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev, | |||
578 | DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n", | 578 | DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n", |
579 | dev_priv->warp->handle, dev_priv->primary->handle, | 579 | dev_priv->warp->handle, dev_priv->primary->handle, |
580 | dev->agp_buffer_map->handle); | 580 | dev->agp_buffer_map->handle); |
581 | return DRM_ERR(ENOMEM); | 581 | return -ENOMEM; |
582 | } | 582 | } |
583 | 583 | ||
584 | dev_priv->dma_access = MGA_PAGPXFER; | 584 | dev_priv->dma_access = MGA_PAGPXFER; |
@@ -622,7 +622,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev, | |||
622 | 622 | ||
623 | if (dev->dma == NULL) { | 623 | if (dev->dma == NULL) { |
624 | DRM_ERROR("dev->dma is NULL\n"); | 624 | DRM_ERROR("dev->dma is NULL\n"); |
625 | return DRM_ERR(EFAULT); | 625 | return -EFAULT; |
626 | } | 626 | } |
627 | 627 | ||
628 | /* Make drm_addbufs happy by not trying to create a mapping for less | 628 | /* Make drm_addbufs happy by not trying to create a mapping for less |
@@ -656,7 +656,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev, | |||
656 | 656 | ||
657 | if (err != 0) { | 657 | if (err != 0) { |
658 | DRM_ERROR("Unable to allocate primary DMA region: %d\n", err); | 658 | DRM_ERROR("Unable to allocate primary DMA region: %d\n", err); |
659 | return DRM_ERR(ENOMEM); | 659 | return -ENOMEM; |
660 | } | 660 | } |
661 | 661 | ||
662 | if (dev_priv->primary->size != dma_bs->primary_size) { | 662 | if (dev_priv->primary->size != dma_bs->primary_size) { |
@@ -826,7 +826,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init) | |||
826 | dev_priv->sarea = drm_getsarea(dev); | 826 | dev_priv->sarea = drm_getsarea(dev); |
827 | if (!dev_priv->sarea) { | 827 | if (!dev_priv->sarea) { |
828 | DRM_ERROR("failed to find sarea!\n"); | 828 | DRM_ERROR("failed to find sarea!\n"); |
829 | return DRM_ERR(EINVAL); | 829 | return -EINVAL; |
830 | } | 830 | } |
831 | 831 | ||
832 | if (!dev_priv->used_new_dma_init) { | 832 | if (!dev_priv->used_new_dma_init) { |
@@ -837,29 +837,29 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init) | |||
837 | dev_priv->status = drm_core_findmap(dev, init->status_offset); | 837 | dev_priv->status = drm_core_findmap(dev, init->status_offset); |
838 | if (!dev_priv->status) { | 838 | if (!dev_priv->status) { |
839 | DRM_ERROR("failed to find status page!\n"); | 839 | DRM_ERROR("failed to find status page!\n"); |
840 | return DRM_ERR(EINVAL); | 840 | return -EINVAL; |
841 | } | 841 | } |
842 | dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); | 842 | dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); |
843 | if (!dev_priv->mmio) { | 843 | if (!dev_priv->mmio) { |
844 | DRM_ERROR("failed to find mmio region!\n"); | 844 | DRM_ERROR("failed to find mmio region!\n"); |
845 | return DRM_ERR(EINVAL); | 845 | return -EINVAL; |
846 | } | 846 | } |
847 | dev_priv->warp = drm_core_findmap(dev, init->warp_offset); | 847 | dev_priv->warp = drm_core_findmap(dev, init->warp_offset); |
848 | if (!dev_priv->warp) { | 848 | if (!dev_priv->warp) { |
849 | DRM_ERROR("failed to find warp microcode region!\n"); | 849 | DRM_ERROR("failed to find warp microcode region!\n"); |
850 | return DRM_ERR(EINVAL); | 850 | return -EINVAL; |
851 | } | 851 | } |
852 | dev_priv->primary = drm_core_findmap(dev, init->primary_offset); | 852 | dev_priv->primary = drm_core_findmap(dev, init->primary_offset); |
853 | if (!dev_priv->primary) { | 853 | if (!dev_priv->primary) { |
854 | DRM_ERROR("failed to find primary dma region!\n"); | 854 | DRM_ERROR("failed to find primary dma region!\n"); |
855 | return DRM_ERR(EINVAL); | 855 | return -EINVAL; |
856 | } | 856 | } |
857 | dev->agp_buffer_token = init->buffers_offset; | 857 | dev->agp_buffer_token = init->buffers_offset; |
858 | dev->agp_buffer_map = | 858 | dev->agp_buffer_map = |
859 | drm_core_findmap(dev, init->buffers_offset); | 859 | drm_core_findmap(dev, init->buffers_offset); |
860 | if (!dev->agp_buffer_map) { | 860 | if (!dev->agp_buffer_map) { |
861 | DRM_ERROR("failed to find dma buffer region!\n"); | 861 | DRM_ERROR("failed to find dma buffer region!\n"); |
862 | return DRM_ERR(EINVAL); | 862 | return -EINVAL; |
863 | } | 863 | } |
864 | 864 | ||
865 | drm_core_ioremap(dev_priv->warp, dev); | 865 | drm_core_ioremap(dev_priv->warp, dev); |
@@ -877,7 +877,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init) | |||
877 | ((dev->agp_buffer_map == NULL) || | 877 | ((dev->agp_buffer_map == NULL) || |
878 | (dev->agp_buffer_map->handle == NULL)))) { | 878 | (dev->agp_buffer_map->handle == NULL)))) { |
879 | DRM_ERROR("failed to ioremap agp regions!\n"); | 879 | DRM_ERROR("failed to ioremap agp regions!\n"); |
880 | return DRM_ERR(ENOMEM); | 880 | return -ENOMEM; |
881 | } | 881 | } |
882 | 882 | ||
883 | ret = mga_warp_install_microcode(dev_priv); | 883 | ret = mga_warp_install_microcode(dev_priv); |
@@ -927,7 +927,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init) | |||
927 | 927 | ||
928 | if (mga_freelist_init(dev, dev_priv) < 0) { | 928 | if (mga_freelist_init(dev, dev_priv) < 0) { |
929 | DRM_ERROR("could not initialize freelist\n"); | 929 | DRM_ERROR("could not initialize freelist\n"); |
930 | return DRM_ERR(ENOMEM); | 930 | return -ENOMEM; |
931 | } | 931 | } |
932 | 932 | ||
933 | return 0; | 933 | return 0; |
@@ -1029,7 +1029,7 @@ int mga_dma_init(DRM_IOCTL_ARGS) | |||
1029 | return mga_do_cleanup_dma(dev, FULL_CLEANUP); | 1029 | return mga_do_cleanup_dma(dev, FULL_CLEANUP); |
1030 | } | 1030 | } |
1031 | 1031 | ||
1032 | return DRM_ERR(EINVAL); | 1032 | return -EINVAL; |
1033 | } | 1033 | } |
1034 | 1034 | ||
1035 | /* ================================================================ | 1035 | /* ================================================================ |
@@ -1094,16 +1094,16 @@ static int mga_dma_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm | |||
1094 | for (i = d->granted_count; i < d->request_count; i++) { | 1094 | for (i = d->granted_count; i < d->request_count; i++) { |
1095 | buf = mga_freelist_get(dev); | 1095 | buf = mga_freelist_get(dev); |
1096 | if (!buf) | 1096 | if (!buf) |
1097 | return DRM_ERR(EAGAIN); | 1097 | return -EAGAIN; |
1098 | 1098 | ||
1099 | buf->filp = filp; | 1099 | buf->filp = filp; |
1100 | 1100 | ||
1101 | if (DRM_COPY_TO_USER(&d->request_indices[i], | 1101 | if (DRM_COPY_TO_USER(&d->request_indices[i], |
1102 | &buf->idx, sizeof(buf->idx))) | 1102 | &buf->idx, sizeof(buf->idx))) |
1103 | return DRM_ERR(EFAULT); | 1103 | return -EFAULT; |
1104 | if (DRM_COPY_TO_USER(&d->request_sizes[i], | 1104 | if (DRM_COPY_TO_USER(&d->request_sizes[i], |
1105 | &buf->total, sizeof(buf->total))) | 1105 | &buf->total, sizeof(buf->total))) |
1106 | return DRM_ERR(EFAULT); | 1106 | return -EFAULT; |
1107 | 1107 | ||
1108 | d->granted_count++; | 1108 | d->granted_count++; |
1109 | } | 1109 | } |
@@ -1128,7 +1128,7 @@ int mga_dma_buffers(DRM_IOCTL_ARGS) | |||
1128 | if (d.send_count != 0) { | 1128 | if (d.send_count != 0) { |
1129 | DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", | 1129 | DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", |
1130 | DRM_CURRENTPID, d.send_count); | 1130 | DRM_CURRENTPID, d.send_count); |
1131 | return DRM_ERR(EINVAL); | 1131 | return -EINVAL; |
1132 | } | 1132 | } |
1133 | 1133 | ||
1134 | /* We'll send you buffers. | 1134 | /* We'll send you buffers. |
@@ -1136,7 +1136,7 @@ int mga_dma_buffers(DRM_IOCTL_ARGS) | |||
1136 | if (d.request_count < 0 || d.request_count > dma->buf_count) { | 1136 | if (d.request_count < 0 || d.request_count > dma->buf_count) { |
1137 | DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", | 1137 | DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", |
1138 | DRM_CURRENTPID, d.request_count, dma->buf_count); | 1138 | DRM_CURRENTPID, d.request_count, dma->buf_count); |
1139 | return DRM_ERR(EINVAL); | 1139 | return -EINVAL; |
1140 | } | 1140 | } |
1141 | 1141 | ||
1142 | WRAP_TEST_WITH_RETURN(dev_priv); | 1142 | WRAP_TEST_WITH_RETURN(dev_priv); |
diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h index 49253affa475..7bf59113fca6 100644 --- a/drivers/char/drm/mga_drv.h +++ b/drivers/char/drm/mga_drv.h | |||
@@ -245,7 +245,7 @@ do { \ | |||
245 | dev_priv->prim.high_mark ) { \ | 245 | dev_priv->prim.high_mark ) { \ |
246 | if ( MGA_DMA_DEBUG ) \ | 246 | if ( MGA_DMA_DEBUG ) \ |
247 | DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \ | 247 | DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \ |
248 | return DRM_ERR(EBUSY); \ | 248 | return -EBUSY; \ |
249 | } \ | 249 | } \ |
250 | } \ | 250 | } \ |
251 | } while (0) | 251 | } while (0) |
@@ -256,7 +256,7 @@ do { \ | |||
256 | if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \ | 256 | if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \ |
257 | if ( MGA_DMA_DEBUG ) \ | 257 | if ( MGA_DMA_DEBUG ) \ |
258 | DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \ | 258 | DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \ |
259 | return DRM_ERR(EBUSY); \ | 259 | return -EBUSY; \ |
260 | } \ | 260 | } \ |
261 | mga_do_dma_wrap_end( dev_priv ); \ | 261 | mga_do_dma_wrap_end( dev_priv ); \ |
262 | } \ | 262 | } \ |
diff --git a/drivers/char/drm/mga_state.c b/drivers/char/drm/mga_state.c index d448b0aef33c..4e9867161170 100644 --- a/drivers/char/drm/mga_state.c +++ b/drivers/char/drm/mga_state.c | |||
@@ -392,7 +392,7 @@ static int mga_verify_context(drm_mga_private_t * dev_priv) | |||
392 | ctx->dstorg, dev_priv->front_offset, | 392 | ctx->dstorg, dev_priv->front_offset, |
393 | dev_priv->back_offset); | 393 | dev_priv->back_offset); |
394 | ctx->dstorg = 0; | 394 | ctx->dstorg = 0; |
395 | return DRM_ERR(EINVAL); | 395 | return -EINVAL; |
396 | } | 396 | } |
397 | 397 | ||
398 | return 0; | 398 | return 0; |
@@ -411,7 +411,7 @@ static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit) | |||
411 | if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) { | 411 | if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) { |
412 | DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit); | 412 | DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit); |
413 | tex->texorg = 0; | 413 | tex->texorg = 0; |
414 | return DRM_ERR(EINVAL); | 414 | return -EINVAL; |
415 | } | 415 | } |
416 | 416 | ||
417 | return 0; | 417 | return 0; |
@@ -453,13 +453,13 @@ static int mga_verify_iload(drm_mga_private_t * dev_priv, | |||
453 | dstorg + length > (dev_priv->texture_offset + | 453 | dstorg + length > (dev_priv->texture_offset + |
454 | dev_priv->texture_size)) { | 454 | dev_priv->texture_size)) { |
455 | DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg); | 455 | DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg); |
456 | return DRM_ERR(EINVAL); | 456 | return -EINVAL; |
457 | } | 457 | } |
458 | 458 | ||
459 | if (length & MGA_ILOAD_MASK) { | 459 | if (length & MGA_ILOAD_MASK) { |
460 | DRM_ERROR("*** bad iload length: 0x%x\n", | 460 | DRM_ERROR("*** bad iload length: 0x%x\n", |
461 | length & MGA_ILOAD_MASK); | 461 | length & MGA_ILOAD_MASK); |
462 | return DRM_ERR(EINVAL); | 462 | return -EINVAL; |
463 | } | 463 | } |
464 | 464 | ||
465 | return 0; | 465 | return 0; |
@@ -471,7 +471,7 @@ static int mga_verify_blit(drm_mga_private_t * dev_priv, | |||
471 | if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) || | 471 | if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) || |
472 | (dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) { | 472 | (dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) { |
473 | DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg); | 473 | DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg); |
474 | return DRM_ERR(EINVAL); | 474 | return -EINVAL; |
475 | } | 475 | } |
476 | return 0; | 476 | return 0; |
477 | } | 477 | } |
@@ -892,7 +892,7 @@ static int mga_dma_vertex(DRM_IOCTL_ARGS) | |||
892 | sizeof(vertex)); | 892 | sizeof(vertex)); |
893 | 893 | ||
894 | if (vertex.idx < 0 || vertex.idx > dma->buf_count) | 894 | if (vertex.idx < 0 || vertex.idx > dma->buf_count) |
895 | return DRM_ERR(EINVAL); | 895 | return -EINVAL; |
896 | buf = dma->buflist[vertex.idx]; | 896 | buf = dma->buflist[vertex.idx]; |
897 | buf_priv = buf->dev_private; | 897 | buf_priv = buf->dev_private; |
898 | 898 | ||
@@ -906,7 +906,7 @@ static int mga_dma_vertex(DRM_IOCTL_ARGS) | |||
906 | buf_priv->dispatched = 0; | 906 | buf_priv->dispatched = 0; |
907 | mga_freelist_put(dev, buf); | 907 | mga_freelist_put(dev, buf); |
908 | } | 908 | } |
909 | return DRM_ERR(EINVAL); | 909 | return -EINVAL; |
910 | } | 910 | } |
911 | 911 | ||
912 | WRAP_TEST_WITH_RETURN(dev_priv); | 912 | WRAP_TEST_WITH_RETURN(dev_priv); |
@@ -932,7 +932,7 @@ static int mga_dma_indices(DRM_IOCTL_ARGS) | |||
932 | sizeof(indices)); | 932 | sizeof(indices)); |
933 | 933 | ||
934 | if (indices.idx < 0 || indices.idx > dma->buf_count) | 934 | if (indices.idx < 0 || indices.idx > dma->buf_count) |
935 | return DRM_ERR(EINVAL); | 935 | return -EINVAL; |
936 | 936 | ||
937 | buf = dma->buflist[indices.idx]; | 937 | buf = dma->buflist[indices.idx]; |
938 | buf_priv = buf->dev_private; | 938 | buf_priv = buf->dev_private; |
@@ -946,7 +946,7 @@ static int mga_dma_indices(DRM_IOCTL_ARGS) | |||
946 | buf_priv->dispatched = 0; | 946 | buf_priv->dispatched = 0; |
947 | mga_freelist_put(dev, buf); | 947 | mga_freelist_put(dev, buf); |
948 | } | 948 | } |
949 | return DRM_ERR(EINVAL); | 949 | return -EINVAL; |
950 | } | 950 | } |
951 | 951 | ||
952 | WRAP_TEST_WITH_RETURN(dev_priv); | 952 | WRAP_TEST_WITH_RETURN(dev_priv); |
@@ -975,18 +975,18 @@ static int mga_dma_iload(DRM_IOCTL_ARGS) | |||
975 | if (mga_do_wait_for_idle(dev_priv) < 0) { | 975 | if (mga_do_wait_for_idle(dev_priv) < 0) { |
976 | if (MGA_DMA_DEBUG) | 976 | if (MGA_DMA_DEBUG) |
977 | DRM_INFO("%s: -EBUSY\n", __FUNCTION__); | 977 | DRM_INFO("%s: -EBUSY\n", __FUNCTION__); |
978 | return DRM_ERR(EBUSY); | 978 | return -EBUSY; |
979 | } | 979 | } |
980 | #endif | 980 | #endif |
981 | if (iload.idx < 0 || iload.idx > dma->buf_count) | 981 | if (iload.idx < 0 || iload.idx > dma->buf_count) |
982 | return DRM_ERR(EINVAL); | 982 | return -EINVAL; |
983 | 983 | ||
984 | buf = dma->buflist[iload.idx]; | 984 | buf = dma->buflist[iload.idx]; |
985 | buf_priv = buf->dev_private; | 985 | buf_priv = buf->dev_private; |
986 | 986 | ||
987 | if (mga_verify_iload(dev_priv, iload.dstorg, iload.length)) { | 987 | if (mga_verify_iload(dev_priv, iload.dstorg, iload.length)) { |
988 | mga_freelist_put(dev, buf); | 988 | mga_freelist_put(dev, buf); |
989 | return DRM_ERR(EINVAL); | 989 | return -EINVAL; |
990 | } | 990 | } |
991 | 991 | ||
992 | WRAP_TEST_WITH_RETURN(dev_priv); | 992 | WRAP_TEST_WITH_RETURN(dev_priv); |
@@ -1017,7 +1017,7 @@ static int mga_dma_blit(DRM_IOCTL_ARGS) | |||
1017 | sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; | 1017 | sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; |
1018 | 1018 | ||
1019 | if (mga_verify_blit(dev_priv, blit.srcorg, blit.dstorg)) | 1019 | if (mga_verify_blit(dev_priv, blit.srcorg, blit.dstorg)) |
1020 | return DRM_ERR(EINVAL); | 1020 | return -EINVAL; |
1021 | 1021 | ||
1022 | WRAP_TEST_WITH_RETURN(dev_priv); | 1022 | WRAP_TEST_WITH_RETURN(dev_priv); |
1023 | 1023 | ||
@@ -1039,7 +1039,7 @@ static int mga_getparam(DRM_IOCTL_ARGS) | |||
1039 | 1039 | ||
1040 | if (!dev_priv) { | 1040 | if (!dev_priv) { |
1041 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 1041 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
1042 | return DRM_ERR(EINVAL); | 1042 | return -EINVAL; |
1043 | } | 1043 | } |
1044 | 1044 | ||
1045 | DRM_COPY_FROM_USER_IOCTL(param, (drm_mga_getparam_t __user *) data, | 1045 | DRM_COPY_FROM_USER_IOCTL(param, (drm_mga_getparam_t __user *) data, |
@@ -1055,12 +1055,12 @@ static int mga_getparam(DRM_IOCTL_ARGS) | |||
1055 | value = dev_priv->chipset; | 1055 | value = dev_priv->chipset; |
1056 | break; | 1056 | break; |
1057 | default: | 1057 | default: |
1058 | return DRM_ERR(EINVAL); | 1058 | return -EINVAL; |
1059 | } | 1059 | } |
1060 | 1060 | ||
1061 | if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { | 1061 | if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { |
1062 | DRM_ERROR("copy_to_user\n"); | 1062 | DRM_ERROR("copy_to_user\n"); |
1063 | return DRM_ERR(EFAULT); | 1063 | return -EFAULT; |
1064 | } | 1064 | } |
1065 | 1065 | ||
1066 | return 0; | 1066 | return 0; |
@@ -1075,7 +1075,7 @@ static int mga_set_fence(DRM_IOCTL_ARGS) | |||
1075 | 1075 | ||
1076 | if (!dev_priv) { | 1076 | if (!dev_priv) { |
1077 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 1077 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
1078 | return DRM_ERR(EINVAL); | 1078 | return -EINVAL; |
1079 | } | 1079 | } |
1080 | 1080 | ||
1081 | DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); | 1081 | DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); |
@@ -1095,7 +1095,7 @@ static int mga_set_fence(DRM_IOCTL_ARGS) | |||
1095 | 1095 | ||
1096 | if (DRM_COPY_TO_USER((u32 __user *) data, &temp, sizeof(u32))) { | 1096 | if (DRM_COPY_TO_USER((u32 __user *) data, &temp, sizeof(u32))) { |
1097 | DRM_ERROR("copy_to_user\n"); | 1097 | DRM_ERROR("copy_to_user\n"); |
1098 | return DRM_ERR(EFAULT); | 1098 | return -EFAULT; |
1099 | } | 1099 | } |
1100 | 1100 | ||
1101 | return 0; | 1101 | return 0; |
@@ -1109,7 +1109,7 @@ static int mga_wait_fence(DRM_IOCTL_ARGS) | |||
1109 | 1109 | ||
1110 | if (!dev_priv) { | 1110 | if (!dev_priv) { |
1111 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 1111 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
1112 | return DRM_ERR(EINVAL); | 1112 | return -EINVAL; |
1113 | } | 1113 | } |
1114 | 1114 | ||
1115 | DRM_COPY_FROM_USER_IOCTL(fence, (u32 __user *) data, sizeof(u32)); | 1115 | DRM_COPY_FROM_USER_IOCTL(fence, (u32 __user *) data, sizeof(u32)); |
@@ -1120,7 +1120,7 @@ static int mga_wait_fence(DRM_IOCTL_ARGS) | |||
1120 | 1120 | ||
1121 | if (DRM_COPY_TO_USER((u32 __user *) data, &fence, sizeof(u32))) { | 1121 | if (DRM_COPY_TO_USER((u32 __user *) data, &fence, sizeof(u32))) { |
1122 | DRM_ERROR("copy_to_user\n"); | 1122 | DRM_ERROR("copy_to_user\n"); |
1123 | return DRM_ERR(EFAULT); | 1123 | return -EFAULT; |
1124 | } | 1124 | } |
1125 | 1125 | ||
1126 | return 0; | 1126 | return 0; |
diff --git a/drivers/char/drm/mga_warp.c b/drivers/char/drm/mga_warp.c index d67f4925fbac..651b93c8ab5d 100644 --- a/drivers/char/drm/mga_warp.c +++ b/drivers/char/drm/mga_warp.c | |||
@@ -141,7 +141,7 @@ int mga_warp_install_microcode(drm_mga_private_t * dev_priv) | |||
141 | if (size > dev_priv->warp->size) { | 141 | if (size > dev_priv->warp->size) { |
142 | DRM_ERROR("microcode too large! (%u > %lu)\n", | 142 | DRM_ERROR("microcode too large! (%u > %lu)\n", |
143 | size, dev_priv->warp->size); | 143 | size, dev_priv->warp->size); |
144 | return DRM_ERR(ENOMEM); | 144 | return -ENOMEM; |
145 | } | 145 | } |
146 | 146 | ||
147 | switch (dev_priv->chipset) { | 147 | switch (dev_priv->chipset) { |
@@ -151,7 +151,7 @@ int mga_warp_install_microcode(drm_mga_private_t * dev_priv) | |||
151 | case MGA_CARD_TYPE_G200: | 151 | case MGA_CARD_TYPE_G200: |
152 | return mga_warp_install_g200_microcode(dev_priv); | 152 | return mga_warp_install_g200_microcode(dev_priv); |
153 | default: | 153 | default: |
154 | return DRM_ERR(EINVAL); | 154 | return -EINVAL; |
155 | } | 155 | } |
156 | } | 156 | } |
157 | 157 | ||
@@ -177,7 +177,7 @@ int mga_warp_init(drm_mga_private_t * dev_priv) | |||
177 | MGA_WRITE(MGA_WVRTXSZ, 7); | 177 | MGA_WRITE(MGA_WVRTXSZ, 7); |
178 | break; | 178 | break; |
179 | default: | 179 | default: |
180 | return DRM_ERR(EINVAL); | 180 | return -EINVAL; |
181 | } | 181 | } |
182 | 182 | ||
183 | MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE | | 183 | MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE | |
@@ -186,7 +186,7 @@ int mga_warp_init(drm_mga_private_t * dev_priv) | |||
186 | if (wmisc != WMISC_EXPECTED) { | 186 | if (wmisc != WMISC_EXPECTED) { |
187 | DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n", | 187 | DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n", |
188 | wmisc, WMISC_EXPECTED); | 188 | wmisc, WMISC_EXPECTED); |
189 | return DRM_ERR(EINVAL); | 189 | return -EINVAL; |
190 | } | 190 | } |
191 | 191 | ||
192 | return 0; | 192 | return 0; |
diff --git a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c index b163ed09bd81..15e873627d53 100644 --- a/drivers/char/drm/r128_cce.c +++ b/drivers/char/drm/r128_cce.c | |||
@@ -129,7 +129,7 @@ static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv) | |||
129 | #if R128_FIFO_DEBUG | 129 | #if R128_FIFO_DEBUG |
130 | DRM_ERROR("failed!\n"); | 130 | DRM_ERROR("failed!\n"); |
131 | #endif | 131 | #endif |
132 | return DRM_ERR(EBUSY); | 132 | return -EBUSY; |
133 | } | 133 | } |
134 | 134 | ||
135 | static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries) | 135 | static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries) |
@@ -146,7 +146,7 @@ static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries) | |||
146 | #if R128_FIFO_DEBUG | 146 | #if R128_FIFO_DEBUG |
147 | DRM_ERROR("failed!\n"); | 147 | DRM_ERROR("failed!\n"); |
148 | #endif | 148 | #endif |
149 | return DRM_ERR(EBUSY); | 149 | return -EBUSY; |
150 | } | 150 | } |
151 | 151 | ||
152 | static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv) | 152 | static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv) |
@@ -168,7 +168,7 @@ static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv) | |||
168 | #if R128_FIFO_DEBUG | 168 | #if R128_FIFO_DEBUG |
169 | DRM_ERROR("failed!\n"); | 169 | DRM_ERROR("failed!\n"); |
170 | #endif | 170 | #endif |
171 | return DRM_ERR(EBUSY); | 171 | return -EBUSY; |
172 | } | 172 | } |
173 | 173 | ||
174 | /* ================================================================ | 174 | /* ================================================================ |
@@ -227,7 +227,7 @@ int r128_do_cce_idle(drm_r128_private_t * dev_priv) | |||
227 | DRM_ERROR("failed!\n"); | 227 | DRM_ERROR("failed!\n"); |
228 | r128_status(dev_priv); | 228 | r128_status(dev_priv); |
229 | #endif | 229 | #endif |
230 | return DRM_ERR(EBUSY); | 230 | return -EBUSY; |
231 | } | 231 | } |
232 | 232 | ||
233 | /* Start the Concurrent Command Engine. | 233 | /* Start the Concurrent Command Engine. |
@@ -355,7 +355,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) | |||
355 | 355 | ||
356 | dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER); | 356 | dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER); |
357 | if (dev_priv == NULL) | 357 | if (dev_priv == NULL) |
358 | return DRM_ERR(ENOMEM); | 358 | return -ENOMEM; |
359 | 359 | ||
360 | memset(dev_priv, 0, sizeof(drm_r128_private_t)); | 360 | memset(dev_priv, 0, sizeof(drm_r128_private_t)); |
361 | 361 | ||
@@ -365,7 +365,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) | |||
365 | DRM_ERROR("PCI GART memory not allocated!\n"); | 365 | DRM_ERROR("PCI GART memory not allocated!\n"); |
366 | dev->dev_private = (void *)dev_priv; | 366 | dev->dev_private = (void *)dev_priv; |
367 | r128_do_cleanup_cce(dev); | 367 | r128_do_cleanup_cce(dev); |
368 | return DRM_ERR(EINVAL); | 368 | return -EINVAL; |
369 | } | 369 | } |
370 | 370 | ||
371 | dev_priv->usec_timeout = init->usec_timeout; | 371 | dev_priv->usec_timeout = init->usec_timeout; |
@@ -374,7 +374,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) | |||
374 | DRM_DEBUG("TIMEOUT problem!\n"); | 374 | DRM_DEBUG("TIMEOUT problem!\n"); |
375 | dev->dev_private = (void *)dev_priv; | 375 | dev->dev_private = (void *)dev_priv; |
376 | r128_do_cleanup_cce(dev); | 376 | r128_do_cleanup_cce(dev); |
377 | return DRM_ERR(EINVAL); | 377 | return -EINVAL; |
378 | } | 378 | } |
379 | 379 | ||
380 | dev_priv->cce_mode = init->cce_mode; | 380 | dev_priv->cce_mode = init->cce_mode; |
@@ -394,7 +394,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) | |||
394 | DRM_DEBUG("Bad cce_mode!\n"); | 394 | DRM_DEBUG("Bad cce_mode!\n"); |
395 | dev->dev_private = (void *)dev_priv; | 395 | dev->dev_private = (void *)dev_priv; |
396 | r128_do_cleanup_cce(dev); | 396 | r128_do_cleanup_cce(dev); |
397 | return DRM_ERR(EINVAL); | 397 | return -EINVAL; |
398 | } | 398 | } |
399 | 399 | ||
400 | switch (init->cce_mode) { | 400 | switch (init->cce_mode) { |
@@ -461,7 +461,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) | |||
461 | DRM_ERROR("could not find sarea!\n"); | 461 | DRM_ERROR("could not find sarea!\n"); |
462 | dev->dev_private = (void *)dev_priv; | 462 | dev->dev_private = (void *)dev_priv; |
463 | r128_do_cleanup_cce(dev); | 463 | r128_do_cleanup_cce(dev); |
464 | return DRM_ERR(EINVAL); | 464 | return -EINVAL; |
465 | } | 465 | } |
466 | 466 | ||
467 | dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); | 467 | dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); |
@@ -469,21 +469,21 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) | |||
469 | DRM_ERROR("could not find mmio region!\n"); | 469 | DRM_ERROR("could not find mmio region!\n"); |
470 | dev->dev_private = (void *)dev_priv; | 470 | dev->dev_private = (void *)dev_priv; |
471 | r128_do_cleanup_cce(dev); | 471 | r128_do_cleanup_cce(dev); |
472 | return DRM_ERR(EINVAL); | 472 | return -EINVAL; |
473 | } | 473 | } |
474 | dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset); | 474 | dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset); |
475 | if (!dev_priv->cce_ring) { | 475 | if (!dev_priv->cce_ring) { |
476 | DRM_ERROR("could not find cce ring region!\n"); | 476 | DRM_ERROR("could not find cce ring region!\n"); |
477 | dev->dev_private = (void *)dev_priv; | 477 | dev->dev_private = (void *)dev_priv; |
478 | r128_do_cleanup_cce(dev); | 478 | r128_do_cleanup_cce(dev); |
479 | return DRM_ERR(EINVAL); | 479 | return -EINVAL; |
480 | } | 480 | } |
481 | dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); | 481 | dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); |
482 | if (!dev_priv->ring_rptr) { | 482 | if (!dev_priv->ring_rptr) { |
483 | DRM_ERROR("could not find ring read pointer!\n"); | 483 | DRM_ERROR("could not find ring read pointer!\n"); |
484 | dev->dev_private = (void *)dev_priv; | 484 | dev->dev_private = (void *)dev_priv; |
485 | r128_do_cleanup_cce(dev); | 485 | r128_do_cleanup_cce(dev); |
486 | return DRM_ERR(EINVAL); | 486 | return -EINVAL; |
487 | } | 487 | } |
488 | dev->agp_buffer_token = init->buffers_offset; | 488 | dev->agp_buffer_token = init->buffers_offset; |
489 | dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); | 489 | dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); |
@@ -491,7 +491,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) | |||
491 | DRM_ERROR("could not find dma buffer region!\n"); | 491 | DRM_ERROR("could not find dma buffer region!\n"); |
492 | dev->dev_private = (void *)dev_priv; | 492 | dev->dev_private = (void *)dev_priv; |
493 | r128_do_cleanup_cce(dev); | 493 | r128_do_cleanup_cce(dev); |
494 | return DRM_ERR(EINVAL); | 494 | return -EINVAL; |
495 | } | 495 | } |
496 | 496 | ||
497 | if (!dev_priv->is_pci) { | 497 | if (!dev_priv->is_pci) { |
@@ -501,7 +501,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) | |||
501 | DRM_ERROR("could not find agp texture region!\n"); | 501 | DRM_ERROR("could not find agp texture region!\n"); |
502 | dev->dev_private = (void *)dev_priv; | 502 | dev->dev_private = (void *)dev_priv; |
503 | r128_do_cleanup_cce(dev); | 503 | r128_do_cleanup_cce(dev); |
504 | return DRM_ERR(EINVAL); | 504 | return -EINVAL; |
505 | } | 505 | } |
506 | } | 506 | } |
507 | 507 | ||
@@ -520,7 +520,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) | |||
520 | DRM_ERROR("Could not ioremap agp regions!\n"); | 520 | DRM_ERROR("Could not ioremap agp regions!\n"); |
521 | dev->dev_private = (void *)dev_priv; | 521 | dev->dev_private = (void *)dev_priv; |
522 | r128_do_cleanup_cce(dev); | 522 | r128_do_cleanup_cce(dev); |
523 | return DRM_ERR(ENOMEM); | 523 | return -ENOMEM; |
524 | } | 524 | } |
525 | } else | 525 | } else |
526 | #endif | 526 | #endif |
@@ -567,7 +567,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) | |||
567 | DRM_ERROR("failed to init PCI GART!\n"); | 567 | DRM_ERROR("failed to init PCI GART!\n"); |
568 | dev->dev_private = (void *)dev_priv; | 568 | dev->dev_private = (void *)dev_priv; |
569 | r128_do_cleanup_cce(dev); | 569 | r128_do_cleanup_cce(dev); |
570 | return DRM_ERR(ENOMEM); | 570 | return -ENOMEM; |
571 | } | 571 | } |
572 | R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr); | 572 | R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr); |
573 | #if __OS_HAS_AGP | 573 | #if __OS_HAS_AGP |
@@ -644,7 +644,7 @@ int r128_cce_init(DRM_IOCTL_ARGS) | |||
644 | return r128_do_cleanup_cce(dev); | 644 | return r128_do_cleanup_cce(dev); |
645 | } | 645 | } |
646 | 646 | ||
647 | return DRM_ERR(EINVAL); | 647 | return -EINVAL; |
648 | } | 648 | } |
649 | 649 | ||
650 | int r128_cce_start(DRM_IOCTL_ARGS) | 650 | int r128_cce_start(DRM_IOCTL_ARGS) |
@@ -721,7 +721,7 @@ int r128_cce_reset(DRM_IOCTL_ARGS) | |||
721 | 721 | ||
722 | if (!dev_priv) { | 722 | if (!dev_priv) { |
723 | DRM_DEBUG("%s called before init done\n", __FUNCTION__); | 723 | DRM_DEBUG("%s called before init done\n", __FUNCTION__); |
724 | return DRM_ERR(EINVAL); | 724 | return -EINVAL; |
725 | } | 725 | } |
726 | 726 | ||
727 | r128_do_cce_reset(dev_priv); | 727 | r128_do_cce_reset(dev_priv); |
@@ -759,7 +759,7 @@ int r128_engine_reset(DRM_IOCTL_ARGS) | |||
759 | 759 | ||
760 | int r128_fullscreen(DRM_IOCTL_ARGS) | 760 | int r128_fullscreen(DRM_IOCTL_ARGS) |
761 | { | 761 | { |
762 | return DRM_ERR(EINVAL); | 762 | return -EINVAL; |
763 | } | 763 | } |
764 | 764 | ||
765 | /* ================================================================ | 765 | /* ================================================================ |
@@ -780,7 +780,7 @@ static int r128_freelist_init(struct drm_device * dev) | |||
780 | 780 | ||
781 | dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); | 781 | dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); |
782 | if (dev_priv->head == NULL) | 782 | if (dev_priv->head == NULL) |
783 | return DRM_ERR(ENOMEM); | 783 | return -ENOMEM; |
784 | 784 | ||
785 | memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t)); | 785 | memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t)); |
786 | dev_priv->head->age = R128_BUFFER_USED; | 786 | dev_priv->head->age = R128_BUFFER_USED; |
@@ -791,7 +791,7 @@ static int r128_freelist_init(struct drm_device * dev) | |||
791 | 791 | ||
792 | entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); | 792 | entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); |
793 | if (!entry) | 793 | if (!entry) |
794 | return DRM_ERR(ENOMEM); | 794 | return -ENOMEM; |
795 | 795 | ||
796 | entry->age = R128_BUFFER_FREE; | 796 | entry->age = R128_BUFFER_FREE; |
797 | entry->buf = buf; | 797 | entry->buf = buf; |
@@ -883,7 +883,7 @@ int r128_wait_ring(drm_r128_private_t * dev_priv, int n) | |||
883 | 883 | ||
884 | /* FIXME: This is being ignored... */ | 884 | /* FIXME: This is being ignored... */ |
885 | DRM_ERROR("failed!\n"); | 885 | DRM_ERROR("failed!\n"); |
886 | return DRM_ERR(EBUSY); | 886 | return -EBUSY; |
887 | } | 887 | } |
888 | 888 | ||
889 | static int r128_cce_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d) | 889 | static int r128_cce_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d) |
@@ -894,16 +894,16 @@ static int r128_cce_get_buffers(DRMFILE filp, struct drm_device * dev, struct dr | |||
894 | for (i = d->granted_count; i < d->request_count; i++) { | 894 | for (i = d->granted_count; i < d->request_count; i++) { |
895 | buf = r128_freelist_get(dev); | 895 | buf = r128_freelist_get(dev); |
896 | if (!buf) | 896 | if (!buf) |
897 | return DRM_ERR(EAGAIN); | 897 | return -EAGAIN; |
898 | 898 | ||
899 | buf->filp = filp; | 899 | buf->filp = filp; |
900 | 900 | ||
901 | if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, | 901 | if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, |
902 | sizeof(buf->idx))) | 902 | sizeof(buf->idx))) |
903 | return DRM_ERR(EFAULT); | 903 | return -EFAULT; |
904 | if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, | 904 | if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, |
905 | sizeof(buf->total))) | 905 | sizeof(buf->total))) |
906 | return DRM_ERR(EFAULT); | 906 | return -EFAULT; |
907 | 907 | ||
908 | d->granted_count++; | 908 | d->granted_count++; |
909 | } | 909 | } |
@@ -927,7 +927,7 @@ int r128_cce_buffers(DRM_IOCTL_ARGS) | |||
927 | if (d.send_count != 0) { | 927 | if (d.send_count != 0) { |
928 | DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", | 928 | DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", |
929 | DRM_CURRENTPID, d.send_count); | 929 | DRM_CURRENTPID, d.send_count); |
930 | return DRM_ERR(EINVAL); | 930 | return -EINVAL; |
931 | } | 931 | } |
932 | 932 | ||
933 | /* We'll send you buffers. | 933 | /* We'll send you buffers. |
@@ -935,7 +935,7 @@ int r128_cce_buffers(DRM_IOCTL_ARGS) | |||
935 | if (d.request_count < 0 || d.request_count > dma->buf_count) { | 935 | if (d.request_count < 0 || d.request_count > dma->buf_count) { |
936 | DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", | 936 | DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", |
937 | DRM_CURRENTPID, d.request_count, dma->buf_count); | 937 | DRM_CURRENTPID, d.request_count, dma->buf_count); |
938 | return DRM_ERR(EINVAL); | 938 | return -EINVAL; |
939 | } | 939 | } |
940 | 940 | ||
941 | d.granted_count = 0; | 941 | d.granted_count = 0; |
diff --git a/drivers/char/drm/r128_drv.h b/drivers/char/drm/r128_drv.h index 72249fb2fd1c..2835aa3c302f 100644 --- a/drivers/char/drm/r128_drv.h +++ b/drivers/char/drm/r128_drv.h | |||
@@ -428,7 +428,7 @@ do { \ | |||
428 | DRM_UDELAY(1); \ | 428 | DRM_UDELAY(1); \ |
429 | } \ | 429 | } \ |
430 | DRM_ERROR( "ring space check failed!\n" ); \ | 430 | DRM_ERROR( "ring space check failed!\n" ); \ |
431 | return DRM_ERR(EBUSY); \ | 431 | return -EBUSY; \ |
432 | } \ | 432 | } \ |
433 | __ring_space_done: \ | 433 | __ring_space_done: \ |
434 | ; \ | 434 | ; \ |
diff --git a/drivers/char/drm/r128_state.c b/drivers/char/drm/r128_state.c index 7b334fb7d649..f0e2571e94a9 100644 --- a/drivers/char/drm/r128_state.c +++ b/drivers/char/drm/r128_state.c | |||
@@ -809,7 +809,7 @@ static int r128_cce_dispatch_blit(DRMFILE filp, | |||
809 | break; | 809 | break; |
810 | default: | 810 | default: |
811 | DRM_ERROR("invalid blit format %d\n", blit->format); | 811 | DRM_ERROR("invalid blit format %d\n", blit->format); |
812 | return DRM_ERR(EINVAL); | 812 | return -EINVAL; |
813 | } | 813 | } |
814 | 814 | ||
815 | /* Flush the pixel cache, and mark the contents as Read Invalid. | 815 | /* Flush the pixel cache, and mark the contents as Read Invalid. |
@@ -832,11 +832,11 @@ static int r128_cce_dispatch_blit(DRMFILE filp, | |||
832 | if (buf->filp != filp) { | 832 | if (buf->filp != filp) { |
833 | DRM_ERROR("process %d using buffer owned by %p\n", | 833 | DRM_ERROR("process %d using buffer owned by %p\n", |
834 | DRM_CURRENTPID, buf->filp); | 834 | DRM_CURRENTPID, buf->filp); |
835 | return DRM_ERR(EINVAL); | 835 | return -EINVAL; |
836 | } | 836 | } |
837 | if (buf->pending) { | 837 | if (buf->pending) { |
838 | DRM_ERROR("sending pending buffer %d\n", blit->idx); | 838 | DRM_ERROR("sending pending buffer %d\n", blit->idx); |
839 | return DRM_ERR(EINVAL); | 839 | return -EINVAL; |
840 | } | 840 | } |
841 | 841 | ||
842 | buf_priv->discard = 1; | 842 | buf_priv->discard = 1; |
@@ -900,22 +900,22 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev, | |||
900 | 900 | ||
901 | count = depth->n; | 901 | count = depth->n; |
902 | if (count > 4096 || count <= 0) | 902 | if (count > 4096 || count <= 0) |
903 | return DRM_ERR(EMSGSIZE); | 903 | return -EMSGSIZE; |
904 | 904 | ||
905 | if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) { | 905 | if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) { |
906 | return DRM_ERR(EFAULT); | 906 | return -EFAULT; |
907 | } | 907 | } |
908 | if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) { | 908 | if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) { |
909 | return DRM_ERR(EFAULT); | 909 | return -EFAULT; |
910 | } | 910 | } |
911 | 911 | ||
912 | buffer_size = depth->n * sizeof(u32); | 912 | buffer_size = depth->n * sizeof(u32); |
913 | buffer = drm_alloc(buffer_size, DRM_MEM_BUFS); | 913 | buffer = drm_alloc(buffer_size, DRM_MEM_BUFS); |
914 | if (buffer == NULL) | 914 | if (buffer == NULL) |
915 | return DRM_ERR(ENOMEM); | 915 | return -ENOMEM; |
916 | if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { | 916 | if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { |
917 | drm_free(buffer, buffer_size, DRM_MEM_BUFS); | 917 | drm_free(buffer, buffer_size, DRM_MEM_BUFS); |
918 | return DRM_ERR(EFAULT); | 918 | return -EFAULT; |
919 | } | 919 | } |
920 | 920 | ||
921 | mask_size = depth->n * sizeof(u8); | 921 | mask_size = depth->n * sizeof(u8); |
@@ -923,12 +923,12 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev, | |||
923 | mask = drm_alloc(mask_size, DRM_MEM_BUFS); | 923 | mask = drm_alloc(mask_size, DRM_MEM_BUFS); |
924 | if (mask == NULL) { | 924 | if (mask == NULL) { |
925 | drm_free(buffer, buffer_size, DRM_MEM_BUFS); | 925 | drm_free(buffer, buffer_size, DRM_MEM_BUFS); |
926 | return DRM_ERR(ENOMEM); | 926 | return -ENOMEM; |
927 | } | 927 | } |
928 | if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { | 928 | if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { |
929 | drm_free(buffer, buffer_size, DRM_MEM_BUFS); | 929 | drm_free(buffer, buffer_size, DRM_MEM_BUFS); |
930 | drm_free(mask, mask_size, DRM_MEM_BUFS); | 930 | drm_free(mask, mask_size, DRM_MEM_BUFS); |
931 | return DRM_ERR(EFAULT); | 931 | return -EFAULT; |
932 | } | 932 | } |
933 | 933 | ||
934 | for (i = 0; i < count; i++, x++) { | 934 | for (i = 0; i < count; i++, x++) { |
@@ -996,28 +996,28 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev, | |||
996 | 996 | ||
997 | count = depth->n; | 997 | count = depth->n; |
998 | if (count > 4096 || count <= 0) | 998 | if (count > 4096 || count <= 0) |
999 | return DRM_ERR(EMSGSIZE); | 999 | return -EMSGSIZE; |
1000 | 1000 | ||
1001 | xbuf_size = count * sizeof(*x); | 1001 | xbuf_size = count * sizeof(*x); |
1002 | ybuf_size = count * sizeof(*y); | 1002 | ybuf_size = count * sizeof(*y); |
1003 | x = drm_alloc(xbuf_size, DRM_MEM_BUFS); | 1003 | x = drm_alloc(xbuf_size, DRM_MEM_BUFS); |
1004 | if (x == NULL) { | 1004 | if (x == NULL) { |
1005 | return DRM_ERR(ENOMEM); | 1005 | return -ENOMEM; |
1006 | } | 1006 | } |
1007 | y = drm_alloc(ybuf_size, DRM_MEM_BUFS); | 1007 | y = drm_alloc(ybuf_size, DRM_MEM_BUFS); |
1008 | if (y == NULL) { | 1008 | if (y == NULL) { |
1009 | drm_free(x, xbuf_size, DRM_MEM_BUFS); | 1009 | drm_free(x, xbuf_size, DRM_MEM_BUFS); |
1010 | return DRM_ERR(ENOMEM); | 1010 | return -ENOMEM; |
1011 | } | 1011 | } |
1012 | if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { | 1012 | if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { |
1013 | drm_free(x, xbuf_size, DRM_MEM_BUFS); | 1013 | drm_free(x, xbuf_size, DRM_MEM_BUFS); |
1014 | drm_free(y, ybuf_size, DRM_MEM_BUFS); | 1014 | drm_free(y, ybuf_size, DRM_MEM_BUFS); |
1015 | return DRM_ERR(EFAULT); | 1015 | return -EFAULT; |
1016 | } | 1016 | } |
1017 | if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) { | 1017 | if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) { |
1018 | drm_free(x, xbuf_size, DRM_MEM_BUFS); | 1018 | drm_free(x, xbuf_size, DRM_MEM_BUFS); |
1019 | drm_free(y, ybuf_size, DRM_MEM_BUFS); | 1019 | drm_free(y, ybuf_size, DRM_MEM_BUFS); |
1020 | return DRM_ERR(EFAULT); | 1020 | return -EFAULT; |
1021 | } | 1021 | } |
1022 | 1022 | ||
1023 | buffer_size = depth->n * sizeof(u32); | 1023 | buffer_size = depth->n * sizeof(u32); |
@@ -1025,13 +1025,13 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev, | |||
1025 | if (buffer == NULL) { | 1025 | if (buffer == NULL) { |
1026 | drm_free(x, xbuf_size, DRM_MEM_BUFS); | 1026 | drm_free(x, xbuf_size, DRM_MEM_BUFS); |
1027 | drm_free(y, ybuf_size, DRM_MEM_BUFS); | 1027 | drm_free(y, ybuf_size, DRM_MEM_BUFS); |
1028 | return DRM_ERR(ENOMEM); | 1028 | return -ENOMEM; |
1029 | } | 1029 | } |
1030 | if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { | 1030 | if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { |
1031 | drm_free(x, xbuf_size, DRM_MEM_BUFS); | 1031 | drm_free(x, xbuf_size, DRM_MEM_BUFS); |
1032 | drm_free(y, ybuf_size, DRM_MEM_BUFS); | 1032 | drm_free(y, ybuf_size, DRM_MEM_BUFS); |
1033 | drm_free(buffer, buffer_size, DRM_MEM_BUFS); | 1033 | drm_free(buffer, buffer_size, DRM_MEM_BUFS); |
1034 | return DRM_ERR(EFAULT); | 1034 | return -EFAULT; |
1035 | } | 1035 | } |
1036 | 1036 | ||
1037 | if (depth->mask) { | 1037 | if (depth->mask) { |
@@ -1041,14 +1041,14 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev, | |||
1041 | drm_free(x, xbuf_size, DRM_MEM_BUFS); | 1041 | drm_free(x, xbuf_size, DRM_MEM_BUFS); |
1042 | drm_free(y, ybuf_size, DRM_MEM_BUFS); | 1042 | drm_free(y, ybuf_size, DRM_MEM_BUFS); |
1043 | drm_free(buffer, buffer_size, DRM_MEM_BUFS); | 1043 | drm_free(buffer, buffer_size, DRM_MEM_BUFS); |
1044 | return DRM_ERR(ENOMEM); | 1044 | return -ENOMEM; |
1045 | } | 1045 | } |
1046 | if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { | 1046 | if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { |
1047 | drm_free(x, xbuf_size, DRM_MEM_BUFS); | 1047 | drm_free(x, xbuf_size, DRM_MEM_BUFS); |
1048 | drm_free(y, ybuf_size, DRM_MEM_BUFS); | 1048 | drm_free(y, ybuf_size, DRM_MEM_BUFS); |
1049 | drm_free(buffer, buffer_size, DRM_MEM_BUFS); | 1049 | drm_free(buffer, buffer_size, DRM_MEM_BUFS); |
1050 | drm_free(mask, mask_size, DRM_MEM_BUFS); | 1050 | drm_free(mask, mask_size, DRM_MEM_BUFS); |
1051 | return DRM_ERR(EFAULT); | 1051 | return -EFAULT; |
1052 | } | 1052 | } |
1053 | 1053 | ||
1054 | for (i = 0; i < count; i++) { | 1054 | for (i = 0; i < count; i++) { |
@@ -1115,13 +1115,13 @@ static int r128_cce_dispatch_read_span(struct drm_device * dev, | |||
1115 | 1115 | ||
1116 | count = depth->n; | 1116 | count = depth->n; |
1117 | if (count > 4096 || count <= 0) | 1117 | if (count > 4096 || count <= 0) |
1118 | return DRM_ERR(EMSGSIZE); | 1118 | return -EMSGSIZE; |
1119 | 1119 | ||
1120 | if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) { | 1120 | if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) { |
1121 | return DRM_ERR(EFAULT); | 1121 | return -EFAULT; |
1122 | } | 1122 | } |
1123 | if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) { | 1123 | if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) { |
1124 | return DRM_ERR(EFAULT); | 1124 | return -EFAULT; |
1125 | } | 1125 | } |
1126 | 1126 | ||
1127 | BEGIN_RING(7); | 1127 | BEGIN_RING(7); |
@@ -1159,7 +1159,7 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev, | |||
1159 | 1159 | ||
1160 | count = depth->n; | 1160 | count = depth->n; |
1161 | if (count > 4096 || count <= 0) | 1161 | if (count > 4096 || count <= 0) |
1162 | return DRM_ERR(EMSGSIZE); | 1162 | return -EMSGSIZE; |
1163 | 1163 | ||
1164 | if (count > dev_priv->depth_pitch) { | 1164 | if (count > dev_priv->depth_pitch) { |
1165 | count = dev_priv->depth_pitch; | 1165 | count = dev_priv->depth_pitch; |
@@ -1169,22 +1169,22 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev, | |||
1169 | ybuf_size = count * sizeof(*y); | 1169 | ybuf_size = count * sizeof(*y); |
1170 | x = drm_alloc(xbuf_size, DRM_MEM_BUFS); | 1170 | x = drm_alloc(xbuf_size, DRM_MEM_BUFS); |
1171 | if (x == NULL) { | 1171 | if (x == NULL) { |
1172 | return DRM_ERR(ENOMEM); | 1172 | return -ENOMEM; |
1173 | } | 1173 | } |
1174 | y = drm_alloc(ybuf_size, DRM_MEM_BUFS); | 1174 | y = drm_alloc(ybuf_size, DRM_MEM_BUFS); |
1175 | if (y == NULL) { | 1175 | if (y == NULL) { |
1176 | drm_free(x, xbuf_size, DRM_MEM_BUFS); | 1176 | drm_free(x, xbuf_size, DRM_MEM_BUFS); |
1177 | return DRM_ERR(ENOMEM); | 1177 | return -ENOMEM; |
1178 | } | 1178 | } |
1179 | if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { | 1179 | if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { |
1180 | drm_free(x, xbuf_size, DRM_MEM_BUFS); | 1180 | drm_free(x, xbuf_size, DRM_MEM_BUFS); |
1181 | drm_free(y, ybuf_size, DRM_MEM_BUFS); | 1181 | drm_free(y, ybuf_size, DRM_MEM_BUFS); |
1182 | return DRM_ERR(EFAULT); | 1182 | return -EFAULT; |
1183 | } | 1183 | } |
1184 | if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) { | 1184 | if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) { |
1185 | drm_free(x, xbuf_size, DRM_MEM_BUFS); | 1185 | drm_free(x, xbuf_size, DRM_MEM_BUFS); |
1186 | drm_free(y, ybuf_size, DRM_MEM_BUFS); | 1186 | drm_free(y, ybuf_size, DRM_MEM_BUFS); |
1187 | return DRM_ERR(EFAULT); | 1187 | return -EFAULT; |
1188 | } | 1188 | } |
1189 | 1189 | ||
1190 | for (i = 0; i < count; i++) { | 1190 | for (i = 0; i < count; i++) { |
@@ -1363,7 +1363,7 @@ static int r128_cce_vertex(DRM_IOCTL_ARGS) | |||
1363 | 1363 | ||
1364 | if (!dev_priv) { | 1364 | if (!dev_priv) { |
1365 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 1365 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
1366 | return DRM_ERR(EINVAL); | 1366 | return -EINVAL; |
1367 | } | 1367 | } |
1368 | 1368 | ||
1369 | DRM_COPY_FROM_USER_IOCTL(vertex, (drm_r128_vertex_t __user *) data, | 1369 | DRM_COPY_FROM_USER_IOCTL(vertex, (drm_r128_vertex_t __user *) data, |
@@ -1375,12 +1375,12 @@ static int r128_cce_vertex(DRM_IOCTL_ARGS) | |||
1375 | if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { | 1375 | if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { |
1376 | DRM_ERROR("buffer index %d (of %d max)\n", | 1376 | DRM_ERROR("buffer index %d (of %d max)\n", |
1377 | vertex.idx, dma->buf_count - 1); | 1377 | vertex.idx, dma->buf_count - 1); |
1378 | return DRM_ERR(EINVAL); | 1378 | return -EINVAL; |
1379 | } | 1379 | } |
1380 | if (vertex.prim < 0 || | 1380 | if (vertex.prim < 0 || |
1381 | vertex.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { | 1381 | vertex.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { |
1382 | DRM_ERROR("buffer prim %d\n", vertex.prim); | 1382 | DRM_ERROR("buffer prim %d\n", vertex.prim); |
1383 | return DRM_ERR(EINVAL); | 1383 | return -EINVAL; |
1384 | } | 1384 | } |
1385 | 1385 | ||
1386 | RING_SPACE_TEST_WITH_RETURN(dev_priv); | 1386 | RING_SPACE_TEST_WITH_RETURN(dev_priv); |
@@ -1392,11 +1392,11 @@ static int r128_cce_vertex(DRM_IOCTL_ARGS) | |||
1392 | if (buf->filp != filp) { | 1392 | if (buf->filp != filp) { |
1393 | DRM_ERROR("process %d using buffer owned by %p\n", | 1393 | DRM_ERROR("process %d using buffer owned by %p\n", |
1394 | DRM_CURRENTPID, buf->filp); | 1394 | DRM_CURRENTPID, buf->filp); |
1395 | return DRM_ERR(EINVAL); | 1395 | return -EINVAL; |
1396 | } | 1396 | } |
1397 | if (buf->pending) { | 1397 | if (buf->pending) { |
1398 | DRM_ERROR("sending pending buffer %d\n", vertex.idx); | 1398 | DRM_ERROR("sending pending buffer %d\n", vertex.idx); |
1399 | return DRM_ERR(EINVAL); | 1399 | return -EINVAL; |
1400 | } | 1400 | } |
1401 | 1401 | ||
1402 | buf->used = vertex.count; | 1402 | buf->used = vertex.count; |
@@ -1423,7 +1423,7 @@ static int r128_cce_indices(DRM_IOCTL_ARGS) | |||
1423 | 1423 | ||
1424 | if (!dev_priv) { | 1424 | if (!dev_priv) { |
1425 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 1425 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
1426 | return DRM_ERR(EINVAL); | 1426 | return -EINVAL; |
1427 | } | 1427 | } |
1428 | 1428 | ||
1429 | DRM_COPY_FROM_USER_IOCTL(elts, (drm_r128_indices_t __user *) data, | 1429 | DRM_COPY_FROM_USER_IOCTL(elts, (drm_r128_indices_t __user *) data, |
@@ -1435,11 +1435,11 @@ static int r128_cce_indices(DRM_IOCTL_ARGS) | |||
1435 | if (elts.idx < 0 || elts.idx >= dma->buf_count) { | 1435 | if (elts.idx < 0 || elts.idx >= dma->buf_count) { |
1436 | DRM_ERROR("buffer index %d (of %d max)\n", | 1436 | DRM_ERROR("buffer index %d (of %d max)\n", |
1437 | elts.idx, dma->buf_count - 1); | 1437 | elts.idx, dma->buf_count - 1); |
1438 | return DRM_ERR(EINVAL); | 1438 | return -EINVAL; |
1439 | } | 1439 | } |
1440 | if (elts.prim < 0 || elts.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { | 1440 | if (elts.prim < 0 || elts.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { |
1441 | DRM_ERROR("buffer prim %d\n", elts.prim); | 1441 | DRM_ERROR("buffer prim %d\n", elts.prim); |
1442 | return DRM_ERR(EINVAL); | 1442 | return -EINVAL; |
1443 | } | 1443 | } |
1444 | 1444 | ||
1445 | RING_SPACE_TEST_WITH_RETURN(dev_priv); | 1445 | RING_SPACE_TEST_WITH_RETURN(dev_priv); |
@@ -1451,11 +1451,11 @@ static int r128_cce_indices(DRM_IOCTL_ARGS) | |||
1451 | if (buf->filp != filp) { | 1451 | if (buf->filp != filp) { |
1452 | DRM_ERROR("process %d using buffer owned by %p\n", | 1452 | DRM_ERROR("process %d using buffer owned by %p\n", |
1453 | DRM_CURRENTPID, buf->filp); | 1453 | DRM_CURRENTPID, buf->filp); |
1454 | return DRM_ERR(EINVAL); | 1454 | return -EINVAL; |
1455 | } | 1455 | } |
1456 | if (buf->pending) { | 1456 | if (buf->pending) { |
1457 | DRM_ERROR("sending pending buffer %d\n", elts.idx); | 1457 | DRM_ERROR("sending pending buffer %d\n", elts.idx); |
1458 | return DRM_ERR(EINVAL); | 1458 | return -EINVAL; |
1459 | } | 1459 | } |
1460 | 1460 | ||
1461 | count = (elts.end - elts.start) / sizeof(u16); | 1461 | count = (elts.end - elts.start) / sizeof(u16); |
@@ -1463,11 +1463,11 @@ static int r128_cce_indices(DRM_IOCTL_ARGS) | |||
1463 | 1463 | ||
1464 | if (elts.start & 0x7) { | 1464 | if (elts.start & 0x7) { |
1465 | DRM_ERROR("misaligned buffer 0x%x\n", elts.start); | 1465 | DRM_ERROR("misaligned buffer 0x%x\n", elts.start); |
1466 | return DRM_ERR(EINVAL); | 1466 | return -EINVAL; |
1467 | } | 1467 | } |
1468 | if (elts.start < buf->used) { | 1468 | if (elts.start < buf->used) { |
1469 | DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used); | 1469 | DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used); |
1470 | return DRM_ERR(EINVAL); | 1470 | return -EINVAL; |
1471 | } | 1471 | } |
1472 | 1472 | ||
1473 | buf->used = elts.end; | 1473 | buf->used = elts.end; |
@@ -1498,7 +1498,7 @@ static int r128_cce_blit(DRM_IOCTL_ARGS) | |||
1498 | if (blit.idx < 0 || blit.idx >= dma->buf_count) { | 1498 | if (blit.idx < 0 || blit.idx >= dma->buf_count) { |
1499 | DRM_ERROR("buffer index %d (of %d max)\n", | 1499 | DRM_ERROR("buffer index %d (of %d max)\n", |
1500 | blit.idx, dma->buf_count - 1); | 1500 | blit.idx, dma->buf_count - 1); |
1501 | return DRM_ERR(EINVAL); | 1501 | return -EINVAL; |
1502 | } | 1502 | } |
1503 | 1503 | ||
1504 | RING_SPACE_TEST_WITH_RETURN(dev_priv); | 1504 | RING_SPACE_TEST_WITH_RETURN(dev_priv); |
@@ -1524,7 +1524,7 @@ static int r128_cce_depth(DRM_IOCTL_ARGS) | |||
1524 | 1524 | ||
1525 | RING_SPACE_TEST_WITH_RETURN(dev_priv); | 1525 | RING_SPACE_TEST_WITH_RETURN(dev_priv); |
1526 | 1526 | ||
1527 | ret = DRM_ERR(EINVAL); | 1527 | ret = -EINVAL; |
1528 | switch (depth.func) { | 1528 | switch (depth.func) { |
1529 | case R128_WRITE_SPAN: | 1529 | case R128_WRITE_SPAN: |
1530 | ret = r128_cce_dispatch_write_span(dev, &depth); | 1530 | ret = r128_cce_dispatch_write_span(dev, &depth); |
@@ -1557,7 +1557,7 @@ static int r128_cce_stipple(DRM_IOCTL_ARGS) | |||
1557 | sizeof(stipple)); | 1557 | sizeof(stipple)); |
1558 | 1558 | ||
1559 | if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32))) | 1559 | if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32))) |
1560 | return DRM_ERR(EFAULT); | 1560 | return -EFAULT; |
1561 | 1561 | ||
1562 | RING_SPACE_TEST_WITH_RETURN(dev_priv); | 1562 | RING_SPACE_TEST_WITH_RETURN(dev_priv); |
1563 | 1563 | ||
@@ -1583,7 +1583,7 @@ static int r128_cce_indirect(DRM_IOCTL_ARGS) | |||
1583 | 1583 | ||
1584 | if (!dev_priv) { | 1584 | if (!dev_priv) { |
1585 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 1585 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
1586 | return DRM_ERR(EINVAL); | 1586 | return -EINVAL; |
1587 | } | 1587 | } |
1588 | 1588 | ||
1589 | DRM_COPY_FROM_USER_IOCTL(indirect, (drm_r128_indirect_t __user *) data, | 1589 | DRM_COPY_FROM_USER_IOCTL(indirect, (drm_r128_indirect_t __user *) data, |
@@ -1595,7 +1595,7 @@ static int r128_cce_indirect(DRM_IOCTL_ARGS) | |||
1595 | if (indirect.idx < 0 || indirect.idx >= dma->buf_count) { | 1595 | if (indirect.idx < 0 || indirect.idx >= dma->buf_count) { |
1596 | DRM_ERROR("buffer index %d (of %d max)\n", | 1596 | DRM_ERROR("buffer index %d (of %d max)\n", |
1597 | indirect.idx, dma->buf_count - 1); | 1597 | indirect.idx, dma->buf_count - 1); |
1598 | return DRM_ERR(EINVAL); | 1598 | return -EINVAL; |
1599 | } | 1599 | } |
1600 | 1600 | ||
1601 | buf = dma->buflist[indirect.idx]; | 1601 | buf = dma->buflist[indirect.idx]; |
@@ -1604,17 +1604,17 @@ static int r128_cce_indirect(DRM_IOCTL_ARGS) | |||
1604 | if (buf->filp != filp) { | 1604 | if (buf->filp != filp) { |
1605 | DRM_ERROR("process %d using buffer owned by %p\n", | 1605 | DRM_ERROR("process %d using buffer owned by %p\n", |
1606 | DRM_CURRENTPID, buf->filp); | 1606 | DRM_CURRENTPID, buf->filp); |
1607 | return DRM_ERR(EINVAL); | 1607 | return -EINVAL; |
1608 | } | 1608 | } |
1609 | if (buf->pending) { | 1609 | if (buf->pending) { |
1610 | DRM_ERROR("sending pending buffer %d\n", indirect.idx); | 1610 | DRM_ERROR("sending pending buffer %d\n", indirect.idx); |
1611 | return DRM_ERR(EINVAL); | 1611 | return -EINVAL; |
1612 | } | 1612 | } |
1613 | 1613 | ||
1614 | if (indirect.start < buf->used) { | 1614 | if (indirect.start < buf->used) { |
1615 | DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n", | 1615 | DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n", |
1616 | indirect.start, buf->used); | 1616 | indirect.start, buf->used); |
1617 | return DRM_ERR(EINVAL); | 1617 | return -EINVAL; |
1618 | } | 1618 | } |
1619 | 1619 | ||
1620 | RING_SPACE_TEST_WITH_RETURN(dev_priv); | 1620 | RING_SPACE_TEST_WITH_RETURN(dev_priv); |
@@ -1651,7 +1651,7 @@ static int r128_getparam(DRM_IOCTL_ARGS) | |||
1651 | 1651 | ||
1652 | if (!dev_priv) { | 1652 | if (!dev_priv) { |
1653 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 1653 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
1654 | return DRM_ERR(EINVAL); | 1654 | return -EINVAL; |
1655 | } | 1655 | } |
1656 | 1656 | ||
1657 | DRM_COPY_FROM_USER_IOCTL(param, (drm_r128_getparam_t __user *) data, | 1657 | DRM_COPY_FROM_USER_IOCTL(param, (drm_r128_getparam_t __user *) data, |
@@ -1664,12 +1664,12 @@ static int r128_getparam(DRM_IOCTL_ARGS) | |||
1664 | value = dev->irq; | 1664 | value = dev->irq; |
1665 | break; | 1665 | break; |
1666 | default: | 1666 | default: |
1667 | return DRM_ERR(EINVAL); | 1667 | return -EINVAL; |
1668 | } | 1668 | } |
1669 | 1669 | ||
1670 | if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { | 1670 | if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { |
1671 | DRM_ERROR("copy_to_user\n"); | 1671 | DRM_ERROR("copy_to_user\n"); |
1672 | return DRM_ERR(EFAULT); | 1672 | return -EFAULT; |
1673 | } | 1673 | } |
1674 | 1674 | ||
1675 | return 0; | 1675 | return 0; |
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c index 4e5aca6ba59a..669aee68ce24 100644 --- a/drivers/char/drm/r300_cmdbuf.c +++ b/drivers/char/drm/r300_cmdbuf.c | |||
@@ -74,7 +74,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv, | |||
74 | if (DRM_COPY_FROM_USER_UNCHECKED | 74 | if (DRM_COPY_FROM_USER_UNCHECKED |
75 | (&box, &cmdbuf->boxes[n + i], sizeof(box))) { | 75 | (&box, &cmdbuf->boxes[n + i], sizeof(box))) { |
76 | DRM_ERROR("copy cliprect faulted\n"); | 76 | DRM_ERROR("copy cliprect faulted\n"); |
77 | return DRM_ERR(EFAULT); | 77 | return -EFAULT; |
78 | } | 78 | } |
79 | 79 | ||
80 | box.x1 = | 80 | box.x1 = |
@@ -263,7 +263,7 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t * | |||
263 | DRM_ERROR | 263 | DRM_ERROR |
264 | ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", | 264 | ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", |
265 | reg, sz); | 265 | reg, sz); |
266 | return DRM_ERR(EINVAL); | 266 | return -EINVAL; |
267 | } | 267 | } |
268 | for (i = 0; i < sz; i++) { | 268 | for (i = 0; i < sz; i++) { |
269 | values[i] = ((int *)cmdbuf->buf)[i]; | 269 | values[i] = ((int *)cmdbuf->buf)[i]; |
@@ -275,13 +275,13 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t * | |||
275 | DRM_ERROR | 275 | DRM_ERROR |
276 | ("Offset failed range check (reg=%04x sz=%d)\n", | 276 | ("Offset failed range check (reg=%04x sz=%d)\n", |
277 | reg, sz); | 277 | reg, sz); |
278 | return DRM_ERR(EINVAL); | 278 | return -EINVAL; |
279 | } | 279 | } |
280 | break; | 280 | break; |
281 | default: | 281 | default: |
282 | DRM_ERROR("Register %04x failed check as flag=%02x\n", | 282 | DRM_ERROR("Register %04x failed check as flag=%02x\n", |
283 | reg + i * 4, r300_reg_flags[(reg >> 2) + i]); | 283 | reg + i * 4, r300_reg_flags[(reg >> 2) + i]); |
284 | return DRM_ERR(EINVAL); | 284 | return -EINVAL; |
285 | } | 285 | } |
286 | } | 286 | } |
287 | 287 | ||
@@ -317,12 +317,12 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv, | |||
317 | return 0; | 317 | return 0; |
318 | 318 | ||
319 | if (sz * 4 > cmdbuf->bufsz) | 319 | if (sz * 4 > cmdbuf->bufsz) |
320 | return DRM_ERR(EINVAL); | 320 | return -EINVAL; |
321 | 321 | ||
322 | if (reg + sz * 4 >= 0x10000) { | 322 | if (reg + sz * 4 >= 0x10000) { |
323 | DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg, | 323 | DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg, |
324 | sz); | 324 | sz); |
325 | return DRM_ERR(EINVAL); | 325 | return -EINVAL; |
326 | } | 326 | } |
327 | 327 | ||
328 | if (r300_check_range(reg, sz)) { | 328 | if (r300_check_range(reg, sz)) { |
@@ -362,7 +362,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv, | |||
362 | if (!sz) | 362 | if (!sz) |
363 | return 0; | 363 | return 0; |
364 | if (sz * 16 > cmdbuf->bufsz) | 364 | if (sz * 16 > cmdbuf->bufsz) |
365 | return DRM_ERR(EINVAL); | 365 | return -EINVAL; |
366 | 366 | ||
367 | BEGIN_RING(5 + sz * 4); | 367 | BEGIN_RING(5 + sz * 4); |
368 | /* Wait for VAP to come to senses.. */ | 368 | /* Wait for VAP to come to senses.. */ |
@@ -391,7 +391,7 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv, | |||
391 | RING_LOCALS; | 391 | RING_LOCALS; |
392 | 392 | ||
393 | if (8 * 4 > cmdbuf->bufsz) | 393 | if (8 * 4 > cmdbuf->bufsz) |
394 | return DRM_ERR(EINVAL); | 394 | return -EINVAL; |
395 | 395 | ||
396 | BEGIN_RING(10); | 396 | BEGIN_RING(10); |
397 | OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8)); | 397 | OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8)); |
@@ -421,7 +421,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, | |||
421 | if ((count + 1) > MAX_ARRAY_PACKET) { | 421 | if ((count + 1) > MAX_ARRAY_PACKET) { |
422 | DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", | 422 | DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", |
423 | count); | 423 | count); |
424 | return DRM_ERR(EINVAL); | 424 | return -EINVAL; |
425 | } | 425 | } |
426 | memset(payload, 0, MAX_ARRAY_PACKET * 4); | 426 | memset(payload, 0, MAX_ARRAY_PACKET * 4); |
427 | memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4); | 427 | memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4); |
@@ -437,7 +437,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, | |||
437 | DRM_ERROR | 437 | DRM_ERROR |
438 | ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", | 438 | ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", |
439 | k, i); | 439 | k, i); |
440 | return DRM_ERR(EINVAL); | 440 | return -EINVAL; |
441 | } | 441 | } |
442 | k++; | 442 | k++; |
443 | i++; | 443 | i++; |
@@ -448,7 +448,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, | |||
448 | DRM_ERROR | 448 | DRM_ERROR |
449 | ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", | 449 | ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", |
450 | k, i); | 450 | k, i); |
451 | return DRM_ERR(EINVAL); | 451 | return -EINVAL; |
452 | } | 452 | } |
453 | k++; | 453 | k++; |
454 | i++; | 454 | i++; |
@@ -458,7 +458,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, | |||
458 | DRM_ERROR | 458 | DRM_ERROR |
459 | ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", | 459 | ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", |
460 | k, i, narrays, count + 1); | 460 | k, i, narrays, count + 1); |
461 | return DRM_ERR(EINVAL); | 461 | return -EINVAL; |
462 | } | 462 | } |
463 | 463 | ||
464 | /* all clear, output packet */ | 464 | /* all clear, output packet */ |
@@ -492,7 +492,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, | |||
492 | ret = !radeon_check_offset(dev_priv, offset); | 492 | ret = !radeon_check_offset(dev_priv, offset); |
493 | if (ret) { | 493 | if (ret) { |
494 | DRM_ERROR("Invalid bitblt first offset is %08X\n", offset); | 494 | DRM_ERROR("Invalid bitblt first offset is %08X\n", offset); |
495 | return DRM_ERR(EINVAL); | 495 | return -EINVAL; |
496 | } | 496 | } |
497 | } | 497 | } |
498 | 498 | ||
@@ -502,7 +502,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, | |||
502 | ret = !radeon_check_offset(dev_priv, offset); | 502 | ret = !radeon_check_offset(dev_priv, offset); |
503 | if (ret) { | 503 | if (ret) { |
504 | DRM_ERROR("Invalid bitblt second offset is %08X\n", offset); | 504 | DRM_ERROR("Invalid bitblt second offset is %08X\n", offset); |
505 | return DRM_ERR(EINVAL); | 505 | return -EINVAL; |
506 | } | 506 | } |
507 | 507 | ||
508 | } | 508 | } |
@@ -530,12 +530,12 @@ static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv, | |||
530 | 530 | ||
531 | if ((cmd[1] & 0x8000ffff) != 0x80000810) { | 531 | if ((cmd[1] & 0x8000ffff) != 0x80000810) { |
532 | DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); | 532 | DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); |
533 | return DRM_ERR(EINVAL); | 533 | return -EINVAL; |
534 | } | 534 | } |
535 | ret = !radeon_check_offset(dev_priv, cmd[2]); | 535 | ret = !radeon_check_offset(dev_priv, cmd[2]); |
536 | if (ret) { | 536 | if (ret) { |
537 | DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); | 537 | DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); |
538 | return DRM_ERR(EINVAL); | 538 | return -EINVAL; |
539 | } | 539 | } |
540 | 540 | ||
541 | BEGIN_RING(count+2); | 541 | BEGIN_RING(count+2); |
@@ -557,7 +557,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, | |||
557 | RING_LOCALS; | 557 | RING_LOCALS; |
558 | 558 | ||
559 | if (4 > cmdbuf->bufsz) | 559 | if (4 > cmdbuf->bufsz) |
560 | return DRM_ERR(EINVAL); | 560 | return -EINVAL; |
561 | 561 | ||
562 | /* Fixme !! This simply emits a packet without much checking. | 562 | /* Fixme !! This simply emits a packet without much checking. |
563 | We need to be smarter. */ | 563 | We need to be smarter. */ |
@@ -568,7 +568,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, | |||
568 | /* Is it packet 3 ? */ | 568 | /* Is it packet 3 ? */ |
569 | if ((header >> 30) != 0x3) { | 569 | if ((header >> 30) != 0x3) { |
570 | DRM_ERROR("Not a packet3 header (0x%08x)\n", header); | 570 | DRM_ERROR("Not a packet3 header (0x%08x)\n", header); |
571 | return DRM_ERR(EINVAL); | 571 | return -EINVAL; |
572 | } | 572 | } |
573 | 573 | ||
574 | count = (header >> 16) & 0x3fff; | 574 | count = (header >> 16) & 0x3fff; |
@@ -578,7 +578,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, | |||
578 | DRM_ERROR | 578 | DRM_ERROR |
579 | ("Expected packet3 of length %d but have only %d bytes left\n", | 579 | ("Expected packet3 of length %d but have only %d bytes left\n", |
580 | (count + 2) * 4, cmdbuf->bufsz); | 580 | (count + 2) * 4, cmdbuf->bufsz); |
581 | return DRM_ERR(EINVAL); | 581 | return -EINVAL; |
582 | } | 582 | } |
583 | 583 | ||
584 | /* Is it a packet type we know about ? */ | 584 | /* Is it a packet type we know about ? */ |
@@ -600,7 +600,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, | |||
600 | break; | 600 | break; |
601 | default: | 601 | default: |
602 | DRM_ERROR("Unknown packet3 header (0x%08x)\n", header); | 602 | DRM_ERROR("Unknown packet3 header (0x%08x)\n", header); |
603 | return DRM_ERR(EINVAL); | 603 | return -EINVAL; |
604 | } | 604 | } |
605 | 605 | ||
606 | BEGIN_RING(count + 2); | 606 | BEGIN_RING(count + 2); |
@@ -664,7 +664,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv, | |||
664 | DRM_ERROR("bad packet3 type %i at %p\n", | 664 | DRM_ERROR("bad packet3 type %i at %p\n", |
665 | header.packet3.packet, | 665 | header.packet3.packet, |
666 | cmdbuf->buf - sizeof(header)); | 666 | cmdbuf->buf - sizeof(header)); |
667 | return DRM_ERR(EINVAL); | 667 | return -EINVAL; |
668 | } | 668 | } |
669 | 669 | ||
670 | n += R300_SIMULTANEOUS_CLIPRECTS; | 670 | n += R300_SIMULTANEOUS_CLIPRECTS; |
@@ -726,11 +726,11 @@ static int r300_scratch(drm_radeon_private_t *dev_priv, | |||
726 | 726 | ||
727 | if (cmdbuf->bufsz < | 727 | if (cmdbuf->bufsz < |
728 | (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) { | 728 | (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) { |
729 | return DRM_ERR(EINVAL); | 729 | return -EINVAL; |
730 | } | 730 | } |
731 | 731 | ||
732 | if (header.scratch.reg >= 5) { | 732 | if (header.scratch.reg >= 5) { |
733 | return DRM_ERR(EINVAL); | 733 | return -EINVAL; |
734 | } | 734 | } |
735 | 735 | ||
736 | dev_priv->scratch_ages[header.scratch.reg]++; | 736 | dev_priv->scratch_ages[header.scratch.reg]++; |
@@ -745,21 +745,21 @@ static int r300_scratch(drm_radeon_private_t *dev_priv, | |||
745 | buf_idx *= 2; /* 8 bytes per buf */ | 745 | buf_idx *= 2; /* 8 bytes per buf */ |
746 | 746 | ||
747 | if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) { | 747 | if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) { |
748 | return DRM_ERR(EINVAL); | 748 | return -EINVAL; |
749 | } | 749 | } |
750 | 750 | ||
751 | if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) { | 751 | if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) { |
752 | return DRM_ERR(EINVAL); | 752 | return -EINVAL; |
753 | } | 753 | } |
754 | 754 | ||
755 | if (h_pending == 0) { | 755 | if (h_pending == 0) { |
756 | return DRM_ERR(EINVAL); | 756 | return -EINVAL; |
757 | } | 757 | } |
758 | 758 | ||
759 | h_pending--; | 759 | h_pending--; |
760 | 760 | ||
761 | if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) { | 761 | if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) { |
762 | return DRM_ERR(EINVAL); | 762 | return -EINVAL; |
763 | } | 763 | } |
764 | 764 | ||
765 | cmdbuf->buf += sizeof(buf_idx); | 765 | cmdbuf->buf += sizeof(buf_idx); |
@@ -879,7 +879,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, | |||
879 | if (idx < 0 || idx >= dma->buf_count) { | 879 | if (idx < 0 || idx >= dma->buf_count) { |
880 | DRM_ERROR("buffer index %d (of %d max)\n", | 880 | DRM_ERROR("buffer index %d (of %d max)\n", |
881 | idx, dma->buf_count - 1); | 881 | idx, dma->buf_count - 1); |
882 | ret = DRM_ERR(EINVAL); | 882 | ret = -EINVAL; |
883 | goto cleanup; | 883 | goto cleanup; |
884 | } | 884 | } |
885 | 885 | ||
@@ -887,7 +887,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, | |||
887 | if (buf->filp != filp || buf->pending) { | 887 | if (buf->filp != filp || buf->pending) { |
888 | DRM_ERROR("bad buffer %p %p %d\n", | 888 | DRM_ERROR("bad buffer %p %p %d\n", |
889 | buf->filp, filp, buf->pending); | 889 | buf->filp, filp, buf->pending); |
890 | ret = DRM_ERR(EINVAL); | 890 | ret = -EINVAL; |
891 | goto cleanup; | 891 | goto cleanup; |
892 | } | 892 | } |
893 | 893 | ||
@@ -924,7 +924,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, | |||
924 | DRM_ERROR("bad cmd_type %i at %p\n", | 924 | DRM_ERROR("bad cmd_type %i at %p\n", |
925 | header.header.cmd_type, | 925 | header.header.cmd_type, |
926 | cmdbuf->buf - sizeof(header)); | 926 | cmdbuf->buf - sizeof(header)); |
927 | ret = DRM_ERR(EINVAL); | 927 | ret = -EINVAL; |
928 | goto cleanup; | 928 | goto cleanup; |
929 | } | 929 | } |
930 | } | 930 | } |
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c index af5790f8fd53..a023fce3f1b1 100644 --- a/drivers/char/drm/radeon_cp.c +++ b/drivers/char/drm/radeon_cp.c | |||
@@ -889,7 +889,7 @@ static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv) | |||
889 | DRM_ERROR("failed!\n"); | 889 | DRM_ERROR("failed!\n"); |
890 | radeon_status(dev_priv); | 890 | radeon_status(dev_priv); |
891 | #endif | 891 | #endif |
892 | return DRM_ERR(EBUSY); | 892 | return -EBUSY; |
893 | } | 893 | } |
894 | 894 | ||
895 | static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries) | 895 | static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries) |
@@ -910,7 +910,7 @@ static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries) | |||
910 | DRM_ERROR("failed!\n"); | 910 | DRM_ERROR("failed!\n"); |
911 | radeon_status(dev_priv); | 911 | radeon_status(dev_priv); |
912 | #endif | 912 | #endif |
913 | return DRM_ERR(EBUSY); | 913 | return -EBUSY; |
914 | } | 914 | } |
915 | 915 | ||
916 | static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) | 916 | static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) |
@@ -936,7 +936,7 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) | |||
936 | DRM_ERROR("failed!\n"); | 936 | DRM_ERROR("failed!\n"); |
937 | radeon_status(dev_priv); | 937 | radeon_status(dev_priv); |
938 | #endif | 938 | #endif |
939 | return DRM_ERR(EBUSY); | 939 | return -EBUSY; |
940 | } | 940 | } |
941 | 941 | ||
942 | /* ================================================================ | 942 | /* ================================================================ |
@@ -1394,7 +1394,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) | |||
1394 | if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) { | 1394 | if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) { |
1395 | DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n"); | 1395 | DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n"); |
1396 | radeon_do_cleanup_cp(dev); | 1396 | radeon_do_cleanup_cp(dev); |
1397 | return DRM_ERR(EINVAL); | 1397 | return -EINVAL; |
1398 | } | 1398 | } |
1399 | 1399 | ||
1400 | if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) { | 1400 | if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) { |
@@ -1409,7 +1409,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) | |||
1409 | if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) { | 1409 | if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) { |
1410 | DRM_ERROR("PCI GART memory not allocated!\n"); | 1410 | DRM_ERROR("PCI GART memory not allocated!\n"); |
1411 | radeon_do_cleanup_cp(dev); | 1411 | radeon_do_cleanup_cp(dev); |
1412 | return DRM_ERR(EINVAL); | 1412 | return -EINVAL; |
1413 | } | 1413 | } |
1414 | 1414 | ||
1415 | dev_priv->usec_timeout = init->usec_timeout; | 1415 | dev_priv->usec_timeout = init->usec_timeout; |
@@ -1417,7 +1417,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) | |||
1417 | dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) { | 1417 | dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) { |
1418 | DRM_DEBUG("TIMEOUT problem!\n"); | 1418 | DRM_DEBUG("TIMEOUT problem!\n"); |
1419 | radeon_do_cleanup_cp(dev); | 1419 | radeon_do_cleanup_cp(dev); |
1420 | return DRM_ERR(EINVAL); | 1420 | return -EINVAL; |
1421 | } | 1421 | } |
1422 | 1422 | ||
1423 | /* Enable vblank on CRTC1 for older X servers | 1423 | /* Enable vblank on CRTC1 for older X servers |
@@ -1446,7 +1446,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) | |||
1446 | (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) { | 1446 | (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) { |
1447 | DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode); | 1447 | DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode); |
1448 | radeon_do_cleanup_cp(dev); | 1448 | radeon_do_cleanup_cp(dev); |
1449 | return DRM_ERR(EINVAL); | 1449 | return -EINVAL; |
1450 | } | 1450 | } |
1451 | 1451 | ||
1452 | switch (init->fb_bpp) { | 1452 | switch (init->fb_bpp) { |
@@ -1515,27 +1515,27 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) | |||
1515 | if (!dev_priv->sarea) { | 1515 | if (!dev_priv->sarea) { |
1516 | DRM_ERROR("could not find sarea!\n"); | 1516 | DRM_ERROR("could not find sarea!\n"); |
1517 | radeon_do_cleanup_cp(dev); | 1517 | radeon_do_cleanup_cp(dev); |
1518 | return DRM_ERR(EINVAL); | 1518 | return -EINVAL; |
1519 | } | 1519 | } |
1520 | 1520 | ||
1521 | dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset); | 1521 | dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset); |
1522 | if (!dev_priv->cp_ring) { | 1522 | if (!dev_priv->cp_ring) { |
1523 | DRM_ERROR("could not find cp ring region!\n"); | 1523 | DRM_ERROR("could not find cp ring region!\n"); |
1524 | radeon_do_cleanup_cp(dev); | 1524 | radeon_do_cleanup_cp(dev); |
1525 | return DRM_ERR(EINVAL); | 1525 | return -EINVAL; |
1526 | } | 1526 | } |
1527 | dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); | 1527 | dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); |
1528 | if (!dev_priv->ring_rptr) { | 1528 | if (!dev_priv->ring_rptr) { |
1529 | DRM_ERROR("could not find ring read pointer!\n"); | 1529 | DRM_ERROR("could not find ring read pointer!\n"); |
1530 | radeon_do_cleanup_cp(dev); | 1530 | radeon_do_cleanup_cp(dev); |
1531 | return DRM_ERR(EINVAL); | 1531 | return -EINVAL; |
1532 | } | 1532 | } |
1533 | dev->agp_buffer_token = init->buffers_offset; | 1533 | dev->agp_buffer_token = init->buffers_offset; |
1534 | dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); | 1534 | dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); |
1535 | if (!dev->agp_buffer_map) { | 1535 | if (!dev->agp_buffer_map) { |
1536 | DRM_ERROR("could not find dma buffer region!\n"); | 1536 | DRM_ERROR("could not find dma buffer region!\n"); |
1537 | radeon_do_cleanup_cp(dev); | 1537 | radeon_do_cleanup_cp(dev); |
1538 | return DRM_ERR(EINVAL); | 1538 | return -EINVAL; |
1539 | } | 1539 | } |
1540 | 1540 | ||
1541 | if (init->gart_textures_offset) { | 1541 | if (init->gart_textures_offset) { |
@@ -1544,7 +1544,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) | |||
1544 | if (!dev_priv->gart_textures) { | 1544 | if (!dev_priv->gart_textures) { |
1545 | DRM_ERROR("could not find GART texture region!\n"); | 1545 | DRM_ERROR("could not find GART texture region!\n"); |
1546 | radeon_do_cleanup_cp(dev); | 1546 | radeon_do_cleanup_cp(dev); |
1547 | return DRM_ERR(EINVAL); | 1547 | return -EINVAL; |
1548 | } | 1548 | } |
1549 | } | 1549 | } |
1550 | 1550 | ||
@@ -1562,7 +1562,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) | |||
1562 | !dev->agp_buffer_map->handle) { | 1562 | !dev->agp_buffer_map->handle) { |
1563 | DRM_ERROR("could not find ioremap agp regions!\n"); | 1563 | DRM_ERROR("could not find ioremap agp regions!\n"); |
1564 | radeon_do_cleanup_cp(dev); | 1564 | radeon_do_cleanup_cp(dev); |
1565 | return DRM_ERR(EINVAL); | 1565 | return -EINVAL; |
1566 | } | 1566 | } |
1567 | } else | 1567 | } else |
1568 | #endif | 1568 | #endif |
@@ -1710,14 +1710,14 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) | |||
1710 | DRM_ERROR | 1710 | DRM_ERROR |
1711 | ("Cannot use PCI Express without GART in FB memory\n"); | 1711 | ("Cannot use PCI Express without GART in FB memory\n"); |
1712 | radeon_do_cleanup_cp(dev); | 1712 | radeon_do_cleanup_cp(dev); |
1713 | return DRM_ERR(EINVAL); | 1713 | return -EINVAL; |
1714 | } | 1714 | } |
1715 | } | 1715 | } |
1716 | 1716 | ||
1717 | if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) { | 1717 | if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) { |
1718 | DRM_ERROR("failed to init PCI GART!\n"); | 1718 | DRM_ERROR("failed to init PCI GART!\n"); |
1719 | radeon_do_cleanup_cp(dev); | 1719 | radeon_do_cleanup_cp(dev); |
1720 | return DRM_ERR(ENOMEM); | 1720 | return -ENOMEM; |
1721 | } | 1721 | } |
1722 | 1722 | ||
1723 | /* Turn on PCI GART */ | 1723 | /* Turn on PCI GART */ |
@@ -1797,7 +1797,7 @@ static int radeon_do_resume_cp(struct drm_device * dev) | |||
1797 | 1797 | ||
1798 | if (!dev_priv) { | 1798 | if (!dev_priv) { |
1799 | DRM_ERROR("Called with no initialization\n"); | 1799 | DRM_ERROR("Called with no initialization\n"); |
1800 | return DRM_ERR(EINVAL); | 1800 | return -EINVAL; |
1801 | } | 1801 | } |
1802 | 1802 | ||
1803 | DRM_DEBUG("Starting radeon_do_resume_cp()\n"); | 1803 | DRM_DEBUG("Starting radeon_do_resume_cp()\n"); |
@@ -1845,7 +1845,7 @@ int radeon_cp_init(DRM_IOCTL_ARGS) | |||
1845 | return radeon_do_cleanup_cp(dev); | 1845 | return radeon_do_cleanup_cp(dev); |
1846 | } | 1846 | } |
1847 | 1847 | ||
1848 | return DRM_ERR(EINVAL); | 1848 | return -EINVAL; |
1849 | } | 1849 | } |
1850 | 1850 | ||
1851 | int radeon_cp_start(DRM_IOCTL_ARGS) | 1851 | int radeon_cp_start(DRM_IOCTL_ARGS) |
@@ -1973,7 +1973,7 @@ int radeon_cp_reset(DRM_IOCTL_ARGS) | |||
1973 | 1973 | ||
1974 | if (!dev_priv) { | 1974 | if (!dev_priv) { |
1975 | DRM_DEBUG("%s called before init done\n", __FUNCTION__); | 1975 | DRM_DEBUG("%s called before init done\n", __FUNCTION__); |
1976 | return DRM_ERR(EINVAL); | 1976 | return -EINVAL; |
1977 | } | 1977 | } |
1978 | 1978 | ||
1979 | radeon_do_cp_reset(dev_priv); | 1979 | radeon_do_cp_reset(dev_priv); |
@@ -2167,7 +2167,7 @@ int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n) | |||
2167 | radeon_status(dev_priv); | 2167 | radeon_status(dev_priv); |
2168 | DRM_ERROR("failed!\n"); | 2168 | DRM_ERROR("failed!\n"); |
2169 | #endif | 2169 | #endif |
2170 | return DRM_ERR(EBUSY); | 2170 | return -EBUSY; |
2171 | } | 2171 | } |
2172 | 2172 | ||
2173 | static int radeon_cp_get_buffers(DRMFILE filp, struct drm_device * dev, | 2173 | static int radeon_cp_get_buffers(DRMFILE filp, struct drm_device * dev, |
@@ -2179,16 +2179,16 @@ static int radeon_cp_get_buffers(DRMFILE filp, struct drm_device * dev, | |||
2179 | for (i = d->granted_count; i < d->request_count; i++) { | 2179 | for (i = d->granted_count; i < d->request_count; i++) { |
2180 | buf = radeon_freelist_get(dev); | 2180 | buf = radeon_freelist_get(dev); |
2181 | if (!buf) | 2181 | if (!buf) |
2182 | return DRM_ERR(EBUSY); /* NOTE: broken client */ | 2182 | return -EBUSY; /* NOTE: broken client */ |
2183 | 2183 | ||
2184 | buf->filp = filp; | 2184 | buf->filp = filp; |
2185 | 2185 | ||
2186 | if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, | 2186 | if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, |
2187 | sizeof(buf->idx))) | 2187 | sizeof(buf->idx))) |
2188 | return DRM_ERR(EFAULT); | 2188 | return -EFAULT; |
2189 | if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, | 2189 | if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, |
2190 | sizeof(buf->total))) | 2190 | sizeof(buf->total))) |
2191 | return DRM_ERR(EFAULT); | 2191 | return -EFAULT; |
2192 | 2192 | ||
2193 | d->granted_count++; | 2193 | d->granted_count++; |
2194 | } | 2194 | } |
@@ -2212,7 +2212,7 @@ int radeon_cp_buffers(DRM_IOCTL_ARGS) | |||
2212 | if (d.send_count != 0) { | 2212 | if (d.send_count != 0) { |
2213 | DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", | 2213 | DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", |
2214 | DRM_CURRENTPID, d.send_count); | 2214 | DRM_CURRENTPID, d.send_count); |
2215 | return DRM_ERR(EINVAL); | 2215 | return -EINVAL; |
2216 | } | 2216 | } |
2217 | 2217 | ||
2218 | /* We'll send you buffers. | 2218 | /* We'll send you buffers. |
@@ -2220,7 +2220,7 @@ int radeon_cp_buffers(DRM_IOCTL_ARGS) | |||
2220 | if (d.request_count < 0 || d.request_count > dma->buf_count) { | 2220 | if (d.request_count < 0 || d.request_count > dma->buf_count) { |
2221 | DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", | 2221 | DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", |
2222 | DRM_CURRENTPID, d.request_count, dma->buf_count); | 2222 | DRM_CURRENTPID, d.request_count, dma->buf_count); |
2223 | return DRM_ERR(EINVAL); | 2223 | return -EINVAL; |
2224 | } | 2224 | } |
2225 | 2225 | ||
2226 | d.granted_count = 0; | 2226 | d.granted_count = 0; |
@@ -2241,7 +2241,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags) | |||
2241 | 2241 | ||
2242 | dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER); | 2242 | dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER); |
2243 | if (dev_priv == NULL) | 2243 | if (dev_priv == NULL) |
2244 | return DRM_ERR(ENOMEM); | 2244 | return -ENOMEM; |
2245 | 2245 | ||
2246 | memset(dev_priv, 0, sizeof(drm_radeon_private_t)); | 2246 | memset(dev_priv, 0, sizeof(drm_radeon_private_t)); |
2247 | dev->dev_private = (void *)dev_priv; | 2247 | dev->dev_private = (void *)dev_priv; |
diff --git a/drivers/char/drm/radeon_irq.c b/drivers/char/drm/radeon_irq.c index ad8a0ac7182e..88d19a2e85c0 100644 --- a/drivers/char/drm/radeon_irq.c +++ b/drivers/char/drm/radeon_irq.c | |||
@@ -155,7 +155,7 @@ int radeon_driver_vblank_do_wait(struct drm_device * dev, unsigned int *sequence | |||
155 | atomic_t *counter; | 155 | atomic_t *counter; |
156 | if (!dev_priv) { | 156 | if (!dev_priv) { |
157 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 157 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
158 | return DRM_ERR(EINVAL); | 158 | return -EINVAL; |
159 | } | 159 | } |
160 | 160 | ||
161 | if (crtc == DRM_RADEON_VBLANK_CRTC1) { | 161 | if (crtc == DRM_RADEON_VBLANK_CRTC1) { |
@@ -165,7 +165,7 @@ int radeon_driver_vblank_do_wait(struct drm_device * dev, unsigned int *sequence | |||
165 | counter = &dev->vbl_received2; | 165 | counter = &dev->vbl_received2; |
166 | ack |= RADEON_CRTC2_VBLANK_STAT; | 166 | ack |= RADEON_CRTC2_VBLANK_STAT; |
167 | } else | 167 | } else |
168 | return DRM_ERR(EINVAL); | 168 | return -EINVAL; |
169 | 169 | ||
170 | radeon_acknowledge_irqs(dev_priv, ack); | 170 | radeon_acknowledge_irqs(dev_priv, ack); |
171 | 171 | ||
@@ -207,7 +207,7 @@ int radeon_irq_emit(DRM_IOCTL_ARGS) | |||
207 | 207 | ||
208 | if (!dev_priv) { | 208 | if (!dev_priv) { |
209 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 209 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
210 | return DRM_ERR(EINVAL); | 210 | return -EINVAL; |
211 | } | 211 | } |
212 | 212 | ||
213 | DRM_COPY_FROM_USER_IOCTL(emit, (drm_radeon_irq_emit_t __user *) data, | 213 | DRM_COPY_FROM_USER_IOCTL(emit, (drm_radeon_irq_emit_t __user *) data, |
@@ -217,7 +217,7 @@ int radeon_irq_emit(DRM_IOCTL_ARGS) | |||
217 | 217 | ||
218 | if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) { | 218 | if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) { |
219 | DRM_ERROR("copy_to_user\n"); | 219 | DRM_ERROR("copy_to_user\n"); |
220 | return DRM_ERR(EFAULT); | 220 | return -EFAULT; |
221 | } | 221 | } |
222 | 222 | ||
223 | return 0; | 223 | return 0; |
@@ -233,7 +233,7 @@ int radeon_irq_wait(DRM_IOCTL_ARGS) | |||
233 | 233 | ||
234 | if (!dev_priv) { | 234 | if (!dev_priv) { |
235 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 235 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
236 | return DRM_ERR(EINVAL); | 236 | return -EINVAL; |
237 | } | 237 | } |
238 | 238 | ||
239 | DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_radeon_irq_wait_t __user *) data, | 239 | DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_radeon_irq_wait_t __user *) data, |
@@ -320,7 +320,7 @@ int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value) | |||
320 | drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; | 320 | drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; |
321 | if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) { | 321 | if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) { |
322 | DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value); | 322 | DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value); |
323 | return DRM_ERR(EINVAL); | 323 | return -EINVAL; |
324 | } | 324 | } |
325 | dev_priv->vblank_crtc = (unsigned int)value; | 325 | dev_priv->vblank_crtc = (unsigned int)value; |
326 | radeon_enable_interrupt(dev); | 326 | radeon_enable_interrupt(dev); |
diff --git a/drivers/char/drm/radeon_mem.c b/drivers/char/drm/radeon_mem.c index 517cad8b6e3a..df5b2e0bea33 100644 --- a/drivers/char/drm/radeon_mem.c +++ b/drivers/char/drm/radeon_mem.c | |||
@@ -137,12 +137,12 @@ static int init_heap(struct mem_block **heap, int start, int size) | |||
137 | struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS); | 137 | struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS); |
138 | 138 | ||
139 | if (!blocks) | 139 | if (!blocks) |
140 | return DRM_ERR(ENOMEM); | 140 | return -ENOMEM; |
141 | 141 | ||
142 | *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS); | 142 | *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS); |
143 | if (!*heap) { | 143 | if (!*heap) { |
144 | drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS); | 144 | drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS); |
145 | return DRM_ERR(ENOMEM); | 145 | return -ENOMEM; |
146 | } | 146 | } |
147 | 147 | ||
148 | blocks->start = start; | 148 | blocks->start = start; |
@@ -226,7 +226,7 @@ int radeon_mem_alloc(DRM_IOCTL_ARGS) | |||
226 | 226 | ||
227 | if (!dev_priv) { | 227 | if (!dev_priv) { |
228 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 228 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
229 | return DRM_ERR(EINVAL); | 229 | return -EINVAL; |
230 | } | 230 | } |
231 | 231 | ||
232 | DRM_COPY_FROM_USER_IOCTL(alloc, (drm_radeon_mem_alloc_t __user *) data, | 232 | DRM_COPY_FROM_USER_IOCTL(alloc, (drm_radeon_mem_alloc_t __user *) data, |
@@ -234,7 +234,7 @@ int radeon_mem_alloc(DRM_IOCTL_ARGS) | |||
234 | 234 | ||
235 | heap = get_heap(dev_priv, alloc.region); | 235 | heap = get_heap(dev_priv, alloc.region); |
236 | if (!heap || !*heap) | 236 | if (!heap || !*heap) |
237 | return DRM_ERR(EFAULT); | 237 | return -EFAULT; |
238 | 238 | ||
239 | /* Make things easier on ourselves: all allocations at least | 239 | /* Make things easier on ourselves: all allocations at least |
240 | * 4k aligned. | 240 | * 4k aligned. |
@@ -245,11 +245,11 @@ int radeon_mem_alloc(DRM_IOCTL_ARGS) | |||
245 | block = alloc_block(*heap, alloc.size, alloc.alignment, filp); | 245 | block = alloc_block(*heap, alloc.size, alloc.alignment, filp); |
246 | 246 | ||
247 | if (!block) | 247 | if (!block) |
248 | return DRM_ERR(ENOMEM); | 248 | return -ENOMEM; |
249 | 249 | ||
250 | if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) { | 250 | if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) { |
251 | DRM_ERROR("copy_to_user\n"); | 251 | DRM_ERROR("copy_to_user\n"); |
252 | return DRM_ERR(EFAULT); | 252 | return -EFAULT; |
253 | } | 253 | } |
254 | 254 | ||
255 | return 0; | 255 | return 0; |
@@ -264,7 +264,7 @@ int radeon_mem_free(DRM_IOCTL_ARGS) | |||
264 | 264 | ||
265 | if (!dev_priv) { | 265 | if (!dev_priv) { |
266 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 266 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
267 | return DRM_ERR(EINVAL); | 267 | return -EINVAL; |
268 | } | 268 | } |
269 | 269 | ||
270 | DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_mem_free_t __user *) data, | 270 | DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_mem_free_t __user *) data, |
@@ -272,14 +272,14 @@ int radeon_mem_free(DRM_IOCTL_ARGS) | |||
272 | 272 | ||
273 | heap = get_heap(dev_priv, memfree.region); | 273 | heap = get_heap(dev_priv, memfree.region); |
274 | if (!heap || !*heap) | 274 | if (!heap || !*heap) |
275 | return DRM_ERR(EFAULT); | 275 | return -EFAULT; |
276 | 276 | ||
277 | block = find_block(*heap, memfree.region_offset); | 277 | block = find_block(*heap, memfree.region_offset); |
278 | if (!block) | 278 | if (!block) |
279 | return DRM_ERR(EFAULT); | 279 | return -EFAULT; |
280 | 280 | ||
281 | if (block->filp != filp) | 281 | if (block->filp != filp) |
282 | return DRM_ERR(EPERM); | 282 | return -EPERM; |
283 | 283 | ||
284 | free_block(block); | 284 | free_block(block); |
285 | return 0; | 285 | return 0; |
@@ -294,7 +294,7 @@ int radeon_mem_init_heap(DRM_IOCTL_ARGS) | |||
294 | 294 | ||
295 | if (!dev_priv) { | 295 | if (!dev_priv) { |
296 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 296 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
297 | return DRM_ERR(EINVAL); | 297 | return -EINVAL; |
298 | } | 298 | } |
299 | 299 | ||
300 | DRM_COPY_FROM_USER_IOCTL(initheap, | 300 | DRM_COPY_FROM_USER_IOCTL(initheap, |
@@ -303,11 +303,11 @@ int radeon_mem_init_heap(DRM_IOCTL_ARGS) | |||
303 | 303 | ||
304 | heap = get_heap(dev_priv, initheap.region); | 304 | heap = get_heap(dev_priv, initheap.region); |
305 | if (!heap) | 305 | if (!heap) |
306 | return DRM_ERR(EFAULT); | 306 | return -EFAULT; |
307 | 307 | ||
308 | if (*heap) { | 308 | if (*heap) { |
309 | DRM_ERROR("heap already initialized?"); | 309 | DRM_ERROR("heap already initialized?"); |
310 | return DRM_ERR(EFAULT); | 310 | return -EFAULT; |
311 | } | 311 | } |
312 | 312 | ||
313 | return init_heap(heap, initheap.start, initheap.size); | 313 | return init_heap(heap, initheap.start, initheap.size); |
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c index 3ddf86f2abf0..4bc0909b226f 100644 --- a/drivers/char/drm/radeon_state.c +++ b/drivers/char/drm/radeon_state.c | |||
@@ -85,7 +85,7 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * | |||
85 | *offset = off; | 85 | *offset = off; |
86 | return 0; | 86 | return 0; |
87 | } | 87 | } |
88 | return DRM_ERR(EINVAL); | 88 | return -EINVAL; |
89 | } | 89 | } |
90 | 90 | ||
91 | static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * | 91 | static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * |
@@ -99,7 +99,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * | |||
99 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, | 99 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, |
100 | &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) { | 100 | &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) { |
101 | DRM_ERROR("Invalid depth buffer offset\n"); | 101 | DRM_ERROR("Invalid depth buffer offset\n"); |
102 | return DRM_ERR(EINVAL); | 102 | return -EINVAL; |
103 | } | 103 | } |
104 | break; | 104 | break; |
105 | 105 | ||
@@ -107,7 +107,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * | |||
107 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, | 107 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, |
108 | &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) { | 108 | &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) { |
109 | DRM_ERROR("Invalid colour buffer offset\n"); | 109 | DRM_ERROR("Invalid colour buffer offset\n"); |
110 | return DRM_ERR(EINVAL); | 110 | return -EINVAL; |
111 | } | 111 | } |
112 | break; | 112 | break; |
113 | 113 | ||
@@ -120,7 +120,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * | |||
120 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, | 120 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, |
121 | &data[0])) { | 121 | &data[0])) { |
122 | DRM_ERROR("Invalid R200 texture offset\n"); | 122 | DRM_ERROR("Invalid R200 texture offset\n"); |
123 | return DRM_ERR(EINVAL); | 123 | return -EINVAL; |
124 | } | 124 | } |
125 | break; | 125 | break; |
126 | 126 | ||
@@ -130,7 +130,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * | |||
130 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, | 130 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, |
131 | &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) { | 131 | &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) { |
132 | DRM_ERROR("Invalid R100 texture offset\n"); | 132 | DRM_ERROR("Invalid R100 texture offset\n"); |
133 | return DRM_ERR(EINVAL); | 133 | return -EINVAL; |
134 | } | 134 | } |
135 | break; | 135 | break; |
136 | 136 | ||
@@ -147,7 +147,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * | |||
147 | &data[i])) { | 147 | &data[i])) { |
148 | DRM_ERROR | 148 | DRM_ERROR |
149 | ("Invalid R200 cubic texture offset\n"); | 149 | ("Invalid R200 cubic texture offset\n"); |
150 | return DRM_ERR(EINVAL); | 150 | return -EINVAL; |
151 | } | 151 | } |
152 | } | 152 | } |
153 | break; | 153 | break; |
@@ -163,7 +163,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * | |||
163 | &data[i])) { | 163 | &data[i])) { |
164 | DRM_ERROR | 164 | DRM_ERROR |
165 | ("Invalid R100 cubic texture offset\n"); | 165 | ("Invalid R100 cubic texture offset\n"); |
166 | return DRM_ERR(EINVAL); | 166 | return -EINVAL; |
167 | } | 167 | } |
168 | } | 168 | } |
169 | } | 169 | } |
@@ -256,7 +256,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * | |||
256 | 256 | ||
257 | default: | 257 | default: |
258 | DRM_ERROR("Unknown state packet ID %d\n", id); | 258 | DRM_ERROR("Unknown state packet ID %d\n", id); |
259 | return DRM_ERR(EINVAL); | 259 | return -EINVAL; |
260 | } | 260 | } |
261 | 261 | ||
262 | return 0; | 262 | return 0; |
@@ -277,12 +277,12 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * | |||
277 | 277 | ||
278 | if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) { | 278 | if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) { |
279 | DRM_ERROR("Not a type 3 packet\n"); | 279 | DRM_ERROR("Not a type 3 packet\n"); |
280 | return DRM_ERR(EINVAL); | 280 | return -EINVAL; |
281 | } | 281 | } |
282 | 282 | ||
283 | if (4 * *cmdsz > cmdbuf->bufsz) { | 283 | if (4 * *cmdsz > cmdbuf->bufsz) { |
284 | DRM_ERROR("Packet size larger than size of data provided\n"); | 284 | DRM_ERROR("Packet size larger than size of data provided\n"); |
285 | return DRM_ERR(EINVAL); | 285 | return -EINVAL; |
286 | } | 286 | } |
287 | 287 | ||
288 | switch(cmd[0] & 0xff00) { | 288 | switch(cmd[0] & 0xff00) { |
@@ -307,7 +307,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * | |||
307 | /* safe but r200 only */ | 307 | /* safe but r200 only */ |
308 | if (dev_priv->microcode_version != UCODE_R200) { | 308 | if (dev_priv->microcode_version != UCODE_R200) { |
309 | DRM_ERROR("Invalid 3d packet for r100-class chip\n"); | 309 | DRM_ERROR("Invalid 3d packet for r100-class chip\n"); |
310 | return DRM_ERR(EINVAL); | 310 | return -EINVAL; |
311 | } | 311 | } |
312 | break; | 312 | break; |
313 | 313 | ||
@@ -317,7 +317,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * | |||
317 | if (count > 18) { /* 12 arrays max */ | 317 | if (count > 18) { /* 12 arrays max */ |
318 | DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", | 318 | DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", |
319 | count); | 319 | count); |
320 | return DRM_ERR(EINVAL); | 320 | return -EINVAL; |
321 | } | 321 | } |
322 | 322 | ||
323 | /* carefully check packet contents */ | 323 | /* carefully check packet contents */ |
@@ -330,7 +330,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * | |||
330 | DRM_ERROR | 330 | DRM_ERROR |
331 | ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", | 331 | ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", |
332 | k, i); | 332 | k, i); |
333 | return DRM_ERR(EINVAL); | 333 | return -EINVAL; |
334 | } | 334 | } |
335 | k++; | 335 | k++; |
336 | i++; | 336 | i++; |
@@ -341,7 +341,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * | |||
341 | DRM_ERROR | 341 | DRM_ERROR |
342 | ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", | 342 | ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", |
343 | k, i); | 343 | k, i); |
344 | return DRM_ERR(EINVAL); | 344 | return -EINVAL; |
345 | } | 345 | } |
346 | k++; | 346 | k++; |
347 | i++; | 347 | i++; |
@@ -351,33 +351,33 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * | |||
351 | DRM_ERROR | 351 | DRM_ERROR |
352 | ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", | 352 | ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", |
353 | k, i, narrays, count + 1); | 353 | k, i, narrays, count + 1); |
354 | return DRM_ERR(EINVAL); | 354 | return -EINVAL; |
355 | } | 355 | } |
356 | break; | 356 | break; |
357 | 357 | ||
358 | case RADEON_3D_RNDR_GEN_INDX_PRIM: | 358 | case RADEON_3D_RNDR_GEN_INDX_PRIM: |
359 | if (dev_priv->microcode_version != UCODE_R100) { | 359 | if (dev_priv->microcode_version != UCODE_R100) { |
360 | DRM_ERROR("Invalid 3d packet for r200-class chip\n"); | 360 | DRM_ERROR("Invalid 3d packet for r200-class chip\n"); |
361 | return DRM_ERR(EINVAL); | 361 | return -EINVAL; |
362 | } | 362 | } |
363 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[1])) { | 363 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[1])) { |
364 | DRM_ERROR("Invalid rndr_gen_indx offset\n"); | 364 | DRM_ERROR("Invalid rndr_gen_indx offset\n"); |
365 | return DRM_ERR(EINVAL); | 365 | return -EINVAL; |
366 | } | 366 | } |
367 | break; | 367 | break; |
368 | 368 | ||
369 | case RADEON_CP_INDX_BUFFER: | 369 | case RADEON_CP_INDX_BUFFER: |
370 | if (dev_priv->microcode_version != UCODE_R200) { | 370 | if (dev_priv->microcode_version != UCODE_R200) { |
371 | DRM_ERROR("Invalid 3d packet for r100-class chip\n"); | 371 | DRM_ERROR("Invalid 3d packet for r100-class chip\n"); |
372 | return DRM_ERR(EINVAL); | 372 | return -EINVAL; |
373 | } | 373 | } |
374 | if ((cmd[1] & 0x8000ffff) != 0x80000810) { | 374 | if ((cmd[1] & 0x8000ffff) != 0x80000810) { |
375 | DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); | 375 | DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); |
376 | return DRM_ERR(EINVAL); | 376 | return -EINVAL; |
377 | } | 377 | } |
378 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[2])) { | 378 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[2])) { |
379 | DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); | 379 | DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); |
380 | return DRM_ERR(EINVAL); | 380 | return -EINVAL; |
381 | } | 381 | } |
382 | break; | 382 | break; |
383 | 383 | ||
@@ -391,7 +391,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * | |||
391 | if (radeon_check_and_fixup_offset | 391 | if (radeon_check_and_fixup_offset |
392 | (dev_priv, filp_priv, &offset)) { | 392 | (dev_priv, filp_priv, &offset)) { |
393 | DRM_ERROR("Invalid first packet offset\n"); | 393 | DRM_ERROR("Invalid first packet offset\n"); |
394 | return DRM_ERR(EINVAL); | 394 | return -EINVAL; |
395 | } | 395 | } |
396 | cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10; | 396 | cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10; |
397 | } | 397 | } |
@@ -402,7 +402,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * | |||
402 | if (radeon_check_and_fixup_offset | 402 | if (radeon_check_and_fixup_offset |
403 | (dev_priv, filp_priv, &offset)) { | 403 | (dev_priv, filp_priv, &offset)) { |
404 | DRM_ERROR("Invalid second packet offset\n"); | 404 | DRM_ERROR("Invalid second packet offset\n"); |
405 | return DRM_ERR(EINVAL); | 405 | return -EINVAL; |
406 | } | 406 | } |
407 | cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10; | 407 | cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10; |
408 | } | 408 | } |
@@ -410,7 +410,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * | |||
410 | 410 | ||
411 | default: | 411 | default: |
412 | DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00); | 412 | DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00); |
413 | return DRM_ERR(EINVAL); | 413 | return -EINVAL; |
414 | } | 414 | } |
415 | 415 | ||
416 | return 0; | 416 | return 0; |
@@ -451,13 +451,13 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv, | |||
451 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, | 451 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, |
452 | &ctx->rb3d_depthoffset)) { | 452 | &ctx->rb3d_depthoffset)) { |
453 | DRM_ERROR("Invalid depth buffer offset\n"); | 453 | DRM_ERROR("Invalid depth buffer offset\n"); |
454 | return DRM_ERR(EINVAL); | 454 | return -EINVAL; |
455 | } | 455 | } |
456 | 456 | ||
457 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, | 457 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, |
458 | &ctx->rb3d_coloroffset)) { | 458 | &ctx->rb3d_coloroffset)) { |
459 | DRM_ERROR("Invalid depth buffer offset\n"); | 459 | DRM_ERROR("Invalid depth buffer offset\n"); |
460 | return DRM_ERR(EINVAL); | 460 | return -EINVAL; |
461 | } | 461 | } |
462 | 462 | ||
463 | BEGIN_RING(14); | 463 | BEGIN_RING(14); |
@@ -546,7 +546,7 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv, | |||
546 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, | 546 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, |
547 | &tex[0].pp_txoffset)) { | 547 | &tex[0].pp_txoffset)) { |
548 | DRM_ERROR("Invalid texture offset for unit 0\n"); | 548 | DRM_ERROR("Invalid texture offset for unit 0\n"); |
549 | return DRM_ERR(EINVAL); | 549 | return -EINVAL; |
550 | } | 550 | } |
551 | 551 | ||
552 | BEGIN_RING(9); | 552 | BEGIN_RING(9); |
@@ -566,7 +566,7 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv, | |||
566 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, | 566 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, |
567 | &tex[1].pp_txoffset)) { | 567 | &tex[1].pp_txoffset)) { |
568 | DRM_ERROR("Invalid texture offset for unit 1\n"); | 568 | DRM_ERROR("Invalid texture offset for unit 1\n"); |
569 | return DRM_ERR(EINVAL); | 569 | return -EINVAL; |
570 | } | 570 | } |
571 | 571 | ||
572 | BEGIN_RING(9); | 572 | BEGIN_RING(9); |
@@ -586,7 +586,7 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv, | |||
586 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, | 586 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, |
587 | &tex[2].pp_txoffset)) { | 587 | &tex[2].pp_txoffset)) { |
588 | DRM_ERROR("Invalid texture offset for unit 2\n"); | 588 | DRM_ERROR("Invalid texture offset for unit 2\n"); |
589 | return DRM_ERR(EINVAL); | 589 | return -EINVAL; |
590 | } | 590 | } |
591 | 591 | ||
592 | BEGIN_RING(9); | 592 | BEGIN_RING(9); |
@@ -1668,7 +1668,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp, | |||
1668 | 1668 | ||
1669 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &tex->offset)) { | 1669 | if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &tex->offset)) { |
1670 | DRM_ERROR("Invalid destination offset\n"); | 1670 | DRM_ERROR("Invalid destination offset\n"); |
1671 | return DRM_ERR(EINVAL); | 1671 | return -EINVAL; |
1672 | } | 1672 | } |
1673 | 1673 | ||
1674 | dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD; | 1674 | dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD; |
@@ -1711,11 +1711,11 @@ static int radeon_cp_dispatch_texture(DRMFILE filp, | |||
1711 | break; | 1711 | break; |
1712 | default: | 1712 | default: |
1713 | DRM_ERROR("invalid texture format %d\n", tex->format); | 1713 | DRM_ERROR("invalid texture format %d\n", tex->format); |
1714 | return DRM_ERR(EINVAL); | 1714 | return -EINVAL; |
1715 | } | 1715 | } |
1716 | spitch = blit_width >> 6; | 1716 | spitch = blit_width >> 6; |
1717 | if (spitch == 0 && image->height > 1) | 1717 | if (spitch == 0 && image->height > 1) |
1718 | return DRM_ERR(EINVAL); | 1718 | return -EINVAL; |
1719 | 1719 | ||
1720 | texpitch = tex->pitch; | 1720 | texpitch = tex->pitch; |
1721 | if ((texpitch << 22) & RADEON_DST_TILE_MICRO) { | 1721 | if ((texpitch << 22) & RADEON_DST_TILE_MICRO) { |
@@ -1760,8 +1760,8 @@ static int radeon_cp_dispatch_texture(DRMFILE filp, | |||
1760 | if (!buf) { | 1760 | if (!buf) { |
1761 | DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n"); | 1761 | DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n"); |
1762 | if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image))) | 1762 | if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image))) |
1763 | return DRM_ERR(EFAULT); | 1763 | return -EFAULT; |
1764 | return DRM_ERR(EAGAIN); | 1764 | return -EAGAIN; |
1765 | } | 1765 | } |
1766 | 1766 | ||
1767 | /* Dispatch the indirect buffer. | 1767 | /* Dispatch the indirect buffer. |
@@ -1774,7 +1774,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp, | |||
1774 | do { \ | 1774 | do { \ |
1775 | if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\ | 1775 | if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\ |
1776 | DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \ | 1776 | DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \ |
1777 | return DRM_ERR(EFAULT); \ | 1777 | return -EFAULT; \ |
1778 | } \ | 1778 | } \ |
1779 | } while(0) | 1779 | } while(0) |
1780 | 1780 | ||
@@ -2083,7 +2083,7 @@ static int radeon_surface_alloc(DRM_IOCTL_ARGS) | |||
2083 | sizeof(alloc)); | 2083 | sizeof(alloc)); |
2084 | 2084 | ||
2085 | if (alloc_surface(&alloc, dev_priv, filp) == -1) | 2085 | if (alloc_surface(&alloc, dev_priv, filp) == -1) |
2086 | return DRM_ERR(EINVAL); | 2086 | return -EINVAL; |
2087 | else | 2087 | else |
2088 | return 0; | 2088 | return 0; |
2089 | } | 2089 | } |
@@ -2098,7 +2098,7 @@ static int radeon_surface_free(DRM_IOCTL_ARGS) | |||
2098 | sizeof(memfree)); | 2098 | sizeof(memfree)); |
2099 | 2099 | ||
2100 | if (free_surface(filp, dev_priv, memfree.address)) | 2100 | if (free_surface(filp, dev_priv, memfree.address)) |
2101 | return DRM_ERR(EINVAL); | 2101 | return -EINVAL; |
2102 | else | 2102 | else |
2103 | return 0; | 2103 | return 0; |
2104 | } | 2104 | } |
@@ -2124,7 +2124,7 @@ static int radeon_cp_clear(DRM_IOCTL_ARGS) | |||
2124 | 2124 | ||
2125 | if (DRM_COPY_FROM_USER(&depth_boxes, clear.depth_boxes, | 2125 | if (DRM_COPY_FROM_USER(&depth_boxes, clear.depth_boxes, |
2126 | sarea_priv->nbox * sizeof(depth_boxes[0]))) | 2126 | sarea_priv->nbox * sizeof(depth_boxes[0]))) |
2127 | return DRM_ERR(EFAULT); | 2127 | return -EFAULT; |
2128 | 2128 | ||
2129 | radeon_cp_dispatch_clear(dev, &clear, depth_boxes); | 2129 | radeon_cp_dispatch_clear(dev, &clear, depth_boxes); |
2130 | 2130 | ||
@@ -2226,11 +2226,11 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS) | |||
2226 | if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { | 2226 | if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { |
2227 | DRM_ERROR("buffer index %d (of %d max)\n", | 2227 | DRM_ERROR("buffer index %d (of %d max)\n", |
2228 | vertex.idx, dma->buf_count - 1); | 2228 | vertex.idx, dma->buf_count - 1); |
2229 | return DRM_ERR(EINVAL); | 2229 | return -EINVAL; |
2230 | } | 2230 | } |
2231 | if (vertex.prim < 0 || vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { | 2231 | if (vertex.prim < 0 || vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { |
2232 | DRM_ERROR("buffer prim %d\n", vertex.prim); | 2232 | DRM_ERROR("buffer prim %d\n", vertex.prim); |
2233 | return DRM_ERR(EINVAL); | 2233 | return -EINVAL; |
2234 | } | 2234 | } |
2235 | 2235 | ||
2236 | RING_SPACE_TEST_WITH_RETURN(dev_priv); | 2236 | RING_SPACE_TEST_WITH_RETURN(dev_priv); |
@@ -2241,11 +2241,11 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS) | |||
2241 | if (buf->filp != filp) { | 2241 | if (buf->filp != filp) { |
2242 | DRM_ERROR("process %d using buffer owned by %p\n", | 2242 | DRM_ERROR("process %d using buffer owned by %p\n", |
2243 | DRM_CURRENTPID, buf->filp); | 2243 | DRM_CURRENTPID, buf->filp); |
2244 | return DRM_ERR(EINVAL); | 2244 | return -EINVAL; |
2245 | } | 2245 | } |
2246 | if (buf->pending) { | 2246 | if (buf->pending) { |
2247 | DRM_ERROR("sending pending buffer %d\n", vertex.idx); | 2247 | DRM_ERROR("sending pending buffer %d\n", vertex.idx); |
2248 | return DRM_ERR(EINVAL); | 2248 | return -EINVAL; |
2249 | } | 2249 | } |
2250 | 2250 | ||
2251 | /* Build up a prim_t record: | 2251 | /* Build up a prim_t record: |
@@ -2259,7 +2259,7 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS) | |||
2259 | sarea_priv->tex_state, | 2259 | sarea_priv->tex_state, |
2260 | sarea_priv->dirty)) { | 2260 | sarea_priv->dirty)) { |
2261 | DRM_ERROR("radeon_emit_state failed\n"); | 2261 | DRM_ERROR("radeon_emit_state failed\n"); |
2262 | return DRM_ERR(EINVAL); | 2262 | return -EINVAL; |
2263 | } | 2263 | } |
2264 | 2264 | ||
2265 | sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | | 2265 | sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | |
@@ -2310,11 +2310,11 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS) | |||
2310 | if (elts.idx < 0 || elts.idx >= dma->buf_count) { | 2310 | if (elts.idx < 0 || elts.idx >= dma->buf_count) { |
2311 | DRM_ERROR("buffer index %d (of %d max)\n", | 2311 | DRM_ERROR("buffer index %d (of %d max)\n", |
2312 | elts.idx, dma->buf_count - 1); | 2312 | elts.idx, dma->buf_count - 1); |
2313 | return DRM_ERR(EINVAL); | 2313 | return -EINVAL; |
2314 | } | 2314 | } |
2315 | if (elts.prim < 0 || elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { | 2315 | if (elts.prim < 0 || elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { |
2316 | DRM_ERROR("buffer prim %d\n", elts.prim); | 2316 | DRM_ERROR("buffer prim %d\n", elts.prim); |
2317 | return DRM_ERR(EINVAL); | 2317 | return -EINVAL; |
2318 | } | 2318 | } |
2319 | 2319 | ||
2320 | RING_SPACE_TEST_WITH_RETURN(dev_priv); | 2320 | RING_SPACE_TEST_WITH_RETURN(dev_priv); |
@@ -2325,11 +2325,11 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS) | |||
2325 | if (buf->filp != filp) { | 2325 | if (buf->filp != filp) { |
2326 | DRM_ERROR("process %d using buffer owned by %p\n", | 2326 | DRM_ERROR("process %d using buffer owned by %p\n", |
2327 | DRM_CURRENTPID, buf->filp); | 2327 | DRM_CURRENTPID, buf->filp); |
2328 | return DRM_ERR(EINVAL); | 2328 | return -EINVAL; |
2329 | } | 2329 | } |
2330 | if (buf->pending) { | 2330 | if (buf->pending) { |
2331 | DRM_ERROR("sending pending buffer %d\n", elts.idx); | 2331 | DRM_ERROR("sending pending buffer %d\n", elts.idx); |
2332 | return DRM_ERR(EINVAL); | 2332 | return -EINVAL; |
2333 | } | 2333 | } |
2334 | 2334 | ||
2335 | count = (elts.end - elts.start) / sizeof(u16); | 2335 | count = (elts.end - elts.start) / sizeof(u16); |
@@ -2337,11 +2337,11 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS) | |||
2337 | 2337 | ||
2338 | if (elts.start & 0x7) { | 2338 | if (elts.start & 0x7) { |
2339 | DRM_ERROR("misaligned buffer 0x%x\n", elts.start); | 2339 | DRM_ERROR("misaligned buffer 0x%x\n", elts.start); |
2340 | return DRM_ERR(EINVAL); | 2340 | return -EINVAL; |
2341 | } | 2341 | } |
2342 | if (elts.start < buf->used) { | 2342 | if (elts.start < buf->used) { |
2343 | DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used); | 2343 | DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used); |
2344 | return DRM_ERR(EINVAL); | 2344 | return -EINVAL; |
2345 | } | 2345 | } |
2346 | 2346 | ||
2347 | buf->used = elts.end; | 2347 | buf->used = elts.end; |
@@ -2352,7 +2352,7 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS) | |||
2352 | sarea_priv->tex_state, | 2352 | sarea_priv->tex_state, |
2353 | sarea_priv->dirty)) { | 2353 | sarea_priv->dirty)) { |
2354 | DRM_ERROR("radeon_emit_state failed\n"); | 2354 | DRM_ERROR("radeon_emit_state failed\n"); |
2355 | return DRM_ERR(EINVAL); | 2355 | return -EINVAL; |
2356 | } | 2356 | } |
2357 | 2357 | ||
2358 | sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | | 2358 | sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | |
@@ -2394,13 +2394,13 @@ static int radeon_cp_texture(DRM_IOCTL_ARGS) | |||
2394 | 2394 | ||
2395 | if (tex.image == NULL) { | 2395 | if (tex.image == NULL) { |
2396 | DRM_ERROR("null texture image!\n"); | 2396 | DRM_ERROR("null texture image!\n"); |
2397 | return DRM_ERR(EINVAL); | 2397 | return -EINVAL; |
2398 | } | 2398 | } |
2399 | 2399 | ||
2400 | if (DRM_COPY_FROM_USER(&image, | 2400 | if (DRM_COPY_FROM_USER(&image, |
2401 | (drm_radeon_tex_image_t __user *) tex.image, | 2401 | (drm_radeon_tex_image_t __user *) tex.image, |
2402 | sizeof(image))) | 2402 | sizeof(image))) |
2403 | return DRM_ERR(EFAULT); | 2403 | return -EFAULT; |
2404 | 2404 | ||
2405 | RING_SPACE_TEST_WITH_RETURN(dev_priv); | 2405 | RING_SPACE_TEST_WITH_RETURN(dev_priv); |
2406 | VB_AGE_TEST_WITH_RETURN(dev_priv); | 2406 | VB_AGE_TEST_WITH_RETURN(dev_priv); |
@@ -2424,7 +2424,7 @@ static int radeon_cp_stipple(DRM_IOCTL_ARGS) | |||
2424 | sizeof(stipple)); | 2424 | sizeof(stipple)); |
2425 | 2425 | ||
2426 | if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32))) | 2426 | if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32))) |
2427 | return DRM_ERR(EFAULT); | 2427 | return -EFAULT; |
2428 | 2428 | ||
2429 | RING_SPACE_TEST_WITH_RETURN(dev_priv); | 2429 | RING_SPACE_TEST_WITH_RETURN(dev_priv); |
2430 | 2430 | ||
@@ -2455,7 +2455,7 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS) | |||
2455 | if (indirect.idx < 0 || indirect.idx >= dma->buf_count) { | 2455 | if (indirect.idx < 0 || indirect.idx >= dma->buf_count) { |
2456 | DRM_ERROR("buffer index %d (of %d max)\n", | 2456 | DRM_ERROR("buffer index %d (of %d max)\n", |
2457 | indirect.idx, dma->buf_count - 1); | 2457 | indirect.idx, dma->buf_count - 1); |
2458 | return DRM_ERR(EINVAL); | 2458 | return -EINVAL; |
2459 | } | 2459 | } |
2460 | 2460 | ||
2461 | buf = dma->buflist[indirect.idx]; | 2461 | buf = dma->buflist[indirect.idx]; |
@@ -2463,17 +2463,17 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS) | |||
2463 | if (buf->filp != filp) { | 2463 | if (buf->filp != filp) { |
2464 | DRM_ERROR("process %d using buffer owned by %p\n", | 2464 | DRM_ERROR("process %d using buffer owned by %p\n", |
2465 | DRM_CURRENTPID, buf->filp); | 2465 | DRM_CURRENTPID, buf->filp); |
2466 | return DRM_ERR(EINVAL); | 2466 | return -EINVAL; |
2467 | } | 2467 | } |
2468 | if (buf->pending) { | 2468 | if (buf->pending) { |
2469 | DRM_ERROR("sending pending buffer %d\n", indirect.idx); | 2469 | DRM_ERROR("sending pending buffer %d\n", indirect.idx); |
2470 | return DRM_ERR(EINVAL); | 2470 | return -EINVAL; |
2471 | } | 2471 | } |
2472 | 2472 | ||
2473 | if (indirect.start < buf->used) { | 2473 | if (indirect.start < buf->used) { |
2474 | DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n", | 2474 | DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n", |
2475 | indirect.start, buf->used); | 2475 | indirect.start, buf->used); |
2476 | return DRM_ERR(EINVAL); | 2476 | return -EINVAL; |
2477 | } | 2477 | } |
2478 | 2478 | ||
2479 | RING_SPACE_TEST_WITH_RETURN(dev_priv); | 2479 | RING_SPACE_TEST_WITH_RETURN(dev_priv); |
@@ -2528,7 +2528,7 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS) | |||
2528 | if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { | 2528 | if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { |
2529 | DRM_ERROR("buffer index %d (of %d max)\n", | 2529 | DRM_ERROR("buffer index %d (of %d max)\n", |
2530 | vertex.idx, dma->buf_count - 1); | 2530 | vertex.idx, dma->buf_count - 1); |
2531 | return DRM_ERR(EINVAL); | 2531 | return -EINVAL; |
2532 | } | 2532 | } |
2533 | 2533 | ||
2534 | RING_SPACE_TEST_WITH_RETURN(dev_priv); | 2534 | RING_SPACE_TEST_WITH_RETURN(dev_priv); |
@@ -2539,23 +2539,23 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS) | |||
2539 | if (buf->filp != filp) { | 2539 | if (buf->filp != filp) { |
2540 | DRM_ERROR("process %d using buffer owned by %p\n", | 2540 | DRM_ERROR("process %d using buffer owned by %p\n", |
2541 | DRM_CURRENTPID, buf->filp); | 2541 | DRM_CURRENTPID, buf->filp); |
2542 | return DRM_ERR(EINVAL); | 2542 | return -EINVAL; |
2543 | } | 2543 | } |
2544 | 2544 | ||
2545 | if (buf->pending) { | 2545 | if (buf->pending) { |
2546 | DRM_ERROR("sending pending buffer %d\n", vertex.idx); | 2546 | DRM_ERROR("sending pending buffer %d\n", vertex.idx); |
2547 | return DRM_ERR(EINVAL); | 2547 | return -EINVAL; |
2548 | } | 2548 | } |
2549 | 2549 | ||
2550 | if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) | 2550 | if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) |
2551 | return DRM_ERR(EINVAL); | 2551 | return -EINVAL; |
2552 | 2552 | ||
2553 | for (laststate = 0xff, i = 0; i < vertex.nr_prims; i++) { | 2553 | for (laststate = 0xff, i = 0; i < vertex.nr_prims; i++) { |
2554 | drm_radeon_prim_t prim; | 2554 | drm_radeon_prim_t prim; |
2555 | drm_radeon_tcl_prim_t tclprim; | 2555 | drm_radeon_tcl_prim_t tclprim; |
2556 | 2556 | ||
2557 | if (DRM_COPY_FROM_USER(&prim, &vertex.prim[i], sizeof(prim))) | 2557 | if (DRM_COPY_FROM_USER(&prim, &vertex.prim[i], sizeof(prim))) |
2558 | return DRM_ERR(EFAULT); | 2558 | return -EFAULT; |
2559 | 2559 | ||
2560 | if (prim.stateidx != laststate) { | 2560 | if (prim.stateidx != laststate) { |
2561 | drm_radeon_state_t state; | 2561 | drm_radeon_state_t state; |
@@ -2563,11 +2563,11 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS) | |||
2563 | if (DRM_COPY_FROM_USER(&state, | 2563 | if (DRM_COPY_FROM_USER(&state, |
2564 | &vertex.state[prim.stateidx], | 2564 | &vertex.state[prim.stateidx], |
2565 | sizeof(state))) | 2565 | sizeof(state))) |
2566 | return DRM_ERR(EFAULT); | 2566 | return -EFAULT; |
2567 | 2567 | ||
2568 | if (radeon_emit_state2(dev_priv, filp_priv, &state)) { | 2568 | if (radeon_emit_state2(dev_priv, filp_priv, &state)) { |
2569 | DRM_ERROR("radeon_emit_state2 failed\n"); | 2569 | DRM_ERROR("radeon_emit_state2 failed\n"); |
2570 | return DRM_ERR(EINVAL); | 2570 | return -EINVAL; |
2571 | } | 2571 | } |
2572 | 2572 | ||
2573 | laststate = prim.stateidx; | 2573 | laststate = prim.stateidx; |
@@ -2613,19 +2613,19 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv, | |||
2613 | RING_LOCALS; | 2613 | RING_LOCALS; |
2614 | 2614 | ||
2615 | if (id >= RADEON_MAX_STATE_PACKETS) | 2615 | if (id >= RADEON_MAX_STATE_PACKETS) |
2616 | return DRM_ERR(EINVAL); | 2616 | return -EINVAL; |
2617 | 2617 | ||
2618 | sz = packet[id].len; | 2618 | sz = packet[id].len; |
2619 | reg = packet[id].start; | 2619 | reg = packet[id].start; |
2620 | 2620 | ||
2621 | if (sz * sizeof(int) > cmdbuf->bufsz) { | 2621 | if (sz * sizeof(int) > cmdbuf->bufsz) { |
2622 | DRM_ERROR("Packet size provided larger than data provided\n"); | 2622 | DRM_ERROR("Packet size provided larger than data provided\n"); |
2623 | return DRM_ERR(EINVAL); | 2623 | return -EINVAL; |
2624 | } | 2624 | } |
2625 | 2625 | ||
2626 | if (radeon_check_and_fixup_packets(dev_priv, filp_priv, id, data)) { | 2626 | if (radeon_check_and_fixup_packets(dev_priv, filp_priv, id, data)) { |
2627 | DRM_ERROR("Packet verification failed\n"); | 2627 | DRM_ERROR("Packet verification failed\n"); |
2628 | return DRM_ERR(EINVAL); | 2628 | return -EINVAL; |
2629 | } | 2629 | } |
2630 | 2630 | ||
2631 | BEGIN_RING(sz + 1); | 2631 | BEGIN_RING(sz + 1); |
@@ -2713,7 +2713,7 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv, | |||
2713 | if (!sz) | 2713 | if (!sz) |
2714 | return 0; | 2714 | return 0; |
2715 | if (sz * 4 > cmdbuf->bufsz) | 2715 | if (sz * 4 > cmdbuf->bufsz) |
2716 | return DRM_ERR(EINVAL); | 2716 | return -EINVAL; |
2717 | 2717 | ||
2718 | BEGIN_RING(5 + sz); | 2718 | BEGIN_RING(5 + sz); |
2719 | OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0); | 2719 | OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0); |
@@ -2781,7 +2781,7 @@ static int radeon_emit_packet3_cliprect(struct drm_device *dev, | |||
2781 | do { | 2781 | do { |
2782 | if (i < cmdbuf->nbox) { | 2782 | if (i < cmdbuf->nbox) { |
2783 | if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box))) | 2783 | if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box))) |
2784 | return DRM_ERR(EFAULT); | 2784 | return -EFAULT; |
2785 | /* FIXME The second and subsequent times round | 2785 | /* FIXME The second and subsequent times round |
2786 | * this loop, send a WAIT_UNTIL_3D_IDLE before | 2786 | * this loop, send a WAIT_UNTIL_3D_IDLE before |
2787 | * calling emit_clip_rect(). This fixes a | 2787 | * calling emit_clip_rect(). This fixes a |
@@ -2839,7 +2839,7 @@ static int radeon_emit_wait(struct drm_device * dev, int flags) | |||
2839 | ADVANCE_RING(); | 2839 | ADVANCE_RING(); |
2840 | break; | 2840 | break; |
2841 | default: | 2841 | default: |
2842 | return DRM_ERR(EINVAL); | 2842 | return -EINVAL; |
2843 | } | 2843 | } |
2844 | 2844 | ||
2845 | return 0; | 2845 | return 0; |
@@ -2870,7 +2870,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) | |||
2870 | VB_AGE_TEST_WITH_RETURN(dev_priv); | 2870 | VB_AGE_TEST_WITH_RETURN(dev_priv); |
2871 | 2871 | ||
2872 | if (cmdbuf.bufsz > 64 * 1024 || cmdbuf.bufsz < 0) { | 2872 | if (cmdbuf.bufsz > 64 * 1024 || cmdbuf.bufsz < 0) { |
2873 | return DRM_ERR(EINVAL); | 2873 | return -EINVAL; |
2874 | } | 2874 | } |
2875 | 2875 | ||
2876 | /* Allocate an in-kernel area and copy in the cmdbuf. Do this to avoid | 2876 | /* Allocate an in-kernel area and copy in the cmdbuf. Do this to avoid |
@@ -2881,11 +2881,11 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) | |||
2881 | if (orig_bufsz != 0) { | 2881 | if (orig_bufsz != 0) { |
2882 | kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER); | 2882 | kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER); |
2883 | if (kbuf == NULL) | 2883 | if (kbuf == NULL) |
2884 | return DRM_ERR(ENOMEM); | 2884 | return -ENOMEM; |
2885 | if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf, | 2885 | if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf, |
2886 | cmdbuf.bufsz)) { | 2886 | cmdbuf.bufsz)) { |
2887 | drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); | 2887 | drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); |
2888 | return DRM_ERR(EFAULT); | 2888 | return -EFAULT; |
2889 | } | 2889 | } |
2890 | cmdbuf.buf = kbuf; | 2890 | cmdbuf.buf = kbuf; |
2891 | } | 2891 | } |
@@ -3012,7 +3012,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) | |||
3012 | err: | 3012 | err: |
3013 | if (orig_bufsz != 0) | 3013 | if (orig_bufsz != 0) |
3014 | drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); | 3014 | drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); |
3015 | return DRM_ERR(EINVAL); | 3015 | return -EINVAL; |
3016 | } | 3016 | } |
3017 | 3017 | ||
3018 | static int radeon_cp_getparam(DRM_IOCTL_ARGS) | 3018 | static int radeon_cp_getparam(DRM_IOCTL_ARGS) |
@@ -3074,7 +3074,7 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS) | |||
3074 | break; | 3074 | break; |
3075 | case RADEON_PARAM_SCRATCH_OFFSET: | 3075 | case RADEON_PARAM_SCRATCH_OFFSET: |
3076 | if (!dev_priv->writeback_works) | 3076 | if (!dev_priv->writeback_works) |
3077 | return DRM_ERR(EINVAL); | 3077 | return -EINVAL; |
3078 | value = RADEON_SCRATCH_REG_OFFSET; | 3078 | value = RADEON_SCRATCH_REG_OFFSET; |
3079 | break; | 3079 | break; |
3080 | case RADEON_PARAM_CARD_TYPE: | 3080 | case RADEON_PARAM_CARD_TYPE: |
@@ -3090,12 +3090,12 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS) | |||
3090 | break; | 3090 | break; |
3091 | default: | 3091 | default: |
3092 | DRM_DEBUG("Invalid parameter %d\n", param.param); | 3092 | DRM_DEBUG("Invalid parameter %d\n", param.param); |
3093 | return DRM_ERR(EINVAL); | 3093 | return -EINVAL; |
3094 | } | 3094 | } |
3095 | 3095 | ||
3096 | if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { | 3096 | if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { |
3097 | DRM_ERROR("copy_to_user\n"); | 3097 | DRM_ERROR("copy_to_user\n"); |
3098 | return DRM_ERR(EFAULT); | 3098 | return -EFAULT; |
3099 | } | 3099 | } |
3100 | 3100 | ||
3101 | return 0; | 3101 | return 0; |
@@ -3149,7 +3149,7 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS) | |||
3149 | break; | 3149 | break; |
3150 | default: | 3150 | default: |
3151 | DRM_DEBUG("Invalid parameter %d\n", sp.param); | 3151 | DRM_DEBUG("Invalid parameter %d\n", sp.param); |
3152 | return DRM_ERR(EINVAL); | 3152 | return -EINVAL; |
3153 | } | 3153 | } |
3154 | 3154 | ||
3155 | return 0; | 3155 | return 0; |
diff --git a/drivers/char/drm/savage_bci.c b/drivers/char/drm/savage_bci.c index 18c7235f6b73..e1e88ca1c53c 100644 --- a/drivers/char/drm/savage_bci.c +++ b/drivers/char/drm/savage_bci.c | |||
@@ -60,7 +60,7 @@ savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n) | |||
60 | DRM_ERROR("failed!\n"); | 60 | DRM_ERROR("failed!\n"); |
61 | DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold); | 61 | DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold); |
62 | #endif | 62 | #endif |
63 | return DRM_ERR(EBUSY); | 63 | return -EBUSY; |
64 | } | 64 | } |
65 | 65 | ||
66 | static int | 66 | static int |
@@ -81,7 +81,7 @@ savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n) | |||
81 | DRM_ERROR("failed!\n"); | 81 | DRM_ERROR("failed!\n"); |
82 | DRM_INFO(" status=0x%08x\n", status); | 82 | DRM_INFO(" status=0x%08x\n", status); |
83 | #endif | 83 | #endif |
84 | return DRM_ERR(EBUSY); | 84 | return -EBUSY; |
85 | } | 85 | } |
86 | 86 | ||
87 | static int | 87 | static int |
@@ -102,7 +102,7 @@ savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n) | |||
102 | DRM_ERROR("failed!\n"); | 102 | DRM_ERROR("failed!\n"); |
103 | DRM_INFO(" status=0x%08x\n", status); | 103 | DRM_INFO(" status=0x%08x\n", status); |
104 | #endif | 104 | #endif |
105 | return DRM_ERR(EBUSY); | 105 | return -EBUSY; |
106 | } | 106 | } |
107 | 107 | ||
108 | /* | 108 | /* |
@@ -136,7 +136,7 @@ savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e) | |||
136 | DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); | 136 | DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); |
137 | #endif | 137 | #endif |
138 | 138 | ||
139 | return DRM_ERR(EBUSY); | 139 | return -EBUSY; |
140 | } | 140 | } |
141 | 141 | ||
142 | static int | 142 | static int |
@@ -158,7 +158,7 @@ savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e) | |||
158 | DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); | 158 | DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); |
159 | #endif | 159 | #endif |
160 | 160 | ||
161 | return DRM_ERR(EBUSY); | 161 | return -EBUSY; |
162 | } | 162 | } |
163 | 163 | ||
164 | uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv, | 164 | uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv, |
@@ -301,7 +301,7 @@ static int savage_dma_init(drm_savage_private_t * dev_priv) | |||
301 | dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) * | 301 | dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) * |
302 | dev_priv->nr_dma_pages, DRM_MEM_DRIVER); | 302 | dev_priv->nr_dma_pages, DRM_MEM_DRIVER); |
303 | if (dev_priv->dma_pages == NULL) | 303 | if (dev_priv->dma_pages == NULL) |
304 | return DRM_ERR(ENOMEM); | 304 | return -ENOMEM; |
305 | 305 | ||
306 | for (i = 0; i < dev_priv->nr_dma_pages; ++i) { | 306 | for (i = 0; i < dev_priv->nr_dma_pages; ++i) { |
307 | SET_AGE(&dev_priv->dma_pages[i].age, 0, 0); | 307 | SET_AGE(&dev_priv->dma_pages[i].age, 0, 0); |
@@ -541,7 +541,7 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset) | |||
541 | 541 | ||
542 | dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER); | 542 | dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER); |
543 | if (dev_priv == NULL) | 543 | if (dev_priv == NULL) |
544 | return DRM_ERR(ENOMEM); | 544 | return -ENOMEM; |
545 | 545 | ||
546 | memset(dev_priv, 0, sizeof(drm_savage_private_t)); | 546 | memset(dev_priv, 0, sizeof(drm_savage_private_t)); |
547 | dev->dev_private = (void *)dev_priv; | 547 | dev->dev_private = (void *)dev_priv; |
@@ -682,16 +682,16 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init) | |||
682 | 682 | ||
683 | if (init->fb_bpp != 16 && init->fb_bpp != 32) { | 683 | if (init->fb_bpp != 16 && init->fb_bpp != 32) { |
684 | DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp); | 684 | DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp); |
685 | return DRM_ERR(EINVAL); | 685 | return -EINVAL; |
686 | } | 686 | } |
687 | if (init->depth_bpp != 16 && init->depth_bpp != 32) { | 687 | if (init->depth_bpp != 16 && init->depth_bpp != 32) { |
688 | DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp); | 688 | DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp); |
689 | return DRM_ERR(EINVAL); | 689 | return -EINVAL; |
690 | } | 690 | } |
691 | if (init->dma_type != SAVAGE_DMA_AGP && | 691 | if (init->dma_type != SAVAGE_DMA_AGP && |
692 | init->dma_type != SAVAGE_DMA_PCI) { | 692 | init->dma_type != SAVAGE_DMA_PCI) { |
693 | DRM_ERROR("invalid dma memory type %d!\n", init->dma_type); | 693 | DRM_ERROR("invalid dma memory type %d!\n", init->dma_type); |
694 | return DRM_ERR(EINVAL); | 694 | return -EINVAL; |
695 | } | 695 | } |
696 | 696 | ||
697 | dev_priv->cob_size = init->cob_size; | 697 | dev_priv->cob_size = init->cob_size; |
@@ -715,14 +715,14 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init) | |||
715 | if (!dev_priv->sarea) { | 715 | if (!dev_priv->sarea) { |
716 | DRM_ERROR("could not find sarea!\n"); | 716 | DRM_ERROR("could not find sarea!\n"); |
717 | savage_do_cleanup_bci(dev); | 717 | savage_do_cleanup_bci(dev); |
718 | return DRM_ERR(EINVAL); | 718 | return -EINVAL; |
719 | } | 719 | } |
720 | if (init->status_offset != 0) { | 720 | if (init->status_offset != 0) { |
721 | dev_priv->status = drm_core_findmap(dev, init->status_offset); | 721 | dev_priv->status = drm_core_findmap(dev, init->status_offset); |
722 | if (!dev_priv->status) { | 722 | if (!dev_priv->status) { |
723 | DRM_ERROR("could not find shadow status region!\n"); | 723 | DRM_ERROR("could not find shadow status region!\n"); |
724 | savage_do_cleanup_bci(dev); | 724 | savage_do_cleanup_bci(dev); |
725 | return DRM_ERR(EINVAL); | 725 | return -EINVAL; |
726 | } | 726 | } |
727 | } else { | 727 | } else { |
728 | dev_priv->status = NULL; | 728 | dev_priv->status = NULL; |
@@ -734,13 +734,13 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init) | |||
734 | if (!dev->agp_buffer_map) { | 734 | if (!dev->agp_buffer_map) { |
735 | DRM_ERROR("could not find DMA buffer region!\n"); | 735 | DRM_ERROR("could not find DMA buffer region!\n"); |
736 | savage_do_cleanup_bci(dev); | 736 | savage_do_cleanup_bci(dev); |
737 | return DRM_ERR(EINVAL); | 737 | return -EINVAL; |
738 | } | 738 | } |
739 | drm_core_ioremap(dev->agp_buffer_map, dev); | 739 | drm_core_ioremap(dev->agp_buffer_map, dev); |
740 | if (!dev->agp_buffer_map) { | 740 | if (!dev->agp_buffer_map) { |
741 | DRM_ERROR("failed to ioremap DMA buffer region!\n"); | 741 | DRM_ERROR("failed to ioremap DMA buffer region!\n"); |
742 | savage_do_cleanup_bci(dev); | 742 | savage_do_cleanup_bci(dev); |
743 | return DRM_ERR(ENOMEM); | 743 | return -ENOMEM; |
744 | } | 744 | } |
745 | } | 745 | } |
746 | if (init->agp_textures_offset) { | 746 | if (init->agp_textures_offset) { |
@@ -749,7 +749,7 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init) | |||
749 | if (!dev_priv->agp_textures) { | 749 | if (!dev_priv->agp_textures) { |
750 | DRM_ERROR("could not find agp texture region!\n"); | 750 | DRM_ERROR("could not find agp texture region!\n"); |
751 | savage_do_cleanup_bci(dev); | 751 | savage_do_cleanup_bci(dev); |
752 | return DRM_ERR(EINVAL); | 752 | return -EINVAL; |
753 | } | 753 | } |
754 | } else { | 754 | } else { |
755 | dev_priv->agp_textures = NULL; | 755 | dev_priv->agp_textures = NULL; |
@@ -760,39 +760,39 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init) | |||
760 | DRM_ERROR("command DMA not supported on " | 760 | DRM_ERROR("command DMA not supported on " |
761 | "Savage3D/MX/IX.\n"); | 761 | "Savage3D/MX/IX.\n"); |
762 | savage_do_cleanup_bci(dev); | 762 | savage_do_cleanup_bci(dev); |
763 | return DRM_ERR(EINVAL); | 763 | return -EINVAL; |
764 | } | 764 | } |
765 | if (dev->dma && dev->dma->buflist) { | 765 | if (dev->dma && dev->dma->buflist) { |
766 | DRM_ERROR("command and vertex DMA not supported " | 766 | DRM_ERROR("command and vertex DMA not supported " |
767 | "at the same time.\n"); | 767 | "at the same time.\n"); |
768 | savage_do_cleanup_bci(dev); | 768 | savage_do_cleanup_bci(dev); |
769 | return DRM_ERR(EINVAL); | 769 | return -EINVAL; |
770 | } | 770 | } |
771 | dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset); | 771 | dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset); |
772 | if (!dev_priv->cmd_dma) { | 772 | if (!dev_priv->cmd_dma) { |
773 | DRM_ERROR("could not find command DMA region!\n"); | 773 | DRM_ERROR("could not find command DMA region!\n"); |
774 | savage_do_cleanup_bci(dev); | 774 | savage_do_cleanup_bci(dev); |
775 | return DRM_ERR(EINVAL); | 775 | return -EINVAL; |
776 | } | 776 | } |
777 | if (dev_priv->dma_type == SAVAGE_DMA_AGP) { | 777 | if (dev_priv->dma_type == SAVAGE_DMA_AGP) { |
778 | if (dev_priv->cmd_dma->type != _DRM_AGP) { | 778 | if (dev_priv->cmd_dma->type != _DRM_AGP) { |
779 | DRM_ERROR("AGP command DMA region is not a " | 779 | DRM_ERROR("AGP command DMA region is not a " |
780 | "_DRM_AGP map!\n"); | 780 | "_DRM_AGP map!\n"); |
781 | savage_do_cleanup_bci(dev); | 781 | savage_do_cleanup_bci(dev); |
782 | return DRM_ERR(EINVAL); | 782 | return -EINVAL; |
783 | } | 783 | } |
784 | drm_core_ioremap(dev_priv->cmd_dma, dev); | 784 | drm_core_ioremap(dev_priv->cmd_dma, dev); |
785 | if (!dev_priv->cmd_dma->handle) { | 785 | if (!dev_priv->cmd_dma->handle) { |
786 | DRM_ERROR("failed to ioremap command " | 786 | DRM_ERROR("failed to ioremap command " |
787 | "DMA region!\n"); | 787 | "DMA region!\n"); |
788 | savage_do_cleanup_bci(dev); | 788 | savage_do_cleanup_bci(dev); |
789 | return DRM_ERR(ENOMEM); | 789 | return -ENOMEM; |
790 | } | 790 | } |
791 | } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) { | 791 | } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) { |
792 | DRM_ERROR("PCI command DMA region is not a " | 792 | DRM_ERROR("PCI command DMA region is not a " |
793 | "_DRM_CONSISTENT map!\n"); | 793 | "_DRM_CONSISTENT map!\n"); |
794 | savage_do_cleanup_bci(dev); | 794 | savage_do_cleanup_bci(dev); |
795 | return DRM_ERR(EINVAL); | 795 | return -EINVAL; |
796 | } | 796 | } |
797 | } else { | 797 | } else { |
798 | dev_priv->cmd_dma = NULL; | 798 | dev_priv->cmd_dma = NULL; |
@@ -809,7 +809,7 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init) | |||
809 | if (!dev_priv->fake_dma.handle) { | 809 | if (!dev_priv->fake_dma.handle) { |
810 | DRM_ERROR("could not allocate faked DMA buffer!\n"); | 810 | DRM_ERROR("could not allocate faked DMA buffer!\n"); |
811 | savage_do_cleanup_bci(dev); | 811 | savage_do_cleanup_bci(dev); |
812 | return DRM_ERR(ENOMEM); | 812 | return -ENOMEM; |
813 | } | 813 | } |
814 | dev_priv->cmd_dma = &dev_priv->fake_dma; | 814 | dev_priv->cmd_dma = &dev_priv->fake_dma; |
815 | dev_priv->dma_flush = savage_fake_dma_flush; | 815 | dev_priv->dma_flush = savage_fake_dma_flush; |
@@ -886,13 +886,13 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init) | |||
886 | if (savage_freelist_init(dev) < 0) { | 886 | if (savage_freelist_init(dev) < 0) { |
887 | DRM_ERROR("could not initialize freelist\n"); | 887 | DRM_ERROR("could not initialize freelist\n"); |
888 | savage_do_cleanup_bci(dev); | 888 | savage_do_cleanup_bci(dev); |
889 | return DRM_ERR(ENOMEM); | 889 | return -ENOMEM; |
890 | } | 890 | } |
891 | 891 | ||
892 | if (savage_dma_init(dev_priv) < 0) { | 892 | if (savage_dma_init(dev_priv) < 0) { |
893 | DRM_ERROR("could not initialize command DMA\n"); | 893 | DRM_ERROR("could not initialize command DMA\n"); |
894 | savage_do_cleanup_bci(dev); | 894 | savage_do_cleanup_bci(dev); |
895 | return DRM_ERR(ENOMEM); | 895 | return -ENOMEM; |
896 | } | 896 | } |
897 | 897 | ||
898 | return 0; | 898 | return 0; |
@@ -945,7 +945,7 @@ static int savage_bci_init(DRM_IOCTL_ARGS) | |||
945 | return savage_do_cleanup_bci(dev); | 945 | return savage_do_cleanup_bci(dev); |
946 | } | 946 | } |
947 | 947 | ||
948 | return DRM_ERR(EINVAL); | 948 | return -EINVAL; |
949 | } | 949 | } |
950 | 950 | ||
951 | static int savage_bci_event_emit(DRM_IOCTL_ARGS) | 951 | static int savage_bci_event_emit(DRM_IOCTL_ARGS) |
@@ -1015,16 +1015,16 @@ static int savage_bci_get_buffers(DRMFILE filp, struct drm_device *dev, struct d | |||
1015 | for (i = d->granted_count; i < d->request_count; i++) { | 1015 | for (i = d->granted_count; i < d->request_count; i++) { |
1016 | buf = savage_freelist_get(dev); | 1016 | buf = savage_freelist_get(dev); |
1017 | if (!buf) | 1017 | if (!buf) |
1018 | return DRM_ERR(EAGAIN); | 1018 | return -EAGAIN; |
1019 | 1019 | ||
1020 | buf->filp = filp; | 1020 | buf->filp = filp; |
1021 | 1021 | ||
1022 | if (DRM_COPY_TO_USER(&d->request_indices[i], | 1022 | if (DRM_COPY_TO_USER(&d->request_indices[i], |
1023 | &buf->idx, sizeof(buf->idx))) | 1023 | &buf->idx, sizeof(buf->idx))) |
1024 | return DRM_ERR(EFAULT); | 1024 | return -EFAULT; |
1025 | if (DRM_COPY_TO_USER(&d->request_sizes[i], | 1025 | if (DRM_COPY_TO_USER(&d->request_sizes[i], |
1026 | &buf->total, sizeof(buf->total))) | 1026 | &buf->total, sizeof(buf->total))) |
1027 | return DRM_ERR(EFAULT); | 1027 | return -EFAULT; |
1028 | 1028 | ||
1029 | d->granted_count++; | 1029 | d->granted_count++; |
1030 | } | 1030 | } |
@@ -1047,7 +1047,7 @@ int savage_bci_buffers(DRM_IOCTL_ARGS) | |||
1047 | if (d.send_count != 0) { | 1047 | if (d.send_count != 0) { |
1048 | DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", | 1048 | DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", |
1049 | DRM_CURRENTPID, d.send_count); | 1049 | DRM_CURRENTPID, d.send_count); |
1050 | return DRM_ERR(EINVAL); | 1050 | return -EINVAL; |
1051 | } | 1051 | } |
1052 | 1052 | ||
1053 | /* We'll send you buffers. | 1053 | /* We'll send you buffers. |
@@ -1055,7 +1055,7 @@ int savage_bci_buffers(DRM_IOCTL_ARGS) | |||
1055 | if (d.request_count < 0 || d.request_count > dma->buf_count) { | 1055 | if (d.request_count < 0 || d.request_count > dma->buf_count) { |
1056 | DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", | 1056 | DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", |
1057 | DRM_CURRENTPID, d.request_count, dma->buf_count); | 1057 | DRM_CURRENTPID, d.request_count, dma->buf_count); |
1058 | return DRM_ERR(EINVAL); | 1058 | return -EINVAL; |
1059 | } | 1059 | } |
1060 | 1060 | ||
1061 | d.granted_count = 0; | 1061 | d.granted_count = 0; |
diff --git a/drivers/char/drm/savage_state.c b/drivers/char/drm/savage_state.c index 77497841478a..00e59bfd658a 100644 --- a/drivers/char/drm/savage_state.c +++ b/drivers/char/drm/savage_state.c | |||
@@ -83,7 +83,7 @@ static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit, | |||
83 | { | 83 | { |
84 | if ((addr & 6) != 2) { /* reserved bits */ | 84 | if ((addr & 6) != 2) { /* reserved bits */ |
85 | DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr); | 85 | DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr); |
86 | return DRM_ERR(EINVAL); | 86 | return -EINVAL; |
87 | } | 87 | } |
88 | if (!(addr & 1)) { /* local */ | 88 | if (!(addr & 1)) { /* local */ |
89 | addr &= ~7; | 89 | addr &= ~7; |
@@ -92,13 +92,13 @@ static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit, | |||
92 | DRM_ERROR | 92 | DRM_ERROR |
93 | ("bad texAddr%d %08x (local addr out of range)\n", | 93 | ("bad texAddr%d %08x (local addr out of range)\n", |
94 | unit, addr); | 94 | unit, addr); |
95 | return DRM_ERR(EINVAL); | 95 | return -EINVAL; |
96 | } | 96 | } |
97 | } else { /* AGP */ | 97 | } else { /* AGP */ |
98 | if (!dev_priv->agp_textures) { | 98 | if (!dev_priv->agp_textures) { |
99 | DRM_ERROR("bad texAddr%d %08x (AGP not available)\n", | 99 | DRM_ERROR("bad texAddr%d %08x (AGP not available)\n", |
100 | unit, addr); | 100 | unit, addr); |
101 | return DRM_ERR(EINVAL); | 101 | return -EINVAL; |
102 | } | 102 | } |
103 | addr &= ~7; | 103 | addr &= ~7; |
104 | if (addr < dev_priv->agp_textures->offset || | 104 | if (addr < dev_priv->agp_textures->offset || |
@@ -107,7 +107,7 @@ static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit, | |||
107 | DRM_ERROR | 107 | DRM_ERROR |
108 | ("bad texAddr%d %08x (AGP addr out of range)\n", | 108 | ("bad texAddr%d %08x (AGP addr out of range)\n", |
109 | unit, addr); | 109 | unit, addr); |
110 | return DRM_ERR(EINVAL); | 110 | return -EINVAL; |
111 | } | 111 | } |
112 | } | 112 | } |
113 | return 0; | 113 | return 0; |
@@ -133,7 +133,7 @@ static int savage_verify_state_s3d(drm_savage_private_t * dev_priv, | |||
133 | start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { | 133 | start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { |
134 | DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", | 134 | DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", |
135 | start, start + count - 1); | 135 | start, start + count - 1); |
136 | return DRM_ERR(EINVAL); | 136 | return -EINVAL; |
137 | } | 137 | } |
138 | 138 | ||
139 | SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart, | 139 | SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart, |
@@ -165,7 +165,7 @@ static int savage_verify_state_s4(drm_savage_private_t * dev_priv, | |||
165 | start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) { | 165 | start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) { |
166 | DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", | 166 | DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", |
167 | start, start + count - 1); | 167 | start, start + count - 1); |
168 | return DRM_ERR(EINVAL); | 168 | return -EINVAL; |
169 | } | 169 | } |
170 | 170 | ||
171 | SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0, | 171 | SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0, |
@@ -289,7 +289,7 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv, | |||
289 | 289 | ||
290 | if (!dmabuf) { | 290 | if (!dmabuf) { |
291 | DRM_ERROR("called without dma buffers!\n"); | 291 | DRM_ERROR("called without dma buffers!\n"); |
292 | return DRM_ERR(EINVAL); | 292 | return -EINVAL; |
293 | } | 293 | } |
294 | 294 | ||
295 | if (!n) | 295 | if (!n) |
@@ -303,7 +303,7 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv, | |||
303 | if (n % 3 != 0) { | 303 | if (n % 3 != 0) { |
304 | DRM_ERROR("wrong number of vertices %u in TRILIST\n", | 304 | DRM_ERROR("wrong number of vertices %u in TRILIST\n", |
305 | n); | 305 | n); |
306 | return DRM_ERR(EINVAL); | 306 | return -EINVAL; |
307 | } | 307 | } |
308 | break; | 308 | break; |
309 | case SAVAGE_PRIM_TRISTRIP: | 309 | case SAVAGE_PRIM_TRISTRIP: |
@@ -312,18 +312,18 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv, | |||
312 | DRM_ERROR | 312 | DRM_ERROR |
313 | ("wrong number of vertices %u in TRIFAN/STRIP\n", | 313 | ("wrong number of vertices %u in TRIFAN/STRIP\n", |
314 | n); | 314 | n); |
315 | return DRM_ERR(EINVAL); | 315 | return -EINVAL; |
316 | } | 316 | } |
317 | break; | 317 | break; |
318 | default: | 318 | default: |
319 | DRM_ERROR("invalid primitive type %u\n", prim); | 319 | DRM_ERROR("invalid primitive type %u\n", prim); |
320 | return DRM_ERR(EINVAL); | 320 | return -EINVAL; |
321 | } | 321 | } |
322 | 322 | ||
323 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | 323 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { |
324 | if (skip != 0) { | 324 | if (skip != 0) { |
325 | DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); | 325 | DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); |
326 | return DRM_ERR(EINVAL); | 326 | return -EINVAL; |
327 | } | 327 | } |
328 | } else { | 328 | } else { |
329 | unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - | 329 | unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - |
@@ -331,18 +331,18 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv, | |||
331 | (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); | 331 | (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); |
332 | if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { | 332 | if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { |
333 | DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); | 333 | DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); |
334 | return DRM_ERR(EINVAL); | 334 | return -EINVAL; |
335 | } | 335 | } |
336 | if (reorder) { | 336 | if (reorder) { |
337 | DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); | 337 | DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); |
338 | return DRM_ERR(EINVAL); | 338 | return -EINVAL; |
339 | } | 339 | } |
340 | } | 340 | } |
341 | 341 | ||
342 | if (start + n > dmabuf->total / 32) { | 342 | if (start + n > dmabuf->total / 32) { |
343 | DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", | 343 | DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", |
344 | start, start + n - 1, dmabuf->total / 32); | 344 | start, start + n - 1, dmabuf->total / 32); |
345 | return DRM_ERR(EINVAL); | 345 | return -EINVAL; |
346 | } | 346 | } |
347 | 347 | ||
348 | /* Vertex DMA doesn't work with command DMA at the same time, | 348 | /* Vertex DMA doesn't work with command DMA at the same time, |
@@ -440,7 +440,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv, | |||
440 | if (n % 3 != 0) { | 440 | if (n % 3 != 0) { |
441 | DRM_ERROR("wrong number of vertices %u in TRILIST\n", | 441 | DRM_ERROR("wrong number of vertices %u in TRILIST\n", |
442 | n); | 442 | n); |
443 | return DRM_ERR(EINVAL); | 443 | return -EINVAL; |
444 | } | 444 | } |
445 | break; | 445 | break; |
446 | case SAVAGE_PRIM_TRISTRIP: | 446 | case SAVAGE_PRIM_TRISTRIP: |
@@ -449,24 +449,24 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv, | |||
449 | DRM_ERROR | 449 | DRM_ERROR |
450 | ("wrong number of vertices %u in TRIFAN/STRIP\n", | 450 | ("wrong number of vertices %u in TRIFAN/STRIP\n", |
451 | n); | 451 | n); |
452 | return DRM_ERR(EINVAL); | 452 | return -EINVAL; |
453 | } | 453 | } |
454 | break; | 454 | break; |
455 | default: | 455 | default: |
456 | DRM_ERROR("invalid primitive type %u\n", prim); | 456 | DRM_ERROR("invalid primitive type %u\n", prim); |
457 | return DRM_ERR(EINVAL); | 457 | return -EINVAL; |
458 | } | 458 | } |
459 | 459 | ||
460 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | 460 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { |
461 | if (skip > SAVAGE_SKIP_ALL_S3D) { | 461 | if (skip > SAVAGE_SKIP_ALL_S3D) { |
462 | DRM_ERROR("invalid skip flags 0x%04x\n", skip); | 462 | DRM_ERROR("invalid skip flags 0x%04x\n", skip); |
463 | return DRM_ERR(EINVAL); | 463 | return -EINVAL; |
464 | } | 464 | } |
465 | vtx_size = 8; /* full vertex */ | 465 | vtx_size = 8; /* full vertex */ |
466 | } else { | 466 | } else { |
467 | if (skip > SAVAGE_SKIP_ALL_S4) { | 467 | if (skip > SAVAGE_SKIP_ALL_S4) { |
468 | DRM_ERROR("invalid skip flags 0x%04x\n", skip); | 468 | DRM_ERROR("invalid skip flags 0x%04x\n", skip); |
469 | return DRM_ERR(EINVAL); | 469 | return -EINVAL; |
470 | } | 470 | } |
471 | vtx_size = 10; /* full vertex */ | 471 | vtx_size = 10; /* full vertex */ |
472 | } | 472 | } |
@@ -478,13 +478,13 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv, | |||
478 | if (vtx_size > vb_stride) { | 478 | if (vtx_size > vb_stride) { |
479 | DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", | 479 | DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", |
480 | vtx_size, vb_stride); | 480 | vtx_size, vb_stride); |
481 | return DRM_ERR(EINVAL); | 481 | return -EINVAL; |
482 | } | 482 | } |
483 | 483 | ||
484 | if (start + n > vb_size / (vb_stride * 4)) { | 484 | if (start + n > vb_size / (vb_stride * 4)) { |
485 | DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", | 485 | DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", |
486 | start, start + n - 1, vb_size / (vb_stride * 4)); | 486 | start, start + n - 1, vb_size / (vb_stride * 4)); |
487 | return DRM_ERR(EINVAL); | 487 | return -EINVAL; |
488 | } | 488 | } |
489 | 489 | ||
490 | prim <<= 25; | 490 | prim <<= 25; |
@@ -547,7 +547,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv, | |||
547 | 547 | ||
548 | if (!dmabuf) { | 548 | if (!dmabuf) { |
549 | DRM_ERROR("called without dma buffers!\n"); | 549 | DRM_ERROR("called without dma buffers!\n"); |
550 | return DRM_ERR(EINVAL); | 550 | return -EINVAL; |
551 | } | 551 | } |
552 | 552 | ||
553 | if (!n) | 553 | if (!n) |
@@ -560,7 +560,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv, | |||
560 | case SAVAGE_PRIM_TRILIST: | 560 | case SAVAGE_PRIM_TRILIST: |
561 | if (n % 3 != 0) { | 561 | if (n % 3 != 0) { |
562 | DRM_ERROR("wrong number of indices %u in TRILIST\n", n); | 562 | DRM_ERROR("wrong number of indices %u in TRILIST\n", n); |
563 | return DRM_ERR(EINVAL); | 563 | return -EINVAL; |
564 | } | 564 | } |
565 | break; | 565 | break; |
566 | case SAVAGE_PRIM_TRISTRIP: | 566 | case SAVAGE_PRIM_TRISTRIP: |
@@ -568,18 +568,18 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv, | |||
568 | if (n < 3) { | 568 | if (n < 3) { |
569 | DRM_ERROR | 569 | DRM_ERROR |
570 | ("wrong number of indices %u in TRIFAN/STRIP\n", n); | 570 | ("wrong number of indices %u in TRIFAN/STRIP\n", n); |
571 | return DRM_ERR(EINVAL); | 571 | return -EINVAL; |
572 | } | 572 | } |
573 | break; | 573 | break; |
574 | default: | 574 | default: |
575 | DRM_ERROR("invalid primitive type %u\n", prim); | 575 | DRM_ERROR("invalid primitive type %u\n", prim); |
576 | return DRM_ERR(EINVAL); | 576 | return -EINVAL; |
577 | } | 577 | } |
578 | 578 | ||
579 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | 579 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { |
580 | if (skip != 0) { | 580 | if (skip != 0) { |
581 | DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); | 581 | DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); |
582 | return DRM_ERR(EINVAL); | 582 | return -EINVAL; |
583 | } | 583 | } |
584 | } else { | 584 | } else { |
585 | unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - | 585 | unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - |
@@ -587,11 +587,11 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv, | |||
587 | (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); | 587 | (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); |
588 | if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { | 588 | if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { |
589 | DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); | 589 | DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); |
590 | return DRM_ERR(EINVAL); | 590 | return -EINVAL; |
591 | } | 591 | } |
592 | if (reorder) { | 592 | if (reorder) { |
593 | DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); | 593 | DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); |
594 | return DRM_ERR(EINVAL); | 594 | return -EINVAL; |
595 | } | 595 | } |
596 | } | 596 | } |
597 | 597 | ||
@@ -628,7 +628,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv, | |||
628 | if (idx[i] > dmabuf->total / 32) { | 628 | if (idx[i] > dmabuf->total / 32) { |
629 | DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", | 629 | DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", |
630 | i, idx[i], dmabuf->total / 32); | 630 | i, idx[i], dmabuf->total / 32); |
631 | return DRM_ERR(EINVAL); | 631 | return -EINVAL; |
632 | } | 632 | } |
633 | } | 633 | } |
634 | 634 | ||
@@ -698,7 +698,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv, | |||
698 | case SAVAGE_PRIM_TRILIST: | 698 | case SAVAGE_PRIM_TRILIST: |
699 | if (n % 3 != 0) { | 699 | if (n % 3 != 0) { |
700 | DRM_ERROR("wrong number of indices %u in TRILIST\n", n); | 700 | DRM_ERROR("wrong number of indices %u in TRILIST\n", n); |
701 | return DRM_ERR(EINVAL); | 701 | return -EINVAL; |
702 | } | 702 | } |
703 | break; | 703 | break; |
704 | case SAVAGE_PRIM_TRISTRIP: | 704 | case SAVAGE_PRIM_TRISTRIP: |
@@ -706,24 +706,24 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv, | |||
706 | if (n < 3) { | 706 | if (n < 3) { |
707 | DRM_ERROR | 707 | DRM_ERROR |
708 | ("wrong number of indices %u in TRIFAN/STRIP\n", n); | 708 | ("wrong number of indices %u in TRIFAN/STRIP\n", n); |
709 | return DRM_ERR(EINVAL); | 709 | return -EINVAL; |
710 | } | 710 | } |
711 | break; | 711 | break; |
712 | default: | 712 | default: |
713 | DRM_ERROR("invalid primitive type %u\n", prim); | 713 | DRM_ERROR("invalid primitive type %u\n", prim); |
714 | return DRM_ERR(EINVAL); | 714 | return -EINVAL; |
715 | } | 715 | } |
716 | 716 | ||
717 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | 717 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { |
718 | if (skip > SAVAGE_SKIP_ALL_S3D) { | 718 | if (skip > SAVAGE_SKIP_ALL_S3D) { |
719 | DRM_ERROR("invalid skip flags 0x%04x\n", skip); | 719 | DRM_ERROR("invalid skip flags 0x%04x\n", skip); |
720 | return DRM_ERR(EINVAL); | 720 | return -EINVAL; |
721 | } | 721 | } |
722 | vtx_size = 8; /* full vertex */ | 722 | vtx_size = 8; /* full vertex */ |
723 | } else { | 723 | } else { |
724 | if (skip > SAVAGE_SKIP_ALL_S4) { | 724 | if (skip > SAVAGE_SKIP_ALL_S4) { |
725 | DRM_ERROR("invalid skip flags 0x%04x\n", skip); | 725 | DRM_ERROR("invalid skip flags 0x%04x\n", skip); |
726 | return DRM_ERR(EINVAL); | 726 | return -EINVAL; |
727 | } | 727 | } |
728 | vtx_size = 10; /* full vertex */ | 728 | vtx_size = 10; /* full vertex */ |
729 | } | 729 | } |
@@ -735,7 +735,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv, | |||
735 | if (vtx_size > vb_stride) { | 735 | if (vtx_size > vb_stride) { |
736 | DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", | 736 | DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", |
737 | vtx_size, vb_stride); | 737 | vtx_size, vb_stride); |
738 | return DRM_ERR(EINVAL); | 738 | return -EINVAL; |
739 | } | 739 | } |
740 | 740 | ||
741 | prim <<= 25; | 741 | prim <<= 25; |
@@ -748,7 +748,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv, | |||
748 | if (idx[i] > vb_size / (vb_stride * 4)) { | 748 | if (idx[i] > vb_size / (vb_stride * 4)) { |
749 | DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", | 749 | DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", |
750 | i, idx[i], vb_size / (vb_stride * 4)); | 750 | i, idx[i], vb_size / (vb_stride * 4)); |
751 | return DRM_ERR(EINVAL); | 751 | return -EINVAL; |
752 | } | 752 | } |
753 | } | 753 | } |
754 | 754 | ||
@@ -942,7 +942,7 @@ static int savage_dispatch_draw(drm_savage_private_t * dev_priv, | |||
942 | DRM_ERROR("IMPLEMENTATION ERROR: " | 942 | DRM_ERROR("IMPLEMENTATION ERROR: " |
943 | "non-drawing-command %d\n", | 943 | "non-drawing-command %d\n", |
944 | cmd_header.cmd.cmd); | 944 | cmd_header.cmd.cmd); |
945 | return DRM_ERR(EINVAL); | 945 | return -EINVAL; |
946 | } | 946 | } |
947 | 947 | ||
948 | if (ret != 0) | 948 | if (ret != 0) |
@@ -979,7 +979,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) | |||
979 | DRM_ERROR | 979 | DRM_ERROR |
980 | ("vertex buffer index %u out of range (0-%u)\n", | 980 | ("vertex buffer index %u out of range (0-%u)\n", |
981 | cmdbuf.dma_idx, dma->buf_count - 1); | 981 | cmdbuf.dma_idx, dma->buf_count - 1); |
982 | return DRM_ERR(EINVAL); | 982 | return -EINVAL; |
983 | } | 983 | } |
984 | dmabuf = dma->buflist[cmdbuf.dma_idx]; | 984 | dmabuf = dma->buflist[cmdbuf.dma_idx]; |
985 | } else { | 985 | } else { |
@@ -994,26 +994,26 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) | |||
994 | if (cmdbuf.size) { | 994 | if (cmdbuf.size) { |
995 | kcmd_addr = drm_alloc(cmdbuf.size * 8, DRM_MEM_DRIVER); | 995 | kcmd_addr = drm_alloc(cmdbuf.size * 8, DRM_MEM_DRIVER); |
996 | if (kcmd_addr == NULL) | 996 | if (kcmd_addr == NULL) |
997 | return DRM_ERR(ENOMEM); | 997 | return -ENOMEM; |
998 | 998 | ||
999 | if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf.cmd_addr, | 999 | if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf.cmd_addr, |
1000 | cmdbuf.size * 8)) | 1000 | cmdbuf.size * 8)) |
1001 | { | 1001 | { |
1002 | drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER); | 1002 | drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER); |
1003 | return DRM_ERR(EFAULT); | 1003 | return -EFAULT; |
1004 | } | 1004 | } |
1005 | cmdbuf.cmd_addr = kcmd_addr; | 1005 | cmdbuf.cmd_addr = kcmd_addr; |
1006 | } | 1006 | } |
1007 | if (cmdbuf.vb_size) { | 1007 | if (cmdbuf.vb_size) { |
1008 | kvb_addr = drm_alloc(cmdbuf.vb_size, DRM_MEM_DRIVER); | 1008 | kvb_addr = drm_alloc(cmdbuf.vb_size, DRM_MEM_DRIVER); |
1009 | if (kvb_addr == NULL) { | 1009 | if (kvb_addr == NULL) { |
1010 | ret = DRM_ERR(ENOMEM); | 1010 | ret = -ENOMEM; |
1011 | goto done; | 1011 | goto done; |
1012 | } | 1012 | } |
1013 | 1013 | ||
1014 | if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf.vb_addr, | 1014 | if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf.vb_addr, |
1015 | cmdbuf.vb_size)) { | 1015 | cmdbuf.vb_size)) { |
1016 | ret = DRM_ERR(EFAULT); | 1016 | ret = -EFAULT; |
1017 | goto done; | 1017 | goto done; |
1018 | } | 1018 | } |
1019 | cmdbuf.vb_addr = kvb_addr; | 1019 | cmdbuf.vb_addr = kvb_addr; |
@@ -1022,13 +1022,13 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) | |||
1022 | kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(struct drm_clip_rect), | 1022 | kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(struct drm_clip_rect), |
1023 | DRM_MEM_DRIVER); | 1023 | DRM_MEM_DRIVER); |
1024 | if (kbox_addr == NULL) { | 1024 | if (kbox_addr == NULL) { |
1025 | ret = DRM_ERR(ENOMEM); | 1025 | ret = -ENOMEM; |
1026 | goto done; | 1026 | goto done; |
1027 | } | 1027 | } |
1028 | 1028 | ||
1029 | if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr, | 1029 | if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr, |
1030 | cmdbuf.nbox * sizeof(struct drm_clip_rect))) { | 1030 | cmdbuf.nbox * sizeof(struct drm_clip_rect))) { |
1031 | ret = DRM_ERR(EFAULT); | 1031 | ret = -EFAULT; |
1032 | goto done; | 1032 | goto done; |
1033 | } | 1033 | } |
1034 | cmdbuf.box_addr = kbox_addr; | 1034 | cmdbuf.box_addr = kbox_addr; |
@@ -1061,7 +1061,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) | |||
1061 | DRM_ERROR("indexed drawing command extends " | 1061 | DRM_ERROR("indexed drawing command extends " |
1062 | "beyond end of command buffer\n"); | 1062 | "beyond end of command buffer\n"); |
1063 | DMA_FLUSH(); | 1063 | DMA_FLUSH(); |
1064 | return DRM_ERR(EINVAL); | 1064 | return -EINVAL; |
1065 | } | 1065 | } |
1066 | /* fall through */ | 1066 | /* fall through */ |
1067 | case SAVAGE_CMD_DMA_PRIM: | 1067 | case SAVAGE_CMD_DMA_PRIM: |
@@ -1094,7 +1094,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) | |||
1094 | DRM_ERROR("command SAVAGE_CMD_STATE extends " | 1094 | DRM_ERROR("command SAVAGE_CMD_STATE extends " |
1095 | "beyond end of command buffer\n"); | 1095 | "beyond end of command buffer\n"); |
1096 | DMA_FLUSH(); | 1096 | DMA_FLUSH(); |
1097 | ret = DRM_ERR(EINVAL); | 1097 | ret = -EINVAL; |
1098 | goto done; | 1098 | goto done; |
1099 | } | 1099 | } |
1100 | ret = savage_dispatch_state(dev_priv, &cmd_header, | 1100 | ret = savage_dispatch_state(dev_priv, &cmd_header, |
@@ -1107,7 +1107,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) | |||
1107 | DRM_ERROR("command SAVAGE_CMD_CLEAR extends " | 1107 | DRM_ERROR("command SAVAGE_CMD_CLEAR extends " |
1108 | "beyond end of command buffer\n"); | 1108 | "beyond end of command buffer\n"); |
1109 | DMA_FLUSH(); | 1109 | DMA_FLUSH(); |
1110 | ret = DRM_ERR(EINVAL); | 1110 | ret = -EINVAL; |
1111 | goto done; | 1111 | goto done; |
1112 | } | 1112 | } |
1113 | ret = savage_dispatch_clear(dev_priv, &cmd_header, | 1113 | ret = savage_dispatch_clear(dev_priv, &cmd_header, |
@@ -1123,7 +1123,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) | |||
1123 | default: | 1123 | default: |
1124 | DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd); | 1124 | DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd); |
1125 | DMA_FLUSH(); | 1125 | DMA_FLUSH(); |
1126 | ret = DRM_ERR(EINVAL); | 1126 | ret = -EINVAL; |
1127 | goto done; | 1127 | goto done; |
1128 | } | 1128 | } |
1129 | 1129 | ||
diff --git a/drivers/char/drm/sis_drv.c b/drivers/char/drm/sis_drv.c index 1912f5857051..7dacc64e9b56 100644 --- a/drivers/char/drm/sis_drv.c +++ b/drivers/char/drm/sis_drv.c | |||
@@ -42,7 +42,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset) | |||
42 | 42 | ||
43 | dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER); | 43 | dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER); |
44 | if (dev_priv == NULL) | 44 | if (dev_priv == NULL) |
45 | return DRM_ERR(ENOMEM); | 45 | return -ENOMEM; |
46 | 46 | ||
47 | dev->dev_private = (void *)dev_priv; | 47 | dev->dev_private = (void *)dev_priv; |
48 | dev_priv->chipset = chipset; | 48 | dev_priv->chipset = chipset; |
diff --git a/drivers/char/drm/sis_mm.c b/drivers/char/drm/sis_mm.c index 441bbdbf1510..244006a107b7 100644 --- a/drivers/char/drm/sis_mm.c +++ b/drivers/char/drm/sis_mm.c | |||
@@ -140,7 +140,7 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file * priv, | |||
140 | dev_priv->agp_initialized)) { | 140 | dev_priv->agp_initialized)) { |
141 | DRM_ERROR | 141 | DRM_ERROR |
142 | ("Attempt to allocate from uninitialized memory manager.\n"); | 142 | ("Attempt to allocate from uninitialized memory manager.\n"); |
143 | return DRM_ERR(EINVAL); | 143 | return -EINVAL; |
144 | } | 144 | } |
145 | 145 | ||
146 | mem.size = (mem.size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT; | 146 | mem.size = (mem.size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT; |
@@ -159,7 +159,7 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file * priv, | |||
159 | mem.offset = 0; | 159 | mem.offset = 0; |
160 | mem.size = 0; | 160 | mem.size = 0; |
161 | mem.free = 0; | 161 | mem.free = 0; |
162 | retval = DRM_ERR(ENOMEM); | 162 | retval = -ENOMEM; |
163 | } | 163 | } |
164 | 164 | ||
165 | DRM_COPY_TO_USER_IOCTL(argp, mem, sizeof(mem)); | 165 | DRM_COPY_TO_USER_IOCTL(argp, mem, sizeof(mem)); |
diff --git a/drivers/char/drm/via_dma.c b/drivers/char/drm/via_dma.c index 7ff2b623c2d4..2d0035f83a7b 100644 --- a/drivers/char/drm/via_dma.c +++ b/drivers/char/drm/via_dma.c | |||
@@ -175,24 +175,24 @@ static int via_initialize(struct drm_device * dev, | |||
175 | { | 175 | { |
176 | if (!dev_priv || !dev_priv->mmio) { | 176 | if (!dev_priv || !dev_priv->mmio) { |
177 | DRM_ERROR("via_dma_init called before via_map_init\n"); | 177 | DRM_ERROR("via_dma_init called before via_map_init\n"); |
178 | return DRM_ERR(EFAULT); | 178 | return -EFAULT; |
179 | } | 179 | } |
180 | 180 | ||
181 | if (dev_priv->ring.virtual_start != NULL) { | 181 | if (dev_priv->ring.virtual_start != NULL) { |
182 | DRM_ERROR("%s called again without calling cleanup\n", | 182 | DRM_ERROR("%s called again without calling cleanup\n", |
183 | __FUNCTION__); | 183 | __FUNCTION__); |
184 | return DRM_ERR(EFAULT); | 184 | return -EFAULT; |
185 | } | 185 | } |
186 | 186 | ||
187 | if (!dev->agp || !dev->agp->base) { | 187 | if (!dev->agp || !dev->agp->base) { |
188 | DRM_ERROR("%s called with no agp memory available\n", | 188 | DRM_ERROR("%s called with no agp memory available\n", |
189 | __FUNCTION__); | 189 | __FUNCTION__); |
190 | return DRM_ERR(EFAULT); | 190 | return -EFAULT; |
191 | } | 191 | } |
192 | 192 | ||
193 | if (dev_priv->chipset == VIA_DX9_0) { | 193 | if (dev_priv->chipset == VIA_DX9_0) { |
194 | DRM_ERROR("AGP DMA is not supported on this chip\n"); | 194 | DRM_ERROR("AGP DMA is not supported on this chip\n"); |
195 | return DRM_ERR(EINVAL); | 195 | return -EINVAL; |
196 | } | 196 | } |
197 | 197 | ||
198 | dev_priv->ring.map.offset = dev->agp->base + init->offset; | 198 | dev_priv->ring.map.offset = dev->agp->base + init->offset; |
@@ -207,7 +207,7 @@ static int via_initialize(struct drm_device * dev, | |||
207 | via_dma_cleanup(dev); | 207 | via_dma_cleanup(dev); |
208 | DRM_ERROR("can not ioremap virtual address for" | 208 | DRM_ERROR("can not ioremap virtual address for" |
209 | " ring buffer\n"); | 209 | " ring buffer\n"); |
210 | return DRM_ERR(ENOMEM); | 210 | return -ENOMEM; |
211 | } | 211 | } |
212 | 212 | ||
213 | dev_priv->ring.virtual_start = dev_priv->ring.map.handle; | 213 | dev_priv->ring.virtual_start = dev_priv->ring.map.handle; |
@@ -240,22 +240,22 @@ static int via_dma_init(DRM_IOCTL_ARGS) | |||
240 | switch (init.func) { | 240 | switch (init.func) { |
241 | case VIA_INIT_DMA: | 241 | case VIA_INIT_DMA: |
242 | if (!DRM_SUSER(DRM_CURPROC)) | 242 | if (!DRM_SUSER(DRM_CURPROC)) |
243 | retcode = DRM_ERR(EPERM); | 243 | retcode = -EPERM; |
244 | else | 244 | else |
245 | retcode = via_initialize(dev, dev_priv, &init); | 245 | retcode = via_initialize(dev, dev_priv, &init); |
246 | break; | 246 | break; |
247 | case VIA_CLEANUP_DMA: | 247 | case VIA_CLEANUP_DMA: |
248 | if (!DRM_SUSER(DRM_CURPROC)) | 248 | if (!DRM_SUSER(DRM_CURPROC)) |
249 | retcode = DRM_ERR(EPERM); | 249 | retcode = -EPERM; |
250 | else | 250 | else |
251 | retcode = via_dma_cleanup(dev); | 251 | retcode = via_dma_cleanup(dev); |
252 | break; | 252 | break; |
253 | case VIA_DMA_INITIALIZED: | 253 | case VIA_DMA_INITIALIZED: |
254 | retcode = (dev_priv->ring.virtual_start != NULL) ? | 254 | retcode = (dev_priv->ring.virtual_start != NULL) ? |
255 | 0 : DRM_ERR(EFAULT); | 255 | 0 : -EFAULT; |
256 | break; | 256 | break; |
257 | default: | 257 | default: |
258 | retcode = DRM_ERR(EINVAL); | 258 | retcode = -EINVAL; |
259 | break; | 259 | break; |
260 | } | 260 | } |
261 | 261 | ||
@@ -273,15 +273,15 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * | |||
273 | if (dev_priv->ring.virtual_start == NULL) { | 273 | if (dev_priv->ring.virtual_start == NULL) { |
274 | DRM_ERROR("%s called without initializing AGP ring buffer.\n", | 274 | DRM_ERROR("%s called without initializing AGP ring buffer.\n", |
275 | __FUNCTION__); | 275 | __FUNCTION__); |
276 | return DRM_ERR(EFAULT); | 276 | return -EFAULT; |
277 | } | 277 | } |
278 | 278 | ||
279 | if (cmd->size > VIA_PCI_BUF_SIZE) { | 279 | if (cmd->size > VIA_PCI_BUF_SIZE) { |
280 | return DRM_ERR(ENOMEM); | 280 | return -ENOMEM; |
281 | } | 281 | } |
282 | 282 | ||
283 | if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size)) | 283 | if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size)) |
284 | return DRM_ERR(EFAULT); | 284 | return -EFAULT; |
285 | 285 | ||
286 | /* | 286 | /* |
287 | * Running this function on AGP memory is dead slow. Therefore | 287 | * Running this function on AGP memory is dead slow. Therefore |
@@ -297,7 +297,7 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * | |||
297 | 297 | ||
298 | vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size); | 298 | vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size); |
299 | if (vb == NULL) { | 299 | if (vb == NULL) { |
300 | return DRM_ERR(EAGAIN); | 300 | return -EAGAIN; |
301 | } | 301 | } |
302 | 302 | ||
303 | memcpy(vb, dev_priv->pci_buf, cmd->size); | 303 | memcpy(vb, dev_priv->pci_buf, cmd->size); |
@@ -321,7 +321,7 @@ int via_driver_dma_quiescent(struct drm_device * dev) | |||
321 | drm_via_private_t *dev_priv = dev->dev_private; | 321 | drm_via_private_t *dev_priv = dev->dev_private; |
322 | 322 | ||
323 | if (!via_wait_idle(dev_priv)) { | 323 | if (!via_wait_idle(dev_priv)) { |
324 | return DRM_ERR(EBUSY); | 324 | return -EBUSY; |
325 | } | 325 | } |
326 | return 0; | 326 | return 0; |
327 | } | 327 | } |
@@ -363,10 +363,10 @@ static int via_dispatch_pci_cmdbuffer(struct drm_device * dev, | |||
363 | int ret; | 363 | int ret; |
364 | 364 | ||
365 | if (cmd->size > VIA_PCI_BUF_SIZE) { | 365 | if (cmd->size > VIA_PCI_BUF_SIZE) { |
366 | return DRM_ERR(ENOMEM); | 366 | return -ENOMEM; |
367 | } | 367 | } |
368 | if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size)) | 368 | if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size)) |
369 | return DRM_ERR(EFAULT); | 369 | return -EFAULT; |
370 | 370 | ||
371 | if ((ret = | 371 | if ((ret = |
372 | via_verify_command_stream((uint32_t *) dev_priv->pci_buf, | 372 | via_verify_command_stream((uint32_t *) dev_priv->pci_buf, |
@@ -669,7 +669,7 @@ static int via_cmdbuf_size(DRM_IOCTL_ARGS) | |||
669 | if (dev_priv->ring.virtual_start == NULL) { | 669 | if (dev_priv->ring.virtual_start == NULL) { |
670 | DRM_ERROR("%s called without initializing AGP ring buffer.\n", | 670 | DRM_ERROR("%s called without initializing AGP ring buffer.\n", |
671 | __FUNCTION__); | 671 | __FUNCTION__); |
672 | return DRM_ERR(EFAULT); | 672 | return -EFAULT; |
673 | } | 673 | } |
674 | 674 | ||
675 | DRM_COPY_FROM_USER_IOCTL(d_siz, (drm_via_cmdbuf_size_t __user *) data, | 675 | DRM_COPY_FROM_USER_IOCTL(d_siz, (drm_via_cmdbuf_size_t __user *) data, |
@@ -687,7 +687,7 @@ static int via_cmdbuf_size(DRM_IOCTL_ARGS) | |||
687 | } | 687 | } |
688 | if (!count) { | 688 | if (!count) { |
689 | DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n"); | 689 | DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n"); |
690 | ret = DRM_ERR(EAGAIN); | 690 | ret = -EAGAIN; |
691 | } | 691 | } |
692 | break; | 692 | break; |
693 | case VIA_CMDBUF_LAG: | 693 | case VIA_CMDBUF_LAG: |
@@ -699,11 +699,11 @@ static int via_cmdbuf_size(DRM_IOCTL_ARGS) | |||
699 | } | 699 | } |
700 | if (!count) { | 700 | if (!count) { |
701 | DRM_ERROR("VIA_CMDBUF_LAG timed out.\n"); | 701 | DRM_ERROR("VIA_CMDBUF_LAG timed out.\n"); |
702 | ret = DRM_ERR(EAGAIN); | 702 | ret = -EAGAIN; |
703 | } | 703 | } |
704 | break; | 704 | break; |
705 | default: | 705 | default: |
706 | ret = DRM_ERR(EFAULT); | 706 | ret = -EFAULT; |
707 | } | 707 | } |
708 | d_siz.size = tmp_size; | 708 | d_siz.size = tmp_size; |
709 | 709 | ||
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c index 3dd1ed3d1bf5..cd204f35eced 100644 --- a/drivers/char/drm/via_dmablit.c +++ b/drivers/char/drm/via_dmablit.c | |||
@@ -237,7 +237,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) | |||
237 | first_pfn + 1; | 237 | first_pfn + 1; |
238 | 238 | ||
239 | if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages))) | 239 | if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages))) |
240 | return DRM_ERR(ENOMEM); | 240 | return -ENOMEM; |
241 | memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages); | 241 | memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages); |
242 | down_read(¤t->mm->mmap_sem); | 242 | down_read(¤t->mm->mmap_sem); |
243 | ret = get_user_pages(current, current->mm, | 243 | ret = get_user_pages(current, current->mm, |
@@ -251,7 +251,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) | |||
251 | if (ret < 0) | 251 | if (ret < 0) |
252 | return ret; | 252 | return ret; |
253 | vsg->state = dr_via_pages_locked; | 253 | vsg->state = dr_via_pages_locked; |
254 | return DRM_ERR(EINVAL); | 254 | return -EINVAL; |
255 | } | 255 | } |
256 | vsg->state = dr_via_pages_locked; | 256 | vsg->state = dr_via_pages_locked; |
257 | DRM_DEBUG("DMA pages locked\n"); | 257 | DRM_DEBUG("DMA pages locked\n"); |
@@ -274,13 +274,13 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg) | |||
274 | vsg->descriptors_per_page; | 274 | vsg->descriptors_per_page; |
275 | 275 | ||
276 | if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL))) | 276 | if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL))) |
277 | return DRM_ERR(ENOMEM); | 277 | return -ENOMEM; |
278 | 278 | ||
279 | vsg->state = dr_via_desc_pages_alloc; | 279 | vsg->state = dr_via_desc_pages_alloc; |
280 | for (i=0; i<vsg->num_desc_pages; ++i) { | 280 | for (i=0; i<vsg->num_desc_pages; ++i) { |
281 | if (NULL == (vsg->desc_pages[i] = | 281 | if (NULL == (vsg->desc_pages[i] = |
282 | (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL))) | 282 | (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL))) |
283 | return DRM_ERR(ENOMEM); | 283 | return -ENOMEM; |
284 | } | 284 | } |
285 | DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages, | 285 | DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages, |
286 | vsg->num_desc); | 286 | vsg->num_desc); |
@@ -593,7 +593,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli | |||
593 | 593 | ||
594 | if (xfer->num_lines <= 0 || xfer->line_length <= 0) { | 594 | if (xfer->num_lines <= 0 || xfer->line_length <= 0) { |
595 | DRM_ERROR("Zero size bitblt.\n"); | 595 | DRM_ERROR("Zero size bitblt.\n"); |
596 | return DRM_ERR(EINVAL); | 596 | return -EINVAL; |
597 | } | 597 | } |
598 | 598 | ||
599 | /* | 599 | /* |
@@ -606,7 +606,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli | |||
606 | if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) { | 606 | if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) { |
607 | DRM_ERROR("Too large system memory stride. Stride: %d, " | 607 | DRM_ERROR("Too large system memory stride. Stride: %d, " |
608 | "Length: %d\n", xfer->mem_stride, xfer->line_length); | 608 | "Length: %d\n", xfer->mem_stride, xfer->line_length); |
609 | return DRM_ERR(EINVAL); | 609 | return -EINVAL; |
610 | } | 610 | } |
611 | 611 | ||
612 | if ((xfer->mem_stride == xfer->line_length) && | 612 | if ((xfer->mem_stride == xfer->line_length) && |
@@ -624,7 +624,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli | |||
624 | 624 | ||
625 | if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) { | 625 | if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) { |
626 | DRM_ERROR("Too large PCI DMA bitblt.\n"); | 626 | DRM_ERROR("Too large PCI DMA bitblt.\n"); |
627 | return DRM_ERR(EINVAL); | 627 | return -EINVAL; |
628 | } | 628 | } |
629 | 629 | ||
630 | /* | 630 | /* |
@@ -635,7 +635,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli | |||
635 | if (xfer->mem_stride < xfer->line_length || | 635 | if (xfer->mem_stride < xfer->line_length || |
636 | abs(xfer->fb_stride) < xfer->line_length) { | 636 | abs(xfer->fb_stride) < xfer->line_length) { |
637 | DRM_ERROR("Invalid frame-buffer / memory stride.\n"); | 637 | DRM_ERROR("Invalid frame-buffer / memory stride.\n"); |
638 | return DRM_ERR(EINVAL); | 638 | return -EINVAL; |
639 | } | 639 | } |
640 | 640 | ||
641 | /* | 641 | /* |
@@ -648,7 +648,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli | |||
648 | if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) || | 648 | if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) || |
649 | ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) { | 649 | ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) { |
650 | DRM_ERROR("Invalid DRM bitblt alignment.\n"); | 650 | DRM_ERROR("Invalid DRM bitblt alignment.\n"); |
651 | return DRM_ERR(EINVAL); | 651 | return -EINVAL; |
652 | } | 652 | } |
653 | #else | 653 | #else |
654 | if ((((unsigned long)xfer->mem_addr & 15) || | 654 | if ((((unsigned long)xfer->mem_addr & 15) || |
@@ -656,7 +656,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli | |||
656 | ((xfer->num_lines > 1) && | 656 | ((xfer->num_lines > 1) && |
657 | ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) { | 657 | ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) { |
658 | DRM_ERROR("Invalid DRM bitblt alignment.\n"); | 658 | DRM_ERROR("Invalid DRM bitblt alignment.\n"); |
659 | return DRM_ERR(EINVAL); | 659 | return -EINVAL; |
660 | } | 660 | } |
661 | #endif | 661 | #endif |
662 | 662 | ||
@@ -696,7 +696,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine) | |||
696 | 696 | ||
697 | DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0); | 697 | DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0); |
698 | if (ret) { | 698 | if (ret) { |
699 | return (DRM_ERR(EINTR) == ret) ? DRM_ERR(EAGAIN) : ret; | 699 | return (-EINTR == ret) ? -EAGAIN : ret; |
700 | } | 700 | } |
701 | 701 | ||
702 | spin_lock_irqsave(&blitq->blit_lock, irqsave); | 702 | spin_lock_irqsave(&blitq->blit_lock, irqsave); |
@@ -740,7 +740,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer) | |||
740 | 740 | ||
741 | if (dev_priv == NULL) { | 741 | if (dev_priv == NULL) { |
742 | DRM_ERROR("Called without initialization.\n"); | 742 | DRM_ERROR("Called without initialization.\n"); |
743 | return DRM_ERR(EINVAL); | 743 | return -EINVAL; |
744 | } | 744 | } |
745 | 745 | ||
746 | engine = (xfer->to_fb) ? 0 : 1; | 746 | engine = (xfer->to_fb) ? 0 : 1; |
@@ -750,7 +750,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer) | |||
750 | } | 750 | } |
751 | if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) { | 751 | if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) { |
752 | via_dmablit_release_slot(blitq); | 752 | via_dmablit_release_slot(blitq); |
753 | return DRM_ERR(ENOMEM); | 753 | return -ENOMEM; |
754 | } | 754 | } |
755 | if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) { | 755 | if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) { |
756 | via_dmablit_release_slot(blitq); | 756 | via_dmablit_release_slot(blitq); |
@@ -790,12 +790,12 @@ via_dma_blit_sync( DRM_IOCTL_ARGS ) | |||
790 | DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync)); | 790 | DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync)); |
791 | 791 | ||
792 | if (sync.engine >= VIA_NUM_BLIT_ENGINES) | 792 | if (sync.engine >= VIA_NUM_BLIT_ENGINES) |
793 | return DRM_ERR(EINVAL); | 793 | return -EINVAL; |
794 | 794 | ||
795 | err = via_dmablit_sync(dev, sync.sync_handle, sync.engine); | 795 | err = via_dmablit_sync(dev, sync.sync_handle, sync.engine); |
796 | 796 | ||
797 | if (DRM_ERR(EINTR) == err) | 797 | if (-EINTR == err) |
798 | err = DRM_ERR(EAGAIN); | 798 | err = -EAGAIN; |
799 | 799 | ||
800 | return err; | 800 | return err; |
801 | } | 801 | } |
diff --git a/drivers/char/drm/via_irq.c b/drivers/char/drm/via_irq.c index 8dc99b5fbab6..a5297e70f94a 100644 --- a/drivers/char/drm/via_irq.c +++ b/drivers/char/drm/via_irq.c | |||
@@ -205,13 +205,13 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc | |||
205 | 205 | ||
206 | if (!dev_priv) { | 206 | if (!dev_priv) { |
207 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 207 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); |
208 | return DRM_ERR(EINVAL); | 208 | return -EINVAL; |
209 | } | 209 | } |
210 | 210 | ||
211 | if (irq >= drm_via_irq_num) { | 211 | if (irq >= drm_via_irq_num) { |
212 | DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, | 212 | DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, |
213 | irq); | 213 | irq); |
214 | return DRM_ERR(EINVAL); | 214 | return -EINVAL; |
215 | } | 215 | } |
216 | 216 | ||
217 | real_irq = dev_priv->irq_map[irq]; | 217 | real_irq = dev_priv->irq_map[irq]; |
@@ -219,7 +219,7 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc | |||
219 | if (real_irq < 0) { | 219 | if (real_irq < 0) { |
220 | DRM_ERROR("%s Video IRQ %d not available on this hardware.\n", | 220 | DRM_ERROR("%s Video IRQ %d not available on this hardware.\n", |
221 | __FUNCTION__, irq); | 221 | __FUNCTION__, irq); |
222 | return DRM_ERR(EINVAL); | 222 | return -EINVAL; |
223 | } | 223 | } |
224 | 224 | ||
225 | masks = dev_priv->irq_masks; | 225 | masks = dev_priv->irq_masks; |
@@ -343,13 +343,13 @@ int via_wait_irq(DRM_IOCTL_ARGS) | |||
343 | int force_sequence; | 343 | int force_sequence; |
344 | 344 | ||
345 | if (!dev->irq) | 345 | if (!dev->irq) |
346 | return DRM_ERR(EINVAL); | 346 | return -EINVAL; |
347 | 347 | ||
348 | DRM_COPY_FROM_USER_IOCTL(irqwait, argp, sizeof(irqwait)); | 348 | DRM_COPY_FROM_USER_IOCTL(irqwait, argp, sizeof(irqwait)); |
349 | if (irqwait.request.irq >= dev_priv->num_irqs) { | 349 | if (irqwait.request.irq >= dev_priv->num_irqs) { |
350 | DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, | 350 | DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, |
351 | irqwait.request.irq); | 351 | irqwait.request.irq); |
352 | return DRM_ERR(EINVAL); | 352 | return -EINVAL; |
353 | } | 353 | } |
354 | 354 | ||
355 | cur_irq += irqwait.request.irq; | 355 | cur_irq += irqwait.request.irq; |
@@ -361,13 +361,13 @@ int via_wait_irq(DRM_IOCTL_ARGS) | |||
361 | case VIA_IRQ_ABSOLUTE: | 361 | case VIA_IRQ_ABSOLUTE: |
362 | break; | 362 | break; |
363 | default: | 363 | default: |
364 | return DRM_ERR(EINVAL); | 364 | return -EINVAL; |
365 | } | 365 | } |
366 | 366 | ||
367 | if (irqwait.request.type & VIA_IRQ_SIGNAL) { | 367 | if (irqwait.request.type & VIA_IRQ_SIGNAL) { |
368 | DRM_ERROR("%s Signals on Via IRQs not implemented yet.\n", | 368 | DRM_ERROR("%s Signals on Via IRQs not implemented yet.\n", |
369 | __FUNCTION__); | 369 | __FUNCTION__); |
370 | return DRM_ERR(EINVAL); | 370 | return -EINVAL; |
371 | } | 371 | } |
372 | 372 | ||
373 | force_sequence = (irqwait.request.type & VIA_IRQ_FORCE_SEQUENCE); | 373 | force_sequence = (irqwait.request.type & VIA_IRQ_FORCE_SEQUENCE); |
diff --git a/drivers/char/drm/via_map.c b/drivers/char/drm/via_map.c index 7fb9d2a2cce2..6345c86e1f5e 100644 --- a/drivers/char/drm/via_map.c +++ b/drivers/char/drm/via_map.c | |||
@@ -102,7 +102,7 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset) | |||
102 | 102 | ||
103 | dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER); | 103 | dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER); |
104 | if (dev_priv == NULL) | 104 | if (dev_priv == NULL) |
105 | return DRM_ERR(ENOMEM); | 105 | return -ENOMEM; |
106 | 106 | ||
107 | dev->dev_private = (void *)dev_priv; | 107 | dev->dev_private = (void *)dev_priv; |
108 | 108 | ||
diff --git a/drivers/char/drm/via_mm.c b/drivers/char/drm/via_mm.c index 85d56acd9d82..36f2547254f9 100644 --- a/drivers/char/drm/via_mm.c +++ b/drivers/char/drm/via_mm.c | |||
@@ -136,7 +136,7 @@ int via_mem_alloc(DRM_IOCTL_ARGS) | |||
136 | 136 | ||
137 | if (mem.type > VIA_MEM_AGP) { | 137 | if (mem.type > VIA_MEM_AGP) { |
138 | DRM_ERROR("Unknown memory type allocation\n"); | 138 | DRM_ERROR("Unknown memory type allocation\n"); |
139 | return DRM_ERR(EINVAL); | 139 | return -EINVAL; |
140 | } | 140 | } |
141 | mutex_lock(&dev->struct_mutex); | 141 | mutex_lock(&dev->struct_mutex); |
142 | if (0 == ((mem.type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized : | 142 | if (0 == ((mem.type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized : |
@@ -144,7 +144,7 @@ int via_mem_alloc(DRM_IOCTL_ARGS) | |||
144 | DRM_ERROR | 144 | DRM_ERROR |
145 | ("Attempt to allocate from uninitialized memory manager.\n"); | 145 | ("Attempt to allocate from uninitialized memory manager.\n"); |
146 | mutex_unlock(&dev->struct_mutex); | 146 | mutex_unlock(&dev->struct_mutex); |
147 | return DRM_ERR(EINVAL); | 147 | return -EINVAL; |
148 | } | 148 | } |
149 | 149 | ||
150 | tmpSize = (mem.size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT; | 150 | tmpSize = (mem.size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT; |
@@ -162,7 +162,7 @@ int via_mem_alloc(DRM_IOCTL_ARGS) | |||
162 | mem.size = 0; | 162 | mem.size = 0; |
163 | mem.index = 0; | 163 | mem.index = 0; |
164 | DRM_DEBUG("Video memory allocation failed\n"); | 164 | DRM_DEBUG("Video memory allocation failed\n"); |
165 | retval = DRM_ERR(ENOMEM); | 165 | retval = -ENOMEM; |
166 | } | 166 | } |
167 | DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, sizeof(mem)); | 167 | DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, sizeof(mem)); |
168 | 168 | ||
diff --git a/drivers/char/drm/via_verifier.c b/drivers/char/drm/via_verifier.c index 832d48356e91..46a579198747 100644 --- a/drivers/char/drm/via_verifier.c +++ b/drivers/char/drm/via_verifier.c | |||
@@ -1026,12 +1026,12 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size, | |||
1026 | case state_error: | 1026 | case state_error: |
1027 | default: | 1027 | default: |
1028 | *hc_state = saved_state; | 1028 | *hc_state = saved_state; |
1029 | return DRM_ERR(EINVAL); | 1029 | return -EINVAL; |
1030 | } | 1030 | } |
1031 | } | 1031 | } |
1032 | if (state == state_error) { | 1032 | if (state == state_error) { |
1033 | *hc_state = saved_state; | 1033 | *hc_state = saved_state; |
1034 | return DRM_ERR(EINVAL); | 1034 | return -EINVAL; |
1035 | } | 1035 | } |
1036 | return 0; | 1036 | return 0; |
1037 | } | 1037 | } |
@@ -1082,11 +1082,11 @@ via_parse_command_stream(struct drm_device * dev, const uint32_t * buf, | |||
1082 | break; | 1082 | break; |
1083 | case state_error: | 1083 | case state_error: |
1084 | default: | 1084 | default: |
1085 | return DRM_ERR(EINVAL); | 1085 | return -EINVAL; |
1086 | } | 1086 | } |
1087 | } | 1087 | } |
1088 | if (state == state_error) { | 1088 | if (state == state_error) { |
1089 | return DRM_ERR(EINVAL); | 1089 | return -EINVAL; |
1090 | } | 1090 | } |
1091 | return 0; | 1091 | return 0; |
1092 | } | 1092 | } |