-rw-r--r-- drivers/char/drm/drm.h | 20
-rw-r--r-- drivers/char/drm/drmP.h | 237
-rw-r--r-- drivers/char/drm/drm_agpsupport.c | 130
-rw-r--r-- drivers/char/drm/drm_auth.c | 48
-rw-r--r-- drivers/char/drm/drm_bufs.c | 203
-rw-r--r-- drivers/char/drm/drm_context.c | 177
-rw-r--r-- drivers/char/drm/drm_dma.c | 11
-rw-r--r-- drivers/char/drm/drm_drawable.c | 67
-rw-r--r-- drivers/char/drm/drm_drv.c | 186
-rw-r--r-- drivers/char/drm/drm_fops.c | 34
-rw-r--r-- drivers/char/drm/drm_ioc32.c | 2
-rw-r--r-- drivers/char/drm/drm_ioctl.c | 196
-rw-r--r-- drivers/char/drm/drm_irq.c | 98
-rw-r--r-- drivers/char/drm/drm_lock.c | 75
-rw-r--r-- drivers/char/drm/drm_os_linux.h | 10
-rw-r--r-- drivers/char/drm/drm_pciids.h | 2
-rw-r--r-- drivers/char/drm/drm_scatter.c | 48
-rw-r--r-- drivers/char/drm/drm_vm.c | 4
-rw-r--r-- drivers/char/drm/i810_dma.c | 312
-rw-r--r-- drivers/char/drm/i810_drm.h | 5
-rw-r--r-- drivers/char/drm/i810_drv.h | 9
-rw-r--r-- drivers/char/drm/i830_dma.c | 210
-rw-r--r-- drivers/char/drm/i830_drv.h | 15
-rw-r--r-- drivers/char/drm/i830_irq.c | 30
-rw-r--r-- drivers/char/drm/i915_dma.c | 214
-rw-r--r-- drivers/char/drm/i915_drv.h | 36
-rw-r--r-- drivers/char/drm/i915_irq.c | 128
-rw-r--r-- drivers/char/drm/i915_mem.c | 125
-rw-r--r-- drivers/char/drm/mga_dma.c | 140
-rw-r--r-- drivers/char/drm/mga_drv.h | 21
-rw-r--r-- drivers/char/drm/mga_state.c | 197
-rw-r--r-- drivers/char/drm/mga_warp.c | 8
-rw-r--r-- drivers/char/drm/r128_cce.c | 138
-rw-r--r-- drivers/char/drm/r128_drm.h | 18
-rw-r--r-- drivers/char/drm/r128_drv.h | 23
-rw-r--r-- drivers/char/drm/r128_state.c | 351
-rw-r--r-- drivers/char/drm/r300_cmdbuf.c | 68
-rw-r--r-- drivers/char/drm/radeon_cp.c | 146
-rw-r--r-- drivers/char/drm/radeon_drv.h | 43
-rw-r--r-- drivers/char/drm/radeon_irq.c | 34
-rw-r--r-- drivers/char/drm/radeon_mem.c | 108
-rw-r--r-- drivers/char/drm/radeon_state.c | 683
-rw-r--r-- drivers/char/drm/savage_bci.c | 145
-rw-r--r-- drivers/char/drm/savage_drv.h | 9
-rw-r--r-- drivers/char/drm/savage_state.c | 200
-rw-r--r-- drivers/char/drm/sis_drv.c | 2
-rw-r--r-- drivers/char/drm/sis_drv.h | 5
-rw-r--r-- drivers/char/drm/sis_mm.c | 112
-rw-r--r-- drivers/char/drm/via_dma.c | 144
-rw-r--r-- drivers/char/drm/via_dmablit.c | 54
-rw-r--r-- drivers/char/drm/via_drv.h | 22
-rw-r--r-- drivers/char/drm/via_irq.c | 47
-rw-r--r-- drivers/char/drm/via_map.c | 14
-rw-r--r-- drivers/char/drm/via_mm.c | 83
-rw-r--r-- drivers/char/drm/via_verifier.c | 8
-rw-r--r-- drivers/char/drm/via_video.c | 20
56 files changed, 2359 insertions(+), 3116 deletions(-)
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
index 2d6f2d0bd02b..82fb3d0d2785 100644
--- a/drivers/char/drm/drm.h
+++ b/drivers/char/drm/drm.h
@@ -63,27 +63,9 @@
 #define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
 #endif
 
-#define XFREE86_VERSION(major,minor,patch,snap) \
-        ((major << 16) | (minor << 8) | patch)
-
-#ifndef CONFIG_XFREE86_VERSION
-#define CONFIG_XFREE86_VERSION XFREE86_VERSION(4,1,0,0)
-#endif
-
-#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
-#define DRM_PROC_DEVICES "/proc/devices"
-#define DRM_PROC_MISC "/proc/misc"
-#define DRM_PROC_DRM "/proc/drm"
-#define DRM_DEV_DRM "/dev/drm"
-#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
-#define DRM_DEV_UID 0
-#define DRM_DEV_GID 0
-#endif
-
-#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0)
 #define DRM_MAJOR 226
 #define DRM_MAX_MINOR 15
-#endif
+
 #define DRM_NAME "drm"    /**< Name in kernel, /dev, and /proc */
 #define DRM_MIN_ORDER 5   /**< At least 2^5 bytes = 32 bytes */
 #define DRM_MAX_ORDER 22  /**< Up to 2^22 bytes = 4MB */
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 0df87fc3dcb2..9dd0760dd87a 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -80,6 +80,9 @@
 #define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
 #define __OS_HAS_MTRR (defined(CONFIG_MTRR))
 
+struct drm_file;
+struct drm_device;
+
 #include "drm_os_linux.h"
 #include "drm_hashtab.h"
 
@@ -231,12 +234,13 @@
  * \param dev DRM device.
  * \param filp file pointer of the caller.
  */
-#define LOCK_TEST_WITH_RETURN( dev, filp ) \
+#define LOCK_TEST_WITH_RETURN( dev, file_priv ) \
 do { \
         if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \
-             dev->lock.filp != filp ) { \
-                DRM_ERROR( "%s called without lock held\n", \
-                           __FUNCTION__ ); \
+             dev->lock.file_priv != file_priv ) { \
+                DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
+                           __FUNCTION__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\
+                           dev->lock.file_priv, file_priv ); \
                 return -EINVAL; \
         } \
 } while (0)
@@ -257,12 +261,12 @@ do { \
  * Ioctl function type.
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private pointer.
  * \param cmd command.
  * \param arg argument.
  */
-typedef int drm_ioctl_t(struct inode *inode, struct file *filp,
-                        unsigned int cmd, unsigned long arg);
+typedef int drm_ioctl_t(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
 
 typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
                                unsigned long arg);
@@ -271,10 +275,18 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
 #define DRM_MASTER 0x2
 #define DRM_ROOT_ONLY 0x4
 
-typedef struct drm_ioctl_desc {
+struct drm_ioctl_desc {
+        unsigned int cmd;
         drm_ioctl_t *func;
         int flags;
-} drm_ioctl_desc_t;
+};
+
+/**
+ * Creates a driver or general drm_ioctl_desc array entry for the given
+ * ioctl, for use by drm_ioctl().
+ */
+#define DRM_IOCTL_DEF(ioctl, func, flags) \
+        [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags}
 
 struct drm_magic_entry {
         struct list_head head;
@@ -304,7 +316,7 @@ struct drm_buf {
         __volatile__ int waiting;       /**< On kernel DMA queue */
         __volatile__ int pending;       /**< On hardware DMA queue */
         wait_queue_head_t dma_wait;     /**< Processes waiting */
-        struct file *filp;              /**< Pointer to holding file descr */
+        struct drm_file *file_priv;     /**< Private of holding file descr */
         int context;                    /**< Kernel queue for this buffer */
         int while_locked;               /**< Dispatch this buffer while locked */
         enum {
@@ -377,6 +389,7 @@ struct drm_file {
         int remove_auth_on_close;
         unsigned long lock_count;
         void *driver_priv;
+        struct file *filp;
 };
 
 /** Wait queue */
@@ -403,7 +416,7 @@ struct drm_queue {
  */
 struct drm_lock_data {
         struct drm_hw_lock *hw_lock;    /**< Hardware lock */
-        struct file *filp;              /**< File descr of lock holder (0=kernel) */
+        struct drm_file *file_priv;     /**< File descr of lock holder (0=kernel) */
         wait_queue_head_t lock_queue;   /**< Queue of blocked processes */
         unsigned long lock_time;        /**< Time of last lock in jiffies */
         spinlock_t spinlock;
@@ -552,11 +565,11 @@ struct drm_driver {
         int (*load) (struct drm_device *, unsigned long flags);
         int (*firstopen) (struct drm_device *);
         int (*open) (struct drm_device *, struct drm_file *);
-        void (*preclose) (struct drm_device *, struct file * filp);
+        void (*preclose) (struct drm_device *, struct drm_file *file_priv);
         void (*postclose) (struct drm_device *, struct drm_file *);
         void (*lastclose) (struct drm_device *);
         int (*unload) (struct drm_device *);
-        int (*dma_ioctl) (DRM_IOCTL_ARGS);
+        int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
         void (*dma_ready) (struct drm_device *);
         int (*dma_quiescent) (struct drm_device *);
         int (*context_ctor) (struct drm_device *dev, int context);
@@ -587,11 +600,12 @@ struct drm_driver {
         void (*irq_preinstall) (struct drm_device *dev);
         void (*irq_postinstall) (struct drm_device *dev);
         void (*irq_uninstall) (struct drm_device *dev);
-        void (*reclaim_buffers) (struct drm_device *dev, struct file * filp);
+        void (*reclaim_buffers) (struct drm_device *dev,
+                                 struct drm_file * file_priv);
         void (*reclaim_buffers_locked) (struct drm_device *dev,
-                                        struct file *filp);
+                                        struct drm_file *file_priv);
         void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
-                                            struct file * filp);
+                                            struct drm_file *file_priv);
         unsigned long (*get_map_ofs) (struct drm_map * map);
         unsigned long (*get_reg_ofs) (struct drm_device *dev);
         void (*set_version) (struct drm_device *dev,
@@ -606,7 +620,7 @@ struct drm_driver {
 
         u32 driver_features;
         int dev_priv_size;
-        drm_ioctl_desc_t *ioctls;
+        struct drm_ioctl_desc *ioctls;
         int num_ioctls;
         struct file_operations fops;
         struct pci_driver pci_driver;
@@ -850,70 +864,70 @@ extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
 extern int drm_unbind_agp(DRM_AGP_MEM * handle);
 
                                 /* Misc. IOCTL support (drm_ioctl.h) */
-extern int drm_irq_by_busid(struct inode *inode, struct file *filp,
-                            unsigned int cmd, unsigned long arg);
+extern int drm_irq_by_busid(struct drm_device *dev, void *data,
+                            struct drm_file *file_priv);
-extern int drm_getunique(struct inode *inode, struct file *filp,
-                         unsigned int cmd, unsigned long arg);
+extern int drm_getunique(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv);
-extern int drm_setunique(struct inode *inode, struct file *filp,
-                         unsigned int cmd, unsigned long arg);
+extern int drm_setunique(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv);
-extern int drm_getmap(struct inode *inode, struct file *filp,
-                      unsigned int cmd, unsigned long arg);
+extern int drm_getmap(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
-extern int drm_getclient(struct inode *inode, struct file *filp,
-                         unsigned int cmd, unsigned long arg);
+extern int drm_getclient(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv);
-extern int drm_getstats(struct inode *inode, struct file *filp,
-                        unsigned int cmd, unsigned long arg);
+extern int drm_getstats(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
-extern int drm_setversion(struct inode *inode, struct file *filp,
-                          unsigned int cmd, unsigned long arg);
+extern int drm_setversion(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv);
-extern int drm_noop(struct inode *inode, struct file *filp,
-                    unsigned int cmd, unsigned long arg);
+extern int drm_noop(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv);
 
                                 /* Context IOCTL support (drm_context.h) */
-extern int drm_resctx(struct inode *inode, struct file *filp,
-                      unsigned int cmd, unsigned long arg);
+extern int drm_resctx(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
-extern int drm_addctx(struct inode *inode, struct file *filp,
-                      unsigned int cmd, unsigned long arg);
+extern int drm_addctx(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
-extern int drm_modctx(struct inode *inode, struct file *filp,
-                      unsigned int cmd, unsigned long arg);
+extern int drm_modctx(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
-extern int drm_getctx(struct inode *inode, struct file *filp,
-                      unsigned int cmd, unsigned long arg);
+extern int drm_getctx(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
-extern int drm_switchctx(struct inode *inode, struct file *filp,
-                         unsigned int cmd, unsigned long arg);
+extern int drm_switchctx(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv);
-extern int drm_newctx(struct inode *inode, struct file *filp,
-                      unsigned int cmd, unsigned long arg);
+extern int drm_newctx(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
-extern int drm_rmctx(struct inode *inode, struct file *filp,
-                     unsigned int cmd, unsigned long arg);
+extern int drm_rmctx(struct drm_device *dev, void *data,
+                     struct drm_file *file_priv);
 
 extern int drm_ctxbitmap_init(struct drm_device *dev);
 extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
 extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
 
-extern int drm_setsareactx(struct inode *inode, struct file *filp,
-                           unsigned int cmd, unsigned long arg);
+extern int drm_setsareactx(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv);
-extern int drm_getsareactx(struct inode *inode, struct file *filp,
-                           unsigned int cmd, unsigned long arg);
+extern int drm_getsareactx(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv);
 
                                 /* Drawable IOCTL support (drm_drawable.h) */
-extern int drm_adddraw(struct inode *inode, struct file *filp,
-                       unsigned int cmd, unsigned long arg);
+extern int drm_adddraw(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
-extern int drm_rmdraw(struct inode *inode, struct file *filp,
-                      unsigned int cmd, unsigned long arg);
+extern int drm_rmdraw(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
-extern int drm_update_drawable_info(struct inode *inode, struct file *filp,
-                                    unsigned int cmd, unsigned long arg);
+extern int drm_update_drawable_info(struct drm_device *dev, void *data,
+                                    struct drm_file *file_priv);
 extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
                                                        drm_drawable_t id);
 extern void drm_drawable_free_all(struct drm_device *dev);
 
                                 /* Authentication IOCTL support (drm_auth.h) */
-extern int drm_getmagic(struct inode *inode, struct file *filp,
-                        unsigned int cmd, unsigned long arg);
+extern int drm_getmagic(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
-extern int drm_authmagic(struct inode *inode, struct file *filp,
-                         unsigned int cmd, unsigned long arg);
+extern int drm_authmagic(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv);
 
                                 /* Locking IOCTL support (drm_lock.h) */
-extern int drm_lock(struct inode *inode, struct file *filp,
-                    unsigned int cmd, unsigned long arg);
+extern int drm_lock(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv);
-extern int drm_unlock(struct inode *inode, struct file *filp,
-                      unsigned int cmd, unsigned long arg);
+extern int drm_unlock(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
 extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
 extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
 extern void drm_idlelock_take(struct drm_lock_data *lock_data);
@@ -924,8 +938,7 @@ extern void drm_idlelock_release(struct drm_lock_data *lock_data);
  * DMA quiscent + idle. DMA quiescent usually requires the hardware lock.
  */
 
-extern int drm_i_have_hw_lock(struct file *filp);
-extern int drm_kernel_take_hw_lock(struct file *filp);
+extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv);
 
                                 /* Buffer management support (drm_bufs.h) */
 extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
@@ -933,24 +946,23 @@ extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request
 extern int drm_addmap(struct drm_device *dev, unsigned int offset,
                       unsigned int size, enum drm_map_type type,
                       enum drm_map_flags flags, drm_local_map_t ** map_ptr);
-extern int drm_addmap_ioctl(struct inode *inode, struct file *filp,
-                            unsigned int cmd, unsigned long arg);
+extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
+                            struct drm_file *file_priv);
-extern int drm_rmmap(struct drm_device *dev, drm_local_map_t * map);
+extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map);
-extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t * map);
+extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map);
-extern int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
-                           unsigned int cmd, unsigned long arg);
+extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv);
-
+extern int drm_addbufs(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+extern int drm_infobufs(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+extern int drm_markbufs(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+extern int drm_freebufs(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+extern int drm_mapbufs(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
 extern int drm_order(unsigned long size);
-extern int drm_addbufs(struct inode *inode, struct file *filp,
-                       unsigned int cmd, unsigned long arg);
-extern int drm_infobufs(struct inode *inode, struct file *filp,
-                        unsigned int cmd, unsigned long arg);
-extern int drm_markbufs(struct inode *inode, struct file *filp,
-                        unsigned int cmd, unsigned long arg);
-extern int drm_freebufs(struct inode *inode, struct file *filp,
-                        unsigned int cmd, unsigned long arg);
-extern int drm_mapbufs(struct inode *inode, struct file *filp,
-                       unsigned int cmd, unsigned long arg);
 extern unsigned long drm_get_resource_start(struct drm_device *dev,
                                             unsigned int resource);
 extern unsigned long drm_get_resource_len(struct drm_device *dev,
@@ -960,19 +972,20 @@ extern unsigned long drm_get_resource_len(struct drm_device *dev,
 extern int drm_dma_setup(struct drm_device *dev);
 extern void drm_dma_takedown(struct drm_device *dev);
 extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
-extern void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp);
+extern void drm_core_reclaim_buffers(struct drm_device *dev,
+                                     struct drm_file *filp);
 
                                 /* IRQ support (drm_irq.h) */
-extern int drm_control(struct inode *inode, struct file *filp,
-                       unsigned int cmd, unsigned long arg);
+extern int drm_control(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
 extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
 extern int drm_irq_uninstall(struct drm_device *dev);
 extern void drm_driver_irq_preinstall(struct drm_device *dev);
 extern void drm_driver_irq_postinstall(struct drm_device *dev);
 extern void drm_driver_irq_uninstall(struct drm_device *dev);
 
-extern int drm_wait_vblank(struct inode *inode, struct file *filp,
-                           unsigned int cmd, unsigned long arg);
+extern int drm_wait_vblank(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv);
 extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
 extern void drm_vbl_send_signals(struct drm_device *dev);
 extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
@@ -980,31 +993,30 @@ extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_de
                                 /* AGP/GART support (drm_agpsupport.h) */
 extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
 extern int drm_agp_acquire(struct drm_device *dev);
-extern int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
-                                 unsigned int cmd, unsigned long arg);
+extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+                                 struct drm_file *file_priv);
 extern int drm_agp_release(struct drm_device *dev);
-extern int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
-                                 unsigned int cmd, unsigned long arg);
+extern int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+                                 struct drm_file *file_priv);
 extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
-extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
-                                unsigned int cmd, unsigned long arg);
+extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+                                struct drm_file *file_priv);
-extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info * info);
+extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
-extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
-                              unsigned int cmd, unsigned long arg);
+extern int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+                              struct drm_file *file_priv);
 extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
-extern int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
-                               unsigned int cmd, unsigned long arg);
+extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv);
 extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
-extern int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
-                              unsigned int cmd, unsigned long arg);
+extern int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+                              struct drm_file *file_priv);
 extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
-extern int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
-                                unsigned int cmd, unsigned long arg);
+extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+                                struct drm_file *file_priv);
 extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
-extern int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
-                              unsigned int cmd, unsigned long arg);
+extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+                              struct drm_file *file_priv);
-extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge,
-                                            size_t pages, u32 type);
+extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type);
 extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
 extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
 extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
@@ -1033,10 +1045,11 @@ extern int drm_proc_cleanup(int minor,
 
                                 /* Scatter Gather Support (drm_scatter.h) */
 extern void drm_sg_cleanup(struct drm_sg_mem * entry);
-extern int drm_sg_alloc(struct inode *inode, struct file *filp,
-                        unsigned int cmd, unsigned long arg);
-extern int drm_sg_free(struct inode *inode, struct file *filp,
-                       unsigned int cmd, unsigned long arg);
+extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+                              struct drm_file *file_priv);
+extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
+extern int drm_sg_free(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
 
                                 /* ATI PCIGART support (ati_pcigart.h) */
 extern int drm_ati_pcigart_init(struct drm_device *dev,
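The drmP.h hunks above replace the old (inode, filp, cmd, arg) ioctl prototype with a drm_ioctl_t that takes (dev, data, file_priv), and add the DRM_IOCTL_DEF() helper for building ioctl tables. For orientation only — this is not code from the patch, and the foo_* names, struct drm_foo_flush, and DRM_IOCTL_FOO_FLUSH are invented for illustration — a driver-side handler and table entry written against the new interface could look like this:

#include "drmP.h"

/* Hypothetical argument struct; a real driver defines this in its UAPI header. */
struct drm_foo_flush {
        unsigned int context;
};

/* New-style handler: the core has already copied the user argument into the
 * kernel buffer passed as "data", so no copy_from_user() is needed here. */
static int foo_dma_flush(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_foo_flush *flush = data;

        LOCK_TEST_WITH_RETURN(dev, file_priv);
        DRM_DEBUG("flushing context %u\n", flush->context);
        /* ... kick the hardware here ... */
        return 0;
}

/* Ioctl table entry built with the new DRM_IOCTL_DEF() macro. */
static struct drm_ioctl_desc foo_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_FOO_FLUSH, foo_dma_flush, DRM_AUTH),
};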
diff --git a/drivers/char/drm/drm_agpsupport.c b/drivers/char/drm/drm_agpsupport.c
index 354f0e3674bf..214f4fbcba73 100644
--- a/drivers/char/drm/drm_agpsupport.c
+++ b/drivers/char/drm/drm_agpsupport.c
@@ -40,7 +40,7 @@
  * Get AGP information.
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
  * \param cmd command.
  * \param arg pointer to a (output) drm_agp_info structure.
  * \return zero on success or a negative number on failure.
@@ -71,20 +71,16 @@ int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
 
 EXPORT_SYMBOL(drm_agp_info);
 
-int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
-                       unsigned int cmd, unsigned long arg)
+int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
 {
-        struct drm_file *priv = filp->private_data;
-        struct drm_device *dev = priv->head->dev;
-        struct drm_agp_info info;
+        struct drm_agp_info *info = data;
         int err;
 
-        err = drm_agp_info(dev, &info);
+        err = drm_agp_info(dev, info);
         if (err)
                 return err;
 
-        if (copy_to_user((struct drm_agp_info __user *) arg, &info, sizeof(info)))
-                return -EFAULT;
         return 0;
 }
 
@@ -115,7 +111,7 @@ EXPORT_SYMBOL(drm_agp_acquire);
  * Acquire the AGP device (ioctl).
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
  * \param cmd command.
  * \param arg user argument.
  * \return zero on success or a negative number on failure.
@@ -123,12 +119,10 @@ EXPORT_SYMBOL(drm_agp_acquire);
  * Verifies the AGP device hasn't been acquired before and calls
  * \c agp_backend_acquire.
  */
-int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
-                          unsigned int cmd, unsigned long arg)
+int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv)
 {
-        struct drm_file *priv = filp->private_data;
-
-        return drm_agp_acquire((struct drm_device *) priv->head->dev);
+        return drm_agp_acquire((struct drm_device *) file_priv->head->dev);
 }
 
 /**
@@ -149,12 +143,9 @@ int drm_agp_release(struct drm_device * dev)
 }
 EXPORT_SYMBOL(drm_agp_release);
 
-int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
-                          unsigned int cmd, unsigned long arg)
+int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv)
 {
-        struct drm_file *priv = filp->private_data;
-        struct drm_device *dev = priv->head->dev;
-
         return drm_agp_release(dev);
 }
 
@@ -182,24 +173,19 @@ int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
 
 EXPORT_SYMBOL(drm_agp_enable);
 
-int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
-                         unsigned int cmd, unsigned long arg)
+int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
 {
-        struct drm_file *priv = filp->private_data;
-        struct drm_device *dev = priv->head->dev;
-        struct drm_agp_mode mode;
-
-        if (copy_from_user(&mode, (struct drm_agp_mode __user *) arg, sizeof(mode)))
-                return -EFAULT;
+        struct drm_agp_mode *mode = data;
 
-        return drm_agp_enable(dev, mode);
+        return drm_agp_enable(dev, *mode);
 }
 
 /**
  * Allocate AGP memory.
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv file private pointer.
  * \param cmd command.
  * \param arg pointer to a drm_agp_buffer structure.
  * \return zero on success or a negative number on failure.
@@ -241,35 +227,13 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
 }
 EXPORT_SYMBOL(drm_agp_alloc);
 
-int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
-                        unsigned int cmd, unsigned long arg)
-{
-        struct drm_file *priv = filp->private_data;
-        struct drm_device *dev = priv->head->dev;
-        struct drm_agp_buffer request;
-        struct drm_agp_buffer __user *argp = (void __user *)arg;
-        int err;
-
-        if (copy_from_user(&request, argp, sizeof(request)))
-                return -EFAULT;
 
-        err = drm_agp_alloc(dev, &request);
-        if (err)
-                return err;
-
-        if (copy_to_user(argp, &request, sizeof(request))) {
-                struct drm_agp_mem *entry;
-                list_for_each_entry(entry, &dev->agp->memory, head) {
-                        if (entry->handle == request.handle)
-                                break;
-                }
-                list_del(&entry->head);
-                drm_free_agp(entry->memory, entry->pages);
-                drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
-                return -EFAULT;
-        }
+int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+        struct drm_agp_buffer *request = data;
 
-        return 0;
+        return drm_agp_alloc(dev, request);
 }
 
 /**
@@ -297,7 +261,7 @@ static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
  * Unbind AGP memory from the GATT (ioctl).
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
  * \param cmd command.
  * \param arg pointer to a drm_agp_binding structure.
  * \return zero on success or a negative number on failure.
@@ -323,25 +287,20 @@ int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
 }
 EXPORT_SYMBOL(drm_agp_unbind);
 
-int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
-                         unsigned int cmd, unsigned long arg)
-{
-        struct drm_file *priv = filp->private_data;
-        struct drm_device *dev = priv->head->dev;
-        struct drm_agp_binding request;
 
-        if (copy_from_user
-            (&request, (struct drm_agp_binding __user *) arg, sizeof(request)))
-                return -EFAULT;
+int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+        struct drm_agp_binding *request = data;
 
-        return drm_agp_unbind(dev, &request);
+        return drm_agp_unbind(dev, request);
 }
 
 /**
  * Bind AGP memory into the GATT (ioctl)
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
  * \param cmd command.
  * \param arg pointer to a drm_agp_binding structure.
  * \return zero on success or a negative number on failure.
@@ -372,25 +331,20 @@ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
 }
 EXPORT_SYMBOL(drm_agp_bind);
 
-int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
-                       unsigned int cmd, unsigned long arg)
-{
-        struct drm_file *priv = filp->private_data;
-        struct drm_device *dev = priv->head->dev;
-        struct drm_agp_binding request;
 
-        if (copy_from_user
-            (&request, (struct drm_agp_binding __user *) arg, sizeof(request)))
-                return -EFAULT;
+int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+        struct drm_agp_binding *request = data;
 
-        return drm_agp_bind(dev, &request);
+        return drm_agp_bind(dev, request);
 }
 
 /**
  * Free AGP memory (ioctl).
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
  * \param cmd command.
  * \param arg pointer to a drm_agp_buffer structure.
  * \return zero on success or a negative number on failure.
@@ -419,18 +373,14 @@ int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
 }
 EXPORT_SYMBOL(drm_agp_free);
 
-int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
-                       unsigned int cmd, unsigned long arg)
-{
-        struct drm_file *priv = filp->private_data;
-        struct drm_device *dev = priv->head->dev;
-        struct drm_agp_buffer request;
 
-        if (copy_from_user
-            (&request, (struct drm_agp_buffer __user *) arg, sizeof(request)))
-                return -EFAULT;
 
-        return drm_agp_free(dev, &request);
+int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+        struct drm_agp_buffer *request = data;
+
+        return drm_agp_free(dev, request);
 }
 
 /**
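The deletions of copy_from_user()/copy_to_user() boilerplate in each AGP ioctl above rely on argument copying being centralized in the core dispatcher, which hands every handler a kernel-space "data" pointer. A simplified sketch of that dispatch pattern — an illustration under that assumption, not the exact drm_ioctl() from this series:

#include <linux/uaccess.h>
#include "drmP.h"

/* Sketch: copy the ioctl argument in once, call the new-style handler with a
 * kernel pointer, then copy the (possibly updated) argument back out. */
static int dispatch_one_ioctl(struct drm_device *dev, struct drm_file *file_priv,
                              const struct drm_ioctl_desc *ioctl,
                              unsigned int cmd, void __user *uarg)
{
        char kdata[128];
        unsigned int size = _IOC_SIZE(cmd);
        int ret;

        if (size > sizeof(kdata))
                return -EINVAL;         /* real code would kmalloc() instead */

        if ((cmd & IOC_IN) && copy_from_user(kdata, uarg, size))
                return -EFAULT;

        ret = ioctl->func(dev, kdata, file_priv);

        if (!ret && (cmd & IOC_OUT) && copy_to_user(uarg, kdata, size))
                ret = -EFAULT;

        return ret;
}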
diff --git a/drivers/char/drm/drm_auth.c b/drivers/char/drm/drm_auth.c
index 7f777da872cd..a73462723d2d 100644
--- a/drivers/char/drm/drm_auth.c
+++ b/drivers/char/drm/drm_auth.c
@@ -128,42 +128,38 @@ static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic)
  * Get a unique magic number (ioctl).
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
  * \param cmd command.
  * \param arg pointer to a resulting drm_auth structure.
  * \return zero on success, or a negative number on failure.
  *
  * If there is a magic number in drm_file::magic then use it, otherwise
  * searches an unique non-zero magic number and add it associating it with \p
- * filp.
+ * file_priv.
  */
-int drm_getmagic(struct inode *inode, struct file *filp,
-                 unsigned int cmd, unsigned long arg)
+int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
         static drm_magic_t sequence = 0;
         static DEFINE_SPINLOCK(lock);
-        struct drm_file *priv = filp->private_data;
-        struct drm_device *dev = priv->head->dev;
-        struct drm_auth auth;
+        struct drm_auth *auth = data;
 
         /* Find unique magic */
-        if (priv->magic) {
-                auth.magic = priv->magic;
+        if (file_priv->magic) {
+                auth->magic = file_priv->magic;
         } else {
                 do {
                         spin_lock(&lock);
                         if (!sequence)
                                 ++sequence;     /* reserve 0 */
-                        auth.magic = sequence++;
+                        auth->magic = sequence++;
                         spin_unlock(&lock);
-                } while (drm_find_file(dev, auth.magic));
-                priv->magic = auth.magic;
-                drm_add_magic(dev, priv, auth.magic);
+                } while (drm_find_file(dev, auth->magic));
+                file_priv->magic = auth->magic;
+                drm_add_magic(dev, file_priv, auth->magic);
         }
 
-        DRM_DEBUG("%u\n", auth.magic);
-        if (copy_to_user((struct drm_auth __user *) arg, &auth, sizeof(auth)))
-                return -EFAULT;
+        DRM_DEBUG("%u\n", auth->magic);
+
         return 0;
 }
 
@@ -171,27 +167,23 @@ int drm_getmagic(struct inode *inode, struct file *filp,
  * Authenticate with a magic.
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
  * \param cmd command.
  * \param arg pointer to a drm_auth structure.
  * \return zero if authentication successed, or a negative number otherwise.
  *
- * Checks if \p filp is associated with the magic number passed in \arg.
+ * Checks if \p file_priv is associated with the magic number passed in \arg.
  */
-int drm_authmagic(struct inode *inode, struct file *filp,
-                  unsigned int cmd, unsigned long arg)
+int drm_authmagic(struct drm_device *dev, void *data,
+                  struct drm_file *file_priv)
 {
-        struct drm_file *priv = filp->private_data;
-        struct drm_device *dev = priv->head->dev;
-        struct drm_auth auth;
+        struct drm_auth *auth = data;
         struct drm_file *file;
 
-        if (copy_from_user(&auth, (struct drm_auth __user *) arg, sizeof(auth)))
-                return -EFAULT;
-        DRM_DEBUG("%u\n", auth.magic);
-        if ((file = drm_find_file(dev, auth.magic))) {
+        DRM_DEBUG("%u\n", auth->magic);
+        if ((file = drm_find_file(dev, auth->magic))) {
                 file->authenticated = 1;
-                drm_remove_magic(dev, auth.magic);
+                drm_remove_magic(dev, auth->magic);
                 return 0;
         }
         return -EINVAL;
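For context, the magic handshake that drm_getmagic()/drm_authmagic() implement above is driven from user space; a hedged sketch assuming the long-standing libdrm helpers drmGetMagic() and drmAuthMagic() (not part of this patch):

#include <xf86drm.h>

/* A client fetches its magic (kernel side: drm_getmagic()) and hands it to the
 * DRM master, which authenticates it (kernel side: drm_authmagic()). */
static int authenticate_client(int master_fd, int client_fd)
{
        drm_magic_t magic;

        if (drmGetMagic(client_fd, &magic))
                return -1;

        if (drmAuthMagic(master_fd, magic))
                return -1;

        return 0;
}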
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index c115b39b8517..856774fbe025 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -92,7 +92,7 @@ static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
  * Ioctl to specify a range of memory that is available for mapping by a non-root process.
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
  * \param cmd command.
  * \param arg pointer to a drm_map structure.
  * \return zero on success or a negative value on error.
@@ -332,38 +332,24 @@ int drm_addmap(struct drm_device * dev, unsigned int offset,
 
 EXPORT_SYMBOL(drm_addmap);
 
-int drm_addmap_ioctl(struct inode *inode, struct file *filp,
-                     unsigned int cmd, unsigned long arg)
+int drm_addmap_ioctl(struct drm_device *dev, void *data,
+                     struct drm_file *file_priv)
 {
-        struct drm_file *priv = filp->private_data;
-        struct drm_device *dev = priv->head->dev;
-        struct drm_map map;
+        struct drm_map *map = data;
         struct drm_map_list *maplist;
-        struct drm_map __user *argp = (void __user *)arg;
         int err;
 
-        if (!(filp->f_mode & 3))
-                return -EACCES; /* Require read/write */
-
-        if (copy_from_user(&map, argp, sizeof(map))) {
-                return -EFAULT;
-        }
-
-        if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
+        if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP))
                 return -EPERM;
 
-        err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
-                              &maplist);
+        err = drm_addmap_core(dev, map->offset, map->size, map->type,
+                              map->flags, &maplist);
 
         if (err)
                 return err;
 
-        if (copy_to_user(argp, maplist->map, sizeof(struct drm_map)))
-                return -EFAULT;
-
         /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
-        if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle))
-                return -EFAULT;
+        map->handle = (void *)(unsigned long)maplist->user_token;
         return 0;
 }
 
@@ -372,7 +358,7 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
  * isn't in use.
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
  * \param cmd command.
  * \param arg pointer to a struct drm_map structure.
  * \return zero on success or a negative value on error.
@@ -453,24 +439,18 @@ int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
  * gets used by drivers that the server doesn't need to care about. This seems
  * unlikely.
  */
-int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
-                    unsigned int cmd, unsigned long arg)
+int drm_rmmap_ioctl(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
 {
-        struct drm_file *priv = filp->private_data;
-        struct drm_device *dev = priv->head->dev;
-        struct drm_map request;
+        struct drm_map *request = data;
         drm_local_map_t *map = NULL;
         struct drm_map_list *r_list;
         int ret;
 
-        if (copy_from_user(&request, (struct drm_map __user *) arg, sizeof(request))) {
-                return -EFAULT;
-        }
-
         mutex_lock(&dev->struct_mutex);
         list_for_each_entry(r_list, &dev->maplist, head) {
                 if (r_list->map &&
-                    r_list->user_token == (unsigned long)request.handle &&
+                    r_list->user_token == (unsigned long)request->handle &&
                     r_list->map->flags & _DRM_REMOVABLE) {
                         map = r_list->map;
                         break;
@@ -661,7 +641,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
                 buf->waiting = 0;
                 buf->pending = 0;
                 init_waitqueue_head(&buf->dma_wait);
-                buf->filp = NULL;
+                buf->file_priv = NULL;
 
                 buf->dev_priv_size = dev->driver->dev_priv_size;
                 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
@@ -872,7 +852,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
                 buf->waiting = 0;
                 buf->pending = 0;
                 init_waitqueue_head(&buf->dma_wait);
-                buf->filp = NULL;
+                buf->file_priv = NULL;
 
                 buf->dev_priv_size = dev->driver->dev_priv_size;
                 buf->dev_private = drm_alloc(buf->dev_priv_size,
@@ -1050,7 +1030,7 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
                 buf->waiting = 0;
                 buf->pending = 0;
                 init_waitqueue_head(&buf->dma_wait);
-                buf->filp = NULL;
+                buf->file_priv = NULL;
 
                 buf->dev_priv_size = dev->driver->dev_priv_size;
                 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
@@ -1211,7 +1191,7 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
                 buf->waiting = 0;
                 buf->pending = 0;
                 init_waitqueue_head(&buf->dma_wait);
-                buf->filp = NULL;
+                buf->file_priv = NULL;
 
                 buf->dev_priv_size = dev->driver->dev_priv_size;
                 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
@@ -1275,7 +1255,7 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
  * Add buffers for DMA transfers (ioctl).
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
  * \param cmd command.
  * \param arg pointer to a struct drm_buf_desc request.
  * \return zero on success or a negative number on failure.
@@ -1285,38 +1265,27 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
  * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
  * PCI memory respectively.
  */
-int drm_addbufs(struct inode *inode, struct file *filp,
-                unsigned int cmd, unsigned long arg)
+int drm_addbufs(struct drm_device *dev, void *data,
+                struct drm_file *file_priv)
 {
-        struct drm_buf_desc request;
-        struct drm_file *priv = filp->private_data;
-        struct drm_device *dev = priv->head->dev;
+        struct drm_buf_desc *request = data;
         int ret;
 
         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                 return -EINVAL;
 
-        if (copy_from_user(&request, (struct drm_buf_desc __user *) arg,
-                           sizeof(request)))
-                return -EFAULT;
-
 #if __OS_HAS_AGP
-        if (request.flags & _DRM_AGP_BUFFER)
-                ret = drm_addbufs_agp(dev, &request);
+        if (request->flags & _DRM_AGP_BUFFER)
+                ret = drm_addbufs_agp(dev, request);
         else
 #endif
-        if (request.flags & _DRM_SG_BUFFER)
-                ret = drm_addbufs_sg(dev, &request);
-        else if (request.flags & _DRM_FB_BUFFER)
-                ret = drm_addbufs_fb(dev, &request);
+        if (request->flags & _DRM_SG_BUFFER)
+                ret = drm_addbufs_sg(dev, request);
+        else if (request->flags & _DRM_FB_BUFFER)
+                ret = drm_addbufs_fb(dev, request);
         else
-                ret = drm_addbufs_pci(dev, &request);
+                ret = drm_addbufs_pci(dev, request);
 
-        if (ret == 0) {
-                if (copy_to_user((void __user *)arg, &request, sizeof(request))) {
-                        ret = -EFAULT;
-                }
-        }
         return ret;
 }
 
@@ -1328,7 +1297,7 @@ int drm_addbufs(struct inode *inode, struct file *filp,
  * large buffers can be used for image transfer).
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
  * \param cmd command.
  * \param arg pointer to a drm_buf_info structure.
  * \return zero on success or a negative number on failure.
@@ -1337,14 +1306,11 @@ int drm_addbufs(struct inode *inode, struct file *filp,
  * lock, preventing of allocating more buffers after this call. Information
  * about each requested buffer is then copied into user space.
  */
-int drm_infobufs(struct inode *inode, struct file *filp,
-                 unsigned int cmd, unsigned long arg)
+int drm_infobufs(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
 {
-        struct drm_file *priv = filp->private_data;
-        struct drm_device *dev = priv->head->dev;
         struct drm_device_dma *dma = dev->dma;
-        struct drm_buf_info request;
-        struct drm_buf_info __user *argp = (void __user *)arg;
+        struct drm_buf_info *request = data;
         int i;
         int count;
 
@@ -1362,9 +1328,6 @@ int drm_infobufs(struct inode *inode, struct file *filp,
         ++dev->buf_use;         /* Can't allocate more after this call */
         spin_unlock(&dev->count_lock);
 
-        if (copy_from_user(&request, argp, sizeof(request)))
-                return -EFAULT;
-
         for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                 if (dma->bufs[i].buf_count)
                         ++count;
@@ -1372,11 +1335,11 @@ int drm_infobufs(struct inode *inode, struct file *filp,
 
         DRM_DEBUG("count = %d\n", count);
 
-        if (request.count >= count) {
+        if (request->count >= count) {
                 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                         if (dma->bufs[i].buf_count) {
                                 struct drm_buf_desc __user *to =
-                                    &request.list[count];
+                                    &request->list[count];
                                 struct drm_buf_entry *from = &dma->bufs[i];
                                 struct drm_freelist *list = &dma->bufs[i].freelist;
                                 if (copy_to_user(&to->count,
@@ -1403,10 +1366,7 @@ int drm_infobufs(struct inode *inode, struct file *filp,
                         }
                 }
         }
-        request.count = count;
-
-        if (copy_to_user(argp, &request, sizeof(request)))
-                return -EFAULT;
+        request->count = count;
 
         return 0;
 }
@@ -1415,7 +1375,7 @@ int drm_infobufs(struct inode *inode, struct file *filp,
  * Specifies a low and high water mark for buffer allocation
  *
  * \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
  * \param cmd command.
  * \param arg a pointer to a drm_buf_desc structure.
  * \return zero on success or a negative number on failure.
@@ -1425,13 +1385,11 @@ int drm_infobufs(struct inode *inode, struct file *filp,
  *
  * \note This ioctl is deprecated and mostly never used.
  */
-int drm_markbufs(struct inode *inode, struct file *filp,
-                 unsigned int cmd, unsigned long arg)
+int drm_markbufs(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
 {
-        struct drm_file *priv = filp->private_data;
-        struct drm_device *dev = priv->head->dev;
         struct drm_device_dma *dma = dev->dma;
-        struct drm_buf_desc request;
+        struct drm_buf_desc *request = data;
         int order;
         struct drm_buf_entry *entry;
 
@@ -1441,24 +1399,20 @@ int drm_markbufs(struct inode *inode, struct file *filp,
1441 if (!dma) 1399 if (!dma)
1442 return -EINVAL; 1400 return -EINVAL;
1443 1401
1444 if (copy_from_user(&request,
1445 (struct drm_buf_desc __user *) arg, sizeof(request)))
1446 return -EFAULT;
1447
1448 DRM_DEBUG("%d, %d, %d\n", 1402 DRM_DEBUG("%d, %d, %d\n",
1449 request.size, request.low_mark, request.high_mark); 1403 request->size, request->low_mark, request->high_mark);
1450 order = drm_order(request.size); 1404 order = drm_order(request->size);
1451 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) 1405 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1452 return -EINVAL; 1406 return -EINVAL;
1453 entry = &dma->bufs[order]; 1407 entry = &dma->bufs[order];
1454 1408
1455 if (request.low_mark < 0 || request.low_mark > entry->buf_count) 1409 if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1456 return -EINVAL; 1410 return -EINVAL;
1457 if (request.high_mark < 0 || request.high_mark > entry->buf_count) 1411 if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1458 return -EINVAL; 1412 return -EINVAL;
1459 1413
1460 entry->freelist.low_mark = request.low_mark; 1414 entry->freelist.low_mark = request->low_mark;
1461 entry->freelist.high_mark = request.high_mark; 1415 entry->freelist.high_mark = request->high_mark;
1462 1416
1463 return 0; 1417 return 0;
1464} 1418}
@@ -1467,7 +1421,7 @@ int drm_markbufs(struct inode *inode, struct file *filp,
1467 * Unreserve the buffers in list, previously reserved using drmDMA. 1421 * Unreserve the buffers in list, previously reserved using drmDMA.
1468 * 1422 *
1469 * \param inode device inode. 1423 * \param inode device inode.
1470 * \param filp file pointer. 1424 * \param file_priv DRM file private.
1471 * \param cmd command. 1425 * \param cmd command.
1472 * \param arg pointer to a drm_buf_free structure. 1426 * \param arg pointer to a drm_buf_free structure.
1473 * \return zero on success or a negative number on failure. 1427 * \return zero on success or a negative number on failure.
@@ -1475,13 +1429,11 @@ int drm_markbufs(struct inode *inode, struct file *filp,
1475 * Calls free_buffer() for each used buffer. 1429 * Calls free_buffer() for each used buffer.
1476 * This function is primarily used for debugging. 1430 * This function is primarily used for debugging.
1477 */ 1431 */
1478int drm_freebufs(struct inode *inode, struct file *filp, 1432int drm_freebufs(struct drm_device *dev, void *data,
1479 unsigned int cmd, unsigned long arg) 1433 struct drm_file *file_priv)
1480{ 1434{
1481 struct drm_file *priv = filp->private_data;
1482 struct drm_device *dev = priv->head->dev;
1483 struct drm_device_dma *dma = dev->dma; 1435 struct drm_device_dma *dma = dev->dma;
1484 struct drm_buf_free request; 1436 struct drm_buf_free *request = data;
1485 int i; 1437 int i;
1486 int idx; 1438 int idx;
1487 struct drm_buf *buf; 1439 struct drm_buf *buf;
@@ -1492,13 +1444,9 @@ int drm_freebufs(struct inode *inode, struct file *filp,
1492 if (!dma) 1444 if (!dma)
1493 return -EINVAL; 1445 return -EINVAL;
1494 1446
1495 if (copy_from_user(&request, 1447 DRM_DEBUG("%d\n", request->count);
1496 (struct drm_buf_free __user *) arg, sizeof(request))) 1448 for (i = 0; i < request->count; i++) {
1497 return -EFAULT; 1449 if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
1498
1499 DRM_DEBUG("%d\n", request.count);
1500 for (i = 0; i < request.count; i++) {
1501 if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
1502 return -EFAULT; 1450 return -EFAULT;
1503 if (idx < 0 || idx >= dma->buf_count) { 1451 if (idx < 0 || idx >= dma->buf_count) {
1504 DRM_ERROR("Index %d (of %d max)\n", 1452 DRM_ERROR("Index %d (of %d max)\n",
@@ -1506,7 +1454,7 @@ int drm_freebufs(struct inode *inode, struct file *filp,
1506 return -EINVAL; 1454 return -EINVAL;
1507 } 1455 }
1508 buf = dma->buflist[idx]; 1456 buf = dma->buflist[idx];
1509 if (buf->filp != filp) { 1457 if (buf->file_priv != file_priv) {
1510 DRM_ERROR("Process %d freeing buffer not owned\n", 1458 DRM_ERROR("Process %d freeing buffer not owned\n",
1511 current->pid); 1459 current->pid);
1512 return -EINVAL; 1460 return -EINVAL;
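
The caller side of DRM_IOCTL_FREE_BUFS is unchanged by this conversion: the drm_buf_free header is now copied by the core, while the index list is still fetched element by element with copy_from_user() as shown above. A minimal sketch, not taken from this patch, with an illustrative helper name:

#include <sys/ioctl.h>
#include <drm/drm.h>	/* struct drm_buf_free, DRM_IOCTL_FREE_BUFS; path may vary */

/* Sketch: hand a set of DMA buffers back to the kernel.  'indices' holds
 * buffer idx values previously obtained through DRM_IOCTL_MAP_BUFS or the
 * driver's DMA ioctl, and fd must be the file that owns those buffers. */
static int free_dma_buffers(int fd, int *indices, int count)
{
	struct drm_buf_free req = {
		.count = count,
		.list  = indices,
	};

	return ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);
}
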
@@ -1521,7 +1469,7 @@ int drm_freebufs(struct inode *inode, struct file *filp,
1521 * Maps all of the DMA buffers into client-virtual space (ioctl). 1469 * Maps all of the DMA buffers into client-virtual space (ioctl).
1522 * 1470 *
1523 * \param inode device inode. 1471 * \param inode device inode.
1524 * \param filp file pointer. 1472 * \param file_priv DRM file private.
1525 * \param cmd command. 1473 * \param cmd command.
1526 * \param arg pointer to a drm_buf_map structure. 1474 * \param arg pointer to a drm_buf_map structure.
1527 * \return zero on success or a negative number on failure. 1475 * \return zero on success or a negative number on failure.
@@ -1531,18 +1479,15 @@ int drm_freebufs(struct inode *inode, struct file *filp,
1531 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls 1479 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1532 * drm_mmap_dma(). 1480 * drm_mmap_dma().
1533 */ 1481 */
1534int drm_mapbufs(struct inode *inode, struct file *filp, 1482int drm_mapbufs(struct drm_device *dev, void *data,
1535 unsigned int cmd, unsigned long arg) 1483 struct drm_file *file_priv)
1536{ 1484{
1537 struct drm_file *priv = filp->private_data;
1538 struct drm_device *dev = priv->head->dev;
1539 struct drm_device_dma *dma = dev->dma; 1485 struct drm_device_dma *dma = dev->dma;
1540 struct drm_buf_map __user *argp = (void __user *)arg;
1541 int retcode = 0; 1486 int retcode = 0;
1542 const int zero = 0; 1487 const int zero = 0;
1543 unsigned long virtual; 1488 unsigned long virtual;
1544 unsigned long address; 1489 unsigned long address;
1545 struct drm_buf_map request; 1490 struct drm_buf_map *request = data;
1546 int i; 1491 int i;
1547 1492
1548 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1493 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
@@ -1559,10 +1504,7 @@ int drm_mapbufs(struct inode *inode, struct file *filp,
1559 dev->buf_use++; /* Can't allocate more after this call */ 1504 dev->buf_use++; /* Can't allocate more after this call */
1560 spin_unlock(&dev->count_lock); 1505 spin_unlock(&dev->count_lock);
1561 1506
1562 if (copy_from_user(&request, argp, sizeof(request))) 1507 if (request->count >= dma->buf_count) {
1563 return -EFAULT;
1564
1565 if (request.count >= dma->buf_count) {
1566 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) 1508 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1567 || (drm_core_check_feature(dev, DRIVER_SG) 1509 || (drm_core_check_feature(dev, DRIVER_SG)
1568 && (dma->flags & _DRM_DMA_USE_SG)) 1510 && (dma->flags & _DRM_DMA_USE_SG))
@@ -1575,15 +1517,15 @@ int drm_mapbufs(struct inode *inode, struct file *filp,
1575 retcode = -EINVAL; 1517 retcode = -EINVAL;
1576 goto done; 1518 goto done;
1577 } 1519 }
1578
1579 down_write(&current->mm->mmap_sem); 1520 down_write(&current->mm->mmap_sem);
1580 virtual = do_mmap(filp, 0, map->size, 1521 virtual = do_mmap(file_priv->filp, 0, map->size,
1581 PROT_READ | PROT_WRITE, 1522 PROT_READ | PROT_WRITE,
1582 MAP_SHARED, token); 1523 MAP_SHARED,
1524 token);
1583 up_write(&current->mm->mmap_sem); 1525 up_write(&current->mm->mmap_sem);
1584 } else { 1526 } else {
1585 down_write(&current->mm->mmap_sem); 1527 down_write(&current->mm->mmap_sem);
1586 virtual = do_mmap(filp, 0, dma->byte_count, 1528 virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
1587 PROT_READ | PROT_WRITE, 1529 PROT_READ | PROT_WRITE,
1588 MAP_SHARED, 0); 1530 MAP_SHARED, 0);
1589 up_write(&current->mm->mmap_sem); 1531 up_write(&current->mm->mmap_sem);
@@ -1593,28 +1535,28 @@ int drm_mapbufs(struct inode *inode, struct file *filp,
1593 retcode = (signed long)virtual; 1535 retcode = (signed long)virtual;
1594 goto done; 1536 goto done;
1595 } 1537 }
1596 request.virtual = (void __user *)virtual; 1538 request->virtual = (void __user *)virtual;
1597 1539
1598 for (i = 0; i < dma->buf_count; i++) { 1540 for (i = 0; i < dma->buf_count; i++) {
1599 if (copy_to_user(&request.list[i].idx, 1541 if (copy_to_user(&request->list[i].idx,
1600 &dma->buflist[i]->idx, 1542 &dma->buflist[i]->idx,
1601 sizeof(request.list[0].idx))) { 1543 sizeof(request->list[0].idx))) {
1602 retcode = -EFAULT; 1544 retcode = -EFAULT;
1603 goto done; 1545 goto done;
1604 } 1546 }
1605 if (copy_to_user(&request.list[i].total, 1547 if (copy_to_user(&request->list[i].total,
1606 &dma->buflist[i]->total, 1548 &dma->buflist[i]->total,
1607 sizeof(request.list[0].total))) { 1549 sizeof(request->list[0].total))) {
1608 retcode = -EFAULT; 1550 retcode = -EFAULT;
1609 goto done; 1551 goto done;
1610 } 1552 }
1611 if (copy_to_user(&request.list[i].used, 1553 if (copy_to_user(&request->list[i].used,
1612 &zero, sizeof(zero))) { 1554 &zero, sizeof(zero))) {
1613 retcode = -EFAULT; 1555 retcode = -EFAULT;
1614 goto done; 1556 goto done;
1615 } 1557 }
1616 address = virtual + dma->buflist[i]->offset; /* *** */ 1558 address = virtual + dma->buflist[i]->offset; /* *** */
1617 if (copy_to_user(&request.list[i].address, 1559 if (copy_to_user(&request->list[i].address,
1618 &address, sizeof(address))) { 1560 &address, sizeof(address))) {
1619 retcode = -EFAULT; 1561 retcode = -EFAULT;
1620 goto done; 1562 goto done;
@@ -1622,11 +1564,8 @@ int drm_mapbufs(struct inode *inode, struct file *filp,
1622 } 1564 }
1623 } 1565 }
1624 done: 1566 done:
1625 request.count = dma->buf_count; 1567 request->count = dma->buf_count;
1626 DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode); 1568 DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1627
1628 if (copy_to_user(argp, &request, sizeof(request)))
1629 return -EFAULT;
1630 1569
1631 return retcode; 1570 return retcode;
1632} 1571}
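
drm_mapbufs() keeps its per-entry copy_to_user() calls because drm_buf_map::list points into user memory; only the header structure moved onto the new kdata path, and the mapping is now made against file_priv->filp saved at open time. A hedged userspace sketch of the usual two-pass mapping sequence (helper name and error handling are illustrative only):

#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/drm.h>	/* struct drm_buf_map, struct drm_buf_pub; path may vary */

/* Sketch: map every DMA buffer of an authenticated DRM fd into this
 * process.  The first call, with count = 0, only reports how many buffers
 * exist; the second call (count >= buffer count) performs the mapping in
 * the kernel and fills idx/total/used/address for each buffer. */
static struct drm_buf_pub *map_dma_buffers(int fd, int *count_out)
{
	struct drm_buf_map req = { .count = 0 };
	struct drm_buf_pub *bufs;

	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &req) < 0 || req.count <= 0)
		return NULL;

	bufs = calloc(req.count, sizeof(*bufs));
	if (!bufs)
		return NULL;

	req.list = bufs;
	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &req) < 0) {
		free(bufs);
		return NULL;
	}

	*count_out = req.count;
	return bufs;
}
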
diff --git a/drivers/char/drm/drm_context.c b/drivers/char/drm/drm_context.c
index 61ad986baa8d..17fe69e7bfc1 100644
--- a/drivers/char/drm/drm_context.c
+++ b/drivers/char/drm/drm_context.c
@@ -131,7 +131,7 @@ void drm_ctxbitmap_cleanup(struct drm_device * dev)
131 * Get per-context SAREA. 131 * Get per-context SAREA.
132 * 132 *
133 * \param inode device inode. 133 * \param inode device inode.
134 * \param filp file pointer. 134 * \param file_priv DRM file private.
135 * \param cmd command. 135 * \param cmd command.
136 * \param arg user argument pointing to a drm_ctx_priv_map structure. 136 * \param arg user argument pointing to a drm_ctx_priv_map structure.
137 * \return zero on success or a negative number on failure. 137 * \return zero on success or a negative number on failure.
@@ -139,22 +139,16 @@ void drm_ctxbitmap_cleanup(struct drm_device * dev)
139 * Gets the map from drm_device::ctx_idr with the handle specified and 139 * Gets the map from drm_device::ctx_idr with the handle specified and
140 * returns its handle. 140 * returns its handle.
141 */ 141 */
142int drm_getsareactx(struct inode *inode, struct file *filp, 142int drm_getsareactx(struct drm_device *dev, void *data,
143 unsigned int cmd, unsigned long arg) 143 struct drm_file *file_priv)
144{ 144{
145 struct drm_file *priv = filp->private_data; 145 struct drm_ctx_priv_map *request = data;
146 struct drm_device *dev = priv->head->dev;
147 struct drm_ctx_priv_map __user *argp = (void __user *)arg;
148 struct drm_ctx_priv_map request;
149 struct drm_map *map; 146 struct drm_map *map;
150 struct drm_map_list *_entry; 147 struct drm_map_list *_entry;
151 148
152 if (copy_from_user(&request, argp, sizeof(request)))
153 return -EFAULT;
154
155 mutex_lock(&dev->struct_mutex); 149 mutex_lock(&dev->struct_mutex);
156 150
157 map = idr_find(&dev->ctx_idr, request.ctx_id); 151 map = idr_find(&dev->ctx_idr, request->ctx_id);
158 if (!map) { 152 if (!map) {
159 mutex_unlock(&dev->struct_mutex); 153 mutex_unlock(&dev->struct_mutex);
160 return -EINVAL; 154 return -EINVAL;
@@ -162,19 +156,17 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
162 156
163 mutex_unlock(&dev->struct_mutex); 157 mutex_unlock(&dev->struct_mutex);
164 158
165 request.handle = NULL; 159 request->handle = NULL;
166 list_for_each_entry(_entry, &dev->maplist, head) { 160 list_for_each_entry(_entry, &dev->maplist, head) {
167 if (_entry->map == map) { 161 if (_entry->map == map) {
168 request.handle = 162 request->handle =
169 (void *)(unsigned long)_entry->user_token; 163 (void *)(unsigned long)_entry->user_token;
170 break; 164 break;
171 } 165 }
172 } 166 }
173 if (request.handle == NULL) 167 if (request->handle == NULL)
174 return -EINVAL; 168 return -EINVAL;
175 169
176 if (copy_to_user(argp, &request, sizeof(request)))
177 return -EFAULT;
178 return 0; 170 return 0;
179} 171}
180 172
@@ -182,7 +174,7 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
182 * Set per-context SAREA. 174 * Set per-context SAREA.
183 * 175 *
184 * \param inode device inode. 176 * \param inode device inode.
185 * \param filp file pointer. 177 * \param file_priv DRM file private.
186 * \param cmd command. 178 * \param cmd command.
187 * \param arg user argument pointing to a drm_ctx_priv_map structure. 179 * \param arg user argument pointing to a drm_ctx_priv_map structure.
188 * \return zero on success or a negative number on failure. 180 * \return zero on success or a negative number on failure.
@@ -190,24 +182,17 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
190 * Searches the mapping specified in \p arg and updates the entry in 182 * Searches the mapping specified in \p arg and updates the entry in
191 * drm_device::ctx_idr with it. 183 * drm_device::ctx_idr with it.
192 */ 184 */
193int drm_setsareactx(struct inode *inode, struct file *filp, 185int drm_setsareactx(struct drm_device *dev, void *data,
194 unsigned int cmd, unsigned long arg) 186 struct drm_file *file_priv)
195{ 187{
196 struct drm_file *priv = filp->private_data; 188 struct drm_ctx_priv_map *request = data;
197 struct drm_device *dev = priv->head->dev;
198 struct drm_ctx_priv_map request;
199 struct drm_map *map = NULL; 189 struct drm_map *map = NULL;
200 struct drm_map_list *r_list = NULL; 190 struct drm_map_list *r_list = NULL;
201 191
202 if (copy_from_user(&request,
203 (struct drm_ctx_priv_map __user *) arg,
204 sizeof(request)))
205 return -EFAULT;
206
207 mutex_lock(&dev->struct_mutex); 192 mutex_lock(&dev->struct_mutex);
208 list_for_each_entry(r_list, &dev->maplist, head) { 193 list_for_each_entry(r_list, &dev->maplist, head) {
209 if (r_list->map 194 if (r_list->map
210 && r_list->user_token == (unsigned long)request.handle) 195 && r_list->user_token == (unsigned long) request->handle)
211 goto found; 196 goto found;
212 } 197 }
213 bad: 198 bad:
@@ -219,10 +204,11 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
219 if (!map) 204 if (!map)
220 goto bad; 205 goto bad;
221 206
222 if (IS_ERR(idr_replace(&dev->ctx_idr, map, request.ctx_id))) 207 if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
223 goto bad; 208 goto bad;
224 209
225 mutex_unlock(&dev->struct_mutex); 210 mutex_unlock(&dev->struct_mutex);
211
226 return 0; 212 return 0;
227} 213}
228 214
@@ -292,34 +278,28 @@ static int drm_context_switch_complete(struct drm_device * dev, int new)
292 * Reserve contexts. 278 * Reserve contexts.
293 * 279 *
294 * \param inode device inode. 280 * \param inode device inode.
295 * \param filp file pointer. 281 * \param file_priv DRM file private.
296 * \param cmd command. 282 * \param cmd command.
297 * \param arg user argument pointing to a drm_ctx_res structure. 283 * \param arg user argument pointing to a drm_ctx_res structure.
298 * \return zero on success or a negative number on failure. 284 * \return zero on success or a negative number on failure.
299 */ 285 */
300int drm_resctx(struct inode *inode, struct file *filp, 286int drm_resctx(struct drm_device *dev, void *data,
301 unsigned int cmd, unsigned long arg) 287 struct drm_file *file_priv)
302{ 288{
303 struct drm_ctx_res res; 289 struct drm_ctx_res *res = data;
304 struct drm_ctx_res __user *argp = (void __user *)arg;
305 struct drm_ctx ctx; 290 struct drm_ctx ctx;
306 int i; 291 int i;
307 292
308 if (copy_from_user(&res, argp, sizeof(res))) 293 if (res->count >= DRM_RESERVED_CONTEXTS) {
309 return -EFAULT;
310
311 if (res.count >= DRM_RESERVED_CONTEXTS) {
312 memset(&ctx, 0, sizeof(ctx)); 294 memset(&ctx, 0, sizeof(ctx));
313 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { 295 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
314 ctx.handle = i; 296 ctx.handle = i;
315 if (copy_to_user(&res.contexts[i], &ctx, sizeof(ctx))) 297 if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
316 return -EFAULT; 298 return -EFAULT;
317 } 299 }
318 } 300 }
319 res.count = DRM_RESERVED_CONTEXTS; 301 res->count = DRM_RESERVED_CONTEXTS;
320 302
321 if (copy_to_user(argp, &res, sizeof(res)))
322 return -EFAULT;
323 return 0; 303 return 0;
324} 304}
325 305
@@ -327,40 +307,34 @@ int drm_resctx(struct inode *inode, struct file *filp,
327 * Add context. 307 * Add context.
328 * 308 *
329 * \param inode device inode. 309 * \param inode device inode.
330 * \param filp file pointer. 310 * \param file_priv DRM file private.
331 * \param cmd command. 311 * \param cmd command.
332 * \param arg user argument pointing to a drm_ctx structure. 312 * \param arg user argument pointing to a drm_ctx structure.
333 * \return zero on success or a negative number on failure. 313 * \return zero on success or a negative number on failure.
334 * 314 *
335 * Get a new handle for the context and copy to userspace. 315 * Get a new handle for the context and copy to userspace.
336 */ 316 */
337int drm_addctx(struct inode *inode, struct file *filp, 317int drm_addctx(struct drm_device *dev, void *data,
338 unsigned int cmd, unsigned long arg) 318 struct drm_file *file_priv)
339{ 319{
340 struct drm_file *priv = filp->private_data;
341 struct drm_device *dev = priv->head->dev;
342 struct drm_ctx_list *ctx_entry; 320 struct drm_ctx_list *ctx_entry;
343 struct drm_ctx __user *argp = (void __user *)arg; 321 struct drm_ctx *ctx = data;
344 struct drm_ctx ctx;
345
346 if (copy_from_user(&ctx, argp, sizeof(ctx)))
347 return -EFAULT;
348 322
349 ctx.handle = drm_ctxbitmap_next(dev); 323 ctx->handle = drm_ctxbitmap_next(dev);
350 if (ctx.handle == DRM_KERNEL_CONTEXT) { 324 if (ctx->handle == DRM_KERNEL_CONTEXT) {
351 /* Skip kernel's context and get a new one. */ 325 /* Skip kernel's context and get a new one. */
352 ctx.handle = drm_ctxbitmap_next(dev); 326 ctx->handle = drm_ctxbitmap_next(dev);
353 } 327 }
354 DRM_DEBUG("%d\n", ctx.handle); 328 DRM_DEBUG("%d\n", ctx->handle);
355 if (ctx.handle == -1) { 329 if (ctx->handle == -1) {
356 DRM_DEBUG("Not enough free contexts.\n"); 330 DRM_DEBUG("Not enough free contexts.\n");
357 /* Should this return -EBUSY instead? */ 331 /* Should this return -EBUSY instead? */
358 return -ENOMEM; 332 return -ENOMEM;
359 } 333 }
360 334
361 if (ctx.handle != DRM_KERNEL_CONTEXT) { 335 if (ctx->handle != DRM_KERNEL_CONTEXT) {
362 if (dev->driver->context_ctor) 336 if (dev->driver->context_ctor)
363 if (!dev->driver->context_ctor(dev, ctx.handle)) { 337 if (!dev->driver->context_ctor(dev, ctx->handle)) {
364 DRM_DEBUG("Running out of ctxs or memory.\n"); 338 DRM_DEBUG("Running out of ctxs or memory.\n");
365 return -ENOMEM; 339 return -ENOMEM;
366 } 340 }
@@ -373,21 +347,18 @@ int drm_addctx(struct inode *inode, struct file *filp,
373 } 347 }
374 348
375 INIT_LIST_HEAD(&ctx_entry->head); 349 INIT_LIST_HEAD(&ctx_entry->head);
376 ctx_entry->handle = ctx.handle; 350 ctx_entry->handle = ctx->handle;
377 ctx_entry->tag = priv; 351 ctx_entry->tag = file_priv;
378 352
379 mutex_lock(&dev->ctxlist_mutex); 353 mutex_lock(&dev->ctxlist_mutex);
380 list_add(&ctx_entry->head, &dev->ctxlist); 354 list_add(&ctx_entry->head, &dev->ctxlist);
381 ++dev->ctx_count; 355 ++dev->ctx_count;
382 mutex_unlock(&dev->ctxlist_mutex); 356 mutex_unlock(&dev->ctxlist_mutex);
383 357
384 if (copy_to_user(argp, &ctx, sizeof(ctx)))
385 return -EFAULT;
386 return 0; 358 return 0;
387} 359}
388 360
389int drm_modctx(struct inode *inode, struct file *filp, 361int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
390 unsigned int cmd, unsigned long arg)
391{ 362{
392 /* This does nothing */ 363 /* This does nothing */
393 return 0; 364 return 0;
@@ -397,25 +368,18 @@ int drm_modctx(struct inode *inode, struct file *filp,
397 * Get context. 368 * Get context.
398 * 369 *
399 * \param inode device inode. 370 * \param inode device inode.
400 * \param filp file pointer. 371 * \param file_priv DRM file private.
401 * \param cmd command. 372 * \param cmd command.
402 * \param arg user argument pointing to a drm_ctx structure. 373 * \param arg user argument pointing to a drm_ctx structure.
403 * \return zero on success or a negative number on failure. 374 * \return zero on success or a negative number on failure.
404 */ 375 */
405int drm_getctx(struct inode *inode, struct file *filp, 376int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
406 unsigned int cmd, unsigned long arg)
407{ 377{
408 struct drm_ctx __user *argp = (void __user *)arg; 378 struct drm_ctx *ctx = data;
409 struct drm_ctx ctx;
410
411 if (copy_from_user(&ctx, argp, sizeof(ctx)))
412 return -EFAULT;
413 379
414 /* This is 0, because we don't handle any context flags */ 380 /* This is 0, because we don't handle any context flags */
415 ctx.flags = 0; 381 ctx->flags = 0;
416 382
417 if (copy_to_user(argp, &ctx, sizeof(ctx)))
418 return -EFAULT;
419 return 0; 383 return 0;
420} 384}
421 385
@@ -423,50 +387,40 @@ int drm_getctx(struct inode *inode, struct file *filp,
423 * Switch context. 387 * Switch context.
424 * 388 *
425 * \param inode device inode. 389 * \param inode device inode.
426 * \param filp file pointer. 390 * \param file_priv DRM file private.
427 * \param cmd command. 391 * \param cmd command.
428 * \param arg user argument pointing to a drm_ctx structure. 392 * \param arg user argument pointing to a drm_ctx structure.
429 * \return zero on success or a negative number on failure. 393 * \return zero on success or a negative number on failure.
430 * 394 *
431 * Calls context_switch(). 395 * Calls context_switch().
432 */ 396 */
433int drm_switchctx(struct inode *inode, struct file *filp, 397int drm_switchctx(struct drm_device *dev, void *data,
434 unsigned int cmd, unsigned long arg) 398 struct drm_file *file_priv)
435{ 399{
436 struct drm_file *priv = filp->private_data; 400 struct drm_ctx *ctx = data;
437 struct drm_device *dev = priv->head->dev;
438 struct drm_ctx ctx;
439 401
440 if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) 402 DRM_DEBUG("%d\n", ctx->handle);
441 return -EFAULT; 403 return drm_context_switch(dev, dev->last_context, ctx->handle);
442
443 DRM_DEBUG("%d\n", ctx.handle);
444 return drm_context_switch(dev, dev->last_context, ctx.handle);
445} 404}
446 405
447/** 406/**
448 * New context. 407 * New context.
449 * 408 *
450 * \param inode device inode. 409 * \param inode device inode.
451 * \param filp file pointer. 410 * \param file_priv DRM file private.
452 * \param cmd command. 411 * \param cmd command.
453 * \param arg user argument pointing to a drm_ctx structure. 412 * \param arg user argument pointing to a drm_ctx structure.
454 * \return zero on success or a negative number on failure. 413 * \return zero on success or a negative number on failure.
455 * 414 *
456 * Calls context_switch_complete(). 415 * Calls context_switch_complete().
457 */ 416 */
458int drm_newctx(struct inode *inode, struct file *filp, 417int drm_newctx(struct drm_device *dev, void *data,
459 unsigned int cmd, unsigned long arg) 418 struct drm_file *file_priv)
460{ 419{
461 struct drm_file *priv = filp->private_data; 420 struct drm_ctx *ctx = data;
462 struct drm_device *dev = priv->head->dev;
463 struct drm_ctx ctx;
464 421
465 if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) 422 DRM_DEBUG("%d\n", ctx->handle);
466 return -EFAULT; 423 drm_context_switch_complete(dev, ctx->handle);
467
468 DRM_DEBUG("%d\n", ctx.handle);
469 drm_context_switch_complete(dev, ctx.handle);
470 424
471 return 0; 425 return 0;
472} 426}
@@ -475,31 +429,26 @@ int drm_newctx(struct inode *inode, struct file *filp,
475 * Remove context. 429 * Remove context.
476 * 430 *
477 * \param inode device inode. 431 * \param inode device inode.
478 * \param filp file pointer. 432 * \param file_priv DRM file private.
479 * \param cmd command. 433 * \param cmd command.
480 * \param arg user argument pointing to a drm_ctx structure. 434 * \param arg user argument pointing to a drm_ctx structure.
481 * \return zero on success or a negative number on failure. 435 * \return zero on success or a negative number on failure.
482 * 436 *
483 * If not the special kernel context, calls ctxbitmap_free() to free the specified context. 437 * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
484 */ 438 */
485int drm_rmctx(struct inode *inode, struct file *filp, 439int drm_rmctx(struct drm_device *dev, void *data,
486 unsigned int cmd, unsigned long arg) 440 struct drm_file *file_priv)
487{ 441{
488 struct drm_file *priv = filp->private_data; 442 struct drm_ctx *ctx = data;
489 struct drm_device *dev = priv->head->dev;
490 struct drm_ctx ctx;
491
492 if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx)))
493 return -EFAULT;
494 443
495 DRM_DEBUG("%d\n", ctx.handle); 444 DRM_DEBUG("%d\n", ctx->handle);
496 if (ctx.handle == DRM_KERNEL_CONTEXT + 1) { 445 if (ctx->handle == DRM_KERNEL_CONTEXT + 1) {
497 priv->remove_auth_on_close = 1; 446 file_priv->remove_auth_on_close = 1;
498 } 447 }
499 if (ctx.handle != DRM_KERNEL_CONTEXT) { 448 if (ctx->handle != DRM_KERNEL_CONTEXT) {
500 if (dev->driver->context_dtor) 449 if (dev->driver->context_dtor)
501 dev->driver->context_dtor(dev, ctx.handle); 450 dev->driver->context_dtor(dev, ctx->handle);
502 drm_ctxbitmap_free(dev, ctx.handle); 451 drm_ctxbitmap_free(dev, ctx->handle);
503 } 452 }
504 453
505 mutex_lock(&dev->ctxlist_mutex); 454 mutex_lock(&dev->ctxlist_mutex);
@@ -507,7 +456,7 @@ int drm_rmctx(struct inode *inode, struct file *filp,
507 struct drm_ctx_list *pos, *n; 456 struct drm_ctx_list *pos, *n;
508 457
509 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { 458 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
510 if (pos->handle == ctx.handle) { 459 if (pos->handle == ctx->handle) {
511 list_del(&pos->head); 460 list_del(&pos->head);
512 drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST); 461 drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
513 --dev->ctx_count; 462 --dev->ctx_count;
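
All of the context ioctls above now work directly on the kernel copy passed in through 'data'. From userspace the add/remove round trip looks the same as before; a small sketch, assuming an authenticated fd with the privileges required by the DRM_AUTH|DRM_ROOT_ONLY flags in the ioctl table later in this patch:

#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/drm.h>	/* struct drm_ctx, DRM_IOCTL_ADD_CTX, DRM_IOCTL_RM_CTX */

/* Sketch: allocate a hardware context and free it again.  The kernel fills
 * ctx.handle on DRM_IOCTL_ADD_CTX; DRM_IOCTL_RM_CTX takes the same
 * structure back to release the handle. */
static int context_round_trip(int fd)
{
	struct drm_ctx ctx = { 0 };

	if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx) < 0)
		return -1;

	printf("got context handle %u\n", ctx.handle);

	return ioctl(fd, DRM_IOCTL_RM_CTX, &ctx);
}
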
diff --git a/drivers/char/drm/drm_dma.c b/drivers/char/drm/drm_dma.c
index 802fbdbfe1b3..7a8e2fba4678 100644
--- a/drivers/char/drm/drm_dma.c
+++ b/drivers/char/drm/drm_dma.c
@@ -136,7 +136,7 @@ void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
136 136
137 buf->waiting = 0; 137 buf->waiting = 0;
138 buf->pending = 0; 138 buf->pending = 0;
139 buf->filp = NULL; 139 buf->file_priv = NULL;
140 buf->used = 0; 140 buf->used = 0;
141 141
142 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) 142 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
@@ -148,11 +148,12 @@ void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
148/** 148/**
149 * Reclaim the buffers. 149 * Reclaim the buffers.
150 * 150 *
151 * \param filp file pointer. 151 * \param file_priv DRM file private.
152 * 152 *
153 * Frees each buffer associated with \p filp not already on the hardware. 153 * Frees each buffer associated with \p file_priv not already on the hardware.
154 */ 154 */
155void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp) 155void drm_core_reclaim_buffers(struct drm_device *dev,
156 struct drm_file *file_priv)
156{ 157{
157 struct drm_device_dma *dma = dev->dma; 158 struct drm_device_dma *dma = dev->dma;
158 int i; 159 int i;
@@ -160,7 +161,7 @@ void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp)
160 if (!dma) 161 if (!dma)
161 return; 162 return;
162 for (i = 0; i < dma->buf_count; i++) { 163 for (i = 0; i < dma->buf_count; i++) {
163 if (dma->buflist[i]->filp == filp) { 164 if (dma->buflist[i]->file_priv == file_priv) {
164 switch (dma->buflist[i]->list) { 165 switch (dma->buflist[i]->list) {
165 case DRM_LIST_NONE: 166 case DRM_LIST_NONE:
166 drm_free_buffer(dev, dma->buflist[i]); 167 drm_free_buffer(dev, dma->buflist[i]);
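
The buf->filp to buf->file_priv rename means a DMA buffer is now tagged with the drm_file that dispatched it, and drm_core_reclaim_buffers() matches on that pointer when the file is released. A rough kernel-side sketch of the ownership handoff; the function is hypothetical, and real drivers take the buffer from their own freelist logic:

#include "drmP.h"

/* Sketch only: mark a buffer as owned by the client issuing the ioctl, so
 * drm_core_reclaim_buffers(dev, file_priv) can release it if the client
 * exits without freeing it.  Before this patch the tag was the raw
 * struct file pointer (buf->filp = filp). */
static struct drm_buf *example_claim_buffer(struct drm_device *dev,
					    struct drm_file *file_priv,
					    int idx)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;

	if (!dma || idx < 0 || idx >= dma->buf_count)
		return NULL;

	buf = dma->buflist[idx];
	buf->file_priv = file_priv;	/* ownership tag checked at reclaim time */
	buf->used = 0;
	return buf;
}
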
diff --git a/drivers/char/drm/drm_drawable.c b/drivers/char/drm/drm_drawable.c
index d6cdba5644e2..1839c57663c5 100644
--- a/drivers/char/drm/drm_drawable.c
+++ b/drivers/char/drm/drm_drawable.c
@@ -40,11 +40,10 @@
40/** 40/**
41 * Allocate drawable ID and memory to store information about it. 41 * Allocate drawable ID and memory to store information about it.
42 */ 42 */
43int drm_adddraw(DRM_IOCTL_ARGS) 43int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
44{ 44{
45 DRM_DEVICE;
46 unsigned long irqflags; 45 unsigned long irqflags;
47 struct drm_draw draw; 46 struct drm_draw *draw = data;
48 int new_id = 0; 47 int new_id = 0;
49 int ret; 48 int ret;
50 49
@@ -63,11 +62,9 @@ again:
63 62
64 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 63 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
65 64
66 draw.handle = new_id; 65 draw->handle = new_id;
67 66
68 DRM_DEBUG("%d\n", draw.handle); 67 DRM_DEBUG("%d\n", draw->handle);
69
70 DRM_COPY_TO_USER_IOCTL((struct drm_draw __user *)data, draw, sizeof(draw));
71 68
72 return 0; 69 return 0;
73} 70}
@@ -75,72 +72,64 @@ again:
75/** 72/**
76 * Free drawable ID and memory to store information about it. 73 * Free drawable ID and memory to store information about it.
77 */ 74 */
78int drm_rmdraw(DRM_IOCTL_ARGS) 75int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
79{ 76{
80 DRM_DEVICE; 77 struct drm_draw *draw = data;
81 struct drm_draw draw;
82 unsigned long irqflags; 78 unsigned long irqflags;
83 79
84 DRM_COPY_FROM_USER_IOCTL(draw, (struct drm_draw __user *) data,
85 sizeof(draw));
86
87 spin_lock_irqsave(&dev->drw_lock, irqflags); 80 spin_lock_irqsave(&dev->drw_lock, irqflags);
88 81
89 drm_free(drm_get_drawable_info(dev, draw.handle), 82 drm_free(drm_get_drawable_info(dev, draw->handle),
90 sizeof(struct drm_drawable_info), DRM_MEM_BUFS); 83 sizeof(struct drm_drawable_info), DRM_MEM_BUFS);
91 84
92 idr_remove(&dev->drw_idr, draw.handle); 85 idr_remove(&dev->drw_idr, draw->handle);
93 86
94 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 87 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
95 DRM_DEBUG("%d\n", draw.handle); 88 DRM_DEBUG("%d\n", draw->handle);
96 return 0; 89 return 0;
97} 90}
98 91
99int drm_update_drawable_info(DRM_IOCTL_ARGS) 92int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
100{ 93{
101 DRM_DEVICE; 94 struct drm_update_draw *update = data;
102 struct drm_update_draw update;
103 unsigned long irqflags; 95 unsigned long irqflags;
104 struct drm_clip_rect *rects; 96 struct drm_clip_rect *rects;
105 struct drm_drawable_info *info; 97 struct drm_drawable_info *info;
106 int err; 98 int err;
107 99
108 DRM_COPY_FROM_USER_IOCTL(update, (struct drm_update_draw __user *) data, 100 info = idr_find(&dev->drw_idr, update->handle);
109 sizeof(update));
110
111 info = idr_find(&dev->drw_idr, update.handle);
112 if (!info) { 101 if (!info) {
113 info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS); 102 info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS);
114 if (!info) 103 if (!info)
115 return -ENOMEM; 104 return -ENOMEM;
116 if (IS_ERR(idr_replace(&dev->drw_idr, info, update.handle))) { 105 if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
117 DRM_ERROR("No such drawable %d\n", update.handle); 106 DRM_ERROR("No such drawable %d\n", update->handle);
118 drm_free(info, sizeof(*info), DRM_MEM_BUFS); 107 drm_free(info, sizeof(*info), DRM_MEM_BUFS);
119 return -EINVAL; 108 return -EINVAL;
120 } 109 }
121 } 110 }
122 111
123 switch (update.type) { 112 switch (update->type) {
124 case DRM_DRAWABLE_CLIPRECTS: 113 case DRM_DRAWABLE_CLIPRECTS:
125 if (update.num != info->num_rects) { 114 if (update->num != info->num_rects) {
126 rects = drm_alloc(update.num * sizeof(struct drm_clip_rect), 115 rects = drm_alloc(update->num * sizeof(struct drm_clip_rect),
127 DRM_MEM_BUFS); 116 DRM_MEM_BUFS);
128 } else 117 } else
129 rects = info->rects; 118 rects = info->rects;
130 119
131 if (update.num && !rects) { 120 if (update->num && !rects) {
132 DRM_ERROR("Failed to allocate cliprect memory\n"); 121 DRM_ERROR("Failed to allocate cliprect memory\n");
133 err = DRM_ERR(ENOMEM); 122 err = -ENOMEM;
134 goto error; 123 goto error;
135 } 124 }
136 125
137 if (update.num && DRM_COPY_FROM_USER(rects, 126 if (update->num && DRM_COPY_FROM_USER(rects,
138 (struct drm_clip_rect __user *) 127 (struct drm_clip_rect __user *)
139 (unsigned long)update.data, 128 (unsigned long)update->data,
140 update.num * 129 update->num *
141 sizeof(*rects))) { 130 sizeof(*rects))) {
142 DRM_ERROR("Failed to copy cliprects from userspace\n"); 131 DRM_ERROR("Failed to copy cliprects from userspace\n");
143 err = DRM_ERR(EFAULT); 132 err = -EFAULT;
144 goto error; 133 goto error;
145 } 134 }
146 135
@@ -152,23 +141,23 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS)
152 } 141 }
153 142
154 info->rects = rects; 143 info->rects = rects;
155 info->num_rects = update.num; 144 info->num_rects = update->num;
156 145
157 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 146 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
158 147
159 DRM_DEBUG("Updated %d cliprects for drawable %d\n", 148 DRM_DEBUG("Updated %d cliprects for drawable %d\n",
160 info->num_rects, update.handle); 149 info->num_rects, update->handle);
161 break; 150 break;
162 default: 151 default:
163 DRM_ERROR("Invalid update type %d\n", update.type); 152 DRM_ERROR("Invalid update type %d\n", update->type);
164 return DRM_ERR(EINVAL); 153 return -EINVAL;
165 } 154 }
166 155
167 return 0; 156 return 0;
168 157
169error: 158error:
170 if (rects != info->rects) 159 if (rects != info->rects)
171 drm_free(rects, update.num * sizeof(struct drm_clip_rect), 160 drm_free(rects, update->num * sizeof(struct drm_clip_rect),
172 DRM_MEM_BUFS); 161 DRM_MEM_BUFS);
173 162
174 return err; 163 return err;
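
drm_update_drawable_info() reads the cliprect array through drm_update_draw::data, which userspace fills with a pointer value widened to 64 bits. A hedged sketch of the caller side; the handle is assumed to come from a prior DRM_IOCTL_ADD_DRAW and the helper name is illustrative:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>	/* struct drm_update_draw, struct drm_clip_rect */

/* Sketch: attach a cliprect list to an existing drawable.  The rect array
 * stays in user memory; the kernel copies 'num' entries out of it during
 * the ioctl, as seen in the DRM_COPY_FROM_USER call above. */
static int set_drawable_cliprects(int fd, unsigned int handle,
				  struct drm_clip_rect *rects, unsigned int num)
{
	struct drm_update_draw update = {
		.handle = handle,
		.type   = DRM_DRAWABLE_CLIPRECTS,
		.num    = num,
		.data   = (unsigned long long)(uintptr_t)rects,
	};

	return ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update);
}
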
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index 19994cd865de..72668b15e5ce 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -49,73 +49,74 @@
49#include "drmP.h" 49#include "drmP.h"
50#include "drm_core.h" 50#include "drm_core.h"
51 51
52static int drm_version(struct inode *inode, struct file *filp, 52static int drm_version(struct drm_device *dev, void *data,
53 unsigned int cmd, unsigned long arg); 53 struct drm_file *file_priv);
54 54
55/** Ioctl table */ 55/** Ioctl table */
56static drm_ioctl_desc_t drm_ioctls[] = { 56static struct drm_ioctl_desc drm_ioctls[] = {
57 [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0}, 57 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
58 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0}, 58 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
59 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0}, 59 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
60 [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = {drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY}, 60 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
61 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = {drm_getmap, 0}, 61 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
62 [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = {drm_getclient, 0}, 62 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
63 [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = {drm_getstats, 0}, 63 DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
64 [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = {drm_setversion, DRM_MASTER|DRM_ROOT_ONLY}, 64 DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
65 [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = {drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 65
66 [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 66 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
67 [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 67 DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
68 [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = {drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 68 DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
69 69 DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
70 [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = {drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 70
71 [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = {drm_rmmap_ioctl, DRM_AUTH}, 71 DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
72 72 DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
73 [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 73
74 [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, DRM_AUTH}, 74 DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
75 75 DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
76 [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_ROOT_ONLY}, 76
77 [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_ROOT_ONLY}, 77 DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
78 [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 78 DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
79 [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, DRM_AUTH}, 79 DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
80 [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 80 DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
81 [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = {drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 81 DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
82 [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = {drm_resctx, DRM_AUTH}, 82 DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
83 83 DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
84 [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = {drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 84
85 [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = {drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 85 DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
86 86 DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
87 [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = {drm_lock, DRM_AUTH}, 87
88 [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = {drm_unlock, DRM_AUTH}, 88 DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
89 89 DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
90 [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = {drm_noop, DRM_AUTH}, 90
91 91 DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
92 [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = {drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 92
93 [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = {drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 93 DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
94 [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = {drm_infobufs, DRM_AUTH}, 94 DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
95 [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, DRM_AUTH}, 95 DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
96 [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = {drm_freebufs, DRM_AUTH}, 96 DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
97 DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
97 /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */ 98 /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
98 [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = {NULL, DRM_AUTH}, 99 DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
99 100
100 [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 101 DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
101 102
102#if __OS_HAS_AGP 103#if __OS_HAS_AGP
103 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 104 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
104 [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 105 DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
105 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 106 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
106 [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info_ioctl, DRM_AUTH}, 107 DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
107 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 108 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
108 [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 109 DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
109 [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 110 DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
110 [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 111 DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
111#endif 112#endif
112 113
113 [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 114 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
114 [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 115 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
115 116
116 [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0}, 117 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
117 118
118 [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 119 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
119}; 120};
120 121
121#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) 122#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
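
DRM_IOCTL_DEF and struct drm_ioctl_desc come from the drmP.h half of this change, which is not shown in this hunk. Reconstructed from the way the table and the dispatcher below use them, they look roughly like the following; treat the exact field names and layout as an assumption:

/* Sketch of the drmP.h side of this rework.  Each table slot records the
 * full ioctl number, a handler with the new calling convention, and the
 * permission flags checked by drm_ioctl(). */
typedef int drm_ioctl_t(struct drm_device *dev, void *data,
			struct drm_file *file_priv);

struct drm_ioctl_desc {
	unsigned int cmd;	/* full ioctl request number */
	drm_ioctl_t *func;
	int flags;		/* DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY */
};

#define DRM_IOCTL_DEF(ioctl, _func, _flags)				\
	[DRM_IOCTL_NR(ioctl)] = { .cmd = ioctl, .func = _func, .flags = _flags }

The designated-initializer form keeps the table indexed by ioctl number, exactly like the old open-coded [DRM_IOCTL_NR(...)] entries, while also recording the full request number alongside the handler and its permission flags.
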
@@ -224,7 +225,7 @@ int drm_lastclose(struct drm_device * dev)
224 225
225 if (dev->lock.hw_lock) { 226 if (dev->lock.hw_lock) {
226 dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */ 227 dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
227 dev->lock.filp = NULL; 228 dev->lock.file_priv = NULL;
228 wake_up_interruptible(&dev->lock.lock_queue); 229 wake_up_interruptible(&dev->lock.lock_queue);
229 } 230 }
230 mutex_unlock(&dev->struct_mutex); 231 mutex_unlock(&dev->struct_mutex);
@@ -418,27 +419,19 @@ module_exit(drm_core_exit);
418 * 419 *
419 * Fills in the version information in \p arg. 420 * Fills in the version information in \p arg.
420 */ 421 */
421static int drm_version(struct inode *inode, struct file *filp, 422static int drm_version(struct drm_device *dev, void *data,
422 unsigned int cmd, unsigned long arg) 423 struct drm_file *file_priv)
423{ 424{
424 struct drm_file *priv = filp->private_data; 425 struct drm_version *version = data;
425 struct drm_device *dev = priv->head->dev;
426 struct drm_version __user *argp = (void __user *)arg;
427 struct drm_version version;
428 int len; 426 int len;
429 427
430 if (copy_from_user(&version, argp, sizeof(version))) 428 version->version_major = dev->driver->major;
431 return -EFAULT; 429 version->version_minor = dev->driver->minor;
430 version->version_patchlevel = dev->driver->patchlevel;
431 DRM_COPY(version->name, dev->driver->name);
432 DRM_COPY(version->date, dev->driver->date);
433 DRM_COPY(version->desc, dev->driver->desc);
432 434
433 version.version_major = dev->driver->major;
434 version.version_minor = dev->driver->minor;
435 version.version_patchlevel = dev->driver->patchlevel;
436 DRM_COPY(version.name, dev->driver->name);
437 DRM_COPY(version.date, dev->driver->date);
438 DRM_COPY(version.desc, dev->driver->desc);
439
440 if (copy_to_user(argp, &version, sizeof(version)))
441 return -EFAULT;
442 return 0; 435 return 0;
443} 436}
444 437
@@ -446,7 +439,7 @@ static int drm_version(struct inode *inode, struct file *filp,
446 * Called whenever a process performs an ioctl on /dev/drm. 439 * Called whenever a process performs an ioctl on /dev/drm.
447 * 440 *
448 * \param inode device inode. 441 * \param inode device inode.
449 * \param filp file pointer. 442 * \param file_priv DRM file private.
450 * \param cmd command. 443 * \param cmd command.
451 * \param arg user argument. 444 * \param arg user argument.
452 * \return zero on success or negative number on failure. 445 * \return zero on success or negative number on failure.
@@ -457,21 +450,22 @@ static int drm_version(struct inode *inode, struct file *filp,
457int drm_ioctl(struct inode *inode, struct file *filp, 450int drm_ioctl(struct inode *inode, struct file *filp,
458 unsigned int cmd, unsigned long arg) 451 unsigned int cmd, unsigned long arg)
459{ 452{
460 struct drm_file *priv = filp->private_data; 453 struct drm_file *file_priv = filp->private_data;
461 struct drm_device *dev = priv->head->dev; 454 struct drm_device *dev = file_priv->head->dev;
462 drm_ioctl_desc_t *ioctl; 455 struct drm_ioctl_desc *ioctl;
463 drm_ioctl_t *func; 456 drm_ioctl_t *func;
464 unsigned int nr = DRM_IOCTL_NR(cmd); 457 unsigned int nr = DRM_IOCTL_NR(cmd);
465 int retcode = -EINVAL; 458 int retcode = -EINVAL;
459 char *kdata = NULL;
466 460
467 atomic_inc(&dev->ioctl_count); 461 atomic_inc(&dev->ioctl_count);
468 atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); 462 atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
469 ++priv->ioctl_count; 463 ++file_priv->ioctl_count;
470 464
471 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", 465 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
472 current->pid, cmd, nr, 466 current->pid, cmd, nr,
473 (long)old_encode_dev(priv->head->device), 467 (long)old_encode_dev(file_priv->head->device),
474 priv->authenticated); 468 file_priv->authenticated);
475 469
476 if ((nr >= DRM_CORE_IOCTL_COUNT) && 470 if ((nr >= DRM_CORE_IOCTL_COUNT) &&
477 ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) 471 ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
@@ -489,18 +483,40 @@ int drm_ioctl(struct inode *inode, struct file *filp,
489 if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl) 483 if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
490 func = dev->driver->dma_ioctl; 484 func = dev->driver->dma_ioctl;
491 485
486
492 if (!func) { 487 if (!func) {
493 DRM_DEBUG("no function\n"); 488 DRM_DEBUG("no function\n");
494 retcode = -EINVAL; 489 retcode = -EINVAL;
495 } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) || 490 } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
496 ((ioctl->flags & DRM_AUTH) && !priv->authenticated) || 491 ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
497 ((ioctl->flags & DRM_MASTER) && !priv->master)) { 492 ((ioctl->flags & DRM_MASTER) && !file_priv->master)) {
498 retcode = -EACCES; 493 retcode = -EACCES;
499 } else { 494 } else {
500 retcode = func(inode, filp, cmd, arg); 495 if (cmd & (IOC_IN | IOC_OUT)) {
496 kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
497 if (!kdata)
498 return -ENOMEM;
499 }
500
501 if (cmd & IOC_IN) {
502 if (copy_from_user(kdata, (void __user *)arg,
503 _IOC_SIZE(cmd)) != 0) {
504 retcode = -EACCES;
505 goto err_i1;
506 }
507 }
508 retcode = func(dev, kdata, file_priv);
509
510 if (cmd & IOC_OUT) {
511 if (copy_to_user((void __user *)arg, kdata,
512 _IOC_SIZE(cmd)) != 0)
513 retcode = -EACCES;
514 }
501 } 515 }
502 516
503 err_i1: 517 err_i1:
518 if (kdata)
519 kfree(kdata);
504 atomic_dec(&dev->ioctl_count); 520 atomic_dec(&dev->ioctl_count);
505 if (retcode) 521 if (retcode)
506 DRM_DEBUG("ret = %x\n", retcode); 522 DRM_DEBUG("ret = %x\n", retcode);
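
With drm_ioctl() now doing the copy_from_user()/copy_to_user() bracketing itself, driven by the IOC_IN/IOC_OUT direction bits and _IOC_SIZE of the command, a converted handler only ever touches the kernel copy in 'data'. A hypothetical driver-private ioctl showing the new shape; the structure, parameter values, and names are invented for the example:

#include "drmP.h"

/* Hypothetical handler under the new convention: 'data' already holds the
 * user argument (copied in for IOC_IN commands) and is copied back out
 * after we return (for IOC_OUT commands), so no __user pointers appear. */
struct example_getparam {
	int param;
	int value;
};

static int example_getparam_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct example_getparam *args = data;

	switch (args->param) {
	case 0:
		/* report whether the device was registered with DMA support */
		args->value = drm_core_check_feature(dev, DRIVER_HAVE_DMA);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

Such a handler would then be listed in the driver's ioctl table through DRM_IOCTL_DEF() together with flags like DRM_AUTH, and the core enforces those flags before calling it, as the dispatcher above shows.
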
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index 7bc51bac450d..f383fc37190c 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -242,6 +242,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
242 242
243 memset(priv, 0, sizeof(*priv)); 243 memset(priv, 0, sizeof(*priv));
244 filp->private_data = priv; 244 filp->private_data = priv;
245 priv->filp = filp;
245 priv->uid = current->euid; 246 priv->uid = current->euid;
246 priv->pid = current->pid; 247 priv->pid = current->pid;
247 priv->minor = minor; 248 priv->minor = minor;
@@ -312,7 +313,7 @@ EXPORT_SYMBOL(drm_fasync);
312 * Release file. 313 * Release file.
313 * 314 *
314 * \param inode device inode 315 * \param inode device inode
315 * \param filp file pointer. 316 * \param file_priv DRM file private.
316 * \return zero on success or a negative number on failure. 317 * \return zero on success or a negative number on failure.
317 * 318 *
318 * If the hardware lock is held then free it, and take it again for the kernel 319 * If the hardware lock is held then free it, and take it again for the kernel
@@ -322,29 +323,28 @@ EXPORT_SYMBOL(drm_fasync);
322 */ 323 */
323int drm_release(struct inode *inode, struct file *filp) 324int drm_release(struct inode *inode, struct file *filp)
324{ 325{
325 struct drm_file *priv = filp->private_data; 326 struct drm_file *file_priv = filp->private_data;
326 struct drm_device *dev; 327 struct drm_device *dev = file_priv->head->dev;
327 int retcode = 0; 328 int retcode = 0;
328 329
329 lock_kernel(); 330 lock_kernel();
330 dev = priv->head->dev;
331 331
332 DRM_DEBUG("open_count = %d\n", dev->open_count); 332 DRM_DEBUG("open_count = %d\n", dev->open_count);
333 333
334 if (dev->driver->preclose) 334 if (dev->driver->preclose)
335 dev->driver->preclose(dev, filp); 335 dev->driver->preclose(dev, file_priv);
336 336
337 /* ======================================================== 337 /* ========================================================
338 * Begin inline drm_release 338 * Begin inline drm_release
339 */ 339 */
340 340
341 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", 341 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
342 current->pid, (long)old_encode_dev(priv->head->device), 342 current->pid, (long)old_encode_dev(file_priv->head->device),
343 dev->open_count); 343 dev->open_count);
344 344
345 if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) { 345 if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
346 if (drm_i_have_hw_lock(filp)) { 346 if (drm_i_have_hw_lock(dev, file_priv)) {
347 dev->driver->reclaim_buffers_locked(dev, filp); 347 dev->driver->reclaim_buffers_locked(dev, file_priv);
348 } else { 348 } else {
349 unsigned long _end=jiffies + 3*DRM_HZ; 349 unsigned long _end=jiffies + 3*DRM_HZ;
350 int locked = 0; 350 int locked = 0;
@@ -370,7 +370,7 @@ int drm_release(struct inode *inode, struct file *filp)
370 "\tI will go on reclaiming the buffers anyway.\n"); 370 "\tI will go on reclaiming the buffers anyway.\n");
371 } 371 }
372 372
373 dev->driver->reclaim_buffers_locked(dev, filp); 373 dev->driver->reclaim_buffers_locked(dev, file_priv);
374 drm_idlelock_release(&dev->lock); 374 drm_idlelock_release(&dev->lock);
375 } 375 }
376 } 376 }
@@ -378,12 +378,12 @@ int drm_release(struct inode *inode, struct file *filp)
378 if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) { 378 if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
379 379
380 drm_idlelock_take(&dev->lock); 380 drm_idlelock_take(&dev->lock);
381 dev->driver->reclaim_buffers_idlelocked(dev, filp); 381 dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
382 drm_idlelock_release(&dev->lock); 382 drm_idlelock_release(&dev->lock);
383 383
384 } 384 }
385 385
386 if (drm_i_have_hw_lock(filp)) { 386 if (drm_i_have_hw_lock(dev, file_priv)) {
387 DRM_DEBUG("File %p released, freeing lock for context %d\n", 387 DRM_DEBUG("File %p released, freeing lock for context %d\n",
388 filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); 388 filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
389 389
@@ -394,7 +394,7 @@ int drm_release(struct inode *inode, struct file *filp)
394 394
395 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && 395 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
396 !dev->driver->reclaim_buffers_locked) { 396 !dev->driver->reclaim_buffers_locked) {
397 dev->driver->reclaim_buffers(dev, filp); 397 dev->driver->reclaim_buffers(dev, file_priv);
398 } 398 }
399 399
400 drm_fasync(-1, filp, 0); 400 drm_fasync(-1, filp, 0);
@@ -404,7 +404,7 @@ int drm_release(struct inode *inode, struct file *filp)
404 struct drm_ctx_list *pos, *n; 404 struct drm_ctx_list *pos, *n;
405 405
406 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { 406 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
407 if (pos->tag == priv && 407 if (pos->tag == file_priv &&
408 pos->handle != DRM_KERNEL_CONTEXT) { 408 pos->handle != DRM_KERNEL_CONTEXT) {
409 if (dev->driver->context_dtor) 409 if (dev->driver->context_dtor)
410 dev->driver->context_dtor(dev, 410 dev->driver->context_dtor(dev,
@@ -421,18 +421,18 @@ int drm_release(struct inode *inode, struct file *filp)
421 mutex_unlock(&dev->ctxlist_mutex); 421 mutex_unlock(&dev->ctxlist_mutex);
422 422
423 mutex_lock(&dev->struct_mutex); 423 mutex_lock(&dev->struct_mutex);
424 if (priv->remove_auth_on_close == 1) { 424 if (file_priv->remove_auth_on_close == 1) {
425 struct drm_file *temp; 425 struct drm_file *temp;
426 426
427 list_for_each_entry(temp, &dev->filelist, lhead) 427 list_for_each_entry(temp, &dev->filelist, lhead)
428 temp->authenticated = 0; 428 temp->authenticated = 0;
429 } 429 }
430 list_del(&priv->lhead); 430 list_del(&file_priv->lhead);
431 mutex_unlock(&dev->struct_mutex); 431 mutex_unlock(&dev->struct_mutex);
432 432
433 if (dev->driver->postclose) 433 if (dev->driver->postclose)
434 dev->driver->postclose(dev, priv); 434 dev->driver->postclose(dev, file_priv);
435 drm_free(priv, sizeof(*priv), DRM_MEM_FILES); 435 drm_free(file_priv, sizeof(*file_priv), DRM_MEM_FILES);
436 436
437 /* ======================================================== 437 /* ========================================================
438 * End inline drm_release 438 * End inline drm_release
diff --git a/drivers/char/drm/drm_ioc32.c b/drivers/char/drm/drm_ioc32.c
index 462f46f2049a..2286f3312c5c 100644
--- a/drivers/char/drm/drm_ioc32.c
+++ b/drivers/char/drm/drm_ioc32.c
@@ -1040,7 +1040,7 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
1040 * Called whenever a 32-bit process running under a 64-bit kernel 1040 * Called whenever a 32-bit process running under a 64-bit kernel
1041 * performs an ioctl on /dev/drm. 1041 * performs an ioctl on /dev/drm.
1042 * 1042 *
1043 * \param filp file pointer. 1043 * \param file_priv DRM file private.
1044 * \param cmd command. 1044 * \param cmd command.
1045 * \param arg user argument. 1045 * \param arg user argument.
1046 * \return zero on success or negative number on failure. 1046 * \return zero on success or negative number on failure.
diff --git a/drivers/char/drm/drm_ioctl.c b/drivers/char/drm/drm_ioctl.c
index b195e102e737..d9be14624526 100644
--- a/drivers/char/drm/drm_ioctl.c
+++ b/drivers/char/drm/drm_ioctl.c
@@ -42,30 +42,24 @@
42 * Get the bus id. 42 * Get the bus id.
43 * 43 *
44 * \param inode device inode. 44 * \param inode device inode.
45 * \param filp file pointer. 45 * \param file_priv DRM file private.
46 * \param cmd command. 46 * \param cmd command.
47 * \param arg user argument, pointing to a drm_unique structure. 47 * \param arg user argument, pointing to a drm_unique structure.
48 * \return zero on success or a negative number on failure. 48 * \return zero on success or a negative number on failure.
49 * 49 *
50 * Copies the bus id from drm_device::unique into user space. 50 * Copies the bus id from drm_device::unique into user space.
51 */ 51 */
52int drm_getunique(struct inode *inode, struct file *filp, 52int drm_getunique(struct drm_device *dev, void *data,
53 unsigned int cmd, unsigned long arg) 53 struct drm_file *file_priv)
54{ 54{
55 struct drm_file *priv = filp->private_data; 55 struct drm_unique *u = data;
56 struct drm_device *dev = priv->head->dev;
57 struct drm_unique __user *argp = (void __user *)arg;
58 struct drm_unique u;
59 56
60 if (copy_from_user(&u, argp, sizeof(u))) 57 if (u->unique_len >= dev->unique_len) {
61 return -EFAULT; 58 if (copy_to_user(u->unique, dev->unique, dev->unique_len))
62 if (u.unique_len >= dev->unique_len) {
63 if (copy_to_user(u.unique, dev->unique, dev->unique_len))
64 return -EFAULT; 59 return -EFAULT;
65 } 60 }
66 u.unique_len = dev->unique_len; 61 u->unique_len = dev->unique_len;
67 if (copy_to_user(argp, &u, sizeof(u))) 62
68 return -EFAULT;
69 return 0; 63 return 0;
70} 64}
71 65
@@ -73,7 +67,7 @@ int drm_getunique(struct inode *inode, struct file *filp,
73 * Set the bus id. 67 * Set the bus id.
74 * 68 *
75 * \param inode device inode. 69 * \param inode device inode.
76 * \param filp file pointer. 70 * \param file_priv DRM file private.
77 * \param cmd command. 71 * \param cmd command.
78 * \param arg user argument, pointing to a drm_unique structure. 72 * \param arg user argument, pointing to a drm_unique structure.
79 * \return zero on success or a negative number on failure. 73 * \return zero on success or a negative number on failure.
@@ -83,28 +77,23 @@ int drm_getunique(struct inode *inode, struct file *filp,
83 * in interface version 1.1 and will return EBUSY when setversion has requested 77 * in interface version 1.1 and will return EBUSY when setversion has requested
84 * version 1.1 or greater. 78 * version 1.1 or greater.
85 */ 79 */
86int drm_setunique(struct inode *inode, struct file *filp, 80int drm_setunique(struct drm_device *dev, void *data,
87 unsigned int cmd, unsigned long arg) 81 struct drm_file *file_priv)
88{ 82{
89 struct drm_file *priv = filp->private_data; 83 struct drm_unique *u = data;
90 struct drm_device *dev = priv->head->dev;
91 struct drm_unique u;
92 int domain, bus, slot, func, ret; 84 int domain, bus, slot, func, ret;
93 85
94 if (dev->unique_len || dev->unique) 86 if (dev->unique_len || dev->unique)
95 return -EBUSY; 87 return -EBUSY;
96 88
97 if (copy_from_user(&u, (struct drm_unique __user *) arg, sizeof(u))) 89 if (!u->unique_len || u->unique_len > 1024)
98 return -EFAULT;
99
100 if (!u.unique_len || u.unique_len > 1024)
101 return -EINVAL; 90 return -EINVAL;
102 91
103 dev->unique_len = u.unique_len; 92 dev->unique_len = u->unique_len;
104 dev->unique = drm_alloc(u.unique_len + 1, DRM_MEM_DRIVER); 93 dev->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER);
105 if (!dev->unique) 94 if (!dev->unique)
106 return -ENOMEM; 95 return -ENOMEM;
107 if (copy_from_user(dev->unique, u.unique, dev->unique_len)) 96 if (copy_from_user(dev->unique, u->unique, dev->unique_len))
108 return -EFAULT; 97 return -EFAULT;
109 98
110 dev->unique[dev->unique_len] = '\0'; 99 dev->unique[dev->unique_len] = '\0';
@@ -123,7 +112,7 @@ int drm_setunique(struct inode *inode, struct file *filp,
123 */ 112 */
124 ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func); 113 ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
125 if (ret != 3) 114 if (ret != 3)
126 return DRM_ERR(EINVAL); 115 return -EINVAL;
127 domain = bus >> 8; 116 domain = bus >> 8;
128 bus &= 0xff; 117 bus &= 0xff;
129 118
@@ -172,7 +161,7 @@ static int drm_set_busid(struct drm_device * dev)
172 * Get a mapping information. 161 * Get a mapping information.
173 * 162 *
174 * \param inode device inode. 163 * \param inode device inode.
175 * \param filp file pointer. 164 * \param file_priv DRM file private.
176 * \param cmd command. 165 * \param cmd command.
177 * \param arg user argument, pointing to a drm_map structure. 166 * \param arg user argument, pointing to a drm_map structure.
178 * 167 *
@@ -181,21 +170,16 @@ static int drm_set_busid(struct drm_device * dev)
181 * Searches for the mapping with the specified offset and copies its information 170 * Searches for the mapping with the specified offset and copies its information
182 * into userspace 171 * into userspace
183 */ 172 */
184int drm_getmap(struct inode *inode, struct file *filp, 173int drm_getmap(struct drm_device *dev, void *data,
185 unsigned int cmd, unsigned long arg) 174 struct drm_file *file_priv)
186{ 175{
187 struct drm_file *priv = filp->private_data; 176 struct drm_map *map = data;
188 struct drm_device *dev = priv->head->dev;
189 struct drm_map __user *argp = (void __user *)arg;
190 struct drm_map map;
191 struct drm_map_list *r_list = NULL; 177 struct drm_map_list *r_list = NULL;
192 struct list_head *list; 178 struct list_head *list;
193 int idx; 179 int idx;
194 int i; 180 int i;
195 181
196 if (copy_from_user(&map, argp, sizeof(map))) 182 idx = map->offset;
197 return -EFAULT;
198 idx = map.offset;
199 183
200 mutex_lock(&dev->struct_mutex); 184 mutex_lock(&dev->struct_mutex);
201 if (idx < 0) { 185 if (idx < 0) {
@@ -216,16 +200,14 @@ int drm_getmap(struct inode *inode, struct file *filp,
216 return -EINVAL; 200 return -EINVAL;
217 } 201 }
218 202
219 map.offset = r_list->map->offset; 203 map->offset = r_list->map->offset;
220 map.size = r_list->map->size; 204 map->size = r_list->map->size;
221 map.type = r_list->map->type; 205 map->type = r_list->map->type;
222 map.flags = r_list->map->flags; 206 map->flags = r_list->map->flags;
223 map.handle = (void *)(unsigned long)r_list->user_token; 207 map->handle = (void *)(unsigned long) r_list->user_token;
224 map.mtrr = r_list->map->mtrr; 208 map->mtrr = r_list->map->mtrr;
225 mutex_unlock(&dev->struct_mutex); 209 mutex_unlock(&dev->struct_mutex);
226 210
227 if (copy_to_user(argp, &map, sizeof(map)))
228 return -EFAULT;
229 return 0; 211 return 0;
230} 212}
231 213
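
From user space nothing changes for drm_getmap(): the same struct drm_map goes through the ioctl, and the returned handle is still the token to hand to mmap() as the offset. A small illustrative fragment, assuming a drm.h new enough to expose the struct drm_map name and a libdrm-style include path; the device node and error handling are simplified:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>

int main(void)
{
        struct drm_map map;
        void *ptr;
        int fd = open("/dev/dri/card0", O_RDWR);

        if (fd < 0)
                return 1;

        memset(&map, 0, sizeof(map));
        map.offset = 0;                 /* index of the mapping to query */
        if (ioctl(fd, DRM_IOCTL_GET_MAP, &map) == 0) {
                /* map.handle is the user token used as the mmap offset. */
                ptr = mmap(NULL, map.size, PROT_READ | PROT_WRITE, MAP_SHARED,
                           fd, (off_t)(unsigned long)map.handle);
                if (ptr != MAP_FAILED) {
                        printf("mapped %lu bytes of map type %d\n",
                               (unsigned long)map.size, (int)map.type);
                        munmap(ptr, map.size);
                }
        }
        close(fd);
        return 0;
}
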
@@ -233,7 +215,7 @@ int drm_getmap(struct inode *inode, struct file *filp,
233 * Get client information. 215 * Get client information.
234 * 216 *
235 * \param inode device inode. 217 * \param inode device inode.
236 * \param filp file pointer. 218 * \param file_priv DRM file private.
237 * \param cmd command. 219 * \param cmd command.
238 * \param arg user argument, pointing to a drm_client structure. 220 * \param arg user argument, pointing to a drm_client structure.
239 * 221 *
@@ -242,20 +224,15 @@ int drm_getmap(struct inode *inode, struct file *filp,
242 * Searches for the client with the specified index and copies its information 224 * Searches for the client with the specified index and copies its information
243 * into userspace 225 * into userspace
244 */ 226 */
245int drm_getclient(struct inode *inode, struct file *filp, 227int drm_getclient(struct drm_device *dev, void *data,
246 unsigned int cmd, unsigned long arg) 228 struct drm_file *file_priv)
247{ 229{
248 struct drm_file *priv = filp->private_data; 230 struct drm_client *client = data;
249 struct drm_device *dev = priv->head->dev;
250 struct drm_client __user *argp = (struct drm_client __user *)arg;
251 struct drm_client client;
252 struct drm_file *pt; 231 struct drm_file *pt;
253 int idx; 232 int idx;
254 int i; 233 int i;
255 234
256 if (copy_from_user(&client, argp, sizeof(client))) 235 idx = client->idx;
257 return -EFAULT;
258 idx = client.idx;
259 mutex_lock(&dev->struct_mutex); 236 mutex_lock(&dev->struct_mutex);
260 237
261 if (list_empty(&dev->filelist)) { 238 if (list_empty(&dev->filelist)) {
@@ -269,15 +246,13 @@ int drm_getclient(struct inode *inode, struct file *filp,
269 break; 246 break;
270 } 247 }
271 248
272 client.auth = pt->authenticated; 249 client->auth = pt->authenticated;
273 client.pid = pt->pid; 250 client->pid = pt->pid;
274 client.uid = pt->uid; 251 client->uid = pt->uid;
275 client.magic = pt->magic; 252 client->magic = pt->magic;
276 client.iocs = pt->ioctl_count; 253 client->iocs = pt->ioctl_count;
277 mutex_unlock(&dev->struct_mutex); 254 mutex_unlock(&dev->struct_mutex);
278 255
279 if (copy_to_user(argp, &client, sizeof(client)))
280 return -EFAULT;
281 return 0; 256 return 0;
282} 257}
283 258
@@ -285,39 +260,35 @@ int drm_getclient(struct inode *inode, struct file *filp,
285 * Get statistics information. 260 * Get statistics information.
286 * 261 *
287 * \param inode device inode. 262 * \param inode device inode.
288 * \param filp file pointer. 263 * \param file_priv DRM file private.
289 * \param cmd command. 264 * \param cmd command.
290 * \param arg user argument, pointing to a drm_stats structure. 265 * \param arg user argument, pointing to a drm_stats structure.
291 * 266 *
292 * \return zero on success or a negative number on failure. 267 * \return zero on success or a negative number on failure.
293 */ 268 */
294int drm_getstats(struct inode *inode, struct file *filp, 269int drm_getstats(struct drm_device *dev, void *data,
295 unsigned int cmd, unsigned long arg) 270 struct drm_file *file_priv)
296{ 271{
297 struct drm_file *priv = filp->private_data; 272 struct drm_stats *stats = data;
298 struct drm_device *dev = priv->head->dev;
299 struct drm_stats stats;
300 int i; 273 int i;
301 274
302 memset(&stats, 0, sizeof(stats)); 275 memset(stats, 0, sizeof(stats));
303 276
304 mutex_lock(&dev->struct_mutex); 277 mutex_lock(&dev->struct_mutex);
305 278
306 for (i = 0; i < dev->counters; i++) { 279 for (i = 0; i < dev->counters; i++) {
307 if (dev->types[i] == _DRM_STAT_LOCK) 280 if (dev->types[i] == _DRM_STAT_LOCK)
308 stats.data[i].value 281 stats->data[i].value =
309 = (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0); 282 (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0);
310 else 283 else
311 stats.data[i].value = atomic_read(&dev->counts[i]); 284 stats->data[i].value = atomic_read(&dev->counts[i]);
312 stats.data[i].type = dev->types[i]; 285 stats->data[i].type = dev->types[i];
313 } 286 }
314 287
315 stats.count = dev->counters; 288 stats->count = dev->counters;
316 289
317 mutex_unlock(&dev->struct_mutex); 290 mutex_unlock(&dev->struct_mutex);
318 291
319 if (copy_to_user((struct drm_stats __user *) arg, &stats, sizeof(stats)))
320 return -EFAULT;
321 return 0; 292 return 0;
322} 293}
323 294
@@ -325,64 +296,59 @@ int drm_getstats(struct inode *inode, struct file *filp,
325 * Setversion ioctl. 296 * Setversion ioctl.
326 * 297 *
327 * \param inode device inode. 298 * \param inode device inode.
328 * \param filp file pointer. 299 * \param file_priv DRM file private.
329 * \param cmd command. 300 * \param cmd command.
330 * \param arg user argument, pointing to a drm_lock structure. 301 * \param arg user argument, pointing to a drm_lock structure.
331 * \return zero on success or negative number on failure. 302 * \return zero on success or negative number on failure.
332 * 303 *
333 * Sets the requested interface version 304 * Sets the requested interface version
334 */ 305 */
335int drm_setversion(DRM_IOCTL_ARGS) 306int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
336{ 307{
337 DRM_DEVICE; 308 struct drm_set_version *sv = data;
338 struct drm_set_version sv; 309 int if_version, retcode = 0;
339 struct drm_set_version retv; 310
340 int if_version; 311 if (sv->drm_di_major != -1) {
341 struct drm_set_version __user *argp = (void __user *)data; 312 if (sv->drm_di_major != DRM_IF_MAJOR ||
342 int ret; 313 sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
343 314 retcode = -EINVAL;
344 if (copy_from_user(&sv, argp, sizeof(sv))) 315 goto done;
345 return -EFAULT; 316 }
346 317 if_version = DRM_IF_VERSION(sv->drm_di_major,
347 retv.drm_di_major = DRM_IF_MAJOR; 318 sv->drm_di_minor);
348 retv.drm_di_minor = DRM_IF_MINOR;
349 retv.drm_dd_major = dev->driver->major;
350 retv.drm_dd_minor = dev->driver->minor;
351
352 if (copy_to_user(argp, &retv, sizeof(retv)))
353 return -EFAULT;
354
355 if (sv.drm_di_major != -1) {
356 if (sv.drm_di_major != DRM_IF_MAJOR ||
357 sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR)
358 return -EINVAL;
359 if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_di_minor);
360 dev->if_version = max(if_version, dev->if_version); 319 dev->if_version = max(if_version, dev->if_version);
361 if (sv.drm_di_minor >= 1) { 320 if (sv->drm_di_minor >= 1) {
362 /* 321 /*
363 * Version 1.1 includes tying of DRM to specific device 322 * Version 1.1 includes tying of DRM to specific device
364 */ 323 */
365 ret = drm_set_busid(dev); 324 drm_set_busid(dev);
366 if (ret)
367 return ret;
368 } 325 }
369 } 326 }
370 327
371 if (sv.drm_dd_major != -1) { 328 if (sv->drm_dd_major != -1) {
372 if (sv.drm_dd_major != dev->driver->major || 329 if (sv->drm_dd_major != dev->driver->major ||
373 sv.drm_dd_minor < 0 330 sv->drm_dd_minor < 0 || sv->drm_dd_minor >
374 || sv.drm_dd_minor > dev->driver->minor) 331 dev->driver->minor) {
375 return -EINVAL; 332 retcode = -EINVAL;
333 goto done;
334 }
376 335
377 if (dev->driver->set_version) 336 if (dev->driver->set_version)
378 dev->driver->set_version(dev, &sv); 337 dev->driver->set_version(dev, sv);
379 } 338 }
380 return 0; 339
340done:
341 sv->drm_di_major = DRM_IF_MAJOR;
342 sv->drm_di_minor = DRM_IF_MINOR;
343 sv->drm_dd_major = dev->driver->major;
344 sv->drm_dd_minor = dev->driver->minor;
345
346 return retcode;
381} 347}
382 348
383/** No-op ioctl. */ 349/** No-op ioctl. */
384int drm_noop(struct inode *inode, struct file *filp, unsigned int cmd, 350int drm_noop(struct drm_device *dev, void *data,
385 unsigned long arg) 351 struct drm_file *file_priv)
386{ 352{
387 DRM_DEBUG("\n"); 353 DRM_DEBUG("\n");
388 return 0; 354 return 0;
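
The conversions above remove the per-handler copy_from_user()/copy_to_user() pairs: each handler now receives a kernel-space copy of its argument through the void *data parameter, edits it in place, and leaves the copying to the common ioctl entry point, which can size the buffer from the _IOC_SIZE() bits of the command. A rough sketch of that marshalling idea, for illustration only; this is not the actual drm_ioctl() code from this tree, and example_dispatch is a hypothetical name:

static int example_dispatch(struct drm_device *dev,
                            struct drm_file *file_priv,
                            unsigned int cmd, unsigned long arg,
                            int (*func)(struct drm_device *, void *,
                                        struct drm_file *))
{
        char stack_kdata[128];
        void *kdata = stack_kdata;
        unsigned int size = _IOC_SIZE(cmd);
        int ret;

        if (size > sizeof(stack_kdata)) {
                kdata = kmalloc(size, GFP_KERNEL);
                if (!kdata)
                        return -ENOMEM;
        }

        if (_IOC_DIR(cmd) & _IOC_WRITE) {
                if (copy_from_user(kdata, (void __user *)arg, size)) {
                        ret = -EFAULT;
                        goto out;
                }
        } else {
                memset(kdata, 0, size);
        }

        ret = func(dev, kdata, file_priv);      /* e.g. drm_getunique() */

        if ((_IOC_DIR(cmd) & _IOC_READ) &&
            copy_to_user((void __user *)arg, kdata, size))
                ret = -EFAULT;
out:
        if (kdata != stack_kdata)
                kfree(kdata);
        return ret;
}
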
diff --git a/drivers/char/drm/drm_irq.c b/drivers/char/drm/drm_irq.c
index 871d2fde09b3..05eae63f85ba 100644
--- a/drivers/char/drm/drm_irq.c
+++ b/drivers/char/drm/drm_irq.c
@@ -41,7 +41,7 @@
41 * Get interrupt from bus id. 41 * Get interrupt from bus id.
42 * 42 *
43 * \param inode device inode. 43 * \param inode device inode.
44 * \param filp file pointer. 44 * \param file_priv DRM file private.
45 * \param cmd command. 45 * \param cmd command.
46 * \param arg user argument, pointing to a drm_irq_busid structure. 46 * \param arg user argument, pointing to a drm_irq_busid structure.
47 * \return zero on success or a negative number on failure. 47 * \return zero on success or a negative number on failure.
@@ -50,30 +50,24 @@
50 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal 50 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
51 * to that of the device that this DRM instance attached to. 51 * to that of the device that this DRM instance attached to.
52 */ 52 */
53int drm_irq_by_busid(struct inode *inode, struct file *filp, 53int drm_irq_by_busid(struct drm_device *dev, void *data,
54 unsigned int cmd, unsigned long arg) 54 struct drm_file *file_priv)
55{ 55{
56 struct drm_file *priv = filp->private_data; 56 struct drm_irq_busid *p = data;
57 struct drm_device *dev = priv->head->dev;
58 struct drm_irq_busid __user *argp = (void __user *)arg;
59 struct drm_irq_busid p;
60 57
61 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 58 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
62 return -EINVAL; 59 return -EINVAL;
63 60
64 if (copy_from_user(&p, argp, sizeof(p))) 61 if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
65 return -EFAULT; 62 (p->busnum & 0xff) != dev->pdev->bus->number ||
66 63 p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
67 if ((p.busnum >> 8) != drm_get_pci_domain(dev) ||
68 (p.busnum & 0xff) != dev->pdev->bus->number ||
69 p.devnum != PCI_SLOT(dev->pdev->devfn) || p.funcnum != PCI_FUNC(dev->pdev->devfn))
70 return -EINVAL; 64 return -EINVAL;
71 65
72 p.irq = dev->irq; 66 p->irq = dev->irq;
67
68 DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
69 p->irq);
73 70
74 DRM_DEBUG("%d:%d:%d => IRQ %d\n", p.busnum, p.devnum, p.funcnum, p.irq);
75 if (copy_to_user(argp, &p, sizeof(p)))
76 return -EFAULT;
77 return 0; 71 return 0;
78} 72}
79 73
@@ -187,31 +181,27 @@ EXPORT_SYMBOL(drm_irq_uninstall);
187 * IRQ control ioctl. 181 * IRQ control ioctl.
188 * 182 *
189 * \param inode device inode. 183 * \param inode device inode.
190 * \param filp file pointer. 184 * \param file_priv DRM file private.
191 * \param cmd command. 185 * \param cmd command.
192 * \param arg user argument, pointing to a drm_control structure. 186 * \param arg user argument, pointing to a drm_control structure.
193 * \return zero on success or a negative number on failure. 187 * \return zero on success or a negative number on failure.
194 * 188 *
195 * Calls irq_install() or irq_uninstall() according to \p arg. 189 * Calls irq_install() or irq_uninstall() according to \p arg.
196 */ 190 */
197int drm_control(struct inode *inode, struct file *filp, 191int drm_control(struct drm_device *dev, void *data,
198 unsigned int cmd, unsigned long arg) 192 struct drm_file *file_priv)
199{ 193{
200 struct drm_file *priv = filp->private_data; 194 struct drm_control *ctl = data;
201 struct drm_device *dev = priv->head->dev;
202 struct drm_control ctl;
203 195
204 /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */ 196 /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
205 197
206 if (copy_from_user(&ctl, (struct drm_control __user *) arg, sizeof(ctl)))
207 return -EFAULT;
208 198
209 switch (ctl.func) { 199 switch (ctl->func) {
210 case DRM_INST_HANDLER: 200 case DRM_INST_HANDLER:
211 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 201 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
212 return 0; 202 return 0;
213 if (dev->if_version < DRM_IF_VERSION(1, 2) && 203 if (dev->if_version < DRM_IF_VERSION(1, 2) &&
214 ctl.irq != dev->irq) 204 ctl->irq != dev->irq)
215 return -EINVAL; 205 return -EINVAL;
216 return drm_irq_install(dev); 206 return drm_irq_install(dev);
217 case DRM_UNINST_HANDLER: 207 case DRM_UNINST_HANDLER:
@@ -227,7 +217,7 @@ int drm_control(struct inode *inode, struct file *filp,
227 * Wait for VBLANK. 217 * Wait for VBLANK.
228 * 218 *
229 * \param inode device inode. 219 * \param inode device inode.
230 * \param filp file pointer. 220 * \param file_priv DRM file private.
231 * \param cmd command. 221 * \param cmd command.
232 * \param data user argument, pointing to a drm_wait_vblank structure. 222 * \param data user argument, pointing to a drm_wait_vblank structure.
233 * \return zero on success or a negative number on failure. 223 * \return zero on success or a negative number on failure.
@@ -242,31 +232,25 @@ int drm_control(struct inode *inode, struct file *filp,
242 * 232 *
243 * If a signal is not requested, then calls vblank_wait(). 233 * If a signal is not requested, then calls vblank_wait().
244 */ 234 */
245int drm_wait_vblank(DRM_IOCTL_ARGS) 235int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
246{ 236{
247 struct drm_file *priv = filp->private_data; 237 union drm_wait_vblank *vblwait = data;
248 struct drm_device *dev = priv->head->dev;
249 union drm_wait_vblank __user *argp = (void __user *)data;
250 union drm_wait_vblank vblwait;
251 struct timeval now; 238 struct timeval now;
252 int ret = 0; 239 int ret = 0;
253 unsigned int flags, seq; 240 unsigned int flags, seq;
254 241
255 if (!dev->irq) 242 if ((!dev->irq) || (!dev->irq_enabled))
256 return -EINVAL; 243 return -EINVAL;
257 244
258 if (copy_from_user(&vblwait, argp, sizeof(vblwait))) 245 if (vblwait->request.type &
259 return -EFAULT;
260
261 if (vblwait.request.type &
262 ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) { 246 ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
263 DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", 247 DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
264 vblwait.request.type, 248 vblwait->request.type,
265 (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)); 249 (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
266 return -EINVAL; 250 return -EINVAL;
267 } 251 }
268 252
269 flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK; 253 flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
270 254
271 if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ? 255 if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
272 DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL)) 256 DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
@@ -275,10 +259,10 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
275 seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2 259 seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
276 : &dev->vbl_received); 260 : &dev->vbl_received);
277 261
278 switch (vblwait.request.type & _DRM_VBLANK_TYPES_MASK) { 262 switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
279 case _DRM_VBLANK_RELATIVE: 263 case _DRM_VBLANK_RELATIVE:
280 vblwait.request.sequence += seq; 264 vblwait->request.sequence += seq;
281 vblwait.request.type &= ~_DRM_VBLANK_RELATIVE; 265 vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
282 case _DRM_VBLANK_ABSOLUTE: 266 case _DRM_VBLANK_ABSOLUTE:
283 break; 267 break;
284 default: 268 default:
@@ -286,8 +270,8 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
286 } 270 }
287 271
288 if ((flags & _DRM_VBLANK_NEXTONMISS) && 272 if ((flags & _DRM_VBLANK_NEXTONMISS) &&
289 (seq - vblwait.request.sequence) <= (1<<23)) { 273 (seq - vblwait->request.sequence) <= (1<<23)) {
290 vblwait.request.sequence = seq + 1; 274 vblwait->request.sequence = seq + 1;
291 } 275 }
292 276
293 if (flags & _DRM_VBLANK_SIGNAL) { 277 if (flags & _DRM_VBLANK_SIGNAL) {
@@ -303,12 +287,13 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
303 * that case 287 * that case
304 */ 288 */
305 list_for_each_entry(vbl_sig, vbl_sigs, head) { 289 list_for_each_entry(vbl_sig, vbl_sigs, head) {
306 if (vbl_sig->sequence == vblwait.request.sequence 290 if (vbl_sig->sequence == vblwait->request.sequence
307 && vbl_sig->info.si_signo == vblwait.request.signal 291 && vbl_sig->info.si_signo ==
292 vblwait->request.signal
308 && vbl_sig->task == current) { 293 && vbl_sig->task == current) {
309 spin_unlock_irqrestore(&dev->vbl_lock, 294 spin_unlock_irqrestore(&dev->vbl_lock,
310 irqflags); 295 irqflags);
311 vblwait.reply.sequence = seq; 296 vblwait->reply.sequence = seq;
312 goto done; 297 goto done;
313 } 298 }
314 } 299 }
@@ -330,8 +315,8 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
330 315
331 memset((void *)vbl_sig, 0, sizeof(*vbl_sig)); 316 memset((void *)vbl_sig, 0, sizeof(*vbl_sig));
332 317
333 vbl_sig->sequence = vblwait.request.sequence; 318 vbl_sig->sequence = vblwait->request.sequence;
334 vbl_sig->info.si_signo = vblwait.request.signal; 319 vbl_sig->info.si_signo = vblwait->request.signal;
335 vbl_sig->task = current; 320 vbl_sig->task = current;
336 321
337 spin_lock_irqsave(&dev->vbl_lock, irqflags); 322 spin_lock_irqsave(&dev->vbl_lock, irqflags);
@@ -340,25 +325,22 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
340 325
341 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 326 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
342 327
343 vblwait.reply.sequence = seq; 328 vblwait->reply.sequence = seq;
344 } else { 329 } else {
345 if (flags & _DRM_VBLANK_SECONDARY) { 330 if (flags & _DRM_VBLANK_SECONDARY) {
346 if (dev->driver->vblank_wait2) 331 if (dev->driver->vblank_wait2)
347 ret = dev->driver->vblank_wait2(dev, &vblwait.request.sequence); 332 ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence);
348 } else if (dev->driver->vblank_wait) 333 } else if (dev->driver->vblank_wait)
349 ret = 334 ret =
350 dev->driver->vblank_wait(dev, 335 dev->driver->vblank_wait(dev,
351 &vblwait.request.sequence); 336 &vblwait->request.sequence);
352 337
353 do_gettimeofday(&now); 338 do_gettimeofday(&now);
354 vblwait.reply.tval_sec = now.tv_sec; 339 vblwait->reply.tval_sec = now.tv_sec;
355 vblwait.reply.tval_usec = now.tv_usec; 340 vblwait->reply.tval_usec = now.tv_usec;
356 } 341 }
357 342
358 done: 343 done:
359 if (copy_to_user(argp, &vblwait, sizeof(vblwait)))
360 return -EFAULT;
361
362 return ret; 344 return ret;
363} 345}
364 346
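
drm_wait_vblank() now fills in the caller-supplied union drm_wait_vblank through the data pointer, but the user-space contract is unchanged. For reference, a minimal relative wait through the raw ioctl; this is illustrative only, the device path and include location depend on the system, and libdrm's drmWaitVBlank() wraps the same call:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

int main(void)
{
        union drm_wait_vblank vbl;
        int fd = open("/dev/dri/card0", O_RDWR);

        if (fd < 0)
                return 1;

        memset(&vbl, 0, sizeof(vbl));
        vbl.request.type = _DRM_VBLANK_RELATIVE;
        vbl.request.sequence = 1;               /* the next vertical blank */

        if (ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl) == 0)
                printf("vblank %u at %ld.%06ld\n", vbl.reply.sequence,
                       (long)vbl.reply.tval_sec, (long)vbl.reply.tval_usec);
        close(fd);
        return 0;
}
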
diff --git a/drivers/char/drm/drm_lock.c b/drivers/char/drm/drm_lock.c
index c0534b5a8b78..c6b73e744d67 100644
--- a/drivers/char/drm/drm_lock.c
+++ b/drivers/char/drm/drm_lock.c
@@ -41,39 +41,33 @@ static int drm_notifier(void *priv);
41 * Lock ioctl. 41 * Lock ioctl.
42 * 42 *
43 * \param inode device inode. 43 * \param inode device inode.
44 * \param filp file pointer. 44 * \param file_priv DRM file private.
45 * \param cmd command. 45 * \param cmd command.
46 * \param arg user argument, pointing to a drm_lock structure. 46 * \param arg user argument, pointing to a drm_lock structure.
47 * \return zero on success or negative number on failure. 47 * \return zero on success or negative number on failure.
48 * 48 *
49 * Add the current task to the lock wait queue, and attempt to take to lock. 49 * Add the current task to the lock wait queue, and attempt to take to lock.
50 */ 50 */
51int drm_lock(struct inode *inode, struct file *filp, 51int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
52 unsigned int cmd, unsigned long arg)
53{ 52{
54 struct drm_file *priv = filp->private_data;
55 struct drm_device *dev = priv->head->dev;
56 DECLARE_WAITQUEUE(entry, current); 53 DECLARE_WAITQUEUE(entry, current);
57 struct drm_lock lock; 54 struct drm_lock *lock = data;
58 int ret = 0; 55 int ret = 0;
59 56
60 ++priv->lock_count; 57 ++file_priv->lock_count;
61 58
62 if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock))) 59 if (lock->context == DRM_KERNEL_CONTEXT) {
63 return -EFAULT;
64
65 if (lock.context == DRM_KERNEL_CONTEXT) {
66 DRM_ERROR("Process %d using kernel context %d\n", 60 DRM_ERROR("Process %d using kernel context %d\n",
67 current->pid, lock.context); 61 current->pid, lock->context);
68 return -EINVAL; 62 return -EINVAL;
69 } 63 }
70 64
71 DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", 65 DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
72 lock.context, current->pid, 66 lock->context, current->pid,
73 dev->lock.hw_lock->lock, lock.flags); 67 dev->lock.hw_lock->lock, lock->flags);
74 68
75 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)) 69 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
76 if (lock.context < 0) 70 if (lock->context < 0)
77 return -EINVAL; 71 return -EINVAL;
78 72
79 add_wait_queue(&dev->lock.lock_queue, &entry); 73 add_wait_queue(&dev->lock.lock_queue, &entry);
@@ -87,8 +81,8 @@ int drm_lock(struct inode *inode, struct file *filp,
87 ret = -EINTR; 81 ret = -EINTR;
88 break; 82 break;
89 } 83 }
90 if (drm_lock_take(&dev->lock, lock.context)) { 84 if (drm_lock_take(&dev->lock, lock->context)) {
91 dev->lock.filp = filp; 85 dev->lock.file_priv = file_priv;
92 dev->lock.lock_time = jiffies; 86 dev->lock.lock_time = jiffies;
93 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); 87 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
94 break; /* Got lock */ 88 break; /* Got lock */
@@ -107,7 +101,8 @@ int drm_lock(struct inode *inode, struct file *filp,
107 __set_current_state(TASK_RUNNING); 101 __set_current_state(TASK_RUNNING);
108 remove_wait_queue(&dev->lock.lock_queue, &entry); 102 remove_wait_queue(&dev->lock.lock_queue, &entry);
109 103
110 DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" ); 104 DRM_DEBUG("%d %s\n", lock->context,
105 ret ? "interrupted" : "has lock");
111 if (ret) return ret; 106 if (ret) return ret;
112 107
113 sigemptyset(&dev->sigmask); 108 sigemptyset(&dev->sigmask);
@@ -115,24 +110,26 @@ int drm_lock(struct inode *inode, struct file *filp,
115 sigaddset(&dev->sigmask, SIGTSTP); 110 sigaddset(&dev->sigmask, SIGTSTP);
116 sigaddset(&dev->sigmask, SIGTTIN); 111 sigaddset(&dev->sigmask, SIGTTIN);
117 sigaddset(&dev->sigmask, SIGTTOU); 112 sigaddset(&dev->sigmask, SIGTTOU);
118 dev->sigdata.context = lock.context; 113 dev->sigdata.context = lock->context;
119 dev->sigdata.lock = dev->lock.hw_lock; 114 dev->sigdata.lock = dev->lock.hw_lock;
120 block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); 115 block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
121 116
122 if (dev->driver->dma_ready && (lock.flags & _DRM_LOCK_READY)) 117 if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
123 dev->driver->dma_ready(dev); 118 dev->driver->dma_ready(dev);
124 119
125 if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) { 120 if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
121 {
126 if (dev->driver->dma_quiescent(dev)) { 122 if (dev->driver->dma_quiescent(dev)) {
127 DRM_DEBUG("%d waiting for DMA quiescent\n", lock.context); 123 DRM_DEBUG("%d waiting for DMA quiescent\n",
128 return DRM_ERR(EBUSY); 124 lock->context);
125 return -EBUSY;
129 } 126 }
130 } 127 }
131 128
132 if (dev->driver->kernel_context_switch && 129 if (dev->driver->kernel_context_switch &&
133 dev->last_context != lock.context) { 130 dev->last_context != lock->context) {
134 dev->driver->kernel_context_switch(dev, dev->last_context, 131 dev->driver->kernel_context_switch(dev, dev->last_context,
135 lock.context); 132 lock->context);
136 } 133 }
137 134
138 return 0; 135 return 0;
@@ -142,27 +139,21 @@ int drm_lock(struct inode *inode, struct file *filp,
142 * Unlock ioctl. 139 * Unlock ioctl.
143 * 140 *
144 * \param inode device inode. 141 * \param inode device inode.
145 * \param filp file pointer. 142 * \param file_priv DRM file private.
146 * \param cmd command. 143 * \param cmd command.
147 * \param arg user argument, pointing to a drm_lock structure. 144 * \param arg user argument, pointing to a drm_lock structure.
148 * \return zero on success or negative number on failure. 145 * \return zero on success or negative number on failure.
149 * 146 *
150 * Transfer and free the lock. 147 * Transfer and free the lock.
151 */ 148 */
152int drm_unlock(struct inode *inode, struct file *filp, 149int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
153 unsigned int cmd, unsigned long arg)
154{ 150{
155 struct drm_file *priv = filp->private_data; 151 struct drm_lock *lock = data;
156 struct drm_device *dev = priv->head->dev;
157 struct drm_lock lock;
158 unsigned long irqflags; 152 unsigned long irqflags;
159 153
160 if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock))) 154 if (lock->context == DRM_KERNEL_CONTEXT) {
161 return -EFAULT;
162
163 if (lock.context == DRM_KERNEL_CONTEXT) {
164 DRM_ERROR("Process %d using kernel context %d\n", 155 DRM_ERROR("Process %d using kernel context %d\n",
165 current->pid, lock.context); 156 current->pid, lock->context);
166 return -EINVAL; 157 return -EINVAL;
167 } 158 }
168 159
@@ -184,7 +175,7 @@ int drm_unlock(struct inode *inode, struct file *filp,
184 if (dev->driver->kernel_context_switch_unlock) 175 if (dev->driver->kernel_context_switch_unlock)
185 dev->driver->kernel_context_switch_unlock(dev); 176 dev->driver->kernel_context_switch_unlock(dev);
186 else { 177 else {
187 if (drm_lock_free(&dev->lock,lock.context)) { 178 if (drm_lock_free(&dev->lock,lock->context)) {
188 /* FIXME: Should really bail out here. */ 179 /* FIXME: Should really bail out here. */
189 } 180 }
190 } 181 }
@@ -257,7 +248,7 @@ static int drm_lock_transfer(struct drm_lock_data *lock_data,
257 unsigned int old, new, prev; 248 unsigned int old, new, prev;
258 volatile unsigned int *lock = &lock_data->hw_lock->lock; 249 volatile unsigned int *lock = &lock_data->hw_lock->lock;
259 250
260 lock_data->filp = NULL; 251 lock_data->file_priv = NULL;
261 do { 252 do {
262 old = *lock; 253 old = *lock;
263 new = context | _DRM_LOCK_HELD; 254 new = context | _DRM_LOCK_HELD;
@@ -390,13 +381,11 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
390EXPORT_SYMBOL(drm_idlelock_release); 381EXPORT_SYMBOL(drm_idlelock_release);
391 382
392 383
393int drm_i_have_hw_lock(struct file *filp) 384int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
394{ 385{
395 DRM_DEVICE; 386 return (file_priv->lock_count && dev->lock.hw_lock &&
396
397 return (priv->lock_count && dev->lock.hw_lock &&
398 _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && 387 _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
399 dev->lock.filp == filp); 388 dev->lock.file_priv == file_priv);
400} 389}
401 390
402EXPORT_SYMBOL(drm_i_have_hw_lock); 391EXPORT_SYMBOL(drm_i_have_hw_lock);
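
With the lock owner recorded as dev->lock.file_priv, a driver can ask whether a particular client holds the hardware lock without ever referring to a struct file. A sketch of how that might look in a driver's per-client teardown; example_driver_release is a hypothetical helper and the engine-idling step is elided:

static void example_driver_release(struct drm_device *dev,
                                   struct drm_file *file_priv)
{
        /* Only touch the hardware if this client actually owns the lock. */
        if (drm_i_have_hw_lock(dev, file_priv)) {
                /* ... idle the engine here ... */
                drm_lock_free(&dev->lock,
                              _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
        }
}
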
diff --git a/drivers/char/drm/drm_os_linux.h b/drivers/char/drm/drm_os_linux.h
index 0b8d3433386d..114e54e0f61b 100644
--- a/drivers/char/drm/drm_os_linux.h
+++ b/drivers/char/drm/drm_os_linux.h
@@ -6,11 +6,6 @@
6#include <linux/interrupt.h> /* For task queue support */ 6#include <linux/interrupt.h> /* For task queue support */
7#include <linux/delay.h> 7#include <linux/delay.h>
8 8
9/** File pointer type */
10#define DRMFILE struct file *
11/** Ioctl arguments */
12#define DRM_IOCTL_ARGS struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data
13#define DRM_ERR(d) -(d)
14/** Current process ID */ 9/** Current process ID */
15#define DRM_CURRENTPID current->pid 10#define DRM_CURRENTPID current->pid
16#define DRM_SUSER(p) capable(CAP_SYS_ADMIN) 11#define DRM_SUSER(p) capable(CAP_SYS_ADMIN)
@@ -33,9 +28,6 @@
33#define DRM_WRITEMEMORYBARRIER() wmb() 28#define DRM_WRITEMEMORYBARRIER() wmb()
34/** Read/write memory barrier */ 29/** Read/write memory barrier */
35#define DRM_MEMORYBARRIER() mb() 30#define DRM_MEMORYBARRIER() mb()
36/** DRM device local declaration */
37#define DRM_DEVICE struct drm_file *priv = filp->private_data; \
38 struct drm_device *dev = priv->head->dev
39 31
40/** IRQ handler arguments and return type and values */ 32/** IRQ handler arguments and return type and values */
41#define DRM_IRQ_ARGS int irq, void *arg 33#define DRM_IRQ_ARGS int irq, void *arg
@@ -94,8 +86,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
94#define DRM_GET_USER_UNCHECKED(val, uaddr) \ 86#define DRM_GET_USER_UNCHECKED(val, uaddr) \
95 __get_user(val, uaddr) 87 __get_user(val, uaddr)
96 88
97#define DRM_GET_PRIV_WITH_RETURN(_priv, _filp) _priv = _filp->private_data
98
99#define DRM_HZ HZ 89#define DRM_HZ HZ
100 90
101#define DRM_WAIT_ON( ret, queue, timeout, condition ) \ 91#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
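
Dropping DRMFILE, DRM_IOCTL_ARGS, DRM_ERR() and DRM_DEVICE means handlers spell out their parameters and return ordinary negative errno values instead of going through the wrapper macros. A before/after skeleton for a hypothetical handler; example_ioctl is not a function added by this patch:

/* Old style, built on the macros removed above: */
int example_ioctl(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;

        if (!dev->dev_private)
                return DRM_ERR(EINVAL);
        return 0;
}

/* New style, with explicit parameters and plain errno values: */
int example_ioctl(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        if (!dev->dev_private)
                return -EINVAL;
        return 0;
}
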
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index 30b200b01314..f3593974496c 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -236,10 +236,8 @@
236 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 236 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
237 {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \ 237 {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
238 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 238 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
239 {0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
240 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 239 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
241 {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 240 {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
242 {0x1106, 0x3304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
243 {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 241 {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
244 {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 242 {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
245 {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \ 243 {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
diff --git a/drivers/char/drm/drm_scatter.c b/drivers/char/drm/drm_scatter.c
index 067d25daaf17..eb7fa437355e 100644
--- a/drivers/char/drm/drm_scatter.c
+++ b/drivers/char/drm/drm_scatter.c
@@ -62,13 +62,8 @@ void drm_sg_cleanup(struct drm_sg_mem * entry)
62# define ScatterHandle(x) (unsigned int)(x) 62# define ScatterHandle(x) (unsigned int)(x)
63#endif 63#endif
64 64
65int drm_sg_alloc(struct inode *inode, struct file *filp, 65int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
66 unsigned int cmd, unsigned long arg)
67{ 66{
68 struct drm_file *priv = filp->private_data;
69 struct drm_device *dev = priv->head->dev;
70 struct drm_scatter_gather __user *argp = (void __user *)arg;
71 struct drm_scatter_gather request;
72 struct drm_sg_mem *entry; 67 struct drm_sg_mem *entry;
73 unsigned long pages, i, j; 68 unsigned long pages, i, j;
74 69
@@ -80,17 +75,13 @@ int drm_sg_alloc(struct inode *inode, struct file *filp,
80 if (dev->sg) 75 if (dev->sg)
81 return -EINVAL; 76 return -EINVAL;
82 77
83 if (copy_from_user(&request, argp, sizeof(request)))
84 return -EFAULT;
85
86 entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS); 78 entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS);
87 if (!entry) 79 if (!entry)
88 return -ENOMEM; 80 return -ENOMEM;
89 81
90 memset(entry, 0, sizeof(*entry)); 82 memset(entry, 0, sizeof(*entry));
91 83 pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
92 pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE; 84 DRM_DEBUG("sg size=%ld pages=%ld\n", request->size, pages);
93 DRM_DEBUG("sg size=%ld pages=%ld\n", request.size, pages);
94 85
95 entry->pages = pages; 86 entry->pages = pages;
96 entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist), 87 entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist),
@@ -142,12 +133,7 @@ int drm_sg_alloc(struct inode *inode, struct file *filp,
142 SetPageReserved(entry->pagelist[j]); 133 SetPageReserved(entry->pagelist[j]);
143 } 134 }
144 135
145 request.handle = entry->handle; 136 request->handle = entry->handle;
146
147 if (copy_to_user(argp, &request, sizeof(request))) {
148 drm_sg_cleanup(entry);
149 return -EFAULT;
150 }
151 137
152 dev->sg = entry; 138 dev->sg = entry;
153 139
@@ -197,27 +183,31 @@ int drm_sg_alloc(struct inode *inode, struct file *filp,
197 drm_sg_cleanup(entry); 183 drm_sg_cleanup(entry);
198 return -ENOMEM; 184 return -ENOMEM;
199} 185}
186EXPORT_SYMBOL(drm_sg_alloc);
187
200 188
201int drm_sg_free(struct inode *inode, struct file *filp, 189int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
202 unsigned int cmd, unsigned long arg) 190 struct drm_file *file_priv)
203{ 191{
204 struct drm_file *priv = filp->private_data; 192 struct drm_scatter_gather *request = data;
205 struct drm_device *dev = priv->head->dev; 193
206 struct drm_scatter_gather request; 194 return drm_sg_alloc(dev, request);
195
196}
197
198int drm_sg_free(struct drm_device *dev, void *data,
199 struct drm_file *file_priv)
200{
201 struct drm_scatter_gather *request = data;
207 struct drm_sg_mem *entry; 202 struct drm_sg_mem *entry;
208 203
209 if (!drm_core_check_feature(dev, DRIVER_SG)) 204 if (!drm_core_check_feature(dev, DRIVER_SG))
210 return -EINVAL; 205 return -EINVAL;
211 206
212 if (copy_from_user(&request,
213 (struct drm_scatter_gather __user *) arg,
214 sizeof(request)))
215 return -EFAULT;
216
217 entry = dev->sg; 207 entry = dev->sg;
218 dev->sg = NULL; 208 dev->sg = NULL;
219 209
220 if (!entry || entry->handle != request.handle) 210 if (!entry || entry->handle != request->handle)
221 return -EINVAL; 211 return -EINVAL;
222 212
223 DRM_DEBUG("sg free virtual = %p\n", entry->virtual); 213 DRM_DEBUG("sg free virtual = %p\n", entry->virtual);
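
Splitting the scatter/gather allocator into an exported drm_sg_alloc() plus a thin drm_sg_alloc_ioctl() wrapper lets other kernel code allocate SG memory without faking an ioctl. A sketch of such an in-kernel caller; example_setup_sg and its size argument are hypothetical:

static int example_setup_sg(struct drm_device *dev, unsigned long size)
{
        struct drm_scatter_gather request;
        int ret;

        memset(&request, 0, sizeof(request));
        request.size = size;

        ret = drm_sg_alloc(dev, &request);      /* fills in request.handle */
        if (ret)
                return ret;

        DRM_DEBUG("sg handle 0x%lx for %lu bytes\n",
                  request.handle, request.size);
        return 0;
}
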
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 68e36e51ba0c..e8d50af58201 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -463,7 +463,7 @@ static void drm_vm_close(struct vm_area_struct *vma)
463/** 463/**
464 * mmap DMA memory. 464 * mmap DMA memory.
465 * 465 *
466 * \param filp file pointer. 466 * \param file_priv DRM file private.
467 * \param vma virtual memory area. 467 * \param vma virtual memory area.
468 * \return zero on success or a negative number on failure. 468 * \return zero on success or a negative number on failure.
469 * 469 *
@@ -533,7 +533,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
533/** 533/**
534 * mmap DMA memory. 534 * mmap DMA memory.
535 * 535 *
536 * \param filp file pointer. 536 * \param file_priv DRM file private.
537 * \param vma virtual memory area. 537 * \param vma virtual memory area.
538 * \return zero on success or a negative number on failure. 538 * \return zero on success or a negative number on failure.
539 * 539 *
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
index cb449999d0ef..8e841bdee6dc 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/char/drm/i810_dma.c
@@ -120,10 +120,9 @@ static const struct file_operations i810_buffer_fops = {
120 .fasync = drm_fasync, 120 .fasync = drm_fasync,
121}; 121};
122 122
123static int i810_map_buffer(struct drm_buf * buf, struct file *filp) 123static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
124{ 124{
125 struct drm_file *priv = filp->private_data; 125 struct drm_device *dev = file_priv->head->dev;
126 struct drm_device *dev = priv->head->dev;
127 drm_i810_buf_priv_t *buf_priv = buf->dev_private; 126 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
128 drm_i810_private_t *dev_priv = dev->dev_private; 127 drm_i810_private_t *dev_priv = dev->dev_private;
129 const struct file_operations *old_fops; 128 const struct file_operations *old_fops;
@@ -133,14 +132,14 @@ static int i810_map_buffer(struct drm_buf * buf, struct file *filp)
133 return -EINVAL; 132 return -EINVAL;
134 133
135 down_write(&current->mm->mmap_sem); 134 down_write(&current->mm->mmap_sem);
136 old_fops = filp->f_op; 135 old_fops = file_priv->filp->f_op;
137 filp->f_op = &i810_buffer_fops; 136 file_priv->filp->f_op = &i810_buffer_fops;
138 dev_priv->mmap_buffer = buf; 137 dev_priv->mmap_buffer = buf;
139 buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, 138 buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total,
140 PROT_READ | PROT_WRITE, 139 PROT_READ | PROT_WRITE,
141 MAP_SHARED, buf->bus_address); 140 MAP_SHARED, buf->bus_address);
142 dev_priv->mmap_buffer = NULL; 141 dev_priv->mmap_buffer = NULL;
143 filp->f_op = old_fops; 142 file_priv->filp->f_op = old_fops;
144 if (IS_ERR(buf_priv->virtual)) { 143 if (IS_ERR(buf_priv->virtual)) {
145 /* Real error */ 144 /* Real error */
146 DRM_ERROR("mmap error\n"); 145 DRM_ERROR("mmap error\n");
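
i810_map_buffer() shows the escape hatch for code that genuinely needs the VFS objects: struct drm_file keeps back-pointers, so the struct file (for do_mmap()) and the device remain reachable from file_priv. A trivial sketch; example_vfs_file and example_dev are hypothetical helpers:

static inline struct file *example_vfs_file(struct drm_file *file_priv)
{
        /* The drm_file remembers the struct file it was opened through. */
        return file_priv->filp;
}

static inline struct drm_device *example_dev(struct drm_file *file_priv)
{
        return file_priv->head->dev;
}
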
@@ -173,7 +172,7 @@ static int i810_unmap_buffer(struct drm_buf * buf)
173} 172}
174 173
175static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d, 174static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d,
176 struct file *filp) 175 struct drm_file *file_priv)
177{ 176{
178 struct drm_buf *buf; 177 struct drm_buf *buf;
179 drm_i810_buf_priv_t *buf_priv; 178 drm_i810_buf_priv_t *buf_priv;
@@ -186,13 +185,13 @@ static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d,
186 return retcode; 185 return retcode;
187 } 186 }
188 187
189 retcode = i810_map_buffer(buf, filp); 188 retcode = i810_map_buffer(buf, file_priv);
190 if (retcode) { 189 if (retcode) {
191 i810_freelist_put(dev, buf); 190 i810_freelist_put(dev, buf);
192 DRM_ERROR("mapbuf failed, retcode %d\n", retcode); 191 DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
193 return retcode; 192 return retcode;
194 } 193 }
195 buf->filp = filp; 194 buf->file_priv = file_priv;
196 buf_priv = buf->dev_private; 195 buf_priv = buf->dev_private;
197 d->granted = 1; 196 d->granted = 1;
198 d->request_idx = buf->idx; 197 d->request_idx = buf->idx;
@@ -380,7 +379,7 @@ static int i810_dma_initialize(struct drm_device * dev,
380 i810_dma_cleanup(dev); 379 i810_dma_cleanup(dev);
381 DRM_ERROR("can not ioremap virtual address for" 380 DRM_ERROR("can not ioremap virtual address for"
382 " ring buffer\n"); 381 " ring buffer\n");
383 return DRM_ERR(ENOMEM); 382 return -ENOMEM;
384 } 383 }
385 384
386 dev_priv->ring.virtual_start = dev_priv->ring.map.handle; 385 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
@@ -430,99 +429,29 @@ static int i810_dma_initialize(struct drm_device * dev,
430 return 0; 429 return 0;
431} 430}
432 431
433/* i810 DRM version 1.1 used a smaller init structure with different 432static int i810_dma_init(struct drm_device *dev, void *data,
434 * ordering of values than is currently used (drm >= 1.2). There is 433 struct drm_file *file_priv)
435 * no defined way to detect the XFree version to correct this problem,
436 * however by checking using this procedure we can detect the correct
437 * thing to do.
438 *
439 * #1 Read the Smaller init structure from user-space
440 * #2 Verify the overlay_physical is a valid physical address, or NULL
441 * If it isn't then we have a v1.1 client. Fix up params.
442 * If it is, then we have a 1.2 client... get the rest of the data.
443 */
444static int i810_dma_init_compat(drm_i810_init_t * init, unsigned long arg)
445{ 434{
446
447 /* Get v1.1 init data */
448 if (copy_from_user(init, (drm_i810_pre12_init_t __user *) arg,
449 sizeof(drm_i810_pre12_init_t))) {
450 return -EFAULT;
451 }
452
453 if ((!init->overlay_physical) || (init->overlay_physical > 4096)) {
454
455 /* This is a v1.2 client, just get the v1.2 init data */
456 DRM_INFO("Using POST v1.2 init.\n");
457 if (copy_from_user(init, (drm_i810_init_t __user *) arg,
458 sizeof(drm_i810_init_t))) {
459 return -EFAULT;
460 }
461 } else {
462
463 /* This is a v1.1 client, fix the params */
464 DRM_INFO("Using PRE v1.2 init.\n");
465 init->pitch_bits = init->h;
466 init->pitch = init->w;
467 init->h = init->overlay_physical;
468 init->w = init->overlay_offset;
469 init->overlay_physical = 0;
470 init->overlay_offset = 0;
471 }
472
473 return 0;
474}
475
476static int i810_dma_init(struct inode *inode, struct file *filp,
477 unsigned int cmd, unsigned long arg)
478{
479 struct drm_file *priv = filp->private_data;
480 struct drm_device *dev = priv->head->dev;
481 drm_i810_private_t *dev_priv; 435 drm_i810_private_t *dev_priv;
482 drm_i810_init_t init; 436 drm_i810_init_t *init = data;
483 int retcode = 0; 437 int retcode = 0;
484 438
485 /* Get only the init func */ 439 switch (init->func) {
486 if (copy_from_user
487 (&init, (void __user *)arg, sizeof(drm_i810_init_func_t)))
488 return -EFAULT;
489
490 switch (init.func) {
491 case I810_INIT_DMA:
492 /* This case is for backward compatibility. It
493 * handles XFree 4.1.0 and 4.2.0, and has to
494 * do some parameter checking as described below.
495 * It will someday go away.
496 */
497 retcode = i810_dma_init_compat(&init, arg);
498 if (retcode)
499 return retcode;
500
501 dev_priv = drm_alloc(sizeof(drm_i810_private_t),
502 DRM_MEM_DRIVER);
503 if (dev_priv == NULL)
504 return -ENOMEM;
505 retcode = i810_dma_initialize(dev, dev_priv, &init);
506 break;
507
508 default:
509 case I810_INIT_DMA_1_4: 440 case I810_INIT_DMA_1_4:
510 DRM_INFO("Using v1.4 init.\n"); 441 DRM_INFO("Using v1.4 init.\n");
511 if (copy_from_user(&init, (drm_i810_init_t __user *) arg,
512 sizeof(drm_i810_init_t))) {
513 return -EFAULT;
514 }
515 dev_priv = drm_alloc(sizeof(drm_i810_private_t), 442 dev_priv = drm_alloc(sizeof(drm_i810_private_t),
516 DRM_MEM_DRIVER); 443 DRM_MEM_DRIVER);
517 if (dev_priv == NULL) 444 if (dev_priv == NULL)
518 return -ENOMEM; 445 return -ENOMEM;
519 retcode = i810_dma_initialize(dev, dev_priv, &init); 446 retcode = i810_dma_initialize(dev, dev_priv, init);
520 break; 447 break;
521 448
522 case I810_CLEANUP_DMA: 449 case I810_CLEANUP_DMA:
523 DRM_INFO("DMA Cleanup\n"); 450 DRM_INFO("DMA Cleanup\n");
524 retcode = i810_dma_cleanup(dev); 451 retcode = i810_dma_cleanup(dev);
525 break; 452 break;
453 default:
454 return -EINVAL;
526 } 455 }
527 456
528 return retcode; 457 return retcode;
@@ -968,7 +897,8 @@ static int i810_flush_queue(struct drm_device * dev)
968} 897}
969 898
970/* Must be called with the lock held */ 899/* Must be called with the lock held */
971static void i810_reclaim_buffers(struct drm_device * dev, struct file *filp) 900static void i810_reclaim_buffers(struct drm_device * dev,
901 struct drm_file *file_priv)
972{ 902{
973 struct drm_device_dma *dma = dev->dma; 903 struct drm_device_dma *dma = dev->dma;
974 int i; 904 int i;
@@ -986,7 +916,7 @@ static void i810_reclaim_buffers(struct drm_device * dev, struct file *filp)
986 struct drm_buf *buf = dma->buflist[i]; 916 struct drm_buf *buf = dma->buflist[i];
987 drm_i810_buf_priv_t *buf_priv = buf->dev_private; 917 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
988 918
989 if (buf->filp == filp && buf_priv) { 919 if (buf->file_priv == file_priv && buf_priv) {
990 int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, 920 int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
991 I810_BUF_FREE); 921 I810_BUF_FREE);
992 922
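
The reclaim path above flips each buffer from client-owned back to free with cmpxchg(), so only buffers still marked as owned by the dying client are transitioned, and at most once even if another path races to change the state. The idiom in isolation; the EXAMPLE_* names are hypothetical stand-ins for I810_BUF_CLIENT/I810_BUF_FREE:

#define EXAMPLE_BUF_FREE        0
#define EXAMPLE_BUF_CLIENT      1

static int example_release_buf(int *in_use)
{
        /* Returns nonzero only if we were the ones to free the buffer. */
        return cmpxchg(in_use, EXAMPLE_BUF_CLIENT, EXAMPLE_BUF_FREE)
               == EXAMPLE_BUF_CLIENT;
}
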
@@ -998,47 +928,38 @@ static void i810_reclaim_buffers(struct drm_device * dev, struct file *filp)
998 } 928 }
999} 929}
1000 930
1001static int i810_flush_ioctl(struct inode *inode, struct file *filp, 931static int i810_flush_ioctl(struct drm_device *dev, void *data,
1002 unsigned int cmd, unsigned long arg) 932 struct drm_file *file_priv)
1003{ 933{
1004 struct drm_file *priv = filp->private_data; 934 LOCK_TEST_WITH_RETURN(dev, file_priv);
1005 struct drm_device *dev = priv->head->dev;
1006
1007 LOCK_TEST_WITH_RETURN(dev, filp);
1008 935
1009 i810_flush_queue(dev); 936 i810_flush_queue(dev);
1010 return 0; 937 return 0;
1011} 938}
1012 939
1013static int i810_dma_vertex(struct inode *inode, struct file *filp, 940static int i810_dma_vertex(struct drm_device *dev, void *data,
1014 unsigned int cmd, unsigned long arg) 941 struct drm_file *file_priv)
1015{ 942{
1016 struct drm_file *priv = filp->private_data;
1017 struct drm_device *dev = priv->head->dev;
1018 struct drm_device_dma *dma = dev->dma; 943 struct drm_device_dma *dma = dev->dma;
1019 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 944 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1020 u32 *hw_status = dev_priv->hw_status_page; 945 u32 *hw_status = dev_priv->hw_status_page;
1021 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) 946 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1022 dev_priv->sarea_priv; 947 dev_priv->sarea_priv;
1023 drm_i810_vertex_t vertex; 948 drm_i810_vertex_t *vertex = data;
1024
1025 if (copy_from_user
1026 (&vertex, (drm_i810_vertex_t __user *) arg, sizeof(vertex)))
1027 return -EFAULT;
1028 949
1029 LOCK_TEST_WITH_RETURN(dev, filp); 950 LOCK_TEST_WITH_RETURN(dev, file_priv);
1030 951
1031 DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n", 952 DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n",
1032 vertex.idx, vertex.used, vertex.discard); 953 vertex->idx, vertex->used, vertex->discard);
1033 954
1034 if (vertex.idx < 0 || vertex.idx > dma->buf_count) 955 if (vertex->idx < 0 || vertex->idx > dma->buf_count)
1035 return -EINVAL; 956 return -EINVAL;
1036 957
1037 i810_dma_dispatch_vertex(dev, 958 i810_dma_dispatch_vertex(dev,
1038 dma->buflist[vertex.idx], 959 dma->buflist[vertex->idx],
1039 vertex.discard, vertex.used); 960 vertex->discard, vertex->used);
1040 961
1041 atomic_add(vertex.used, &dev->counts[_DRM_STAT_SECONDARY]); 962 atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
1042 atomic_inc(&dev->counts[_DRM_STAT_DMA]); 963 atomic_inc(&dev->counts[_DRM_STAT_DMA]);
1043 sarea_priv->last_enqueue = dev_priv->counter - 1; 964 sarea_priv->last_enqueue = dev_priv->counter - 1;
1044 sarea_priv->last_dispatch = (int)hw_status[5]; 965 sarea_priv->last_dispatch = (int)hw_status[5];
@@ -1046,48 +967,37 @@ static int i810_dma_vertex(struct inode *inode, struct file *filp,
1046 return 0; 967 return 0;
1047} 968}
1048 969
1049static int i810_clear_bufs(struct inode *inode, struct file *filp, 970static int i810_clear_bufs(struct drm_device *dev, void *data,
1050 unsigned int cmd, unsigned long arg) 971 struct drm_file *file_priv)
1051{ 972{
1052 struct drm_file *priv = filp->private_data; 973 drm_i810_clear_t *clear = data;
1053 struct drm_device *dev = priv->head->dev;
1054 drm_i810_clear_t clear;
1055 974
1056 if (copy_from_user 975 LOCK_TEST_WITH_RETURN(dev, file_priv);
1057 (&clear, (drm_i810_clear_t __user *) arg, sizeof(clear)))
1058 return -EFAULT;
1059
1060 LOCK_TEST_WITH_RETURN(dev, filp);
1061 976
1062 /* GH: Someone's doing nasty things... */ 977 /* GH: Someone's doing nasty things... */
1063 if (!dev->dev_private) { 978 if (!dev->dev_private) {
1064 return -EINVAL; 979 return -EINVAL;
1065 } 980 }
1066 981
1067 i810_dma_dispatch_clear(dev, clear.flags, 982 i810_dma_dispatch_clear(dev, clear->flags,
1068 clear.clear_color, clear.clear_depth); 983 clear->clear_color, clear->clear_depth);
1069 return 0; 984 return 0;
1070} 985}
1071 986
1072static int i810_swap_bufs(struct inode *inode, struct file *filp, 987static int i810_swap_bufs(struct drm_device *dev, void *data,
1073 unsigned int cmd, unsigned long arg) 988 struct drm_file *file_priv)
1074{ 989{
1075 struct drm_file *priv = filp->private_data;
1076 struct drm_device *dev = priv->head->dev;
1077
1078 DRM_DEBUG("i810_swap_bufs\n"); 990 DRM_DEBUG("i810_swap_bufs\n");
1079 991
1080 LOCK_TEST_WITH_RETURN(dev, filp); 992 LOCK_TEST_WITH_RETURN(dev, file_priv);
1081 993
1082 i810_dma_dispatch_swap(dev); 994 i810_dma_dispatch_swap(dev);
1083 return 0; 995 return 0;
1084} 996}
1085 997
1086static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, 998static int i810_getage(struct drm_device *dev, void *data,
1087 unsigned long arg) 999 struct drm_file *file_priv)
1088{ 1000{
1089 struct drm_file *priv = filp->private_data;
1090 struct drm_device *dev = priv->head->dev;
1091 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 1001 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1092 u32 *hw_status = dev_priv->hw_status_page; 1002 u32 *hw_status = dev_priv->hw_status_page;
1093 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) 1003 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
@@ -1097,46 +1007,39 @@ static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd,
1097 return 0; 1007 return 0;
1098} 1008}
1099 1009
1100static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, 1010static int i810_getbuf(struct drm_device *dev, void *data,
1101 unsigned long arg) 1011 struct drm_file *file_priv)
1102{ 1012{
1103 struct drm_file *priv = filp->private_data;
1104 struct drm_device *dev = priv->head->dev;
1105 int retcode = 0; 1013 int retcode = 0;
1106 drm_i810_dma_t d; 1014 drm_i810_dma_t *d = data;
1107 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 1015 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1108 u32 *hw_status = dev_priv->hw_status_page; 1016 u32 *hw_status = dev_priv->hw_status_page;
1109 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) 1017 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1110 dev_priv->sarea_priv; 1018 dev_priv->sarea_priv;
1111 1019
1112 if (copy_from_user(&d, (drm_i810_dma_t __user *) arg, sizeof(d))) 1020 LOCK_TEST_WITH_RETURN(dev, file_priv);
1113 return -EFAULT;
1114
1115 LOCK_TEST_WITH_RETURN(dev, filp);
1116 1021
1117 d.granted = 0; 1022 d->granted = 0;
1118 1023
1119 retcode = i810_dma_get_buffer(dev, &d, filp); 1024 retcode = i810_dma_get_buffer(dev, d, file_priv);
1120 1025
1121 DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", 1026 DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
1122 current->pid, retcode, d.granted); 1027 current->pid, retcode, d->granted);
1123 1028
1124 if (copy_to_user((void __user *) arg, &d, sizeof(d)))
1125 return -EFAULT;
1126 sarea_priv->last_dispatch = (int)hw_status[5]; 1029 sarea_priv->last_dispatch = (int)hw_status[5];
1127 1030
1128 return retcode; 1031 return retcode;
1129} 1032}
1130 1033
1131static int i810_copybuf(struct inode *inode, 1034static int i810_copybuf(struct drm_device *dev, void *data,
1132 struct file *filp, unsigned int cmd, unsigned long arg) 1035 struct drm_file *file_priv)
1133{ 1036{
1134 /* Never copy - 2.4.x doesn't need it */ 1037 /* Never copy - 2.4.x doesn't need it */
1135 return 0; 1038 return 0;
1136} 1039}
1137 1040
1138static int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd, 1041static int i810_docopy(struct drm_device *dev, void *data,
1139 unsigned long arg) 1042 struct drm_file *file_priv)
1140{ 1043{
1141 /* Never copy - 2.4.x doesn't need it */ 1044 /* Never copy - 2.4.x doesn't need it */
1142 return 0; 1045 return 0;
@@ -1202,30 +1105,25 @@ static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf,
1202 ADVANCE_LP_RING(); 1105 ADVANCE_LP_RING();
1203} 1106}
1204 1107
1205static int i810_dma_mc(struct inode *inode, struct file *filp, 1108static int i810_dma_mc(struct drm_device *dev, void *data,
1206 unsigned int cmd, unsigned long arg) 1109 struct drm_file *file_priv)
1207{ 1110{
1208 struct drm_file *priv = filp->private_data;
1209 struct drm_device *dev = priv->head->dev;
1210 struct drm_device_dma *dma = dev->dma; 1111 struct drm_device_dma *dma = dev->dma;
1211 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 1112 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1212 u32 *hw_status = dev_priv->hw_status_page; 1113 u32 *hw_status = dev_priv->hw_status_page;
1213 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) 1114 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1214 dev_priv->sarea_priv; 1115 dev_priv->sarea_priv;
1215 drm_i810_mc_t mc; 1116 drm_i810_mc_t *mc = data;
1216
1217 if (copy_from_user(&mc, (drm_i810_mc_t __user *) arg, sizeof(mc)))
1218 return -EFAULT;
1219 1117
1220 LOCK_TEST_WITH_RETURN(dev, filp); 1118 LOCK_TEST_WITH_RETURN(dev, file_priv);
1221 1119
1222 if (mc.idx >= dma->buf_count || mc.idx < 0) 1120 if (mc->idx >= dma->buf_count || mc->idx < 0)
1223 return -EINVAL; 1121 return -EINVAL;
1224 1122
1225 i810_dma_dispatch_mc(dev, dma->buflist[mc.idx], mc.used, 1123 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
1226 mc.last_render); 1124 mc->last_render);
1227 1125
1228 atomic_add(mc.used, &dev->counts[_DRM_STAT_SECONDARY]); 1126 atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
1229 atomic_inc(&dev->counts[_DRM_STAT_DMA]); 1127 atomic_inc(&dev->counts[_DRM_STAT_DMA]);
1230 sarea_priv->last_enqueue = dev_priv->counter - 1; 1128 sarea_priv->last_enqueue = dev_priv->counter - 1;
1231 sarea_priv->last_dispatch = (int)hw_status[5]; 1129 sarea_priv->last_dispatch = (int)hw_status[5];
@@ -1233,52 +1131,41 @@ static int i810_dma_mc(struct inode *inode, struct file *filp,
1233 return 0; 1131 return 0;
1234} 1132}
1235 1133
1236static int i810_rstatus(struct inode *inode, struct file *filp, 1134static int i810_rstatus(struct drm_device *dev, void *data,
1237 unsigned int cmd, unsigned long arg) 1135 struct drm_file *file_priv)
1238{ 1136{
1239 struct drm_file *priv = filp->private_data;
1240 struct drm_device *dev = priv->head->dev;
1241 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 1137 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1242 1138
1243 return (int)(((u32 *) (dev_priv->hw_status_page))[4]); 1139 return (int)(((u32 *) (dev_priv->hw_status_page))[4]);
1244} 1140}
1245 1141
1246static int i810_ov0_info(struct inode *inode, struct file *filp, 1142static int i810_ov0_info(struct drm_device *dev, void *data,
1247 unsigned int cmd, unsigned long arg) 1143 struct drm_file *file_priv)
1248{ 1144{
1249 struct drm_file *priv = filp->private_data;
1250 struct drm_device *dev = priv->head->dev;
1251 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 1145 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1252 drm_i810_overlay_t data; 1146 drm_i810_overlay_t *ov = data;
1147
1148 ov->offset = dev_priv->overlay_offset;
1149 ov->physical = dev_priv->overlay_physical;
1253 1150
1254 data.offset = dev_priv->overlay_offset;
1255 data.physical = dev_priv->overlay_physical;
1256 if (copy_to_user
1257 ((drm_i810_overlay_t __user *) arg, &data, sizeof(data)))
1258 return -EFAULT;
1259 return 0; 1151 return 0;
1260} 1152}
1261 1153
1262static int i810_fstatus(struct inode *inode, struct file *filp, 1154static int i810_fstatus(struct drm_device *dev, void *data,
1263 unsigned int cmd, unsigned long arg) 1155 struct drm_file *file_priv)
1264{ 1156{
1265 struct drm_file *priv = filp->private_data;
1266 struct drm_device *dev = priv->head->dev;
1267 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 1157 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1268 1158
1269 LOCK_TEST_WITH_RETURN(dev, filp); 1159 LOCK_TEST_WITH_RETURN(dev, file_priv);
1270
1271 return I810_READ(0x30008); 1160 return I810_READ(0x30008);
1272} 1161}
1273 1162
1274static int i810_ov0_flip(struct inode *inode, struct file *filp, 1163static int i810_ov0_flip(struct drm_device *dev, void *data,
1275 unsigned int cmd, unsigned long arg) 1164 struct drm_file *file_priv)
1276{ 1165{
1277 struct drm_file *priv = filp->private_data;
1278 struct drm_device *dev = priv->head->dev;
1279 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 1166 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1280 1167
1281 LOCK_TEST_WITH_RETURN(dev, filp); 1168 LOCK_TEST_WITH_RETURN(dev, file_priv);
1282 1169
1283 //Tell the overlay to update 1170 //Tell the overlay to update
1284 I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000); 1171 I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);
@@ -1310,16 +1197,14 @@ static int i810_do_cleanup_pageflip(struct drm_device * dev)
1310 return 0; 1197 return 0;
1311} 1198}
1312 1199
1313static int i810_flip_bufs(struct inode *inode, struct file *filp, 1200static int i810_flip_bufs(struct drm_device *dev, void *data,
1314 unsigned int cmd, unsigned long arg) 1201 struct drm_file *file_priv)
1315{ 1202{
1316 struct drm_file *priv = filp->private_data;
1317 struct drm_device *dev = priv->head->dev;
1318 drm_i810_private_t *dev_priv = dev->dev_private; 1203 drm_i810_private_t *dev_priv = dev->dev_private;
1319 1204
1320 DRM_DEBUG("%s\n", __FUNCTION__); 1205 DRM_DEBUG("%s\n", __FUNCTION__);
1321 1206
1322 LOCK_TEST_WITH_RETURN(dev, filp); 1207 LOCK_TEST_WITH_RETURN(dev, file_priv);
1323 1208
1324 if (!dev_priv->page_flipping) 1209 if (!dev_priv->page_flipping)
1325 i810_do_init_pageflip(dev); 1210 i810_do_init_pageflip(dev);
@@ -1345,7 +1230,7 @@ void i810_driver_lastclose(struct drm_device * dev)
1345 i810_dma_cleanup(dev); 1230 i810_dma_cleanup(dev);
1346} 1231}
1347 1232
1348void i810_driver_preclose(struct drm_device * dev, DRMFILE filp) 1233void i810_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1349{ 1234{
1350 if (dev->dev_private) { 1235 if (dev->dev_private) {
1351 drm_i810_private_t *dev_priv = dev->dev_private; 1236 drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1355,9 +1240,10 @@ void i810_driver_preclose(struct drm_device * dev, DRMFILE filp)
1355 } 1240 }
1356} 1241}
1357 1242
1358void i810_driver_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) 1243void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
1244 struct drm_file *file_priv)
1359{ 1245{
1360 i810_reclaim_buffers(dev, filp); 1246 i810_reclaim_buffers(dev, file_priv);
1361} 1247}
1362 1248
1363int i810_driver_dma_quiescent(struct drm_device * dev) 1249int i810_driver_dma_quiescent(struct drm_device * dev)
@@ -1366,22 +1252,22 @@ int i810_driver_dma_quiescent(struct drm_device * dev)
1366 return 0; 1252 return 0;
1367} 1253}
1368 1254
1369drm_ioctl_desc_t i810_ioctls[] = { 1255struct drm_ioctl_desc i810_ioctls[] = {
1370 [DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1256 DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1371 [DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, DRM_AUTH}, 1257 DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH),
1372 [DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, DRM_AUTH}, 1258 DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH),
1373 [DRM_IOCTL_NR(DRM_I810_FLUSH)] = {i810_flush_ioctl, DRM_AUTH}, 1259 DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH),
1374 [DRM_IOCTL_NR(DRM_I810_GETAGE)] = {i810_getage, DRM_AUTH}, 1260 DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH),
1375 [DRM_IOCTL_NR(DRM_I810_GETBUF)] = {i810_getbuf, DRM_AUTH}, 1261 DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH),
1376 [DRM_IOCTL_NR(DRM_I810_SWAP)] = {i810_swap_bufs, DRM_AUTH}, 1262 DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH),
1377 [DRM_IOCTL_NR(DRM_I810_COPY)] = {i810_copybuf, DRM_AUTH}, 1263 DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH),
1378 [DRM_IOCTL_NR(DRM_I810_DOCOPY)] = {i810_docopy, DRM_AUTH}, 1264 DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH),
1379 [DRM_IOCTL_NR(DRM_I810_OV0INFO)] = {i810_ov0_info, DRM_AUTH}, 1265 DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH),
1380 [DRM_IOCTL_NR(DRM_I810_FSTATUS)] = {i810_fstatus, DRM_AUTH}, 1266 DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH),
1381 [DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = {i810_ov0_flip, DRM_AUTH}, 1267 DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH),
1382 [DRM_IOCTL_NR(DRM_I810_MC)] = {i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1268 DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1383 [DRM_IOCTL_NR(DRM_I810_RSTATUS)] = {i810_rstatus, DRM_AUTH}, 1269 DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH),
1384 [DRM_IOCTL_NR(DRM_I810_FLIP)] = {i810_flip_bufs, DRM_AUTH} 1270 DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH)
1385}; 1271};
1386 1272
1387int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); 1273int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
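The table above is the other half of the conversion: open-coded designated initializers keyed by DRM_IOCTL_NR() become DRM_IOCTL_DEF() entries in a struct drm_ioctl_desc array. A sketch of how such a table is declared and handed to the core is below; the driver and command names are invented, and the .ioctls/.num_ioctls wiring follows the i810/i830 drivers of this period rather than anything shown in this hunk.

/* Sketch only: hypothetical driver-private ioctl table in the new style. */
struct drm_ioctl_desc example_ioctls[] = {
	DRM_IOCTL_DEF(DRM_EXAMPLE_INIT, example_dma_init,
		      DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_EXAMPLE_GETBUF, example_getbuf, DRM_AUTH),
};
int example_max_ioctl = DRM_ARRAY_SIZE(example_ioctls);

/* The core is pointed at the table through struct drm_driver; typically
 * (example_driver being the driver's static struct drm_driver): */
static int __init example_module_init(void)
{
	example_driver.ioctls = example_ioctls;
	example_driver.num_ioctls = example_max_ioctl;
	return drm_init(&example_driver);
}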
diff --git a/drivers/char/drm/i810_drm.h b/drivers/char/drm/i810_drm.h
index 614977dbce45..7a10bb6f2c0f 100644
--- a/drivers/char/drm/i810_drm.h
+++ b/drivers/char/drm/i810_drm.h
@@ -102,13 +102,8 @@ typedef enum _drm_i810_init_func {
102/* This is the init structure after v1.2 */ 102/* This is the init structure after v1.2 */
103typedef struct _drm_i810_init { 103typedef struct _drm_i810_init {
104 drm_i810_init_func_t func; 104 drm_i810_init_func_t func;
105#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
106 int ring_map_idx;
107 int buffer_map_idx;
108#else
109 unsigned int mmio_offset; 105 unsigned int mmio_offset;
110 unsigned int buffers_offset; 106 unsigned int buffers_offset;
111#endif
112 int sarea_priv_offset; 107 int sarea_priv_offset;
113 unsigned int ring_start; 108 unsigned int ring_start;
114 unsigned int ring_end; 109 unsigned int ring_end;
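With the CONFIG_XFREE86_VERSION conditionals gone, the init structure has a single unconditional layout. For readability, the members touched by this hunk end up as follows; the typedef name and the trailing members sit outside the hunk, so treat them as assumptions.

typedef struct _drm_i810_init {
	drm_i810_init_func_t func;
	unsigned int mmio_offset;
	unsigned int buffers_offset;
	int sarea_priv_offset;
	unsigned int ring_start;
	unsigned int ring_end;
	/* ...remaining members are unchanged by this patch and omitted... */
} drm_i810_init_t;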
diff --git a/drivers/char/drm/i810_drv.h b/drivers/char/drm/i810_drv.h
index 648833844c7f..0af45872f67e 100644
--- a/drivers/char/drm/i810_drv.h
+++ b/drivers/char/drm/i810_drv.h
@@ -117,15 +117,16 @@ typedef struct drm_i810_private {
117 /* i810_dma.c */ 117 /* i810_dma.c */
118extern int i810_driver_dma_quiescent(struct drm_device * dev); 118extern int i810_driver_dma_quiescent(struct drm_device * dev);
119extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, 119extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
120 struct file *filp); 120 struct drm_file *file_priv);
121extern int i810_driver_load(struct drm_device *, unsigned long flags); 121extern int i810_driver_load(struct drm_device *, unsigned long flags);
122extern void i810_driver_lastclose(struct drm_device * dev); 122extern void i810_driver_lastclose(struct drm_device * dev);
123extern void i810_driver_preclose(struct drm_device * dev, DRMFILE filp); 123extern void i810_driver_preclose(struct drm_device * dev,
124 struct drm_file *file_priv);
124extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, 125extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
125 struct file *filp); 126 struct drm_file *file_priv);
126extern int i810_driver_device_is_agp(struct drm_device * dev); 127extern int i810_driver_device_is_agp(struct drm_device * dev);
127 128
128extern drm_ioctl_desc_t i810_ioctls[]; 129extern struct drm_ioctl_desc i810_ioctls[];
129extern int i810_max_ioctl; 130extern int i810_max_ioctl;
130 131
131#define I810_BASE(reg) ((unsigned long) \ 132#define I810_BASE(reg) ((unsigned long) \
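Two things to note in this header: the prototypes now use struct drm_file * throughout, and i810_driver_reclaim_buffers_locked() is declared twice, so the patch has to update both copies. These hooks are consumed by the driver's struct drm_driver; a rough sketch of that wiring is below. The field names follow the DRM core of this era and are not part of this diff, so treat the exact set as an assumption.

/* Sketch: how the hooks declared above are typically plugged in. */
static struct drm_driver driver = {
	.load = i810_driver_load,
	.lastclose = i810_driver_lastclose,
	.preclose = i810_driver_preclose,	/* takes struct drm_file * now */
	.device_is_agp = i810_driver_device_is_agp,
	.reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
	.dma_quiescent = i810_driver_dma_quiescent,
	.ioctls = i810_ioctls,
	/* .fops, .pci_driver, version and feature flags omitted here */
};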
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index dc20c1a7834e..43a1f78712d6 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -122,10 +122,9 @@ static const struct file_operations i830_buffer_fops = {
122 .fasync = drm_fasync, 122 .fasync = drm_fasync,
123}; 123};
124 124
125static int i830_map_buffer(struct drm_buf * buf, struct file *filp) 125static int i830_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
126{ 126{
127 struct drm_file *priv = filp->private_data; 127 struct drm_device *dev = file_priv->head->dev;
128 struct drm_device *dev = priv->head->dev;
129 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 128 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
130 drm_i830_private_t *dev_priv = dev->dev_private; 129 drm_i830_private_t *dev_priv = dev->dev_private;
131 const struct file_operations *old_fops; 130 const struct file_operations *old_fops;
@@ -136,13 +135,13 @@ static int i830_map_buffer(struct drm_buf * buf, struct file *filp)
136 return -EINVAL; 135 return -EINVAL;
137 136
138 down_write(&current->mm->mmap_sem); 137 down_write(&current->mm->mmap_sem);
139 old_fops = filp->f_op; 138 old_fops = file_priv->filp->f_op;
140 filp->f_op = &i830_buffer_fops; 139 file_priv->filp->f_op = &i830_buffer_fops;
141 dev_priv->mmap_buffer = buf; 140 dev_priv->mmap_buffer = buf;
142 virtual = do_mmap(filp, 0, buf->total, PROT_READ | PROT_WRITE, 141 virtual = do_mmap(file_priv->filp, 0, buf->total, PROT_READ | PROT_WRITE,
143 MAP_SHARED, buf->bus_address); 142 MAP_SHARED, buf->bus_address);
144 dev_priv->mmap_buffer = NULL; 143 dev_priv->mmap_buffer = NULL;
145 filp->f_op = old_fops; 144 file_priv->filp->f_op = old_fops;
146 if (IS_ERR((void *)virtual)) { /* ugh */ 145 if (IS_ERR((void *)virtual)) { /* ugh */
147 /* Real error */ 146 /* Real error */
148 DRM_ERROR("mmap error\n"); 147 DRM_ERROR("mmap error\n");
@@ -177,7 +176,7 @@ static int i830_unmap_buffer(struct drm_buf * buf)
177} 176}
178 177
179static int i830_dma_get_buffer(struct drm_device * dev, drm_i830_dma_t * d, 178static int i830_dma_get_buffer(struct drm_device * dev, drm_i830_dma_t * d,
180 struct file *filp) 179 struct drm_file *file_priv)
181{ 180{
182 struct drm_buf *buf; 181 struct drm_buf *buf;
183 drm_i830_buf_priv_t *buf_priv; 182 drm_i830_buf_priv_t *buf_priv;
@@ -190,13 +189,13 @@ static int i830_dma_get_buffer(struct drm_device * dev, drm_i830_dma_t * d,
190 return retcode; 189 return retcode;
191 } 190 }
192 191
193 retcode = i830_map_buffer(buf, filp); 192 retcode = i830_map_buffer(buf, file_priv);
194 if (retcode) { 193 if (retcode) {
195 i830_freelist_put(dev, buf); 194 i830_freelist_put(dev, buf);
196 DRM_ERROR("mapbuf failed, retcode %d\n", retcode); 195 DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
197 return retcode; 196 return retcode;
198 } 197 }
199 buf->filp = filp; 198 buf->file_priv = file_priv;
200 buf_priv = buf->dev_private; 199 buf_priv = buf->dev_private;
201 d->granted = 1; 200 d->granted = 1;
202 d->request_idx = buf->idx; 201 d->request_idx = buf->idx;
@@ -389,7 +388,7 @@ static int i830_dma_initialize(struct drm_device * dev,
389 i830_dma_cleanup(dev); 388 i830_dma_cleanup(dev);
390 DRM_ERROR("can not ioremap virtual address for" 389 DRM_ERROR("can not ioremap virtual address for"
391 " ring buffer\n"); 390 " ring buffer\n");
392 return DRM_ERR(ENOMEM); 391 return -ENOMEM;
393 } 392 }
394 393
395 dev_priv->ring.virtual_start = dev_priv->ring.map.handle; 394 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
@@ -451,25 +450,20 @@ static int i830_dma_initialize(struct drm_device * dev,
451 return 0; 450 return 0;
452} 451}
453 452
454static int i830_dma_init(struct inode *inode, struct file *filp, 453static int i830_dma_init(struct drm_device *dev, void *data,
455 unsigned int cmd, unsigned long arg) 454 struct drm_file *file_priv)
456{ 455{
457 struct drm_file *priv = filp->private_data;
458 struct drm_device *dev = priv->head->dev;
459 drm_i830_private_t *dev_priv; 456 drm_i830_private_t *dev_priv;
460 drm_i830_init_t init; 457 drm_i830_init_t *init = data;
461 int retcode = 0; 458 int retcode = 0;
462 459
463 if (copy_from_user(&init, (void *__user)arg, sizeof(init))) 460 switch (init->func) {
464 return -EFAULT;
465
466 switch (init.func) {
467 case I830_INIT_DMA: 461 case I830_INIT_DMA:
468 dev_priv = drm_alloc(sizeof(drm_i830_private_t), 462 dev_priv = drm_alloc(sizeof(drm_i830_private_t),
469 DRM_MEM_DRIVER); 463 DRM_MEM_DRIVER);
470 if (dev_priv == NULL) 464 if (dev_priv == NULL)
471 return -ENOMEM; 465 return -ENOMEM;
472 retcode = i830_dma_initialize(dev, dev_priv, &init); 466 retcode = i830_dma_initialize(dev, dev_priv, init);
473 break; 467 break;
474 case I830_CLEANUP_DMA: 468 case I830_CLEANUP_DMA:
475 retcode = i830_dma_cleanup(dev); 469 retcode = i830_dma_cleanup(dev);
@@ -1248,7 +1242,7 @@ static int i830_flush_queue(struct drm_device * dev)
1248} 1242}
1249 1243
1250/* Must be called with the lock held */ 1244/* Must be called with the lock held */
1251static void i830_reclaim_buffers(struct drm_device * dev, struct file *filp) 1245static void i830_reclaim_buffers(struct drm_device * dev, struct drm_file *file_priv)
1252{ 1246{
1253 struct drm_device_dma *dma = dev->dma; 1247 struct drm_device_dma *dma = dev->dma;
1254 int i; 1248 int i;
@@ -1266,7 +1260,7 @@ static void i830_reclaim_buffers(struct drm_device * dev, struct file *filp)
1266 struct drm_buf *buf = dma->buflist[i]; 1260 struct drm_buf *buf = dma->buflist[i];
1267 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 1261 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
1268 1262
1269 if (buf->filp == filp && buf_priv) { 1263 if (buf->file_priv == file_priv && buf_priv) {
1270 int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, 1264 int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
1271 I830_BUF_FREE); 1265 I830_BUF_FREE);
1272 1266
@@ -1278,45 +1272,36 @@ static void i830_reclaim_buffers(struct drm_device * dev, struct file *filp)
1278 } 1272 }
1279} 1273}
1280 1274
1281static int i830_flush_ioctl(struct inode *inode, struct file *filp, 1275static int i830_flush_ioctl(struct drm_device *dev, void *data,
1282 unsigned int cmd, unsigned long arg) 1276 struct drm_file *file_priv)
1283{ 1277{
1284 struct drm_file *priv = filp->private_data; 1278 LOCK_TEST_WITH_RETURN(dev, file_priv);
1285 struct drm_device *dev = priv->head->dev;
1286
1287 LOCK_TEST_WITH_RETURN(dev, filp);
1288 1279
1289 i830_flush_queue(dev); 1280 i830_flush_queue(dev);
1290 return 0; 1281 return 0;
1291} 1282}
1292 1283
1293static int i830_dma_vertex(struct inode *inode, struct file *filp, 1284static int i830_dma_vertex(struct drm_device *dev, void *data,
1294 unsigned int cmd, unsigned long arg) 1285 struct drm_file *file_priv)
1295{ 1286{
1296 struct drm_file *priv = filp->private_data;
1297 struct drm_device *dev = priv->head->dev;
1298 struct drm_device_dma *dma = dev->dma; 1287 struct drm_device_dma *dma = dev->dma;
1299 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; 1288 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
1300 u32 *hw_status = dev_priv->hw_status_page; 1289 u32 *hw_status = dev_priv->hw_status_page;
1301 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *) 1290 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
1302 dev_priv->sarea_priv; 1291 dev_priv->sarea_priv;
1303 drm_i830_vertex_t vertex; 1292 drm_i830_vertex_t *vertex = data;
1304
1305 if (copy_from_user
1306 (&vertex, (drm_i830_vertex_t __user *) arg, sizeof(vertex)))
1307 return -EFAULT;
1308 1293
1309 LOCK_TEST_WITH_RETURN(dev, filp); 1294 LOCK_TEST_WITH_RETURN(dev, file_priv);
1310 1295
1311 DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n", 1296 DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n",
1312 vertex.idx, vertex.used, vertex.discard); 1297 vertex->idx, vertex->used, vertex->discard);
1313 1298
1314 if (vertex.idx < 0 || vertex.idx > dma->buf_count) 1299 if (vertex->idx < 0 || vertex->idx > dma->buf_count)
1315 return -EINVAL; 1300 return -EINVAL;
1316 1301
1317 i830_dma_dispatch_vertex(dev, 1302 i830_dma_dispatch_vertex(dev,
1318 dma->buflist[vertex.idx], 1303 dma->buflist[vertex->idx],
1319 vertex.discard, vertex.used); 1304 vertex->discard, vertex->used);
1320 1305
1321 sarea_priv->last_enqueue = dev_priv->counter - 1; 1306 sarea_priv->last_enqueue = dev_priv->counter - 1;
1322 sarea_priv->last_dispatch = (int)hw_status[5]; 1307 sarea_priv->last_dispatch = (int)hw_status[5];
@@ -1324,39 +1309,30 @@ static int i830_dma_vertex(struct inode *inode, struct file *filp,
1324 return 0; 1309 return 0;
1325} 1310}
1326 1311
1327static int i830_clear_bufs(struct inode *inode, struct file *filp, 1312static int i830_clear_bufs(struct drm_device *dev, void *data,
1328 unsigned int cmd, unsigned long arg) 1313 struct drm_file *file_priv)
1329{ 1314{
1330 struct drm_file *priv = filp->private_data; 1315 drm_i830_clear_t *clear = data;
1331 struct drm_device *dev = priv->head->dev;
1332 drm_i830_clear_t clear;
1333
1334 if (copy_from_user
1335 (&clear, (drm_i830_clear_t __user *) arg, sizeof(clear)))
1336 return -EFAULT;
1337 1316
1338 LOCK_TEST_WITH_RETURN(dev, filp); 1317 LOCK_TEST_WITH_RETURN(dev, file_priv);
1339 1318
1340 /* GH: Someone's doing nasty things... */ 1319 /* GH: Someone's doing nasty things... */
1341 if (!dev->dev_private) { 1320 if (!dev->dev_private) {
1342 return -EINVAL; 1321 return -EINVAL;
1343 } 1322 }
1344 1323
1345 i830_dma_dispatch_clear(dev, clear.flags, 1324 i830_dma_dispatch_clear(dev, clear->flags,
1346 clear.clear_color, 1325 clear->clear_color,
1347 clear.clear_depth, clear.clear_depthmask); 1326 clear->clear_depth, clear->clear_depthmask);
1348 return 0; 1327 return 0;
1349} 1328}
1350 1329
1351static int i830_swap_bufs(struct inode *inode, struct file *filp, 1330static int i830_swap_bufs(struct drm_device *dev, void *data,
1352 unsigned int cmd, unsigned long arg) 1331 struct drm_file *file_priv)
1353{ 1332{
1354 struct drm_file *priv = filp->private_data;
1355 struct drm_device *dev = priv->head->dev;
1356
1357 DRM_DEBUG("i830_swap_bufs\n"); 1333 DRM_DEBUG("i830_swap_bufs\n");
1358 1334
1359 LOCK_TEST_WITH_RETURN(dev, filp); 1335 LOCK_TEST_WITH_RETURN(dev, file_priv);
1360 1336
1361 i830_dma_dispatch_swap(dev); 1337 i830_dma_dispatch_swap(dev);
1362 return 0; 1338 return 0;
@@ -1386,16 +1362,14 @@ static int i830_do_cleanup_pageflip(struct drm_device * dev)
1386 return 0; 1362 return 0;
1387} 1363}
1388 1364
1389static int i830_flip_bufs(struct inode *inode, struct file *filp, 1365static int i830_flip_bufs(struct drm_device *dev, void *data,
1390 unsigned int cmd, unsigned long arg) 1366 struct drm_file *file_priv)
1391{ 1367{
1392 struct drm_file *priv = filp->private_data;
1393 struct drm_device *dev = priv->head->dev;
1394 drm_i830_private_t *dev_priv = dev->dev_private; 1368 drm_i830_private_t *dev_priv = dev->dev_private;
1395 1369
1396 DRM_DEBUG("%s\n", __FUNCTION__); 1370 DRM_DEBUG("%s\n", __FUNCTION__);
1397 1371
1398 LOCK_TEST_WITH_RETURN(dev, filp); 1372 LOCK_TEST_WITH_RETURN(dev, file_priv);
1399 1373
1400 if (!dev_priv->page_flipping) 1374 if (!dev_priv->page_flipping)
1401 i830_do_init_pageflip(dev); 1375 i830_do_init_pageflip(dev);
@@ -1404,11 +1378,9 @@ static int i830_flip_bufs(struct inode *inode, struct file *filp,
1404 return 0; 1378 return 0;
1405} 1379}
1406 1380
1407static int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd, 1381static int i830_getage(struct drm_device *dev, void *data,
1408 unsigned long arg) 1382 struct drm_file *file_priv)
1409{ 1383{
1410 struct drm_file *priv = filp->private_data;
1411 struct drm_device *dev = priv->head->dev;
1412 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; 1384 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
1413 u32 *hw_status = dev_priv->hw_status_page; 1385 u32 *hw_status = dev_priv->hw_status_page;
1414 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *) 1386 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
@@ -1418,58 +1390,50 @@ static int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd,
1418 return 0; 1390 return 0;
1419} 1391}
1420 1392
1421static int i830_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, 1393static int i830_getbuf(struct drm_device *dev, void *data,
1422 unsigned long arg) 1394 struct drm_file *file_priv)
1423{ 1395{
1424 struct drm_file *priv = filp->private_data;
1425 struct drm_device *dev = priv->head->dev;
1426 int retcode = 0; 1396 int retcode = 0;
1427 drm_i830_dma_t d; 1397 drm_i830_dma_t *d = data;
1428 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; 1398 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
1429 u32 *hw_status = dev_priv->hw_status_page; 1399 u32 *hw_status = dev_priv->hw_status_page;
1430 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *) 1400 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
1431 dev_priv->sarea_priv; 1401 dev_priv->sarea_priv;
1432 1402
1433 DRM_DEBUG("getbuf\n"); 1403 DRM_DEBUG("getbuf\n");
1434 if (copy_from_user(&d, (drm_i830_dma_t __user *) arg, sizeof(d)))
1435 return -EFAULT;
1436 1404
1437 LOCK_TEST_WITH_RETURN(dev, filp); 1405 LOCK_TEST_WITH_RETURN(dev, file_priv);
1438 1406
1439 d.granted = 0; 1407 d->granted = 0;
1440 1408
1441 retcode = i830_dma_get_buffer(dev, &d, filp); 1409 retcode = i830_dma_get_buffer(dev, d, file_priv);
1442 1410
1443 DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n", 1411 DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n",
1444 current->pid, retcode, d.granted); 1412 current->pid, retcode, d->granted);
1445 1413
1446 if (copy_to_user((void __user *) arg, &d, sizeof(d)))
1447 return -EFAULT;
1448 sarea_priv->last_dispatch = (int)hw_status[5]; 1414 sarea_priv->last_dispatch = (int)hw_status[5];
1449 1415
1450 return retcode; 1416 return retcode;
1451} 1417}
1452 1418
1453static int i830_copybuf(struct inode *inode, 1419static int i830_copybuf(struct drm_device *dev, void *data,
1454 struct file *filp, unsigned int cmd, unsigned long arg) 1420 struct drm_file *file_priv)
1455{ 1421{
1456 /* Never copy - 2.4.x doesn't need it */ 1422 /* Never copy - 2.4.x doesn't need it */
1457 return 0; 1423 return 0;
1458} 1424}
1459 1425
1460static int i830_docopy(struct inode *inode, struct file *filp, unsigned int cmd, 1426static int i830_docopy(struct drm_device *dev, void *data,
1461 unsigned long arg) 1427 struct drm_file *file_priv)
1462{ 1428{
1463 return 0; 1429 return 0;
1464} 1430}
1465 1431
1466static int i830_getparam(struct inode *inode, struct file *filp, 1432static int i830_getparam(struct drm_device *dev, void *data,
1467 unsigned int cmd, unsigned long arg) 1433 struct drm_file *file_priv)
1468{ 1434{
1469 struct drm_file *priv = filp->private_data;
1470 struct drm_device *dev = priv->head->dev;
1471 drm_i830_private_t *dev_priv = dev->dev_private; 1435 drm_i830_private_t *dev_priv = dev->dev_private;
1472 drm_i830_getparam_t param; 1436 drm_i830_getparam_t *param = data;
1473 int value; 1437 int value;
1474 1438
1475 if (!dev_priv) { 1439 if (!dev_priv) {
@@ -1477,11 +1441,7 @@ static int i830_getparam(struct inode *inode, struct file *filp,
1477 return -EINVAL; 1441 return -EINVAL;
1478 } 1442 }
1479 1443
1480 if (copy_from_user 1444 switch (param->param) {
1481 (&param, (drm_i830_getparam_t __user *) arg, sizeof(param)))
1482 return -EFAULT;
1483
1484 switch (param.param) {
1485 case I830_PARAM_IRQ_ACTIVE: 1445 case I830_PARAM_IRQ_ACTIVE:
1486 value = dev->irq_enabled; 1446 value = dev->irq_enabled;
1487 break; 1447 break;
@@ -1489,7 +1449,7 @@ static int i830_getparam(struct inode *inode, struct file *filp,
1489 return -EINVAL; 1449 return -EINVAL;
1490 } 1450 }
1491 1451
1492 if (copy_to_user(param.value, &value, sizeof(int))) { 1452 if (copy_to_user(param->value, &value, sizeof(int))) {
1493 DRM_ERROR("copy_to_user\n"); 1453 DRM_ERROR("copy_to_user\n");
1494 return -EFAULT; 1454 return -EFAULT;
1495 } 1455 }
@@ -1497,26 +1457,20 @@ static int i830_getparam(struct inode *inode, struct file *filp,
1497 return 0; 1457 return 0;
1498} 1458}
1499 1459
1500static int i830_setparam(struct inode *inode, struct file *filp, 1460static int i830_setparam(struct drm_device *dev, void *data,
1501 unsigned int cmd, unsigned long arg) 1461 struct drm_file *file_priv)
1502{ 1462{
1503 struct drm_file *priv = filp->private_data;
1504 struct drm_device *dev = priv->head->dev;
1505 drm_i830_private_t *dev_priv = dev->dev_private; 1463 drm_i830_private_t *dev_priv = dev->dev_private;
1506 drm_i830_setparam_t param; 1464 drm_i830_setparam_t *param = data;
1507 1465
1508 if (!dev_priv) { 1466 if (!dev_priv) {
1509 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1467 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1510 return -EINVAL; 1468 return -EINVAL;
1511 } 1469 }
1512 1470
1513 if (copy_from_user 1471 switch (param->param) {
1514 (&param, (drm_i830_setparam_t __user *) arg, sizeof(param)))
1515 return -EFAULT;
1516
1517 switch (param.param) {
1518 case I830_SETPARAM_USE_MI_BATCHBUFFER_START: 1472 case I830_SETPARAM_USE_MI_BATCHBUFFER_START:
1519 dev_priv->use_mi_batchbuffer_start = param.value; 1473 dev_priv->use_mi_batchbuffer_start = param->value;
1520 break; 1474 break;
1521 default: 1475 default:
1522 return -EINVAL; 1476 return -EINVAL;
@@ -1542,7 +1496,7 @@ void i830_driver_lastclose(struct drm_device * dev)
1542 i830_dma_cleanup(dev); 1496 i830_dma_cleanup(dev);
1543} 1497}
1544 1498
1545void i830_driver_preclose(struct drm_device * dev, DRMFILE filp) 1499void i830_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1546{ 1500{
1547 if (dev->dev_private) { 1501 if (dev->dev_private) {
1548 drm_i830_private_t *dev_priv = dev->dev_private; 1502 drm_i830_private_t *dev_priv = dev->dev_private;
@@ -1552,9 +1506,9 @@ void i830_driver_preclose(struct drm_device * dev, DRMFILE filp)
1552 } 1506 }
1553} 1507}
1554 1508
1555void i830_driver_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) 1509void i830_driver_reclaim_buffers_locked(struct drm_device * dev, struct drm_file *file_priv)
1556{ 1510{
1557 i830_reclaim_buffers(dev, filp); 1511 i830_reclaim_buffers(dev, file_priv);
1558} 1512}
1559 1513
1560int i830_driver_dma_quiescent(struct drm_device * dev) 1514int i830_driver_dma_quiescent(struct drm_device * dev)
@@ -1563,21 +1517,21 @@ int i830_driver_dma_quiescent(struct drm_device * dev)
1563 return 0; 1517 return 0;
1564} 1518}
1565 1519
1566drm_ioctl_desc_t i830_ioctls[] = { 1520struct drm_ioctl_desc i830_ioctls[] = {
1567 [DRM_IOCTL_NR(DRM_I830_INIT)] = {i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1521 DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1568 [DRM_IOCTL_NR(DRM_I830_VERTEX)] = {i830_dma_vertex, DRM_AUTH}, 1522 DRM_IOCTL_DEF(DRM_I830_VERTEX, i830_dma_vertex, DRM_AUTH),
1569 [DRM_IOCTL_NR(DRM_I830_CLEAR)] = {i830_clear_bufs, DRM_AUTH}, 1523 DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH),
1570 [DRM_IOCTL_NR(DRM_I830_FLUSH)] = {i830_flush_ioctl, DRM_AUTH}, 1524 DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH),
1571 [DRM_IOCTL_NR(DRM_I830_GETAGE)] = {i830_getage, DRM_AUTH}, 1525 DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH),
1572 [DRM_IOCTL_NR(DRM_I830_GETBUF)] = {i830_getbuf, DRM_AUTH}, 1526 DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH),
1573 [DRM_IOCTL_NR(DRM_I830_SWAP)] = {i830_swap_bufs, DRM_AUTH}, 1527 DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH),
1574 [DRM_IOCTL_NR(DRM_I830_COPY)] = {i830_copybuf, DRM_AUTH}, 1528 DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH),
1575 [DRM_IOCTL_NR(DRM_I830_DOCOPY)] = {i830_docopy, DRM_AUTH}, 1529 DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH),
1576 [DRM_IOCTL_NR(DRM_I830_FLIP)] = {i830_flip_bufs, DRM_AUTH}, 1530 DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH),
1577 [DRM_IOCTL_NR(DRM_I830_IRQ_EMIT)] = {i830_irq_emit, DRM_AUTH}, 1531 DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH),
1578 [DRM_IOCTL_NR(DRM_I830_IRQ_WAIT)] = {i830_irq_wait, DRM_AUTH}, 1532 DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH),
1579 [DRM_IOCTL_NR(DRM_I830_GETPARAM)] = {i830_getparam, DRM_AUTH}, 1533 DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH),
1580 [DRM_IOCTL_NR(DRM_I830_SETPARAM)] = {i830_setparam, DRM_AUTH} 1534 DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH)
1581}; 1535};
1582 1536
1583int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls); 1537int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
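One subtlety the i830_getparam()/i830_setparam() hunks illustrate: the core now copies the top-level ioctl struct in and out, but a pointer embedded in that struct (param->value here) still refers to userspace, so the handler keeps its own copy_to_user() for that indirection. A condensed sketch of the read-back pattern, with invented names:

struct example_getparam {
	int param;		/* in:  which parameter is requested */
	int __user *value;	/* in:  userspace address for the result */
};

static int example_getparam_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct example_getparam *gp = data;	/* already in kernel memory */
	int value;

	switch (gp->param) {
	case 0:
		value = dev->irq_enabled;
		break;
	default:
		return -EINVAL;
	}

	/* gp->value is still a user pointer -- the core cannot follow it. */
	if (copy_to_user(gp->value, &value, sizeof(int)))
		return -EFAULT;

	return 0;
}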
diff --git a/drivers/char/drm/i830_drv.h b/drivers/char/drm/i830_drv.h
index ddda67956dea..db3a9fa83960 100644
--- a/drivers/char/drm/i830_drv.h
+++ b/drivers/char/drm/i830_drv.h
@@ -122,24 +122,25 @@ typedef struct drm_i830_private {
122 122
123} drm_i830_private_t; 123} drm_i830_private_t;
124 124
125extern drm_ioctl_desc_t i830_ioctls[]; 125extern struct drm_ioctl_desc i830_ioctls[];
126extern int i830_max_ioctl; 126extern int i830_max_ioctl;
127 127
128/* i830_irq.c */ 128/* i830_irq.c */
129extern int i830_irq_emit(struct inode *inode, struct file *filp, 129extern int i830_irq_emit(struct drm_device *dev, void *data,
130 unsigned int cmd, unsigned long arg); 130 struct drm_file *file_priv);
131extern int i830_irq_wait(struct inode *inode, struct file *filp, 131extern int i830_irq_wait(struct drm_device *dev, void *data,
132 unsigned int cmd, unsigned long arg); 132 struct drm_file *file_priv);
133 133
134extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS); 134extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
135extern void i830_driver_irq_preinstall(struct drm_device * dev); 135extern void i830_driver_irq_preinstall(struct drm_device * dev);
136extern void i830_driver_irq_postinstall(struct drm_device * dev); 136extern void i830_driver_irq_postinstall(struct drm_device * dev);
137extern void i830_driver_irq_uninstall(struct drm_device * dev); 137extern void i830_driver_irq_uninstall(struct drm_device * dev);
138extern int i830_driver_load(struct drm_device *, unsigned long flags); 138extern int i830_driver_load(struct drm_device *, unsigned long flags);
139extern void i830_driver_preclose(struct drm_device * dev, DRMFILE filp); 139extern void i830_driver_preclose(struct drm_device * dev,
140 struct drm_file *file_priv);
140extern void i830_driver_lastclose(struct drm_device * dev); 141extern void i830_driver_lastclose(struct drm_device * dev);
141extern void i830_driver_reclaim_buffers_locked(struct drm_device * dev, 142extern void i830_driver_reclaim_buffers_locked(struct drm_device * dev,
142 struct file *filp); 143 struct drm_file *file_priv);
143extern int i830_driver_dma_quiescent(struct drm_device * dev); 144extern int i830_driver_dma_quiescent(struct drm_device * dev);
144extern int i830_driver_device_is_agp(struct drm_device * dev); 145extern int i830_driver_device_is_agp(struct drm_device * dev);
145 146
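All of the prototypes rewritten in these headers converge on a single handler type so they can be stored in the drm_ioctl_desc tables. Roughly, per the DRM core headers of this period (treat the exact definitions, and especially the member order, as an approximation):

/* Common handler type every converted ioctl now matches: */
typedef int drm_ioctl_t(struct drm_device *dev, void *data,
			struct drm_file *file_priv);

/* ...and the table entry it is stored in, filled by DRM_IOCTL_DEF(): */
struct drm_ioctl_desc {
	unsigned int cmd;	/* full ioctl number, including size bits */
	drm_ioctl_t *func;
	int flags;		/* DRM_AUTH, DRM_MASTER, DRM_ROOT_ONLY, ... */
};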
diff --git a/drivers/char/drm/i830_irq.c b/drivers/char/drm/i830_irq.c
index a1b5c63c3c3e..76403f4b6200 100644
--- a/drivers/char/drm/i830_irq.c
+++ b/drivers/char/drm/i830_irq.c
@@ -114,29 +114,23 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
114 114
115/* Needs the lock as it touches the ring. 115/* Needs the lock as it touches the ring.
116 */ 116 */
117int i830_irq_emit(struct inode *inode, struct file *filp, unsigned int cmd, 117int i830_irq_emit(struct drm_device *dev, void *data,
118 unsigned long arg) 118 struct drm_file *file_priv)
119{ 119{
120 struct drm_file *priv = filp->private_data;
121 struct drm_device *dev = priv->head->dev;
122 drm_i830_private_t *dev_priv = dev->dev_private; 120 drm_i830_private_t *dev_priv = dev->dev_private;
123 drm_i830_irq_emit_t emit; 121 drm_i830_irq_emit_t *emit = data;
124 int result; 122 int result;
125 123
126 LOCK_TEST_WITH_RETURN(dev, filp); 124 LOCK_TEST_WITH_RETURN(dev, file_priv);
127 125
128 if (!dev_priv) { 126 if (!dev_priv) {
129 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 127 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
130 return -EINVAL; 128 return -EINVAL;
131 } 129 }
132 130
133 if (copy_from_user
134 (&emit, (drm_i830_irq_emit_t __user *) arg, sizeof(emit)))
135 return -EFAULT;
136
137 result = i830_emit_irq(dev); 131 result = i830_emit_irq(dev);
138 132
139 if (copy_to_user(emit.irq_seq, &result, sizeof(int))) { 133 if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
140 DRM_ERROR("copy_to_user\n"); 134 DRM_ERROR("copy_to_user\n");
141 return -EFAULT; 135 return -EFAULT;
142 } 136 }
@@ -146,24 +140,18 @@ int i830_irq_emit(struct inode *inode, struct file *filp, unsigned int cmd,
146 140
147/* Doesn't need the hardware lock. 141/* Doesn't need the hardware lock.
148 */ 142 */
149int i830_irq_wait(struct inode *inode, struct file *filp, unsigned int cmd, 143int i830_irq_wait(struct drm_device *dev, void *data,
150 unsigned long arg) 144 struct drm_file *file_priv)
151{ 145{
152 struct drm_file *priv = filp->private_data;
153 struct drm_device *dev = priv->head->dev;
154 drm_i830_private_t *dev_priv = dev->dev_private; 146 drm_i830_private_t *dev_priv = dev->dev_private;
155 drm_i830_irq_wait_t irqwait; 147 drm_i830_irq_wait_t *irqwait = data;
156 148
157 if (!dev_priv) { 149 if (!dev_priv) {
158 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 150 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
159 return -EINVAL; 151 return -EINVAL;
160 } 152 }
161 153
162 if (copy_from_user(&irqwait, (drm_i830_irq_wait_t __user *) arg, 154 return i830_wait_irq(dev, irqwait->irq_seq);
163 sizeof(irqwait)))
164 return -EFAULT;
165
166 return i830_wait_irq(dev, irqwait.irq_seq);
167} 155}
168 156
169/* drm_dma.h hooks 157/* drm_dma.h hooks
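The "needs the lock" comment above is what LOCK_TEST_WITH_RETURN() enforces, and it is the other place the filp -> file_priv switch matters: the hardware-lock owner is now recorded as a struct drm_file * and compared directly. The macro behaves roughly like the sketch below (paraphrased from the drmP.h of this period, not taken from this diff).

/* Approximate behaviour of LOCK_TEST_WITH_RETURN(dev, file_priv): bail out
 * of the ioctl unless the HW lock is held *and* owned by the caller. */
#define EXAMPLE_LOCK_TEST_WITH_RETURN(dev, file_priv)			\
do {									\
	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||		\
	    dev->lock.file_priv != file_priv) {				\
		DRM_ERROR("%s called without lock held\n", __FUNCTION__);\
		return -EINVAL;						\
	}								\
} while (0)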
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index 8e7d713a5a15..e61a43e5b3ac 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -70,7 +70,7 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
70 last_head = ring->head; 70 last_head = ring->head;
71 } 71 }
72 72
73 return DRM_ERR(EBUSY); 73 return -EBUSY;
74} 74}
75 75
76void i915_kernel_lost_context(struct drm_device * dev) 76void i915_kernel_lost_context(struct drm_device * dev)
@@ -137,7 +137,7 @@ static int i915_initialize(struct drm_device * dev,
137 DRM_ERROR("can not find sarea!\n"); 137 DRM_ERROR("can not find sarea!\n");
138 dev->dev_private = (void *)dev_priv; 138 dev->dev_private = (void *)dev_priv;
139 i915_dma_cleanup(dev); 139 i915_dma_cleanup(dev);
140 return DRM_ERR(EINVAL); 140 return -EINVAL;
141 } 141 }
142 142
143 dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset); 143 dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
@@ -145,7 +145,7 @@ static int i915_initialize(struct drm_device * dev,
145 dev->dev_private = (void *)dev_priv; 145 dev->dev_private = (void *)dev_priv;
146 i915_dma_cleanup(dev); 146 i915_dma_cleanup(dev);
147 DRM_ERROR("can not find mmio map!\n"); 147 DRM_ERROR("can not find mmio map!\n");
148 return DRM_ERR(EINVAL); 148 return -EINVAL;
149 } 149 }
150 150
151 dev_priv->sarea_priv = (drm_i915_sarea_t *) 151 dev_priv->sarea_priv = (drm_i915_sarea_t *)
@@ -169,7 +169,7 @@ static int i915_initialize(struct drm_device * dev,
169 i915_dma_cleanup(dev); 169 i915_dma_cleanup(dev);
170 DRM_ERROR("can not ioremap virtual address for" 170 DRM_ERROR("can not ioremap virtual address for"
171 " ring buffer\n"); 171 " ring buffer\n");
172 return DRM_ERR(ENOMEM); 172 return -ENOMEM;
173 } 173 }
174 174
175 dev_priv->ring.virtual_start = dev_priv->ring.map.handle; 175 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
@@ -200,7 +200,7 @@ static int i915_initialize(struct drm_device * dev,
200 dev->dev_private = (void *)dev_priv; 200 dev->dev_private = (void *)dev_priv;
201 i915_dma_cleanup(dev); 201 i915_dma_cleanup(dev);
202 DRM_ERROR("Can not allocate hardware status page\n"); 202 DRM_ERROR("Can not allocate hardware status page\n");
203 return DRM_ERR(ENOMEM); 203 return -ENOMEM;
204 } 204 }
205 dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; 205 dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
206 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; 206 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
@@ -221,24 +221,24 @@ static int i915_dma_resume(struct drm_device * dev)
221 221
222 if (!dev_priv->sarea) { 222 if (!dev_priv->sarea) {
223 DRM_ERROR("can not find sarea!\n"); 223 DRM_ERROR("can not find sarea!\n");
224 return DRM_ERR(EINVAL); 224 return -EINVAL;
225 } 225 }
226 226
227 if (!dev_priv->mmio_map) { 227 if (!dev_priv->mmio_map) {
228 DRM_ERROR("can not find mmio map!\n"); 228 DRM_ERROR("can not find mmio map!\n");
229 return DRM_ERR(EINVAL); 229 return -EINVAL;
230 } 230 }
231 231
232 if (dev_priv->ring.map.handle == NULL) { 232 if (dev_priv->ring.map.handle == NULL) {
233 DRM_ERROR("can not ioremap virtual address for" 233 DRM_ERROR("can not ioremap virtual address for"
234 " ring buffer\n"); 234 " ring buffer\n");
235 return DRM_ERR(ENOMEM); 235 return -ENOMEM;
236 } 236 }
237 237
238 /* Program Hardware Status Page */ 238 /* Program Hardware Status Page */
239 if (!dev_priv->hw_status_page) { 239 if (!dev_priv->hw_status_page) {
240 DRM_ERROR("Can not find hardware status page\n"); 240 DRM_ERROR("Can not find hardware status page\n");
241 return DRM_ERR(EINVAL); 241 return -EINVAL;
242 } 242 }
243 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); 243 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
244 244
@@ -251,23 +251,20 @@ static int i915_dma_resume(struct drm_device * dev)
251 return 0; 251 return 0;
252} 252}
253 253
254static int i915_dma_init(DRM_IOCTL_ARGS) 254static int i915_dma_init(struct drm_device *dev, void *data,
255 struct drm_file *file_priv)
255{ 256{
256 DRM_DEVICE;
257 drm_i915_private_t *dev_priv; 257 drm_i915_private_t *dev_priv;
258 drm_i915_init_t init; 258 drm_i915_init_t *init = data;
259 int retcode = 0; 259 int retcode = 0;
260 260
261 DRM_COPY_FROM_USER_IOCTL(init, (drm_i915_init_t __user *) data, 261 switch (init->func) {
262 sizeof(init));
263
264 switch (init.func) {
265 case I915_INIT_DMA: 262 case I915_INIT_DMA:
266 dev_priv = drm_alloc(sizeof(drm_i915_private_t), 263 dev_priv = drm_alloc(sizeof(drm_i915_private_t),
267 DRM_MEM_DRIVER); 264 DRM_MEM_DRIVER);
268 if (dev_priv == NULL) 265 if (dev_priv == NULL)
269 return DRM_ERR(ENOMEM); 266 return -ENOMEM;
270 retcode = i915_initialize(dev, dev_priv, &init); 267 retcode = i915_initialize(dev, dev_priv, init);
271 break; 268 break;
272 case I915_CLEANUP_DMA: 269 case I915_CLEANUP_DMA:
273 retcode = i915_dma_cleanup(dev); 270 retcode = i915_dma_cleanup(dev);
@@ -276,7 +273,7 @@ static int i915_dma_init(DRM_IOCTL_ARGS)
276 retcode = i915_dma_resume(dev); 273 retcode = i915_dma_resume(dev);
277 break; 274 break;
278 default: 275 default:
279 retcode = DRM_ERR(EINVAL); 276 retcode = -EINVAL;
280 break; 277 break;
281 } 278 }
282 279
@@ -366,7 +363,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
366 RING_LOCALS; 363 RING_LOCALS;
367 364
368 if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8) 365 if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
369 return DRM_ERR(EINVAL); 366 return -EINVAL;
370 367
371 BEGIN_LP_RING((dwords+1)&~1); 368 BEGIN_LP_RING((dwords+1)&~1);
372 369
@@ -374,17 +371,17 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
374 int cmd, sz; 371 int cmd, sz;
375 372
376 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) 373 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
377 return DRM_ERR(EINVAL); 374 return -EINVAL;
378 375
379 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) 376 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
380 return DRM_ERR(EINVAL); 377 return -EINVAL;
381 378
382 OUT_RING(cmd); 379 OUT_RING(cmd);
383 380
384 while (++i, --sz) { 381 while (++i, --sz) {
385 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], 382 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
386 sizeof(cmd))) { 383 sizeof(cmd))) {
387 return DRM_ERR(EINVAL); 384 return -EINVAL;
388 } 385 }
389 OUT_RING(cmd); 386 OUT_RING(cmd);
390 } 387 }
@@ -407,13 +404,13 @@ static int i915_emit_box(struct drm_device * dev,
407 RING_LOCALS; 404 RING_LOCALS;
408 405
409 if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) { 406 if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
410 return DRM_ERR(EFAULT); 407 return -EFAULT;
411 } 408 }
412 409
413 if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { 410 if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
414 DRM_ERROR("Bad box %d,%d..%d,%d\n", 411 DRM_ERROR("Bad box %d,%d..%d,%d\n",
415 box.x1, box.y1, box.x2, box.y2); 412 box.x1, box.y1, box.x2, box.y2);
416 return DRM_ERR(EINVAL); 413 return -EINVAL;
417 } 414 }
418 415
419 if (IS_I965G(dev)) { 416 if (IS_I965G(dev)) {
@@ -467,7 +464,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
467 464
468 if (cmd->sz & 0x3) { 465 if (cmd->sz & 0x3) {
469 DRM_ERROR("alignment"); 466 DRM_ERROR("alignment");
470 return DRM_ERR(EINVAL); 467 return -EINVAL;
471 } 468 }
472 469
473 i915_kernel_lost_context(dev); 470 i915_kernel_lost_context(dev);
@@ -502,7 +499,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
502 499
503 if ((batch->start | batch->used) & 0x7) { 500 if ((batch->start | batch->used) & 0x7) {
504 DRM_ERROR("alignment"); 501 DRM_ERROR("alignment");
505 return DRM_ERR(EINVAL); 502 return -EINVAL;
506 } 503 }
507 504
508 i915_kernel_lost_context(dev); 505 i915_kernel_lost_context(dev);
@@ -598,76 +595,69 @@ static int i915_quiescent(struct drm_device * dev)
598 return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__); 595 return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
599} 596}
600 597
601static int i915_flush_ioctl(DRM_IOCTL_ARGS) 598static int i915_flush_ioctl(struct drm_device *dev, void *data,
599 struct drm_file *file_priv)
602{ 600{
603 DRM_DEVICE; 601 LOCK_TEST_WITH_RETURN(dev, file_priv);
604
605 LOCK_TEST_WITH_RETURN(dev, filp);
606 602
607 return i915_quiescent(dev); 603 return i915_quiescent(dev);
608} 604}
609 605
610static int i915_batchbuffer(DRM_IOCTL_ARGS) 606static int i915_batchbuffer(struct drm_device *dev, void *data,
607 struct drm_file *file_priv)
611{ 608{
612 DRM_DEVICE;
613 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 609 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
614 u32 *hw_status = dev_priv->hw_status_page; 610 u32 *hw_status = dev_priv->hw_status_page;
615 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 611 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
616 dev_priv->sarea_priv; 612 dev_priv->sarea_priv;
617 drm_i915_batchbuffer_t batch; 613 drm_i915_batchbuffer_t *batch = data;
618 int ret; 614 int ret;
619 615
620 if (!dev_priv->allow_batchbuffer) { 616 if (!dev_priv->allow_batchbuffer) {
621 DRM_ERROR("Batchbuffer ioctl disabled\n"); 617 DRM_ERROR("Batchbuffer ioctl disabled\n");
622 return DRM_ERR(EINVAL); 618 return -EINVAL;
623 } 619 }
624 620
625 DRM_COPY_FROM_USER_IOCTL(batch, (drm_i915_batchbuffer_t __user *) data,
626 sizeof(batch));
627
628 DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n", 621 DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
629 batch.start, batch.used, batch.num_cliprects); 622 batch->start, batch->used, batch->num_cliprects);
630 623
631 LOCK_TEST_WITH_RETURN(dev, filp); 624 LOCK_TEST_WITH_RETURN(dev, file_priv);
632 625
633 if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects, 626 if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
634 batch.num_cliprects * 627 batch->num_cliprects *
635 sizeof(struct drm_clip_rect))) 628 sizeof(struct drm_clip_rect)))
636 return DRM_ERR(EFAULT); 629 return -EFAULT;
637 630
638 ret = i915_dispatch_batchbuffer(dev, &batch); 631 ret = i915_dispatch_batchbuffer(dev, batch);
639 632
640 sarea_priv->last_dispatch = (int)hw_status[5]; 633 sarea_priv->last_dispatch = (int)hw_status[5];
641 return ret; 634 return ret;
642} 635}
643 636
644static int i915_cmdbuffer(DRM_IOCTL_ARGS) 637static int i915_cmdbuffer(struct drm_device *dev, void *data,
638 struct drm_file *file_priv)
645{ 639{
646 DRM_DEVICE;
647 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 640 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
648 u32 *hw_status = dev_priv->hw_status_page; 641 u32 *hw_status = dev_priv->hw_status_page;
649 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 642 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
650 dev_priv->sarea_priv; 643 dev_priv->sarea_priv;
651 drm_i915_cmdbuffer_t cmdbuf; 644 drm_i915_cmdbuffer_t *cmdbuf = data;
652 int ret; 645 int ret;
653 646
654 DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_i915_cmdbuffer_t __user *) data,
655 sizeof(cmdbuf));
656
657 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", 647 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
658 cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects); 648 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
659 649
660 LOCK_TEST_WITH_RETURN(dev, filp); 650 LOCK_TEST_WITH_RETURN(dev, file_priv);
661 651
662 if (cmdbuf.num_cliprects && 652 if (cmdbuf->num_cliprects &&
663 DRM_VERIFYAREA_READ(cmdbuf.cliprects, 653 DRM_VERIFYAREA_READ(cmdbuf->cliprects,
664 cmdbuf.num_cliprects * 654 cmdbuf->num_cliprects *
665 sizeof(struct drm_clip_rect))) { 655 sizeof(struct drm_clip_rect))) {
666 DRM_ERROR("Fault accessing cliprects\n"); 656 DRM_ERROR("Fault accessing cliprects\n");
667 return DRM_ERR(EFAULT); 657 return -EFAULT;
668 } 658 }
669 659
670 ret = i915_dispatch_cmdbuffer(dev, &cmdbuf); 660 ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
671 if (ret) { 661 if (ret) {
672 DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); 662 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
673 return ret; 663 return ret;
@@ -677,33 +667,29 @@ static int i915_cmdbuffer(DRM_IOCTL_ARGS)
677 return 0; 667 return 0;
678} 668}
679 669
680static int i915_flip_bufs(DRM_IOCTL_ARGS) 670static int i915_flip_bufs(struct drm_device *dev, void *data,
671 struct drm_file *file_priv)
681{ 672{
682 DRM_DEVICE;
683
684 DRM_DEBUG("%s\n", __FUNCTION__); 673 DRM_DEBUG("%s\n", __FUNCTION__);
685 674
686 LOCK_TEST_WITH_RETURN(dev, filp); 675 LOCK_TEST_WITH_RETURN(dev, file_priv);
687 676
688 return i915_dispatch_flip(dev); 677 return i915_dispatch_flip(dev);
689} 678}
690 679
691static int i915_getparam(DRM_IOCTL_ARGS) 680static int i915_getparam(struct drm_device *dev, void *data,
681 struct drm_file *file_priv)
692{ 682{
693 DRM_DEVICE;
694 drm_i915_private_t *dev_priv = dev->dev_private; 683 drm_i915_private_t *dev_priv = dev->dev_private;
695 drm_i915_getparam_t param; 684 drm_i915_getparam_t *param = data;
696 int value; 685 int value;
697 686
698 if (!dev_priv) { 687 if (!dev_priv) {
699 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 688 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
700 return DRM_ERR(EINVAL); 689 return -EINVAL;
701 } 690 }
702 691
703 DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_getparam_t __user *) data, 692 switch (param->param) {
704 sizeof(param));
705
706 switch (param.param) {
707 case I915_PARAM_IRQ_ACTIVE: 693 case I915_PARAM_IRQ_ACTIVE:
708 value = dev->irq ? 1 : 0; 694 value = dev->irq ? 1 : 0;
709 break; 695 break;
@@ -714,68 +700,64 @@ static int i915_getparam(DRM_IOCTL_ARGS)
714 value = READ_BREADCRUMB(dev_priv); 700 value = READ_BREADCRUMB(dev_priv);
715 break; 701 break;
716 default: 702 default:
717 DRM_ERROR("Unknown parameter %d\n", param.param); 703 DRM_ERROR("Unknown parameter %d\n", param->param);
718 return DRM_ERR(EINVAL); 704 return -EINVAL;
719 } 705 }
720 706
721 if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { 707 if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
722 DRM_ERROR("DRM_COPY_TO_USER failed\n"); 708 DRM_ERROR("DRM_COPY_TO_USER failed\n");
723 return DRM_ERR(EFAULT); 709 return -EFAULT;
724 } 710 }
725 711
726 return 0; 712 return 0;
727} 713}
728 714
729static int i915_setparam(DRM_IOCTL_ARGS) 715static int i915_setparam(struct drm_device *dev, void *data,
716 struct drm_file *file_priv)
730{ 717{
731 DRM_DEVICE;
732 drm_i915_private_t *dev_priv = dev->dev_private; 718 drm_i915_private_t *dev_priv = dev->dev_private;
733 drm_i915_setparam_t param; 719 drm_i915_setparam_t *param = data;
734 720
735 if (!dev_priv) { 721 if (!dev_priv) {
736 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 722 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
737 return DRM_ERR(EINVAL); 723 return -EINVAL;
738 } 724 }
739 725
740 DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data, 726 switch (param->param) {
741 sizeof(param));
742
743 switch (param.param) {
744 case I915_SETPARAM_USE_MI_BATCHBUFFER_START: 727 case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
745 if (!IS_I965G(dev)) 728 if (!IS_I965G(dev))
746 dev_priv->use_mi_batchbuffer_start = param.value; 729 dev_priv->use_mi_batchbuffer_start = param->value;
747 break; 730 break;
748 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: 731 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
749 dev_priv->tex_lru_log_granularity = param.value; 732 dev_priv->tex_lru_log_granularity = param->value;
750 break; 733 break;
751 case I915_SETPARAM_ALLOW_BATCHBUFFER: 734 case I915_SETPARAM_ALLOW_BATCHBUFFER:
752 dev_priv->allow_batchbuffer = param.value; 735 dev_priv->allow_batchbuffer = param->value;
753 break; 736 break;
754 default: 737 default:
755 DRM_ERROR("unknown parameter %d\n", param.param); 738 DRM_ERROR("unknown parameter %d\n", param->param);
756 return DRM_ERR(EINVAL); 739 return -EINVAL;
757 } 740 }
758 741
759 return 0; 742 return 0;
760} 743}
761 744
762static int i915_set_status_page(DRM_IOCTL_ARGS) 745static int i915_set_status_page(struct drm_device *dev, void *data,
746 struct drm_file *file_priv)
763{ 747{
764 DRM_DEVICE;
765 drm_i915_private_t *dev_priv = dev->dev_private; 748 drm_i915_private_t *dev_priv = dev->dev_private;
766 drm_i915_hws_addr_t hws; 749 drm_i915_hws_addr_t *hws = data;
767 750
768 if (!dev_priv) { 751 if (!dev_priv) {
769 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 752 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
770 return DRM_ERR(EINVAL); 753 return -EINVAL;
771 } 754 }
772 DRM_COPY_FROM_USER_IOCTL(hws, (drm_i915_hws_addr_t __user *) data,
773 sizeof(hws));
774 printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws.addr);
775 755
776 dev_priv->status_gfx_addr = hws.addr & (0x1ffff<<12); 756 printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr);
757
758 dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
777 759
778 dev_priv->hws_map.offset = dev->agp->agp_info.aper_base + hws.addr; 760 dev_priv->hws_map.offset = dev->agp->agp_info.aper_base + hws->addr;
779 dev_priv->hws_map.size = 4*1024; 761 dev_priv->hws_map.size = 4*1024;
780 dev_priv->hws_map.type = 0; 762 dev_priv->hws_map.type = 0;
781 dev_priv->hws_map.flags = 0; 763 dev_priv->hws_map.flags = 0;
@@ -788,7 +770,7 @@ static int i915_set_status_page(DRM_IOCTL_ARGS)
788 dev_priv->status_gfx_addr = 0; 770 dev_priv->status_gfx_addr = 0;
789 DRM_ERROR("can not ioremap virtual address for" 771 DRM_ERROR("can not ioremap virtual address for"
790 " G33 hw status page\n"); 772 " G33 hw status page\n");
791 return DRM_ERR(ENOMEM); 773 return -ENOMEM;
792 } 774 }
793 dev_priv->hw_status_page = dev_priv->hws_map.handle; 775 dev_priv->hw_status_page = dev_priv->hws_map.handle;
794 776
@@ -821,32 +803,32 @@ void i915_driver_lastclose(struct drm_device * dev)
821 i915_dma_cleanup(dev); 803 i915_dma_cleanup(dev);
822} 804}
823 805
824void i915_driver_preclose(struct drm_device * dev, DRMFILE filp) 806void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
825{ 807{
826 if (dev->dev_private) { 808 if (dev->dev_private) {
827 drm_i915_private_t *dev_priv = dev->dev_private; 809 drm_i915_private_t *dev_priv = dev->dev_private;
828 i915_mem_release(dev, filp, dev_priv->agp_heap); 810 i915_mem_release(dev, file_priv, dev_priv->agp_heap);
829 } 811 }
830} 812}
831 813
832drm_ioctl_desc_t i915_ioctls[] = { 814struct drm_ioctl_desc i915_ioctls[] = {
833 [DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 815 DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
834 [DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, DRM_AUTH}, 816 DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
835 [DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, DRM_AUTH}, 817 DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
836 [DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, DRM_AUTH}, 818 DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
837 [DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, DRM_AUTH}, 819 DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
838 [DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, DRM_AUTH}, 820 DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
839 [DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, DRM_AUTH}, 821 DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
840 [DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 822 DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
841 [DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, DRM_AUTH}, 823 DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
842 [DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, DRM_AUTH}, 824 DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
843 [DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 825 DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
844 [DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH}, 826 DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
845 [DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] = { i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, 827 DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
846 [DRM_IOCTL_NR(DRM_I915_SET_VBLANK_PIPE)] = { i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, 828 DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
847 [DRM_IOCTL_NR(DRM_I915_GET_VBLANK_PIPE)] = { i915_vblank_pipe_get, DRM_AUTH }, 829 DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
848 [DRM_IOCTL_NR(DRM_I915_VBLANK_SWAP)] = {i915_vblank_swap, DRM_AUTH}, 830 DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
849 [DRM_IOCTL_NR(DRM_I915_HWS_ADDR)] = {i915_set_status_page, DRM_AUTH}, 831 DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
850}; 832};
851 833
852int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 834int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
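The ioctl table also changes shape: instead of open-coded designated initializers keyed by DRM_IOCTL_NR(), each entry is declared with DRM_IOCTL_DEF(), which records the full ioctl command alongside the handler and its permission flags. Quoted from memory rather than from this patch, the helper introduced in the core looks roughly like this:

/* Approximate shape of the new helper -- see drm_drv.c/drmP.h for the exact form. */
#define DRM_IOCTL_DEF(ioctl, _func, _flags)            \
        [DRM_IOCTL_NR(ioctl)] = {                      \
                .cmd   = ioctl,                        \
                .func  = _func,                        \
                .flags = _flags,                       \
        }

Keeping the command number in the table lets the core, rather than each driver handler, know the expected size and direction of every argument.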
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index 28b98733beb8..e064292e703a 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -70,7 +70,7 @@ struct mem_block {
70 struct mem_block *prev; 70 struct mem_block *prev;
71 int start; 71 int start;
72 int size; 72 int size;
73 DRMFILE filp; /* 0: free, -1: heap, other: real files */ 73 struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
74}; 74};
75 75
76typedef struct _drm_i915_vbl_swap { 76typedef struct _drm_i915_vbl_swap {
@@ -116,21 +116,24 @@ typedef struct drm_i915_private {
116 unsigned int swaps_pending; 116 unsigned int swaps_pending;
117} drm_i915_private_t; 117} drm_i915_private_t;
118 118
119extern drm_ioctl_desc_t i915_ioctls[]; 119extern struct drm_ioctl_desc i915_ioctls[];
120extern int i915_max_ioctl; 120extern int i915_max_ioctl;
121 121
122 /* i915_dma.c */ 122 /* i915_dma.c */
123extern void i915_kernel_lost_context(struct drm_device * dev); 123extern void i915_kernel_lost_context(struct drm_device * dev);
124extern int i915_driver_load(struct drm_device *, unsigned long flags); 124extern int i915_driver_load(struct drm_device *, unsigned long flags);
125extern void i915_driver_lastclose(struct drm_device * dev); 125extern void i915_driver_lastclose(struct drm_device * dev);
126extern void i915_driver_preclose(struct drm_device * dev, DRMFILE filp); 126extern void i915_driver_preclose(struct drm_device *dev,
127 struct drm_file *file_priv);
127extern int i915_driver_device_is_agp(struct drm_device * dev); 128extern int i915_driver_device_is_agp(struct drm_device * dev);
128extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 129extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
129 unsigned long arg); 130 unsigned long arg);
130 131
131/* i915_irq.c */ 132/* i915_irq.c */
132extern int i915_irq_emit(DRM_IOCTL_ARGS); 133extern int i915_irq_emit(struct drm_device *dev, void *data,
133extern int i915_irq_wait(DRM_IOCTL_ARGS); 134 struct drm_file *file_priv);
135extern int i915_irq_wait(struct drm_device *dev, void *data,
136 struct drm_file *file_priv);
134 137
135extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence); 138extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
136extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence); 139extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
@@ -138,18 +141,25 @@ extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
138extern void i915_driver_irq_preinstall(struct drm_device * dev); 141extern void i915_driver_irq_preinstall(struct drm_device * dev);
139extern void i915_driver_irq_postinstall(struct drm_device * dev); 142extern void i915_driver_irq_postinstall(struct drm_device * dev);
140extern void i915_driver_irq_uninstall(struct drm_device * dev); 143extern void i915_driver_irq_uninstall(struct drm_device * dev);
141extern int i915_vblank_pipe_set(DRM_IOCTL_ARGS); 144extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
142extern int i915_vblank_pipe_get(DRM_IOCTL_ARGS); 145 struct drm_file *file_priv);
143extern int i915_vblank_swap(DRM_IOCTL_ARGS); 146extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
147 struct drm_file *file_priv);
148extern int i915_vblank_swap(struct drm_device *dev, void *data,
149 struct drm_file *file_priv);
144 150
145/* i915_mem.c */ 151/* i915_mem.c */
146extern int i915_mem_alloc(DRM_IOCTL_ARGS); 152extern int i915_mem_alloc(struct drm_device *dev, void *data,
147extern int i915_mem_free(DRM_IOCTL_ARGS); 153 struct drm_file *file_priv);
148extern int i915_mem_init_heap(DRM_IOCTL_ARGS); 154extern int i915_mem_free(struct drm_device *dev, void *data,
149extern int i915_mem_destroy_heap(DRM_IOCTL_ARGS); 155 struct drm_file *file_priv);
156extern int i915_mem_init_heap(struct drm_device *dev, void *data,
157 struct drm_file *file_priv);
158extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
159 struct drm_file *file_priv);
150extern void i915_mem_takedown(struct mem_block **heap); 160extern void i915_mem_takedown(struct mem_block **heap);
151extern void i915_mem_release(struct drm_device * dev, 161extern void i915_mem_release(struct drm_device * dev,
152 DRMFILE filp, struct mem_block *heap); 162 struct drm_file *file_priv, struct mem_block *heap);
153 163
154#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg)) 164#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
155#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val)) 165#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
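All of the prototype churn in this header follows from one typedef change in the core: the driver ioctl entry point is now a function of (device, data, file) instead of the old OS-dependent DRM_IOCTL_ARGS macro. A paraphrased before/after (the exact typedef lives in drmP.h/drm_os_linux.h; the forward declarations are only there to make the sketch a valid translation unit):

struct inode;
struct file;
struct drm_device;
struct drm_file;

/* Old style: DRM_IOCTL_ARGS hid this parameter list behind a macro, and handlers
 * had to start with DRM_DEVICE to dig the device back out of filp. */
typedef int old_drm_ioctl_t(struct inode *inode, struct file *filp,
                            unsigned int cmd, unsigned long arg);

/* New style: explicit, OS-neutral parameters; 'data' is a kernel copy of the
 * ioctl argument and 'file_priv' identifies the calling client. */
typedef int drm_ioctl_t(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);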
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index bb8e9e9c8201..a443f4a202e3 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -311,7 +311,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
311 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, 311 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
312 READ_BREADCRUMB(dev_priv) >= irq_nr); 312 READ_BREADCRUMB(dev_priv) >= irq_nr);
313 313
314 if (ret == DRM_ERR(EBUSY)) { 314 if (ret == -EBUSY) {
315 DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n", 315 DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n",
316 __FUNCTION__, 316 __FUNCTION__,
317 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); 317 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
@@ -330,7 +330,7 @@ static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequ
330 330
331 if (!dev_priv) { 331 if (!dev_priv) {
332 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 332 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
333 return DRM_ERR(EINVAL); 333 return -EINVAL;
334 } 334 }
335 335
336 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 336 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
@@ -355,28 +355,25 @@ int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
355 355
356/* Needs the lock as it touches the ring. 356/* Needs the lock as it touches the ring.
357 */ 357 */
358int i915_irq_emit(DRM_IOCTL_ARGS) 358int i915_irq_emit(struct drm_device *dev, void *data,
359 struct drm_file *file_priv)
359{ 360{
360 DRM_DEVICE;
361 drm_i915_private_t *dev_priv = dev->dev_private; 361 drm_i915_private_t *dev_priv = dev->dev_private;
362 drm_i915_irq_emit_t emit; 362 drm_i915_irq_emit_t *emit = data;
363 int result; 363 int result;
364 364
365 LOCK_TEST_WITH_RETURN(dev, filp); 365 LOCK_TEST_WITH_RETURN(dev, file_priv);
366 366
367 if (!dev_priv) { 367 if (!dev_priv) {
368 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 368 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
369 return DRM_ERR(EINVAL); 369 return -EINVAL;
370 } 370 }
371 371
372 DRM_COPY_FROM_USER_IOCTL(emit, (drm_i915_irq_emit_t __user *) data,
373 sizeof(emit));
374
375 result = i915_emit_irq(dev); 372 result = i915_emit_irq(dev);
376 373
377 if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) { 374 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
378 DRM_ERROR("copy_to_user\n"); 375 DRM_ERROR("copy_to_user\n");
379 return DRM_ERR(EFAULT); 376 return -EFAULT;
380 } 377 }
381 378
382 return 0; 379 return 0;
@@ -384,21 +381,18 @@ int i915_irq_emit(DRM_IOCTL_ARGS)
384 381
385/* Doesn't need the hardware lock. 382/* Doesn't need the hardware lock.
386 */ 383 */
387int i915_irq_wait(DRM_IOCTL_ARGS) 384int i915_irq_wait(struct drm_device *dev, void *data,
385 struct drm_file *file_priv)
388{ 386{
389 DRM_DEVICE;
390 drm_i915_private_t *dev_priv = dev->dev_private; 387 drm_i915_private_t *dev_priv = dev->dev_private;
391 drm_i915_irq_wait_t irqwait; 388 drm_i915_irq_wait_t *irqwait = data;
392 389
393 if (!dev_priv) { 390 if (!dev_priv) {
394 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 391 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
395 return DRM_ERR(EINVAL); 392 return -EINVAL;
396 } 393 }
397 394
398 DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_i915_irq_wait_t __user *) data, 395 return i915_wait_irq(dev, irqwait->irq_seq);
399 sizeof(irqwait));
400
401 return i915_wait_irq(dev, irqwait.irq_seq);
402} 396}
403 397
404static void i915_enable_interrupt (struct drm_device *dev) 398static void i915_enable_interrupt (struct drm_device *dev)
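One subtlety visible in i915_irq_emit above: the top-level struct is now copied in and out by the core, but emit->irq_seq is itself a pointer into the caller's address space, so the handler still needs an explicit DRM_COPY_TO_USER for the value it points at. Only the flat part of the argument travels automatically. A self-contained model (the struct, the handler and copy_to_user_model are stand-ins for the example, not the real definitions):

#include <errno.h>
#include <stddef.h>
#include <string.h>

struct drm_device { int counter; };
struct drm_file   { int dummy; };

/* Userspace stand-in; the real copy_to_user returns the number of bytes NOT copied. */
static int copy_to_user_model(void *dst, const void *src, size_t n)
{
        memcpy(dst, src, n);
        return 0;
}

/* Modelled on drm_i915_irq_emit_t: the struct carries a pointer into the
 * caller's address space. */
struct emit_arg { int *irq_seq; };

static int irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct emit_arg *emit = data;   /* flat part: already copied in by the core */
        int result = ++dev->counter;    /* stands in for i915_emit_irq() */

        /* What irq_seq points at is still user memory, so an explicit
         * copy_to_user-style store is still required. */
        if (copy_to_user_model(emit->irq_seq, &result, sizeof(result)))
                return -EFAULT;
        return 0;
}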
@@ -417,64 +411,60 @@ static void i915_enable_interrupt (struct drm_device *dev)
417 411
418/* Set the vblank monitor pipe 412/* Set the vblank monitor pipe
419 */ 413 */
420int i915_vblank_pipe_set(DRM_IOCTL_ARGS) 414int i915_vblank_pipe_set(struct drm_device *dev, void *data,
415 struct drm_file *file_priv)
421{ 416{
422 DRM_DEVICE;
423 drm_i915_private_t *dev_priv = dev->dev_private; 417 drm_i915_private_t *dev_priv = dev->dev_private;
424 drm_i915_vblank_pipe_t pipe; 418 drm_i915_vblank_pipe_t *pipe = data;
425 419
426 if (!dev_priv) { 420 if (!dev_priv) {
427 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 421 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
428 return DRM_ERR(EINVAL); 422 return -EINVAL;
429 } 423 }
430 424
431 DRM_COPY_FROM_USER_IOCTL(pipe, (drm_i915_vblank_pipe_t __user *) data, 425 if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
432 sizeof(pipe));
433
434 if (pipe.pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
435 DRM_ERROR("%s called with invalid pipe 0x%x\n", 426 DRM_ERROR("%s called with invalid pipe 0x%x\n",
436 __FUNCTION__, pipe.pipe); 427 __FUNCTION__, pipe->pipe);
437 return DRM_ERR(EINVAL); 428 return -EINVAL;
438 } 429 }
439 430
440 dev_priv->vblank_pipe = pipe.pipe; 431 dev_priv->vblank_pipe = pipe->pipe;
441 432
442 i915_enable_interrupt (dev); 433 i915_enable_interrupt (dev);
443 434
444 return 0; 435 return 0;
445} 436}
446 437
447int i915_vblank_pipe_get(DRM_IOCTL_ARGS) 438int i915_vblank_pipe_get(struct drm_device *dev, void *data,
439 struct drm_file *file_priv)
448{ 440{
449 DRM_DEVICE;
450 drm_i915_private_t *dev_priv = dev->dev_private; 441 drm_i915_private_t *dev_priv = dev->dev_private;
451 drm_i915_vblank_pipe_t pipe; 442 drm_i915_vblank_pipe_t *pipe = data;
452 u16 flag; 443 u16 flag;
453 444
454 if (!dev_priv) { 445 if (!dev_priv) {
455 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 446 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
456 return DRM_ERR(EINVAL); 447 return -EINVAL;
457 } 448 }
458 449
459 flag = I915_READ(I915REG_INT_ENABLE_R); 450 flag = I915_READ(I915REG_INT_ENABLE_R);
460 pipe.pipe = 0; 451 pipe->pipe = 0;
461 if (flag & VSYNC_PIPEA_FLAG) 452 if (flag & VSYNC_PIPEA_FLAG)
462 pipe.pipe |= DRM_I915_VBLANK_PIPE_A; 453 pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
463 if (flag & VSYNC_PIPEB_FLAG) 454 if (flag & VSYNC_PIPEB_FLAG)
464 pipe.pipe |= DRM_I915_VBLANK_PIPE_B; 455 pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
465 DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_pipe_t __user *) data, pipe, 456
466 sizeof(pipe));
467 return 0; 457 return 0;
468} 458}
469 459
470/** 460/**
471 * Schedule buffer swap at given vertical blank. 461 * Schedule buffer swap at given vertical blank.
472 */ 462 */
473int i915_vblank_swap(DRM_IOCTL_ARGS) 463int i915_vblank_swap(struct drm_device *dev, void *data,
464 struct drm_file *file_priv)
474{ 465{
475 DRM_DEVICE;
476 drm_i915_private_t *dev_priv = dev->dev_private; 466 drm_i915_private_t *dev_priv = dev->dev_private;
477 drm_i915_vblank_swap_t swap; 467 drm_i915_vblank_swap_t *swap = data;
478 drm_i915_vbl_swap_t *vbl_swap; 468 drm_i915_vbl_swap_t *vbl_swap;
479 unsigned int pipe, seqtype, curseq; 469 unsigned int pipe, seqtype, curseq;
480 unsigned long irqflags; 470 unsigned long irqflags;
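Threaded through every hunk is the other mechanical change: DRM_ERR(EINVAL) becomes a bare -EINVAL. On Linux the macro was already the negated errno (drm_os_linux.h had, as far as I recall, the definition below), so this is a readability cleanup rather than a behavioural change; the wrapper only earned its keep when the code was shared with the BSD DRM.

#include <errno.h>

#define DRM_ERR(d) -(d)   /* what drm_os_linux.h provided on Linux (from memory) */

static int old_style(void) { return DRM_ERR(EINVAL); }   /* expands to -(EINVAL) */
static int new_style(void) { return -EINVAL; }            /* identical return value */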
@@ -482,38 +472,35 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
482 472
483 if (!dev_priv) { 473 if (!dev_priv) {
484 DRM_ERROR("%s called with no initialization\n", __func__); 474 DRM_ERROR("%s called with no initialization\n", __func__);
485 return DRM_ERR(EINVAL); 475 return -EINVAL;
486 } 476 }
487 477
488 if (dev_priv->sarea_priv->rotation) { 478 if (dev_priv->sarea_priv->rotation) {
489 DRM_DEBUG("Rotation not supported\n"); 479 DRM_DEBUG("Rotation not supported\n");
490 return DRM_ERR(EINVAL); 480 return -EINVAL;
491 } 481 }
492 482
493 DRM_COPY_FROM_USER_IOCTL(swap, (drm_i915_vblank_swap_t __user *) data, 483 if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
494 sizeof(swap));
495
496 if (swap.seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
497 _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) { 484 _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
498 DRM_ERROR("Invalid sequence type 0x%x\n", swap.seqtype); 485 DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
499 return DRM_ERR(EINVAL); 486 return -EINVAL;
500 } 487 }
501 488
502 pipe = (swap.seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; 489 pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
503 490
504 seqtype = swap.seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); 491 seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
505 492
506 if (!(dev_priv->vblank_pipe & (1 << pipe))) { 493 if (!(dev_priv->vblank_pipe & (1 << pipe))) {
507 DRM_ERROR("Invalid pipe %d\n", pipe); 494 DRM_ERROR("Invalid pipe %d\n", pipe);
508 return DRM_ERR(EINVAL); 495 return -EINVAL;
509 } 496 }
510 497
511 spin_lock_irqsave(&dev->drw_lock, irqflags); 498 spin_lock_irqsave(&dev->drw_lock, irqflags);
512 499
513 if (!drm_get_drawable_info(dev, swap.drawable)) { 500 if (!drm_get_drawable_info(dev, swap->drawable)) {
514 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 501 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
515 DRM_DEBUG("Invalid drawable ID %d\n", swap.drawable); 502 DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
516 return DRM_ERR(EINVAL); 503 return -EINVAL;
517 } 504 }
518 505
519 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 506 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
@@ -521,14 +508,14 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
521 curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received); 508 curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
522 509
523 if (seqtype == _DRM_VBLANK_RELATIVE) 510 if (seqtype == _DRM_VBLANK_RELATIVE)
524 swap.sequence += curseq; 511 swap->sequence += curseq;
525 512
526 if ((curseq - swap.sequence) <= (1<<23)) { 513 if ((curseq - swap->sequence) <= (1<<23)) {
527 if (swap.seqtype & _DRM_VBLANK_NEXTONMISS) { 514 if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
528 swap.sequence = curseq + 1; 515 swap->sequence = curseq + 1;
529 } else { 516 } else {
530 DRM_DEBUG("Missed target sequence\n"); 517 DRM_DEBUG("Missed target sequence\n");
531 return DRM_ERR(EINVAL); 518 return -EINVAL;
532 } 519 }
533 } 520 }
534 521
@@ -537,9 +524,9 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
537 list_for_each(list, &dev_priv->vbl_swaps.head) { 524 list_for_each(list, &dev_priv->vbl_swaps.head) {
538 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); 525 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
539 526
540 if (vbl_swap->drw_id == swap.drawable && 527 if (vbl_swap->drw_id == swap->drawable &&
541 vbl_swap->pipe == pipe && 528 vbl_swap->pipe == pipe &&
542 vbl_swap->sequence == swap.sequence) { 529 vbl_swap->sequence == swap->sequence) {
543 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); 530 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
544 DRM_DEBUG("Already scheduled\n"); 531 DRM_DEBUG("Already scheduled\n");
545 return 0; 532 return 0;
@@ -550,21 +537,21 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
550 537
551 if (dev_priv->swaps_pending >= 100) { 538 if (dev_priv->swaps_pending >= 100) {
552 DRM_DEBUG("Too many swaps queued\n"); 539 DRM_DEBUG("Too many swaps queued\n");
553 return DRM_ERR(EBUSY); 540 return -EBUSY;
554 } 541 }
555 542
556 vbl_swap = drm_calloc(1, sizeof(vbl_swap), DRM_MEM_DRIVER); 543 vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
557 544
558 if (!vbl_swap) { 545 if (!vbl_swap) {
559 DRM_ERROR("Failed to allocate memory to queue swap\n"); 546 DRM_ERROR("Failed to allocate memory to queue swap\n");
560 return DRM_ERR(ENOMEM); 547 return -ENOMEM;
561 } 548 }
562 549
563 DRM_DEBUG("\n"); 550 DRM_DEBUG("\n");
564 551
565 vbl_swap->drw_id = swap.drawable; 552 vbl_swap->drw_id = swap->drawable;
566 vbl_swap->pipe = pipe; 553 vbl_swap->pipe = pipe;
567 vbl_swap->sequence = swap.sequence; 554 vbl_swap->sequence = swap->sequence;
568 555
569 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); 556 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
570 557
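Buried in the hunk above is a real fix independent of the interface rework: the old code allocated sizeof(vbl_swap) bytes, i.e. the size of a pointer, for a drm_i915_vbl_swap_t, while the new code asks for sizeof(*vbl_swap). Sizing an allocation from the dereferenced pointer is the idiom that avoids this class of bug; a minimal standalone illustration (the struct is a simplified stand-in):

#include <stdlib.h>

struct vbl_swap { unsigned int drw_id, pipe, sequence; };

int main(void)
{
        struct vbl_swap *s;

        /* Buggy pattern: allocates sizeof(struct vbl_swap *) bytes (4 or 8). */
        /* s = calloc(1, sizeof(s)); */

        /* Correct pattern: sized from what the pointer points at. */
        s = calloc(1, sizeof(*s));
        if (!s)
                return 1;
        free(s);
        return 0;
}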
@@ -573,9 +560,6 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
573 560
574 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); 561 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
575 562
576 DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_swap_t __user *) data, swap,
577 sizeof(swap));
578
579 return 0; 563 return 0;
580} 564}
581 565
diff --git a/drivers/char/drm/i915_mem.c b/drivers/char/drm/i915_mem.c
index 50b4bacef0e0..56fb9b30a5d7 100644
--- a/drivers/char/drm/i915_mem.c
+++ b/drivers/char/drm/i915_mem.c
@@ -89,7 +89,7 @@ static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
89 */ 89 */
90 90
91static struct mem_block *split_block(struct mem_block *p, int start, int size, 91static struct mem_block *split_block(struct mem_block *p, int start, int size,
92 DRMFILE filp) 92 struct drm_file *file_priv)
93{ 93{
94 /* Maybe cut off the start of an existing block */ 94 /* Maybe cut off the start of an existing block */
95 if (start > p->start) { 95 if (start > p->start) {
@@ -99,7 +99,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
99 goto out; 99 goto out;
100 newblock->start = start; 100 newblock->start = start;
101 newblock->size = p->size - (start - p->start); 101 newblock->size = p->size - (start - p->start);
102 newblock->filp = NULL; 102 newblock->file_priv = NULL;
103 newblock->next = p->next; 103 newblock->next = p->next;
104 newblock->prev = p; 104 newblock->prev = p;
105 p->next->prev = newblock; 105 p->next->prev = newblock;
@@ -116,7 +116,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
116 goto out; 116 goto out;
117 newblock->start = start + size; 117 newblock->start = start + size;
118 newblock->size = p->size - size; 118 newblock->size = p->size - size;
119 newblock->filp = NULL; 119 newblock->file_priv = NULL;
120 newblock->next = p->next; 120 newblock->next = p->next;
121 newblock->prev = p; 121 newblock->prev = p;
122 p->next->prev = newblock; 122 p->next->prev = newblock;
@@ -126,20 +126,20 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
126 126
127 out: 127 out:
128 /* Our block is in the middle */ 128 /* Our block is in the middle */
129 p->filp = filp; 129 p->file_priv = file_priv;
130 return p; 130 return p;
131} 131}
132 132
133static struct mem_block *alloc_block(struct mem_block *heap, int size, 133static struct mem_block *alloc_block(struct mem_block *heap, int size,
134 int align2, DRMFILE filp) 134 int align2, struct drm_file *file_priv)
135{ 135{
136 struct mem_block *p; 136 struct mem_block *p;
137 int mask = (1 << align2) - 1; 137 int mask = (1 << align2) - 1;
138 138
139 for (p = heap->next; p != heap; p = p->next) { 139 for (p = heap->next; p != heap; p = p->next) {
140 int start = (p->start + mask) & ~mask; 140 int start = (p->start + mask) & ~mask;
141 if (p->filp == NULL && start + size <= p->start + p->size) 141 if (p->file_priv == NULL && start + size <= p->start + p->size)
142 return split_block(p, start, size, filp); 142 return split_block(p, start, size, file_priv);
143 } 143 }
144 144
145 return NULL; 145 return NULL;
@@ -158,12 +158,12 @@ static struct mem_block *find_block(struct mem_block *heap, int start)
158 158
159static void free_block(struct mem_block *p) 159static void free_block(struct mem_block *p)
160{ 160{
161 p->filp = NULL; 161 p->file_priv = NULL;
162 162
163 /* Assumes a single contiguous range. Needs a special filp in 163 /* Assumes a single contiguous range. Needs a special file_priv in
164 * 'heap' to stop it being subsumed. 164 * 'heap' to stop it being subsumed.
165 */ 165 */
166 if (p->next->filp == NULL) { 166 if (p->next->file_priv == NULL) {
167 struct mem_block *q = p->next; 167 struct mem_block *q = p->next;
168 p->size += q->size; 168 p->size += q->size;
169 p->next = q->next; 169 p->next = q->next;
@@ -171,7 +171,7 @@ static void free_block(struct mem_block *p)
171 drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS); 171 drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
172 } 172 }
173 173
174 if (p->prev->filp == NULL) { 174 if (p->prev->file_priv == NULL) {
175 struct mem_block *q = p->prev; 175 struct mem_block *q = p->prev;
176 q->size += p->size; 176 q->size += p->size;
177 q->next = p->next; 177 q->next = p->next;
@@ -197,18 +197,19 @@ static int init_heap(struct mem_block **heap, int start, int size)
197 197
198 blocks->start = start; 198 blocks->start = start;
199 blocks->size = size; 199 blocks->size = size;
200 blocks->filp = NULL; 200 blocks->file_priv = NULL;
201 blocks->next = blocks->prev = *heap; 201 blocks->next = blocks->prev = *heap;
202 202
203 memset(*heap, 0, sizeof(**heap)); 203 memset(*heap, 0, sizeof(**heap));
204 (*heap)->filp = (DRMFILE) - 1; 204 (*heap)->file_priv = (struct drm_file *) - 1;
205 (*heap)->next = (*heap)->prev = blocks; 205 (*heap)->next = (*heap)->prev = blocks;
206 return 0; 206 return 0;
207} 207}
208 208
209/* Free all blocks associated with the releasing file. 209/* Free all blocks associated with the releasing file.
210 */ 210 */
211void i915_mem_release(struct drm_device * dev, DRMFILE filp, struct mem_block *heap) 211void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv,
212 struct mem_block *heap)
212{ 213{
213 struct mem_block *p; 214 struct mem_block *p;
214 215
@@ -216,17 +217,17 @@ void i915_mem_release(struct drm_device * dev, DRMFILE filp, struct mem_block *h
216 return; 217 return;
217 218
218 for (p = heap->next; p != heap; p = p->next) { 219 for (p = heap->next; p != heap; p = p->next) {
219 if (p->filp == filp) { 220 if (p->file_priv == file_priv) {
220 p->filp = NULL; 221 p->file_priv = NULL;
221 mark_block(dev, p, 0); 222 mark_block(dev, p, 0);
222 } 223 }
223 } 224 }
224 225
225 /* Assumes a single contiguous range. Needs a special filp in 226 /* Assumes a single contiguous range. Needs a special file_priv in
226 * 'heap' to stop it being subsumed. 227 * 'heap' to stop it being subsumed.
227 */ 228 */
228 for (p = heap->next; p != heap; p = p->next) { 229 for (p = heap->next; p != heap; p = p->next) {
229 while (p->filp == NULL && p->next->filp == NULL) { 230 while (p->file_priv == NULL && p->next->file_priv == NULL) {
230 struct mem_block *q = p->next; 231 struct mem_block *q = p->next;
231 p->size += q->size; 232 p->size += q->size;
232 p->next = q->next; 233 p->next = q->next;
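The mem_block changes above show why the filp-to-file_priv switch is more than a rename: every block records which client owns it, the heap's own header block is tagged with an intentionally invalid sentinel pointer ((struct drm_file *)-1) so it can never be treated as free or merged away, and i915_mem_release walks the ring freeing and coalescing everything owned by a departing client. A self-contained model of that ownership scheme (names such as `struct client`, `HEAP_SENTINEL` and `release_client` are inventions for the sketch):

#include <stdio.h>
#include <stdlib.h>

struct client { int id; };                       /* stands in for struct drm_file */
#define HEAP_SENTINEL ((struct client *)-1)      /* same trick as (struct drm_file *)-1 */

struct block {
        struct block *next, *prev;
        int start, size;
        struct client *owner;                    /* NULL: free, HEAP_SENTINEL: heap head */
};

/* Free every block owned by 'c', then merge adjacent free blocks.
 * Mirrors the structure of i915_mem_release(). */
static void release_client(struct block *heap, struct client *c)
{
        struct block *p;

        for (p = heap->next; p != heap; p = p->next)
                if (p->owner == c)
                        p->owner = NULL;

        for (p = heap->next; p != heap; p = p->next) {
                while (p->owner == NULL && p->next->owner == NULL) {
                        struct block *q = p->next;
                        p->size += q->size;       /* coalesce with the next free block */
                        p->next = q->next;
                        p->next->prev = p;
                        free(q);
                }
        }
}

int main(void)
{
        struct client a = { 1 };
        struct block heap = { .owner = HEAP_SENTINEL };
        struct block *b1 = calloc(1, sizeof(*b1));
        struct block *b2 = calloc(1, sizeof(*b2));

        if (!b1 || !b2)
                return 1;

        /* ring: heap <-> b1 <-> b2 <-> heap; b1 owned by client a, b2 free */
        heap.next = b1;  heap.prev = b2;
        b1->prev = &heap; b1->next = b2;    b1->size = 4096; b1->owner = &a;
        b2->prev = b1;    b2->next = &heap; b2->size = 4096; b2->owner = NULL;

        release_client(&heap, &a);
        printf("first block size after release: %d\n", heap.next->size); /* 8192 */
        free(heap.next);
        return 0;
}

The sentinel is what stops the coalescing loop at the heap header: its owner is never NULL, so client blocks can merge with each other but never with the list head.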
@@ -267,129 +268,117 @@ static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
267 268
268/* IOCTL HANDLERS */ 269/* IOCTL HANDLERS */
269 270
270int i915_mem_alloc(DRM_IOCTL_ARGS) 271int i915_mem_alloc(struct drm_device *dev, void *data,
272 struct drm_file *file_priv)
271{ 273{
272 DRM_DEVICE;
273 drm_i915_private_t *dev_priv = dev->dev_private; 274 drm_i915_private_t *dev_priv = dev->dev_private;
274 drm_i915_mem_alloc_t alloc; 275 drm_i915_mem_alloc_t *alloc = data;
275 struct mem_block *block, **heap; 276 struct mem_block *block, **heap;
276 277
277 if (!dev_priv) { 278 if (!dev_priv) {
278 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 279 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
279 return DRM_ERR(EINVAL); 280 return -EINVAL;
280 } 281 }
281 282
282 DRM_COPY_FROM_USER_IOCTL(alloc, (drm_i915_mem_alloc_t __user *) data, 283 heap = get_heap(dev_priv, alloc->region);
283 sizeof(alloc));
284
285 heap = get_heap(dev_priv, alloc.region);
286 if (!heap || !*heap) 284 if (!heap || !*heap)
287 return DRM_ERR(EFAULT); 285 return -EFAULT;
288 286
289 /* Make things easier on ourselves: all allocations at least 287 /* Make things easier on ourselves: all allocations at least
290 * 4k aligned. 288 * 4k aligned.
291 */ 289 */
292 if (alloc.alignment < 12) 290 if (alloc->alignment < 12)
293 alloc.alignment = 12; 291 alloc->alignment = 12;
294 292
295 block = alloc_block(*heap, alloc.size, alloc.alignment, filp); 293 block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
296 294
297 if (!block) 295 if (!block)
298 return DRM_ERR(ENOMEM); 296 return -ENOMEM;
299 297
300 mark_block(dev, block, 1); 298 mark_block(dev, block, 1);
301 299
302 if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) { 300 if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
301 sizeof(int))) {
303 DRM_ERROR("copy_to_user\n"); 302 DRM_ERROR("copy_to_user\n");
304 return DRM_ERR(EFAULT); 303 return -EFAULT;
305 } 304 }
306 305
307 return 0; 306 return 0;
308} 307}
309 308
310int i915_mem_free(DRM_IOCTL_ARGS) 309int i915_mem_free(struct drm_device *dev, void *data,
310 struct drm_file *file_priv)
311{ 311{
312 DRM_DEVICE;
313 drm_i915_private_t *dev_priv = dev->dev_private; 312 drm_i915_private_t *dev_priv = dev->dev_private;
314 drm_i915_mem_free_t memfree; 313 drm_i915_mem_free_t *memfree = data;
315 struct mem_block *block, **heap; 314 struct mem_block *block, **heap;
316 315
317 if (!dev_priv) { 316 if (!dev_priv) {
318 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 317 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
319 return DRM_ERR(EINVAL); 318 return -EINVAL;
320 } 319 }
321 320
322 DRM_COPY_FROM_USER_IOCTL(memfree, (drm_i915_mem_free_t __user *) data, 321 heap = get_heap(dev_priv, memfree->region);
323 sizeof(memfree));
324
325 heap = get_heap(dev_priv, memfree.region);
326 if (!heap || !*heap) 322 if (!heap || !*heap)
327 return DRM_ERR(EFAULT); 323 return -EFAULT;
328 324
329 block = find_block(*heap, memfree.region_offset); 325 block = find_block(*heap, memfree->region_offset);
330 if (!block) 326 if (!block)
331 return DRM_ERR(EFAULT); 327 return -EFAULT;
332 328
333 if (block->filp != filp) 329 if (block->file_priv != file_priv)
334 return DRM_ERR(EPERM); 330 return -EPERM;
335 331
336 mark_block(dev, block, 0); 332 mark_block(dev, block, 0);
337 free_block(block); 333 free_block(block);
338 return 0; 334 return 0;
339} 335}
340 336
341int i915_mem_init_heap(DRM_IOCTL_ARGS) 337int i915_mem_init_heap(struct drm_device *dev, void *data,
338 struct drm_file *file_priv)
342{ 339{
343 DRM_DEVICE;
344 drm_i915_private_t *dev_priv = dev->dev_private; 340 drm_i915_private_t *dev_priv = dev->dev_private;
345 drm_i915_mem_init_heap_t initheap; 341 drm_i915_mem_init_heap_t *initheap = data;
346 struct mem_block **heap; 342 struct mem_block **heap;
347 343
348 if (!dev_priv) { 344 if (!dev_priv) {
349 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 345 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
350 return DRM_ERR(EINVAL); 346 return -EINVAL;
351 } 347 }
352 348
353 DRM_COPY_FROM_USER_IOCTL(initheap, 349 heap = get_heap(dev_priv, initheap->region);
354 (drm_i915_mem_init_heap_t __user *) data,
355 sizeof(initheap));
356
357 heap = get_heap(dev_priv, initheap.region);
358 if (!heap) 350 if (!heap)
359 return DRM_ERR(EFAULT); 351 return -EFAULT;
360 352
361 if (*heap) { 353 if (*heap) {
362 DRM_ERROR("heap already initialized?"); 354 DRM_ERROR("heap already initialized?");
363 return DRM_ERR(EFAULT); 355 return -EFAULT;
364 } 356 }
365 357
366 return init_heap(heap, initheap.start, initheap.size); 358 return init_heap(heap, initheap->start, initheap->size);
367} 359}
368 360
369int i915_mem_destroy_heap( DRM_IOCTL_ARGS ) 361int i915_mem_destroy_heap( struct drm_device *dev, void *data,
362 struct drm_file *file_priv )
370{ 363{
371 DRM_DEVICE;
372 drm_i915_private_t *dev_priv = dev->dev_private; 364 drm_i915_private_t *dev_priv = dev->dev_private;
373 drm_i915_mem_destroy_heap_t destroyheap; 365 drm_i915_mem_destroy_heap_t *destroyheap = data;
374 struct mem_block **heap; 366 struct mem_block **heap;
375 367
376 if ( !dev_priv ) { 368 if ( !dev_priv ) {
377 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); 369 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
378 return DRM_ERR(EINVAL); 370 return -EINVAL;
379 } 371 }
380 372
381 DRM_COPY_FROM_USER_IOCTL( destroyheap, (drm_i915_mem_destroy_heap_t *)data, 373 heap = get_heap( dev_priv, destroyheap->region );
382 sizeof(destroyheap) );
383
384 heap = get_heap( dev_priv, destroyheap.region );
385 if (!heap) { 374 if (!heap) {
386 DRM_ERROR("get_heap failed"); 375 DRM_ERROR("get_heap failed");
387 return DRM_ERR(EFAULT); 376 return -EFAULT;
388 } 377 }
389 378
390 if (!*heap) { 379 if (!*heap) {
391 DRM_ERROR("heap not initialized?"); 380 DRM_ERROR("heap not initialized?");
392 return DRM_ERR(EFAULT); 381 return -EFAULT;
393 } 382 }
394 383
395 i915_mem_takedown( heap ); 384 i915_mem_takedown( heap );
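With real struct drm_file pointers in the blocks, i915_mem_free can now enforce ownership by straight pointer comparison (someone else's block earns -EPERM, an unknown offset -EFAULT) before any hardware state is touched. The allocation path itself is unchanged in substance, only re-plumbed: i915_mem_alloc still clamps the requested alignment to at least 2^12 so everything is page aligned, and alloc_block rounds each candidate start up with the usual mask trick. That arithmetic, in isolation (`align_up` is a name invented for the example):

#include <assert.h>

/* Round 'start' up to a 2^align2 boundary -- the (start + mask) & ~mask idiom
 * used by alloc_block(). */
static int align_up(int start, int align2)
{
        int mask = (1 << align2) - 1;
        return (start + mask) & ~mask;
}

int main(void)
{
        assert(align_up(0x1001, 12) == 0x2000);  /* just past a page -> next page */
        assert(align_up(0x2000, 12) == 0x2000);  /* already aligned -> unchanged */
        return 0;
}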
diff --git a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c
index 9c73a6e3861b..c567c34cda78 100644
--- a/drivers/char/drm/mga_dma.c
+++ b/drivers/char/drm/mga_dma.c
@@ -71,7 +71,7 @@ int mga_do_wait_for_idle(drm_mga_private_t * dev_priv)
71 DRM_ERROR("failed!\n"); 71 DRM_ERROR("failed!\n");
72 DRM_INFO(" status=0x%08x\n", status); 72 DRM_INFO(" status=0x%08x\n", status);
73#endif 73#endif
74 return DRM_ERR(EBUSY); 74 return -EBUSY;
75} 75}
76 76
77static int mga_do_dma_reset(drm_mga_private_t * dev_priv) 77static int mga_do_dma_reset(drm_mga_private_t * dev_priv)
@@ -256,7 +256,7 @@ static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_pr
256 256
257 dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); 257 dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
258 if (dev_priv->head == NULL) 258 if (dev_priv->head == NULL)
259 return DRM_ERR(ENOMEM); 259 return -ENOMEM;
260 260
261 memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t)); 261 memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t));
262 SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0); 262 SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);
@@ -267,7 +267,7 @@ static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_pr
267 267
268 entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); 268 entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
269 if (entry == NULL) 269 if (entry == NULL)
270 return DRM_ERR(ENOMEM); 270 return -ENOMEM;
271 271
272 memset(entry, 0, sizeof(drm_mga_freelist_t)); 272 memset(entry, 0, sizeof(drm_mga_freelist_t));
273 273
@@ -399,7 +399,7 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags)
399 399
400 dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); 400 dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
401 if (!dev_priv) 401 if (!dev_priv)
402 return DRM_ERR(ENOMEM); 402 return -ENOMEM;
403 403
404 dev->dev_private = (void *)dev_priv; 404 dev->dev_private = (void *)dev_priv;
405 memset(dev_priv, 0, sizeof(drm_mga_private_t)); 405 memset(dev_priv, 0, sizeof(drm_mga_private_t));
@@ -578,7 +578,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
578 DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n", 578 DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
579 dev_priv->warp->handle, dev_priv->primary->handle, 579 dev_priv->warp->handle, dev_priv->primary->handle,
580 dev->agp_buffer_map->handle); 580 dev->agp_buffer_map->handle);
581 return DRM_ERR(ENOMEM); 581 return -ENOMEM;
582 } 582 }
583 583
584 dev_priv->dma_access = MGA_PAGPXFER; 584 dev_priv->dma_access = MGA_PAGPXFER;
@@ -622,7 +622,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
622 622
623 if (dev->dma == NULL) { 623 if (dev->dma == NULL) {
624 DRM_ERROR("dev->dma is NULL\n"); 624 DRM_ERROR("dev->dma is NULL\n");
625 return DRM_ERR(EFAULT); 625 return -EFAULT;
626 } 626 }
627 627
628 /* Make drm_addbufs happy by not trying to create a mapping for less 628 /* Make drm_addbufs happy by not trying to create a mapping for less
@@ -656,7 +656,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
656 656
657 if (err != 0) { 657 if (err != 0) {
658 DRM_ERROR("Unable to allocate primary DMA region: %d\n", err); 658 DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
659 return DRM_ERR(ENOMEM); 659 return -ENOMEM;
660 } 660 }
661 661
662 if (dev_priv->primary->size != dma_bs->primary_size) { 662 if (dev_priv->primary->size != dma_bs->primary_size) {
@@ -759,36 +759,30 @@ static int mga_do_dma_bootstrap(struct drm_device * dev,
759 return err; 759 return err;
760} 760}
761 761
762int mga_dma_bootstrap(DRM_IOCTL_ARGS) 762int mga_dma_bootstrap(struct drm_device *dev, void *data,
763 struct drm_file *file_priv)
763{ 764{
764 DRM_DEVICE; 765 drm_mga_dma_bootstrap_t *bootstrap = data;
765 drm_mga_dma_bootstrap_t bootstrap;
766 int err; 766 int err;
767 static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 }; 767 static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
768 const drm_mga_private_t *const dev_priv = 768 const drm_mga_private_t *const dev_priv =
769 (drm_mga_private_t *) dev->dev_private; 769 (drm_mga_private_t *) dev->dev_private;
770 770
771 DRM_COPY_FROM_USER_IOCTL(bootstrap, 771 err = mga_do_dma_bootstrap(dev, bootstrap);
772 (drm_mga_dma_bootstrap_t __user *) data,
773 sizeof(bootstrap));
774
775 err = mga_do_dma_bootstrap(dev, &bootstrap);
776 if (err) { 772 if (err) {
777 mga_do_cleanup_dma(dev, FULL_CLEANUP); 773 mga_do_cleanup_dma(dev, FULL_CLEANUP);
778 return err; 774 return err;
779 } 775 }
780 776
781 if (dev_priv->agp_textures != NULL) { 777 if (dev_priv->agp_textures != NULL) {
782 bootstrap.texture_handle = dev_priv->agp_textures->offset; 778 bootstrap->texture_handle = dev_priv->agp_textures->offset;
783 bootstrap.texture_size = dev_priv->agp_textures->size; 779 bootstrap->texture_size = dev_priv->agp_textures->size;
784 } else { 780 } else {
785 bootstrap.texture_handle = 0; 781 bootstrap->texture_handle = 0;
786 bootstrap.texture_size = 0; 782 bootstrap->texture_size = 0;
787 } 783 }
788 784
789 bootstrap.agp_mode = modes[bootstrap.agp_mode & 0x07]; 785 bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07];
790 DRM_COPY_TO_USER_IOCTL((drm_mga_dma_bootstrap_t __user *)data,
791 bootstrap, sizeof(bootstrap));
792 786
793 return err; 787 return err;
794} 788}
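mga_dma_bootstrap shows the other half of the new calling convention: for read/write ioctls the handler edits the very struct it was handed, filling in texture_handle/texture_size and folding agp_mode through the modes[] table, and the core copies the whole thing back to user space when the handler returns, which is why the trailing DRM_COPY_TO_USER_IOCTL disappears. A compact standalone model of that in-place result writing (struct and function names, and the example offset, are invented):

#include <stdio.h>

/* Modelled on mga_dma_bootstrap(): results are written straight into the
 * argument struct; the caller (the DRM core in the real code) copies it back. */
struct bootstrap_arg {
        unsigned int texture_handle;
        unsigned int texture_size;
        unsigned int agp_mode;
};

static void fill_bootstrap(struct bootstrap_arg *b, int have_agp_textures)
{
        /* Valid AGP transfer rates only: anything else is rounded down. */
        static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };

        if (have_agp_textures) {
                b->texture_handle = 0xd0000000;   /* illustrative offset */
                b->texture_size   = 8 << 20;
        } else {
                b->texture_handle = 0;
                b->texture_size   = 0;
        }
        b->agp_mode = modes[b->agp_mode & 0x07];
}

int main(void)
{
        struct bootstrap_arg arg = { .agp_mode = 3 };

        fill_bootstrap(&arg, 0);
        printf("agp_mode clamped to %u\n", arg.agp_mode);   /* prints 2 */
        return 0;
}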
@@ -826,7 +820,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
826 dev_priv->sarea = drm_getsarea(dev); 820 dev_priv->sarea = drm_getsarea(dev);
827 if (!dev_priv->sarea) { 821 if (!dev_priv->sarea) {
828 DRM_ERROR("failed to find sarea!\n"); 822 DRM_ERROR("failed to find sarea!\n");
829 return DRM_ERR(EINVAL); 823 return -EINVAL;
830 } 824 }
831 825
832 if (!dev_priv->used_new_dma_init) { 826 if (!dev_priv->used_new_dma_init) {
@@ -837,29 +831,29 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
837 dev_priv->status = drm_core_findmap(dev, init->status_offset); 831 dev_priv->status = drm_core_findmap(dev, init->status_offset);
838 if (!dev_priv->status) { 832 if (!dev_priv->status) {
839 DRM_ERROR("failed to find status page!\n"); 833 DRM_ERROR("failed to find status page!\n");
840 return DRM_ERR(EINVAL); 834 return -EINVAL;
841 } 835 }
842 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); 836 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
843 if (!dev_priv->mmio) { 837 if (!dev_priv->mmio) {
844 DRM_ERROR("failed to find mmio region!\n"); 838 DRM_ERROR("failed to find mmio region!\n");
845 return DRM_ERR(EINVAL); 839 return -EINVAL;
846 } 840 }
847 dev_priv->warp = drm_core_findmap(dev, init->warp_offset); 841 dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
848 if (!dev_priv->warp) { 842 if (!dev_priv->warp) {
849 DRM_ERROR("failed to find warp microcode region!\n"); 843 DRM_ERROR("failed to find warp microcode region!\n");
850 return DRM_ERR(EINVAL); 844 return -EINVAL;
851 } 845 }
852 dev_priv->primary = drm_core_findmap(dev, init->primary_offset); 846 dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
853 if (!dev_priv->primary) { 847 if (!dev_priv->primary) {
854 DRM_ERROR("failed to find primary dma region!\n"); 848 DRM_ERROR("failed to find primary dma region!\n");
855 return DRM_ERR(EINVAL); 849 return -EINVAL;
856 } 850 }
857 dev->agp_buffer_token = init->buffers_offset; 851 dev->agp_buffer_token = init->buffers_offset;
858 dev->agp_buffer_map = 852 dev->agp_buffer_map =
859 drm_core_findmap(dev, init->buffers_offset); 853 drm_core_findmap(dev, init->buffers_offset);
860 if (!dev->agp_buffer_map) { 854 if (!dev->agp_buffer_map) {
861 DRM_ERROR("failed to find dma buffer region!\n"); 855 DRM_ERROR("failed to find dma buffer region!\n");
862 return DRM_ERR(EINVAL); 856 return -EINVAL;
863 } 857 }
864 858
865 drm_core_ioremap(dev_priv->warp, dev); 859 drm_core_ioremap(dev_priv->warp, dev);
@@ -877,7 +871,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
877 ((dev->agp_buffer_map == NULL) || 871 ((dev->agp_buffer_map == NULL) ||
878 (dev->agp_buffer_map->handle == NULL)))) { 872 (dev->agp_buffer_map->handle == NULL)))) {
879 DRM_ERROR("failed to ioremap agp regions!\n"); 873 DRM_ERROR("failed to ioremap agp regions!\n");
880 return DRM_ERR(ENOMEM); 874 return -ENOMEM;
881 } 875 }
882 876
883 ret = mga_warp_install_microcode(dev_priv); 877 ret = mga_warp_install_microcode(dev_priv);
@@ -927,7 +921,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
927 921
928 if (mga_freelist_init(dev, dev_priv) < 0) { 922 if (mga_freelist_init(dev, dev_priv) < 0) {
929 DRM_ERROR("could not initialize freelist\n"); 923 DRM_ERROR("could not initialize freelist\n");
930 return DRM_ERR(ENOMEM); 924 return -ENOMEM;
931 } 925 }
932 926
933 return 0; 927 return 0;
@@ -1007,20 +1001,17 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
1007 return 0; 1001 return 0;
1008} 1002}
1009 1003
1010int mga_dma_init(DRM_IOCTL_ARGS) 1004int mga_dma_init(struct drm_device *dev, void *data,
1005 struct drm_file *file_priv)
1011{ 1006{
1012 DRM_DEVICE; 1007 drm_mga_init_t *init = data;
1013 drm_mga_init_t init;
1014 int err; 1008 int err;
1015 1009
1016 LOCK_TEST_WITH_RETURN(dev, filp); 1010 LOCK_TEST_WITH_RETURN(dev, file_priv);
1017
1018 DRM_COPY_FROM_USER_IOCTL(init, (drm_mga_init_t __user *) data,
1019 sizeof(init));
1020 1011
1021 switch (init.func) { 1012 switch (init->func) {
1022 case MGA_INIT_DMA: 1013 case MGA_INIT_DMA:
1023 err = mga_do_init_dma(dev, &init); 1014 err = mga_do_init_dma(dev, init);
1024 if (err) { 1015 if (err) {
1025 (void)mga_do_cleanup_dma(dev, FULL_CLEANUP); 1016 (void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
1026 } 1017 }
@@ -1029,36 +1020,33 @@ int mga_dma_init(DRM_IOCTL_ARGS)
1029 return mga_do_cleanup_dma(dev, FULL_CLEANUP); 1020 return mga_do_cleanup_dma(dev, FULL_CLEANUP);
1030 } 1021 }
1031 1022
1032 return DRM_ERR(EINVAL); 1023 return -EINVAL;
1033} 1024}
1034 1025
1035/* ================================================================ 1026/* ================================================================
1036 * Primary DMA stream management 1027 * Primary DMA stream management
1037 */ 1028 */
1038 1029
1039int mga_dma_flush(DRM_IOCTL_ARGS) 1030int mga_dma_flush(struct drm_device *dev, void *data,
1031 struct drm_file *file_priv)
1040{ 1032{
1041 DRM_DEVICE;
1042 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 1033 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
1043 struct drm_lock lock; 1034 struct drm_lock *lock = data;
1044
1045 LOCK_TEST_WITH_RETURN(dev, filp);
1046 1035
1047 DRM_COPY_FROM_USER_IOCTL(lock, (struct drm_lock __user *) data, 1036 LOCK_TEST_WITH_RETURN(dev, file_priv);
1048 sizeof(lock));
1049 1037
1050 DRM_DEBUG("%s%s%s\n", 1038 DRM_DEBUG("%s%s%s\n",
1051 (lock.flags & _DRM_LOCK_FLUSH) ? "flush, " : "", 1039 (lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
1052 (lock.flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "", 1040 (lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
1053 (lock.flags & _DRM_LOCK_QUIESCENT) ? "idle, " : ""); 1041 (lock->flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "");
1054 1042
1055 WRAP_WAIT_WITH_RETURN(dev_priv); 1043 WRAP_WAIT_WITH_RETURN(dev_priv);
1056 1044
1057 if (lock.flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) { 1045 if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) {
1058 mga_do_dma_flush(dev_priv); 1046 mga_do_dma_flush(dev_priv);
1059 } 1047 }
1060 1048
1061 if (lock.flags & _DRM_LOCK_QUIESCENT) { 1049 if (lock->flags & _DRM_LOCK_QUIESCENT) {
1062#if MGA_DMA_DEBUG 1050#if MGA_DMA_DEBUG
1063 int ret = mga_do_wait_for_idle(dev_priv); 1051 int ret = mga_do_wait_for_idle(dev_priv);
1064 if (ret < 0) 1052 if (ret < 0)
@@ -1072,12 +1060,12 @@ int mga_dma_flush(DRM_IOCTL_ARGS)
1072 } 1060 }
1073} 1061}
1074 1062
1075int mga_dma_reset(DRM_IOCTL_ARGS) 1063int mga_dma_reset(struct drm_device *dev, void *data,
1064 struct drm_file *file_priv)
1076{ 1065{
1077 DRM_DEVICE;
1078 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 1066 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
1079 1067
1080 LOCK_TEST_WITH_RETURN(dev, filp); 1068 LOCK_TEST_WITH_RETURN(dev, file_priv);
1081 1069
1082 return mga_do_dma_reset(dev_priv); 1070 return mga_do_dma_reset(dev_priv);
1083} 1071}
@@ -1086,7 +1074,8 @@ int mga_dma_reset(DRM_IOCTL_ARGS)
1086 * DMA buffer management 1074 * DMA buffer management
1087 */ 1075 */
1088 1076
1089static int mga_dma_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d) 1077static int mga_dma_get_buffers(struct drm_device * dev,
1078 struct drm_file *file_priv, struct drm_dma * d)
1090{ 1079{
1091 struct drm_buf *buf; 1080 struct drm_buf *buf;
1092 int i; 1081 int i;
@@ -1094,61 +1083,56 @@ static int mga_dma_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm
1094 for (i = d->granted_count; i < d->request_count; i++) { 1083 for (i = d->granted_count; i < d->request_count; i++) {
1095 buf = mga_freelist_get(dev); 1084 buf = mga_freelist_get(dev);
1096 if (!buf) 1085 if (!buf)
1097 return DRM_ERR(EAGAIN); 1086 return -EAGAIN;
1098 1087
1099 buf->filp = filp; 1088 buf->file_priv = file_priv;
1100 1089
1101 if (DRM_COPY_TO_USER(&d->request_indices[i], 1090 if (DRM_COPY_TO_USER(&d->request_indices[i],
1102 &buf->idx, sizeof(buf->idx))) 1091 &buf->idx, sizeof(buf->idx)))
1103 return DRM_ERR(EFAULT); 1092 return -EFAULT;
1104 if (DRM_COPY_TO_USER(&d->request_sizes[i], 1093 if (DRM_COPY_TO_USER(&d->request_sizes[i],
1105 &buf->total, sizeof(buf->total))) 1094 &buf->total, sizeof(buf->total)))
1106 return DRM_ERR(EFAULT); 1095 return -EFAULT;
1107 1096
1108 d->granted_count++; 1097 d->granted_count++;
1109 } 1098 }
1110 return 0; 1099 return 0;
1111} 1100}
1112 1101
1113int mga_dma_buffers(DRM_IOCTL_ARGS) 1102int mga_dma_buffers(struct drm_device *dev, void *data,
1103 struct drm_file *file_priv)
1114{ 1104{
1115 DRM_DEVICE;
1116 struct drm_device_dma *dma = dev->dma; 1105 struct drm_device_dma *dma = dev->dma;
1117 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 1106 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
1118 struct drm_dma __user *argp = (void __user *)data; 1107 struct drm_dma *d = data;
1119 struct drm_dma d;
1120 int ret = 0; 1108 int ret = 0;
1121 1109
1122 LOCK_TEST_WITH_RETURN(dev, filp); 1110 LOCK_TEST_WITH_RETURN(dev, file_priv);
1123
1124 DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d));
1125 1111
1126 /* Please don't send us buffers. 1112 /* Please don't send us buffers.
1127 */ 1113 */
1128 if (d.send_count != 0) { 1114 if (d->send_count != 0) {
1129 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", 1115 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
1130 DRM_CURRENTPID, d.send_count); 1116 DRM_CURRENTPID, d->send_count);
1131 return DRM_ERR(EINVAL); 1117 return -EINVAL;
1132 } 1118 }
1133 1119
1134 /* We'll send you buffers. 1120 /* We'll send you buffers.
1135 */ 1121 */
1136 if (d.request_count < 0 || d.request_count > dma->buf_count) { 1122 if (d->request_count < 0 || d->request_count > dma->buf_count) {
1137 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", 1123 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
1138 DRM_CURRENTPID, d.request_count, dma->buf_count); 1124 DRM_CURRENTPID, d->request_count, dma->buf_count);
1139 return DRM_ERR(EINVAL); 1125 return -EINVAL;
1140 } 1126 }
1141 1127
1142 WRAP_TEST_WITH_RETURN(dev_priv); 1128 WRAP_TEST_WITH_RETURN(dev_priv);
1143 1129
1144 d.granted_count = 0; 1130 d->granted_count = 0;
1145 1131
1146 if (d.request_count) { 1132 if (d->request_count) {
1147 ret = mga_dma_get_buffers(filp, dev, &d); 1133 ret = mga_dma_get_buffers(dev, file_priv, d);
1148 } 1134 }
1149 1135
1150 DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d));
1151
1152 return ret; 1136 return ret;
1153} 1137}
1154 1138
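mga_dma_buffers keeps its per-element DRM_COPY_TO_USER calls because request_indices and request_sizes are arrays in the caller's address space, reachable only through pointers inside struct drm_dma; only granted_count, a plain member, travels back automatically. Each handed-out buffer is now stamped with buf->file_priv instead of buf->filp, which is what lets the core reclaim a client's buffers when it closes the device. The up-front sanity checks the handler applies, pulled out into a standalone helper for clarity (the helper name is invented):

#include <errno.h>

/* The checks mga_dma_buffers() applies before granting any buffers. */
static int check_dma_request(int send_count, int request_count, int buf_count)
{
        if (send_count != 0)                              /* clients may only receive */
                return -EINVAL;
        if (request_count < 0 || request_count > buf_count)
                return -EINVAL;
        return 0;
}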
diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h
index 49253affa475..cd94c04e31c0 100644
--- a/drivers/char/drm/mga_drv.h
+++ b/drivers/char/drm/mga_drv.h
@@ -148,15 +148,20 @@ typedef struct drm_mga_private {
148 unsigned int agp_size; 148 unsigned int agp_size;
149} drm_mga_private_t; 149} drm_mga_private_t;
150 150
151extern drm_ioctl_desc_t mga_ioctls[]; 151extern struct drm_ioctl_desc mga_ioctls[];
152extern int mga_max_ioctl; 152extern int mga_max_ioctl;
153 153
154 /* mga_dma.c */ 154 /* mga_dma.c */
155extern int mga_dma_bootstrap(DRM_IOCTL_ARGS); 155extern int mga_dma_bootstrap(struct drm_device *dev, void *data,
156extern int mga_dma_init(DRM_IOCTL_ARGS); 156 struct drm_file *file_priv);
157extern int mga_dma_flush(DRM_IOCTL_ARGS); 157extern int mga_dma_init(struct drm_device *dev, void *data,
158extern int mga_dma_reset(DRM_IOCTL_ARGS); 158 struct drm_file *file_priv);
159extern int mga_dma_buffers(DRM_IOCTL_ARGS); 159extern int mga_dma_flush(struct drm_device *dev, void *data,
160 struct drm_file *file_priv);
161extern int mga_dma_reset(struct drm_device *dev, void *data,
162 struct drm_file *file_priv);
163extern int mga_dma_buffers(struct drm_device *dev, void *data,
164 struct drm_file *file_priv);
160extern int mga_driver_load(struct drm_device *dev, unsigned long flags); 165extern int mga_driver_load(struct drm_device *dev, unsigned long flags);
161extern int mga_driver_unload(struct drm_device * dev); 166extern int mga_driver_unload(struct drm_device * dev);
162extern void mga_driver_lastclose(struct drm_device * dev); 167extern void mga_driver_lastclose(struct drm_device * dev);
@@ -245,7 +250,7 @@ do { \
245 dev_priv->prim.high_mark ) { \ 250 dev_priv->prim.high_mark ) { \
246 if ( MGA_DMA_DEBUG ) \ 251 if ( MGA_DMA_DEBUG ) \
247 DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \ 252 DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \
248 return DRM_ERR(EBUSY); \ 253 return -EBUSY; \
249 } \ 254 } \
250 } \ 255 } \
251} while (0) 256} while (0)
@@ -256,7 +261,7 @@ do { \
256 if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \ 261 if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \
257 if ( MGA_DMA_DEBUG ) \ 262 if ( MGA_DMA_DEBUG ) \
258 DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \ 263 DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \
259 return DRM_ERR(EBUSY); \ 264 return -EBUSY; \
260 } \ 265 } \
261 mga_do_dma_wrap_end( dev_priv ); \ 266 mga_do_dma_wrap_end( dev_priv ); \
262 } \ 267 } \
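The mga_drv.h hunks apply the same conversion to the helper macros: LOCK_TEST_WITH_RETURN is now keyed on file_priv rather than filp, and the wrap checks bail out with a plain -EBUSY. Conceptually the lock test verifies two things before a handler may touch the ring: that the hardware lock is held at all, and that it is held by this caller. A paraphrase of that check, not the exact drmP.h macro (lock_is_held() and lock_owner() are placeholders for the real lock-word tests):

/* Paraphrase of LOCK_TEST_WITH_RETURN(dev, file_priv); the real macro also
 * prints which function tripped it before returning the error. */
#define LOCK_TEST_WITH_RETURN_SKETCH(dev, file_priv)                      \
        do {                                                              \
                if (!lock_is_held(dev) || lock_owner(dev) != (file_priv)) \
                        return -EINVAL;                                   \
        } while (0)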
diff --git a/drivers/char/drm/mga_state.c b/drivers/char/drm/mga_state.c
index d448b0aef33c..5ec8b61c5d45 100644
--- a/drivers/char/drm/mga_state.c
+++ b/drivers/char/drm/mga_state.c
@@ -392,7 +392,7 @@ static int mga_verify_context(drm_mga_private_t * dev_priv)
392 ctx->dstorg, dev_priv->front_offset, 392 ctx->dstorg, dev_priv->front_offset,
393 dev_priv->back_offset); 393 dev_priv->back_offset);
394 ctx->dstorg = 0; 394 ctx->dstorg = 0;
395 return DRM_ERR(EINVAL); 395 return -EINVAL;
396 } 396 }
397 397
398 return 0; 398 return 0;
@@ -411,7 +411,7 @@ static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit)
411 if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) { 411 if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) {
412 DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit); 412 DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit);
413 tex->texorg = 0; 413 tex->texorg = 0;
414 return DRM_ERR(EINVAL); 414 return -EINVAL;
415 } 415 }
416 416
417 return 0; 417 return 0;
@@ -453,13 +453,13 @@ static int mga_verify_iload(drm_mga_private_t * dev_priv,
453 dstorg + length > (dev_priv->texture_offset + 453 dstorg + length > (dev_priv->texture_offset +
454 dev_priv->texture_size)) { 454 dev_priv->texture_size)) {
455 DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg); 455 DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg);
456 return DRM_ERR(EINVAL); 456 return -EINVAL;
457 } 457 }
458 458
459 if (length & MGA_ILOAD_MASK) { 459 if (length & MGA_ILOAD_MASK) {
460 DRM_ERROR("*** bad iload length: 0x%x\n", 460 DRM_ERROR("*** bad iload length: 0x%x\n",
461 length & MGA_ILOAD_MASK); 461 length & MGA_ILOAD_MASK);
462 return DRM_ERR(EINVAL); 462 return -EINVAL;
463 } 463 }
464 464
465 return 0; 465 return 0;
@@ -471,7 +471,7 @@ static int mga_verify_blit(drm_mga_private_t * dev_priv,
471 if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) || 471 if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) ||
472 (dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) { 472 (dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) {
473 DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg); 473 DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg);
474 return DRM_ERR(EINVAL); 474 return -EINVAL;
475 } 475 }
476 return 0; 476 return 0;
477} 477}
@@ -828,24 +828,20 @@ static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit
828 * 828 *
829 */ 829 */
830 830
831static int mga_dma_clear(DRM_IOCTL_ARGS) 831static int mga_dma_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
832{ 832{
833 DRM_DEVICE;
834 drm_mga_private_t *dev_priv = dev->dev_private; 833 drm_mga_private_t *dev_priv = dev->dev_private;
835 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 834 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
836 drm_mga_clear_t clear; 835 drm_mga_clear_t *clear = data;
837 836
838 LOCK_TEST_WITH_RETURN(dev, filp); 837 LOCK_TEST_WITH_RETURN(dev, file_priv);
839
840 DRM_COPY_FROM_USER_IOCTL(clear, (drm_mga_clear_t __user *) data,
841 sizeof(clear));
842 838
843 if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) 839 if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
844 sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; 840 sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
845 841
846 WRAP_TEST_WITH_RETURN(dev_priv); 842 WRAP_TEST_WITH_RETURN(dev_priv);
847 843
848 mga_dma_dispatch_clear(dev, &clear); 844 mga_dma_dispatch_clear(dev, clear);
849 845
850 /* Make sure we restore the 3D state next time. 846 /* Make sure we restore the 3D state next time.
851 */ 847 */
@@ -854,13 +850,12 @@ static int mga_dma_clear(DRM_IOCTL_ARGS)
854 return 0; 850 return 0;
855} 851}
856 852
857static int mga_dma_swap(DRM_IOCTL_ARGS) 853static int mga_dma_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
858{ 854{
859 DRM_DEVICE;
860 drm_mga_private_t *dev_priv = dev->dev_private; 855 drm_mga_private_t *dev_priv = dev->dev_private;
861 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 856 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
862 857
863 LOCK_TEST_WITH_RETURN(dev, filp); 858 LOCK_TEST_WITH_RETURN(dev, file_priv);
864 859
865 if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) 860 if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
866 sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; 861 sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
@@ -876,37 +871,32 @@ static int mga_dma_swap(DRM_IOCTL_ARGS)
876 return 0; 871 return 0;
877} 872}
878 873
879static int mga_dma_vertex(DRM_IOCTL_ARGS) 874static int mga_dma_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
880{ 875{
881 DRM_DEVICE;
882 drm_mga_private_t *dev_priv = dev->dev_private; 876 drm_mga_private_t *dev_priv = dev->dev_private;
883 struct drm_device_dma *dma = dev->dma; 877 struct drm_device_dma *dma = dev->dma;
884 struct drm_buf *buf; 878 struct drm_buf *buf;
885 drm_mga_buf_priv_t *buf_priv; 879 drm_mga_buf_priv_t *buf_priv;
886 drm_mga_vertex_t vertex; 880 drm_mga_vertex_t *vertex = data;
887
888 LOCK_TEST_WITH_RETURN(dev, filp);
889 881
890 DRM_COPY_FROM_USER_IOCTL(vertex, 882 LOCK_TEST_WITH_RETURN(dev, file_priv);
891 (drm_mga_vertex_t __user *) data,
892 sizeof(vertex));
893 883
894 if (vertex.idx < 0 || vertex.idx > dma->buf_count) 884 if (vertex->idx < 0 || vertex->idx > dma->buf_count)
895 return DRM_ERR(EINVAL); 885 return -EINVAL;
896 buf = dma->buflist[vertex.idx]; 886 buf = dma->buflist[vertex->idx];
897 buf_priv = buf->dev_private; 887 buf_priv = buf->dev_private;
898 888
899 buf->used = vertex.used; 889 buf->used = vertex->used;
900 buf_priv->discard = vertex.discard; 890 buf_priv->discard = vertex->discard;
901 891
902 if (!mga_verify_state(dev_priv)) { 892 if (!mga_verify_state(dev_priv)) {
903 if (vertex.discard) { 893 if (vertex->discard) {
904 if (buf_priv->dispatched == 1) 894 if (buf_priv->dispatched == 1)
905 AGE_BUFFER(buf_priv); 895 AGE_BUFFER(buf_priv);
906 buf_priv->dispatched = 0; 896 buf_priv->dispatched = 0;
907 mga_freelist_put(dev, buf); 897 mga_freelist_put(dev, buf);
908 } 898 }
909 return DRM_ERR(EINVAL); 899 return -EINVAL;
910 } 900 }
911 901
912 WRAP_TEST_WITH_RETURN(dev_priv); 902 WRAP_TEST_WITH_RETURN(dev_priv);
@@ -916,82 +906,73 @@ static int mga_dma_vertex(DRM_IOCTL_ARGS)
916 return 0; 906 return 0;
917} 907}
918 908
919static int mga_dma_indices(DRM_IOCTL_ARGS) 909static int mga_dma_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
920{ 910{
921 DRM_DEVICE;
922 drm_mga_private_t *dev_priv = dev->dev_private; 911 drm_mga_private_t *dev_priv = dev->dev_private;
923 struct drm_device_dma *dma = dev->dma; 912 struct drm_device_dma *dma = dev->dma;
924 struct drm_buf *buf; 913 struct drm_buf *buf;
925 drm_mga_buf_priv_t *buf_priv; 914 drm_mga_buf_priv_t *buf_priv;
926 drm_mga_indices_t indices; 915 drm_mga_indices_t *indices = data;
927 916
928 LOCK_TEST_WITH_RETURN(dev, filp); 917 LOCK_TEST_WITH_RETURN(dev, file_priv);
929 918
930 DRM_COPY_FROM_USER_IOCTL(indices, 919 if (indices->idx < 0 || indices->idx > dma->buf_count)
931 (drm_mga_indices_t __user *) data, 920 return -EINVAL;
932 sizeof(indices));
933 921
934 if (indices.idx < 0 || indices.idx > dma->buf_count) 922 buf = dma->buflist[indices->idx];
935 return DRM_ERR(EINVAL);
936
937 buf = dma->buflist[indices.idx];
938 buf_priv = buf->dev_private; 923 buf_priv = buf->dev_private;
939 924
940 buf_priv->discard = indices.discard; 925 buf_priv->discard = indices->discard;
941 926
942 if (!mga_verify_state(dev_priv)) { 927 if (!mga_verify_state(dev_priv)) {
943 if (indices.discard) { 928 if (indices->discard) {
944 if (buf_priv->dispatched == 1) 929 if (buf_priv->dispatched == 1)
945 AGE_BUFFER(buf_priv); 930 AGE_BUFFER(buf_priv);
946 buf_priv->dispatched = 0; 931 buf_priv->dispatched = 0;
947 mga_freelist_put(dev, buf); 932 mga_freelist_put(dev, buf);
948 } 933 }
949 return DRM_ERR(EINVAL); 934 return -EINVAL;
950 } 935 }
951 936
952 WRAP_TEST_WITH_RETURN(dev_priv); 937 WRAP_TEST_WITH_RETURN(dev_priv);
953 938
954 mga_dma_dispatch_indices(dev, buf, indices.start, indices.end); 939 mga_dma_dispatch_indices(dev, buf, indices->start, indices->end);
955 940
956 return 0; 941 return 0;
957} 942}
958 943
959static int mga_dma_iload(DRM_IOCTL_ARGS) 944static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *file_priv)
960{ 945{
961 DRM_DEVICE;
962 struct drm_device_dma *dma = dev->dma; 946 struct drm_device_dma *dma = dev->dma;
963 drm_mga_private_t *dev_priv = dev->dev_private; 947 drm_mga_private_t *dev_priv = dev->dev_private;
964 struct drm_buf *buf; 948 struct drm_buf *buf;
965 drm_mga_buf_priv_t *buf_priv; 949 drm_mga_buf_priv_t *buf_priv;
966 drm_mga_iload_t iload; 950 drm_mga_iload_t *iload = data;
967 DRM_DEBUG("\n"); 951 DRM_DEBUG("\n");
968 952
969 LOCK_TEST_WITH_RETURN(dev, filp); 953 LOCK_TEST_WITH_RETURN(dev, file_priv);
970
971 DRM_COPY_FROM_USER_IOCTL(iload, (drm_mga_iload_t __user *) data,
972 sizeof(iload));
973 954
974#if 0 955#if 0
975 if (mga_do_wait_for_idle(dev_priv) < 0) { 956 if (mga_do_wait_for_idle(dev_priv) < 0) {
976 if (MGA_DMA_DEBUG) 957 if (MGA_DMA_DEBUG)
977 DRM_INFO("%s: -EBUSY\n", __FUNCTION__); 958 DRM_INFO("%s: -EBUSY\n", __FUNCTION__);
978 return DRM_ERR(EBUSY); 959 return -EBUSY;
979 } 960 }
980#endif 961#endif
981 if (iload.idx < 0 || iload.idx > dma->buf_count) 962 if (iload->idx < 0 || iload->idx > dma->buf_count)
982 return DRM_ERR(EINVAL); 963 return -EINVAL;
983 964
984 buf = dma->buflist[iload.idx]; 965 buf = dma->buflist[iload->idx];
985 buf_priv = buf->dev_private; 966 buf_priv = buf->dev_private;
986 967
987 if (mga_verify_iload(dev_priv, iload.dstorg, iload.length)) { 968 if (mga_verify_iload(dev_priv, iload->dstorg, iload->length)) {
988 mga_freelist_put(dev, buf); 969 mga_freelist_put(dev, buf);
989 return DRM_ERR(EINVAL); 970 return -EINVAL;
990 } 971 }
991 972
992 WRAP_TEST_WITH_RETURN(dev_priv); 973 WRAP_TEST_WITH_RETURN(dev_priv);
993 974
994 mga_dma_dispatch_iload(dev, buf, iload.dstorg, iload.length); 975 mga_dma_dispatch_iload(dev, buf, iload->dstorg, iload->length);
995 976
996 /* Make sure we restore the 3D state next time. 977 /* Make sure we restore the 3D state next time.
997 */ 978 */
@@ -1000,28 +981,24 @@ static int mga_dma_iload(DRM_IOCTL_ARGS)
1000 return 0; 981 return 0;
1001} 982}
1002 983
1003static int mga_dma_blit(DRM_IOCTL_ARGS) 984static int mga_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
1004{ 985{
1005 DRM_DEVICE;
1006 drm_mga_private_t *dev_priv = dev->dev_private; 986 drm_mga_private_t *dev_priv = dev->dev_private;
1007 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 987 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
1008 drm_mga_blit_t blit; 988 drm_mga_blit_t *blit = data;
1009 DRM_DEBUG("\n"); 989 DRM_DEBUG("\n");
1010 990
1011 LOCK_TEST_WITH_RETURN(dev, filp); 991 LOCK_TEST_WITH_RETURN(dev, file_priv);
1012
1013 DRM_COPY_FROM_USER_IOCTL(blit, (drm_mga_blit_t __user *) data,
1014 sizeof(blit));
1015 992
1016 if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) 993 if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
1017 sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; 994 sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
1018 995
1019 if (mga_verify_blit(dev_priv, blit.srcorg, blit.dstorg)) 996 if (mga_verify_blit(dev_priv, blit->srcorg, blit->dstorg))
1020 return DRM_ERR(EINVAL); 997 return -EINVAL;
1021 998
1022 WRAP_TEST_WITH_RETURN(dev_priv); 999 WRAP_TEST_WITH_RETURN(dev_priv);
1023 1000
1024 mga_dma_dispatch_blit(dev, &blit); 1001 mga_dma_dispatch_blit(dev, blit);
1025 1002
1026 /* Make sure we restore the 3D state next time. 1003 /* Make sure we restore the 3D state next time.
1027 */ 1004 */
@@ -1030,24 +1007,20 @@ static int mga_dma_blit(DRM_IOCTL_ARGS)
1030 return 0; 1007 return 0;
1031} 1008}
1032 1009
1033static int mga_getparam(DRM_IOCTL_ARGS) 1010static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
1034{ 1011{
1035 DRM_DEVICE;
1036 drm_mga_private_t *dev_priv = dev->dev_private; 1012 drm_mga_private_t *dev_priv = dev->dev_private;
1037 drm_mga_getparam_t param; 1013 drm_mga_getparam_t *param = data;
1038 int value; 1014 int value;
1039 1015
1040 if (!dev_priv) { 1016 if (!dev_priv) {
1041 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1017 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1042 return DRM_ERR(EINVAL); 1018 return -EINVAL;
1043 } 1019 }
1044 1020
1045 DRM_COPY_FROM_USER_IOCTL(param, (drm_mga_getparam_t __user *) data,
1046 sizeof(param));
1047
1048 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); 1021 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
1049 1022
1050 switch (param.param) { 1023 switch (param->param) {
1051 case MGA_PARAM_IRQ_NR: 1024 case MGA_PARAM_IRQ_NR:
1052 value = dev->irq; 1025 value = dev->irq;
1053 break; 1026 break;
@@ -1055,36 +1028,35 @@ static int mga_getparam(DRM_IOCTL_ARGS)
1055 value = dev_priv->chipset; 1028 value = dev_priv->chipset;
1056 break; 1029 break;
1057 default: 1030 default:
1058 return DRM_ERR(EINVAL); 1031 return -EINVAL;
1059 } 1032 }
1060 1033
1061 if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { 1034 if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
1062 DRM_ERROR("copy_to_user\n"); 1035 DRM_ERROR("copy_to_user\n");
1063 return DRM_ERR(EFAULT); 1036 return -EFAULT;
1064 } 1037 }
1065 1038
1066 return 0; 1039 return 0;
1067} 1040}
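mga_getparam marks the limit of the automatic marshalling: only the top-level argument struct is copied for the handler. param->value is a pointer into the caller's address space embedded inside that struct, so the result still has to be written back with an explicit DRM_COPY_TO_USER (and embedded input pointers, conversely, still need an explicit copy-in). A sketch of the pattern, with the struct layout and FOO_PARAM_IRQ_NR invented for illustration:

    /* Hypothetical argument struct: the outer struct is auto-copied,
     * but the 'value' member is a user pointer that is not followed. */
    typedef struct foo_getparam {
            int param;
            void __user *value;
    } foo_getparam_t;

    #define FOO_PARAM_IRQ_NR 1      /* placeholder parameter id */

    static int foo_getparam(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
    {
            foo_getparam_t *param = data;   /* already in kernel memory */
            int value;

            switch (param->param) {
            case FOO_PARAM_IRQ_NR:
                    value = dev->irq;
                    break;
            default:
                    return -EINVAL;
            }

            /* The embedded pointer still needs an explicit copy-out. */
            if (DRM_COPY_TO_USER(param->value, &value, sizeof(int)))
                    return -EFAULT;
            return 0;
    }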
1068 1041
1069static int mga_set_fence(DRM_IOCTL_ARGS) 1042static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *file_priv)
1070{ 1043{
1071 DRM_DEVICE;
1072 drm_mga_private_t *dev_priv = dev->dev_private; 1044 drm_mga_private_t *dev_priv = dev->dev_private;
1073 u32 temp; 1045 u32 *fence = data;
1074 DMA_LOCALS; 1046 DMA_LOCALS;
1075 1047
1076 if (!dev_priv) { 1048 if (!dev_priv) {
1077 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1049 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1078 return DRM_ERR(EINVAL); 1050 return -EINVAL;
1079 } 1051 }
1080 1052
1081 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); 1053 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
1082 1054
1083 /* I would normally do this assignment in the declaration of temp, 1055 /* I would normally do this assignment in the declaration of fence,
1084 * but dev_priv may be NULL. 1056 * but dev_priv may be NULL.
1085 */ 1057 */
1086 1058
1087 temp = dev_priv->next_fence_to_post; 1059 *fence = dev_priv->next_fence_to_post;
1088 dev_priv->next_fence_to_post++; 1060 dev_priv->next_fence_to_post++;
1089 1061
1090 BEGIN_DMA(1); 1062 BEGIN_DMA(1);
@@ -1093,53 +1065,40 @@ static int mga_set_fence(DRM_IOCTL_ARGS)
1093 MGA_DMAPAD, 0x00000000, MGA_SOFTRAP, 0x00000000); 1065 MGA_DMAPAD, 0x00000000, MGA_SOFTRAP, 0x00000000);
1094 ADVANCE_DMA(); 1066 ADVANCE_DMA();
1095 1067
1096 if (DRM_COPY_TO_USER((u32 __user *) data, &temp, sizeof(u32))) {
1097 DRM_ERROR("copy_to_user\n");
1098 return DRM_ERR(EFAULT);
1099 }
1100
1101 return 0; 1068 return 0;
1102} 1069}
1103 1070
1104static int mga_wait_fence(DRM_IOCTL_ARGS) 1071static int mga_wait_fence(struct drm_device *dev, void *data, struct drm_file *
1072file_priv)
1105{ 1073{
1106 DRM_DEVICE;
1107 drm_mga_private_t *dev_priv = dev->dev_private; 1074 drm_mga_private_t *dev_priv = dev->dev_private;
1108 u32 fence; 1075 u32 *fence = data;
1109 1076
1110 if (!dev_priv) { 1077 if (!dev_priv) {
1111 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1078 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1112 return DRM_ERR(EINVAL); 1079 return -EINVAL;
1113 } 1080 }
1114 1081
1115 DRM_COPY_FROM_USER_IOCTL(fence, (u32 __user *) data, sizeof(u32));
1116
1117 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); 1082 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
1118 1083
1119 mga_driver_fence_wait(dev, &fence); 1084 mga_driver_fence_wait(dev, fence);
1120
1121 if (DRM_COPY_TO_USER((u32 __user *) data, &fence, sizeof(u32))) {
1122 DRM_ERROR("copy_to_user\n");
1123 return DRM_ERR(EFAULT);
1124 }
1125
1126 return 0; 1085 return 0;
1127} 1086}
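The fence ioctls show what the rework does to commands whose payload is a bare scalar. mga_set_fence writes its result straight into the kernel copy (*fence = ...), and the trailing DRM_COPY_TO_USER disappears because the generic ioctl path copies the argument block back to userspace when the command's encoding declares an out direction; mga_wait_fence reads the same scalar without an explicit copy-in. A sketch of that in/out pattern, with foo_private and foo_bump_counter invented for illustration:

    /* Sketch: a read-modify-write ioctl on a single u32. The core copies
     * 'data' in before the call and back out afterwards, so the handler
     * never touches userspace memory for the scalar itself. */
    static int foo_bump_counter(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
    {
            struct foo_private *priv = dev->dev_private;    /* hypothetical */
            u32 *counter = data;

            if (!priv)
                    return -EINVAL;

            *counter = priv->next_value++;  /* result travels back in 'data' */
            return 0;
    }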
1128 1087
1129drm_ioctl_desc_t mga_ioctls[] = { 1088struct drm_ioctl_desc mga_ioctls[] = {
1130 [DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1089 DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1131 [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, DRM_AUTH}, 1090 DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH),
1132 [DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, DRM_AUTH}, 1091 DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH),
1133 [DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, DRM_AUTH}, 1092 DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH),
1134 [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, DRM_AUTH}, 1093 DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH),
1135 [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, DRM_AUTH}, 1094 DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
1136 [DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, DRM_AUTH}, 1095 DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH),
1137 [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, DRM_AUTH}, 1096 DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH),
1138 [DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, DRM_AUTH}, 1097 DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH),
1139 [DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, DRM_AUTH}, 1098 DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH),
1140 [DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, DRM_AUTH}, 1099 DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
1141 [DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, DRM_AUTH}, 1100 DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
1142 [DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1101 DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1143}; 1102};
1144 1103
1145int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls); 1104int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
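The ioctl table changes shape as well: instead of open-coded designated initializers keyed by DRM_IOCTL_NR(), each entry comes from the DRM_IOCTL_DEF() macro, which ties together the command number, the handler in its new prototype, and the permission flags. The macro body below is an approximation of the drmP.h definition of this period, and DRM_FOO_* / foo_* are placeholders, not names from this patch.

    /* Approximate shape of the helper macro and a table built with it. */
    #define DRM_IOCTL_DEF(ioctl, _func, _flags) \
            [DRM_IOCTL_NR(ioctl)] = { .cmd = ioctl, .func = _func, .flags = _flags }

    struct drm_ioctl_desc foo_ioctls[] = {
            DRM_IOCTL_DEF(DRM_FOO_INIT,  foo_dma_init,  DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
            DRM_IOCTL_DEF(DRM_FOO_FLUSH, foo_dma_flush, DRM_AUTH),
    };

    int foo_max_ioctl = DRM_ARRAY_SIZE(foo_ioctls);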
diff --git a/drivers/char/drm/mga_warp.c b/drivers/char/drm/mga_warp.c
index d67f4925fbac..651b93c8ab5d 100644
--- a/drivers/char/drm/mga_warp.c
+++ b/drivers/char/drm/mga_warp.c
@@ -141,7 +141,7 @@ int mga_warp_install_microcode(drm_mga_private_t * dev_priv)
141 if (size > dev_priv->warp->size) { 141 if (size > dev_priv->warp->size) {
142 DRM_ERROR("microcode too large! (%u > %lu)\n", 142 DRM_ERROR("microcode too large! (%u > %lu)\n",
143 size, dev_priv->warp->size); 143 size, dev_priv->warp->size);
144 return DRM_ERR(ENOMEM); 144 return -ENOMEM;
145 } 145 }
146 146
147 switch (dev_priv->chipset) { 147 switch (dev_priv->chipset) {
@@ -151,7 +151,7 @@ int mga_warp_install_microcode(drm_mga_private_t * dev_priv)
151 case MGA_CARD_TYPE_G200: 151 case MGA_CARD_TYPE_G200:
152 return mga_warp_install_g200_microcode(dev_priv); 152 return mga_warp_install_g200_microcode(dev_priv);
153 default: 153 default:
154 return DRM_ERR(EINVAL); 154 return -EINVAL;
155 } 155 }
156} 156}
157 157
@@ -177,7 +177,7 @@ int mga_warp_init(drm_mga_private_t * dev_priv)
177 MGA_WRITE(MGA_WVRTXSZ, 7); 177 MGA_WRITE(MGA_WVRTXSZ, 7);
178 break; 178 break;
179 default: 179 default:
180 return DRM_ERR(EINVAL); 180 return -EINVAL;
181 } 181 }
182 182
183 MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE | 183 MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE |
@@ -186,7 +186,7 @@ int mga_warp_init(drm_mga_private_t * dev_priv)
186 if (wmisc != WMISC_EXPECTED) { 186 if (wmisc != WMISC_EXPECTED) {
187 DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n", 187 DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n",
188 wmisc, WMISC_EXPECTED); 188 wmisc, WMISC_EXPECTED);
189 return DRM_ERR(EINVAL); 189 return -EINVAL;
190 } 190 }
191 191
192 return 0; 192 return 0;
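The mga_warp.c hunks isolate the other mechanical change running through this series: DRM_ERR(E...) becomes a plain negative errno. In the old OS-abstraction layer DRM_ERR() was, on Linux, essentially a sign wrapper (roughly '#define DRM_ERR(d) -(d)' in drm_os_linux.h; quoted from memory, so treat the exact form as an assumption) that let shared code return positive error codes on the BSDs. With the shared layer going away, the negation is simply written out at each return site:

    /* Illustrative only: the old macro hid the Linux sign convention. */
    #include <errno.h>

    #define DRM_ERR(d)      (-(d))          /* approximate old definition */

    static int old_style(void) { return DRM_ERR(ENOMEM); }  /* == -ENOMEM */
    static int new_style(void) { return -ENOMEM; }          /* same value */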
diff --git a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c
index b163ed09bd81..7d550aba165e 100644
--- a/drivers/char/drm/r128_cce.c
+++ b/drivers/char/drm/r128_cce.c
@@ -129,7 +129,7 @@ static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv)
129#if R128_FIFO_DEBUG 129#if R128_FIFO_DEBUG
130 DRM_ERROR("failed!\n"); 130 DRM_ERROR("failed!\n");
131#endif 131#endif
132 return DRM_ERR(EBUSY); 132 return -EBUSY;
133} 133}
134 134
135static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries) 135static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries)
@@ -146,7 +146,7 @@ static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries)
146#if R128_FIFO_DEBUG 146#if R128_FIFO_DEBUG
147 DRM_ERROR("failed!\n"); 147 DRM_ERROR("failed!\n");
148#endif 148#endif
149 return DRM_ERR(EBUSY); 149 return -EBUSY;
150} 150}
151 151
152static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv) 152static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv)
@@ -168,7 +168,7 @@ static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv)
168#if R128_FIFO_DEBUG 168#if R128_FIFO_DEBUG
169 DRM_ERROR("failed!\n"); 169 DRM_ERROR("failed!\n");
170#endif 170#endif
171 return DRM_ERR(EBUSY); 171 return -EBUSY;
172} 172}
173 173
174/* ================================================================ 174/* ================================================================
@@ -227,7 +227,7 @@ int r128_do_cce_idle(drm_r128_private_t * dev_priv)
227 DRM_ERROR("failed!\n"); 227 DRM_ERROR("failed!\n");
228 r128_status(dev_priv); 228 r128_status(dev_priv);
229#endif 229#endif
230 return DRM_ERR(EBUSY); 230 return -EBUSY;
231} 231}
232 232
233/* Start the Concurrent Command Engine. 233/* Start the Concurrent Command Engine.
@@ -355,7 +355,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
355 355
356 dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER); 356 dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER);
357 if (dev_priv == NULL) 357 if (dev_priv == NULL)
358 return DRM_ERR(ENOMEM); 358 return -ENOMEM;
359 359
360 memset(dev_priv, 0, sizeof(drm_r128_private_t)); 360 memset(dev_priv, 0, sizeof(drm_r128_private_t));
361 361
@@ -365,7 +365,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
365 DRM_ERROR("PCI GART memory not allocated!\n"); 365 DRM_ERROR("PCI GART memory not allocated!\n");
366 dev->dev_private = (void *)dev_priv; 366 dev->dev_private = (void *)dev_priv;
367 r128_do_cleanup_cce(dev); 367 r128_do_cleanup_cce(dev);
368 return DRM_ERR(EINVAL); 368 return -EINVAL;
369 } 369 }
370 370
371 dev_priv->usec_timeout = init->usec_timeout; 371 dev_priv->usec_timeout = init->usec_timeout;
@@ -374,7 +374,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
374 DRM_DEBUG("TIMEOUT problem!\n"); 374 DRM_DEBUG("TIMEOUT problem!\n");
375 dev->dev_private = (void *)dev_priv; 375 dev->dev_private = (void *)dev_priv;
376 r128_do_cleanup_cce(dev); 376 r128_do_cleanup_cce(dev);
377 return DRM_ERR(EINVAL); 377 return -EINVAL;
378 } 378 }
379 379
380 dev_priv->cce_mode = init->cce_mode; 380 dev_priv->cce_mode = init->cce_mode;
@@ -394,7 +394,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
394 DRM_DEBUG("Bad cce_mode!\n"); 394 DRM_DEBUG("Bad cce_mode!\n");
395 dev->dev_private = (void *)dev_priv; 395 dev->dev_private = (void *)dev_priv;
396 r128_do_cleanup_cce(dev); 396 r128_do_cleanup_cce(dev);
397 return DRM_ERR(EINVAL); 397 return -EINVAL;
398 } 398 }
399 399
400 switch (init->cce_mode) { 400 switch (init->cce_mode) {
@@ -461,7 +461,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
461 DRM_ERROR("could not find sarea!\n"); 461 DRM_ERROR("could not find sarea!\n");
462 dev->dev_private = (void *)dev_priv; 462 dev->dev_private = (void *)dev_priv;
463 r128_do_cleanup_cce(dev); 463 r128_do_cleanup_cce(dev);
464 return DRM_ERR(EINVAL); 464 return -EINVAL;
465 } 465 }
466 466
467 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); 467 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
@@ -469,21 +469,21 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
469 DRM_ERROR("could not find mmio region!\n"); 469 DRM_ERROR("could not find mmio region!\n");
470 dev->dev_private = (void *)dev_priv; 470 dev->dev_private = (void *)dev_priv;
471 r128_do_cleanup_cce(dev); 471 r128_do_cleanup_cce(dev);
472 return DRM_ERR(EINVAL); 472 return -EINVAL;
473 } 473 }
474 dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset); 474 dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset);
475 if (!dev_priv->cce_ring) { 475 if (!dev_priv->cce_ring) {
476 DRM_ERROR("could not find cce ring region!\n"); 476 DRM_ERROR("could not find cce ring region!\n");
477 dev->dev_private = (void *)dev_priv; 477 dev->dev_private = (void *)dev_priv;
478 r128_do_cleanup_cce(dev); 478 r128_do_cleanup_cce(dev);
479 return DRM_ERR(EINVAL); 479 return -EINVAL;
480 } 480 }
481 dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); 481 dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
482 if (!dev_priv->ring_rptr) { 482 if (!dev_priv->ring_rptr) {
483 DRM_ERROR("could not find ring read pointer!\n"); 483 DRM_ERROR("could not find ring read pointer!\n");
484 dev->dev_private = (void *)dev_priv; 484 dev->dev_private = (void *)dev_priv;
485 r128_do_cleanup_cce(dev); 485 r128_do_cleanup_cce(dev);
486 return DRM_ERR(EINVAL); 486 return -EINVAL;
487 } 487 }
488 dev->agp_buffer_token = init->buffers_offset; 488 dev->agp_buffer_token = init->buffers_offset;
489 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 489 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
@@ -491,7 +491,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
491 DRM_ERROR("could not find dma buffer region!\n"); 491 DRM_ERROR("could not find dma buffer region!\n");
492 dev->dev_private = (void *)dev_priv; 492 dev->dev_private = (void *)dev_priv;
493 r128_do_cleanup_cce(dev); 493 r128_do_cleanup_cce(dev);
494 return DRM_ERR(EINVAL); 494 return -EINVAL;
495 } 495 }
496 496
497 if (!dev_priv->is_pci) { 497 if (!dev_priv->is_pci) {
@@ -501,7 +501,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
501 DRM_ERROR("could not find agp texture region!\n"); 501 DRM_ERROR("could not find agp texture region!\n");
502 dev->dev_private = (void *)dev_priv; 502 dev->dev_private = (void *)dev_priv;
503 r128_do_cleanup_cce(dev); 503 r128_do_cleanup_cce(dev);
504 return DRM_ERR(EINVAL); 504 return -EINVAL;
505 } 505 }
506 } 506 }
507 507
@@ -520,7 +520,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
520 DRM_ERROR("Could not ioremap agp regions!\n"); 520 DRM_ERROR("Could not ioremap agp regions!\n");
521 dev->dev_private = (void *)dev_priv; 521 dev->dev_private = (void *)dev_priv;
522 r128_do_cleanup_cce(dev); 522 r128_do_cleanup_cce(dev);
523 return DRM_ERR(ENOMEM); 523 return -ENOMEM;
524 } 524 }
525 } else 525 } else
526#endif 526#endif
@@ -567,7 +567,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
567 DRM_ERROR("failed to init PCI GART!\n"); 567 DRM_ERROR("failed to init PCI GART!\n");
568 dev->dev_private = (void *)dev_priv; 568 dev->dev_private = (void *)dev_priv;
569 r128_do_cleanup_cce(dev); 569 r128_do_cleanup_cce(dev);
570 return DRM_ERR(ENOMEM); 570 return -ENOMEM;
571 } 571 }
572 R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr); 572 R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr);
573#if __OS_HAS_AGP 573#if __OS_HAS_AGP
@@ -625,35 +625,30 @@ int r128_do_cleanup_cce(struct drm_device * dev)
625 return 0; 625 return 0;
626} 626}
627 627
628int r128_cce_init(DRM_IOCTL_ARGS) 628int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
629{ 629{
630 DRM_DEVICE; 630 drm_r128_init_t *init = data;
631 drm_r128_init_t init;
632 631
633 DRM_DEBUG("\n"); 632 DRM_DEBUG("\n");
634 633
635 LOCK_TEST_WITH_RETURN(dev, filp); 634 LOCK_TEST_WITH_RETURN(dev, file_priv);
636 635
637 DRM_COPY_FROM_USER_IOCTL(init, (drm_r128_init_t __user *) data, 636 switch (init->func) {
638 sizeof(init));
639
640 switch (init.func) {
641 case R128_INIT_CCE: 637 case R128_INIT_CCE:
642 return r128_do_init_cce(dev, &init); 638 return r128_do_init_cce(dev, init);
643 case R128_CLEANUP_CCE: 639 case R128_CLEANUP_CCE:
644 return r128_do_cleanup_cce(dev); 640 return r128_do_cleanup_cce(dev);
645 } 641 }
646 642
647 return DRM_ERR(EINVAL); 643 return -EINVAL;
648} 644}
649 645
650int r128_cce_start(DRM_IOCTL_ARGS) 646int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
651{ 647{
652 DRM_DEVICE;
653 drm_r128_private_t *dev_priv = dev->dev_private; 648 drm_r128_private_t *dev_priv = dev->dev_private;
654 DRM_DEBUG("\n"); 649 DRM_DEBUG("\n");
655 650
656 LOCK_TEST_WITH_RETURN(dev, filp); 651 LOCK_TEST_WITH_RETURN(dev, file_priv);
657 652
658 if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) { 653 if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) {
659 DRM_DEBUG("%s while CCE running\n", __FUNCTION__); 654 DRM_DEBUG("%s while CCE running\n", __FUNCTION__);
@@ -668,30 +663,26 @@ int r128_cce_start(DRM_IOCTL_ARGS)
668/* Stop the CCE. The engine must have been idled before calling this 663/* Stop the CCE. The engine must have been idled before calling this
669 * routine. 664 * routine.
670 */ 665 */
671int r128_cce_stop(DRM_IOCTL_ARGS) 666int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
672{ 667{
673 DRM_DEVICE;
674 drm_r128_private_t *dev_priv = dev->dev_private; 668 drm_r128_private_t *dev_priv = dev->dev_private;
675 drm_r128_cce_stop_t stop; 669 drm_r128_cce_stop_t *stop = data;
676 int ret; 670 int ret;
677 DRM_DEBUG("\n"); 671 DRM_DEBUG("\n");
678 672
679 LOCK_TEST_WITH_RETURN(dev, filp); 673 LOCK_TEST_WITH_RETURN(dev, file_priv);
680
681 DRM_COPY_FROM_USER_IOCTL(stop, (drm_r128_cce_stop_t __user *) data,
682 sizeof(stop));
683 674
684 /* Flush any pending CCE commands. This ensures any outstanding 675 /* Flush any pending CCE commands. This ensures any outstanding
685 * commands are executed by the engine before we turn it off. 676 * commands are executed by the engine before we turn it off.
686 */ 677 */
687 if (stop.flush) { 678 if (stop->flush) {
688 r128_do_cce_flush(dev_priv); 679 r128_do_cce_flush(dev_priv);
689 } 680 }
690 681
691 /* If we fail to make the engine go idle, we return an error 682 /* If we fail to make the engine go idle, we return an error
692 * code so that the DRM ioctl wrapper can try again. 683 * code so that the DRM ioctl wrapper can try again.
693 */ 684 */
694 if (stop.idle) { 685 if (stop->idle) {
695 ret = r128_do_cce_idle(dev_priv); 686 ret = r128_do_cce_idle(dev_priv);
696 if (ret) 687 if (ret)
697 return ret; 688 return ret;
@@ -711,17 +702,16 @@ int r128_cce_stop(DRM_IOCTL_ARGS)
711 702
712/* Just reset the CCE ring. Called as part of an X Server engine reset. 703/* Just reset the CCE ring. Called as part of an X Server engine reset.
713 */ 704 */
714int r128_cce_reset(DRM_IOCTL_ARGS) 705int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
715{ 706{
716 DRM_DEVICE;
717 drm_r128_private_t *dev_priv = dev->dev_private; 707 drm_r128_private_t *dev_priv = dev->dev_private;
718 DRM_DEBUG("\n"); 708 DRM_DEBUG("\n");
719 709
720 LOCK_TEST_WITH_RETURN(dev, filp); 710 LOCK_TEST_WITH_RETURN(dev, file_priv);
721 711
722 if (!dev_priv) { 712 if (!dev_priv) {
723 DRM_DEBUG("%s called before init done\n", __FUNCTION__); 713 DRM_DEBUG("%s called before init done\n", __FUNCTION__);
724 return DRM_ERR(EINVAL); 714 return -EINVAL;
725 } 715 }
726 716
727 r128_do_cce_reset(dev_priv); 717 r128_do_cce_reset(dev_priv);
@@ -732,13 +722,12 @@ int r128_cce_reset(DRM_IOCTL_ARGS)
732 return 0; 722 return 0;
733} 723}
734 724
735int r128_cce_idle(DRM_IOCTL_ARGS) 725int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
736{ 726{
737 DRM_DEVICE;
738 drm_r128_private_t *dev_priv = dev->dev_private; 727 drm_r128_private_t *dev_priv = dev->dev_private;
739 DRM_DEBUG("\n"); 728 DRM_DEBUG("\n");
740 729
741 LOCK_TEST_WITH_RETURN(dev, filp); 730 LOCK_TEST_WITH_RETURN(dev, file_priv);
742 731
743 if (dev_priv->cce_running) { 732 if (dev_priv->cce_running) {
744 r128_do_cce_flush(dev_priv); 733 r128_do_cce_flush(dev_priv);
@@ -747,19 +736,18 @@ int r128_cce_idle(DRM_IOCTL_ARGS)
747 return r128_do_cce_idle(dev_priv); 736 return r128_do_cce_idle(dev_priv);
748} 737}
749 738
750int r128_engine_reset(DRM_IOCTL_ARGS) 739int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
751{ 740{
752 DRM_DEVICE;
753 DRM_DEBUG("\n"); 741 DRM_DEBUG("\n");
754 742
755 LOCK_TEST_WITH_RETURN(dev, filp); 743 LOCK_TEST_WITH_RETURN(dev, file_priv);
756 744
757 return r128_do_engine_reset(dev); 745 return r128_do_engine_reset(dev);
758} 746}
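LOCK_TEST_WITH_RETURN now takes the drm_file pointer instead of the raw struct file, because lock ownership is tracked per DRM file context after this series. Conceptually the macro aborts the ioctl unless the calling context currently holds the hardware lock; the expansion below is an approximation of the drmP.h helper, not a quote from it.

    /* Approximate behaviour of LOCK_TEST_WITH_RETURN(dev, file_priv):
     * refuse the ioctl unless this file context holds the HW lock. */
    #define LOCK_TEST_WITH_RETURN(dev, _file_priv)                          \
    do {                                                                    \
            if (!_DRM_LOCK_IS_HELD((dev)->lock.hw_lock->lock) ||            \
                (dev)->lock.file_priv != (_file_priv)) {                    \
                    DRM_ERROR("called without lock held\n");                \
                    return -EINVAL;                                         \
            }                                                               \
    } while (0)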
759 747
760int r128_fullscreen(DRM_IOCTL_ARGS) 748int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
761{ 749{
762 return DRM_ERR(EINVAL); 750 return -EINVAL;
763} 751}
764 752
765/* ================================================================ 753/* ================================================================
@@ -780,7 +768,7 @@ static int r128_freelist_init(struct drm_device * dev)
780 768
781 dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); 769 dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
782 if (dev_priv->head == NULL) 770 if (dev_priv->head == NULL)
783 return DRM_ERR(ENOMEM); 771 return -ENOMEM;
784 772
785 memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t)); 773 memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t));
786 dev_priv->head->age = R128_BUFFER_USED; 774 dev_priv->head->age = R128_BUFFER_USED;
@@ -791,7 +779,7 @@ static int r128_freelist_init(struct drm_device * dev)
791 779
792 entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); 780 entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
793 if (!entry) 781 if (!entry)
794 return DRM_ERR(ENOMEM); 782 return -ENOMEM;
795 783
796 entry->age = R128_BUFFER_FREE; 784 entry->age = R128_BUFFER_FREE;
797 entry->buf = buf; 785 entry->buf = buf;
@@ -828,7 +816,7 @@ static struct drm_buf *r128_freelist_get(struct drm_device * dev)
828 for (i = 0; i < dma->buf_count; i++) { 816 for (i = 0; i < dma->buf_count; i++) {
829 buf = dma->buflist[i]; 817 buf = dma->buflist[i];
830 buf_priv = buf->dev_private; 818 buf_priv = buf->dev_private;
831 if (buf->filp == 0) 819 if (buf->file_priv == 0)
832 return buf; 820 return buf;
833 } 821 }
834 822
@@ -883,10 +871,12 @@ int r128_wait_ring(drm_r128_private_t * dev_priv, int n)
883 871
884 /* FIXME: This is being ignored... */ 872 /* FIXME: This is being ignored... */
885 DRM_ERROR("failed!\n"); 873 DRM_ERROR("failed!\n");
886 return DRM_ERR(EBUSY); 874 return -EBUSY;
887} 875}
888 876
889static int r128_cce_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d) 877static int r128_cce_get_buffers(struct drm_device * dev,
878 struct drm_file *file_priv,
879 struct drm_dma * d)
890{ 880{
891 int i; 881 int i;
892 struct drm_buf *buf; 882 struct drm_buf *buf;
@@ -894,57 +884,51 @@ static int r128_cce_get_buffers(DRMFILE filp, struct drm_device * dev, struct dr
894 for (i = d->granted_count; i < d->request_count; i++) { 884 for (i = d->granted_count; i < d->request_count; i++) {
895 buf = r128_freelist_get(dev); 885 buf = r128_freelist_get(dev);
896 if (!buf) 886 if (!buf)
897 return DRM_ERR(EAGAIN); 887 return -EAGAIN;
898 888
899 buf->filp = filp; 889 buf->file_priv = file_priv;
900 890
901 if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, 891 if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
902 sizeof(buf->idx))) 892 sizeof(buf->idx)))
903 return DRM_ERR(EFAULT); 893 return -EFAULT;
904 if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, 894 if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
905 sizeof(buf->total))) 895 sizeof(buf->total)))
906 return DRM_ERR(EFAULT); 896 return -EFAULT;
907 897
908 d->granted_count++; 898 d->granted_count++;
909 } 899 }
910 return 0; 900 return 0;
911} 901}
912 902
913int r128_cce_buffers(DRM_IOCTL_ARGS) 903int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
914{ 904{
915 DRM_DEVICE;
916 struct drm_device_dma *dma = dev->dma; 905 struct drm_device_dma *dma = dev->dma;
917 int ret = 0; 906 int ret = 0;
918 struct drm_dma __user *argp = (void __user *)data; 907 struct drm_dma *d = data;
919 struct drm_dma d;
920 908
921 LOCK_TEST_WITH_RETURN(dev, filp); 909 LOCK_TEST_WITH_RETURN(dev, file_priv);
922
923 DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d));
924 910
925 /* Please don't send us buffers. 911 /* Please don't send us buffers.
926 */ 912 */
927 if (d.send_count != 0) { 913 if (d->send_count != 0) {
928 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", 914 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
929 DRM_CURRENTPID, d.send_count); 915 DRM_CURRENTPID, d->send_count);
930 return DRM_ERR(EINVAL); 916 return -EINVAL;
931 } 917 }
932 918
933 /* We'll send you buffers. 919 /* We'll send you buffers.
934 */ 920 */
935 if (d.request_count < 0 || d.request_count > dma->buf_count) { 921 if (d->request_count < 0 || d->request_count > dma->buf_count) {
936 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", 922 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
937 DRM_CURRENTPID, d.request_count, dma->buf_count); 923 DRM_CURRENTPID, d->request_count, dma->buf_count);
938 return DRM_ERR(EINVAL); 924 return -EINVAL;
939 } 925 }
940 926
941 d.granted_count = 0; 927 d->granted_count = 0;
942 928
943 if (d.request_count) { 929 if (d->request_count) {
944 ret = r128_cce_get_buffers(filp, dev, &d); 930 ret = r128_cce_get_buffers(dev, file_priv, d);
945 } 931 }
946 932
947 DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d));
948
949 return ret; 933 return ret;
950} 934}
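r128_cce_buffers collects two effects of the rework in one place. The struct drm_dma argument now arrives already copied, so both the local copy and the closing DRM_COPY_TO_USER_IOCTL of the whole struct are gone; since the DMA command is encoded as in/out, the updated granted_count travels back automatically. In addition, buffer ownership is recorded as buf->file_priv rather than buf->filp, so buffers are handed to a DRM file context instead of a struct file. A sketch of the granting loop under those assumptions (foo_freelist_get is a placeholder):

    /* Sketch: hand out DMA buffers to one file context. */
    static int foo_get_buffers(struct drm_device *dev,
                               struct drm_file *file_priv,
                               struct drm_dma *d)
    {
            int i;

            for (i = d->granted_count; i < d->request_count; i++) {
                    struct drm_buf *buf = foo_freelist_get(dev);    /* hypothetical */

                    if (!buf)
                            return -EAGAIN;         /* no free buffer right now */

                    buf->file_priv = file_priv;     /* record the new owner */

                    /* request_indices/request_sizes are user pointers inside
                     * struct drm_dma, so they still need an explicit copy. */
                    if (DRM_COPY_TO_USER(&d->request_indices[i],
                                         &buf->idx, sizeof(buf->idx)))
                            return -EFAULT;
                    if (DRM_COPY_TO_USER(&d->request_sizes[i],
                                         &buf->total, sizeof(buf->total)))
                            return -EFAULT;

                    d->granted_count++;
            }
            return 0;
    }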
diff --git a/drivers/char/drm/r128_drm.h b/drivers/char/drm/r128_drm.h
index e94a39c6e327..8d8878b55f55 100644
--- a/drivers/char/drm/r128_drm.h
+++ b/drivers/char/drm/r128_drm.h
@@ -222,11 +222,7 @@ typedef struct drm_r128_init {
222 R128_INIT_CCE = 0x01, 222 R128_INIT_CCE = 0x01,
223 R128_CLEANUP_CCE = 0x02 223 R128_CLEANUP_CCE = 0x02
224 } func; 224 } func;
225#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
226 int sarea_priv_offset;
227#else
228 unsigned long sarea_priv_offset; 225 unsigned long sarea_priv_offset;
229#endif
230 int is_pci; 226 int is_pci;
231 int cce_mode; 227 int cce_mode;
232 int cce_secure; 228 int cce_secure;
@@ -240,21 +236,12 @@ typedef struct drm_r128_init {
240 unsigned int depth_offset, depth_pitch; 236 unsigned int depth_offset, depth_pitch;
241 unsigned int span_offset; 237 unsigned int span_offset;
242 238
243#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
244 unsigned int fb_offset;
245 unsigned int mmio_offset;
246 unsigned int ring_offset;
247 unsigned int ring_rptr_offset;
248 unsigned int buffers_offset;
249 unsigned int agp_textures_offset;
250#else
251 unsigned long fb_offset; 239 unsigned long fb_offset;
252 unsigned long mmio_offset; 240 unsigned long mmio_offset;
253 unsigned long ring_offset; 241 unsigned long ring_offset;
254 unsigned long ring_rptr_offset; 242 unsigned long ring_rptr_offset;
255 unsigned long buffers_offset; 243 unsigned long buffers_offset;
256 unsigned long agp_textures_offset; 244 unsigned long agp_textures_offset;
257#endif
258} drm_r128_init_t; 245} drm_r128_init_t;
259 246
260typedef struct drm_r128_cce_stop { 247typedef struct drm_r128_cce_stop {
@@ -264,15 +251,10 @@ typedef struct drm_r128_cce_stop {
264 251
265typedef struct drm_r128_clear { 252typedef struct drm_r128_clear {
266 unsigned int flags; 253 unsigned int flags;
267#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
268 int x, y, w, h;
269#endif
270 unsigned int clear_color; 254 unsigned int clear_color;
271 unsigned int clear_depth; 255 unsigned int clear_depth;
272#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0)
273 unsigned int color_mask; 256 unsigned int color_mask;
274 unsigned int depth_mask; 257 unsigned int depth_mask;
275#endif
276} drm_r128_clear_t; 258} drm_r128_clear_t;
277 259
278typedef struct drm_r128_vertex { 260typedef struct drm_r128_vertex {
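The r128_drm.h hunks drop the CONFIG_XFREE86_VERSION conditionals, leaving drm_r128_init_t and drm_r128_clear_t with a single layout, the one used by XFree86 4.1 and later, instead of two incompatible ones chosen at compile time. These structs are the userspace ABI for the init and clear ioctls, so removing the pre-4.1 branch is only safe because no supported userspace still relies on it. One way to guard such a header against accidental layout drift, shown purely as an illustration and not part of this patch, is a compile-time size check:

    /* Illustrative guard: pin an ioctl struct's size so an accidental
     * field change breaks the build instead of silently breaking the ABI. */
    struct foo_clear {
            unsigned int flags;
            unsigned int clear_color;
            unsigned int clear_depth;
            unsigned int color_mask;
            unsigned int depth_mask;
    };

    typedef char foo_clear_size_check[(sizeof(struct foo_clear) ==
                                       5 * sizeof(unsigned int)) ? 1 : -1];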
diff --git a/drivers/char/drm/r128_drv.h b/drivers/char/drm/r128_drv.h
index 72249fb2fd1c..250d2aa46581 100644
--- a/drivers/char/drm/r128_drv.h
+++ b/drivers/char/drm/r128_drv.h
@@ -129,18 +129,18 @@ typedef struct drm_r128_buf_priv {
129 drm_r128_freelist_t *list_entry; 129 drm_r128_freelist_t *list_entry;
130} drm_r128_buf_priv_t; 130} drm_r128_buf_priv_t;
131 131
132extern drm_ioctl_desc_t r128_ioctls[]; 132extern struct drm_ioctl_desc r128_ioctls[];
133extern int r128_max_ioctl; 133extern int r128_max_ioctl;
134 134
135 /* r128_cce.c */ 135 /* r128_cce.c */
136extern int r128_cce_init(DRM_IOCTL_ARGS); 136extern int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
137extern int r128_cce_start(DRM_IOCTL_ARGS); 137extern int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
138extern int r128_cce_stop(DRM_IOCTL_ARGS); 138extern int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
139extern int r128_cce_reset(DRM_IOCTL_ARGS); 139extern int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
140extern int r128_cce_idle(DRM_IOCTL_ARGS); 140extern int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
141extern int r128_engine_reset(DRM_IOCTL_ARGS); 141extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
142extern int r128_fullscreen(DRM_IOCTL_ARGS); 142extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
143extern int r128_cce_buffers(DRM_IOCTL_ARGS); 143extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
144 144
145extern void r128_freelist_reset(struct drm_device * dev); 145extern void r128_freelist_reset(struct drm_device * dev);
146 146
@@ -156,7 +156,8 @@ extern void r128_driver_irq_preinstall(struct drm_device * dev);
156extern void r128_driver_irq_postinstall(struct drm_device * dev); 156extern void r128_driver_irq_postinstall(struct drm_device * dev);
157extern void r128_driver_irq_uninstall(struct drm_device * dev); 157extern void r128_driver_irq_uninstall(struct drm_device * dev);
158extern void r128_driver_lastclose(struct drm_device * dev); 158extern void r128_driver_lastclose(struct drm_device * dev);
159extern void r128_driver_preclose(struct drm_device * dev, DRMFILE filp); 159extern void r128_driver_preclose(struct drm_device * dev,
160 struct drm_file *file_priv);
160 161
161extern long r128_compat_ioctl(struct file *filp, unsigned int cmd, 162extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
162 unsigned long arg); 163 unsigned long arg);
@@ -428,7 +429,7 @@ do { \
428 DRM_UDELAY(1); \ 429 DRM_UDELAY(1); \
429 } \ 430 } \
430 DRM_ERROR( "ring space check failed!\n" ); \ 431 DRM_ERROR( "ring space check failed!\n" ); \
431 return DRM_ERR(EBUSY); \ 432 return -EBUSY; \
432 } \ 433 } \
433 __ring_space_done: \ 434 __ring_space_done: \
434 ; \ 435 ; \
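The r128_drv.h changes carry the same rework into the driver's internal interface: every exported ioctl handler prototype moves to the three-argument form, and callbacks that used to take a DRMFILE, such as r128_driver_preclose, now take a struct drm_file *. Below is a sketch of how such a per-file cleanup hook plugs into the driver structure; the field names follow my understanding of the drm_driver layout of this period, so treat them as an assumption.

    /* Sketch: per-file cleanup hook with the new signature. */
    static void foo_driver_preclose(struct drm_device *dev,
                                    struct drm_file *file_priv)
    {
            /* Typically: release any DMA buffers still owned by this file
             * context, e.g. by walking dev->dma and clearing buf->file_priv. */
    }

    static struct drm_driver foo_driver = {
            .preclose = foo_driver_preclose,
            .ioctls   = foo_ioctls,         /* table built with DRM_IOCTL_DEF() */
            /* ... other hooks elided ... */
    };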
diff --git a/drivers/char/drm/r128_state.c b/drivers/char/drm/r128_state.c
index 7b334fb7d649..b7f483cac6d4 100644
--- a/drivers/char/drm/r128_state.c
+++ b/drivers/char/drm/r128_state.c
@@ -776,8 +776,9 @@ static void r128_cce_dispatch_indices(struct drm_device * dev,
776 sarea_priv->nbox = 0; 776 sarea_priv->nbox = 0;
777} 777}
778 778
779static int r128_cce_dispatch_blit(DRMFILE filp, 779static int r128_cce_dispatch_blit(struct drm_device * dev,
780 struct drm_device * dev, drm_r128_blit_t * blit) 780 struct drm_file *file_priv,
781 drm_r128_blit_t * blit)
781{ 782{
782 drm_r128_private_t *dev_priv = dev->dev_private; 783 drm_r128_private_t *dev_priv = dev->dev_private;
783 struct drm_device_dma *dma = dev->dma; 784 struct drm_device_dma *dma = dev->dma;
@@ -809,7 +810,7 @@ static int r128_cce_dispatch_blit(DRMFILE filp,
809 break; 810 break;
810 default: 811 default:
811 DRM_ERROR("invalid blit format %d\n", blit->format); 812 DRM_ERROR("invalid blit format %d\n", blit->format);
812 return DRM_ERR(EINVAL); 813 return -EINVAL;
813 } 814 }
814 815
815 /* Flush the pixel cache, and mark the contents as Read Invalid. 816 /* Flush the pixel cache, and mark the contents as Read Invalid.
@@ -829,14 +830,14 @@ static int r128_cce_dispatch_blit(DRMFILE filp,
829 buf = dma->buflist[blit->idx]; 830 buf = dma->buflist[blit->idx];
830 buf_priv = buf->dev_private; 831 buf_priv = buf->dev_private;
831 832
832 if (buf->filp != filp) { 833 if (buf->file_priv != file_priv) {
833 DRM_ERROR("process %d using buffer owned by %p\n", 834 DRM_ERROR("process %d using buffer owned by %p\n",
834 DRM_CURRENTPID, buf->filp); 835 DRM_CURRENTPID, buf->file_priv);
835 return DRM_ERR(EINVAL); 836 return -EINVAL;
836 } 837 }
837 if (buf->pending) { 838 if (buf->pending) {
838 DRM_ERROR("sending pending buffer %d\n", blit->idx); 839 DRM_ERROR("sending pending buffer %d\n", blit->idx);
839 return DRM_ERR(EINVAL); 840 return -EINVAL;
840 } 841 }
841 842
842 buf_priv->discard = 1; 843 buf_priv->discard = 1;
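Dispatch helpers that consume client-submitted buffers, such as r128_cce_dispatch_blit above, now receive file_priv as a parameter so they can check that the buffer being submitted actually belongs to the calling file context; the comparison buf->file_priv != file_priv replaces the old comparison of struct file pointers. The guard can be factored out as below (sketch, helper name invented):

    /* Sketch: reject buffers that belong to another file context or
     * that are already queued to the hardware. */
    static int foo_check_buf(struct drm_buf *buf,
                             struct drm_file *file_priv, int idx)
    {
            if (buf->file_priv != file_priv) {
                    DRM_ERROR("process %d using buffer owned by %p\n",
                              DRM_CURRENTPID, buf->file_priv);
                    return -EINVAL;
            }
            if (buf->pending) {
                    DRM_ERROR("sending pending buffer %d\n", idx);
                    return -EINVAL;
            }
            return 0;
    }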
@@ -900,22 +901,22 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev,
900 901
901 count = depth->n; 902 count = depth->n;
902 if (count > 4096 || count <= 0) 903 if (count > 4096 || count <= 0)
903 return DRM_ERR(EMSGSIZE); 904 return -EMSGSIZE;
904 905
905 if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) { 906 if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
906 return DRM_ERR(EFAULT); 907 return -EFAULT;
907 } 908 }
908 if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) { 909 if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
909 return DRM_ERR(EFAULT); 910 return -EFAULT;
910 } 911 }
911 912
912 buffer_size = depth->n * sizeof(u32); 913 buffer_size = depth->n * sizeof(u32);
913 buffer = drm_alloc(buffer_size, DRM_MEM_BUFS); 914 buffer = drm_alloc(buffer_size, DRM_MEM_BUFS);
914 if (buffer == NULL) 915 if (buffer == NULL)
915 return DRM_ERR(ENOMEM); 916 return -ENOMEM;
916 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { 917 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
917 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 918 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
918 return DRM_ERR(EFAULT); 919 return -EFAULT;
919 } 920 }
920 921
921 mask_size = depth->n * sizeof(u8); 922 mask_size = depth->n * sizeof(u8);
@@ -923,12 +924,12 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev,
923 mask = drm_alloc(mask_size, DRM_MEM_BUFS); 924 mask = drm_alloc(mask_size, DRM_MEM_BUFS);
924 if (mask == NULL) { 925 if (mask == NULL) {
925 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 926 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
926 return DRM_ERR(ENOMEM); 927 return -ENOMEM;
927 } 928 }
928 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { 929 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
929 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 930 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
930 drm_free(mask, mask_size, DRM_MEM_BUFS); 931 drm_free(mask, mask_size, DRM_MEM_BUFS);
931 return DRM_ERR(EFAULT); 932 return -EFAULT;
932 } 933 }
933 934
934 for (i = 0; i < count; i++, x++) { 935 for (i = 0; i < count; i++, x++) {
@@ -996,28 +997,28 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
996 997
997 count = depth->n; 998 count = depth->n;
998 if (count > 4096 || count <= 0) 999 if (count > 4096 || count <= 0)
999 return DRM_ERR(EMSGSIZE); 1000 return -EMSGSIZE;
1000 1001
1001 xbuf_size = count * sizeof(*x); 1002 xbuf_size = count * sizeof(*x);
1002 ybuf_size = count * sizeof(*y); 1003 ybuf_size = count * sizeof(*y);
1003 x = drm_alloc(xbuf_size, DRM_MEM_BUFS); 1004 x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
1004 if (x == NULL) { 1005 if (x == NULL) {
1005 return DRM_ERR(ENOMEM); 1006 return -ENOMEM;
1006 } 1007 }
1007 y = drm_alloc(ybuf_size, DRM_MEM_BUFS); 1008 y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
1008 if (y == NULL) { 1009 if (y == NULL) {
1009 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1010 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1010 return DRM_ERR(ENOMEM); 1011 return -ENOMEM;
1011 } 1012 }
1012 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { 1013 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
1013 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1014 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1014 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1015 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1015 return DRM_ERR(EFAULT); 1016 return -EFAULT;
1016 } 1017 }
1017 if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) { 1018 if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) {
1018 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1019 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1019 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1020 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1020 return DRM_ERR(EFAULT); 1021 return -EFAULT;
1021 } 1022 }
1022 1023
1023 buffer_size = depth->n * sizeof(u32); 1024 buffer_size = depth->n * sizeof(u32);
@@ -1025,13 +1026,13 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
1025 if (buffer == NULL) { 1026 if (buffer == NULL) {
1026 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1027 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1027 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1028 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1028 return DRM_ERR(ENOMEM); 1029 return -ENOMEM;
1029 } 1030 }
1030 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { 1031 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
1031 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1032 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1032 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1033 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1033 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 1034 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
1034 return DRM_ERR(EFAULT); 1035 return -EFAULT;
1035 } 1036 }
1036 1037
1037 if (depth->mask) { 1038 if (depth->mask) {
@@ -1041,14 +1042,14 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
1041 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1042 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1042 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1043 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1043 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 1044 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
1044 return DRM_ERR(ENOMEM); 1045 return -ENOMEM;
1045 } 1046 }
1046 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { 1047 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
1047 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1048 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1048 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1049 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1049 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 1050 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
1050 drm_free(mask, mask_size, DRM_MEM_BUFS); 1051 drm_free(mask, mask_size, DRM_MEM_BUFS);
1051 return DRM_ERR(EFAULT); 1052 return -EFAULT;
1052 } 1053 }
1053 1054
1054 for (i = 0; i < count; i++) { 1055 for (i = 0; i < count; i++) {
@@ -1115,13 +1116,13 @@ static int r128_cce_dispatch_read_span(struct drm_device * dev,
1115 1116
1116 count = depth->n; 1117 count = depth->n;
1117 if (count > 4096 || count <= 0) 1118 if (count > 4096 || count <= 0)
1118 return DRM_ERR(EMSGSIZE); 1119 return -EMSGSIZE;
1119 1120
1120 if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) { 1121 if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
1121 return DRM_ERR(EFAULT); 1122 return -EFAULT;
1122 } 1123 }
1123 if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) { 1124 if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
1124 return DRM_ERR(EFAULT); 1125 return -EFAULT;
1125 } 1126 }
1126 1127
1127 BEGIN_RING(7); 1128 BEGIN_RING(7);
@@ -1159,7 +1160,7 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
1159 1160
1160 count = depth->n; 1161 count = depth->n;
1161 if (count > 4096 || count <= 0) 1162 if (count > 4096 || count <= 0)
1162 return DRM_ERR(EMSGSIZE); 1163 return -EMSGSIZE;
1163 1164
1164 if (count > dev_priv->depth_pitch) { 1165 if (count > dev_priv->depth_pitch) {
1165 count = dev_priv->depth_pitch; 1166 count = dev_priv->depth_pitch;
@@ -1169,22 +1170,22 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
1169 ybuf_size = count * sizeof(*y); 1170 ybuf_size = count * sizeof(*y);
1170 x = drm_alloc(xbuf_size, DRM_MEM_BUFS); 1171 x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
1171 if (x == NULL) { 1172 if (x == NULL) {
1172 return DRM_ERR(ENOMEM); 1173 return -ENOMEM;
1173 } 1174 }
1174 y = drm_alloc(ybuf_size, DRM_MEM_BUFS); 1175 y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
1175 if (y == NULL) { 1176 if (y == NULL) {
1176 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1177 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1177 return DRM_ERR(ENOMEM); 1178 return -ENOMEM;
1178 } 1179 }
1179 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { 1180 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
1180 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1181 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1181 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1182 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1182 return DRM_ERR(EFAULT); 1183 return -EFAULT;
1183 } 1184 }
1184 if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) { 1185 if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
1185 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1186 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1186 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1187 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1187 return DRM_ERR(EFAULT); 1188 return -EFAULT;
1188 } 1189 }
1189 1190
1190 for (i = 0; i < count; i++) { 1191 for (i = 0; i < count; i++) {
@@ -1241,25 +1242,21 @@ static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple)
1241 * IOCTL functions 1242 * IOCTL functions
1242 */ 1243 */
1243 1244
1244static int r128_cce_clear(DRM_IOCTL_ARGS) 1245static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
1245{ 1246{
1246 DRM_DEVICE;
1247 drm_r128_private_t *dev_priv = dev->dev_private; 1247 drm_r128_private_t *dev_priv = dev->dev_private;
1248 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; 1248 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
1249 drm_r128_clear_t clear; 1249 drm_r128_clear_t *clear = data;
1250 DRM_DEBUG("\n"); 1250 DRM_DEBUG("\n");
1251 1251
1252 LOCK_TEST_WITH_RETURN(dev, filp); 1252 LOCK_TEST_WITH_RETURN(dev, file_priv);
1253
1254 DRM_COPY_FROM_USER_IOCTL(clear, (drm_r128_clear_t __user *) data,
1255 sizeof(clear));
1256 1253
1257 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1254 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1258 1255
1259 if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS) 1256 if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
1260 sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS; 1257 sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
1261 1258
1262 r128_cce_dispatch_clear(dev, &clear); 1259 r128_cce_dispatch_clear(dev, clear);
1263 COMMIT_RING(); 1260 COMMIT_RING();
1264 1261
1265 /* Make sure we restore the 3D state next time. 1262 /* Make sure we restore the 3D state next time.
@@ -1309,13 +1306,12 @@ static int r128_do_cleanup_pageflip(struct drm_device * dev)
1309 * They can & should be intermixed to support multiple 3d windows. 1306 * They can & should be intermixed to support multiple 3d windows.
1310 */ 1307 */
1311 1308
1312static int r128_cce_flip(DRM_IOCTL_ARGS) 1309static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
1313{ 1310{
1314 DRM_DEVICE;
1315 drm_r128_private_t *dev_priv = dev->dev_private; 1311 drm_r128_private_t *dev_priv = dev->dev_private;
1316 DRM_DEBUG("%s\n", __FUNCTION__); 1312 DRM_DEBUG("%s\n", __FUNCTION__);
1317 1313
1318 LOCK_TEST_WITH_RETURN(dev, filp); 1314 LOCK_TEST_WITH_RETURN(dev, file_priv);
1319 1315
1320 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1316 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1321 1317
@@ -1328,14 +1324,13 @@ static int r128_cce_flip(DRM_IOCTL_ARGS)
1328 return 0; 1324 return 0;
1329} 1325}
1330 1326
1331static int r128_cce_swap(DRM_IOCTL_ARGS) 1327static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
1332{ 1328{
1333 DRM_DEVICE;
1334 drm_r128_private_t *dev_priv = dev->dev_private; 1329 drm_r128_private_t *dev_priv = dev->dev_private;
1335 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; 1330 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
1336 DRM_DEBUG("%s\n", __FUNCTION__); 1331 DRM_DEBUG("%s\n", __FUNCTION__);
1337 1332
1338 LOCK_TEST_WITH_RETURN(dev, filp); 1333 LOCK_TEST_WITH_RETURN(dev, file_priv);
1339 1334
1340 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1335 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1341 1336
@@ -1350,58 +1345,54 @@ static int r128_cce_swap(DRM_IOCTL_ARGS)
1350 return 0; 1345 return 0;
1351} 1346}
1352 1347
1353static int r128_cce_vertex(DRM_IOCTL_ARGS) 1348static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
1354{ 1349{
1355 DRM_DEVICE;
1356 drm_r128_private_t *dev_priv = dev->dev_private; 1350 drm_r128_private_t *dev_priv = dev->dev_private;
1357 struct drm_device_dma *dma = dev->dma; 1351 struct drm_device_dma *dma = dev->dma;
1358 struct drm_buf *buf; 1352 struct drm_buf *buf;
1359 drm_r128_buf_priv_t *buf_priv; 1353 drm_r128_buf_priv_t *buf_priv;
1360 drm_r128_vertex_t vertex; 1354 drm_r128_vertex_t *vertex = data;
1361 1355
1362 LOCK_TEST_WITH_RETURN(dev, filp); 1356 LOCK_TEST_WITH_RETURN(dev, file_priv);
1363 1357
1364 if (!dev_priv) { 1358 if (!dev_priv) {
1365 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1359 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1366 return DRM_ERR(EINVAL); 1360 return -EINVAL;
1367 } 1361 }
1368 1362
1369 DRM_COPY_FROM_USER_IOCTL(vertex, (drm_r128_vertex_t __user *) data,
1370 sizeof(vertex));
1371
1372 DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n", 1363 DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
1373 DRM_CURRENTPID, vertex.idx, vertex.count, vertex.discard); 1364 DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
1374 1365
1375 if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { 1366 if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
1376 DRM_ERROR("buffer index %d (of %d max)\n", 1367 DRM_ERROR("buffer index %d (of %d max)\n",
1377 vertex.idx, dma->buf_count - 1); 1368 vertex->idx, dma->buf_count - 1);
1378 return DRM_ERR(EINVAL); 1369 return -EINVAL;
1379 } 1370 }
1380 if (vertex.prim < 0 || 1371 if (vertex->prim < 0 ||
1381 vertex.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { 1372 vertex->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
1382 DRM_ERROR("buffer prim %d\n", vertex.prim); 1373 DRM_ERROR("buffer prim %d\n", vertex->prim);
1383 return DRM_ERR(EINVAL); 1374 return -EINVAL;
1384 } 1375 }
1385 1376
1386 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1377 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1387 VB_AGE_TEST_WITH_RETURN(dev_priv); 1378 VB_AGE_TEST_WITH_RETURN(dev_priv);
1388 1379
1389 buf = dma->buflist[vertex.idx]; 1380 buf = dma->buflist[vertex->idx];
1390 buf_priv = buf->dev_private; 1381 buf_priv = buf->dev_private;
1391 1382
1392 if (buf->filp != filp) { 1383 if (buf->file_priv != file_priv) {
1393 DRM_ERROR("process %d using buffer owned by %p\n", 1384 DRM_ERROR("process %d using buffer owned by %p\n",
1394 DRM_CURRENTPID, buf->filp); 1385 DRM_CURRENTPID, buf->file_priv);
1395 return DRM_ERR(EINVAL); 1386 return -EINVAL;
1396 } 1387 }
1397 if (buf->pending) { 1388 if (buf->pending) {
1398 DRM_ERROR("sending pending buffer %d\n", vertex.idx); 1389 DRM_ERROR("sending pending buffer %d\n", vertex->idx);
1399 return DRM_ERR(EINVAL); 1390 return -EINVAL;
1400 } 1391 }
1401 1392
1402 buf->used = vertex.count; 1393 buf->used = vertex->count;
1403 buf_priv->prim = vertex.prim; 1394 buf_priv->prim = vertex->prim;
1404 buf_priv->discard = vertex.discard; 1395 buf_priv->discard = vertex->discard;
1405 1396
1406 r128_cce_dispatch_vertex(dev, buf); 1397 r128_cce_dispatch_vertex(dev, buf);
1407 1398
@@ -1409,134 +1400,123 @@ static int r128_cce_vertex(DRM_IOCTL_ARGS)
1409 return 0; 1400 return 0;
1410} 1401}
1411 1402
1412static int r128_cce_indices(DRM_IOCTL_ARGS) 1403static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
1413{ 1404{
1414 DRM_DEVICE;
1415 drm_r128_private_t *dev_priv = dev->dev_private; 1405 drm_r128_private_t *dev_priv = dev->dev_private;
1416 struct drm_device_dma *dma = dev->dma; 1406 struct drm_device_dma *dma = dev->dma;
1417 struct drm_buf *buf; 1407 struct drm_buf *buf;
1418 drm_r128_buf_priv_t *buf_priv; 1408 drm_r128_buf_priv_t *buf_priv;
1419 drm_r128_indices_t elts; 1409 drm_r128_indices_t *elts = data;
1420 int count; 1410 int count;
1421 1411
1422 LOCK_TEST_WITH_RETURN(dev, filp); 1412 LOCK_TEST_WITH_RETURN(dev, file_priv);
1423 1413
1424 if (!dev_priv) { 1414 if (!dev_priv) {
1425 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1415 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1426 return DRM_ERR(EINVAL); 1416 return -EINVAL;
1427 } 1417 }
1428 1418
1429 DRM_COPY_FROM_USER_IOCTL(elts, (drm_r128_indices_t __user *) data,
1430 sizeof(elts));
1431
1432 DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID, 1419 DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
1433 elts.idx, elts.start, elts.end, elts.discard); 1420 elts->idx, elts->start, elts->end, elts->discard);
1434 1421
1435 if (elts.idx < 0 || elts.idx >= dma->buf_count) { 1422 if (elts->idx < 0 || elts->idx >= dma->buf_count) {
1436 DRM_ERROR("buffer index %d (of %d max)\n", 1423 DRM_ERROR("buffer index %d (of %d max)\n",
1437 elts.idx, dma->buf_count - 1); 1424 elts->idx, dma->buf_count - 1);
1438 return DRM_ERR(EINVAL); 1425 return -EINVAL;
1439 } 1426 }
1440 if (elts.prim < 0 || elts.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { 1427 if (elts->prim < 0 ||
1441 DRM_ERROR("buffer prim %d\n", elts.prim); 1428 elts->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
1442 return DRM_ERR(EINVAL); 1429 DRM_ERROR("buffer prim %d\n", elts->prim);
1430 return -EINVAL;
1443 } 1431 }
1444 1432
1445 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1433 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1446 VB_AGE_TEST_WITH_RETURN(dev_priv); 1434 VB_AGE_TEST_WITH_RETURN(dev_priv);
1447 1435
1448 buf = dma->buflist[elts.idx]; 1436 buf = dma->buflist[elts->idx];
1449 buf_priv = buf->dev_private; 1437 buf_priv = buf->dev_private;
1450 1438
1451 if (buf->filp != filp) { 1439 if (buf->file_priv != file_priv) {
1452 DRM_ERROR("process %d using buffer owned by %p\n", 1440 DRM_ERROR("process %d using buffer owned by %p\n",
1453 DRM_CURRENTPID, buf->filp); 1441 DRM_CURRENTPID, buf->file_priv);
1454 return DRM_ERR(EINVAL); 1442 return -EINVAL;
1455 } 1443 }
1456 if (buf->pending) { 1444 if (buf->pending) {
1457 DRM_ERROR("sending pending buffer %d\n", elts.idx); 1445 DRM_ERROR("sending pending buffer %d\n", elts->idx);
1458 return DRM_ERR(EINVAL); 1446 return -EINVAL;
1459 } 1447 }
1460 1448
1461 count = (elts.end - elts.start) / sizeof(u16); 1449 count = (elts->end - elts->start) / sizeof(u16);
1462 elts.start -= R128_INDEX_PRIM_OFFSET; 1450 elts->start -= R128_INDEX_PRIM_OFFSET;
1463 1451
1464 if (elts.start & 0x7) { 1452 if (elts->start & 0x7) {
1465 DRM_ERROR("misaligned buffer 0x%x\n", elts.start); 1453 DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
1466 return DRM_ERR(EINVAL); 1454 return -EINVAL;
1467 } 1455 }
1468 if (elts.start < buf->used) { 1456 if (elts->start < buf->used) {
1469 DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used); 1457 DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
1470 return DRM_ERR(EINVAL); 1458 return -EINVAL;
1471 } 1459 }
1472 1460
1473 buf->used = elts.end; 1461 buf->used = elts->end;
1474 buf_priv->prim = elts.prim; 1462 buf_priv->prim = elts->prim;
1475 buf_priv->discard = elts.discard; 1463 buf_priv->discard = elts->discard;
1476 1464
1477 r128_cce_dispatch_indices(dev, buf, elts.start, elts.end, count); 1465 r128_cce_dispatch_indices(dev, buf, elts->start, elts->end, count);
1478 1466
1479 COMMIT_RING(); 1467 COMMIT_RING();
1480 return 0; 1468 return 0;
1481} 1469}
1482 1470
1483static int r128_cce_blit(DRM_IOCTL_ARGS) 1471static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
1484{ 1472{
1485 DRM_DEVICE;
1486 struct drm_device_dma *dma = dev->dma; 1473 struct drm_device_dma *dma = dev->dma;
1487 drm_r128_private_t *dev_priv = dev->dev_private; 1474 drm_r128_private_t *dev_priv = dev->dev_private;
1488 drm_r128_blit_t blit; 1475 drm_r128_blit_t *blit = data;
1489 int ret; 1476 int ret;
1490 1477
1491 LOCK_TEST_WITH_RETURN(dev, filp); 1478 LOCK_TEST_WITH_RETURN(dev, file_priv);
1492
1493 DRM_COPY_FROM_USER_IOCTL(blit, (drm_r128_blit_t __user *) data,
1494 sizeof(blit));
1495 1479
1496 DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit.idx); 1480 DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit->idx);
1497 1481
1498 if (blit.idx < 0 || blit.idx >= dma->buf_count) { 1482 if (blit->idx < 0 || blit->idx >= dma->buf_count) {
1499 DRM_ERROR("buffer index %d (of %d max)\n", 1483 DRM_ERROR("buffer index %d (of %d max)\n",
1500 blit.idx, dma->buf_count - 1); 1484 blit->idx, dma->buf_count - 1);
1501 return DRM_ERR(EINVAL); 1485 return -EINVAL;
1502 } 1486 }
1503 1487
1504 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1488 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1505 VB_AGE_TEST_WITH_RETURN(dev_priv); 1489 VB_AGE_TEST_WITH_RETURN(dev_priv);
1506 1490
1507 ret = r128_cce_dispatch_blit(filp, dev, &blit); 1491 ret = r128_cce_dispatch_blit(dev, file_priv, blit);
1508 1492
1509 COMMIT_RING(); 1493 COMMIT_RING();
1510 return ret; 1494 return ret;
1511} 1495}
1512 1496
1513static int r128_cce_depth(DRM_IOCTL_ARGS) 1497static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv)
1514{ 1498{
1515 DRM_DEVICE;
1516 drm_r128_private_t *dev_priv = dev->dev_private; 1499 drm_r128_private_t *dev_priv = dev->dev_private;
1517 drm_r128_depth_t depth; 1500 drm_r128_depth_t *depth = data;
1518 int ret; 1501 int ret;
1519 1502
1520 LOCK_TEST_WITH_RETURN(dev, filp); 1503 LOCK_TEST_WITH_RETURN(dev, file_priv);
1521
1522 DRM_COPY_FROM_USER_IOCTL(depth, (drm_r128_depth_t __user *) data,
1523 sizeof(depth));
1524 1504
1525 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1505 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1526 1506
1527 ret = DRM_ERR(EINVAL); 1507 ret = -EINVAL;
1528 switch (depth.func) { 1508 switch (depth->func) {
1529 case R128_WRITE_SPAN: 1509 case R128_WRITE_SPAN:
1530 ret = r128_cce_dispatch_write_span(dev, &depth); 1510 ret = r128_cce_dispatch_write_span(dev, depth);
1531 break; 1511 break;
1532 case R128_WRITE_PIXELS: 1512 case R128_WRITE_PIXELS:
1533 ret = r128_cce_dispatch_write_pixels(dev, &depth); 1513 ret = r128_cce_dispatch_write_pixels(dev, depth);
1534 break; 1514 break;
1535 case R128_READ_SPAN: 1515 case R128_READ_SPAN:
1536 ret = r128_cce_dispatch_read_span(dev, &depth); 1516 ret = r128_cce_dispatch_read_span(dev, depth);
1537 break; 1517 break;
1538 case R128_READ_PIXELS: 1518 case R128_READ_PIXELS:
1539 ret = r128_cce_dispatch_read_pixels(dev, &depth); 1519 ret = r128_cce_dispatch_read_pixels(dev, depth);
1540 break; 1520 break;
1541 } 1521 }
1542 1522
@@ -1544,20 +1524,16 @@ static int r128_cce_depth(DRM_IOCTL_ARGS)
1544 return ret; 1524 return ret;
1545} 1525}
1546 1526
1547static int r128_cce_stipple(DRM_IOCTL_ARGS) 1527static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
1548{ 1528{
1549 DRM_DEVICE;
1550 drm_r128_private_t *dev_priv = dev->dev_private; 1529 drm_r128_private_t *dev_priv = dev->dev_private;
1551 drm_r128_stipple_t stipple; 1530 drm_r128_stipple_t *stipple = data;
1552 u32 mask[32]; 1531 u32 mask[32];
1553 1532
1554 LOCK_TEST_WITH_RETURN(dev, filp); 1533 LOCK_TEST_WITH_RETURN(dev, file_priv);
1555
1556 DRM_COPY_FROM_USER_IOCTL(stipple, (drm_r128_stipple_t __user *) data,
1557 sizeof(stipple));
1558 1534
1559 if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32))) 1535 if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
1560 return DRM_ERR(EFAULT); 1536 return -EFAULT;
1561 1537
1562 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1538 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1563 1539
@@ -1567,61 +1543,58 @@ static int r128_cce_stipple(DRM_IOCTL_ARGS)
1567 return 0; 1543 return 0;
1568} 1544}
1569 1545
1570static int r128_cce_indirect(DRM_IOCTL_ARGS) 1546static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
1571{ 1547{
1572 DRM_DEVICE;
1573 drm_r128_private_t *dev_priv = dev->dev_private; 1548 drm_r128_private_t *dev_priv = dev->dev_private;
1574 struct drm_device_dma *dma = dev->dma; 1549 struct drm_device_dma *dma = dev->dma;
1575 struct drm_buf *buf; 1550 struct drm_buf *buf;
1576 drm_r128_buf_priv_t *buf_priv; 1551 drm_r128_buf_priv_t *buf_priv;
1577 drm_r128_indirect_t indirect; 1552 drm_r128_indirect_t *indirect = data;
1578#if 0 1553#if 0
1579 RING_LOCALS; 1554 RING_LOCALS;
1580#endif 1555#endif
1581 1556
1582 LOCK_TEST_WITH_RETURN(dev, filp); 1557 LOCK_TEST_WITH_RETURN(dev, file_priv);
1583 1558
1584 if (!dev_priv) { 1559 if (!dev_priv) {
1585 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1560 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1586 return DRM_ERR(EINVAL); 1561 return -EINVAL;
1587 } 1562 }
1588 1563
1589 DRM_COPY_FROM_USER_IOCTL(indirect, (drm_r128_indirect_t __user *) data,
1590 sizeof(indirect));
1591
1592 DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n", 1564 DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n",
1593 indirect.idx, indirect.start, indirect.end, indirect.discard); 1565 indirect->idx, indirect->start, indirect->end,
1566 indirect->discard);
1594 1567
1595 if (indirect.idx < 0 || indirect.idx >= dma->buf_count) { 1568 if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
1596 DRM_ERROR("buffer index %d (of %d max)\n", 1569 DRM_ERROR("buffer index %d (of %d max)\n",
1597 indirect.idx, dma->buf_count - 1); 1570 indirect->idx, dma->buf_count - 1);
1598 return DRM_ERR(EINVAL); 1571 return -EINVAL;
1599 } 1572 }
1600 1573
1601 buf = dma->buflist[indirect.idx]; 1574 buf = dma->buflist[indirect->idx];
1602 buf_priv = buf->dev_private; 1575 buf_priv = buf->dev_private;
1603 1576
1604 if (buf->filp != filp) { 1577 if (buf->file_priv != file_priv) {
1605 DRM_ERROR("process %d using buffer owned by %p\n", 1578 DRM_ERROR("process %d using buffer owned by %p\n",
1606 DRM_CURRENTPID, buf->filp); 1579 DRM_CURRENTPID, buf->file_priv);
1607 return DRM_ERR(EINVAL); 1580 return -EINVAL;
1608 } 1581 }
1609 if (buf->pending) { 1582 if (buf->pending) {
1610 DRM_ERROR("sending pending buffer %d\n", indirect.idx); 1583 DRM_ERROR("sending pending buffer %d\n", indirect->idx);
1611 return DRM_ERR(EINVAL); 1584 return -EINVAL;
1612 } 1585 }
1613 1586
1614 if (indirect.start < buf->used) { 1587 if (indirect->start < buf->used) {
1615 DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n", 1588 DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
1616 indirect.start, buf->used); 1589 indirect->start, buf->used);
1617 return DRM_ERR(EINVAL); 1590 return -EINVAL;
1618 } 1591 }
1619 1592
1620 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1593 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1621 VB_AGE_TEST_WITH_RETURN(dev_priv); 1594 VB_AGE_TEST_WITH_RETURN(dev_priv);
1622 1595
1623 buf->used = indirect.end; 1596 buf->used = indirect->end;
1624 buf_priv->discard = indirect.discard; 1597 buf_priv->discard = indirect->discard;
1625 1598
1626#if 0 1599#if 0
1627 /* Wait for the 3D stream to idle before the indirect buffer 1600 /* Wait for the 3D stream to idle before the indirect buffer
@@ -1636,46 +1609,42 @@ static int r128_cce_indirect(DRM_IOCTL_ARGS)
1636 * X server. This is insecure and is thus only available to 1609 * X server. This is insecure and is thus only available to
1637 * privileged clients. 1610 * privileged clients.
1638 */ 1611 */
1639 r128_cce_dispatch_indirect(dev, buf, indirect.start, indirect.end); 1612 r128_cce_dispatch_indirect(dev, buf, indirect->start, indirect->end);
1640 1613
1641 COMMIT_RING(); 1614 COMMIT_RING();
1642 return 0; 1615 return 0;
1643} 1616}
1644 1617
1645static int r128_getparam(DRM_IOCTL_ARGS) 1618static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
1646{ 1619{
1647 DRM_DEVICE;
1648 drm_r128_private_t *dev_priv = dev->dev_private; 1620 drm_r128_private_t *dev_priv = dev->dev_private;
1649 drm_r128_getparam_t param; 1621 drm_r128_getparam_t *param = data;
1650 int value; 1622 int value;
1651 1623
1652 if (!dev_priv) { 1624 if (!dev_priv) {
1653 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1625 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1654 return DRM_ERR(EINVAL); 1626 return -EINVAL;
1655 } 1627 }
1656 1628
1657 DRM_COPY_FROM_USER_IOCTL(param, (drm_r128_getparam_t __user *) data,
1658 sizeof(param));
1659
1660 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); 1629 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
1661 1630
1662 switch (param.param) { 1631 switch (param->param) {
1663 case R128_PARAM_IRQ_NR: 1632 case R128_PARAM_IRQ_NR:
1664 value = dev->irq; 1633 value = dev->irq;
1665 break; 1634 break;
1666 default: 1635 default:
1667 return DRM_ERR(EINVAL); 1636 return -EINVAL;
1668 } 1637 }
1669 1638
1670 if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { 1639 if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
1671 DRM_ERROR("copy_to_user\n"); 1640 DRM_ERROR("copy_to_user\n");
1672 return DRM_ERR(EFAULT); 1641 return -EFAULT;
1673 } 1642 }
1674 1643
1675 return 0; 1644 return 0;
1676} 1645}
1677 1646
1678void r128_driver_preclose(struct drm_device * dev, DRMFILE filp) 1647void r128_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1679{ 1648{
1680 if (dev->dev_private) { 1649 if (dev->dev_private) {
1681 drm_r128_private_t *dev_priv = dev->dev_private; 1650 drm_r128_private_t *dev_priv = dev->dev_private;
@@ -1690,24 +1659,24 @@ void r128_driver_lastclose(struct drm_device * dev)
1690 r128_do_cleanup_cce(dev); 1659 r128_do_cleanup_cce(dev);
1691} 1660}
1692 1661
1693drm_ioctl_desc_t r128_ioctls[] = { 1662struct drm_ioctl_desc r128_ioctls[] = {
1694 [DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1663 DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1695 [DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1664 DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1696 [DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1665 DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1697 [DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = {r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1666 DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1698 [DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = {r128_cce_idle, DRM_AUTH}, 1667 DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
1699 [DRM_IOCTL_NR(DRM_R128_RESET)] = {r128_engine_reset, DRM_AUTH}, 1668 DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH),
1700 [DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = {r128_fullscreen, DRM_AUTH}, 1669 DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
1701 [DRM_IOCTL_NR(DRM_R128_SWAP)] = {r128_cce_swap, DRM_AUTH}, 1670 DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH),
1702 [DRM_IOCTL_NR(DRM_R128_FLIP)] = {r128_cce_flip, DRM_AUTH}, 1671 DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH),
1703 [DRM_IOCTL_NR(DRM_R128_CLEAR)] = {r128_cce_clear, DRM_AUTH}, 1672 DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH),
1704 [DRM_IOCTL_NR(DRM_R128_VERTEX)] = {r128_cce_vertex, DRM_AUTH}, 1673 DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH),
1705 [DRM_IOCTL_NR(DRM_R128_INDICES)] = {r128_cce_indices, DRM_AUTH}, 1674 DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH),
1706 [DRM_IOCTL_NR(DRM_R128_BLIT)] = {r128_cce_blit, DRM_AUTH}, 1675 DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH),
1707 [DRM_IOCTL_NR(DRM_R128_DEPTH)] = {r128_cce_depth, DRM_AUTH}, 1676 DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH),
1708 [DRM_IOCTL_NR(DRM_R128_STIPPLE)] = {r128_cce_stipple, DRM_AUTH}, 1677 DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
1709 [DRM_IOCTL_NR(DRM_R128_INDIRECT)] = {r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1678 DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1710 [DRM_IOCTL_NR(DRM_R128_GETPARAM)] = {r128_getparam, DRM_AUTH}, 1679 DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH),
1711}; 1680};
1712 1681
1713int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls); 1682int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
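
The r128 table above is representative of the whole series: each handler trades the DRM_IOCTL_ARGS/DRM_DEVICE macro pair for an explicit (dev, data, file_priv) signature, and the table moves from open-coded [DRM_IOCTL_NR(...)] initializers to DRM_IOCTL_DEF() entries. The stand-alone C model below sketches that dispatch shape under simplified assumptions; every type, macro and flag in it is an illustrative stand-in, not the kernel's drmP.h definition.

/* Stand-alone model of the new (dev, data, file_priv) dispatch shape.
 * All definitions here are simplified stand-ins, not the real drmP.h ones.
 */
#include <stdio.h>

struct drm_device { int irq; };
struct drm_file   { int authenticated; };

typedef int (*drm_ioctl_fn)(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);

struct drm_ioctl_desc {
        drm_ioctl_fn func;
        unsigned int flags;               /* e.g. DRM_AUTH, DRM_MASTER */
};

#define DRM_AUTH 0x1
#define MODEL_IOCTL_DEF(nr, fn, fl) [(nr)] = { (fn), (fl) }

struct model_getparam { int param, value; };

/* New-style handler: the argument already lives in kernel memory. */
static int model_getparam_handler(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv)
{
        struct model_getparam *p = data;  /* no copy_from_user here */

        (void)file_priv;
        p->value = dev->irq;              /* wrapper copies the result back */
        return 0;
}

static const struct drm_ioctl_desc model_ioctls[] = {
        MODEL_IOCTL_DEF(0, model_getparam_handler, DRM_AUTH),
};

int main(void)
{
        struct drm_device dev = { .irq = 16 };
        struct drm_file file = { .authenticated = 1 };
        struct model_getparam arg = { .param = 0 };
        int ret = model_ioctls[0].func(&dev, &arg, &file);

        printf("ret=%d value=%d\n", ret, arg.value);
        return ret;
}

Built with any C99 compiler this prints ret=0 value=16; the point is only that the handler sees a kernel-side copy of its argument and a per-open file pointer, and never touches user memory itself.
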
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c
index 4e5aca6ba59a..59b2944811c5 100644
--- a/drivers/char/drm/r300_cmdbuf.c
+++ b/drivers/char/drm/r300_cmdbuf.c
@@ -74,7 +74,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
74 if (DRM_COPY_FROM_USER_UNCHECKED 74 if (DRM_COPY_FROM_USER_UNCHECKED
75 (&box, &cmdbuf->boxes[n + i], sizeof(box))) { 75 (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
76 DRM_ERROR("copy cliprect faulted\n"); 76 DRM_ERROR("copy cliprect faulted\n");
77 return DRM_ERR(EFAULT); 77 return -EFAULT;
78 } 78 }
79 79
80 box.x1 = 80 box.x1 =
@@ -263,7 +263,7 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
263 DRM_ERROR 263 DRM_ERROR
264 ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", 264 ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
265 reg, sz); 265 reg, sz);
266 return DRM_ERR(EINVAL); 266 return -EINVAL;
267 } 267 }
268 for (i = 0; i < sz; i++) { 268 for (i = 0; i < sz; i++) {
269 values[i] = ((int *)cmdbuf->buf)[i]; 269 values[i] = ((int *)cmdbuf->buf)[i];
@@ -275,13 +275,13 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
275 DRM_ERROR 275 DRM_ERROR
276 ("Offset failed range check (reg=%04x sz=%d)\n", 276 ("Offset failed range check (reg=%04x sz=%d)\n",
277 reg, sz); 277 reg, sz);
278 return DRM_ERR(EINVAL); 278 return -EINVAL;
279 } 279 }
280 break; 280 break;
281 default: 281 default:
282 DRM_ERROR("Register %04x failed check as flag=%02x\n", 282 DRM_ERROR("Register %04x failed check as flag=%02x\n",
283 reg + i * 4, r300_reg_flags[(reg >> 2) + i]); 283 reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
284 return DRM_ERR(EINVAL); 284 return -EINVAL;
285 } 285 }
286 } 286 }
287 287
@@ -317,12 +317,12 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
317 return 0; 317 return 0;
318 318
319 if (sz * 4 > cmdbuf->bufsz) 319 if (sz * 4 > cmdbuf->bufsz)
320 return DRM_ERR(EINVAL); 320 return -EINVAL;
321 321
322 if (reg + sz * 4 >= 0x10000) { 322 if (reg + sz * 4 >= 0x10000) {
323 DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg, 323 DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
324 sz); 324 sz);
325 return DRM_ERR(EINVAL); 325 return -EINVAL;
326 } 326 }
327 327
328 if (r300_check_range(reg, sz)) { 328 if (r300_check_range(reg, sz)) {
@@ -362,7 +362,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
362 if (!sz) 362 if (!sz)
363 return 0; 363 return 0;
364 if (sz * 16 > cmdbuf->bufsz) 364 if (sz * 16 > cmdbuf->bufsz)
365 return DRM_ERR(EINVAL); 365 return -EINVAL;
366 366
367 BEGIN_RING(5 + sz * 4); 367 BEGIN_RING(5 + sz * 4);
368 /* Wait for VAP to come to senses.. */ 368 /* Wait for VAP to come to senses.. */
@@ -391,7 +391,7 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
391 RING_LOCALS; 391 RING_LOCALS;
392 392
393 if (8 * 4 > cmdbuf->bufsz) 393 if (8 * 4 > cmdbuf->bufsz)
394 return DRM_ERR(EINVAL); 394 return -EINVAL;
395 395
396 BEGIN_RING(10); 396 BEGIN_RING(10);
397 OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8)); 397 OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
@@ -421,7 +421,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
421 if ((count + 1) > MAX_ARRAY_PACKET) { 421 if ((count + 1) > MAX_ARRAY_PACKET) {
422 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", 422 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
423 count); 423 count);
424 return DRM_ERR(EINVAL); 424 return -EINVAL;
425 } 425 }
426 memset(payload, 0, MAX_ARRAY_PACKET * 4); 426 memset(payload, 0, MAX_ARRAY_PACKET * 4);
427 memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4); 427 memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
@@ -437,7 +437,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
437 DRM_ERROR 437 DRM_ERROR
438 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", 438 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
439 k, i); 439 k, i);
440 return DRM_ERR(EINVAL); 440 return -EINVAL;
441 } 441 }
442 k++; 442 k++;
443 i++; 443 i++;
@@ -448,7 +448,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
448 DRM_ERROR 448 DRM_ERROR
449 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", 449 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
450 k, i); 450 k, i);
451 return DRM_ERR(EINVAL); 451 return -EINVAL;
452 } 452 }
453 k++; 453 k++;
454 i++; 454 i++;
@@ -458,7 +458,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
458 DRM_ERROR 458 DRM_ERROR
459 ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", 459 ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
460 k, i, narrays, count + 1); 460 k, i, narrays, count + 1);
461 return DRM_ERR(EINVAL); 461 return -EINVAL;
462 } 462 }
463 463
464 /* all clear, output packet */ 464 /* all clear, output packet */
@@ -492,7 +492,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
492 ret = !radeon_check_offset(dev_priv, offset); 492 ret = !radeon_check_offset(dev_priv, offset);
493 if (ret) { 493 if (ret) {
494 DRM_ERROR("Invalid bitblt first offset is %08X\n", offset); 494 DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
495 return DRM_ERR(EINVAL); 495 return -EINVAL;
496 } 496 }
497 } 497 }
498 498
@@ -502,7 +502,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
502 ret = !radeon_check_offset(dev_priv, offset); 502 ret = !radeon_check_offset(dev_priv, offset);
503 if (ret) { 503 if (ret) {
504 DRM_ERROR("Invalid bitblt second offset is %08X\n", offset); 504 DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
505 return DRM_ERR(EINVAL); 505 return -EINVAL;
506 } 506 }
507 507
508 } 508 }
@@ -530,12 +530,12 @@ static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv,
530 530
531 if ((cmd[1] & 0x8000ffff) != 0x80000810) { 531 if ((cmd[1] & 0x8000ffff) != 0x80000810) {
532 DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); 532 DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
533 return DRM_ERR(EINVAL); 533 return -EINVAL;
534 } 534 }
535 ret = !radeon_check_offset(dev_priv, cmd[2]); 535 ret = !radeon_check_offset(dev_priv, cmd[2]);
536 if (ret) { 536 if (ret) {
537 DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); 537 DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
538 return DRM_ERR(EINVAL); 538 return -EINVAL;
539 } 539 }
540 540
541 BEGIN_RING(count+2); 541 BEGIN_RING(count+2);
@@ -557,7 +557,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
557 RING_LOCALS; 557 RING_LOCALS;
558 558
559 if (4 > cmdbuf->bufsz) 559 if (4 > cmdbuf->bufsz)
560 return DRM_ERR(EINVAL); 560 return -EINVAL;
561 561
562 /* Fixme !! This simply emits a packet without much checking. 562 /* Fixme !! This simply emits a packet without much checking.
563 We need to be smarter. */ 563 We need to be smarter. */
@@ -568,7 +568,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
568 /* Is it packet 3 ? */ 568 /* Is it packet 3 ? */
569 if ((header >> 30) != 0x3) { 569 if ((header >> 30) != 0x3) {
570 DRM_ERROR("Not a packet3 header (0x%08x)\n", header); 570 DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
571 return DRM_ERR(EINVAL); 571 return -EINVAL;
572 } 572 }
573 573
574 count = (header >> 16) & 0x3fff; 574 count = (header >> 16) & 0x3fff;
@@ -578,7 +578,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
578 DRM_ERROR 578 DRM_ERROR
579 ("Expected packet3 of length %d but have only %d bytes left\n", 579 ("Expected packet3 of length %d but have only %d bytes left\n",
580 (count + 2) * 4, cmdbuf->bufsz); 580 (count + 2) * 4, cmdbuf->bufsz);
581 return DRM_ERR(EINVAL); 581 return -EINVAL;
582 } 582 }
583 583
584 /* Is it a packet type we know about ? */ 584 /* Is it a packet type we know about ? */
@@ -600,7 +600,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
600 break; 600 break;
601 default: 601 default:
602 DRM_ERROR("Unknown packet3 header (0x%08x)\n", header); 602 DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
603 return DRM_ERR(EINVAL); 603 return -EINVAL;
604 } 604 }
605 605
606 BEGIN_RING(count + 2); 606 BEGIN_RING(count + 2);
@@ -664,7 +664,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
664 DRM_ERROR("bad packet3 type %i at %p\n", 664 DRM_ERROR("bad packet3 type %i at %p\n",
665 header.packet3.packet, 665 header.packet3.packet,
666 cmdbuf->buf - sizeof(header)); 666 cmdbuf->buf - sizeof(header));
667 return DRM_ERR(EINVAL); 667 return -EINVAL;
668 } 668 }
669 669
670 n += R300_SIMULTANEOUS_CLIPRECTS; 670 n += R300_SIMULTANEOUS_CLIPRECTS;
@@ -726,11 +726,11 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
726 726
727 if (cmdbuf->bufsz < 727 if (cmdbuf->bufsz <
728 (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) { 728 (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) {
729 return DRM_ERR(EINVAL); 729 return -EINVAL;
730 } 730 }
731 731
732 if (header.scratch.reg >= 5) { 732 if (header.scratch.reg >= 5) {
733 return DRM_ERR(EINVAL); 733 return -EINVAL;
734 } 734 }
735 735
736 dev_priv->scratch_ages[header.scratch.reg]++; 736 dev_priv->scratch_ages[header.scratch.reg]++;
@@ -745,21 +745,21 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
745 buf_idx *= 2; /* 8 bytes per buf */ 745 buf_idx *= 2; /* 8 bytes per buf */
746 746
747 if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) { 747 if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
748 return DRM_ERR(EINVAL); 748 return -EINVAL;
749 } 749 }
750 750
751 if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) { 751 if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
752 return DRM_ERR(EINVAL); 752 return -EINVAL;
753 } 753 }
754 754
755 if (h_pending == 0) { 755 if (h_pending == 0) {
756 return DRM_ERR(EINVAL); 756 return -EINVAL;
757 } 757 }
758 758
759 h_pending--; 759 h_pending--;
760 760
761 if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) { 761 if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
762 return DRM_ERR(EINVAL); 762 return -EINVAL;
763 } 763 }
764 764
765 cmdbuf->buf += sizeof(buf_idx); 765 cmdbuf->buf += sizeof(buf_idx);
@@ -780,8 +780,7 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
780 * Called by the ioctl handler function radeon_cp_cmdbuf. 780 * Called by the ioctl handler function radeon_cp_cmdbuf.
781 */ 781 */
782int r300_do_cp_cmdbuf(struct drm_device *dev, 782int r300_do_cp_cmdbuf(struct drm_device *dev,
783 DRMFILE filp, 783 struct drm_file *file_priv,
784 struct drm_file *filp_priv,
785 drm_radeon_kcmd_buffer_t *cmdbuf) 784 drm_radeon_kcmd_buffer_t *cmdbuf)
786{ 785{
787 drm_radeon_private_t *dev_priv = dev->dev_private; 786 drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -879,15 +878,16 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
879 if (idx < 0 || idx >= dma->buf_count) { 878 if (idx < 0 || idx >= dma->buf_count) {
880 DRM_ERROR("buffer index %d (of %d max)\n", 879 DRM_ERROR("buffer index %d (of %d max)\n",
881 idx, dma->buf_count - 1); 880 idx, dma->buf_count - 1);
882 ret = DRM_ERR(EINVAL); 881 ret = -EINVAL;
883 goto cleanup; 882 goto cleanup;
884 } 883 }
885 884
886 buf = dma->buflist[idx]; 885 buf = dma->buflist[idx];
887 if (buf->filp != filp || buf->pending) { 886 if (buf->file_priv != file_priv || buf->pending) {
888 DRM_ERROR("bad buffer %p %p %d\n", 887 DRM_ERROR("bad buffer %p %p %d\n",
889 buf->filp, filp, buf->pending); 888 buf->file_priv, file_priv,
890 ret = DRM_ERR(EINVAL); 889 buf->pending);
890 ret = -EINVAL;
891 goto cleanup; 891 goto cleanup;
892 } 892 }
893 893
@@ -924,7 +924,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
924 DRM_ERROR("bad cmd_type %i at %p\n", 924 DRM_ERROR("bad cmd_type %i at %p\n",
925 header.header.cmd_type, 925 header.header.cmd_type,
926 cmdbuf->buf - sizeof(header)); 926 cmdbuf->buf - sizeof(header));
927 ret = DRM_ERR(EINVAL); 927 ret = -EINVAL;
928 goto cleanup; 928 goto cleanup;
929 } 929 }
930 } 930 }
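
A second recurring pattern ends here: r300_do_cp_cmdbuf used to take both a DRMFILE and a struct drm_file *, and buffer ownership was checked against the raw filp. With every DMA buffer now tagged by the per-open struct drm_file that checked it out, the test collapses to one pointer comparison. A minimal sketch of that check, with locally defined stand-in types rather than the real DRM structs:

/* Sketch of the ownership test shared by the vertex/indices/cmdbuf paths;
 * the structs are local stand-ins, not the kernel's.
 */
#include <errno.h>

struct drm_file { int placeholder; };

struct drm_buf {
        struct drm_file *file_priv;   /* owner recorded when the buffer is handed out */
        int pending;
};

static int check_buffer_owner(const struct drm_buf *buf,
                              const struct drm_file *file_priv)
{
        if (buf->file_priv != file_priv)  /* another client owns this buffer */
                return -EINVAL;
        if (buf->pending)                 /* already queued to the engine */
                return -EINVAL;
        return 0;
}
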
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index af5790f8fd53..335423c5c186 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -889,7 +889,7 @@ static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
889 DRM_ERROR("failed!\n"); 889 DRM_ERROR("failed!\n");
890 radeon_status(dev_priv); 890 radeon_status(dev_priv);
891#endif 891#endif
892 return DRM_ERR(EBUSY); 892 return -EBUSY;
893} 893}
894 894
895static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries) 895static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
@@ -910,7 +910,7 @@ static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
910 DRM_ERROR("failed!\n"); 910 DRM_ERROR("failed!\n");
911 radeon_status(dev_priv); 911 radeon_status(dev_priv);
912#endif 912#endif
913 return DRM_ERR(EBUSY); 913 return -EBUSY;
914} 914}
915 915
916static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) 916static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
@@ -936,7 +936,7 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
936 DRM_ERROR("failed!\n"); 936 DRM_ERROR("failed!\n");
937 radeon_status(dev_priv); 937 radeon_status(dev_priv);
938#endif 938#endif
939 return DRM_ERR(EBUSY); 939 return -EBUSY;
940} 940}
941 941
942/* ================================================================ 942/* ================================================================
@@ -1394,7 +1394,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
1394 if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) { 1394 if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
1395 DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n"); 1395 DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
1396 radeon_do_cleanup_cp(dev); 1396 radeon_do_cleanup_cp(dev);
1397 return DRM_ERR(EINVAL); 1397 return -EINVAL;
1398 } 1398 }
1399 1399
1400 if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) { 1400 if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) {
@@ -1409,7 +1409,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
1409 if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) { 1409 if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) {
1410 DRM_ERROR("PCI GART memory not allocated!\n"); 1410 DRM_ERROR("PCI GART memory not allocated!\n");
1411 radeon_do_cleanup_cp(dev); 1411 radeon_do_cleanup_cp(dev);
1412 return DRM_ERR(EINVAL); 1412 return -EINVAL;
1413 } 1413 }
1414 1414
1415 dev_priv->usec_timeout = init->usec_timeout; 1415 dev_priv->usec_timeout = init->usec_timeout;
@@ -1417,7 +1417,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
1417 dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) { 1417 dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
1418 DRM_DEBUG("TIMEOUT problem!\n"); 1418 DRM_DEBUG("TIMEOUT problem!\n");
1419 radeon_do_cleanup_cp(dev); 1419 radeon_do_cleanup_cp(dev);
1420 return DRM_ERR(EINVAL); 1420 return -EINVAL;
1421 } 1421 }
1422 1422
1423 /* Enable vblank on CRTC1 for older X servers 1423 /* Enable vblank on CRTC1 for older X servers
@@ -1446,7 +1446,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
1446 (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) { 1446 (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
1447 DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode); 1447 DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
1448 radeon_do_cleanup_cp(dev); 1448 radeon_do_cleanup_cp(dev);
1449 return DRM_ERR(EINVAL); 1449 return -EINVAL;
1450 } 1450 }
1451 1451
1452 switch (init->fb_bpp) { 1452 switch (init->fb_bpp) {
@@ -1515,27 +1515,27 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
1515 if (!dev_priv->sarea) { 1515 if (!dev_priv->sarea) {
1516 DRM_ERROR("could not find sarea!\n"); 1516 DRM_ERROR("could not find sarea!\n");
1517 radeon_do_cleanup_cp(dev); 1517 radeon_do_cleanup_cp(dev);
1518 return DRM_ERR(EINVAL); 1518 return -EINVAL;
1519 } 1519 }
1520 1520
1521 dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset); 1521 dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
1522 if (!dev_priv->cp_ring) { 1522 if (!dev_priv->cp_ring) {
1523 DRM_ERROR("could not find cp ring region!\n"); 1523 DRM_ERROR("could not find cp ring region!\n");
1524 radeon_do_cleanup_cp(dev); 1524 radeon_do_cleanup_cp(dev);
1525 return DRM_ERR(EINVAL); 1525 return -EINVAL;
1526 } 1526 }
1527 dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); 1527 dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
1528 if (!dev_priv->ring_rptr) { 1528 if (!dev_priv->ring_rptr) {
1529 DRM_ERROR("could not find ring read pointer!\n"); 1529 DRM_ERROR("could not find ring read pointer!\n");
1530 radeon_do_cleanup_cp(dev); 1530 radeon_do_cleanup_cp(dev);
1531 return DRM_ERR(EINVAL); 1531 return -EINVAL;
1532 } 1532 }
1533 dev->agp_buffer_token = init->buffers_offset; 1533 dev->agp_buffer_token = init->buffers_offset;
1534 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 1534 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
1535 if (!dev->agp_buffer_map) { 1535 if (!dev->agp_buffer_map) {
1536 DRM_ERROR("could not find dma buffer region!\n"); 1536 DRM_ERROR("could not find dma buffer region!\n");
1537 radeon_do_cleanup_cp(dev); 1537 radeon_do_cleanup_cp(dev);
1538 return DRM_ERR(EINVAL); 1538 return -EINVAL;
1539 } 1539 }
1540 1540
1541 if (init->gart_textures_offset) { 1541 if (init->gart_textures_offset) {
@@ -1544,7 +1544,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
1544 if (!dev_priv->gart_textures) { 1544 if (!dev_priv->gart_textures) {
1545 DRM_ERROR("could not find GART texture region!\n"); 1545 DRM_ERROR("could not find GART texture region!\n");
1546 radeon_do_cleanup_cp(dev); 1546 radeon_do_cleanup_cp(dev);
1547 return DRM_ERR(EINVAL); 1547 return -EINVAL;
1548 } 1548 }
1549 } 1549 }
1550 1550
@@ -1562,7 +1562,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
1562 !dev->agp_buffer_map->handle) { 1562 !dev->agp_buffer_map->handle) {
1563 DRM_ERROR("could not find ioremap agp regions!\n"); 1563 DRM_ERROR("could not find ioremap agp regions!\n");
1564 radeon_do_cleanup_cp(dev); 1564 radeon_do_cleanup_cp(dev);
1565 return DRM_ERR(EINVAL); 1565 return -EINVAL;
1566 } 1566 }
1567 } else 1567 } else
1568#endif 1568#endif
@@ -1710,14 +1710,14 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
1710 DRM_ERROR 1710 DRM_ERROR
1711 ("Cannot use PCI Express without GART in FB memory\n"); 1711 ("Cannot use PCI Express without GART in FB memory\n");
1712 radeon_do_cleanup_cp(dev); 1712 radeon_do_cleanup_cp(dev);
1713 return DRM_ERR(EINVAL); 1713 return -EINVAL;
1714 } 1714 }
1715 } 1715 }
1716 1716
1717 if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) { 1717 if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
1718 DRM_ERROR("failed to init PCI GART!\n"); 1718 DRM_ERROR("failed to init PCI GART!\n");
1719 radeon_do_cleanup_cp(dev); 1719 radeon_do_cleanup_cp(dev);
1720 return DRM_ERR(ENOMEM); 1720 return -ENOMEM;
1721 } 1721 }
1722 1722
1723 /* Turn on PCI GART */ 1723 /* Turn on PCI GART */
@@ -1797,7 +1797,7 @@ static int radeon_do_resume_cp(struct drm_device * dev)
1797 1797
1798 if (!dev_priv) { 1798 if (!dev_priv) {
1799 DRM_ERROR("Called with no initialization\n"); 1799 DRM_ERROR("Called with no initialization\n");
1800 return DRM_ERR(EINVAL); 1800 return -EINVAL;
1801 } 1801 }
1802 1802
1803 DRM_DEBUG("Starting radeon_do_resume_cp()\n"); 1803 DRM_DEBUG("Starting radeon_do_resume_cp()\n");
@@ -1823,38 +1823,33 @@ static int radeon_do_resume_cp(struct drm_device * dev)
1823 return 0; 1823 return 0;
1824} 1824}
1825 1825
1826int radeon_cp_init(DRM_IOCTL_ARGS) 1826int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
1827{ 1827{
1828 DRM_DEVICE; 1828 drm_radeon_init_t *init = data;
1829 drm_radeon_init_t init;
1830 1829
1831 LOCK_TEST_WITH_RETURN(dev, filp); 1830 LOCK_TEST_WITH_RETURN(dev, file_priv);
1832 1831
1833 DRM_COPY_FROM_USER_IOCTL(init, (drm_radeon_init_t __user *) data, 1832 if (init->func == RADEON_INIT_R300_CP)
1834 sizeof(init));
1835
1836 if (init.func == RADEON_INIT_R300_CP)
1837 r300_init_reg_flags(); 1833 r300_init_reg_flags();
1838 1834
1839 switch (init.func) { 1835 switch (init->func) {
1840 case RADEON_INIT_CP: 1836 case RADEON_INIT_CP:
1841 case RADEON_INIT_R200_CP: 1837 case RADEON_INIT_R200_CP:
1842 case RADEON_INIT_R300_CP: 1838 case RADEON_INIT_R300_CP:
1843 return radeon_do_init_cp(dev, &init); 1839 return radeon_do_init_cp(dev, init);
1844 case RADEON_CLEANUP_CP: 1840 case RADEON_CLEANUP_CP:
1845 return radeon_do_cleanup_cp(dev); 1841 return radeon_do_cleanup_cp(dev);
1846 } 1842 }
1847 1843
1848 return DRM_ERR(EINVAL); 1844 return -EINVAL;
1849} 1845}
1850 1846
1851int radeon_cp_start(DRM_IOCTL_ARGS) 1847int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
1852{ 1848{
1853 DRM_DEVICE;
1854 drm_radeon_private_t *dev_priv = dev->dev_private; 1849 drm_radeon_private_t *dev_priv = dev->dev_private;
1855 DRM_DEBUG("\n"); 1850 DRM_DEBUG("\n");
1856 1851
1857 LOCK_TEST_WITH_RETURN(dev, filp); 1852 LOCK_TEST_WITH_RETURN(dev, file_priv);
1858 1853
1859 if (dev_priv->cp_running) { 1854 if (dev_priv->cp_running) {
1860 DRM_DEBUG("%s while CP running\n", __FUNCTION__); 1855 DRM_DEBUG("%s while CP running\n", __FUNCTION__);
@@ -1874,18 +1869,14 @@ int radeon_cp_start(DRM_IOCTL_ARGS)
1874/* Stop the CP. The engine must have been idled before calling this 1869/* Stop the CP. The engine must have been idled before calling this
1875 * routine. 1870 * routine.
1876 */ 1871 */
1877int radeon_cp_stop(DRM_IOCTL_ARGS) 1872int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
1878{ 1873{
1879 DRM_DEVICE;
1880 drm_radeon_private_t *dev_priv = dev->dev_private; 1874 drm_radeon_private_t *dev_priv = dev->dev_private;
1881 drm_radeon_cp_stop_t stop; 1875 drm_radeon_cp_stop_t *stop = data;
1882 int ret; 1876 int ret;
1883 DRM_DEBUG("\n"); 1877 DRM_DEBUG("\n");
1884 1878
1885 LOCK_TEST_WITH_RETURN(dev, filp); 1879 LOCK_TEST_WITH_RETURN(dev, file_priv);
1886
1887 DRM_COPY_FROM_USER_IOCTL(stop, (drm_radeon_cp_stop_t __user *) data,
1888 sizeof(stop));
1889 1880
1890 if (!dev_priv->cp_running) 1881 if (!dev_priv->cp_running)
1891 return 0; 1882 return 0;
@@ -1893,14 +1884,14 @@ int radeon_cp_stop(DRM_IOCTL_ARGS)
1893 /* Flush any pending CP commands. This ensures any outstanding 1884 /* Flush any pending CP commands. This ensures any outstanding
1894 * commands are executed by the engine before we turn it off. 1885 * commands are executed by the engine before we turn it off.

1895 */ 1886 */
1896 if (stop.flush) { 1887 if (stop->flush) {
1897 radeon_do_cp_flush(dev_priv); 1888 radeon_do_cp_flush(dev_priv);
1898 } 1889 }
1899 1890
1900 /* If we fail to make the engine go idle, we return an error 1891 /* If we fail to make the engine go idle, we return an error
1901 * code so that the DRM ioctl wrapper can try again. 1892 * code so that the DRM ioctl wrapper can try again.
1902 */ 1893 */
1903 if (stop.idle) { 1894 if (stop->idle) {
1904 ret = radeon_do_cp_idle(dev_priv); 1895 ret = radeon_do_cp_idle(dev_priv);
1905 if (ret) 1896 if (ret)
1906 return ret; 1897 return ret;
@@ -1963,17 +1954,16 @@ void radeon_do_release(struct drm_device * dev)
1963 1954
1964/* Just reset the CP ring. Called as part of an X Server engine reset. 1955/* Just reset the CP ring. Called as part of an X Server engine reset.
1965 */ 1956 */
1966int radeon_cp_reset(DRM_IOCTL_ARGS) 1957int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
1967{ 1958{
1968 DRM_DEVICE;
1969 drm_radeon_private_t *dev_priv = dev->dev_private; 1959 drm_radeon_private_t *dev_priv = dev->dev_private;
1970 DRM_DEBUG("\n"); 1960 DRM_DEBUG("\n");
1971 1961
1972 LOCK_TEST_WITH_RETURN(dev, filp); 1962 LOCK_TEST_WITH_RETURN(dev, file_priv);
1973 1963
1974 if (!dev_priv) { 1964 if (!dev_priv) {
1975 DRM_DEBUG("%s called before init done\n", __FUNCTION__); 1965 DRM_DEBUG("%s called before init done\n", __FUNCTION__);
1976 return DRM_ERR(EINVAL); 1966 return -EINVAL;
1977 } 1967 }
1978 1968
1979 radeon_do_cp_reset(dev_priv); 1969 radeon_do_cp_reset(dev_priv);
@@ -1984,32 +1974,29 @@ int radeon_cp_reset(DRM_IOCTL_ARGS)
1984 return 0; 1974 return 0;
1985} 1975}
1986 1976
1987int radeon_cp_idle(DRM_IOCTL_ARGS) 1977int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
1988{ 1978{
1989 DRM_DEVICE;
1990 drm_radeon_private_t *dev_priv = dev->dev_private; 1979 drm_radeon_private_t *dev_priv = dev->dev_private;
1991 DRM_DEBUG("\n"); 1980 DRM_DEBUG("\n");
1992 1981
1993 LOCK_TEST_WITH_RETURN(dev, filp); 1982 LOCK_TEST_WITH_RETURN(dev, file_priv);
1994 1983
1995 return radeon_do_cp_idle(dev_priv); 1984 return radeon_do_cp_idle(dev_priv);
1996} 1985}
1997 1986
1998/* Added by Charl P. Botha to call radeon_do_resume_cp(). 1987/* Added by Charl P. Botha to call radeon_do_resume_cp().
1999 */ 1988 */
2000int radeon_cp_resume(DRM_IOCTL_ARGS) 1989int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv)
2001{ 1990{
2002 DRM_DEVICE;
2003 1991
2004 return radeon_do_resume_cp(dev); 1992 return radeon_do_resume_cp(dev);
2005} 1993}
2006 1994
2007int radeon_engine_reset(DRM_IOCTL_ARGS) 1995int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
2008{ 1996{
2009 DRM_DEVICE;
2010 DRM_DEBUG("\n"); 1997 DRM_DEBUG("\n");
2011 1998
2012 LOCK_TEST_WITH_RETURN(dev, filp); 1999 LOCK_TEST_WITH_RETURN(dev, file_priv);
2013 2000
2014 return radeon_do_engine_reset(dev); 2001 return radeon_do_engine_reset(dev);
2015} 2002}
@@ -2020,7 +2007,7 @@ int radeon_engine_reset(DRM_IOCTL_ARGS)
2020 2007
2021/* KW: Deprecated to say the least: 2008/* KW: Deprecated to say the least:
2022 */ 2009 */
2023int radeon_fullscreen(DRM_IOCTL_ARGS) 2010int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
2024{ 2011{
2025 return 0; 2012 return 0;
2026} 2013}
@@ -2066,8 +2053,9 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
2066 for (i = start; i < dma->buf_count; i++) { 2053 for (i = start; i < dma->buf_count; i++) {
2067 buf = dma->buflist[i]; 2054 buf = dma->buflist[i];
2068 buf_priv = buf->dev_private; 2055 buf_priv = buf->dev_private;
2069 if (buf->filp == 0 || (buf->pending && 2056 if (buf->file_priv == NULL || (buf->pending &&
2070 buf_priv->age <= done_age)) { 2057 buf_priv->age <=
2058 done_age)) {
2071 dev_priv->stats.requested_bufs++; 2059 dev_priv->stats.requested_bufs++;
2072 buf->pending = 0; 2060 buf->pending = 0;
2073 return buf; 2061 return buf;
@@ -2106,8 +2094,9 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
2106 for (i = start; i < dma->buf_count; i++) { 2094 for (i = start; i < dma->buf_count; i++) {
2107 buf = dma->buflist[i]; 2095 buf = dma->buflist[i];
2108 buf_priv = buf->dev_private; 2096 buf_priv = buf->dev_private;
2109 if (buf->filp == 0 || (buf->pending && 2097 if (buf->file_priv == 0 || (buf->pending &&
2110 buf_priv->age <= done_age)) { 2098 buf_priv->age <=
2099 done_age)) {
2111 dev_priv->stats.requested_bufs++; 2100 dev_priv->stats.requested_bufs++;
2112 buf->pending = 0; 2101 buf->pending = 0;
2113 return buf; 2102 return buf;
@@ -2167,10 +2156,11 @@ int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n)
2167 radeon_status(dev_priv); 2156 radeon_status(dev_priv);
2168 DRM_ERROR("failed!\n"); 2157 DRM_ERROR("failed!\n");
2169#endif 2158#endif
2170 return DRM_ERR(EBUSY); 2159 return -EBUSY;
2171} 2160}
2172 2161
2173static int radeon_cp_get_buffers(DRMFILE filp, struct drm_device * dev, 2162static int radeon_cp_get_buffers(struct drm_device *dev,
2163 struct drm_file *file_priv,
2174 struct drm_dma * d) 2164 struct drm_dma * d)
2175{ 2165{
2176 int i; 2166 int i;
@@ -2179,58 +2169,52 @@ static int radeon_cp_get_buffers(DRMFILE filp, struct drm_device * dev,
2179 for (i = d->granted_count; i < d->request_count; i++) { 2169 for (i = d->granted_count; i < d->request_count; i++) {
2180 buf = radeon_freelist_get(dev); 2170 buf = radeon_freelist_get(dev);
2181 if (!buf) 2171 if (!buf)
2182 return DRM_ERR(EBUSY); /* NOTE: broken client */ 2172 return -EBUSY; /* NOTE: broken client */
2183 2173
2184 buf->filp = filp; 2174 buf->file_priv = file_priv;
2185 2175
2186 if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, 2176 if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
2187 sizeof(buf->idx))) 2177 sizeof(buf->idx)))
2188 return DRM_ERR(EFAULT); 2178 return -EFAULT;
2189 if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, 2179 if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
2190 sizeof(buf->total))) 2180 sizeof(buf->total)))
2191 return DRM_ERR(EFAULT); 2181 return -EFAULT;
2192 2182
2193 d->granted_count++; 2183 d->granted_count++;
2194 } 2184 }
2195 return 0; 2185 return 0;
2196} 2186}
2197 2187
2198int radeon_cp_buffers(DRM_IOCTL_ARGS) 2188int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
2199{ 2189{
2200 DRM_DEVICE;
2201 struct drm_device_dma *dma = dev->dma; 2190 struct drm_device_dma *dma = dev->dma;
2202 int ret = 0; 2191 int ret = 0;
2203 struct drm_dma __user *argp = (void __user *)data; 2192 struct drm_dma *d = data;
2204 struct drm_dma d;
2205 2193
2206 LOCK_TEST_WITH_RETURN(dev, filp); 2194 LOCK_TEST_WITH_RETURN(dev, file_priv);
2207
2208 DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d));
2209 2195
2210 /* Please don't send us buffers. 2196 /* Please don't send us buffers.
2211 */ 2197 */
2212 if (d.send_count != 0) { 2198 if (d->send_count != 0) {
2213 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", 2199 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
2214 DRM_CURRENTPID, d.send_count); 2200 DRM_CURRENTPID, d->send_count);
2215 return DRM_ERR(EINVAL); 2201 return -EINVAL;
2216 } 2202 }
2217 2203
2218 /* We'll send you buffers. 2204 /* We'll send you buffers.
2219 */ 2205 */
2220 if (d.request_count < 0 || d.request_count > dma->buf_count) { 2206 if (d->request_count < 0 || d->request_count > dma->buf_count) {
2221 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", 2207 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
2222 DRM_CURRENTPID, d.request_count, dma->buf_count); 2208 DRM_CURRENTPID, d->request_count, dma->buf_count);
2223 return DRM_ERR(EINVAL); 2209 return -EINVAL;
2224 } 2210 }
2225 2211
2226 d.granted_count = 0; 2212 d->granted_count = 0;
2227 2213
2228 if (d.request_count) { 2214 if (d->request_count) {
2229 ret = radeon_cp_get_buffers(filp, dev, &d); 2215 ret = radeon_cp_get_buffers(dev, file_priv, d);
2230 } 2216 }
2231 2217
2232 DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d));
2233
2234 return ret; 2218 return ret;
2235} 2219}
2236 2220
@@ -2241,7 +2225,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
2241 2225
2242 dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER); 2226 dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER);
2243 if (dev_priv == NULL) 2227 if (dev_priv == NULL)
2244 return DRM_ERR(ENOMEM); 2228 return -ENOMEM;
2245 2229
2246 memset(dev_priv, 0, sizeof(drm_radeon_private_t)); 2230 memset(dev_priv, 0, sizeof(drm_radeon_private_t));
2247 dev->dev_private = (void *)dev_priv; 2231 dev->dev_private = (void *)dev_priv;
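
radeon_cp_stop and radeon_cp_buffers above show the other half of the interface change: DRM_COPY_FROM_USER_IOCTL and DRM_COPY_TO_USER_IOCTL disappear from the handlers, so the copy in each direction has to happen exactly once in the shared ioctl wrapper, before and after the handler runs. The sketch below is only an assumption about how such a wrapper can be shaped, written as a stand-alone model with memcpy standing in for copy_from_user()/copy_to_user(); it is not the actual drm_ioctl() code.

/* Stand-alone model of a "copy once in the wrapper" ioctl path; memcpy
 * stands in for copy_from_user()/copy_to_user(), and the types are local
 * simplifications rather than the real DRM ones.
 */
#include <errno.h>
#include <string.h>

struct drm_device { int irq; };
struct drm_file   { int authenticated; };

typedef int (*drm_ioctl_fn)(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);

static int model_copy(void *dst, const void *src, unsigned long n)
{
        memcpy(dst, src, n);              /* copy_{from,to}_user() in the kernel */
        return 0;
}

static int model_ioctl(struct drm_device *dev, struct drm_file *file_priv,
                       drm_ioctl_fn fn, void *user_arg,
                       unsigned long size, int in, int out)
{
        char kbuf[128];                   /* kernel-side copy of the argument */
        int ret;

        if (size > sizeof(kbuf))
                return -EINVAL;
        if (in && model_copy(kbuf, user_arg, size))
                return -EFAULT;           /* single inbound copy */

        ret = fn(dev, kbuf, file_priv);   /* handler sees kernel memory only */

        if (ret == 0 && out && model_copy(user_arg, kbuf, size))
                return -EFAULT;           /* single outbound copy */
        return ret;
}

Whether a given ioctl copies in, out, or both would presumably be keyed off the direction bits of the command number, which is also why explicit user-space copies survive only for pointer fields embedded inside the argument structs, such as param->value in r128_getparam above.
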
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index 3b3d9357201c..e4077bc212b3 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -188,7 +188,7 @@ struct mem_block {
188 struct mem_block *prev; 188 struct mem_block *prev;
189 int start; 189 int start;
190 int size; 190 int size;
191 DRMFILE filp; /* 0: free, -1: heap, other: real files */ 191 struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
192}; 192};
193 193
194struct radeon_surface { 194struct radeon_surface {
@@ -203,7 +203,7 @@ struct radeon_virt_surface {
203 u32 lower; 203 u32 lower;
204 u32 upper; 204 u32 upper;
205 u32 flags; 205 u32 flags;
206 DRMFILE filp; 206 struct drm_file *file_priv;
207}; 207};
208 208
209typedef struct drm_radeon_private { 209typedef struct drm_radeon_private {
@@ -307,7 +307,7 @@ typedef struct drm_radeon_kcmd_buffer {
307} drm_radeon_kcmd_buffer_t; 307} drm_radeon_kcmd_buffer_t;
308 308
309extern int radeon_no_wb; 309extern int radeon_no_wb;
310extern drm_ioctl_desc_t radeon_ioctls[]; 310extern struct drm_ioctl_desc radeon_ioctls[];
311extern int radeon_max_ioctl; 311extern int radeon_max_ioctl;
312 312
313/* Check whether the given hardware address is inside the framebuffer or the 313/* Check whether the given hardware address is inside the framebuffer or the
@@ -326,15 +326,15 @@ static __inline__ int radeon_check_offset(drm_radeon_private_t *dev_priv,
326} 326}
327 327
328 /* radeon_cp.c */ 328 /* radeon_cp.c */
329extern int radeon_cp_init(DRM_IOCTL_ARGS); 329extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
330extern int radeon_cp_start(DRM_IOCTL_ARGS); 330extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
331extern int radeon_cp_stop(DRM_IOCTL_ARGS); 331extern int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
332extern int radeon_cp_reset(DRM_IOCTL_ARGS); 332extern int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
333extern int radeon_cp_idle(DRM_IOCTL_ARGS); 333extern int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
334extern int radeon_cp_resume(DRM_IOCTL_ARGS); 334extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv);
335extern int radeon_engine_reset(DRM_IOCTL_ARGS); 335extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
336extern int radeon_fullscreen(DRM_IOCTL_ARGS); 336extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
337extern int radeon_cp_buffers(DRM_IOCTL_ARGS); 337extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
338 338
339extern void radeon_freelist_reset(struct drm_device * dev); 339extern void radeon_freelist_reset(struct drm_device * dev);
340extern struct drm_buf *radeon_freelist_get(struct drm_device * dev); 340extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
@@ -347,15 +347,16 @@ extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags);
347extern int radeon_presetup(struct drm_device *dev); 347extern int radeon_presetup(struct drm_device *dev);
348extern int radeon_driver_postcleanup(struct drm_device *dev); 348extern int radeon_driver_postcleanup(struct drm_device *dev);
349 349
350extern int radeon_mem_alloc(DRM_IOCTL_ARGS); 350extern int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
351extern int radeon_mem_free(DRM_IOCTL_ARGS); 351extern int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
352extern int radeon_mem_init_heap(DRM_IOCTL_ARGS); 352extern int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv);
353extern void radeon_mem_takedown(struct mem_block **heap); 353extern void radeon_mem_takedown(struct mem_block **heap);
354extern void radeon_mem_release(DRMFILE filp, struct mem_block *heap); 354extern void radeon_mem_release(struct drm_file *file_priv,
355 struct mem_block *heap);
355 356
356 /* radeon_irq.c */ 357 /* radeon_irq.c */
357extern int radeon_irq_emit(DRM_IOCTL_ARGS); 358extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
358extern int radeon_irq_wait(DRM_IOCTL_ARGS); 359extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
359 360
360extern void radeon_do_release(struct drm_device * dev); 361extern void radeon_do_release(struct drm_device * dev);
361extern int radeon_driver_vblank_wait(struct drm_device * dev, 362extern int radeon_driver_vblank_wait(struct drm_device * dev,
@@ -372,7 +373,7 @@ extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
372extern int radeon_driver_load(struct drm_device *dev, unsigned long flags); 373extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
373extern int radeon_driver_unload(struct drm_device *dev); 374extern int radeon_driver_unload(struct drm_device *dev);
374extern int radeon_driver_firstopen(struct drm_device *dev); 375extern int radeon_driver_firstopen(struct drm_device *dev);
375extern void radeon_driver_preclose(struct drm_device * dev, DRMFILE filp); 376extern void radeon_driver_preclose(struct drm_device * dev, struct drm_file *file_priv);
376extern void radeon_driver_postclose(struct drm_device * dev, struct drm_file * filp); 377extern void radeon_driver_postclose(struct drm_device * dev, struct drm_file * filp);
377extern void radeon_driver_lastclose(struct drm_device * dev); 378extern void radeon_driver_lastclose(struct drm_device * dev);
378extern int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv); 379extern int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv);
@@ -382,8 +383,8 @@ extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
382/* r300_cmdbuf.c */ 383/* r300_cmdbuf.c */
383extern void r300_init_reg_flags(void); 384extern void r300_init_reg_flags(void);
384 385
385extern int r300_do_cp_cmdbuf(struct drm_device * dev, DRMFILE filp, 386extern int r300_do_cp_cmdbuf(struct drm_device * dev,
386 struct drm_file * filp_priv, 387 struct drm_file *file_priv,
387 drm_radeon_kcmd_buffer_t * cmdbuf); 388 drm_radeon_kcmd_buffer_t * cmdbuf);
388 389
389/* Flags for stats.boxes 390/* Flags for stats.boxes
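
The mem_block change in this header keeps the old ownership convention while swapping the type of the tag: NULL still marks a free block, an all-ones sentinel still marks the heap anchor, and any other value is the struct drm_file of the owning client (the updated comment still says -1, mirroring the old DRMFILE convention). A small stand-alone model of that convention follows; the struct, the sentinel name and the helper are illustrative only, loosely following radeon_mem_release() further down.

/* Model of the owner-tagged block list; all names here are hypothetical. */
#include <stddef.h>

struct drm_file { int placeholder; };

#define HEAP_SENTINEL ((struct drm_file *)-1)   /* stand-in for the "-1: heap" tag */

struct mem_block {
        struct mem_block *next, *prev;
        int start, size;
        struct drm_file *file_priv;   /* NULL: free, HEAP_SENTINEL: anchor, else owner */
};

/* Mark every block owned by a closing client as free again; merging of
 * neighbouring free blocks is left out of this sketch.
 */
static void release_client_blocks(struct mem_block *heap,
                                  struct drm_file *file_priv)
{
        struct mem_block *p;

        for (p = heap->next; p != heap; p = p->next)
                if (p->file_priv == file_priv)
                        p->file_priv = NULL;
}
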
diff --git a/drivers/char/drm/radeon_irq.c b/drivers/char/drm/radeon_irq.c
index ad8a0ac7182e..f89e57665b64 100644
--- a/drivers/char/drm/radeon_irq.c
+++ b/drivers/char/drm/radeon_irq.c
@@ -155,7 +155,7 @@ int radeon_driver_vblank_do_wait(struct drm_device * dev, unsigned int *sequence
155 atomic_t *counter; 155 atomic_t *counter;
156 if (!dev_priv) { 156 if (!dev_priv) {
157 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 157 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
158 return DRM_ERR(EINVAL); 158 return -EINVAL;
159 } 159 }
160 160
161 if (crtc == DRM_RADEON_VBLANK_CRTC1) { 161 if (crtc == DRM_RADEON_VBLANK_CRTC1) {
@@ -165,7 +165,7 @@ int radeon_driver_vblank_do_wait(struct drm_device * dev, unsigned int *sequence
165 counter = &dev->vbl_received2; 165 counter = &dev->vbl_received2;
166 ack |= RADEON_CRTC2_VBLANK_STAT; 166 ack |= RADEON_CRTC2_VBLANK_STAT;
167 } else 167 } else
168 return DRM_ERR(EINVAL); 168 return -EINVAL;
169 169
170 radeon_acknowledge_irqs(dev_priv, ack); 170 radeon_acknowledge_irqs(dev_priv, ack);
171 171
@@ -196,28 +196,24 @@ int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
196 196
197/* Needs the lock as it touches the ring. 197/* Needs the lock as it touches the ring.
198 */ 198 */
199int radeon_irq_emit(DRM_IOCTL_ARGS) 199int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
200{ 200{
201 DRM_DEVICE;
202 drm_radeon_private_t *dev_priv = dev->dev_private; 201 drm_radeon_private_t *dev_priv = dev->dev_private;
203 drm_radeon_irq_emit_t emit; 202 drm_radeon_irq_emit_t *emit = data;
204 int result; 203 int result;
205 204
206 LOCK_TEST_WITH_RETURN(dev, filp); 205 LOCK_TEST_WITH_RETURN(dev, file_priv);
207 206
208 if (!dev_priv) { 207 if (!dev_priv) {
209 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 208 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
210 return DRM_ERR(EINVAL); 209 return -EINVAL;
211 } 210 }
212 211
213 DRM_COPY_FROM_USER_IOCTL(emit, (drm_radeon_irq_emit_t __user *) data,
214 sizeof(emit));
215
216 result = radeon_emit_irq(dev); 212 result = radeon_emit_irq(dev);
217 213
218 if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) { 214 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
219 DRM_ERROR("copy_to_user\n"); 215 DRM_ERROR("copy_to_user\n");
220 return DRM_ERR(EFAULT); 216 return -EFAULT;
221 } 217 }
222 218
223 return 0; 219 return 0;
@@ -225,21 +221,17 @@ int radeon_irq_emit(DRM_IOCTL_ARGS)
225 221
226/* Doesn't need the hardware lock. 222/* Doesn't need the hardware lock.
227 */ 223 */
228int radeon_irq_wait(DRM_IOCTL_ARGS) 224int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
229{ 225{
230 DRM_DEVICE;
231 drm_radeon_private_t *dev_priv = dev->dev_private; 226 drm_radeon_private_t *dev_priv = dev->dev_private;
232 drm_radeon_irq_wait_t irqwait; 227 drm_radeon_irq_wait_t *irqwait = data;
233 228
234 if (!dev_priv) { 229 if (!dev_priv) {
235 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 230 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
236 return DRM_ERR(EINVAL); 231 return -EINVAL;
237 } 232 }
238 233
239 DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_radeon_irq_wait_t __user *) data, 234 return radeon_wait_irq(dev, irqwait->irq_seq);
240 sizeof(irqwait));
241
242 return radeon_wait_irq(dev, irqwait.irq_seq);
243} 235}
244 236
245static void radeon_enable_interrupt(struct drm_device *dev) 237static void radeon_enable_interrupt(struct drm_device *dev)
@@ -320,7 +312,7 @@ int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
320 drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; 312 drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
321 if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) { 313 if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
322 DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value); 314 DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value);
323 return DRM_ERR(EINVAL); 315 return -EINVAL;
324 } 316 }
325 dev_priv->vblank_crtc = (unsigned int)value; 317 dev_priv->vblank_crtc = (unsigned int)value;
326 radeon_enable_interrupt(dev); 318 radeon_enable_interrupt(dev);
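
radeon_irq_emit above is a reminder that one explicit copy survives the conversion: the ioctl struct itself now arrives in kernel memory, but emit->irq_seq is a user-space pointer stored inside that struct, so the emitted sequence number still has to be written back with DRM_COPY_TO_USER. The stand-alone sketch below models that two-level situation with plain pointers and memcpy standing in for the __user pointer and copy_to_user(); the names are hypothetical.

/* Model: the ioctl struct body arrives in kernel memory, but a pointer
 * field inside it still refers to the caller's address space.
 */
#include <errno.h>
#include <string.h>

struct model_irq_emit {
        int *irq_seq;                     /* int __user * in the real ioctl */
};

static int model_put_user(int *dst, int value)
{
        memcpy(dst, &value, sizeof(value));
        return 0;                         /* copy_to_user() can fail; this model cannot */
}

static int model_irq_emit_handler(void *data, int emitted_seq)
{
        struct model_irq_emit *emit = data;   /* struct itself: no inbound copy needed */

        if (model_put_user(emit->irq_seq, emitted_seq))
                return -EFAULT;               /* only the pointed-to int is written back */
        return 0;
}
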
diff --git a/drivers/char/drm/radeon_mem.c b/drivers/char/drm/radeon_mem.c
index 517cad8b6e3a..a29acfe2f973 100644
--- a/drivers/char/drm/radeon_mem.c
+++ b/drivers/char/drm/radeon_mem.c
@@ -39,7 +39,7 @@
39 */ 39 */
40 40
41static struct mem_block *split_block(struct mem_block *p, int start, int size, 41static struct mem_block *split_block(struct mem_block *p, int start, int size,
42 DRMFILE filp) 42 struct drm_file *file_priv)
43{ 43{
44 /* Maybe cut off the start of an existing block */ 44 /* Maybe cut off the start of an existing block */
45 if (start > p->start) { 45 if (start > p->start) {
@@ -49,7 +49,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
49 goto out; 49 goto out;
50 newblock->start = start; 50 newblock->start = start;
51 newblock->size = p->size - (start - p->start); 51 newblock->size = p->size - (start - p->start);
52 newblock->filp = NULL; 52 newblock->file_priv = NULL;
53 newblock->next = p->next; 53 newblock->next = p->next;
54 newblock->prev = p; 54 newblock->prev = p;
55 p->next->prev = newblock; 55 p->next->prev = newblock;
@@ -66,7 +66,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
66 goto out; 66 goto out;
67 newblock->start = start + size; 67 newblock->start = start + size;
68 newblock->size = p->size - size; 68 newblock->size = p->size - size;
69 newblock->filp = NULL; 69 newblock->file_priv = NULL;
70 newblock->next = p->next; 70 newblock->next = p->next;
71 newblock->prev = p; 71 newblock->prev = p;
72 p->next->prev = newblock; 72 p->next->prev = newblock;
@@ -76,20 +76,20 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
76 76
77 out: 77 out:
78 /* Our block is in the middle */ 78 /* Our block is in the middle */
79 p->filp = filp; 79 p->file_priv = file_priv;
80 return p; 80 return p;
81} 81}
82 82
83static struct mem_block *alloc_block(struct mem_block *heap, int size, 83static struct mem_block *alloc_block(struct mem_block *heap, int size,
84 int align2, DRMFILE filp) 84 int align2, struct drm_file *file_priv)
85{ 85{
86 struct mem_block *p; 86 struct mem_block *p;
87 int mask = (1 << align2) - 1; 87 int mask = (1 << align2) - 1;
88 88
89 list_for_each(p, heap) { 89 list_for_each(p, heap) {
90 int start = (p->start + mask) & ~mask; 90 int start = (p->start + mask) & ~mask;
91 if (p->filp == 0 && start + size <= p->start + p->size) 91 if (p->file_priv == 0 && start + size <= p->start + p->size)
92 return split_block(p, start, size, filp); 92 return split_block(p, start, size, file_priv);
93 } 93 }
94 94
95 return NULL; 95 return NULL;
@@ -108,12 +108,12 @@ static struct mem_block *find_block(struct mem_block *heap, int start)
108 108
109static void free_block(struct mem_block *p) 109static void free_block(struct mem_block *p)
110{ 110{
111 p->filp = NULL; 111 p->file_priv = NULL;
112 112
113 /* Assumes a single contiguous range. Needs a special filp in 113 /* Assumes a single contiguous range. Needs a special file_priv in
114 * 'heap' to stop it being subsumed. 114 * 'heap' to stop it being subsumed.
115 */ 115 */
116 if (p->next->filp == 0) { 116 if (p->next->file_priv == 0) {
117 struct mem_block *q = p->next; 117 struct mem_block *q = p->next;
118 p->size += q->size; 118 p->size += q->size;
119 p->next = q->next; 119 p->next = q->next;
@@ -121,7 +121,7 @@ static void free_block(struct mem_block *p)
121 drm_free(q, sizeof(*q), DRM_MEM_BUFS); 121 drm_free(q, sizeof(*q), DRM_MEM_BUFS);
122 } 122 }
123 123
124 if (p->prev->filp == 0) { 124 if (p->prev->file_priv == 0) {
125 struct mem_block *q = p->prev; 125 struct mem_block *q = p->prev;
126 q->size += p->size; 126 q->size += p->size;
127 q->next = p->next; 127 q->next = p->next;
@@ -137,28 +137,28 @@ static int init_heap(struct mem_block **heap, int start, int size)
137 struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS); 137 struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);
138 138
139 if (!blocks) 139 if (!blocks)
140 return DRM_ERR(ENOMEM); 140 return -ENOMEM;
141 141
142 *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS); 142 *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
143 if (!*heap) { 143 if (!*heap) {
144 drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS); 144 drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
145 return DRM_ERR(ENOMEM); 145 return -ENOMEM;
146 } 146 }
147 147
148 blocks->start = start; 148 blocks->start = start;
149 blocks->size = size; 149 blocks->size = size;
150 blocks->filp = NULL; 150 blocks->file_priv = NULL;
151 blocks->next = blocks->prev = *heap; 151 blocks->next = blocks->prev = *heap;
152 152
153 memset(*heap, 0, sizeof(**heap)); 153 memset(*heap, 0, sizeof(**heap));
154 (*heap)->filp = (DRMFILE) - 1; 154 (*heap)->file_priv = (struct drm_file *) - 1;
155 (*heap)->next = (*heap)->prev = blocks; 155 (*heap)->next = (*heap)->prev = blocks;
156 return 0; 156 return 0;
157} 157}
158 158
159/* Free all blocks associated with the releasing file. 159/* Free all blocks associated with the releasing file.
160 */ 160 */
161void radeon_mem_release(DRMFILE filp, struct mem_block *heap) 161void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
162{ 162{
163 struct mem_block *p; 163 struct mem_block *p;
164 164
@@ -166,15 +166,15 @@ void radeon_mem_release(DRMFILE filp, struct mem_block *heap)
166 return; 166 return;
167 167
168 list_for_each(p, heap) { 168 list_for_each(p, heap) {
169 if (p->filp == filp) 169 if (p->file_priv == file_priv)
170 p->filp = NULL; 170 p->file_priv = NULL;
171 } 171 }
172 172
173 /* Assumes a single contiguous range. Needs a special filp in 173 /* Assumes a single contiguous range. Needs a special file_priv in
174 * 'heap' to stop it being subsumed. 174 * 'heap' to stop it being subsumed.
175 */ 175 */
176 list_for_each(p, heap) { 176 list_for_each(p, heap) {
177 while (p->filp == 0 && p->next->filp == 0) { 177 while (p->file_priv == 0 && p->next->file_priv == 0) {
178 struct mem_block *q = p->next; 178 struct mem_block *q = p->next;
179 p->size += q->size; 179 p->size += q->size;
180 p->next = q->next; 180 p->next = q->next;
@@ -217,98 +217,86 @@ static struct mem_block **get_heap(drm_radeon_private_t * dev_priv, int region)
217 } 217 }
218} 218}
219 219
220int radeon_mem_alloc(DRM_IOCTL_ARGS) 220int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
221{ 221{
222 DRM_DEVICE;
223 drm_radeon_private_t *dev_priv = dev->dev_private; 222 drm_radeon_private_t *dev_priv = dev->dev_private;
224 drm_radeon_mem_alloc_t alloc; 223 drm_radeon_mem_alloc_t *alloc = data;
225 struct mem_block *block, **heap; 224 struct mem_block *block, **heap;
226 225
227 if (!dev_priv) { 226 if (!dev_priv) {
228 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 227 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
229 return DRM_ERR(EINVAL); 228 return -EINVAL;
230 } 229 }
231 230
232 DRM_COPY_FROM_USER_IOCTL(alloc, (drm_radeon_mem_alloc_t __user *) data, 231 heap = get_heap(dev_priv, alloc->region);
233 sizeof(alloc));
234
235 heap = get_heap(dev_priv, alloc.region);
236 if (!heap || !*heap) 232 if (!heap || !*heap)
237 return DRM_ERR(EFAULT); 233 return -EFAULT;
238 234
239 /* Make things easier on ourselves: all allocations at least 235 /* Make things easier on ourselves: all allocations at least
240 * 4k aligned. 236 * 4k aligned.
241 */ 237 */
242 if (alloc.alignment < 12) 238 if (alloc->alignment < 12)
243 alloc.alignment = 12; 239 alloc->alignment = 12;
244 240
245 block = alloc_block(*heap, alloc.size, alloc.alignment, filp); 241 block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
246 242
247 if (!block) 243 if (!block)
248 return DRM_ERR(ENOMEM); 244 return -ENOMEM;
249 245
250 if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) { 246 if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
247 sizeof(int))) {
251 DRM_ERROR("copy_to_user\n"); 248 DRM_ERROR("copy_to_user\n");
252 return DRM_ERR(EFAULT); 249 return -EFAULT;
253 } 250 }
254 251
255 return 0; 252 return 0;
256} 253}
257 254
258int radeon_mem_free(DRM_IOCTL_ARGS) 255int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
259{ 256{
260 DRM_DEVICE;
261 drm_radeon_private_t *dev_priv = dev->dev_private; 257 drm_radeon_private_t *dev_priv = dev->dev_private;
262 drm_radeon_mem_free_t memfree; 258 drm_radeon_mem_free_t *memfree = data;
263 struct mem_block *block, **heap; 259 struct mem_block *block, **heap;
264 260
265 if (!dev_priv) { 261 if (!dev_priv) {
266 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 262 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
267 return DRM_ERR(EINVAL); 263 return -EINVAL;
268 } 264 }
269 265
270 DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_mem_free_t __user *) data, 266 heap = get_heap(dev_priv, memfree->region);
271 sizeof(memfree));
272
273 heap = get_heap(dev_priv, memfree.region);
274 if (!heap || !*heap) 267 if (!heap || !*heap)
275 return DRM_ERR(EFAULT); 268 return -EFAULT;
276 269
277 block = find_block(*heap, memfree.region_offset); 270 block = find_block(*heap, memfree->region_offset);
278 if (!block) 271 if (!block)
279 return DRM_ERR(EFAULT); 272 return -EFAULT;
280 273
281 if (block->filp != filp) 274 if (block->file_priv != file_priv)
282 return DRM_ERR(EPERM); 275 return -EPERM;
283 276
284 free_block(block); 277 free_block(block);
285 return 0; 278 return 0;
286} 279}
287 280
288int radeon_mem_init_heap(DRM_IOCTL_ARGS) 281int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv)
289{ 282{
290 DRM_DEVICE;
291 drm_radeon_private_t *dev_priv = dev->dev_private; 283 drm_radeon_private_t *dev_priv = dev->dev_private;
292 drm_radeon_mem_init_heap_t initheap; 284 drm_radeon_mem_init_heap_t *initheap = data;
293 struct mem_block **heap; 285 struct mem_block **heap;
294 286
295 if (!dev_priv) { 287 if (!dev_priv) {
296 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 288 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
297 return DRM_ERR(EINVAL); 289 return -EINVAL;
298 } 290 }
299 291
300 DRM_COPY_FROM_USER_IOCTL(initheap, 292 heap = get_heap(dev_priv, initheap->region);
301 (drm_radeon_mem_init_heap_t __user *) data,
302 sizeof(initheap));
303
304 heap = get_heap(dev_priv, initheap.region);
305 if (!heap) 293 if (!heap)
306 return DRM_ERR(EFAULT); 294 return -EFAULT;
307 295
308 if (*heap) { 296 if (*heap) {
309 DRM_ERROR("heap already initialized?"); 297 DRM_ERROR("heap already initialized?");
310 return DRM_ERR(EFAULT); 298 return -EFAULT;
311 } 299 }
312 300
313 return init_heap(heap, initheap.start, initheap.size); 301 return init_heap(heap, initheap->start, initheap->size);
314} 302}
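
In the radeon_mem.c hunk above, the per-block owner changes type but not meaning: mem_block.file_priv plays the role filp used to, NULL still marks a free block, and the heap head keeps its (struct drm_file *)-1 sentinel so the coalescing in free_block() and radeon_mem_release() can never merge past it. A simplified sketch of that ownership convention; the struct is abbreviated here to the fields this file actually touches:

	struct mem_block {
		struct mem_block *next, *prev;
		int start, size;
		struct drm_file *file_priv;	/* NULL = free, (drm_file *)-1 = heap head */
	};

	/* only the file that allocated a block may free it (see radeon_mem_free) */
	static int owns_block(const struct mem_block *p,
			      const struct drm_file *file_priv)
	{
		return p->file_priv == file_priv;
	}
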
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c
index 3ddf86f2abf0..69c9f2febf43 100644
--- a/drivers/char/drm/radeon_state.c
+++ b/drivers/char/drm/radeon_state.c
@@ -39,7 +39,7 @@
39 39
40static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * 40static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
41 dev_priv, 41 dev_priv,
42 struct drm_file * filp_priv, 42 struct drm_file * file_priv,
43 u32 *offset) 43 u32 *offset)
44{ 44{
45 u64 off = *offset; 45 u64 off = *offset;
@@ -71,7 +71,7 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
71 * magic offset we get from SETPARAM or calculated from fb_location 71 * magic offset we get from SETPARAM or calculated from fb_location
72 */ 72 */
73 if (off < (dev_priv->fb_size + dev_priv->gart_size)) { 73 if (off < (dev_priv->fb_size + dev_priv->gart_size)) {
74 radeon_priv = filp_priv->driver_priv; 74 radeon_priv = file_priv->driver_priv;
75 off += radeon_priv->radeon_fb_delta; 75 off += radeon_priv->radeon_fb_delta;
76 } 76 }
77 77
@@ -85,29 +85,29 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
85 *offset = off; 85 *offset = off;
86 return 0; 86 return 0;
87 } 87 }
88 return DRM_ERR(EINVAL); 88 return -EINVAL;
89} 89}
90 90
91static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * 91static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
92 dev_priv, 92 dev_priv,
93 struct drm_file * filp_priv, 93 struct drm_file *file_priv,
94 int id, u32 *data) 94 int id, u32 *data)
95{ 95{
96 switch (id) { 96 switch (id) {
97 97
98 case RADEON_EMIT_PP_MISC: 98 case RADEON_EMIT_PP_MISC:
99 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 99 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
100 &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) { 100 &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
101 DRM_ERROR("Invalid depth buffer offset\n"); 101 DRM_ERROR("Invalid depth buffer offset\n");
102 return DRM_ERR(EINVAL); 102 return -EINVAL;
103 } 103 }
104 break; 104 break;
105 105
106 case RADEON_EMIT_PP_CNTL: 106 case RADEON_EMIT_PP_CNTL:
107 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 107 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
108 &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) { 108 &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
109 DRM_ERROR("Invalid colour buffer offset\n"); 109 DRM_ERROR("Invalid colour buffer offset\n");
110 return DRM_ERR(EINVAL); 110 return -EINVAL;
111 } 111 }
112 break; 112 break;
113 113
@@ -117,20 +117,20 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
117 case R200_EMIT_PP_TXOFFSET_3: 117 case R200_EMIT_PP_TXOFFSET_3:
118 case R200_EMIT_PP_TXOFFSET_4: 118 case R200_EMIT_PP_TXOFFSET_4:
119 case R200_EMIT_PP_TXOFFSET_5: 119 case R200_EMIT_PP_TXOFFSET_5:
120 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 120 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
121 &data[0])) { 121 &data[0])) {
122 DRM_ERROR("Invalid R200 texture offset\n"); 122 DRM_ERROR("Invalid R200 texture offset\n");
123 return DRM_ERR(EINVAL); 123 return -EINVAL;
124 } 124 }
125 break; 125 break;
126 126
127 case RADEON_EMIT_PP_TXFILTER_0: 127 case RADEON_EMIT_PP_TXFILTER_0:
128 case RADEON_EMIT_PP_TXFILTER_1: 128 case RADEON_EMIT_PP_TXFILTER_1:
129 case RADEON_EMIT_PP_TXFILTER_2: 129 case RADEON_EMIT_PP_TXFILTER_2:
130 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 130 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
131 &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) { 131 &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
132 DRM_ERROR("Invalid R100 texture offset\n"); 132 DRM_ERROR("Invalid R100 texture offset\n");
133 return DRM_ERR(EINVAL); 133 return -EINVAL;
134 } 134 }
135 break; 135 break;
136 136
@@ -143,11 +143,11 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
143 int i; 143 int i;
144 for (i = 0; i < 5; i++) { 144 for (i = 0; i < 5; i++) {
145 if (radeon_check_and_fixup_offset(dev_priv, 145 if (radeon_check_and_fixup_offset(dev_priv,
146 filp_priv, 146 file_priv,
147 &data[i])) { 147 &data[i])) {
148 DRM_ERROR 148 DRM_ERROR
149 ("Invalid R200 cubic texture offset\n"); 149 ("Invalid R200 cubic texture offset\n");
150 return DRM_ERR(EINVAL); 150 return -EINVAL;
151 } 151 }
152 } 152 }
153 break; 153 break;
@@ -159,11 +159,11 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
159 int i; 159 int i;
160 for (i = 0; i < 5; i++) { 160 for (i = 0; i < 5; i++) {
161 if (radeon_check_and_fixup_offset(dev_priv, 161 if (radeon_check_and_fixup_offset(dev_priv,
162 filp_priv, 162 file_priv,
163 &data[i])) { 163 &data[i])) {
164 DRM_ERROR 164 DRM_ERROR
165 ("Invalid R100 cubic texture offset\n"); 165 ("Invalid R100 cubic texture offset\n");
166 return DRM_ERR(EINVAL); 166 return -EINVAL;
167 } 167 }
168 } 168 }
169 } 169 }
@@ -256,7 +256,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
256 256
257 default: 257 default:
258 DRM_ERROR("Unknown state packet ID %d\n", id); 258 DRM_ERROR("Unknown state packet ID %d\n", id);
259 return DRM_ERR(EINVAL); 259 return -EINVAL;
260 } 260 }
261 261
262 return 0; 262 return 0;
@@ -264,7 +264,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
264 264
265static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * 265static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
266 dev_priv, 266 dev_priv,
267 struct drm_file *filp_priv, 267 struct drm_file *file_priv,
268 drm_radeon_kcmd_buffer_t * 268 drm_radeon_kcmd_buffer_t *
269 cmdbuf, 269 cmdbuf,
270 unsigned int *cmdsz) 270 unsigned int *cmdsz)
@@ -277,12 +277,12 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
277 277
278 if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) { 278 if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) {
279 DRM_ERROR("Not a type 3 packet\n"); 279 DRM_ERROR("Not a type 3 packet\n");
280 return DRM_ERR(EINVAL); 280 return -EINVAL;
281 } 281 }
282 282
283 if (4 * *cmdsz > cmdbuf->bufsz) { 283 if (4 * *cmdsz > cmdbuf->bufsz) {
284 DRM_ERROR("Packet size larger than size of data provided\n"); 284 DRM_ERROR("Packet size larger than size of data provided\n");
285 return DRM_ERR(EINVAL); 285 return -EINVAL;
286 } 286 }
287 287
288 switch(cmd[0] & 0xff00) { 288 switch(cmd[0] & 0xff00) {
@@ -307,7 +307,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
307 /* safe but r200 only */ 307 /* safe but r200 only */
308 if (dev_priv->microcode_version != UCODE_R200) { 308 if (dev_priv->microcode_version != UCODE_R200) {
309 DRM_ERROR("Invalid 3d packet for r100-class chip\n"); 309 DRM_ERROR("Invalid 3d packet for r100-class chip\n");
310 return DRM_ERR(EINVAL); 310 return -EINVAL;
311 } 311 }
312 break; 312 break;
313 313
@@ -317,7 +317,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
317 if (count > 18) { /* 12 arrays max */ 317 if (count > 18) { /* 12 arrays max */
318 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", 318 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
319 count); 319 count);
320 return DRM_ERR(EINVAL); 320 return -EINVAL;
321 } 321 }
322 322
323 /* carefully check packet contents */ 323 /* carefully check packet contents */
@@ -326,22 +326,25 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
326 i = 2; 326 i = 2;
327 while ((k < narrays) && (i < (count + 2))) { 327 while ((k < narrays) && (i < (count + 2))) {
328 i++; /* skip attribute field */ 328 i++; /* skip attribute field */
329 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[i])) { 329 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
330 &cmd[i])) {
330 DRM_ERROR 331 DRM_ERROR
331 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", 332 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
332 k, i); 333 k, i);
333 return DRM_ERR(EINVAL); 334 return -EINVAL;
334 } 335 }
335 k++; 336 k++;
336 i++; 337 i++;
337 if (k == narrays) 338 if (k == narrays)
338 break; 339 break;
339 /* have one more to process, they come in pairs */ 340 /* have one more to process, they come in pairs */
340 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[i])) { 341 if (radeon_check_and_fixup_offset(dev_priv,
342 file_priv, &cmd[i]))
343 {
341 DRM_ERROR 344 DRM_ERROR
342 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", 345 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
343 k, i); 346 k, i);
344 return DRM_ERR(EINVAL); 347 return -EINVAL;
345 } 348 }
346 k++; 349 k++;
347 i++; 350 i++;
@@ -351,33 +354,33 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
351 DRM_ERROR 354 DRM_ERROR
352 ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", 355 ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
353 k, i, narrays, count + 1); 356 k, i, narrays, count + 1);
354 return DRM_ERR(EINVAL); 357 return -EINVAL;
355 } 358 }
356 break; 359 break;
357 360
358 case RADEON_3D_RNDR_GEN_INDX_PRIM: 361 case RADEON_3D_RNDR_GEN_INDX_PRIM:
359 if (dev_priv->microcode_version != UCODE_R100) { 362 if (dev_priv->microcode_version != UCODE_R100) {
360 DRM_ERROR("Invalid 3d packet for r200-class chip\n"); 363 DRM_ERROR("Invalid 3d packet for r200-class chip\n");
361 return DRM_ERR(EINVAL); 364 return -EINVAL;
362 } 365 }
363 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[1])) { 366 if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) {
364 DRM_ERROR("Invalid rndr_gen_indx offset\n"); 367 DRM_ERROR("Invalid rndr_gen_indx offset\n");
365 return DRM_ERR(EINVAL); 368 return -EINVAL;
366 } 369 }
367 break; 370 break;
368 371
369 case RADEON_CP_INDX_BUFFER: 372 case RADEON_CP_INDX_BUFFER:
370 if (dev_priv->microcode_version != UCODE_R200) { 373 if (dev_priv->microcode_version != UCODE_R200) {
371 DRM_ERROR("Invalid 3d packet for r100-class chip\n"); 374 DRM_ERROR("Invalid 3d packet for r100-class chip\n");
372 return DRM_ERR(EINVAL); 375 return -EINVAL;
373 } 376 }
374 if ((cmd[1] & 0x8000ffff) != 0x80000810) { 377 if ((cmd[1] & 0x8000ffff) != 0x80000810) {
375 DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); 378 DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
376 return DRM_ERR(EINVAL); 379 return -EINVAL;
377 } 380 }
378 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[2])) { 381 if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[2])) {
379 DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); 382 DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
380 return DRM_ERR(EINVAL); 383 return -EINVAL;
381 } 384 }
382 break; 385 break;
383 386
@@ -389,9 +392,9 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
389 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { 392 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
390 offset = cmd[2] << 10; 393 offset = cmd[2] << 10;
391 if (radeon_check_and_fixup_offset 394 if (radeon_check_and_fixup_offset
392 (dev_priv, filp_priv, &offset)) { 395 (dev_priv, file_priv, &offset)) {
393 DRM_ERROR("Invalid first packet offset\n"); 396 DRM_ERROR("Invalid first packet offset\n");
394 return DRM_ERR(EINVAL); 397 return -EINVAL;
395 } 398 }
396 cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10; 399 cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10;
397 } 400 }
@@ -400,9 +403,9 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
400 (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { 403 (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
401 offset = cmd[3] << 10; 404 offset = cmd[3] << 10;
402 if (radeon_check_and_fixup_offset 405 if (radeon_check_and_fixup_offset
403 (dev_priv, filp_priv, &offset)) { 406 (dev_priv, file_priv, &offset)) {
404 DRM_ERROR("Invalid second packet offset\n"); 407 DRM_ERROR("Invalid second packet offset\n");
405 return DRM_ERR(EINVAL); 408 return -EINVAL;
406 } 409 }
407 cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10; 410 cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10;
408 } 411 }
@@ -410,7 +413,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
410 413
411 default: 414 default:
412 DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00); 415 DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00);
413 return DRM_ERR(EINVAL); 416 return -EINVAL;
414 } 417 }
415 418
416 return 0; 419 return 0;
@@ -439,7 +442,7 @@ static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv,
439/* Emit 1.1 state 442/* Emit 1.1 state
440 */ 443 */
441static int radeon_emit_state(drm_radeon_private_t * dev_priv, 444static int radeon_emit_state(drm_radeon_private_t * dev_priv,
442 struct drm_file * filp_priv, 445 struct drm_file *file_priv,
443 drm_radeon_context_regs_t * ctx, 446 drm_radeon_context_regs_t * ctx,
444 drm_radeon_texture_regs_t * tex, 447 drm_radeon_texture_regs_t * tex,
445 unsigned int dirty) 448 unsigned int dirty)
@@ -448,16 +451,16 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv,
448 DRM_DEBUG("dirty=0x%08x\n", dirty); 451 DRM_DEBUG("dirty=0x%08x\n", dirty);
449 452
450 if (dirty & RADEON_UPLOAD_CONTEXT) { 453 if (dirty & RADEON_UPLOAD_CONTEXT) {
451 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 454 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
452 &ctx->rb3d_depthoffset)) { 455 &ctx->rb3d_depthoffset)) {
453 DRM_ERROR("Invalid depth buffer offset\n"); 456 DRM_ERROR("Invalid depth buffer offset\n");
454 return DRM_ERR(EINVAL); 457 return -EINVAL;
455 } 458 }
456 459
457 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 460 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
458 &ctx->rb3d_coloroffset)) { 461 &ctx->rb3d_coloroffset)) {
459 DRM_ERROR("Invalid depth buffer offset\n"); 462 DRM_ERROR("Invalid depth buffer offset\n");
460 return DRM_ERR(EINVAL); 463 return -EINVAL;
461 } 464 }
462 465
463 BEGIN_RING(14); 466 BEGIN_RING(14);
@@ -543,10 +546,10 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv,
543 } 546 }
544 547
545 if (dirty & RADEON_UPLOAD_TEX0) { 548 if (dirty & RADEON_UPLOAD_TEX0) {
546 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 549 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
547 &tex[0].pp_txoffset)) { 550 &tex[0].pp_txoffset)) {
548 DRM_ERROR("Invalid texture offset for unit 0\n"); 551 DRM_ERROR("Invalid texture offset for unit 0\n");
549 return DRM_ERR(EINVAL); 552 return -EINVAL;
550 } 553 }
551 554
552 BEGIN_RING(9); 555 BEGIN_RING(9);
@@ -563,10 +566,10 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv,
563 } 566 }
564 567
565 if (dirty & RADEON_UPLOAD_TEX1) { 568 if (dirty & RADEON_UPLOAD_TEX1) {
566 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 569 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
567 &tex[1].pp_txoffset)) { 570 &tex[1].pp_txoffset)) {
568 DRM_ERROR("Invalid texture offset for unit 1\n"); 571 DRM_ERROR("Invalid texture offset for unit 1\n");
569 return DRM_ERR(EINVAL); 572 return -EINVAL;
570 } 573 }
571 574
572 BEGIN_RING(9); 575 BEGIN_RING(9);
@@ -583,10 +586,10 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv,
583 } 586 }
584 587
585 if (dirty & RADEON_UPLOAD_TEX2) { 588 if (dirty & RADEON_UPLOAD_TEX2) {
586 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 589 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
587 &tex[2].pp_txoffset)) { 590 &tex[2].pp_txoffset)) {
588 DRM_ERROR("Invalid texture offset for unit 2\n"); 591 DRM_ERROR("Invalid texture offset for unit 2\n");
589 return DRM_ERR(EINVAL); 592 return -EINVAL;
590 } 593 }
591 594
592 BEGIN_RING(9); 595 BEGIN_RING(9);
@@ -608,7 +611,7 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv,
608/* Emit 1.2 state 611/* Emit 1.2 state
609 */ 612 */
610static int radeon_emit_state2(drm_radeon_private_t * dev_priv, 613static int radeon_emit_state2(drm_radeon_private_t * dev_priv,
611 struct drm_file * filp_priv, 614 struct drm_file *file_priv,
612 drm_radeon_state_t * state) 615 drm_radeon_state_t * state)
613{ 616{
614 RING_LOCALS; 617 RING_LOCALS;
@@ -621,7 +624,7 @@ static int radeon_emit_state2(drm_radeon_private_t * dev_priv,
621 ADVANCE_RING(); 624 ADVANCE_RING();
622 } 625 }
623 626
624 return radeon_emit_state(dev_priv, filp_priv, &state->context, 627 return radeon_emit_state(dev_priv, file_priv, &state->context,
625 state->tex, state->dirty); 628 state->tex, state->dirty);
626} 629}
627 630
@@ -1646,13 +1649,12 @@ static void radeon_cp_dispatch_indices(struct drm_device * dev,
1646 1649
1647#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE 1650#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE
1648 1651
1649static int radeon_cp_dispatch_texture(DRMFILE filp, 1652static int radeon_cp_dispatch_texture(struct drm_device * dev,
1650 struct drm_device * dev, 1653 struct drm_file *file_priv,
1651 drm_radeon_texture_t * tex, 1654 drm_radeon_texture_t * tex,
1652 drm_radeon_tex_image_t * image) 1655 drm_radeon_tex_image_t * image)
1653{ 1656{
1654 drm_radeon_private_t *dev_priv = dev->dev_private; 1657 drm_radeon_private_t *dev_priv = dev->dev_private;
1655 struct drm_file *filp_priv;
1656 struct drm_buf *buf; 1658 struct drm_buf *buf;
1657 u32 format; 1659 u32 format;
1658 u32 *buffer; 1660 u32 *buffer;
@@ -1664,11 +1666,9 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1664 u32 offset; 1666 u32 offset;
1665 RING_LOCALS; 1667 RING_LOCALS;
1666 1668
1667 DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); 1669 if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex->offset)) {
1668
1669 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &tex->offset)) {
1670 DRM_ERROR("Invalid destination offset\n"); 1670 DRM_ERROR("Invalid destination offset\n");
1671 return DRM_ERR(EINVAL); 1671 return -EINVAL;
1672 } 1672 }
1673 1673
1674 dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD; 1674 dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;
@@ -1711,11 +1711,11 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1711 break; 1711 break;
1712 default: 1712 default:
1713 DRM_ERROR("invalid texture format %d\n", tex->format); 1713 DRM_ERROR("invalid texture format %d\n", tex->format);
1714 return DRM_ERR(EINVAL); 1714 return -EINVAL;
1715 } 1715 }
1716 spitch = blit_width >> 6; 1716 spitch = blit_width >> 6;
1717 if (spitch == 0 && image->height > 1) 1717 if (spitch == 0 && image->height > 1)
1718 return DRM_ERR(EINVAL); 1718 return -EINVAL;
1719 1719
1720 texpitch = tex->pitch; 1720 texpitch = tex->pitch;
1721 if ((texpitch << 22) & RADEON_DST_TILE_MICRO) { 1721 if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
@@ -1760,8 +1760,8 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1760 if (!buf) { 1760 if (!buf) {
1761 DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n"); 1761 DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
1762 if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image))) 1762 if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
1763 return DRM_ERR(EFAULT); 1763 return -EFAULT;
1764 return DRM_ERR(EAGAIN); 1764 return -EAGAIN;
1765 } 1765 }
1766 1766
1767 /* Dispatch the indirect buffer. 1767 /* Dispatch the indirect buffer.
@@ -1774,7 +1774,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1774 do { \ 1774 do { \
1775 if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\ 1775 if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
1776 DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \ 1776 DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
1777 return DRM_ERR(EFAULT); \ 1777 return -EFAULT; \
1778 } \ 1778 } \
1779 } while(0) 1779 } while(0)
1780 1780
@@ -1841,7 +1841,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1841 } 1841 }
1842 1842
1843#undef RADEON_COPY_MT 1843#undef RADEON_COPY_MT
1844 buf->filp = filp; 1844 buf->file_priv = file_priv;
1845 buf->used = size; 1845 buf->used = size;
1846 offset = dev_priv->gart_buffers_offset + buf->offset; 1846 offset = dev_priv->gart_buffers_offset + buf->offset;
1847 BEGIN_RING(9); 1847 BEGIN_RING(9);
@@ -1861,6 +1861,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1861 OUT_RING((image->width << 16) | height); 1861 OUT_RING((image->width << 16) | height);
1862 RADEON_WAIT_UNTIL_2D_IDLE(); 1862 RADEON_WAIT_UNTIL_2D_IDLE();
1863 ADVANCE_RING(); 1863 ADVANCE_RING();
1864 COMMIT_RING();
1864 1865
1865 radeon_cp_discard_buffer(dev, buf); 1866 radeon_cp_discard_buffer(dev, buf);
1866 1867
@@ -1878,6 +1879,8 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1878 RADEON_FLUSH_CACHE(); 1879 RADEON_FLUSH_CACHE();
1879 RADEON_WAIT_UNTIL_2D_IDLE(); 1880 RADEON_WAIT_UNTIL_2D_IDLE();
1880 ADVANCE_RING(); 1881 ADVANCE_RING();
1882 COMMIT_RING();
1883
1881 return 0; 1884 return 0;
1882} 1885}
1883 1886
@@ -1929,7 +1932,8 @@ static void radeon_apply_surface_regs(int surf_index,
1929 * not always be available. 1932 * not always be available.
1930 */ 1933 */
1931static int alloc_surface(drm_radeon_surface_alloc_t *new, 1934static int alloc_surface(drm_radeon_surface_alloc_t *new,
1932 drm_radeon_private_t *dev_priv, DRMFILE filp) 1935 drm_radeon_private_t *dev_priv,
1936 struct drm_file *file_priv)
1933{ 1937{
1934 struct radeon_virt_surface *s; 1938 struct radeon_virt_surface *s;
1935 int i; 1939 int i;
@@ -1959,7 +1963,7 @@ static int alloc_surface(drm_radeon_surface_alloc_t *new,
1959 1963
1960 /* find a virtual surface */ 1964 /* find a virtual surface */
1961 for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) 1965 for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++)
1962 if (dev_priv->virt_surfaces[i].filp == 0) 1966 if (dev_priv->virt_surfaces[i].file_priv == 0)
1963 break; 1967 break;
1964 if (i == 2 * RADEON_MAX_SURFACES) { 1968 if (i == 2 * RADEON_MAX_SURFACES) {
1965 return -1; 1969 return -1;
@@ -1977,7 +1981,7 @@ static int alloc_surface(drm_radeon_surface_alloc_t *new,
1977 s->lower = new_lower; 1981 s->lower = new_lower;
1978 s->upper = new_upper; 1982 s->upper = new_upper;
1979 s->flags = new->flags; 1983 s->flags = new->flags;
1980 s->filp = filp; 1984 s->file_priv = file_priv;
1981 dev_priv->surfaces[i].refcount++; 1985 dev_priv->surfaces[i].refcount++;
1982 dev_priv->surfaces[i].lower = s->lower; 1986 dev_priv->surfaces[i].lower = s->lower;
1983 radeon_apply_surface_regs(s->surface_index, dev_priv); 1987 radeon_apply_surface_regs(s->surface_index, dev_priv);
@@ -1993,7 +1997,7 @@ static int alloc_surface(drm_radeon_surface_alloc_t *new,
1993 s->lower = new_lower; 1997 s->lower = new_lower;
1994 s->upper = new_upper; 1998 s->upper = new_upper;
1995 s->flags = new->flags; 1999 s->flags = new->flags;
1996 s->filp = filp; 2000 s->file_priv = file_priv;
1997 dev_priv->surfaces[i].refcount++; 2001 dev_priv->surfaces[i].refcount++;
1998 dev_priv->surfaces[i].upper = s->upper; 2002 dev_priv->surfaces[i].upper = s->upper;
1999 radeon_apply_surface_regs(s->surface_index, dev_priv); 2003 radeon_apply_surface_regs(s->surface_index, dev_priv);
@@ -2009,7 +2013,7 @@ static int alloc_surface(drm_radeon_surface_alloc_t *new,
2009 s->lower = new_lower; 2013 s->lower = new_lower;
2010 s->upper = new_upper; 2014 s->upper = new_upper;
2011 s->flags = new->flags; 2015 s->flags = new->flags;
2012 s->filp = filp; 2016 s->file_priv = file_priv;
2013 dev_priv->surfaces[i].refcount = 1; 2017 dev_priv->surfaces[i].refcount = 1;
2014 dev_priv->surfaces[i].lower = s->lower; 2018 dev_priv->surfaces[i].lower = s->lower;
2015 dev_priv->surfaces[i].upper = s->upper; 2019 dev_priv->surfaces[i].upper = s->upper;
@@ -2023,7 +2027,8 @@ static int alloc_surface(drm_radeon_surface_alloc_t *new,
2023 return -1; 2027 return -1;
2024} 2028}
2025 2029
2026static int free_surface(DRMFILE filp, drm_radeon_private_t * dev_priv, 2030static int free_surface(struct drm_file *file_priv,
2031 drm_radeon_private_t * dev_priv,
2027 int lower) 2032 int lower)
2028{ 2033{
2029 struct radeon_virt_surface *s; 2034 struct radeon_virt_surface *s;
@@ -2031,8 +2036,9 @@ static int free_surface(DRMFILE filp, drm_radeon_private_t * dev_priv,
2031 /* find the virtual surface */ 2036 /* find the virtual surface */
2032 for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) { 2037 for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
2033 s = &(dev_priv->virt_surfaces[i]); 2038 s = &(dev_priv->virt_surfaces[i]);
2034 if (s->filp) { 2039 if (s->file_priv) {
2035 if ((lower == s->lower) && (filp == s->filp)) { 2040 if ((lower == s->lower) && (file_priv == s->file_priv))
2041 {
2036 if (dev_priv->surfaces[s->surface_index]. 2042 if (dev_priv->surfaces[s->surface_index].
2037 lower == s->lower) 2043 lower == s->lower)
2038 dev_priv->surfaces[s->surface_index]. 2044 dev_priv->surfaces[s->surface_index].
@@ -2048,7 +2054,7 @@ static int free_surface(DRMFILE filp, drm_radeon_private_t * dev_priv,
2048 refcount == 0) 2054 refcount == 0)
2049 dev_priv->surfaces[s->surface_index]. 2055 dev_priv->surfaces[s->surface_index].
2050 flags = 0; 2056 flags = 0;
2051 s->filp = NULL; 2057 s->file_priv = NULL;
2052 radeon_apply_surface_regs(s->surface_index, 2058 radeon_apply_surface_regs(s->surface_index,
2053 dev_priv); 2059 dev_priv);
2054 return 0; 2060 return 0;
@@ -2058,13 +2064,13 @@ static int free_surface(DRMFILE filp, drm_radeon_private_t * dev_priv,
2058 return 1; 2064 return 1;
2059} 2065}
2060 2066
2061static void radeon_surfaces_release(DRMFILE filp, 2067static void radeon_surfaces_release(struct drm_file *file_priv,
2062 drm_radeon_private_t * dev_priv) 2068 drm_radeon_private_t * dev_priv)
2063{ 2069{
2064 int i; 2070 int i;
2065 for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) { 2071 for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
2066 if (dev_priv->virt_surfaces[i].filp == filp) 2072 if (dev_priv->virt_surfaces[i].file_priv == file_priv)
2067 free_surface(filp, dev_priv, 2073 free_surface(file_priv, dev_priv,
2068 dev_priv->virt_surfaces[i].lower); 2074 dev_priv->virt_surfaces[i].lower);
2069 } 2075 }
2070} 2076}
@@ -2072,61 +2078,48 @@ static void radeon_surfaces_release(DRMFILE filp,
2072/* ================================================================ 2078/* ================================================================
2073 * IOCTL functions 2079 * IOCTL functions
2074 */ 2080 */
2075static int radeon_surface_alloc(DRM_IOCTL_ARGS) 2081static int radeon_surface_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
2076{ 2082{
2077 DRM_DEVICE;
2078 drm_radeon_private_t *dev_priv = dev->dev_private; 2083 drm_radeon_private_t *dev_priv = dev->dev_private;
2079 drm_radeon_surface_alloc_t alloc; 2084 drm_radeon_surface_alloc_t *alloc = data;
2080 2085
2081 DRM_COPY_FROM_USER_IOCTL(alloc, 2086 if (alloc_surface(alloc, dev_priv, file_priv) == -1)
2082 (drm_radeon_surface_alloc_t __user *) data, 2087 return -EINVAL;
2083 sizeof(alloc));
2084
2085 if (alloc_surface(&alloc, dev_priv, filp) == -1)
2086 return DRM_ERR(EINVAL);
2087 else 2088 else
2088 return 0; 2089 return 0;
2089} 2090}
2090 2091
2091static int radeon_surface_free(DRM_IOCTL_ARGS) 2092static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
2092{ 2093{
2093 DRM_DEVICE;
2094 drm_radeon_private_t *dev_priv = dev->dev_private; 2094 drm_radeon_private_t *dev_priv = dev->dev_private;
2095 drm_radeon_surface_free_t memfree; 2095 drm_radeon_surface_free_t *memfree = data;
2096
2097 DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_surface_free_t __user *) data,
2098 sizeof(memfree));
2099 2096
2100 if (free_surface(filp, dev_priv, memfree.address)) 2097 if (free_surface(file_priv, dev_priv, memfree->address))
2101 return DRM_ERR(EINVAL); 2098 return -EINVAL;
2102 else 2099 else
2103 return 0; 2100 return 0;
2104} 2101}
2105 2102
2106static int radeon_cp_clear(DRM_IOCTL_ARGS) 2103static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
2107{ 2104{
2108 DRM_DEVICE;
2109 drm_radeon_private_t *dev_priv = dev->dev_private; 2105 drm_radeon_private_t *dev_priv = dev->dev_private;
2110 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 2106 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
2111 drm_radeon_clear_t clear; 2107 drm_radeon_clear_t *clear = data;
2112 drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; 2108 drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
2113 DRM_DEBUG("\n"); 2109 DRM_DEBUG("\n");
2114 2110
2115 LOCK_TEST_WITH_RETURN(dev, filp); 2111 LOCK_TEST_WITH_RETURN(dev, file_priv);
2116
2117 DRM_COPY_FROM_USER_IOCTL(clear, (drm_radeon_clear_t __user *) data,
2118 sizeof(clear));
2119 2112
2120 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2113 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2121 2114
2122 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) 2115 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
2123 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; 2116 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
2124 2117
2125 if (DRM_COPY_FROM_USER(&depth_boxes, clear.depth_boxes, 2118 if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
2126 sarea_priv->nbox * sizeof(depth_boxes[0]))) 2119 sarea_priv->nbox * sizeof(depth_boxes[0])))
2127 return DRM_ERR(EFAULT); 2120 return -EFAULT;
2128 2121
2129 radeon_cp_dispatch_clear(dev, &clear, depth_boxes); 2122 radeon_cp_dispatch_clear(dev, clear, depth_boxes);
2130 2123
2131 COMMIT_RING(); 2124 COMMIT_RING();
2132 return 0; 2125 return 0;
@@ -2162,13 +2155,12 @@ static int radeon_do_init_pageflip(struct drm_device * dev)
2162/* Swapping and flipping are different operations, need different ioctls. 2155/* Swapping and flipping are different operations, need different ioctls.
2163 * They can & should be intermixed to support multiple 3d windows. 2156 * They can & should be intermixed to support multiple 3d windows.
2164 */ 2157 */
2165static int radeon_cp_flip(DRM_IOCTL_ARGS) 2158static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
2166{ 2159{
2167 DRM_DEVICE;
2168 drm_radeon_private_t *dev_priv = dev->dev_private; 2160 drm_radeon_private_t *dev_priv = dev->dev_private;
2169 DRM_DEBUG("\n"); 2161 DRM_DEBUG("\n");
2170 2162
2171 LOCK_TEST_WITH_RETURN(dev, filp); 2163 LOCK_TEST_WITH_RETURN(dev, file_priv);
2172 2164
2173 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2165 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2174 2166
@@ -2181,14 +2173,13 @@ static int radeon_cp_flip(DRM_IOCTL_ARGS)
2181 return 0; 2173 return 0;
2182} 2174}
2183 2175
2184static int radeon_cp_swap(DRM_IOCTL_ARGS) 2176static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
2185{ 2177{
2186 DRM_DEVICE;
2187 drm_radeon_private_t *dev_priv = dev->dev_private; 2178 drm_radeon_private_t *dev_priv = dev->dev_private;
2188 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 2179 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
2189 DRM_DEBUG("\n"); 2180 DRM_DEBUG("\n");
2190 2181
2191 LOCK_TEST_WITH_RETURN(dev, filp); 2182 LOCK_TEST_WITH_RETURN(dev, file_priv);
2192 2183
2193 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2184 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2194 2185
@@ -2202,64 +2193,57 @@ static int radeon_cp_swap(DRM_IOCTL_ARGS)
2202 return 0; 2193 return 0;
2203} 2194}
2204 2195
2205static int radeon_cp_vertex(DRM_IOCTL_ARGS) 2196static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
2206{ 2197{
2207 DRM_DEVICE;
2208 drm_radeon_private_t *dev_priv = dev->dev_private; 2198 drm_radeon_private_t *dev_priv = dev->dev_private;
2209 struct drm_file *filp_priv;
2210 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 2199 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
2211 struct drm_device_dma *dma = dev->dma; 2200 struct drm_device_dma *dma = dev->dma;
2212 struct drm_buf *buf; 2201 struct drm_buf *buf;
2213 drm_radeon_vertex_t vertex; 2202 drm_radeon_vertex_t *vertex = data;
2214 drm_radeon_tcl_prim_t prim; 2203 drm_radeon_tcl_prim_t prim;
2215 2204
2216 LOCK_TEST_WITH_RETURN(dev, filp); 2205 LOCK_TEST_WITH_RETURN(dev, file_priv);
2217
2218 DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
2219
2220 DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex_t __user *) data,
2221 sizeof(vertex));
2222 2206
2223 DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n", 2207 DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
2224 DRM_CURRENTPID, vertex.idx, vertex.count, vertex.discard); 2208 DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
2225 2209
2226 if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { 2210 if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
2227 DRM_ERROR("buffer index %d (of %d max)\n", 2211 DRM_ERROR("buffer index %d (of %d max)\n",
2228 vertex.idx, dma->buf_count - 1); 2212 vertex->idx, dma->buf_count - 1);
2229 return DRM_ERR(EINVAL); 2213 return -EINVAL;
2230 } 2214 }
2231 if (vertex.prim < 0 || vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { 2215 if (vertex->prim < 0 || vertex->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
2232 DRM_ERROR("buffer prim %d\n", vertex.prim); 2216 DRM_ERROR("buffer prim %d\n", vertex->prim);
2233 return DRM_ERR(EINVAL); 2217 return -EINVAL;
2234 } 2218 }
2235 2219
2236 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2220 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2237 VB_AGE_TEST_WITH_RETURN(dev_priv); 2221 VB_AGE_TEST_WITH_RETURN(dev_priv);
2238 2222
2239 buf = dma->buflist[vertex.idx]; 2223 buf = dma->buflist[vertex->idx];
2240 2224
2241 if (buf->filp != filp) { 2225 if (buf->file_priv != file_priv) {
2242 DRM_ERROR("process %d using buffer owned by %p\n", 2226 DRM_ERROR("process %d using buffer owned by %p\n",
2243 DRM_CURRENTPID, buf->filp); 2227 DRM_CURRENTPID, buf->file_priv);
2244 return DRM_ERR(EINVAL); 2228 return -EINVAL;
2245 } 2229 }
2246 if (buf->pending) { 2230 if (buf->pending) {
2247 DRM_ERROR("sending pending buffer %d\n", vertex.idx); 2231 DRM_ERROR("sending pending buffer %d\n", vertex->idx);
2248 return DRM_ERR(EINVAL); 2232 return -EINVAL;
2249 } 2233 }
2250 2234
2251 /* Build up a prim_t record: 2235 /* Build up a prim_t record:
2252 */ 2236 */
2253 if (vertex.count) { 2237 if (vertex->count) {
2254 buf->used = vertex.count; /* not used? */ 2238 buf->used = vertex->count; /* not used? */
2255 2239
2256 if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) { 2240 if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
2257 if (radeon_emit_state(dev_priv, filp_priv, 2241 if (radeon_emit_state(dev_priv, file_priv,
2258 &sarea_priv->context_state, 2242 &sarea_priv->context_state,
2259 sarea_priv->tex_state, 2243 sarea_priv->tex_state,
2260 sarea_priv->dirty)) { 2244 sarea_priv->dirty)) {
2261 DRM_ERROR("radeon_emit_state failed\n"); 2245 DRM_ERROR("radeon_emit_state failed\n");
2262 return DRM_ERR(EINVAL); 2246 return -EINVAL;
2263 } 2247 }
2264 2248
2265 sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | 2249 sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
@@ -2269,15 +2253,15 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS)
2269 } 2253 }
2270 2254
2271 prim.start = 0; 2255 prim.start = 0;
2272 prim.finish = vertex.count; /* unused */ 2256 prim.finish = vertex->count; /* unused */
2273 prim.prim = vertex.prim; 2257 prim.prim = vertex->prim;
2274 prim.numverts = vertex.count; 2258 prim.numverts = vertex->count;
2275 prim.vc_format = dev_priv->sarea_priv->vc_format; 2259 prim.vc_format = dev_priv->sarea_priv->vc_format;
2276 2260
2277 radeon_cp_dispatch_vertex(dev, buf, &prim); 2261 radeon_cp_dispatch_vertex(dev, buf, &prim);
2278 } 2262 }
2279 2263
2280 if (vertex.discard) { 2264 if (vertex->discard) {
2281 radeon_cp_discard_buffer(dev, buf); 2265 radeon_cp_discard_buffer(dev, buf);
2282 } 2266 }
2283 2267
@@ -2285,74 +2269,68 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS)
2285 return 0; 2269 return 0;
2286} 2270}
2287 2271
2288static int radeon_cp_indices(DRM_IOCTL_ARGS) 2272static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
2289{ 2273{
2290 DRM_DEVICE;
2291 drm_radeon_private_t *dev_priv = dev->dev_private; 2274 drm_radeon_private_t *dev_priv = dev->dev_private;
2292 struct drm_file *filp_priv;
2293 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 2275 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
2294 struct drm_device_dma *dma = dev->dma; 2276 struct drm_device_dma *dma = dev->dma;
2295 struct drm_buf *buf; 2277 struct drm_buf *buf;
2296 drm_radeon_indices_t elts; 2278 drm_radeon_indices_t *elts = data;
2297 drm_radeon_tcl_prim_t prim; 2279 drm_radeon_tcl_prim_t prim;
2298 int count; 2280 int count;
2299 2281
2300 LOCK_TEST_WITH_RETURN(dev, filp); 2282 LOCK_TEST_WITH_RETURN(dev, file_priv);
2301
2302 DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
2303
2304 DRM_COPY_FROM_USER_IOCTL(elts, (drm_radeon_indices_t __user *) data,
2305 sizeof(elts));
2306 2283
2307 DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n", 2284 DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n",
2308 DRM_CURRENTPID, elts.idx, elts.start, elts.end, elts.discard); 2285 DRM_CURRENTPID, elts->idx, elts->start, elts->end,
2286 elts->discard);
2309 2287
2310 if (elts.idx < 0 || elts.idx >= dma->buf_count) { 2288 if (elts->idx < 0 || elts->idx >= dma->buf_count) {
2311 DRM_ERROR("buffer index %d (of %d max)\n", 2289 DRM_ERROR("buffer index %d (of %d max)\n",
2312 elts.idx, dma->buf_count - 1); 2290 elts->idx, dma->buf_count - 1);
2313 return DRM_ERR(EINVAL); 2291 return -EINVAL;
2314 } 2292 }
2315 if (elts.prim < 0 || elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { 2293 if (elts->prim < 0 || elts->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
2316 DRM_ERROR("buffer prim %d\n", elts.prim); 2294 DRM_ERROR("buffer prim %d\n", elts->prim);
2317 return DRM_ERR(EINVAL); 2295 return -EINVAL;
2318 } 2296 }
2319 2297
2320 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2298 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2321 VB_AGE_TEST_WITH_RETURN(dev_priv); 2299 VB_AGE_TEST_WITH_RETURN(dev_priv);
2322 2300
2323 buf = dma->buflist[elts.idx]; 2301 buf = dma->buflist[elts->idx];
2324 2302
2325 if (buf->filp != filp) { 2303 if (buf->file_priv != file_priv) {
2326 DRM_ERROR("process %d using buffer owned by %p\n", 2304 DRM_ERROR("process %d using buffer owned by %p\n",
2327 DRM_CURRENTPID, buf->filp); 2305 DRM_CURRENTPID, buf->file_priv);
2328 return DRM_ERR(EINVAL); 2306 return -EINVAL;
2329 } 2307 }
2330 if (buf->pending) { 2308 if (buf->pending) {
2331 DRM_ERROR("sending pending buffer %d\n", elts.idx); 2309 DRM_ERROR("sending pending buffer %d\n", elts->idx);
2332 return DRM_ERR(EINVAL); 2310 return -EINVAL;
2333 } 2311 }
2334 2312
2335 count = (elts.end - elts.start) / sizeof(u16); 2313 count = (elts->end - elts->start) / sizeof(u16);
2336 elts.start -= RADEON_INDEX_PRIM_OFFSET; 2314 elts->start -= RADEON_INDEX_PRIM_OFFSET;
2337 2315
2338 if (elts.start & 0x7) { 2316 if (elts->start & 0x7) {
2339 DRM_ERROR("misaligned buffer 0x%x\n", elts.start); 2317 DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
2340 return DRM_ERR(EINVAL); 2318 return -EINVAL;
2341 } 2319 }
2342 if (elts.start < buf->used) { 2320 if (elts->start < buf->used) {
2343 DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used); 2321 DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
2344 return DRM_ERR(EINVAL); 2322 return -EINVAL;
2345 } 2323 }
2346 2324
2347 buf->used = elts.end; 2325 buf->used = elts->end;
2348 2326
2349 if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) { 2327 if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
2350 if (radeon_emit_state(dev_priv, filp_priv, 2328 if (radeon_emit_state(dev_priv, file_priv,
2351 &sarea_priv->context_state, 2329 &sarea_priv->context_state,
2352 sarea_priv->tex_state, 2330 sarea_priv->tex_state,
2353 sarea_priv->dirty)) { 2331 sarea_priv->dirty)) {
2354 DRM_ERROR("radeon_emit_state failed\n"); 2332 DRM_ERROR("radeon_emit_state failed\n");
2355 return DRM_ERR(EINVAL); 2333 return -EINVAL;
2356 } 2334 }
2357 2335
2358 sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | 2336 sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
@@ -2363,15 +2341,15 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS)
2363 2341
2364 /* Build up a prim_t record: 2342 /* Build up a prim_t record:
2365 */ 2343 */
2366 prim.start = elts.start; 2344 prim.start = elts->start;
2367 prim.finish = elts.end; 2345 prim.finish = elts->end;
2368 prim.prim = elts.prim; 2346 prim.prim = elts->prim;
2369 prim.offset = 0; /* offset from start of dma buffers */ 2347 prim.offset = 0; /* offset from start of dma buffers */
2370 prim.numverts = RADEON_MAX_VB_VERTS; /* duh */ 2348 prim.numverts = RADEON_MAX_VB_VERTS; /* duh */
2371 prim.vc_format = dev_priv->sarea_priv->vc_format; 2349 prim.vc_format = dev_priv->sarea_priv->vc_format;
2372 2350
2373 radeon_cp_dispatch_indices(dev, buf, &prim); 2351 radeon_cp_dispatch_indices(dev, buf, &prim);
2374 if (elts.discard) { 2352 if (elts->discard) {
2375 radeon_cp_discard_buffer(dev, buf); 2353 radeon_cp_discard_buffer(dev, buf);
2376 } 2354 }
2377 2355
@@ -2379,52 +2357,43 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS)
2379 return 0; 2357 return 0;
2380} 2358}
2381 2359
2382static int radeon_cp_texture(DRM_IOCTL_ARGS) 2360static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file *file_priv)
2383{ 2361{
2384 DRM_DEVICE;
2385 drm_radeon_private_t *dev_priv = dev->dev_private; 2362 drm_radeon_private_t *dev_priv = dev->dev_private;
2386 drm_radeon_texture_t tex; 2363 drm_radeon_texture_t *tex = data;
2387 drm_radeon_tex_image_t image; 2364 drm_radeon_tex_image_t image;
2388 int ret; 2365 int ret;
2389 2366
2390 LOCK_TEST_WITH_RETURN(dev, filp); 2367 LOCK_TEST_WITH_RETURN(dev, file_priv);
2391
2392 DRM_COPY_FROM_USER_IOCTL(tex, (drm_radeon_texture_t __user *) data,
2393 sizeof(tex));
2394 2368
2395 if (tex.image == NULL) { 2369 if (tex->image == NULL) {
2396 DRM_ERROR("null texture image!\n"); 2370 DRM_ERROR("null texture image!\n");
2397 return DRM_ERR(EINVAL); 2371 return -EINVAL;
2398 } 2372 }
2399 2373
2400 if (DRM_COPY_FROM_USER(&image, 2374 if (DRM_COPY_FROM_USER(&image,
2401 (drm_radeon_tex_image_t __user *) tex.image, 2375 (drm_radeon_tex_image_t __user *) tex->image,
2402 sizeof(image))) 2376 sizeof(image)))
2403 return DRM_ERR(EFAULT); 2377 return -EFAULT;
2404 2378
2405 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2379 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2406 VB_AGE_TEST_WITH_RETURN(dev_priv); 2380 VB_AGE_TEST_WITH_RETURN(dev_priv);
2407 2381
2408 ret = radeon_cp_dispatch_texture(filp, dev, &tex, &image); 2382 ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image);
2409 2383
2410 COMMIT_RING();
2411 return ret; 2384 return ret;
2412} 2385}
2413 2386
2414static int radeon_cp_stipple(DRM_IOCTL_ARGS) 2387static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
2415{ 2388{
2416 DRM_DEVICE;
2417 drm_radeon_private_t *dev_priv = dev->dev_private; 2389 drm_radeon_private_t *dev_priv = dev->dev_private;
2418 drm_radeon_stipple_t stipple; 2390 drm_radeon_stipple_t *stipple = data;
2419 u32 mask[32]; 2391 u32 mask[32];
2420 2392
2421 LOCK_TEST_WITH_RETURN(dev, filp); 2393 LOCK_TEST_WITH_RETURN(dev, file_priv);
2422
2423 DRM_COPY_FROM_USER_IOCTL(stipple, (drm_radeon_stipple_t __user *) data,
2424 sizeof(stipple));
2425 2394
2426 if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32))) 2395 if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
2427 return DRM_ERR(EFAULT); 2396 return -EFAULT;
2428 2397
2429 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2398 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2430 2399
@@ -2434,52 +2403,48 @@ static int radeon_cp_stipple(DRM_IOCTL_ARGS)
2434 return 0; 2403 return 0;
2435} 2404}
2436 2405
2437static int radeon_cp_indirect(DRM_IOCTL_ARGS) 2406static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
2438{ 2407{
2439 DRM_DEVICE;
2440 drm_radeon_private_t *dev_priv = dev->dev_private; 2408 drm_radeon_private_t *dev_priv = dev->dev_private;
2441 struct drm_device_dma *dma = dev->dma; 2409 struct drm_device_dma *dma = dev->dma;
2442 struct drm_buf *buf; 2410 struct drm_buf *buf;
2443 drm_radeon_indirect_t indirect; 2411 drm_radeon_indirect_t *indirect = data;
2444 RING_LOCALS; 2412 RING_LOCALS;
2445 2413
2446 LOCK_TEST_WITH_RETURN(dev, filp); 2414 LOCK_TEST_WITH_RETURN(dev, file_priv);
2447
2448 DRM_COPY_FROM_USER_IOCTL(indirect,
2449 (drm_radeon_indirect_t __user *) data,
2450 sizeof(indirect));
2451 2415
2452 DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n", 2416 DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n",
2453 indirect.idx, indirect.start, indirect.end, indirect.discard); 2417 indirect->idx, indirect->start, indirect->end,
2418 indirect->discard);
2454 2419
2455 if (indirect.idx < 0 || indirect.idx >= dma->buf_count) { 2420 if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
2456 DRM_ERROR("buffer index %d (of %d max)\n", 2421 DRM_ERROR("buffer index %d (of %d max)\n",
2457 indirect.idx, dma->buf_count - 1); 2422 indirect->idx, dma->buf_count - 1);
2458 return DRM_ERR(EINVAL); 2423 return -EINVAL;
2459 } 2424 }
2460 2425
2461 buf = dma->buflist[indirect.idx]; 2426 buf = dma->buflist[indirect->idx];
2462 2427
2463 if (buf->filp != filp) { 2428 if (buf->file_priv != file_priv) {
2464 DRM_ERROR("process %d using buffer owned by %p\n", 2429 DRM_ERROR("process %d using buffer owned by %p\n",
2465 DRM_CURRENTPID, buf->filp); 2430 DRM_CURRENTPID, buf->file_priv);
2466 return DRM_ERR(EINVAL); 2431 return -EINVAL;
2467 } 2432 }
2468 if (buf->pending) { 2433 if (buf->pending) {
2469 DRM_ERROR("sending pending buffer %d\n", indirect.idx); 2434 DRM_ERROR("sending pending buffer %d\n", indirect->idx);
2470 return DRM_ERR(EINVAL); 2435 return -EINVAL;
2471 } 2436 }
2472 2437
2473 if (indirect.start < buf->used) { 2438 if (indirect->start < buf->used) {
2474 DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n", 2439 DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
2475 indirect.start, buf->used); 2440 indirect->start, buf->used);
2476 return DRM_ERR(EINVAL); 2441 return -EINVAL;
2477 } 2442 }
2478 2443
2479 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2444 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2480 VB_AGE_TEST_WITH_RETURN(dev_priv); 2445 VB_AGE_TEST_WITH_RETURN(dev_priv);
2481 2446
2482 buf->used = indirect.end; 2447 buf->used = indirect->end;
2483 2448
2484 /* Wait for the 3D stream to idle before the indirect buffer 2449 /* Wait for the 3D stream to idle before the indirect buffer
2485 * containing 2D acceleration commands is processed. 2450 * containing 2D acceleration commands is processed.
@@ -2494,8 +2459,8 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS)
2494 * X server. This is insecure and is thus only available to 2459 * X server. This is insecure and is thus only available to
2495 * privileged clients. 2460 * privileged clients.
2496 */ 2461 */
2497 radeon_cp_dispatch_indirect(dev, buf, indirect.start, indirect.end); 2462 radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
2498 if (indirect.discard) { 2463 if (indirect->discard) {
2499 radeon_cp_discard_buffer(dev, buf); 2464 radeon_cp_discard_buffer(dev, buf);
2500 } 2465 }
2501 2466
@@ -2503,71 +2468,64 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS)
2503 return 0; 2468 return 0;
2504} 2469}
2505 2470
2506static int radeon_cp_vertex2(DRM_IOCTL_ARGS) 2471static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv)
2507{ 2472{
2508 DRM_DEVICE;
2509 drm_radeon_private_t *dev_priv = dev->dev_private; 2473 drm_radeon_private_t *dev_priv = dev->dev_private;
2510 struct drm_file *filp_priv;
2511 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 2474 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
2512 struct drm_device_dma *dma = dev->dma; 2475 struct drm_device_dma *dma = dev->dma;
2513 struct drm_buf *buf; 2476 struct drm_buf *buf;
2514 drm_radeon_vertex2_t vertex; 2477 drm_radeon_vertex2_t *vertex = data;
2515 int i; 2478 int i;
2516 unsigned char laststate; 2479 unsigned char laststate;
2517 2480
2518 LOCK_TEST_WITH_RETURN(dev, filp); 2481 LOCK_TEST_WITH_RETURN(dev, file_priv);
2519
2520 DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
2521
2522 DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex2_t __user *) data,
2523 sizeof(vertex));
2524 2482
2525 DRM_DEBUG("pid=%d index=%d discard=%d\n", 2483 DRM_DEBUG("pid=%d index=%d discard=%d\n",
2526 DRM_CURRENTPID, vertex.idx, vertex.discard); 2484 DRM_CURRENTPID, vertex->idx, vertex->discard);
2527 2485
2528 if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { 2486 if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
2529 DRM_ERROR("buffer index %d (of %d max)\n", 2487 DRM_ERROR("buffer index %d (of %d max)\n",
2530 vertex.idx, dma->buf_count - 1); 2488 vertex->idx, dma->buf_count - 1);
2531 return DRM_ERR(EINVAL); 2489 return -EINVAL;
2532 } 2490 }
2533 2491
2534 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2492 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2535 VB_AGE_TEST_WITH_RETURN(dev_priv); 2493 VB_AGE_TEST_WITH_RETURN(dev_priv);
2536 2494
2537 buf = dma->buflist[vertex.idx]; 2495 buf = dma->buflist[vertex->idx];
2538 2496
2539 if (buf->filp != filp) { 2497 if (buf->file_priv != file_priv) {
2540 DRM_ERROR("process %d using buffer owned by %p\n", 2498 DRM_ERROR("process %d using buffer owned by %p\n",
2541 DRM_CURRENTPID, buf->filp); 2499 DRM_CURRENTPID, buf->file_priv);
2542 return DRM_ERR(EINVAL); 2500 return -EINVAL;
2543 } 2501 }
2544 2502
2545 if (buf->pending) { 2503 if (buf->pending) {
2546 DRM_ERROR("sending pending buffer %d\n", vertex.idx); 2504 DRM_ERROR("sending pending buffer %d\n", vertex->idx);
2547 return DRM_ERR(EINVAL); 2505 return -EINVAL;
2548 } 2506 }
2549 2507
2550 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) 2508 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
2551 return DRM_ERR(EINVAL); 2509 return -EINVAL;
2552 2510
2553 for (laststate = 0xff, i = 0; i < vertex.nr_prims; i++) { 2511 for (laststate = 0xff, i = 0; i < vertex->nr_prims; i++) {
2554 drm_radeon_prim_t prim; 2512 drm_radeon_prim_t prim;
2555 drm_radeon_tcl_prim_t tclprim; 2513 drm_radeon_tcl_prim_t tclprim;
2556 2514
2557 if (DRM_COPY_FROM_USER(&prim, &vertex.prim[i], sizeof(prim))) 2515 if (DRM_COPY_FROM_USER(&prim, &vertex->prim[i], sizeof(prim)))
2558 return DRM_ERR(EFAULT); 2516 return -EFAULT;
2559 2517
2560 if (prim.stateidx != laststate) { 2518 if (prim.stateidx != laststate) {
2561 drm_radeon_state_t state; 2519 drm_radeon_state_t state;
2562 2520
2563 if (DRM_COPY_FROM_USER(&state, 2521 if (DRM_COPY_FROM_USER(&state,
2564 &vertex.state[prim.stateidx], 2522 &vertex->state[prim.stateidx],
2565 sizeof(state))) 2523 sizeof(state)))
2566 return DRM_ERR(EFAULT); 2524 return -EFAULT;
2567 2525
2568 if (radeon_emit_state2(dev_priv, filp_priv, &state)) { 2526 if (radeon_emit_state2(dev_priv, file_priv, &state)) {
2569 DRM_ERROR("radeon_emit_state2 failed\n"); 2527 DRM_ERROR("radeon_emit_state2 failed\n");
2570 return DRM_ERR(EINVAL); 2528 return -EINVAL;
2571 } 2529 }
2572 2530
2573 laststate = prim.stateidx; 2531 laststate = prim.stateidx;
@@ -2594,7 +2552,7 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS)
2594 sarea_priv->nbox = 0; 2552 sarea_priv->nbox = 0;
2595 } 2553 }
2596 2554
2597 if (vertex.discard) { 2555 if (vertex->discard) {
2598 radeon_cp_discard_buffer(dev, buf); 2556 radeon_cp_discard_buffer(dev, buf);
2599 } 2557 }
2600 2558
@@ -2603,7 +2561,7 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS)
2603} 2561}
2604 2562
2605static int radeon_emit_packets(drm_radeon_private_t * dev_priv, 2563static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
2606 struct drm_file * filp_priv, 2564 struct drm_file *file_priv,
2607 drm_radeon_cmd_header_t header, 2565 drm_radeon_cmd_header_t header,
2608 drm_radeon_kcmd_buffer_t *cmdbuf) 2566 drm_radeon_kcmd_buffer_t *cmdbuf)
2609{ 2567{
@@ -2613,19 +2571,19 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
2613 RING_LOCALS; 2571 RING_LOCALS;
2614 2572
2615 if (id >= RADEON_MAX_STATE_PACKETS) 2573 if (id >= RADEON_MAX_STATE_PACKETS)
2616 return DRM_ERR(EINVAL); 2574 return -EINVAL;
2617 2575
2618 sz = packet[id].len; 2576 sz = packet[id].len;
2619 reg = packet[id].start; 2577 reg = packet[id].start;
2620 2578
2621 if (sz * sizeof(int) > cmdbuf->bufsz) { 2579 if (sz * sizeof(int) > cmdbuf->bufsz) {
2622 DRM_ERROR("Packet size provided larger than data provided\n"); 2580 DRM_ERROR("Packet size provided larger than data provided\n");
2623 return DRM_ERR(EINVAL); 2581 return -EINVAL;
2624 } 2582 }
2625 2583
2626 if (radeon_check_and_fixup_packets(dev_priv, filp_priv, id, data)) { 2584 if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, data)) {
2627 DRM_ERROR("Packet verification failed\n"); 2585 DRM_ERROR("Packet verification failed\n");
2628 return DRM_ERR(EINVAL); 2586 return -EINVAL;
2629 } 2587 }
2630 2588
2631 BEGIN_RING(sz + 1); 2589 BEGIN_RING(sz + 1);
@@ -2713,7 +2671,7 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
2713 if (!sz) 2671 if (!sz)
2714 return 0; 2672 return 0;
2715 if (sz * 4 > cmdbuf->bufsz) 2673 if (sz * 4 > cmdbuf->bufsz)
2716 return DRM_ERR(EINVAL); 2674 return -EINVAL;
2717 2675
2718 BEGIN_RING(5 + sz); 2676 BEGIN_RING(5 + sz);
2719 OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0); 2677 OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
@@ -2729,7 +2687,7 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
2729} 2687}
2730 2688
2731static int radeon_emit_packet3(struct drm_device * dev, 2689static int radeon_emit_packet3(struct drm_device * dev,
2732 struct drm_file * filp_priv, 2690 struct drm_file *file_priv,
2733 drm_radeon_kcmd_buffer_t *cmdbuf) 2691 drm_radeon_kcmd_buffer_t *cmdbuf)
2734{ 2692{
2735 drm_radeon_private_t *dev_priv = dev->dev_private; 2693 drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -2739,7 +2697,7 @@ static int radeon_emit_packet3(struct drm_device * dev,
2739 2697
2740 DRM_DEBUG("\n"); 2698 DRM_DEBUG("\n");
2741 2699
2742 if ((ret = radeon_check_and_fixup_packet3(dev_priv, filp_priv, 2700 if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
2743 cmdbuf, &cmdsz))) { 2701 cmdbuf, &cmdsz))) {
2744 DRM_ERROR("Packet verification failed\n"); 2702 DRM_ERROR("Packet verification failed\n");
2745 return ret; 2703 return ret;
@@ -2755,7 +2713,7 @@ static int radeon_emit_packet3(struct drm_device * dev,
2755} 2713}
2756 2714
2757static int radeon_emit_packet3_cliprect(struct drm_device *dev, 2715static int radeon_emit_packet3_cliprect(struct drm_device *dev,
2758 struct drm_file *filp_priv, 2716 struct drm_file *file_priv,
2759 drm_radeon_kcmd_buffer_t *cmdbuf, 2717 drm_radeon_kcmd_buffer_t *cmdbuf,
2760 int orig_nbox) 2718 int orig_nbox)
2761{ 2719{
@@ -2769,7 +2727,7 @@ static int radeon_emit_packet3_cliprect(struct drm_device *dev,
2769 2727
2770 DRM_DEBUG("\n"); 2728 DRM_DEBUG("\n");
2771 2729
2772 if ((ret = radeon_check_and_fixup_packet3(dev_priv, filp_priv, 2730 if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
2773 cmdbuf, &cmdsz))) { 2731 cmdbuf, &cmdsz))) {
2774 DRM_ERROR("Packet verification failed\n"); 2732 DRM_ERROR("Packet verification failed\n");
2775 return ret; 2733 return ret;
@@ -2781,7 +2739,7 @@ static int radeon_emit_packet3_cliprect(struct drm_device *dev,
2781 do { 2739 do {
2782 if (i < cmdbuf->nbox) { 2740 if (i < cmdbuf->nbox) {
2783 if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box))) 2741 if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box)))
2784 return DRM_ERR(EFAULT); 2742 return -EFAULT;
2785 /* FIXME The second and subsequent times round 2743 /* FIXME The second and subsequent times round
2786 * this loop, send a WAIT_UNTIL_3D_IDLE before 2744 * this loop, send a WAIT_UNTIL_3D_IDLE before
2787 * calling emit_clip_rect(). This fixes a 2745 * calling emit_clip_rect(). This fixes a
@@ -2839,62 +2797,54 @@ static int radeon_emit_wait(struct drm_device * dev, int flags)
2839 ADVANCE_RING(); 2797 ADVANCE_RING();
2840 break; 2798 break;
2841 default: 2799 default:
2842 return DRM_ERR(EINVAL); 2800 return -EINVAL;
2843 } 2801 }
2844 2802
2845 return 0; 2803 return 0;
2846} 2804}
2847 2805
2848static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) 2806static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
2849{ 2807{
2850 DRM_DEVICE;
2851 drm_radeon_private_t *dev_priv = dev->dev_private; 2808 drm_radeon_private_t *dev_priv = dev->dev_private;
2852 struct drm_file *filp_priv;
2853 struct drm_device_dma *dma = dev->dma; 2809 struct drm_device_dma *dma = dev->dma;
2854 struct drm_buf *buf = NULL; 2810 struct drm_buf *buf = NULL;
2855 int idx; 2811 int idx;
2856 drm_radeon_kcmd_buffer_t cmdbuf; 2812 drm_radeon_kcmd_buffer_t *cmdbuf = data;
2857 drm_radeon_cmd_header_t header; 2813 drm_radeon_cmd_header_t header;
2858 int orig_nbox, orig_bufsz; 2814 int orig_nbox, orig_bufsz;
2859 char *kbuf = NULL; 2815 char *kbuf = NULL;
2860 2816
2861 LOCK_TEST_WITH_RETURN(dev, filp); 2817 LOCK_TEST_WITH_RETURN(dev, file_priv);
2862
2863 DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
2864
2865 DRM_COPY_FROM_USER_IOCTL(cmdbuf,
2866 (drm_radeon_cmd_buffer_t __user *) data,
2867 sizeof(cmdbuf));
2868 2818
2869 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2819 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2870 VB_AGE_TEST_WITH_RETURN(dev_priv); 2820 VB_AGE_TEST_WITH_RETURN(dev_priv);
2871 2821
2872 if (cmdbuf.bufsz > 64 * 1024 || cmdbuf.bufsz < 0) { 2822 if (cmdbuf->bufsz > 64 * 1024 || cmdbuf->bufsz < 0) {
2873 return DRM_ERR(EINVAL); 2823 return -EINVAL;
2874 } 2824 }
2875 2825
2876 /* Allocate an in-kernel area and copy in the cmdbuf. Do this to avoid 2826 /* Allocate an in-kernel area and copy in the cmdbuf. Do this to avoid
2877 * races between checking values and using those values in other code, 2827 * races between checking values and using those values in other code,
2878 * and simply to avoid a lot of function calls to copy in data. 2828 * and simply to avoid a lot of function calls to copy in data.
2879 */ 2829 */
2880 orig_bufsz = cmdbuf.bufsz; 2830 orig_bufsz = cmdbuf->bufsz;
2881 if (orig_bufsz != 0) { 2831 if (orig_bufsz != 0) {
2882 kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER); 2832 kbuf = drm_alloc(cmdbuf->bufsz, DRM_MEM_DRIVER);
2883 if (kbuf == NULL) 2833 if (kbuf == NULL)
2884 return DRM_ERR(ENOMEM); 2834 return -ENOMEM;
2885 if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf, 2835 if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf,
2886 cmdbuf.bufsz)) { 2836 cmdbuf->bufsz)) {
2887 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); 2837 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
2888 return DRM_ERR(EFAULT); 2838 return -EFAULT;
2889 } 2839 }
2890 cmdbuf.buf = kbuf; 2840 cmdbuf->buf = kbuf;
2891 } 2841 }
2892 2842
2893 orig_nbox = cmdbuf.nbox; 2843 orig_nbox = cmdbuf->nbox;
2894 2844
2895 if (dev_priv->microcode_version == UCODE_R300) { 2845 if (dev_priv->microcode_version == UCODE_R300) {
2896 int temp; 2846 int temp;
2897 temp = r300_do_cp_cmdbuf(dev, filp, filp_priv, &cmdbuf); 2847 temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
2898 2848
2899 if (orig_bufsz != 0) 2849 if (orig_bufsz != 0)
2900 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); 2850 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
@@ -2903,17 +2853,17 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
2903 } 2853 }
2904 2854
2905 /* microcode_version != r300 */ 2855 /* microcode_version != r300 */
2906 while (cmdbuf.bufsz >= sizeof(header)) { 2856 while (cmdbuf->bufsz >= sizeof(header)) {
2907 2857
2908 header.i = *(int *)cmdbuf.buf; 2858 header.i = *(int *)cmdbuf->buf;
2909 cmdbuf.buf += sizeof(header); 2859 cmdbuf->buf += sizeof(header);
2910 cmdbuf.bufsz -= sizeof(header); 2860 cmdbuf->bufsz -= sizeof(header);
2911 2861
2912 switch (header.header.cmd_type) { 2862 switch (header.header.cmd_type) {
2913 case RADEON_CMD_PACKET: 2863 case RADEON_CMD_PACKET:
2914 DRM_DEBUG("RADEON_CMD_PACKET\n"); 2864 DRM_DEBUG("RADEON_CMD_PACKET\n");
2915 if (radeon_emit_packets 2865 if (radeon_emit_packets
2916 (dev_priv, filp_priv, header, &cmdbuf)) { 2866 (dev_priv, file_priv, header, cmdbuf)) {
2917 DRM_ERROR("radeon_emit_packets failed\n"); 2867 DRM_ERROR("radeon_emit_packets failed\n");
2918 goto err; 2868 goto err;
2919 } 2869 }
@@ -2921,7 +2871,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
2921 2871
2922 case RADEON_CMD_SCALARS: 2872 case RADEON_CMD_SCALARS:
2923 DRM_DEBUG("RADEON_CMD_SCALARS\n"); 2873 DRM_DEBUG("RADEON_CMD_SCALARS\n");
2924 if (radeon_emit_scalars(dev_priv, header, &cmdbuf)) { 2874 if (radeon_emit_scalars(dev_priv, header, cmdbuf)) {
2925 DRM_ERROR("radeon_emit_scalars failed\n"); 2875 DRM_ERROR("radeon_emit_scalars failed\n");
2926 goto err; 2876 goto err;
2927 } 2877 }
@@ -2929,7 +2879,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
2929 2879
2930 case RADEON_CMD_VECTORS: 2880 case RADEON_CMD_VECTORS:
2931 DRM_DEBUG("RADEON_CMD_VECTORS\n"); 2881 DRM_DEBUG("RADEON_CMD_VECTORS\n");
2932 if (radeon_emit_vectors(dev_priv, header, &cmdbuf)) { 2882 if (radeon_emit_vectors(dev_priv, header, cmdbuf)) {
2933 DRM_ERROR("radeon_emit_vectors failed\n"); 2883 DRM_ERROR("radeon_emit_vectors failed\n");
2934 goto err; 2884 goto err;
2935 } 2885 }
@@ -2945,9 +2895,10 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
2945 } 2895 }
2946 2896
2947 buf = dma->buflist[idx]; 2897 buf = dma->buflist[idx];
2948 if (buf->filp != filp || buf->pending) { 2898 if (buf->file_priv != file_priv || buf->pending) {
2949 DRM_ERROR("bad buffer %p %p %d\n", 2899 DRM_ERROR("bad buffer %p %p %d\n",
2950 buf->filp, filp, buf->pending); 2900 buf->file_priv, file_priv,
2901 buf->pending);
2951 goto err; 2902 goto err;
2952 } 2903 }
2953 2904
@@ -2956,7 +2907,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
2956 2907
2957 case RADEON_CMD_PACKET3: 2908 case RADEON_CMD_PACKET3:
2958 DRM_DEBUG("RADEON_CMD_PACKET3\n"); 2909 DRM_DEBUG("RADEON_CMD_PACKET3\n");
2959 if (radeon_emit_packet3(dev, filp_priv, &cmdbuf)) { 2910 if (radeon_emit_packet3(dev, file_priv, cmdbuf)) {
2960 DRM_ERROR("radeon_emit_packet3 failed\n"); 2911 DRM_ERROR("radeon_emit_packet3 failed\n");
2961 goto err; 2912 goto err;
2962 } 2913 }
@@ -2965,7 +2916,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
2965 case RADEON_CMD_PACKET3_CLIP: 2916 case RADEON_CMD_PACKET3_CLIP:
2966 DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n"); 2917 DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
2967 if (radeon_emit_packet3_cliprect 2918 if (radeon_emit_packet3_cliprect
2968 (dev, filp_priv, &cmdbuf, orig_nbox)) { 2919 (dev, file_priv, cmdbuf, orig_nbox)) {
2969 DRM_ERROR("radeon_emit_packet3_clip failed\n"); 2920 DRM_ERROR("radeon_emit_packet3_clip failed\n");
2970 goto err; 2921 goto err;
2971 } 2922 }
@@ -2973,7 +2924,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
2973 2924
2974 case RADEON_CMD_SCALARS2: 2925 case RADEON_CMD_SCALARS2:
2975 DRM_DEBUG("RADEON_CMD_SCALARS2\n"); 2926 DRM_DEBUG("RADEON_CMD_SCALARS2\n");
2976 if (radeon_emit_scalars2(dev_priv, header, &cmdbuf)) { 2927 if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) {
2977 DRM_ERROR("radeon_emit_scalars2 failed\n"); 2928 DRM_ERROR("radeon_emit_scalars2 failed\n");
2978 goto err; 2929 goto err;
2979 } 2930 }
@@ -2988,7 +2939,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
2988 break; 2939 break;
2989 case RADEON_CMD_VECLINEAR: 2940 case RADEON_CMD_VECLINEAR:
2990 DRM_DEBUG("RADEON_CMD_VECLINEAR\n"); 2941 DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
2991 if (radeon_emit_veclinear(dev_priv, header, &cmdbuf)) { 2942 if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) {
2992 DRM_ERROR("radeon_emit_veclinear failed\n"); 2943 DRM_ERROR("radeon_emit_veclinear failed\n");
2993 goto err; 2944 goto err;
2994 } 2945 }
@@ -2997,7 +2948,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
2997 default: 2948 default:
2998 DRM_ERROR("bad cmd_type %d at %p\n", 2949 DRM_ERROR("bad cmd_type %d at %p\n",
2999 header.header.cmd_type, 2950 header.header.cmd_type,
3000 cmdbuf.buf - sizeof(header)); 2951 cmdbuf->buf - sizeof(header));
3001 goto err; 2952 goto err;
3002 } 2953 }
3003 } 2954 }
@@ -3012,22 +2963,18 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
3012 err: 2963 err:
3013 if (orig_bufsz != 0) 2964 if (orig_bufsz != 0)
3014 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); 2965 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
3015 return DRM_ERR(EINVAL); 2966 return -EINVAL;
3016} 2967}
3017 2968
3018static int radeon_cp_getparam(DRM_IOCTL_ARGS) 2969static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
3019{ 2970{
3020 DRM_DEVICE;
3021 drm_radeon_private_t *dev_priv = dev->dev_private; 2971 drm_radeon_private_t *dev_priv = dev->dev_private;
3022 drm_radeon_getparam_t param; 2972 drm_radeon_getparam_t *param = data;
3023 int value; 2973 int value;
3024 2974
3025 DRM_COPY_FROM_USER_IOCTL(param, (drm_radeon_getparam_t __user *) data,
3026 sizeof(param));
3027
3028 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); 2975 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
3029 2976
3030 switch (param.param) { 2977 switch (param->param) {
3031 case RADEON_PARAM_GART_BUFFER_OFFSET: 2978 case RADEON_PARAM_GART_BUFFER_OFFSET:
3032 value = dev_priv->gart_buffers_offset; 2979 value = dev_priv->gart_buffers_offset;
3033 break; 2980 break;
@@ -3074,7 +3021,7 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
3074 break; 3021 break;
3075 case RADEON_PARAM_SCRATCH_OFFSET: 3022 case RADEON_PARAM_SCRATCH_OFFSET:
3076 if (!dev_priv->writeback_works) 3023 if (!dev_priv->writeback_works)
3077 return DRM_ERR(EINVAL); 3024 return -EINVAL;
3078 value = RADEON_SCRATCH_REG_OFFSET; 3025 value = RADEON_SCRATCH_REG_OFFSET;
3079 break; 3026 break;
3080 case RADEON_PARAM_CARD_TYPE: 3027 case RADEON_PARAM_CARD_TYPE:
@@ -3089,43 +3036,37 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
3089 value = radeon_vblank_crtc_get(dev); 3036 value = radeon_vblank_crtc_get(dev);
3090 break; 3037 break;
3091 default: 3038 default:
3092 DRM_DEBUG("Invalid parameter %d\n", param.param); 3039 DRM_DEBUG("Invalid parameter %d\n", param->param);
3093 return DRM_ERR(EINVAL); 3040 return -EINVAL;
3094 } 3041 }
3095 3042
3096 if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { 3043 if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
3097 DRM_ERROR("copy_to_user\n"); 3044 DRM_ERROR("copy_to_user\n");
3098 return DRM_ERR(EFAULT); 3045 return -EFAULT;
3099 } 3046 }
3100 3047
3101 return 0; 3048 return 0;
3102} 3049}
3103 3050
3104static int radeon_cp_setparam(DRM_IOCTL_ARGS) 3051static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
3105{ 3052{
3106 DRM_DEVICE;
3107 drm_radeon_private_t *dev_priv = dev->dev_private; 3053 drm_radeon_private_t *dev_priv = dev->dev_private;
3108 struct drm_file *filp_priv; 3054 drm_radeon_setparam_t *sp = data;
3109 drm_radeon_setparam_t sp;
3110 struct drm_radeon_driver_file_fields *radeon_priv; 3055 struct drm_radeon_driver_file_fields *radeon_priv;
3111 3056
3112 DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); 3057 switch (sp->param) {
3113
3114 DRM_COPY_FROM_USER_IOCTL(sp, (drm_radeon_setparam_t __user *) data,
3115 sizeof(sp));
3116
3117 switch (sp.param) {
3118 case RADEON_SETPARAM_FB_LOCATION: 3058 case RADEON_SETPARAM_FB_LOCATION:
3119 radeon_priv = filp_priv->driver_priv; 3059 radeon_priv = file_priv->driver_priv;
3120 radeon_priv->radeon_fb_delta = dev_priv->fb_location - sp.value; 3060 radeon_priv->radeon_fb_delta = dev_priv->fb_location -
3061 sp->value;
3121 break; 3062 break;
3122 case RADEON_SETPARAM_SWITCH_TILING: 3063 case RADEON_SETPARAM_SWITCH_TILING:
3123 if (sp.value == 0) { 3064 if (sp->value == 0) {
3124 DRM_DEBUG("color tiling disabled\n"); 3065 DRM_DEBUG("color tiling disabled\n");
3125 dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO; 3066 dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
3126 dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO; 3067 dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
3127 dev_priv->sarea_priv->tiling_enabled = 0; 3068 dev_priv->sarea_priv->tiling_enabled = 0;
3128 } else if (sp.value == 1) { 3069 } else if (sp->value == 1) {
3129 DRM_DEBUG("color tiling enabled\n"); 3070 DRM_DEBUG("color tiling enabled\n");
3130 dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO; 3071 dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
3131 dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO; 3072 dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
@@ -3133,23 +3074,23 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
3133 } 3074 }
3134 break; 3075 break;
3135 case RADEON_SETPARAM_PCIGART_LOCATION: 3076 case RADEON_SETPARAM_PCIGART_LOCATION:
3136 dev_priv->pcigart_offset = sp.value; 3077 dev_priv->pcigart_offset = sp->value;
3137 dev_priv->pcigart_offset_set = 1; 3078 dev_priv->pcigart_offset_set = 1;
3138 break; 3079 break;
3139 case RADEON_SETPARAM_NEW_MEMMAP: 3080 case RADEON_SETPARAM_NEW_MEMMAP:
3140 dev_priv->new_memmap = sp.value; 3081 dev_priv->new_memmap = sp->value;
3141 break; 3082 break;
3142 case RADEON_SETPARAM_PCIGART_TABLE_SIZE: 3083 case RADEON_SETPARAM_PCIGART_TABLE_SIZE:
3143 dev_priv->gart_info.table_size = sp.value; 3084 dev_priv->gart_info.table_size = sp->value;
3144 if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE) 3085 if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE)
3145 dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; 3086 dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
3146 break; 3087 break;
3147 case RADEON_SETPARAM_VBLANK_CRTC: 3088 case RADEON_SETPARAM_VBLANK_CRTC:
3148 return radeon_vblank_crtc_set(dev, sp.value); 3089 return radeon_vblank_crtc_set(dev, sp->value);
3149 break; 3090 break;
3150 default: 3091 default:
3151 DRM_DEBUG("Invalid parameter %d\n", sp.param); 3092 DRM_DEBUG("Invalid parameter %d\n", sp->param);
3152 return DRM_ERR(EINVAL); 3093 return -EINVAL;
3153 } 3094 }
3154 3095
3155 return 0; 3096 return 0;
@@ -3162,14 +3103,14 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
3162 * 3103 *
3163 * DRM infrastructure takes care of reclaiming dma buffers. 3104 * DRM infrastructure takes care of reclaiming dma buffers.
3164 */ 3105 */
3165void radeon_driver_preclose(struct drm_device *dev, DRMFILE filp) 3106void radeon_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
3166{ 3107{
3167 if (dev->dev_private) { 3108 if (dev->dev_private) {
3168 drm_radeon_private_t *dev_priv = dev->dev_private; 3109 drm_radeon_private_t *dev_priv = dev->dev_private;
3169 dev_priv->page_flipping = 0; 3110 dev_priv->page_flipping = 0;
3170 radeon_mem_release(filp, dev_priv->gart_heap); 3111 radeon_mem_release(file_priv, dev_priv->gart_heap);
3171 radeon_mem_release(filp, dev_priv->fb_heap); 3112 radeon_mem_release(file_priv, dev_priv->fb_heap);
3172 radeon_surfaces_release(filp, dev_priv); 3113 radeon_surfaces_release(file_priv, dev_priv);
3173 } 3114 }
3174} 3115}
3175 3116
@@ -3186,7 +3127,7 @@ void radeon_driver_lastclose(struct drm_device *dev)
3186 radeon_do_release(dev); 3127 radeon_do_release(dev);
3187} 3128}
3188 3129
3189int radeon_driver_open(struct drm_device *dev, struct drm_file *filp_priv) 3130int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv)
3190{ 3131{
3191 drm_radeon_private_t *dev_priv = dev->dev_private; 3132 drm_radeon_private_t *dev_priv = dev->dev_private;
3192 struct drm_radeon_driver_file_fields *radeon_priv; 3133 struct drm_radeon_driver_file_fields *radeon_priv;
@@ -3199,7 +3140,7 @@ int radeon_driver_open(struct drm_device *dev, struct drm_file *filp_priv)
3199 if (!radeon_priv) 3140 if (!radeon_priv)
3200 return -ENOMEM; 3141 return -ENOMEM;
3201 3142
3202 filp_priv->driver_priv = radeon_priv; 3143 file_priv->driver_priv = radeon_priv;
3203 3144
3204 if (dev_priv) 3145 if (dev_priv)
3205 radeon_priv->radeon_fb_delta = dev_priv->fb_location; 3146 radeon_priv->radeon_fb_delta = dev_priv->fb_location;
@@ -3208,42 +3149,42 @@ int radeon_driver_open(struct drm_device *dev, struct drm_file *filp_priv)
3208 return 0; 3149 return 0;
3209} 3150}
3210 3151
3211void radeon_driver_postclose(struct drm_device *dev, struct drm_file *filp_priv) 3152void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
3212{ 3153{
3213 struct drm_radeon_driver_file_fields *radeon_priv = 3154 struct drm_radeon_driver_file_fields *radeon_priv =
3214 filp_priv->driver_priv; 3155 file_priv->driver_priv;
3215 3156
3216 drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES); 3157 drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES);
3217} 3158}
3218 3159
3219drm_ioctl_desc_t radeon_ioctls[] = { 3160struct drm_ioctl_desc radeon_ioctls[] = {
3220 [DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] = {radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3161 DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3221 [DRM_IOCTL_NR(DRM_RADEON_CP_START)] = {radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3162 DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3222 [DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] = {radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3163 DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3223 [DRM_IOCTL_NR(DRM_RADEON_CP_RESET)] = {radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3164 DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3224 [DRM_IOCTL_NR(DRM_RADEON_CP_IDLE)] = {radeon_cp_idle, DRM_AUTH}, 3165 DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
3225 [DRM_IOCTL_NR(DRM_RADEON_CP_RESUME)] = {radeon_cp_resume, DRM_AUTH}, 3166 DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
3226 [DRM_IOCTL_NR(DRM_RADEON_RESET)] = {radeon_engine_reset, DRM_AUTH}, 3167 DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH),
3227 [DRM_IOCTL_NR(DRM_RADEON_FULLSCREEN)] = {radeon_fullscreen, DRM_AUTH}, 3168 DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
3228 [DRM_IOCTL_NR(DRM_RADEON_SWAP)] = {radeon_cp_swap, DRM_AUTH}, 3169 DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
3229 [DRM_IOCTL_NR(DRM_RADEON_CLEAR)] = {radeon_cp_clear, DRM_AUTH}, 3170 DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
3230 [DRM_IOCTL_NR(DRM_RADEON_VERTEX)] = {radeon_cp_vertex, DRM_AUTH}, 3171 DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
3231 [DRM_IOCTL_NR(DRM_RADEON_INDICES)] = {radeon_cp_indices, DRM_AUTH}, 3172 DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
3232 [DRM_IOCTL_NR(DRM_RADEON_TEXTURE)] = {radeon_cp_texture, DRM_AUTH}, 3173 DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
3233 [DRM_IOCTL_NR(DRM_RADEON_STIPPLE)] = {radeon_cp_stipple, DRM_AUTH}, 3174 DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
3234 [DRM_IOCTL_NR(DRM_RADEON_INDIRECT)] = {radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3175 DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3235 [DRM_IOCTL_NR(DRM_RADEON_VERTEX2)] = {radeon_cp_vertex2, DRM_AUTH}, 3176 DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
3236 [DRM_IOCTL_NR(DRM_RADEON_CMDBUF)] = {radeon_cp_cmdbuf, DRM_AUTH}, 3177 DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
3237 [DRM_IOCTL_NR(DRM_RADEON_GETPARAM)] = {radeon_cp_getparam, DRM_AUTH}, 3178 DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
3238 [DRM_IOCTL_NR(DRM_RADEON_FLIP)] = {radeon_cp_flip, DRM_AUTH}, 3179 DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
3239 [DRM_IOCTL_NR(DRM_RADEON_ALLOC)] = {radeon_mem_alloc, DRM_AUTH}, 3180 DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
3240 [DRM_IOCTL_NR(DRM_RADEON_FREE)] = {radeon_mem_free, DRM_AUTH}, 3181 DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH),
3241 [DRM_IOCTL_NR(DRM_RADEON_INIT_HEAP)] = {radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3182 DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3242 [DRM_IOCTL_NR(DRM_RADEON_IRQ_EMIT)] = {radeon_irq_emit, DRM_AUTH}, 3183 DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
3243 [DRM_IOCTL_NR(DRM_RADEON_IRQ_WAIT)] = {radeon_irq_wait, DRM_AUTH}, 3184 DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
3244 [DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] = {radeon_cp_setparam, DRM_AUTH}, 3185 DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
3245 [DRM_IOCTL_NR(DRM_RADEON_SURF_ALLOC)] = {radeon_surface_alloc, DRM_AUTH}, 3186 DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
3246 [DRM_IOCTL_NR(DRM_RADEON_SURF_FREE)] = {radeon_surface_free, DRM_AUTH} 3187 DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH)
3247}; 3188};
3248 3189
3249int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls); 3190int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
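
The radeon hunks above all apply one conversion: ioctl handlers drop the DRM_IOCTL_ARGS/DRM_DEVICE boilerplate and the hand-rolled DRM_COPY_FROM_USER_IOCTL copy-in, take (dev, data, file_priv) directly from the DRM core, and return plain negative errno values instead of DRM_ERR(). The sketch below only illustrates that before/after shape; it is not code from this commit. The foo names, the drm_foo_getparam_t struct, and DRM_FOO_GETPARAM are hypothetical stand-ins, while the DRM macros are the ones visible in the diff.

        /* Before: the handler sets up dev itself and copies the user
         * argument in by hand. */
        static int foo_getparam_old(DRM_IOCTL_ARGS)
        {
                DRM_DEVICE;                      /* old macro that declares dev */
                drm_foo_getparam_t param;        /* hypothetical argument struct */

                DRM_COPY_FROM_USER_IOCTL(param, (drm_foo_getparam_t __user *)data,
                                         sizeof(param));
                if (param.param < 0)
                        return DRM_ERR(EINVAL);
                /* ... use param ... */
                return 0;
        }

        /* After: the DRM core has already copied the argument into a kernel
         * buffer, so the handler just casts data and returns -errno. */
        static int foo_getparam(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
        {
                drm_foo_getparam_t *param = data;

                if (param->param < 0)
                        return -EINVAL;
                /* ... use param ... */
                return 0;
        }

        /* The ioctl table moves from designated-index initializers to
         * DRM_IOCTL_DEF() entries, as in the radeon_ioctls[] hunk above. */
        struct drm_ioctl_desc foo_ioctls[] = {
                DRM_IOCTL_DEF(DRM_FOO_GETPARAM, foo_getparam, DRM_AUTH),
        };
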
diff --git a/drivers/char/drm/savage_bci.c b/drivers/char/drm/savage_bci.c
index 18c7235f6b73..59484d56b333 100644
--- a/drivers/char/drm/savage_bci.c
+++ b/drivers/char/drm/savage_bci.c
@@ -60,7 +60,7 @@ savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
60 DRM_ERROR("failed!\n"); 60 DRM_ERROR("failed!\n");
61 DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold); 61 DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold);
62#endif 62#endif
63 return DRM_ERR(EBUSY); 63 return -EBUSY;
64} 64}
65 65
66static int 66static int
@@ -81,7 +81,7 @@ savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n)
81 DRM_ERROR("failed!\n"); 81 DRM_ERROR("failed!\n");
82 DRM_INFO(" status=0x%08x\n", status); 82 DRM_INFO(" status=0x%08x\n", status);
83#endif 83#endif
84 return DRM_ERR(EBUSY); 84 return -EBUSY;
85} 85}
86 86
87static int 87static int
@@ -102,7 +102,7 @@ savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n)
102 DRM_ERROR("failed!\n"); 102 DRM_ERROR("failed!\n");
103 DRM_INFO(" status=0x%08x\n", status); 103 DRM_INFO(" status=0x%08x\n", status);
104#endif 104#endif
105 return DRM_ERR(EBUSY); 105 return -EBUSY;
106} 106}
107 107
108/* 108/*
@@ -136,7 +136,7 @@ savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e)
136 DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); 136 DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
137#endif 137#endif
138 138
139 return DRM_ERR(EBUSY); 139 return -EBUSY;
140} 140}
141 141
142static int 142static int
@@ -158,7 +158,7 @@ savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e)
158 DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); 158 DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
159#endif 159#endif
160 160
161 return DRM_ERR(EBUSY); 161 return -EBUSY;
162} 162}
163 163
164uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv, 164uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
@@ -301,7 +301,7 @@ static int savage_dma_init(drm_savage_private_t * dev_priv)
301 dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) * 301 dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
302 dev_priv->nr_dma_pages, DRM_MEM_DRIVER); 302 dev_priv->nr_dma_pages, DRM_MEM_DRIVER);
303 if (dev_priv->dma_pages == NULL) 303 if (dev_priv->dma_pages == NULL)
304 return DRM_ERR(ENOMEM); 304 return -ENOMEM;
305 305
306 for (i = 0; i < dev_priv->nr_dma_pages; ++i) { 306 for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
307 SET_AGE(&dev_priv->dma_pages[i].age, 0, 0); 307 SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
@@ -541,7 +541,7 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
541 541
542 dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER); 542 dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
543 if (dev_priv == NULL) 543 if (dev_priv == NULL)
544 return DRM_ERR(ENOMEM); 544 return -ENOMEM;
545 545
546 memset(dev_priv, 0, sizeof(drm_savage_private_t)); 546 memset(dev_priv, 0, sizeof(drm_savage_private_t));
547 dev->dev_private = (void *)dev_priv; 547 dev->dev_private = (void *)dev_priv;
@@ -682,16 +682,16 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
682 682
683 if (init->fb_bpp != 16 && init->fb_bpp != 32) { 683 if (init->fb_bpp != 16 && init->fb_bpp != 32) {
684 DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp); 684 DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
685 return DRM_ERR(EINVAL); 685 return -EINVAL;
686 } 686 }
687 if (init->depth_bpp != 16 && init->depth_bpp != 32) { 687 if (init->depth_bpp != 16 && init->depth_bpp != 32) {
688 DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp); 688 DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp);
689 return DRM_ERR(EINVAL); 689 return -EINVAL;
690 } 690 }
691 if (init->dma_type != SAVAGE_DMA_AGP && 691 if (init->dma_type != SAVAGE_DMA_AGP &&
692 init->dma_type != SAVAGE_DMA_PCI) { 692 init->dma_type != SAVAGE_DMA_PCI) {
693 DRM_ERROR("invalid dma memory type %d!\n", init->dma_type); 693 DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
694 return DRM_ERR(EINVAL); 694 return -EINVAL;
695 } 695 }
696 696
697 dev_priv->cob_size = init->cob_size; 697 dev_priv->cob_size = init->cob_size;
@@ -715,14 +715,14 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
715 if (!dev_priv->sarea) { 715 if (!dev_priv->sarea) {
716 DRM_ERROR("could not find sarea!\n"); 716 DRM_ERROR("could not find sarea!\n");
717 savage_do_cleanup_bci(dev); 717 savage_do_cleanup_bci(dev);
718 return DRM_ERR(EINVAL); 718 return -EINVAL;
719 } 719 }
720 if (init->status_offset != 0) { 720 if (init->status_offset != 0) {
721 dev_priv->status = drm_core_findmap(dev, init->status_offset); 721 dev_priv->status = drm_core_findmap(dev, init->status_offset);
722 if (!dev_priv->status) { 722 if (!dev_priv->status) {
723 DRM_ERROR("could not find shadow status region!\n"); 723 DRM_ERROR("could not find shadow status region!\n");
724 savage_do_cleanup_bci(dev); 724 savage_do_cleanup_bci(dev);
725 return DRM_ERR(EINVAL); 725 return -EINVAL;
726 } 726 }
727 } else { 727 } else {
728 dev_priv->status = NULL; 728 dev_priv->status = NULL;
@@ -734,13 +734,13 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
734 if (!dev->agp_buffer_map) { 734 if (!dev->agp_buffer_map) {
735 DRM_ERROR("could not find DMA buffer region!\n"); 735 DRM_ERROR("could not find DMA buffer region!\n");
736 savage_do_cleanup_bci(dev); 736 savage_do_cleanup_bci(dev);
737 return DRM_ERR(EINVAL); 737 return -EINVAL;
738 } 738 }
739 drm_core_ioremap(dev->agp_buffer_map, dev); 739 drm_core_ioremap(dev->agp_buffer_map, dev);
740 if (!dev->agp_buffer_map) { 740 if (!dev->agp_buffer_map) {
741 DRM_ERROR("failed to ioremap DMA buffer region!\n"); 741 DRM_ERROR("failed to ioremap DMA buffer region!\n");
742 savage_do_cleanup_bci(dev); 742 savage_do_cleanup_bci(dev);
743 return DRM_ERR(ENOMEM); 743 return -ENOMEM;
744 } 744 }
745 } 745 }
746 if (init->agp_textures_offset) { 746 if (init->agp_textures_offset) {
@@ -749,7 +749,7 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
749 if (!dev_priv->agp_textures) { 749 if (!dev_priv->agp_textures) {
750 DRM_ERROR("could not find agp texture region!\n"); 750 DRM_ERROR("could not find agp texture region!\n");
751 savage_do_cleanup_bci(dev); 751 savage_do_cleanup_bci(dev);
752 return DRM_ERR(EINVAL); 752 return -EINVAL;
753 } 753 }
754 } else { 754 } else {
755 dev_priv->agp_textures = NULL; 755 dev_priv->agp_textures = NULL;
@@ -760,39 +760,39 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
760 DRM_ERROR("command DMA not supported on " 760 DRM_ERROR("command DMA not supported on "
761 "Savage3D/MX/IX.\n"); 761 "Savage3D/MX/IX.\n");
762 savage_do_cleanup_bci(dev); 762 savage_do_cleanup_bci(dev);
763 return DRM_ERR(EINVAL); 763 return -EINVAL;
764 } 764 }
765 if (dev->dma && dev->dma->buflist) { 765 if (dev->dma && dev->dma->buflist) {
766 DRM_ERROR("command and vertex DMA not supported " 766 DRM_ERROR("command and vertex DMA not supported "
767 "at the same time.\n"); 767 "at the same time.\n");
768 savage_do_cleanup_bci(dev); 768 savage_do_cleanup_bci(dev);
769 return DRM_ERR(EINVAL); 769 return -EINVAL;
770 } 770 }
771 dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset); 771 dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
772 if (!dev_priv->cmd_dma) { 772 if (!dev_priv->cmd_dma) {
773 DRM_ERROR("could not find command DMA region!\n"); 773 DRM_ERROR("could not find command DMA region!\n");
774 savage_do_cleanup_bci(dev); 774 savage_do_cleanup_bci(dev);
775 return DRM_ERR(EINVAL); 775 return -EINVAL;
776 } 776 }
777 if (dev_priv->dma_type == SAVAGE_DMA_AGP) { 777 if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
778 if (dev_priv->cmd_dma->type != _DRM_AGP) { 778 if (dev_priv->cmd_dma->type != _DRM_AGP) {
779 DRM_ERROR("AGP command DMA region is not a " 779 DRM_ERROR("AGP command DMA region is not a "
780 "_DRM_AGP map!\n"); 780 "_DRM_AGP map!\n");
781 savage_do_cleanup_bci(dev); 781 savage_do_cleanup_bci(dev);
782 return DRM_ERR(EINVAL); 782 return -EINVAL;
783 } 783 }
784 drm_core_ioremap(dev_priv->cmd_dma, dev); 784 drm_core_ioremap(dev_priv->cmd_dma, dev);
785 if (!dev_priv->cmd_dma->handle) { 785 if (!dev_priv->cmd_dma->handle) {
786 DRM_ERROR("failed to ioremap command " 786 DRM_ERROR("failed to ioremap command "
787 "DMA region!\n"); 787 "DMA region!\n");
788 savage_do_cleanup_bci(dev); 788 savage_do_cleanup_bci(dev);
789 return DRM_ERR(ENOMEM); 789 return -ENOMEM;
790 } 790 }
791 } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) { 791 } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
792 DRM_ERROR("PCI command DMA region is not a " 792 DRM_ERROR("PCI command DMA region is not a "
793 "_DRM_CONSISTENT map!\n"); 793 "_DRM_CONSISTENT map!\n");
794 savage_do_cleanup_bci(dev); 794 savage_do_cleanup_bci(dev);
795 return DRM_ERR(EINVAL); 795 return -EINVAL;
796 } 796 }
797 } else { 797 } else {
798 dev_priv->cmd_dma = NULL; 798 dev_priv->cmd_dma = NULL;
@@ -809,7 +809,7 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
809 if (!dev_priv->fake_dma.handle) { 809 if (!dev_priv->fake_dma.handle) {
810 DRM_ERROR("could not allocate faked DMA buffer!\n"); 810 DRM_ERROR("could not allocate faked DMA buffer!\n");
811 savage_do_cleanup_bci(dev); 811 savage_do_cleanup_bci(dev);
812 return DRM_ERR(ENOMEM); 812 return -ENOMEM;
813 } 813 }
814 dev_priv->cmd_dma = &dev_priv->fake_dma; 814 dev_priv->cmd_dma = &dev_priv->fake_dma;
815 dev_priv->dma_flush = savage_fake_dma_flush; 815 dev_priv->dma_flush = savage_fake_dma_flush;
@@ -886,13 +886,13 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
886 if (savage_freelist_init(dev) < 0) { 886 if (savage_freelist_init(dev) < 0) {
887 DRM_ERROR("could not initialize freelist\n"); 887 DRM_ERROR("could not initialize freelist\n");
888 savage_do_cleanup_bci(dev); 888 savage_do_cleanup_bci(dev);
889 return DRM_ERR(ENOMEM); 889 return -ENOMEM;
890 } 890 }
891 891
892 if (savage_dma_init(dev_priv) < 0) { 892 if (savage_dma_init(dev_priv) < 0) {
893 DRM_ERROR("could not initialize command DMA\n"); 893 DRM_ERROR("could not initialize command DMA\n");
894 savage_do_cleanup_bci(dev); 894 savage_do_cleanup_bci(dev);
895 return DRM_ERR(ENOMEM); 895 return -ENOMEM;
896 } 896 }
897 897
898 return 0; 898 return 0;
@@ -928,51 +928,41 @@ static int savage_do_cleanup_bci(struct drm_device * dev)
928 return 0; 928 return 0;
929} 929}
930 930
931static int savage_bci_init(DRM_IOCTL_ARGS) 931static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
932{ 932{
933 DRM_DEVICE; 933 drm_savage_init_t *init = data;
934 drm_savage_init_t init;
935 934
936 LOCK_TEST_WITH_RETURN(dev, filp); 935 LOCK_TEST_WITH_RETURN(dev, file_priv);
937 936
938 DRM_COPY_FROM_USER_IOCTL(init, (drm_savage_init_t __user *) data, 937 switch (init->func) {
939 sizeof(init));
940
941 switch (init.func) {
942 case SAVAGE_INIT_BCI: 938 case SAVAGE_INIT_BCI:
943 return savage_do_init_bci(dev, &init); 939 return savage_do_init_bci(dev, init);
944 case SAVAGE_CLEANUP_BCI: 940 case SAVAGE_CLEANUP_BCI:
945 return savage_do_cleanup_bci(dev); 941 return savage_do_cleanup_bci(dev);
946 } 942 }
947 943
948 return DRM_ERR(EINVAL); 944 return -EINVAL;
949} 945}
950 946
951static int savage_bci_event_emit(DRM_IOCTL_ARGS) 947static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
952{ 948{
953 DRM_DEVICE;
954 drm_savage_private_t *dev_priv = dev->dev_private; 949 drm_savage_private_t *dev_priv = dev->dev_private;
955 drm_savage_event_emit_t event; 950 drm_savage_event_emit_t *event = data;
956 951
957 DRM_DEBUG("\n"); 952 DRM_DEBUG("\n");
958 953
959 LOCK_TEST_WITH_RETURN(dev, filp); 954 LOCK_TEST_WITH_RETURN(dev, file_priv);
960 955
961 DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_emit_t __user *) data, 956 event->count = savage_bci_emit_event(dev_priv, event->flags);
962 sizeof(event)); 957 event->count |= dev_priv->event_wrap << 16;
963 958
964 event.count = savage_bci_emit_event(dev_priv, event.flags);
965 event.count |= dev_priv->event_wrap << 16;
966 DRM_COPY_TO_USER_IOCTL((drm_savage_event_emit_t __user *) data,
967 event, sizeof(event));
968 return 0; 959 return 0;
969} 960}
970 961
971static int savage_bci_event_wait(DRM_IOCTL_ARGS) 962static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
972{ 963{
973 DRM_DEVICE;
974 drm_savage_private_t *dev_priv = dev->dev_private; 964 drm_savage_private_t *dev_priv = dev->dev_private;
975 drm_savage_event_wait_t event; 965 drm_savage_event_wait_t *event = data;
976 unsigned int event_e, hw_e; 966 unsigned int event_e, hw_e;
977 unsigned int event_w, hw_w; 967 unsigned int event_w, hw_w;
978 968
@@ -990,8 +980,8 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS)
990 if (hw_e > dev_priv->event_counter) 980 if (hw_e > dev_priv->event_counter)
991 hw_w--; /* hardware hasn't passed the last wrap yet */ 981 hw_w--; /* hardware hasn't passed the last wrap yet */
992 982
993 event_e = event.count & 0xffff; 983 event_e = event->count & 0xffff;
994 event_w = event.count >> 16; 984 event_w = event->count >> 16;
995 985
996 /* Don't need to wait if 986 /* Don't need to wait if
997 * - event counter wrapped since the event was emitted or 987 * - event counter wrapped since the event was emitted or
@@ -1007,7 +997,9 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS)
1007 * DMA buffer management 997 * DMA buffer management
1008 */ 998 */
1009 999
1010static int savage_bci_get_buffers(DRMFILE filp, struct drm_device *dev, struct drm_dma *d) 1000static int savage_bci_get_buffers(struct drm_device *dev,
1001 struct drm_file *file_priv,
1002 struct drm_dma *d)
1011{ 1003{
1012 struct drm_buf *buf; 1004 struct drm_buf *buf;
1013 int i; 1005 int i;
@@ -1015,61 +1007,56 @@ static int savage_bci_get_buffers(DRMFILE filp, struct drm_device *dev, struct d
1015 for (i = d->granted_count; i < d->request_count; i++) { 1007 for (i = d->granted_count; i < d->request_count; i++) {
1016 buf = savage_freelist_get(dev); 1008 buf = savage_freelist_get(dev);
1017 if (!buf) 1009 if (!buf)
1018 return DRM_ERR(EAGAIN); 1010 return -EAGAIN;
1019 1011
1020 buf->filp = filp; 1012 buf->file_priv = file_priv;
1021 1013
1022 if (DRM_COPY_TO_USER(&d->request_indices[i], 1014 if (DRM_COPY_TO_USER(&d->request_indices[i],
1023 &buf->idx, sizeof(buf->idx))) 1015 &buf->idx, sizeof(buf->idx)))
1024 return DRM_ERR(EFAULT); 1016 return -EFAULT;
1025 if (DRM_COPY_TO_USER(&d->request_sizes[i], 1017 if (DRM_COPY_TO_USER(&d->request_sizes[i],
1026 &buf->total, sizeof(buf->total))) 1018 &buf->total, sizeof(buf->total)))
1027 return DRM_ERR(EFAULT); 1019 return -EFAULT;
1028 1020
1029 d->granted_count++; 1021 d->granted_count++;
1030 } 1022 }
1031 return 0; 1023 return 0;
1032} 1024}
1033 1025
1034int savage_bci_buffers(DRM_IOCTL_ARGS) 1026int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
1035{ 1027{
1036 DRM_DEVICE;
1037 struct drm_device_dma *dma = dev->dma; 1028 struct drm_device_dma *dma = dev->dma;
1038 struct drm_dma d; 1029 struct drm_dma *d = data;
1039 int ret = 0; 1030 int ret = 0;
1040 1031
1041 LOCK_TEST_WITH_RETURN(dev, filp); 1032 LOCK_TEST_WITH_RETURN(dev, file_priv);
1042
1043 DRM_COPY_FROM_USER_IOCTL(d, (struct drm_dma __user *) data, sizeof(d));
1044 1033
1045 /* Please don't send us buffers. 1034 /* Please don't send us buffers.
1046 */ 1035 */
1047 if (d.send_count != 0) { 1036 if (d->send_count != 0) {
1048 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", 1037 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
1049 DRM_CURRENTPID, d.send_count); 1038 DRM_CURRENTPID, d->send_count);
1050 return DRM_ERR(EINVAL); 1039 return -EINVAL;
1051 } 1040 }
1052 1041
1053 /* We'll send you buffers. 1042 /* We'll send you buffers.
1054 */ 1043 */
1055 if (d.request_count < 0 || d.request_count > dma->buf_count) { 1044 if (d->request_count < 0 || d->request_count > dma->buf_count) {
1056 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", 1045 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
1057 DRM_CURRENTPID, d.request_count, dma->buf_count); 1046 DRM_CURRENTPID, d->request_count, dma->buf_count);
1058 return DRM_ERR(EINVAL); 1047 return -EINVAL;
1059 } 1048 }
1060 1049
1061 d.granted_count = 0; 1050 d->granted_count = 0;
1062 1051
1063 if (d.request_count) { 1052 if (d->request_count) {
1064 ret = savage_bci_get_buffers(filp, dev, &d); 1053 ret = savage_bci_get_buffers(dev, file_priv, d);
1065 } 1054 }
1066 1055
1067 DRM_COPY_TO_USER_IOCTL((struct drm_dma __user *) data, d, sizeof(d));
1068
1069 return ret; 1056 return ret;
1070} 1057}
1071 1058
1072void savage_reclaim_buffers(struct drm_device *dev, DRMFILE filp) 1059void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
1073{ 1060{
1074 struct drm_device_dma *dma = dev->dma; 1061 struct drm_device_dma *dma = dev->dma;
1075 drm_savage_private_t *dev_priv = dev->dev_private; 1062 drm_savage_private_t *dev_priv = dev->dev_private;
@@ -1088,7 +1075,7 @@ void savage_reclaim_buffers(struct drm_device *dev, DRMFILE filp)
1088 struct drm_buf *buf = dma->buflist[i]; 1075 struct drm_buf *buf = dma->buflist[i];
1089 drm_savage_buf_priv_t *buf_priv = buf->dev_private; 1076 drm_savage_buf_priv_t *buf_priv = buf->dev_private;
1090 1077
1091 if (buf->filp == filp && buf_priv && 1078 if (buf->file_priv == file_priv && buf_priv &&
1092 buf_priv->next == NULL && buf_priv->prev == NULL) { 1079 buf_priv->next == NULL && buf_priv->prev == NULL) {
1093 uint16_t event; 1080 uint16_t event;
1094 DRM_DEBUG("reclaimed from client\n"); 1081 DRM_DEBUG("reclaimed from client\n");
@@ -1098,14 +1085,14 @@ void savage_reclaim_buffers(struct drm_device *dev, DRMFILE filp)
1098 } 1085 }
1099 } 1086 }
1100 1087
1101 drm_core_reclaim_buffers(dev, filp); 1088 drm_core_reclaim_buffers(dev, file_priv);
1102} 1089}
1103 1090
1104drm_ioctl_desc_t savage_ioctls[] = { 1091struct drm_ioctl_desc savage_ioctls[] = {
1105 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1092 DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1106 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, DRM_AUTH}, 1093 DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
1107 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, DRM_AUTH}, 1094 DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
1108 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, DRM_AUTH}, 1095 DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
1109}; 1096};
1110 1097
1111int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls); 1098int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
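
The savage_bci_event_emit and savage_bci_buffers hunks show the output side of the same conversion: the handler now writes its results into the core-supplied data buffer, and the DRM core copies that buffer back to user space after the handler returns for ioctls that return data, which is why the explicit DRM_COPY_TO_USER_IOCTL calls disappear together with the copy-in. A schematic sketch of that shape, with hypothetical foo names standing in for everything not shown in the diff:

        /* Before: copy the struct in, fill it, copy it back out by hand. */
        static int foo_event_emit_old(DRM_IOCTL_ARGS)
        {
                DRM_DEVICE;
                drm_foo_private_t *dev_priv = dev->dev_private;
                drm_foo_event_t event;           /* hypothetical argument struct */

                LOCK_TEST_WITH_RETURN(dev, filp);
                DRM_COPY_FROM_USER_IOCTL(event, (drm_foo_event_t __user *)data,
                                         sizeof(event));
                event.count = foo_emit_event(dev_priv, event.flags);
                DRM_COPY_TO_USER_IOCTL((drm_foo_event_t __user *)data, event,
                                       sizeof(event));
                return 0;
        }

        /* After: fill in the kernel copy handed in as data; the core copies
         * it back to user space once the handler returns. */
        static int foo_event_emit(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv)
        {
                drm_foo_private_t *dev_priv = dev->dev_private;
                drm_foo_event_t *event = data;

                LOCK_TEST_WITH_RETURN(dev, file_priv);
                event->count = foo_emit_event(dev_priv, event->flags);
                return 0;
        }
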
diff --git a/drivers/char/drm/savage_drv.h b/drivers/char/drm/savage_drv.h
index 5fd54de4280e..df2aac6636f7 100644
--- a/drivers/char/drm/savage_drv.h
+++ b/drivers/char/drm/savage_drv.h
@@ -104,7 +104,7 @@ enum savage_family {
104 S3_LAST 104 S3_LAST
105}; 105};
106 106
107extern drm_ioctl_desc_t savage_ioctls[]; 107extern struct drm_ioctl_desc savage_ioctls[];
108extern int savage_max_ioctl; 108extern int savage_max_ioctl;
109 109
110#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX)) 110#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
@@ -197,8 +197,8 @@ typedef struct drm_savage_private {
197} drm_savage_private_t; 197} drm_savage_private_t;
198 198
199/* ioctls */ 199/* ioctls */
200extern int savage_bci_cmdbuf(DRM_IOCTL_ARGS); 200extern int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv);
201extern int savage_bci_buffers(DRM_IOCTL_ARGS); 201extern int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
202 202
203/* BCI functions */ 203/* BCI functions */
204extern uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv, 204extern uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
@@ -212,7 +212,8 @@ extern int savage_driver_load(struct drm_device *dev, unsigned long chipset);
212extern int savage_driver_firstopen(struct drm_device *dev); 212extern int savage_driver_firstopen(struct drm_device *dev);
213extern void savage_driver_lastclose(struct drm_device *dev); 213extern void savage_driver_lastclose(struct drm_device *dev);
214extern int savage_driver_unload(struct drm_device *dev); 214extern int savage_driver_unload(struct drm_device *dev);
215extern void savage_reclaim_buffers(struct drm_device * dev, DRMFILE filp); 215extern void savage_reclaim_buffers(struct drm_device *dev,
216 struct drm_file *file_priv);
216 217
217/* state functions */ 218/* state functions */
218extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, 219extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
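
The savage_drv.h hunk is the prototype side of the filp to file_priv switch; in the .c hunks the matching ownership checks now compare the per-open struct drm_file pointer recorded in drm_buf instead of the raw file pointer. A minimal sketch of that check; the foo_check_owner helper is a hypothetical illustration, not a function added by this commit:

        /* A buffer may only be submitted by the drm_file it was handed to,
         * and never while it is still pending on the hardware. */
        static int foo_check_owner(struct drm_buf *buf, struct drm_file *file_priv)
        {
                if (buf->file_priv != file_priv) {
                        DRM_ERROR("process %d using buffer owned by %p\n",
                                  DRM_CURRENTPID, buf->file_priv);
                        return -EINVAL;
                }
                if (buf->pending) {
                        DRM_ERROR("sending pending buffer %d\n", buf->idx);
                        return -EINVAL;
                }
                return 0;
        }
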
diff --git a/drivers/char/drm/savage_state.c b/drivers/char/drm/savage_state.c
index 77497841478a..bf8e0e10fe21 100644
--- a/drivers/char/drm/savage_state.c
+++ b/drivers/char/drm/savage_state.c
@@ -83,7 +83,7 @@ static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit,
83{ 83{
84 if ((addr & 6) != 2) { /* reserved bits */ 84 if ((addr & 6) != 2) { /* reserved bits */
85 DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr); 85 DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
86 return DRM_ERR(EINVAL); 86 return -EINVAL;
87 } 87 }
88 if (!(addr & 1)) { /* local */ 88 if (!(addr & 1)) { /* local */
89 addr &= ~7; 89 addr &= ~7;
@@ -92,13 +92,13 @@ static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit,
92 DRM_ERROR 92 DRM_ERROR
93 ("bad texAddr%d %08x (local addr out of range)\n", 93 ("bad texAddr%d %08x (local addr out of range)\n",
94 unit, addr); 94 unit, addr);
95 return DRM_ERR(EINVAL); 95 return -EINVAL;
96 } 96 }
97 } else { /* AGP */ 97 } else { /* AGP */
98 if (!dev_priv->agp_textures) { 98 if (!dev_priv->agp_textures) {
99 DRM_ERROR("bad texAddr%d %08x (AGP not available)\n", 99 DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
100 unit, addr); 100 unit, addr);
101 return DRM_ERR(EINVAL); 101 return -EINVAL;
102 } 102 }
103 addr &= ~7; 103 addr &= ~7;
104 if (addr < dev_priv->agp_textures->offset || 104 if (addr < dev_priv->agp_textures->offset ||
@@ -107,7 +107,7 @@ static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit,
107 DRM_ERROR 107 DRM_ERROR
108 ("bad texAddr%d %08x (AGP addr out of range)\n", 108 ("bad texAddr%d %08x (AGP addr out of range)\n",
109 unit, addr); 109 unit, addr);
110 return DRM_ERR(EINVAL); 110 return -EINVAL;
111 } 111 }
112 } 112 }
113 return 0; 113 return 0;
@@ -133,7 +133,7 @@ static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
133 start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { 133 start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
134 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", 134 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
135 start, start + count - 1); 135 start, start + count - 1);
136 return DRM_ERR(EINVAL); 136 return -EINVAL;
137 } 137 }
138 138
139 SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart, 139 SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
@@ -165,7 +165,7 @@ static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
165 start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) { 165 start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) {
166 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", 166 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
167 start, start + count - 1); 167 start, start + count - 1);
168 return DRM_ERR(EINVAL); 168 return -EINVAL;
169 } 169 }
170 170
171 SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0, 171 SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
@@ -289,7 +289,7 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
289 289
290 if (!dmabuf) { 290 if (!dmabuf) {
291 DRM_ERROR("called without dma buffers!\n"); 291 DRM_ERROR("called without dma buffers!\n");
292 return DRM_ERR(EINVAL); 292 return -EINVAL;
293 } 293 }
294 294
295 if (!n) 295 if (!n)
@@ -303,7 +303,7 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
303 if (n % 3 != 0) { 303 if (n % 3 != 0) {
304 DRM_ERROR("wrong number of vertices %u in TRILIST\n", 304 DRM_ERROR("wrong number of vertices %u in TRILIST\n",
305 n); 305 n);
306 return DRM_ERR(EINVAL); 306 return -EINVAL;
307 } 307 }
308 break; 308 break;
309 case SAVAGE_PRIM_TRISTRIP: 309 case SAVAGE_PRIM_TRISTRIP:
@@ -312,18 +312,18 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
312 DRM_ERROR 312 DRM_ERROR
313 ("wrong number of vertices %u in TRIFAN/STRIP\n", 313 ("wrong number of vertices %u in TRIFAN/STRIP\n",
314 n); 314 n);
315 return DRM_ERR(EINVAL); 315 return -EINVAL;
316 } 316 }
317 break; 317 break;
318 default: 318 default:
319 DRM_ERROR("invalid primitive type %u\n", prim); 319 DRM_ERROR("invalid primitive type %u\n", prim);
320 return DRM_ERR(EINVAL); 320 return -EINVAL;
321 } 321 }
322 322
323 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 323 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
324 if (skip != 0) { 324 if (skip != 0) {
325 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); 325 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
326 return DRM_ERR(EINVAL); 326 return -EINVAL;
327 } 327 }
328 } else { 328 } else {
329 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - 329 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
@@ -331,18 +331,18 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
331 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); 331 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
332 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { 332 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
333 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); 333 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
334 return DRM_ERR(EINVAL); 334 return -EINVAL;
335 } 335 }
336 if (reorder) { 336 if (reorder) {
337 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); 337 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
338 return DRM_ERR(EINVAL); 338 return -EINVAL;
339 } 339 }
340 } 340 }
341 341
342 if (start + n > dmabuf->total / 32) { 342 if (start + n > dmabuf->total / 32) {
343 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", 343 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
344 start, start + n - 1, dmabuf->total / 32); 344 start, start + n - 1, dmabuf->total / 32);
345 return DRM_ERR(EINVAL); 345 return -EINVAL;
346 } 346 }
347 347
348 /* Vertex DMA doesn't work with command DMA at the same time, 348 /* Vertex DMA doesn't work with command DMA at the same time,
@@ -440,7 +440,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
440 if (n % 3 != 0) { 440 if (n % 3 != 0) {
441 DRM_ERROR("wrong number of vertices %u in TRILIST\n", 441 DRM_ERROR("wrong number of vertices %u in TRILIST\n",
442 n); 442 n);
443 return DRM_ERR(EINVAL); 443 return -EINVAL;
444 } 444 }
445 break; 445 break;
446 case SAVAGE_PRIM_TRISTRIP: 446 case SAVAGE_PRIM_TRISTRIP:
@@ -449,24 +449,24 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
449 DRM_ERROR 449 DRM_ERROR
450 ("wrong number of vertices %u in TRIFAN/STRIP\n", 450 ("wrong number of vertices %u in TRIFAN/STRIP\n",
451 n); 451 n);
452 return DRM_ERR(EINVAL); 452 return -EINVAL;
453 } 453 }
454 break; 454 break;
455 default: 455 default:
456 DRM_ERROR("invalid primitive type %u\n", prim); 456 DRM_ERROR("invalid primitive type %u\n", prim);
457 return DRM_ERR(EINVAL); 457 return -EINVAL;
458 } 458 }
459 459
460 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 460 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
461 if (skip > SAVAGE_SKIP_ALL_S3D) { 461 if (skip > SAVAGE_SKIP_ALL_S3D) {
462 DRM_ERROR("invalid skip flags 0x%04x\n", skip); 462 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
463 return DRM_ERR(EINVAL); 463 return -EINVAL;
464 } 464 }
465 vtx_size = 8; /* full vertex */ 465 vtx_size = 8; /* full vertex */
466 } else { 466 } else {
467 if (skip > SAVAGE_SKIP_ALL_S4) { 467 if (skip > SAVAGE_SKIP_ALL_S4) {
468 DRM_ERROR("invalid skip flags 0x%04x\n", skip); 468 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
469 return DRM_ERR(EINVAL); 469 return -EINVAL;
470 } 470 }
471 vtx_size = 10; /* full vertex */ 471 vtx_size = 10; /* full vertex */
472 } 472 }
@@ -478,13 +478,13 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
478 if (vtx_size > vb_stride) { 478 if (vtx_size > vb_stride) {
479 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", 479 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
480 vtx_size, vb_stride); 480 vtx_size, vb_stride);
481 return DRM_ERR(EINVAL); 481 return -EINVAL;
482 } 482 }
483 483
484 if (start + n > vb_size / (vb_stride * 4)) { 484 if (start + n > vb_size / (vb_stride * 4)) {
485 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", 485 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
486 start, start + n - 1, vb_size / (vb_stride * 4)); 486 start, start + n - 1, vb_size / (vb_stride * 4));
487 return DRM_ERR(EINVAL); 487 return -EINVAL;
488 } 488 }
489 489
490 prim <<= 25; 490 prim <<= 25;
@@ -547,7 +547,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
547 547
548 if (!dmabuf) { 548 if (!dmabuf) {
549 DRM_ERROR("called without dma buffers!\n"); 549 DRM_ERROR("called without dma buffers!\n");
550 return DRM_ERR(EINVAL); 550 return -EINVAL;
551 } 551 }
552 552
553 if (!n) 553 if (!n)
@@ -560,7 +560,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
560 case SAVAGE_PRIM_TRILIST: 560 case SAVAGE_PRIM_TRILIST:
561 if (n % 3 != 0) { 561 if (n % 3 != 0) {
562 DRM_ERROR("wrong number of indices %u in TRILIST\n", n); 562 DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
563 return DRM_ERR(EINVAL); 563 return -EINVAL;
564 } 564 }
565 break; 565 break;
566 case SAVAGE_PRIM_TRISTRIP: 566 case SAVAGE_PRIM_TRISTRIP:
@@ -568,18 +568,18 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
568 if (n < 3) { 568 if (n < 3) {
569 DRM_ERROR 569 DRM_ERROR
570 ("wrong number of indices %u in TRIFAN/STRIP\n", n); 570 ("wrong number of indices %u in TRIFAN/STRIP\n", n);
571 return DRM_ERR(EINVAL); 571 return -EINVAL;
572 } 572 }
573 break; 573 break;
574 default: 574 default:
575 DRM_ERROR("invalid primitive type %u\n", prim); 575 DRM_ERROR("invalid primitive type %u\n", prim);
576 return DRM_ERR(EINVAL); 576 return -EINVAL;
577 } 577 }
578 578
579 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 579 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
580 if (skip != 0) { 580 if (skip != 0) {
581 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); 581 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
582 return DRM_ERR(EINVAL); 582 return -EINVAL;
583 } 583 }
584 } else { 584 } else {
585 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - 585 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
@@ -587,11 +587,11 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
587 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); 587 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
588 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { 588 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
589 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); 589 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
590 return DRM_ERR(EINVAL); 590 return -EINVAL;
591 } 591 }
592 if (reorder) { 592 if (reorder) {
593 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); 593 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
594 return DRM_ERR(EINVAL); 594 return -EINVAL;
595 } 595 }
596 } 596 }
597 597
@@ -628,7 +628,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
628 if (idx[i] > dmabuf->total / 32) { 628 if (idx[i] > dmabuf->total / 32) {
629 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", 629 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
630 i, idx[i], dmabuf->total / 32); 630 i, idx[i], dmabuf->total / 32);
631 return DRM_ERR(EINVAL); 631 return -EINVAL;
632 } 632 }
633 } 633 }
634 634
@@ -698,7 +698,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
698 case SAVAGE_PRIM_TRILIST: 698 case SAVAGE_PRIM_TRILIST:
699 if (n % 3 != 0) { 699 if (n % 3 != 0) {
700 DRM_ERROR("wrong number of indices %u in TRILIST\n", n); 700 DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
701 return DRM_ERR(EINVAL); 701 return -EINVAL;
702 } 702 }
703 break; 703 break;
704 case SAVAGE_PRIM_TRISTRIP: 704 case SAVAGE_PRIM_TRISTRIP:
@@ -706,24 +706,24 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
706 if (n < 3) { 706 if (n < 3) {
707 DRM_ERROR 707 DRM_ERROR
708 ("wrong number of indices %u in TRIFAN/STRIP\n", n); 708 ("wrong number of indices %u in TRIFAN/STRIP\n", n);
709 return DRM_ERR(EINVAL); 709 return -EINVAL;
710 } 710 }
711 break; 711 break;
712 default: 712 default:
713 DRM_ERROR("invalid primitive type %u\n", prim); 713 DRM_ERROR("invalid primitive type %u\n", prim);
714 return DRM_ERR(EINVAL); 714 return -EINVAL;
715 } 715 }
716 716
717 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 717 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
718 if (skip > SAVAGE_SKIP_ALL_S3D) { 718 if (skip > SAVAGE_SKIP_ALL_S3D) {
719 DRM_ERROR("invalid skip flags 0x%04x\n", skip); 719 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
720 return DRM_ERR(EINVAL); 720 return -EINVAL;
721 } 721 }
722 vtx_size = 8; /* full vertex */ 722 vtx_size = 8; /* full vertex */
723 } else { 723 } else {
724 if (skip > SAVAGE_SKIP_ALL_S4) { 724 if (skip > SAVAGE_SKIP_ALL_S4) {
725 DRM_ERROR("invalid skip flags 0x%04x\n", skip); 725 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
726 return DRM_ERR(EINVAL); 726 return -EINVAL;
727 } 727 }
728 vtx_size = 10; /* full vertex */ 728 vtx_size = 10; /* full vertex */
729 } 729 }
@@ -735,7 +735,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
735 if (vtx_size > vb_stride) { 735 if (vtx_size > vb_stride) {
736 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", 736 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
737 vtx_size, vb_stride); 737 vtx_size, vb_stride);
738 return DRM_ERR(EINVAL); 738 return -EINVAL;
739 } 739 }
740 740
741 prim <<= 25; 741 prim <<= 25;
@@ -748,7 +748,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
748 if (idx[i] > vb_size / (vb_stride * 4)) { 748 if (idx[i] > vb_size / (vb_stride * 4)) {
749 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", 749 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
750 i, idx[i], vb_size / (vb_stride * 4)); 750 i, idx[i], vb_size / (vb_stride * 4));
751 return DRM_ERR(EINVAL); 751 return -EINVAL;
752 } 752 }
753 } 753 }
754 754
@@ -942,7 +942,7 @@ static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
942 DRM_ERROR("IMPLEMENTATION ERROR: " 942 DRM_ERROR("IMPLEMENTATION ERROR: "
943 "non-drawing-command %d\n", 943 "non-drawing-command %d\n",
944 cmd_header.cmd.cmd); 944 cmd_header.cmd.cmd);
945 return DRM_ERR(EINVAL); 945 return -EINVAL;
946 } 946 }
947 947
948 if (ret != 0) 948 if (ret != 0)
@@ -953,13 +953,12 @@ static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
953 return 0; 953 return 0;
954} 954}
955 955
956int savage_bci_cmdbuf(DRM_IOCTL_ARGS) 956int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
957{ 957{
958 DRM_DEVICE;
959 drm_savage_private_t *dev_priv = dev->dev_private; 958 drm_savage_private_t *dev_priv = dev->dev_private;
960 struct drm_device_dma *dma = dev->dma; 959 struct drm_device_dma *dma = dev->dma;
961 struct drm_buf *dmabuf; 960 struct drm_buf *dmabuf;
962 drm_savage_cmdbuf_t cmdbuf; 961 drm_savage_cmdbuf_t *cmdbuf = data;
963 drm_savage_cmd_header_t *kcmd_addr = NULL; 962 drm_savage_cmd_header_t *kcmd_addr = NULL;
964 drm_savage_cmd_header_t *first_draw_cmd; 963 drm_savage_cmd_header_t *first_draw_cmd;
965 unsigned int *kvb_addr = NULL; 964 unsigned int *kvb_addr = NULL;
@@ -969,19 +968,16 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
969 968
970 DRM_DEBUG("\n"); 969 DRM_DEBUG("\n");
971 970
972 LOCK_TEST_WITH_RETURN(dev, filp); 971 LOCK_TEST_WITH_RETURN(dev, file_priv);
973
974 DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_savage_cmdbuf_t __user *) data,
975 sizeof(cmdbuf));
976 972
977 if (dma && dma->buflist) { 973 if (dma && dma->buflist) {
978 if (cmdbuf.dma_idx > dma->buf_count) { 974 if (cmdbuf->dma_idx > dma->buf_count) {
979 DRM_ERROR 975 DRM_ERROR
980 ("vertex buffer index %u out of range (0-%u)\n", 976 ("vertex buffer index %u out of range (0-%u)\n",
981 cmdbuf.dma_idx, dma->buf_count - 1); 977 cmdbuf->dma_idx, dma->buf_count - 1);
982 return DRM_ERR(EINVAL); 978 return -EINVAL;
983 } 979 }
984 dmabuf = dma->buflist[cmdbuf.dma_idx]; 980 dmabuf = dma->buflist[cmdbuf->dma_idx];
985 } else { 981 } else {
986 dmabuf = NULL; 982 dmabuf = NULL;
987 } 983 }
@@ -991,47 +987,47 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
991 * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct 987 * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct
992 * for locking on FreeBSD. 988 * for locking on FreeBSD.
993 */ 989 */
994 if (cmdbuf.size) { 990 if (cmdbuf->size) {
995 kcmd_addr = drm_alloc(cmdbuf.size * 8, DRM_MEM_DRIVER); 991 kcmd_addr = drm_alloc(cmdbuf->size * 8, DRM_MEM_DRIVER);
996 if (kcmd_addr == NULL) 992 if (kcmd_addr == NULL)
997 return DRM_ERR(ENOMEM); 993 return -ENOMEM;
998 994
999 if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf.cmd_addr, 995 if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr,
1000 cmdbuf.size * 8)) 996 cmdbuf->size * 8))
1001 { 997 {
1002 drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER); 998 drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
1003 return DRM_ERR(EFAULT); 999 return -EFAULT;
1004 } 1000 }
1005 cmdbuf.cmd_addr = kcmd_addr; 1001 cmdbuf->cmd_addr = kcmd_addr;
1006 } 1002 }
1007 if (cmdbuf.vb_size) { 1003 if (cmdbuf->vb_size) {
1008 kvb_addr = drm_alloc(cmdbuf.vb_size, DRM_MEM_DRIVER); 1004 kvb_addr = drm_alloc(cmdbuf->vb_size, DRM_MEM_DRIVER);
1009 if (kvb_addr == NULL) { 1005 if (kvb_addr == NULL) {
1010 ret = DRM_ERR(ENOMEM); 1006 ret = -ENOMEM;
1011 goto done; 1007 goto done;
1012 } 1008 }
1013 1009
1014 if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf.vb_addr, 1010 if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf->vb_addr,
1015 cmdbuf.vb_size)) { 1011 cmdbuf->vb_size)) {
1016 ret = DRM_ERR(EFAULT); 1012 ret = -EFAULT;
1017 goto done; 1013 goto done;
1018 } 1014 }
1019 cmdbuf.vb_addr = kvb_addr; 1015 cmdbuf->vb_addr = kvb_addr;
1020 } 1016 }
1021 if (cmdbuf.nbox) { 1017 if (cmdbuf->nbox) {
1022 kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(struct drm_clip_rect), 1018 kbox_addr = drm_alloc(cmdbuf->nbox * sizeof(struct drm_clip_rect),
1023 DRM_MEM_DRIVER); 1019 DRM_MEM_DRIVER);
1024 if (kbox_addr == NULL) { 1020 if (kbox_addr == NULL) {
1025 ret = DRM_ERR(ENOMEM); 1021 ret = -ENOMEM;
1026 goto done; 1022 goto done;
1027 } 1023 }
1028 1024
1029 if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr, 1025 if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr,
1030 cmdbuf.nbox * sizeof(struct drm_clip_rect))) { 1026 cmdbuf->nbox * sizeof(struct drm_clip_rect))) {
1031 ret = DRM_ERR(EFAULT); 1027 ret = -EFAULT;
1032 goto done; 1028 goto done;
1033 } 1029 }
1034 cmdbuf.box_addr = kbox_addr; 1030 cmdbuf->box_addr = kbox_addr;
1035 } 1031 }
1036 1032
1037 /* Make sure writes to DMA buffers are finished before sending 1033 /* Make sure writes to DMA buffers are finished before sending
@@ -1044,10 +1040,10 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1044 1040
1045 i = 0; 1041 i = 0;
1046 first_draw_cmd = NULL; 1042 first_draw_cmd = NULL;
1047 while (i < cmdbuf.size) { 1043 while (i < cmdbuf->size) {
1048 drm_savage_cmd_header_t cmd_header; 1044 drm_savage_cmd_header_t cmd_header;
1049 cmd_header = *(drm_savage_cmd_header_t *)cmdbuf.cmd_addr; 1045 cmd_header = *(drm_savage_cmd_header_t *)cmdbuf->cmd_addr;
1050 cmdbuf.cmd_addr++; 1046 cmdbuf->cmd_addr++;
1051 i++; 1047 i++;
1052 1048
1053 /* Group drawing commands with same state to minimize 1049 /* Group drawing commands with same state to minimize
@@ -1057,28 +1053,28 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1057 case SAVAGE_CMD_DMA_IDX: 1053 case SAVAGE_CMD_DMA_IDX:
1058 case SAVAGE_CMD_VB_IDX: 1054 case SAVAGE_CMD_VB_IDX:
1059 j = (cmd_header.idx.count + 3) / 4; 1055 j = (cmd_header.idx.count + 3) / 4;
1060 if (i + j > cmdbuf.size) { 1056 if (i + j > cmdbuf->size) {
1061 DRM_ERROR("indexed drawing command extends " 1057 DRM_ERROR("indexed drawing command extends "
1062 "beyond end of command buffer\n"); 1058 "beyond end of command buffer\n");
1063 DMA_FLUSH(); 1059 DMA_FLUSH();
1064 return DRM_ERR(EINVAL); 1060 return -EINVAL;
1065 } 1061 }
1066 /* fall through */ 1062 /* fall through */
1067 case SAVAGE_CMD_DMA_PRIM: 1063 case SAVAGE_CMD_DMA_PRIM:
1068 case SAVAGE_CMD_VB_PRIM: 1064 case SAVAGE_CMD_VB_PRIM:
1069 if (!first_draw_cmd) 1065 if (!first_draw_cmd)
1070 first_draw_cmd = cmdbuf.cmd_addr - 1; 1066 first_draw_cmd = cmdbuf->cmd_addr - 1;
1071 cmdbuf.cmd_addr += j; 1067 cmdbuf->cmd_addr += j;
1072 i += j; 1068 i += j;
1073 break; 1069 break;
1074 default: 1070 default:
1075 if (first_draw_cmd) { 1071 if (first_draw_cmd) {
1076 ret = savage_dispatch_draw( 1072 ret = savage_dispatch_draw(
1077 dev_priv, first_draw_cmd, 1073 dev_priv, first_draw_cmd,
1078 cmdbuf.cmd_addr - 1, 1074 cmdbuf->cmd_addr - 1,
1079 dmabuf, cmdbuf.vb_addr, cmdbuf.vb_size, 1075 dmabuf, cmdbuf->vb_addr, cmdbuf->vb_size,
1080 cmdbuf.vb_stride, 1076 cmdbuf->vb_stride,
1081 cmdbuf.nbox, cmdbuf.box_addr); 1077 cmdbuf->nbox, cmdbuf->box_addr);
1082 if (ret != 0) 1078 if (ret != 0)
1083 return ret; 1079 return ret;
1084 first_draw_cmd = NULL; 1080 first_draw_cmd = NULL;
@@ -1090,40 +1086,42 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1090 switch (cmd_header.cmd.cmd) { 1086 switch (cmd_header.cmd.cmd) {
1091 case SAVAGE_CMD_STATE: 1087 case SAVAGE_CMD_STATE:
1092 j = (cmd_header.state.count + 1) / 2; 1088 j = (cmd_header.state.count + 1) / 2;
1093 if (i + j > cmdbuf.size) { 1089 if (i + j > cmdbuf->size) {
1094 DRM_ERROR("command SAVAGE_CMD_STATE extends " 1090 DRM_ERROR("command SAVAGE_CMD_STATE extends "
1095 "beyond end of command buffer\n"); 1091 "beyond end of command buffer\n");
1096 DMA_FLUSH(); 1092 DMA_FLUSH();
1097 ret = DRM_ERR(EINVAL); 1093 ret = -EINVAL;
1098 goto done; 1094 goto done;
1099 } 1095 }
1100 ret = savage_dispatch_state(dev_priv, &cmd_header, 1096 ret = savage_dispatch_state(dev_priv, &cmd_header,
1101 (const uint32_t *)cmdbuf.cmd_addr); 1097 (const uint32_t *)cmdbuf->cmd_addr);
1102 cmdbuf.cmd_addr += j; 1098 cmdbuf->cmd_addr += j;
1103 i += j; 1099 i += j;
1104 break; 1100 break;
1105 case SAVAGE_CMD_CLEAR: 1101 case SAVAGE_CMD_CLEAR:
1106 if (i + 1 > cmdbuf.size) { 1102 if (i + 1 > cmdbuf->size) {
1107 DRM_ERROR("command SAVAGE_CMD_CLEAR extends " 1103 DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
1108 "beyond end of command buffer\n"); 1104 "beyond end of command buffer\n");
1109 DMA_FLUSH(); 1105 DMA_FLUSH();
1110 ret = DRM_ERR(EINVAL); 1106 ret = -EINVAL;
1111 goto done; 1107 goto done;
1112 } 1108 }
1113 ret = savage_dispatch_clear(dev_priv, &cmd_header, 1109 ret = savage_dispatch_clear(dev_priv, &cmd_header,
1114 cmdbuf.cmd_addr, 1110 cmdbuf->cmd_addr,
1115 cmdbuf.nbox, cmdbuf.box_addr); 1111 cmdbuf->nbox,
1116 cmdbuf.cmd_addr++; 1112 cmdbuf->box_addr);
1113 cmdbuf->cmd_addr++;
1117 i++; 1114 i++;
1118 break; 1115 break;
1119 case SAVAGE_CMD_SWAP: 1116 case SAVAGE_CMD_SWAP:
1120 ret = savage_dispatch_swap(dev_priv, cmdbuf.nbox, 1117 ret = savage_dispatch_swap(dev_priv, cmdbuf->nbox,
1121 cmdbuf.box_addr); 1118 cmdbuf->box_addr);
1122 break; 1119 break;
1123 default: 1120 default:
1124 DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd); 1121 DRM_ERROR("invalid command 0x%x\n",
1122 cmd_header.cmd.cmd);
1125 DMA_FLUSH(); 1123 DMA_FLUSH();
1126 ret = DRM_ERR(EINVAL); 1124 ret = -EINVAL;
1127 goto done; 1125 goto done;
1128 } 1126 }
1129 1127
@@ -1135,9 +1133,9 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1135 1133
1136 if (first_draw_cmd) { 1134 if (first_draw_cmd) {
1137 ret = savage_dispatch_draw ( 1135 ret = savage_dispatch_draw (
1138 dev_priv, first_draw_cmd, cmdbuf.cmd_addr, dmabuf, 1136 dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf,
1139 cmdbuf.vb_addr, cmdbuf.vb_size, cmdbuf.vb_stride, 1137 cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride,
1140 cmdbuf.nbox, cmdbuf.box_addr); 1138 cmdbuf->nbox, cmdbuf->box_addr);
1141 if (ret != 0) { 1139 if (ret != 0) {
1142 DMA_FLUSH(); 1140 DMA_FLUSH();
1143 goto done; 1141 goto done;
@@ -1146,7 +1144,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1146 1144
1147 DMA_FLUSH(); 1145 DMA_FLUSH();
1148 1146
1149 if (dmabuf && cmdbuf.discard) { 1147 if (dmabuf && cmdbuf->discard) {
1150 drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private; 1148 drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private;
1151 uint16_t event; 1149 uint16_t event;
1152 event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D); 1150 event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
@@ -1156,9 +1154,9 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1156 1154
1157done: 1155done:
1158 /* If we didn't need to allocate them, these'll be NULL */ 1156 /* If we didn't need to allocate them, these'll be NULL */
1159 drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER); 1157 drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
1160 drm_free(kvb_addr, cmdbuf.vb_size, DRM_MEM_DRIVER); 1158 drm_free(kvb_addr, cmdbuf->vb_size, DRM_MEM_DRIVER);
1161 drm_free(kbox_addr, cmdbuf.nbox * sizeof(struct drm_clip_rect), 1159 drm_free(kbox_addr, cmdbuf->nbox * sizeof(struct drm_clip_rect),
1162 DRM_MEM_DRIVER); 1160 DRM_MEM_DRIVER);
1163 1161
1164 return ret; 1162 return ret;
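The savage_bci_cmdbuf() conversion above is representative of the whole patch: handlers lose the DRM_IOCTL_ARGS/DRM_DEVICE boilerplate and instead receive (dev, data, file_priv), with the DRM core having already copied the ioctl argument into kernel memory, so the per-driver DRM_COPY_FROM_USER_IOCTL step and the DRM_ERR() wrapper both go away. A minimal sketch of the resulting handler shape, using a hypothetical foo driver and foo_init_t argument that are not part of this patch:

#include "drmP.h"

/* Hypothetical per-driver ioctl argument; real drivers define theirs in
 * their <driver>_drm.h header. */
typedef struct foo_init {
	unsigned int size;
	unsigned int offset;
} foo_init_t;

/* New-style handler: 'data' already points at a kernel copy of the
 * argument made by the DRM core, and the lock test is keyed on the
 * per-open drm_file instead of the struct file. */
static int foo_init_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	foo_init_t *init = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (init->size == 0)
		return -EINVAL;	/* plain negative errno, no DRM_ERR() */

	init->offset = 0;	/* written in place; the core copies read-direction
				 * arguments back to user space */
	return 0;
}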
diff --git a/drivers/char/drm/sis_drv.c b/drivers/char/drm/sis_drv.c
index 1912f5857051..7dacc64e9b56 100644
--- a/drivers/char/drm/sis_drv.c
+++ b/drivers/char/drm/sis_drv.c
@@ -42,7 +42,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
42 42
43 dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER); 43 dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER);
44 if (dev_priv == NULL) 44 if (dev_priv == NULL)
45 return DRM_ERR(ENOMEM); 45 return -ENOMEM;
46 46
47 dev->dev_private = (void *)dev_priv; 47 dev->dev_private = (void *)dev_priv;
48 dev_priv->chipset = chipset; 48 dev_priv->chipset = chipset;
diff --git a/drivers/char/drm/sis_drv.h b/drivers/char/drm/sis_drv.h
index 5630df874353..ef940bad63f7 100644
--- a/drivers/char/drm/sis_drv.h
+++ b/drivers/char/drm/sis_drv.h
@@ -63,10 +63,11 @@ typedef struct drm_sis_private {
63} drm_sis_private_t; 63} drm_sis_private_t;
64 64
65extern int sis_idle(struct drm_device *dev); 65extern int sis_idle(struct drm_device *dev);
66extern void sis_reclaim_buffers_locked(struct drm_device *dev, struct file *filp); 66extern void sis_reclaim_buffers_locked(struct drm_device *dev,
67 struct drm_file *file_priv);
67extern void sis_lastclose(struct drm_device *dev); 68extern void sis_lastclose(struct drm_device *dev);
68 69
69extern drm_ioctl_desc_t sis_ioctls[]; 70extern struct drm_ioctl_desc sis_ioctls[];
70extern int sis_max_ioctl; 71extern int sis_max_ioctl;
71 72
72#endif 73#endif
diff --git a/drivers/char/drm/sis_mm.c b/drivers/char/drm/sis_mm.c
index 441bbdbf1510..8c66838ff515 100644
--- a/drivers/char/drm/sis_mm.c
+++ b/drivers/char/drm/sis_mm.c
@@ -82,15 +82,12 @@ static unsigned long sis_sman_mm_offset(void *private, void *ref)
82 82
83#endif /* CONFIG_FB_SIS */ 83#endif /* CONFIG_FB_SIS */
84 84
85static int sis_fb_init(DRM_IOCTL_ARGS) 85static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
86{ 86{
87 DRM_DEVICE;
88 drm_sis_private_t *dev_priv = dev->dev_private; 87 drm_sis_private_t *dev_priv = dev->dev_private;
89 drm_sis_fb_t fb; 88 drm_sis_fb_t *fb = data;
90 int ret; 89 int ret;
91 90
92 DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t __user *) data, sizeof(fb));
93
94 mutex_lock(&dev->struct_mutex); 91 mutex_lock(&dev->struct_mutex);
95#if defined(CONFIG_FB_SIS) 92#if defined(CONFIG_FB_SIS)
96 { 93 {
@@ -105,7 +102,7 @@ static int sis_fb_init(DRM_IOCTL_ARGS)
105 } 102 }
106#else 103#else
107 ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0, 104 ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0,
108 fb.size >> SIS_MM_ALIGN_SHIFT); 105 fb->size >> SIS_MM_ALIGN_SHIFT);
109#endif 106#endif
110 107
111 if (ret) { 108 if (ret) {
@@ -115,98 +112,87 @@ static int sis_fb_init(DRM_IOCTL_ARGS)
115 } 112 }
116 113
117 dev_priv->vram_initialized = 1; 114 dev_priv->vram_initialized = 1;
118 dev_priv->vram_offset = fb.offset; 115 dev_priv->vram_offset = fb->offset;
119 116
120 mutex_unlock(&dev->struct_mutex); 117 mutex_unlock(&dev->struct_mutex);
121 DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size); 118 DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size);
122 119
123 return 0; 120 return 0;
124} 121}
125 122
126static int sis_drm_alloc(struct drm_device *dev, struct drm_file * priv, 123static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
127 unsigned long data, int pool) 124 void *data, int pool)
128{ 125{
129 drm_sis_private_t *dev_priv = dev->dev_private; 126 drm_sis_private_t *dev_priv = dev->dev_private;
130 drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *) data; 127 drm_sis_mem_t *mem = data;
131 drm_sis_mem_t mem;
132 int retval = 0; 128 int retval = 0;
133 struct drm_memblock_item *item; 129 struct drm_memblock_item *item;
134 130
135 DRM_COPY_FROM_USER_IOCTL(mem, argp, sizeof(mem));
136
137 mutex_lock(&dev->struct_mutex); 131 mutex_lock(&dev->struct_mutex);
138 132
139 if (0 == ((pool == 0) ? dev_priv->vram_initialized : 133 if (0 == ((pool == 0) ? dev_priv->vram_initialized :
140 dev_priv->agp_initialized)) { 134 dev_priv->agp_initialized)) {
141 DRM_ERROR 135 DRM_ERROR
142 ("Attempt to allocate from uninitialized memory manager.\n"); 136 ("Attempt to allocate from uninitialized memory manager.\n");
143 return DRM_ERR(EINVAL); 137 return -EINVAL;
144 } 138 }
145 139
146 mem.size = (mem.size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT; 140 mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
147 item = drm_sman_alloc(&dev_priv->sman, pool, mem.size, 0, 141 item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0,
148 (unsigned long)priv); 142 (unsigned long)file_priv);
149 143
150 mutex_unlock(&dev->struct_mutex); 144 mutex_unlock(&dev->struct_mutex);
151 if (item) { 145 if (item) {
152 mem.offset = ((pool == 0) ? 146 mem->offset = ((pool == 0) ?
153 dev_priv->vram_offset : dev_priv->agp_offset) + 147 dev_priv->vram_offset : dev_priv->agp_offset) +
154 (item->mm-> 148 (item->mm->
155 offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT); 149 offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT);
156 mem.free = item->user_hash.key; 150 mem->free = item->user_hash.key;
157 mem.size = mem.size << SIS_MM_ALIGN_SHIFT; 151 mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
158 } else { 152 } else {
159 mem.offset = 0; 153 mem->offset = 0;
160 mem.size = 0; 154 mem->size = 0;
161 mem.free = 0; 155 mem->free = 0;
162 retval = DRM_ERR(ENOMEM); 156 retval = -ENOMEM;
163 } 157 }
164 158
165 DRM_COPY_TO_USER_IOCTL(argp, mem, sizeof(mem)); 159 DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size,
166 160 mem->offset);
167 DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem.size,
168 mem.offset);
169 161
170 return retval; 162 return retval;
171} 163}
172 164
173static int sis_drm_free(DRM_IOCTL_ARGS) 165static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
174{ 166{
175 DRM_DEVICE;
176 drm_sis_private_t *dev_priv = dev->dev_private; 167 drm_sis_private_t *dev_priv = dev->dev_private;
177 drm_sis_mem_t mem; 168 drm_sis_mem_t *mem = data;
178 int ret; 169 int ret;
179 170
180 DRM_COPY_FROM_USER_IOCTL(mem, (drm_sis_mem_t __user *) data,
181 sizeof(mem));
182
183 mutex_lock(&dev->struct_mutex); 171 mutex_lock(&dev->struct_mutex);
184 ret = drm_sman_free_key(&dev_priv->sman, mem.free); 172 ret = drm_sman_free_key(&dev_priv->sman, mem->free);
185 mutex_unlock(&dev->struct_mutex); 173 mutex_unlock(&dev->struct_mutex);
186 DRM_DEBUG("free = 0x%lx\n", mem.free); 174 DRM_DEBUG("free = 0x%lx\n", mem->free);
187 175
188 return ret; 176 return ret;
189} 177}
190 178
191static int sis_fb_alloc(DRM_IOCTL_ARGS) 179static int sis_fb_alloc(struct drm_device *dev, void *data,
180 struct drm_file *file_priv)
192{ 181{
193 DRM_DEVICE; 182 return sis_drm_alloc(dev, file_priv, data, VIDEO_TYPE);
194 return sis_drm_alloc(dev, priv, data, VIDEO_TYPE);
195} 183}
196 184
197static int sis_ioctl_agp_init(DRM_IOCTL_ARGS) 185static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
186 struct drm_file *file_priv)
198{ 187{
199 DRM_DEVICE;
200 drm_sis_private_t *dev_priv = dev->dev_private; 188 drm_sis_private_t *dev_priv = dev->dev_private;
201 drm_sis_agp_t agp; 189 drm_sis_agp_t *agp = data;
202 int ret; 190 int ret;
203 dev_priv = dev->dev_private; 191 dev_priv = dev->dev_private;
204 192
205 DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t __user *) data,
206 sizeof(agp));
207 mutex_lock(&dev->struct_mutex); 193 mutex_lock(&dev->struct_mutex);
208 ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0, 194 ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0,
209 agp.size >> SIS_MM_ALIGN_SHIFT); 195 agp->size >> SIS_MM_ALIGN_SHIFT);
210 196
211 if (ret) { 197 if (ret) {
212 DRM_ERROR("AGP memory manager initialisation error\n"); 198 DRM_ERROR("AGP memory manager initialisation error\n");
@@ -215,18 +201,18 @@ static int sis_ioctl_agp_init(DRM_IOCTL_ARGS)
215 } 201 }
216 202
217 dev_priv->agp_initialized = 1; 203 dev_priv->agp_initialized = 1;
218 dev_priv->agp_offset = agp.offset; 204 dev_priv->agp_offset = agp->offset;
219 mutex_unlock(&dev->struct_mutex); 205 mutex_unlock(&dev->struct_mutex);
220 206
221 DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size); 207 DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size);
222 return 0; 208 return 0;
223} 209}
224 210
225static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS) 211static int sis_ioctl_agp_alloc(struct drm_device *dev, void *data,
212 struct drm_file *file_priv)
226{ 213{
227 DRM_DEVICE;
228 214
229 return sis_drm_alloc(dev, priv, data, AGP_TYPE); 215 return sis_drm_alloc(dev, file_priv, data, AGP_TYPE);
230} 216}
231 217
232static drm_local_map_t *sis_reg_init(struct drm_device *dev) 218static drm_local_map_t *sis_reg_init(struct drm_device *dev)
@@ -314,13 +300,13 @@ void sis_lastclose(struct drm_device *dev)
314 mutex_unlock(&dev->struct_mutex); 300 mutex_unlock(&dev->struct_mutex);
315} 301}
316 302
317void sis_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) 303void sis_reclaim_buffers_locked(struct drm_device * dev,
304 struct drm_file *file_priv)
318{ 305{
319 drm_sis_private_t *dev_priv = dev->dev_private; 306 drm_sis_private_t *dev_priv = dev->dev_private;
320 struct drm_file *priv = filp->private_data;
321 307
322 mutex_lock(&dev->struct_mutex); 308 mutex_lock(&dev->struct_mutex);
323 if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) { 309 if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
324 mutex_unlock(&dev->struct_mutex); 310 mutex_unlock(&dev->struct_mutex);
325 return; 311 return;
326 } 312 }
@@ -329,20 +315,18 @@ void sis_reclaim_buffers_locked(struct drm_device * dev, struct file *filp)
329 dev->driver->dma_quiescent(dev); 315 dev->driver->dma_quiescent(dev);
330 } 316 }
331 317
332 drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)priv); 318 drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
333 mutex_unlock(&dev->struct_mutex); 319 mutex_unlock(&dev->struct_mutex);
334 return; 320 return;
335} 321}
336 322
337drm_ioctl_desc_t sis_ioctls[] = { 323struct drm_ioctl_desc sis_ioctls[] = {
338 [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, DRM_AUTH}, 324 DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
339 [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_drm_free, DRM_AUTH}, 325 DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH),
340 [DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = 326 DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
341 {sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY}, 327 DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
342 [DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, DRM_AUTH}, 328 DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
343 [DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_drm_free, DRM_AUTH}, 329 DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
344 [DRM_IOCTL_NR(DRM_SIS_FB_INIT)] =
345 {sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY}
346}; 330};
347 331
348int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls); 332int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
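sis_reclaim_buffers_locked() above also picks up the related callback change: functions that used to take a struct file * and dereference filp->private_data now receive the struct drm_file * directly, and per-owner bookkeeping (here drm_sman) keys on that pointer. A hedged sketch of the same pattern for a hypothetical foo driver that also uses drm_sman:

#include "drmP.h"
#include "drm_sman.h"

/* Hypothetical driver-private struct; only the sman member matters here. */
typedef struct foo_private {
	struct drm_sman sman;
} foo_private_t;

/* The core now passes the per-open drm_file straight through, so the old
 * 'struct drm_file *priv = filp->private_data' indirection disappears and
 * the same pointer serves as the allocation owner key. */
void foo_reclaim_buffers_locked(struct drm_device *dev,
				struct drm_file *file_priv)
{
	foo_private_t *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (!drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
		if (dev->driver->dma_quiescent)
			dev->driver->dma_quiescent(dev);
		drm_sman_owner_cleanup(&dev_priv->sman,
				       (unsigned long)file_priv);
	}
	mutex_unlock(&dev->struct_mutex);
}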
diff --git a/drivers/char/drm/via_dma.c b/drivers/char/drm/via_dma.c
index 7ff2b623c2d4..75d6b748c2c0 100644
--- a/drivers/char/drm/via_dma.c
+++ b/drivers/char/drm/via_dma.c
@@ -175,24 +175,24 @@ static int via_initialize(struct drm_device * dev,
175{ 175{
176 if (!dev_priv || !dev_priv->mmio) { 176 if (!dev_priv || !dev_priv->mmio) {
177 DRM_ERROR("via_dma_init called before via_map_init\n"); 177 DRM_ERROR("via_dma_init called before via_map_init\n");
178 return DRM_ERR(EFAULT); 178 return -EFAULT;
179 } 179 }
180 180
181 if (dev_priv->ring.virtual_start != NULL) { 181 if (dev_priv->ring.virtual_start != NULL) {
182 DRM_ERROR("%s called again without calling cleanup\n", 182 DRM_ERROR("%s called again without calling cleanup\n",
183 __FUNCTION__); 183 __FUNCTION__);
184 return DRM_ERR(EFAULT); 184 return -EFAULT;
185 } 185 }
186 186
187 if (!dev->agp || !dev->agp->base) { 187 if (!dev->agp || !dev->agp->base) {
188 DRM_ERROR("%s called with no agp memory available\n", 188 DRM_ERROR("%s called with no agp memory available\n",
189 __FUNCTION__); 189 __FUNCTION__);
190 return DRM_ERR(EFAULT); 190 return -EFAULT;
191 } 191 }
192 192
193 if (dev_priv->chipset == VIA_DX9_0) { 193 if (dev_priv->chipset == VIA_DX9_0) {
194 DRM_ERROR("AGP DMA is not supported on this chip\n"); 194 DRM_ERROR("AGP DMA is not supported on this chip\n");
195 return DRM_ERR(EINVAL); 195 return -EINVAL;
196 } 196 }
197 197
198 dev_priv->ring.map.offset = dev->agp->base + init->offset; 198 dev_priv->ring.map.offset = dev->agp->base + init->offset;
@@ -207,7 +207,7 @@ static int via_initialize(struct drm_device * dev,
207 via_dma_cleanup(dev); 207 via_dma_cleanup(dev);
208 DRM_ERROR("can not ioremap virtual address for" 208 DRM_ERROR("can not ioremap virtual address for"
209 " ring buffer\n"); 209 " ring buffer\n");
210 return DRM_ERR(ENOMEM); 210 return -ENOMEM;
211 } 211 }
212 212
213 dev_priv->ring.virtual_start = dev_priv->ring.map.handle; 213 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
@@ -227,35 +227,31 @@ static int via_initialize(struct drm_device * dev,
227 return 0; 227 return 0;
228} 228}
229 229
230static int via_dma_init(DRM_IOCTL_ARGS) 230static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
231{ 231{
232 DRM_DEVICE;
233 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 232 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
234 drm_via_dma_init_t init; 233 drm_via_dma_init_t *init = data;
235 int retcode = 0; 234 int retcode = 0;
236 235
237 DRM_COPY_FROM_USER_IOCTL(init, (drm_via_dma_init_t __user *) data, 236 switch (init->func) {
238 sizeof(init));
239
240 switch (init.func) {
241 case VIA_INIT_DMA: 237 case VIA_INIT_DMA:
242 if (!DRM_SUSER(DRM_CURPROC)) 238 if (!DRM_SUSER(DRM_CURPROC))
243 retcode = DRM_ERR(EPERM); 239 retcode = -EPERM;
244 else 240 else
245 retcode = via_initialize(dev, dev_priv, &init); 241 retcode = via_initialize(dev, dev_priv, init);
246 break; 242 break;
247 case VIA_CLEANUP_DMA: 243 case VIA_CLEANUP_DMA:
248 if (!DRM_SUSER(DRM_CURPROC)) 244 if (!DRM_SUSER(DRM_CURPROC))
249 retcode = DRM_ERR(EPERM); 245 retcode = -EPERM;
250 else 246 else
251 retcode = via_dma_cleanup(dev); 247 retcode = via_dma_cleanup(dev);
252 break; 248 break;
253 case VIA_DMA_INITIALIZED: 249 case VIA_DMA_INITIALIZED:
254 retcode = (dev_priv->ring.virtual_start != NULL) ? 250 retcode = (dev_priv->ring.virtual_start != NULL) ?
255 0 : DRM_ERR(EFAULT); 251 0 : -EFAULT;
256 break; 252 break;
257 default: 253 default:
258 retcode = DRM_ERR(EINVAL); 254 retcode = -EINVAL;
259 break; 255 break;
260 } 256 }
261 257
@@ -273,15 +269,15 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t *
273 if (dev_priv->ring.virtual_start == NULL) { 269 if (dev_priv->ring.virtual_start == NULL) {
274 DRM_ERROR("%s called without initializing AGP ring buffer.\n", 270 DRM_ERROR("%s called without initializing AGP ring buffer.\n",
275 __FUNCTION__); 271 __FUNCTION__);
276 return DRM_ERR(EFAULT); 272 return -EFAULT;
277 } 273 }
278 274
279 if (cmd->size > VIA_PCI_BUF_SIZE) { 275 if (cmd->size > VIA_PCI_BUF_SIZE) {
280 return DRM_ERR(ENOMEM); 276 return -ENOMEM;
281 } 277 }
282 278
283 if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size)) 279 if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
284 return DRM_ERR(EFAULT); 280 return -EFAULT;
285 281
286 /* 282 /*
287 * Running this function on AGP memory is dead slow. Therefore 283 * Running this function on AGP memory is dead slow. Therefore
@@ -297,7 +293,7 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t *
297 293
298 vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size); 294 vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
299 if (vb == NULL) { 295 if (vb == NULL) {
300 return DRM_ERR(EAGAIN); 296 return -EAGAIN;
301 } 297 }
302 298
303 memcpy(vb, dev_priv->pci_buf, cmd->size); 299 memcpy(vb, dev_priv->pci_buf, cmd->size);
@@ -321,34 +317,30 @@ int via_driver_dma_quiescent(struct drm_device * dev)
321 drm_via_private_t *dev_priv = dev->dev_private; 317 drm_via_private_t *dev_priv = dev->dev_private;
322 318
323 if (!via_wait_idle(dev_priv)) { 319 if (!via_wait_idle(dev_priv)) {
324 return DRM_ERR(EBUSY); 320 return -EBUSY;
325 } 321 }
326 return 0; 322 return 0;
327} 323}
328 324
329static int via_flush_ioctl(DRM_IOCTL_ARGS) 325static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
330{ 326{
331 DRM_DEVICE;
332 327
333 LOCK_TEST_WITH_RETURN(dev, filp); 328 LOCK_TEST_WITH_RETURN(dev, file_priv);
334 329
335 return via_driver_dma_quiescent(dev); 330 return via_driver_dma_quiescent(dev);
336} 331}
337 332
338static int via_cmdbuffer(DRM_IOCTL_ARGS) 333static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
339{ 334{
340 DRM_DEVICE; 335 drm_via_cmdbuffer_t *cmdbuf = data;
341 drm_via_cmdbuffer_t cmdbuf;
342 int ret; 336 int ret;
343 337
344 LOCK_TEST_WITH_RETURN(dev, filp); 338 LOCK_TEST_WITH_RETURN(dev, file_priv);
345
346 DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data,
347 sizeof(cmdbuf));
348 339
349 DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf.buf, cmdbuf.size); 340 DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf->buf,
341 cmdbuf->size);
350 342
351 ret = via_dispatch_cmdbuffer(dev, &cmdbuf); 343 ret = via_dispatch_cmdbuffer(dev, cmdbuf);
352 if (ret) { 344 if (ret) {
353 return ret; 345 return ret;
354 } 346 }
@@ -363,10 +355,10 @@ static int via_dispatch_pci_cmdbuffer(struct drm_device * dev,
363 int ret; 355 int ret;
364 356
365 if (cmd->size > VIA_PCI_BUF_SIZE) { 357 if (cmd->size > VIA_PCI_BUF_SIZE) {
366 return DRM_ERR(ENOMEM); 358 return -ENOMEM;
367 } 359 }
368 if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size)) 360 if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
369 return DRM_ERR(EFAULT); 361 return -EFAULT;
370 362
371 if ((ret = 363 if ((ret =
372 via_verify_command_stream((uint32_t *) dev_priv->pci_buf, 364 via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
@@ -380,21 +372,17 @@ static int via_dispatch_pci_cmdbuffer(struct drm_device * dev,
380 return ret; 372 return ret;
381} 373}
382 374
383static int via_pci_cmdbuffer(DRM_IOCTL_ARGS) 375static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
384{ 376{
385 DRM_DEVICE; 377 drm_via_cmdbuffer_t *cmdbuf = data;
386 drm_via_cmdbuffer_t cmdbuf;
387 int ret; 378 int ret;
388 379
389 LOCK_TEST_WITH_RETURN(dev, filp); 380 LOCK_TEST_WITH_RETURN(dev, file_priv);
390 381
391 DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data, 382 DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf->buf,
392 sizeof(cmdbuf)); 383 cmdbuf->size);
393 384
394 DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf.buf, 385 ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
395 cmdbuf.size);
396
397 ret = via_dispatch_pci_cmdbuffer(dev, &cmdbuf);
398 if (ret) { 386 if (ret) {
399 return ret; 387 return ret;
400 } 388 }
@@ -653,80 +641,74 @@ static void via_cmdbuf_reset(drm_via_private_t * dev_priv)
653 * User interface to the space and lag functions. 641 * User interface to the space and lag functions.
654 */ 642 */
655 643
656static int via_cmdbuf_size(DRM_IOCTL_ARGS) 644static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv)
657{ 645{
658 DRM_DEVICE; 646 drm_via_cmdbuf_size_t *d_siz = data;
659 drm_via_cmdbuf_size_t d_siz;
660 int ret = 0; 647 int ret = 0;
661 uint32_t tmp_size, count; 648 uint32_t tmp_size, count;
662 drm_via_private_t *dev_priv; 649 drm_via_private_t *dev_priv;
663 650
664 DRM_DEBUG("via cmdbuf_size\n"); 651 DRM_DEBUG("via cmdbuf_size\n");
665 LOCK_TEST_WITH_RETURN(dev, filp); 652 LOCK_TEST_WITH_RETURN(dev, file_priv);
666 653
667 dev_priv = (drm_via_private_t *) dev->dev_private; 654 dev_priv = (drm_via_private_t *) dev->dev_private;
668 655
669 if (dev_priv->ring.virtual_start == NULL) { 656 if (dev_priv->ring.virtual_start == NULL) {
670 DRM_ERROR("%s called without initializing AGP ring buffer.\n", 657 DRM_ERROR("%s called without initializing AGP ring buffer.\n",
671 __FUNCTION__); 658 __FUNCTION__);
672 return DRM_ERR(EFAULT); 659 return -EFAULT;
673 } 660 }
674 661
675 DRM_COPY_FROM_USER_IOCTL(d_siz, (drm_via_cmdbuf_size_t __user *) data,
676 sizeof(d_siz));
677
678 count = 1000000; 662 count = 1000000;
679 tmp_size = d_siz.size; 663 tmp_size = d_siz->size;
680 switch (d_siz.func) { 664 switch (d_siz->func) {
681 case VIA_CMDBUF_SPACE: 665 case VIA_CMDBUF_SPACE:
682 while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz.size) 666 while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
683 && count--) { 667 && count--) {
684 if (!d_siz.wait) { 668 if (!d_siz->wait) {
685 break; 669 break;
686 } 670 }
687 } 671 }
688 if (!count) { 672 if (!count) {
689 DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n"); 673 DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
690 ret = DRM_ERR(EAGAIN); 674 ret = -EAGAIN;
691 } 675 }
692 break; 676 break;
693 case VIA_CMDBUF_LAG: 677 case VIA_CMDBUF_LAG:
694 while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz.size) 678 while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
695 && count--) { 679 && count--) {
696 if (!d_siz.wait) { 680 if (!d_siz->wait) {
697 break; 681 break;
698 } 682 }
699 } 683 }
700 if (!count) { 684 if (!count) {
701 DRM_ERROR("VIA_CMDBUF_LAG timed out.\n"); 685 DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
702 ret = DRM_ERR(EAGAIN); 686 ret = -EAGAIN;
703 } 687 }
704 break; 688 break;
705 default: 689 default:
706 ret = DRM_ERR(EFAULT); 690 ret = -EFAULT;
707 } 691 }
708 d_siz.size = tmp_size; 692 d_siz->size = tmp_size;
709 693
710 DRM_COPY_TO_USER_IOCTL((drm_via_cmdbuf_size_t __user *) data, d_siz,
711 sizeof(d_siz));
712 return ret; 694 return ret;
713} 695}
714 696
715drm_ioctl_desc_t via_ioctls[] = { 697struct drm_ioctl_desc via_ioctls[] = {
716 [DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, DRM_AUTH}, 698 DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
717 [DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, DRM_AUTH}, 699 DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH),
718 [DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, DRM_AUTH|DRM_MASTER}, 700 DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
719 [DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, DRM_AUTH|DRM_MASTER}, 701 DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
720 [DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, DRM_AUTH|DRM_MASTER}, 702 DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
721 [DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, DRM_AUTH}, 703 DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
722 [DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, DRM_AUTH}, 704 DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH),
723 [DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, DRM_AUTH}, 705 DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
724 [DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, DRM_AUTH}, 706 DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
725 [DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, DRM_AUTH}, 707 DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
726 [DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, DRM_AUTH}, 708 DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
727 [DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, DRM_AUTH}, 709 DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
728 [DRM_IOCTL_NR(DRM_VIA_DMA_BLIT)] = {via_dma_blit, DRM_AUTH}, 710 DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
729 [DRM_IOCTL_NR(DRM_VIA_BLIT_SYNC)] = {via_dma_blit_sync, DRM_AUTH} 711 DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
730}; 712};
731 713
732int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls); 714int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
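The via_ioctls[] table above (like sis_ioctls[] earlier) shows the other half of the interface change: designated-initializer arrays of drm_ioctl_desc_t indexed by DRM_IOCTL_NR() become struct drm_ioctl_desc arrays built with DRM_IOCTL_DEF(), which bundles command number, handler and permission flags into one entry. A sketch for a hypothetical foo driver; the command names and handlers are illustrative only:

#include "drmP.h"
#include "foo_drm.h"	/* hypothetical header defining the DRM_FOO_* command numbers */

/* Handlers live elsewhere in the driver, declared the way via_drv.h does. */
extern int foo_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int foo_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int foo_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);

/* One DRM_IOCTL_DEF() per command: number, handler, permission flags. */
struct drm_ioctl_desc foo_ioctls[] = {
	DRM_IOCTL_DEF(DRM_FOO_ALLOC, foo_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_FOO_FREE, foo_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_FOO_FB_INIT, foo_fb_init,
		      DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
};

int foo_max_ioctl = DRM_ARRAY_SIZE(foo_ioctls);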
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
index 3dd1ed3d1bf5..c6fd16f3cb43 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/char/drm/via_dmablit.c
@@ -237,7 +237,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
237 first_pfn + 1; 237 first_pfn + 1;
238 238
239 if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages))) 239 if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
240 return DRM_ERR(ENOMEM); 240 return -ENOMEM;
241 memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages); 241 memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
242 down_read(&current->mm->mmap_sem); 242 down_read(&current->mm->mmap_sem);
243 ret = get_user_pages(current, current->mm, 243 ret = get_user_pages(current, current->mm,
@@ -251,7 +251,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
251 if (ret < 0) 251 if (ret < 0)
252 return ret; 252 return ret;
253 vsg->state = dr_via_pages_locked; 253 vsg->state = dr_via_pages_locked;
254 return DRM_ERR(EINVAL); 254 return -EINVAL;
255 } 255 }
256 vsg->state = dr_via_pages_locked; 256 vsg->state = dr_via_pages_locked;
257 DRM_DEBUG("DMA pages locked\n"); 257 DRM_DEBUG("DMA pages locked\n");
@@ -274,13 +274,13 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
274 vsg->descriptors_per_page; 274 vsg->descriptors_per_page;
275 275
276 if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL))) 276 if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
277 return DRM_ERR(ENOMEM); 277 return -ENOMEM;
278 278
279 vsg->state = dr_via_desc_pages_alloc; 279 vsg->state = dr_via_desc_pages_alloc;
280 for (i=0; i<vsg->num_desc_pages; ++i) { 280 for (i=0; i<vsg->num_desc_pages; ++i) {
281 if (NULL == (vsg->desc_pages[i] = 281 if (NULL == (vsg->desc_pages[i] =
282 (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL))) 282 (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
283 return DRM_ERR(ENOMEM); 283 return -ENOMEM;
284 } 284 }
285 DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages, 285 DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
286 vsg->num_desc); 286 vsg->num_desc);
@@ -593,7 +593,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
593 593
594 if (xfer->num_lines <= 0 || xfer->line_length <= 0) { 594 if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
595 DRM_ERROR("Zero size bitblt.\n"); 595 DRM_ERROR("Zero size bitblt.\n");
596 return DRM_ERR(EINVAL); 596 return -EINVAL;
597 } 597 }
598 598
599 /* 599 /*
@@ -606,7 +606,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
606 if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) { 606 if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) {
607 DRM_ERROR("Too large system memory stride. Stride: %d, " 607 DRM_ERROR("Too large system memory stride. Stride: %d, "
608 "Length: %d\n", xfer->mem_stride, xfer->line_length); 608 "Length: %d\n", xfer->mem_stride, xfer->line_length);
609 return DRM_ERR(EINVAL); 609 return -EINVAL;
610 } 610 }
611 611
612 if ((xfer->mem_stride == xfer->line_length) && 612 if ((xfer->mem_stride == xfer->line_length) &&
@@ -624,7 +624,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
624 624
625 if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) { 625 if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
626 DRM_ERROR("Too large PCI DMA bitblt.\n"); 626 DRM_ERROR("Too large PCI DMA bitblt.\n");
627 return DRM_ERR(EINVAL); 627 return -EINVAL;
628 } 628 }
629 629
630 /* 630 /*
@@ -635,7 +635,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
635 if (xfer->mem_stride < xfer->line_length || 635 if (xfer->mem_stride < xfer->line_length ||
636 abs(xfer->fb_stride) < xfer->line_length) { 636 abs(xfer->fb_stride) < xfer->line_length) {
637 DRM_ERROR("Invalid frame-buffer / memory stride.\n"); 637 DRM_ERROR("Invalid frame-buffer / memory stride.\n");
638 return DRM_ERR(EINVAL); 638 return -EINVAL;
639 } 639 }
640 640
641 /* 641 /*
@@ -648,7 +648,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
648 if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) || 648 if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
649 ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) { 649 ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
650 DRM_ERROR("Invalid DRM bitblt alignment.\n"); 650 DRM_ERROR("Invalid DRM bitblt alignment.\n");
651 return DRM_ERR(EINVAL); 651 return -EINVAL;
652 } 652 }
653#else 653#else
654 if ((((unsigned long)xfer->mem_addr & 15) || 654 if ((((unsigned long)xfer->mem_addr & 15) ||
@@ -656,7 +656,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
656 ((xfer->num_lines > 1) && 656 ((xfer->num_lines > 1) &&
657 ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) { 657 ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
658 DRM_ERROR("Invalid DRM bitblt alignment.\n"); 658 DRM_ERROR("Invalid DRM bitblt alignment.\n");
659 return DRM_ERR(EINVAL); 659 return -EINVAL;
660 } 660 }
661#endif 661#endif
662 662
@@ -696,7 +696,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
696 696
697 DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0); 697 DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
698 if (ret) { 698 if (ret) {
699 return (DRM_ERR(EINTR) == ret) ? DRM_ERR(EAGAIN) : ret; 699 return (-EINTR == ret) ? -EAGAIN : ret;
700 } 700 }
701 701
702 spin_lock_irqsave(&blitq->blit_lock, irqsave); 702 spin_lock_irqsave(&blitq->blit_lock, irqsave);
@@ -740,7 +740,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
740 740
741 if (dev_priv == NULL) { 741 if (dev_priv == NULL) {
742 DRM_ERROR("Called without initialization.\n"); 742 DRM_ERROR("Called without initialization.\n");
743 return DRM_ERR(EINVAL); 743 return -EINVAL;
744 } 744 }
745 745
746 engine = (xfer->to_fb) ? 0 : 1; 746 engine = (xfer->to_fb) ? 0 : 1;
@@ -750,7 +750,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
750 } 750 }
751 if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) { 751 if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
752 via_dmablit_release_slot(blitq); 752 via_dmablit_release_slot(blitq);
753 return DRM_ERR(ENOMEM); 753 return -ENOMEM;
754 } 754 }
755 if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) { 755 if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
756 via_dmablit_release_slot(blitq); 756 via_dmablit_release_slot(blitq);
@@ -781,21 +781,18 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
781 */ 781 */
782 782
783int 783int
784via_dma_blit_sync( DRM_IOCTL_ARGS ) 784via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv )
785{ 785{
786 drm_via_blitsync_t sync; 786 drm_via_blitsync_t *sync = data;
787 int err; 787 int err;
788 DRM_DEVICE;
789 788
790 DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync)); 789 if (sync->engine >= VIA_NUM_BLIT_ENGINES)
791 790 return -EINVAL;
792 if (sync.engine >= VIA_NUM_BLIT_ENGINES)
793 return DRM_ERR(EINVAL);
794 791
795 err = via_dmablit_sync(dev, sync.sync_handle, sync.engine); 792 err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
796 793
797 if (DRM_ERR(EINTR) == err) 794 if (-EINTR == err)
798 err = DRM_ERR(EAGAIN); 795 err = -EAGAIN;
799 796
800 return err; 797 return err;
801} 798}
@@ -808,17 +805,12 @@ via_dma_blit_sync( DRM_IOCTL_ARGS )
808 */ 805 */
809 806
810int 807int
811via_dma_blit( DRM_IOCTL_ARGS ) 808via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv )
812{ 809{
813 drm_via_dmablit_t xfer; 810 drm_via_dmablit_t *xfer = data;
814 int err; 811 int err;
815 DRM_DEVICE;
816
817 DRM_COPY_FROM_USER_IOCTL(xfer, (drm_via_dmablit_t __user *)data, sizeof(xfer));
818
819 err = via_dmablit(dev, &xfer);
820 812
821 DRM_COPY_TO_USER_IOCTL((void __user *)data, xfer, sizeof(xfer)); 813 err = via_dmablit(dev, xfer);
822 814
823 return err; 815 return err;
824} 816}
diff --git a/drivers/char/drm/via_drv.h b/drivers/char/drm/via_drv.h
index 576711564a11..2daae81874cd 100644
--- a/drivers/char/drm/via_drv.h
+++ b/drivers/char/drm/via_drv.h
@@ -110,18 +110,18 @@ enum via_family {
110#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg) 110#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
111#define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val) 111#define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val)
112 112
113extern drm_ioctl_desc_t via_ioctls[]; 113extern struct drm_ioctl_desc via_ioctls[];
114extern int via_max_ioctl; 114extern int via_max_ioctl;
115 115
116extern int via_fb_init(DRM_IOCTL_ARGS); 116extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
117extern int via_mem_alloc(DRM_IOCTL_ARGS); 117extern int via_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
118extern int via_mem_free(DRM_IOCTL_ARGS); 118extern int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
119extern int via_agp_init(DRM_IOCTL_ARGS); 119extern int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
120extern int via_map_init(DRM_IOCTL_ARGS); 120extern int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
121extern int via_decoder_futex(DRM_IOCTL_ARGS); 121extern int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv);
122extern int via_wait_irq(DRM_IOCTL_ARGS); 122extern int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv);
123extern int via_dma_blit_sync( DRM_IOCTL_ARGS ); 123extern int via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv );
124extern int via_dma_blit( DRM_IOCTL_ARGS ); 124extern int via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv );
125 125
126extern int via_driver_load(struct drm_device *dev, unsigned long chipset); 126extern int via_driver_load(struct drm_device *dev, unsigned long chipset);
127extern int via_driver_unload(struct drm_device *dev); 127extern int via_driver_unload(struct drm_device *dev);
@@ -144,7 +144,7 @@ extern void via_init_futex(drm_via_private_t * dev_priv);
144extern void via_cleanup_futex(drm_via_private_t * dev_priv); 144extern void via_cleanup_futex(drm_via_private_t * dev_priv);
145extern void via_release_futex(drm_via_private_t * dev_priv, int context); 145extern void via_release_futex(drm_via_private_t * dev_priv, int context);
146 146
147extern void via_reclaim_buffers_locked(struct drm_device *dev, struct file *filp); 147extern void via_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv);
148extern void via_lastclose(struct drm_device *dev); 148extern void via_lastclose(struct drm_device *dev);
149 149
150extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq); 150extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq);
diff --git a/drivers/char/drm/via_irq.c b/drivers/char/drm/via_irq.c
index 8dc99b5fbab6..9c1d52bc92d7 100644
--- a/drivers/char/drm/via_irq.c
+++ b/drivers/char/drm/via_irq.c
@@ -205,13 +205,13 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
205 205
206 if (!dev_priv) { 206 if (!dev_priv) {
207 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 207 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
208 return DRM_ERR(EINVAL); 208 return -EINVAL;
209 } 209 }
210 210
211 if (irq >= drm_via_irq_num) { 211 if (irq >= drm_via_irq_num) {
212 DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, 212 DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
213 irq); 213 irq);
214 return DRM_ERR(EINVAL); 214 return -EINVAL;
215 } 215 }
216 216
217 real_irq = dev_priv->irq_map[irq]; 217 real_irq = dev_priv->irq_map[irq];
@@ -219,7 +219,7 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
219 if (real_irq < 0) { 219 if (real_irq < 0) {
220 DRM_ERROR("%s Video IRQ %d not available on this hardware.\n", 220 DRM_ERROR("%s Video IRQ %d not available on this hardware.\n",
221 __FUNCTION__, irq); 221 __FUNCTION__, irq);
222 return DRM_ERR(EINVAL); 222 return -EINVAL;
223 } 223 }
224 224
225 masks = dev_priv->irq_masks; 225 masks = dev_priv->irq_masks;
@@ -331,11 +331,9 @@ void via_driver_irq_uninstall(struct drm_device * dev)
331 } 331 }
332} 332}
333 333
334int via_wait_irq(DRM_IOCTL_ARGS) 334int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
335{ 335{
336 DRM_DEVICE; 336 drm_via_irqwait_t *irqwait = data;
337 drm_via_irqwait_t __user *argp = (void __user *)data;
338 drm_via_irqwait_t irqwait;
339 struct timeval now; 337 struct timeval now;
340 int ret = 0; 338 int ret = 0;
341 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 339 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
@@ -343,42 +341,39 @@ int via_wait_irq(DRM_IOCTL_ARGS)
343 int force_sequence; 341 int force_sequence;
344 342
345 if (!dev->irq) 343 if (!dev->irq)
346 return DRM_ERR(EINVAL); 344 return -EINVAL;
347 345
348 DRM_COPY_FROM_USER_IOCTL(irqwait, argp, sizeof(irqwait)); 346 if (irqwait->request.irq >= dev_priv->num_irqs) {
349 if (irqwait.request.irq >= dev_priv->num_irqs) {
350 DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, 347 DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
351 irqwait.request.irq); 348 irqwait->request.irq);
352 return DRM_ERR(EINVAL); 349 return -EINVAL;
353 } 350 }
354 351
355 cur_irq += irqwait.request.irq; 352 cur_irq += irqwait->request.irq;
356 353
357 switch (irqwait.request.type & ~VIA_IRQ_FLAGS_MASK) { 354 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
358 case VIA_IRQ_RELATIVE: 355 case VIA_IRQ_RELATIVE:
359 irqwait.request.sequence += atomic_read(&cur_irq->irq_received); 356 irqwait->request.sequence += atomic_read(&cur_irq->irq_received);
360 irqwait.request.type &= ~_DRM_VBLANK_RELATIVE; 357 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
361 case VIA_IRQ_ABSOLUTE: 358 case VIA_IRQ_ABSOLUTE:
362 break; 359 break;
363 default: 360 default:
364 return DRM_ERR(EINVAL); 361 return -EINVAL;
365 } 362 }
366 363
367 if (irqwait.request.type & VIA_IRQ_SIGNAL) { 364 if (irqwait->request.type & VIA_IRQ_SIGNAL) {
368 DRM_ERROR("%s Signals on Via IRQs not implemented yet.\n", 365 DRM_ERROR("%s Signals on Via IRQs not implemented yet.\n",
369 __FUNCTION__); 366 __FUNCTION__);
370 return DRM_ERR(EINVAL); 367 return -EINVAL;
371 } 368 }
372 369
373 force_sequence = (irqwait.request.type & VIA_IRQ_FORCE_SEQUENCE); 370 force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);
374 371
375 ret = via_driver_irq_wait(dev, irqwait.request.irq, force_sequence, 372 ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
376 &irqwait.request.sequence); 373 &irqwait->request.sequence);
377 do_gettimeofday(&now); 374 do_gettimeofday(&now);
378 irqwait.reply.tval_sec = now.tv_sec; 375 irqwait->reply.tval_sec = now.tv_sec;
379 irqwait.reply.tval_usec = now.tv_usec; 376 irqwait->reply.tval_usec = now.tv_usec;
380
381 DRM_COPY_TO_USER_IOCTL(argp, irqwait, sizeof(irqwait));
382 377
383 return ret; 378 return ret;
384} 379}
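
The via_wait_irq hunk above is representative of the interface change made throughout this patch: the old DRM_IOCTL_ARGS/DRM_DEVICE handlers received a user-space pointer and copied the argument structure in and out themselves (DRM_COPY_FROM_USER_IOCTL / DRM_COPY_TO_USER_IOCTL), while the new prototype receives the device, a kernel-side copy of the argument in `data`, and the calling client as `struct drm_file *`, with the copy-in/copy-out done once by the DRM core. The sketch below is a simplified, user-space model of that calling convention, not the kernel code; every `fake_*` name is invented for illustration.

    /* Simplified model of the ioctl-handler conversion shown above.
     * Only the shape mirrors the patch: the "core" copies the argument,
     * the handler works on a kernel-side pointer and returns -errno.
     */
    #include <errno.h>
    #include <stdio.h>

    struct fake_device  { int irq; };
    struct fake_file    { int id;  };
    struct fake_irqwait { unsigned int irq; unsigned int sequence; };

    /* New-style handler: `data` already points at a kernel-side copy. */
    static int fake_wait_irq(struct fake_device *dev, void *data,
                             struct fake_file *file_priv)
    {
        struct fake_irqwait *irqwait = data;

        (void)file_priv;        /* the real driver keys state off this */

        if (!dev->irq)
            return -EINVAL;     /* was DRM_ERR(EINVAL) */

        irqwait->sequence += 1; /* results are written in place...      */
        return 0;               /* ...and copied back by the caller     */
    }

    /* Stand-in for the DRM core: copy in, call the handler, copy out. */
    static int fake_ioctl(struct fake_device *dev, struct fake_file *fp,
                          const struct fake_irqwait *user_arg,
                          struct fake_irqwait *user_out)
    {
        struct fake_irqwait karg = *user_arg;  /* DRM_COPY_FROM_USER_IOCTL */
        int ret = fake_wait_irq(dev, &karg, fp);
        *user_out = karg;                      /* DRM_COPY_TO_USER_IOCTL   */
        return ret;
    }

    int main(void)
    {
        struct fake_device dev = { .irq = 1 };
        struct fake_file fp = { .id = 42 };
        struct fake_irqwait arg = { .irq = 0, .sequence = 7 }, out;

        int ret = fake_ioctl(&dev, &fp, &arg, &out);
        printf("ret=%d sequence=%u\n", ret, out.sequence);
        return 0;
    }
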
diff --git a/drivers/char/drm/via_map.c b/drivers/char/drm/via_map.c
index 7fb9d2a2cce2..10091507a0dc 100644
--- a/drivers/char/drm/via_map.c
+++ b/drivers/char/drm/via_map.c
@@ -75,19 +75,15 @@ int via_do_cleanup_map(struct drm_device * dev)
75 return 0; 75 return 0;
76} 76}
77 77
78int via_map_init(DRM_IOCTL_ARGS) 78int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
79{ 79{
80 DRM_DEVICE; 80 drm_via_init_t *init = data;
81 drm_via_init_t init;
82 81
83 DRM_DEBUG("%s\n", __FUNCTION__); 82 DRM_DEBUG("%s\n", __FUNCTION__);
84 83
85 DRM_COPY_FROM_USER_IOCTL(init, (drm_via_init_t __user *) data, 84 switch (init->func) {
86 sizeof(init));
87
88 switch (init.func) {
89 case VIA_INIT_MAP: 85 case VIA_INIT_MAP:
90 return via_do_init_map(dev, &init); 86 return via_do_init_map(dev, init);
91 case VIA_CLEANUP_MAP: 87 case VIA_CLEANUP_MAP:
92 return via_do_cleanup_map(dev); 88 return via_do_cleanup_map(dev);
93 } 89 }
@@ -102,7 +98,7 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
102 98
103 dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER); 99 dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
104 if (dev_priv == NULL) 100 if (dev_priv == NULL)
105 return DRM_ERR(ENOMEM); 101 return -ENOMEM;
106 102
107 dev->dev_private = (void *)dev_priv; 103 dev->dev_private = (void *)dev_priv;
108 104
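
The error-path change in via_driver_load (and in every hunk of this patch) is notational on Linux: the old drm_os_linux.h wrapper DRM_ERR() expanded to a plain negation of the errno value, so DRM_ERR(ENOMEM) and -ENOMEM produce the same return code and dropping the wrapper only removes an OS-abstraction layer. A minimal standalone check, with the old macro reproduced here purely for the comparison:

    /* The DRM_ERR() wrapper removed by this patch expanded to a plain
     * negative errno on Linux, so the substitution preserves behaviour.
     * The #define below is reproduced for this comparison only.
     */
    #include <assert.h>
    #include <errno.h>

    #define DRM_ERR(d) (-(d))   /* as in the old drm_os_linux.h */

    int main(void)
    {
        assert(DRM_ERR(ENOMEM) == -ENOMEM);
        assert(DRM_ERR(EINVAL) == -EINVAL);
        return 0;
    }
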
diff --git a/drivers/char/drm/via_mm.c b/drivers/char/drm/via_mm.c
index 85d56acd9d82..9afc1684348d 100644
--- a/drivers/char/drm/via_mm.c
+++ b/drivers/char/drm/via_mm.c
@@ -33,18 +33,15 @@
33#define VIA_MM_ALIGN_SHIFT 4 33#define VIA_MM_ALIGN_SHIFT 4
34#define VIA_MM_ALIGN_MASK ( (1 << VIA_MM_ALIGN_SHIFT) - 1) 34#define VIA_MM_ALIGN_MASK ( (1 << VIA_MM_ALIGN_SHIFT) - 1)
35 35
36int via_agp_init(DRM_IOCTL_ARGS) 36int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
37{ 37{
38 DRM_DEVICE; 38 drm_via_agp_t *agp = data;
39 drm_via_agp_t agp;
40 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 39 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
41 int ret; 40 int ret;
42 41
43 DRM_COPY_FROM_USER_IOCTL(agp, (drm_via_agp_t __user *) data,
44 sizeof(agp));
45 mutex_lock(&dev->struct_mutex); 42 mutex_lock(&dev->struct_mutex);
46 ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0, 43 ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0,
47 agp.size >> VIA_MM_ALIGN_SHIFT); 44 agp->size >> VIA_MM_ALIGN_SHIFT);
48 45
49 if (ret) { 46 if (ret) {
50 DRM_ERROR("AGP memory manager initialisation error\n"); 47 DRM_ERROR("AGP memory manager initialisation error\n");
@@ -53,25 +50,22 @@ int via_agp_init(DRM_IOCTL_ARGS)
53 } 50 }
54 51
55 dev_priv->agp_initialized = 1; 52 dev_priv->agp_initialized = 1;
56 dev_priv->agp_offset = agp.offset; 53 dev_priv->agp_offset = agp->offset;
57 mutex_unlock(&dev->struct_mutex); 54 mutex_unlock(&dev->struct_mutex);
58 55
59 DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size); 56 DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size);
60 return 0; 57 return 0;
61} 58}
62 59
63int via_fb_init(DRM_IOCTL_ARGS) 60int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
64{ 61{
65 DRM_DEVICE; 62 drm_via_fb_t *fb = data;
66 drm_via_fb_t fb;
67 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 63 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
68 int ret; 64 int ret;
69 65
70 DRM_COPY_FROM_USER_IOCTL(fb, (drm_via_fb_t __user *) data, sizeof(fb));
71
72 mutex_lock(&dev->struct_mutex); 66 mutex_lock(&dev->struct_mutex);
73 ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0, 67 ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0,
74 fb.size >> VIA_MM_ALIGN_SHIFT); 68 fb->size >> VIA_MM_ALIGN_SHIFT);
75 69
76 if (ret) { 70 if (ret) {
77 DRM_ERROR("VRAM memory manager initialisation error\n"); 71 DRM_ERROR("VRAM memory manager initialisation error\n");
@@ -80,10 +74,10 @@ int via_fb_init(DRM_IOCTL_ARGS)
80 } 74 }
81 75
82 dev_priv->vram_initialized = 1; 76 dev_priv->vram_initialized = 1;
83 dev_priv->vram_offset = fb.offset; 77 dev_priv->vram_offset = fb->offset;
84 78
85 mutex_unlock(&dev->struct_mutex); 79 mutex_unlock(&dev->struct_mutex);
86 DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size); 80 DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size);
87 81
88 return 0; 82 return 0;
89 83
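
Both memory-manager init paths hand drm_sman_set_range a size expressed in 16-byte granules (VIA_MM_ALIGN_SHIFT is 4), shifting agp->size and fb->size directly and discarding any sub-granule remainder; the via_mem_alloc hunk that follows rounds each allocation request up to the same granularity before calling drm_sman_alloc. A short standalone illustration of that arithmetic, using the constants defined at the top of via_mm.c:

    /* Rounding used by via_mm.c: sizes are managed in 1 << 4 = 16-byte
     * granules.  round_to_granules() mirrors the tmpSize computation in
     * via_mem_alloc.
     */
    #include <assert.h>

    #define VIA_MM_ALIGN_SHIFT 4
    #define VIA_MM_ALIGN_MASK  ((1 << VIA_MM_ALIGN_SHIFT) - 1)

    static unsigned long round_to_granules(unsigned long bytes)
    {
        return (bytes + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
    }

    int main(void)
    {
        assert(round_to_granules(1)  == 1);  /* 1 byte still costs a granule */
        assert(round_to_granules(16) == 1);
        assert(round_to_granules(17) == 2);
        return 0;
    }
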
@@ -121,80 +115,71 @@ void via_lastclose(struct drm_device *dev)
121 mutex_unlock(&dev->struct_mutex); 115 mutex_unlock(&dev->struct_mutex);
122} 116}
123 117
124int via_mem_alloc(DRM_IOCTL_ARGS) 118int via_mem_alloc(struct drm_device *dev, void *data,
119 struct drm_file *file_priv)
125{ 120{
126 DRM_DEVICE; 121 drm_via_mem_t *mem = data;
127
128 drm_via_mem_t mem;
129 int retval = 0; 122 int retval = 0;
130 struct drm_memblock_item *item; 123 struct drm_memblock_item *item;
131 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 124 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
132 unsigned long tmpSize; 125 unsigned long tmpSize;
133 126
134 DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data, 127 if (mem->type > VIA_MEM_AGP) {
135 sizeof(mem));
136
137 if (mem.type > VIA_MEM_AGP) {
138 DRM_ERROR("Unknown memory type allocation\n"); 128 DRM_ERROR("Unknown memory type allocation\n");
139 return DRM_ERR(EINVAL); 129 return -EINVAL;
140 } 130 }
141 mutex_lock(&dev->struct_mutex); 131 mutex_lock(&dev->struct_mutex);
142 if (0 == ((mem.type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized : 132 if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
143 dev_priv->agp_initialized)) { 133 dev_priv->agp_initialized)) {
144 DRM_ERROR 134 DRM_ERROR
145 ("Attempt to allocate from uninitialized memory manager.\n"); 135 ("Attempt to allocate from uninitialized memory manager.\n");
146 mutex_unlock(&dev->struct_mutex); 136 mutex_unlock(&dev->struct_mutex);
147 return DRM_ERR(EINVAL); 137 return -EINVAL;
148 } 138 }
149 139
150 tmpSize = (mem.size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT; 140 tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
151 item = drm_sman_alloc(&dev_priv->sman, mem.type, tmpSize, 0, 141 item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0,
152 (unsigned long)priv); 142 (unsigned long)file_priv);
153 mutex_unlock(&dev->struct_mutex); 143 mutex_unlock(&dev->struct_mutex);
154 if (item) { 144 if (item) {
155 mem.offset = ((mem.type == VIA_MEM_VIDEO) ? 145 mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
156 dev_priv->vram_offset : dev_priv->agp_offset) + 146 dev_priv->vram_offset : dev_priv->agp_offset) +
157 (item->mm-> 147 (item->mm->
158 offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT); 148 offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT);
159 mem.index = item->user_hash.key; 149 mem->index = item->user_hash.key;
160 } else { 150 } else {
161 mem.offset = 0; 151 mem->offset = 0;
162 mem.size = 0; 152 mem->size = 0;
163 mem.index = 0; 153 mem->index = 0;
164 DRM_DEBUG("Video memory allocation failed\n"); 154 DRM_DEBUG("Video memory allocation failed\n");
165 retval = DRM_ERR(ENOMEM); 155 retval = -ENOMEM;
166 } 156 }
167 DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, sizeof(mem));
168 157
169 return retval; 158 return retval;
170} 159}
171 160
172int via_mem_free(DRM_IOCTL_ARGS) 161int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
173{ 162{
174 DRM_DEVICE;
175 drm_via_private_t *dev_priv = dev->dev_private; 163 drm_via_private_t *dev_priv = dev->dev_private;
176 drm_via_mem_t mem; 164 drm_via_mem_t *mem = data;
177 int ret; 165 int ret;
178 166
179 DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data,
180 sizeof(mem));
181
182 mutex_lock(&dev->struct_mutex); 167 mutex_lock(&dev->struct_mutex);
183 ret = drm_sman_free_key(&dev_priv->sman, mem.index); 168 ret = drm_sman_free_key(&dev_priv->sman, mem->index);
184 mutex_unlock(&dev->struct_mutex); 169 mutex_unlock(&dev->struct_mutex);
185 DRM_DEBUG("free = 0x%lx\n", mem.index); 170 DRM_DEBUG("free = 0x%lx\n", mem->index);
186 171
187 return ret; 172 return ret;
188} 173}
189 174
190 175
191void via_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) 176void via_reclaim_buffers_locked(struct drm_device * dev,
177 struct drm_file *file_priv)
192{ 178{
193 drm_via_private_t *dev_priv = dev->dev_private; 179 drm_via_private_t *dev_priv = dev->dev_private;
194 struct drm_file *priv = filp->private_data;
195 180
196 mutex_lock(&dev->struct_mutex); 181 mutex_lock(&dev->struct_mutex);
197 if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) { 182 if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
198 mutex_unlock(&dev->struct_mutex); 183 mutex_unlock(&dev->struct_mutex);
199 return; 184 return;
200 } 185 }
@@ -203,7 +188,7 @@ void via_reclaim_buffers_locked(struct drm_device * dev, struct file *filp)
203 dev->driver->dma_quiescent(dev); 188 dev->driver->dma_quiescent(dev);
204 } 189 }
205 190
206 drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)priv); 191 drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
207 mutex_unlock(&dev->struct_mutex); 192 mutex_unlock(&dev->struct_mutex);
208 return; 193 return;
209} 194}
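
With the new prototype, via_mem_alloc tags each drm_sman block with the address of the caller's struct drm_file (cast to unsigned long), and via_reclaim_buffers_locked sweeps a client's blocks using the same key, which is why the filp->private_data dereference disappears. The following is a simplified model of owner-keyed reclaim with invented names, sketching the idea rather than the drm_sman API:

    /* Simplified model of owner-keyed reclaim: every allocation records
     * the client's file object address as its owner tag, and cleanup for
     * a client walks the allocations with the same tag.
     */
    #include <stdio.h>

    struct fake_file { int id; };

    struct fake_block {
        unsigned long owner;   /* (unsigned long)file_priv in the patch */
        int in_use;
    };

    #define NBLOCKS 4
    static struct fake_block blocks[NBLOCKS];

    static void fake_alloc(int idx, struct fake_file *file_priv)
    {
        blocks[idx].owner  = (unsigned long)file_priv;
        blocks[idx].in_use = 1;
    }

    static void fake_reclaim(struct fake_file *file_priv)
    {
        for (int i = 0; i < NBLOCKS; i++)
            if (blocks[i].in_use &&
                blocks[i].owner == (unsigned long)file_priv)
                blocks[i].in_use = 0;
    }

    int main(void)
    {
        struct fake_file a = { 1 }, b = { 2 };

        fake_alloc(0, &a);
        fake_alloc(1, &b);
        fake_reclaim(&a);
        printf("block0 in_use=%d block1 in_use=%d\n",
               blocks[0].in_use, blocks[1].in_use);
        return 0;
    }
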
diff --git a/drivers/char/drm/via_verifier.c b/drivers/char/drm/via_verifier.c
index 832d48356e91..46a579198747 100644
--- a/drivers/char/drm/via_verifier.c
+++ b/drivers/char/drm/via_verifier.c
@@ -1026,12 +1026,12 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size,
1026 case state_error: 1026 case state_error:
1027 default: 1027 default:
1028 *hc_state = saved_state; 1028 *hc_state = saved_state;
1029 return DRM_ERR(EINVAL); 1029 return -EINVAL;
1030 } 1030 }
1031 } 1031 }
1032 if (state == state_error) { 1032 if (state == state_error) {
1033 *hc_state = saved_state; 1033 *hc_state = saved_state;
1034 return DRM_ERR(EINVAL); 1034 return -EINVAL;
1035 } 1035 }
1036 return 0; 1036 return 0;
1037} 1037}
@@ -1082,11 +1082,11 @@ via_parse_command_stream(struct drm_device * dev, const uint32_t * buf,
1082 break; 1082 break;
1083 case state_error: 1083 case state_error:
1084 default: 1084 default:
1085 return DRM_ERR(EINVAL); 1085 return -EINVAL;
1086 } 1086 }
1087 } 1087 }
1088 if (state == state_error) { 1088 if (state == state_error) {
1089 return DRM_ERR(EINVAL); 1089 return -EINVAL;
1090 } 1090 }
1091 return 0; 1091 return 0;
1092} 1092}
diff --git a/drivers/char/drm/via_video.c b/drivers/char/drm/via_video.c
index 300ac61b09ed..c15e75b54cb1 100644
--- a/drivers/char/drm/via_video.c
+++ b/drivers/char/drm/via_video.c
@@ -65,10 +65,9 @@ void via_release_futex(drm_via_private_t * dev_priv, int context)
65 } 65 }
66} 66}
67 67
68int via_decoder_futex(DRM_IOCTL_ARGS) 68int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv)
69{ 69{
70 DRM_DEVICE; 70 drm_via_futex_t *fx = data;
71 drm_via_futex_t fx;
72 volatile int *lock; 71 volatile int *lock;
73 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 72 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
74 drm_via_sarea_t *sAPriv = dev_priv->sarea_priv; 73 drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
@@ -76,21 +75,18 @@ int via_decoder_futex(DRM_IOCTL_ARGS)
76 75
77 DRM_DEBUG("%s\n", __FUNCTION__); 76 DRM_DEBUG("%s\n", __FUNCTION__);
78 77
79 DRM_COPY_FROM_USER_IOCTL(fx, (drm_via_futex_t __user *) data, 78 if (fx->lock > VIA_NR_XVMC_LOCKS)
80 sizeof(fx));
81
82 if (fx.lock > VIA_NR_XVMC_LOCKS)
83 return -EFAULT; 79 return -EFAULT;
84 80
85 lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx.lock); 81 lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock);
86 82
87 switch (fx.func) { 83 switch (fx->func) {
88 case VIA_FUTEX_WAIT: 84 case VIA_FUTEX_WAIT:
89 DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx.lock], 85 DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
90 (fx.ms / 10) * (DRM_HZ / 100), *lock != fx.val); 86 (fx->ms / 10) * (DRM_HZ / 100), *lock != fx->val);
91 return ret; 87 return ret;
92 case VIA_FUTEX_WAKE: 88 case VIA_FUTEX_WAKE:
93 DRM_WAKEUP(&(dev_priv->decoder_queue[fx.lock])); 89 DRM_WAKEUP(&(dev_priv->decoder_queue[fx->lock]));
94 return 0; 90 return 0;
95 } 91 }
96 return 0; 92 return 0;
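
The via_decoder_futex wait keeps the pre-existing timeout expression (fx->ms / 10) * (DRM_HZ / 100), which converts a millisecond timeout into scheduler ticks in 10 ms steps so the intermediate values stay small; this assumes DRM_HZ is the kernel HZ value. A standalone check of that arithmetic at a few plausible HZ settings (the helper name is invented):

    /* Tick conversion used by the futex wait above: work in 10 ms units,
     * so (ms / 10) * (HZ / 100) approximates ms * HZ / 1000 without large
     * intermediates.  Prints the resulting tick counts for a few HZ values.
     */
    #include <stdio.h>

    static unsigned int ms_to_ticks(unsigned int ms, unsigned int hz)
    {
        return (ms / 10) * (hz / 100);
    }

    int main(void)
    {
        const unsigned int hz_values[] = { 100, 250, 1000 };

        for (unsigned int i = 0; i < 3; i++) {
            unsigned int hz = hz_values[i];
            printf("HZ=%-4u  10ms -> %u ticks, 500ms -> %u ticks\n",
                   hz, ms_to_ticks(10, hz), ms_to_ticks(500, hz));
        }
        return 0;
    }
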