Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/pktcdvd.c               35
-rw-r--r--  drivers/char/tty_io.c                  7
-rw-r--r--  drivers/char/tty_ioctl.c               6
-rw-r--r--  drivers/gpu/drm/drm_irq.c             20
-rw-r--r--  drivers/gpu/drm/drm_lock.c            33
-rw-r--r--  drivers/gpu/drm/radeon/r300_cmdbuf.c 196
-rw-r--r--  drivers/gpu/drm/radeon/r300_reg.h      5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c    38
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h   19
-rw-r--r--  drivers/net/e1000/e1000_param.c       81
-rw-r--r--  drivers/s390/block/dcssblk.c           5
-rw-r--r--  drivers/scsi/sg.c                     17
12 files changed, 306 insertions(+), 156 deletions(-)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 158eed4d5161..29b7a648cc6e 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -49,7 +49,6 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/kthread.h>
-#include <linux/smp_lock.h>
 #include <linux/errno.h>
 #include <linux/spinlock.h>
 #include <linux/file.h>
@@ -2798,14 +2797,9 @@ out_mem:
 	return ret;
 }
 
-static long pkt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
 {
-	struct inode *inode = file->f_path.dentry->d_inode;
-	struct pktcdvd_device *pd;
-	long ret;
-
-	lock_kernel();
-	pd = inode->i_bdev->bd_disk->private_data;
+	struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;
 
 	VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode));
 
@@ -2818,8 +2812,7 @@ static long pkt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	case CDROM_LAST_WRITTEN:
 	case CDROM_SEND_PACKET:
 	case SCSI_IOCTL_SEND_COMMAND:
-		ret = blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
-		break;
+		return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
 
 	case CDROMEJECT:
 		/*
@@ -2828,15 +2821,14 @@ static long pkt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		 */
 		if (pd->refcnt == 1)
 			pkt_lock_door(pd, 0);
-		ret = blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
-		break;
+		return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
 
 	default:
 		VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
-		ret = -ENOTTY;
+		return -ENOTTY;
 	}
-	unlock_kernel();
-	return ret;
+
+	return 0;
 }
 
 static int pkt_media_changed(struct gendisk *disk)
@@ -2858,7 +2850,7 @@ static struct block_device_operations pktcdvd_ops = {
 	.owner			= THIS_MODULE,
 	.open			= pkt_open,
 	.release		= pkt_close,
-	.unlocked_ioctl		= pkt_ioctl,
+	.ioctl			= pkt_ioctl,
 	.media_changed		= pkt_media_changed,
 };
 
@@ -3023,8 +3015,7 @@ static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
 	mutex_unlock(&ctl_mutex);
 }
 
-static long pkt_ctl_ioctl(struct file *file, unsigned int cmd,
-						unsigned long arg)
+static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
 {
 	void __user *argp = (void __user *)arg;
 	struct pkt_ctrl_command ctrl_cmd;
@@ -3041,22 +3032,16 @@ static long pkt_ctl_ioctl(struct file *file, unsigned int cmd,
 	case PKT_CTRL_CMD_SETUP:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
-		lock_kernel();
 		ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
 		ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
-		unlock_kernel();
 		break;
 	case PKT_CTRL_CMD_TEARDOWN:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
-		lock_kernel();
 		ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
-		unlock_kernel();
 		break;
 	case PKT_CTRL_CMD_STATUS:
-		lock_kernel();
 		pkt_get_status(&ctrl_cmd);
-		unlock_kernel();
 		break;
 	default:
 		return -ENOTTY;
@@ -3069,7 +3054,7 @@ static long pkt_ctl_ioctl(struct file *file, unsigned int cmd,
 
 
 static const struct file_operations pkt_ctl_fops = {
-	.unlocked_ioctl	 = pkt_ctl_ioctl,
+	.ioctl		 = pkt_ctl_ioctl,
 	.owner		 = THIS_MODULE,
 };
 
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index a27160ba21d7..daeb8f766971 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -2498,7 +2498,7 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg)
 /**
  *	tty_do_resize		-	resize event
  *	@tty: tty being resized
- *	@real_tty: real tty (if using a pty/tty pair)
+ *	@real_tty: real tty (not the same as tty if using a pty/tty pair)
  *	@rows: rows (character)
  *	@cols: cols (character)
 *
@@ -2512,7 +2512,8 @@ int tty_do_resize(struct tty_struct *tty, struct tty_struct *real_tty,
 	struct pid *pgrp, *rpgrp;
 	unsigned long flags;
 
-	mutex_lock(&tty->termios_mutex);
+	/* For a PTY we need to lock the tty side */
+	mutex_lock(&real_tty->termios_mutex);
 	if (!memcmp(ws, &tty->winsize, sizeof(*ws)))
 		goto done;
 	/* Get the PID values and reference them so we can
@@ -2533,7 +2534,7 @@ int tty_do_resize(struct tty_struct *tty, struct tty_struct *real_tty,
 	tty->winsize = *ws;
 	real_tty->winsize = *ws;
 done:
-	mutex_unlock(&tty->termios_mutex);
+	mutex_unlock(&real_tty->termios_mutex);
 	return 0;
 }
 
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
index ea9fc5d03b99..bf34e4597421 100644
--- a/drivers/char/tty_ioctl.c
+++ b/drivers/char/tty_ioctl.c
@@ -937,12 +937,14 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
 		return 0;
 #endif
 	case TIOCGSOFTCAR:
-		return put_user(C_CLOCAL(tty) ? 1 : 0,
+		/* FIXME: for correctness we may need to take the termios
+		   lock here - review */
+		return put_user(C_CLOCAL(real_tty) ? 1 : 0,
 						(int __user *)arg);
 	case TIOCSSOFTCAR:
 		if (get_user(arg, (unsigned int __user *) arg))
 			return -EFAULT;
-		return tty_change_softcar(tty, arg);
+		return tty_change_softcar(real_tty, arg);
 	default:
 		return -ENOIOCTLCMD;
 	}
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 089c015c01d1..53f0e5af1cc8 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -400,27 +400,31 @@ static void drm_locked_tasklet_func(unsigned long data)
 {
 	struct drm_device *dev = (struct drm_device *)data;
 	unsigned long irqflags;
-
+	void (*tasklet_func)(struct drm_device *);
+
 	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+	tasklet_func = dev->locked_tasklet_func;
+	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
 
-	if (!dev->locked_tasklet_func ||
+	if (!tasklet_func ||
 	    !drm_lock_take(&dev->lock,
 			   DRM_KERNEL_CONTEXT)) {
-		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
 		return;
 	}
 
 	dev->lock.lock_time = jiffies;
 	atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
 
-	dev->locked_tasklet_func(dev);
+	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+	tasklet_func = dev->locked_tasklet_func;
+	dev->locked_tasklet_func = NULL;
+	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+
+	if (tasklet_func != NULL)
+		tasklet_func(dev);
 
 	drm_lock_free(&dev->lock,
 		      DRM_KERNEL_CONTEXT);
-
-	dev->locked_tasklet_func = NULL;
-
-	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
 }
 
 /**
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 0998723cde79..a4caf95485d7 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -105,14 +105,19 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 		  ret ? "interrupted" : "has lock");
 	if (ret) return ret;
 
-	sigemptyset(&dev->sigmask);
-	sigaddset(&dev->sigmask, SIGSTOP);
-	sigaddset(&dev->sigmask, SIGTSTP);
-	sigaddset(&dev->sigmask, SIGTTIN);
-	sigaddset(&dev->sigmask, SIGTTOU);
-	dev->sigdata.context = lock->context;
-	dev->sigdata.lock = dev->lock.hw_lock;
-	block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
+	/* don't set the block all signals on the master process for now
+	 * really probably not the correct answer but lets us debug xkb
+	 * xserver for now */
+	if (!file_priv->master) {
+		sigemptyset(&dev->sigmask);
+		sigaddset(&dev->sigmask, SIGSTOP);
+		sigaddset(&dev->sigmask, SIGTSTP);
+		sigaddset(&dev->sigmask, SIGTTIN);
+		sigaddset(&dev->sigmask, SIGTTOU);
+		dev->sigdata.context = lock->context;
+		dev->sigdata.lock = dev->lock.hw_lock;
+		block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
+	}
 
 	if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
 		dev->driver->dma_ready(dev);
@@ -150,6 +155,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	struct drm_lock *lock = data;
 	unsigned long irqflags;
+	void (*tasklet_func)(struct drm_device *);
 
 	if (lock->context == DRM_KERNEL_CONTEXT) {
 		DRM_ERROR("Process %d using kernel context %d\n",
@@ -158,14 +164,11 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	}
 
 	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
-
-	if (dev->locked_tasklet_func) {
-		dev->locked_tasklet_func(dev);
-
-		dev->locked_tasklet_func = NULL;
-	}
-
+	tasklet_func = dev->locked_tasklet_func;
+	dev->locked_tasklet_func = NULL;
 	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+	if (tasklet_func != NULL)
+		tasklet_func(dev);
 
 	atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
 
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 702df45320f7..4b27d9abb7bc 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -77,6 +77,9 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
 			return -EFAULT;
 		}
 
+		box.x2--; /* Hardware expects inclusive bottom-right corner */
+		box.y2--;
+
 		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
 			box.x1 = (box.x1) &
 				R300_CLIPRECT_MASK;
@@ -95,8 +98,8 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
 				R300_CLIPRECT_MASK;
 			box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
 				R300_CLIPRECT_MASK;
-
 		}
+
 		OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
 			 (box.y1 << R300_CLIPRECT_Y_SHIFT));
 		OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
@@ -136,6 +139,18 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
 		ADVANCE_RING();
 	}
 
+	/* flus cache and wait idle clean after cliprect change */
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(R300_RB3D_DC_FLUSH);
+	ADVANCE_RING();
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+	ADVANCE_RING();
+	/* set flush flag */
+	dev_priv->track_flush |= RADEON_FLUSH_EMITED;
+
 	return 0;
 }
 
@@ -166,13 +181,13 @@ void r300_init_reg_flags(struct drm_device *dev)
 	ADD_RANGE(0x21DC, 1);
 	ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
 	ADD_RANGE(R300_VAP_CLIP_X_0, 4);
-	ADD_RANGE(R300_VAP_PVS_WAITIDLE, 1);
+	ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1);
 	ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
 	ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
 	ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
 	ADD_RANGE(R300_GB_ENABLE, 1);
 	ADD_RANGE(R300_GB_MSPOS0, 5);
-	ADD_RANGE(R300_TX_CNTL, 1);
+	ADD_RANGE(R300_TX_INVALTAGS, 1);
 	ADD_RANGE(R300_TX_ENABLE, 1);
 	ADD_RANGE(0x4200, 4);
 	ADD_RANGE(0x4214, 1);
@@ -388,15 +403,28 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
 	if (sz * 16 > cmdbuf->bufsz)
 		return -EINVAL;
 
-	BEGIN_RING(5 + sz * 4);
-	/* Wait for VAP to come to senses.. */
-	/* there is no need to emit it multiple times, (only once before VAP is programmed,
-	   but this optimization is for later */
-	OUT_RING_REG(R300_VAP_PVS_WAITIDLE, 0);
+	/* VAP is very sensitive so we purge cache before we program it
+	 * and we also flush its state before & after */
+	BEGIN_RING(6);
+	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(R300_RB3D_DC_FLUSH);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+	OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
+	OUT_RING(0);
+	ADVANCE_RING();
+	/* set flush flag */
+	dev_priv->track_flush |= RADEON_FLUSH_EMITED;
+
+	BEGIN_RING(3 + sz * 4);
 	OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
 	OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
 	OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
+	ADVANCE_RING();
 
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
+	OUT_RING(0);
 	ADVANCE_RING();
 
 	cmdbuf->buf += sz * 16;
@@ -424,6 +452,15 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
 	OUT_RING_TABLE((int *)cmdbuf->buf, 8);
 	ADVANCE_RING();
 
+	BEGIN_RING(4);
+	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(R300_RB3D_DC_FLUSH);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+	ADVANCE_RING();
+	/* set flush flag */
+	dev_priv->track_flush |= RADEON_FLUSH_EMITED;
+
 	cmdbuf->buf += 8 * 4;
 	cmdbuf->bufsz -= 8 * 4;
 
@@ -543,22 +580,23 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
 	return 0;
 }
 
-static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv,
+static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
 					    drm_radeon_kcmd_buffer_t *cmdbuf)
 {
-	u32 *cmd = (u32 *) cmdbuf->buf;
-	int count, ret;
+	u32 *cmd;
+	int count;
+	int expected_count;
 	RING_LOCALS;
 
-	count=(cmd[0]>>16) & 0x3fff;
+	cmd = (u32 *) cmdbuf->buf;
+	count = (cmd[0]>>16) & 0x3fff;
+	expected_count = cmd[1] >> 16;
+	if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
+		expected_count = (expected_count+1)/2;
 
-	if ((cmd[1] & 0x8000ffff) != 0x80000810) {
-		DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
-		return -EINVAL;
-	}
-	ret = !radeon_check_offset(dev_priv, cmd[2]);
-	if (ret) {
-		DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
+	if (count && count != expected_count) {
+		DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n",
+			  count, expected_count);
 		return -EINVAL;
 	}
 
@@ -570,6 +608,50 @@ static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv,
 	cmdbuf->buf += (count+2)*4;
 	cmdbuf->bufsz -= (count+2)*4;
 
+	if (!count) {
+		drm_r300_cmd_header_t header;
+
+		if (cmdbuf->bufsz < 4*4 + sizeof(header)) {
+			DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
+			return -EINVAL;
+		}
+
+		header.u = *(unsigned int *)cmdbuf->buf;
+
+		cmdbuf->buf += sizeof(header);
+		cmdbuf->bufsz -= sizeof(header);
+		cmd = (u32 *) cmdbuf->buf;
+
+		if (header.header.cmd_type != R300_CMD_PACKET3 ||
+		    header.packet3.packet != R300_CMD_PACKET3_RAW ||
+		    cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
+			DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
+			return -EINVAL;
+		}
+
+		if ((cmd[1] & 0x8000ffff) != 0x80000810) {
+			DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
+			return -EINVAL;
+		}
+		if (!radeon_check_offset(dev_priv, cmd[2])) {
+			DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
+			return -EINVAL;
+		}
+		if (cmd[3] != expected_count) {
+			DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
+				  cmd[3], expected_count);
+			return -EINVAL;
+		}
+
+		BEGIN_RING(4);
+		OUT_RING(cmd[0]);
+		OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
+		ADVANCE_RING();
+
+		cmdbuf->buf += 4*4;
+		cmdbuf->bufsz -= 4*4;
+	}
+
 	return 0;
 }
 
@@ -613,11 +695,22 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
 	case RADEON_CNTL_BITBLT_MULTI:
 		return r300_emit_bitblt_multi(dev_priv, cmdbuf);
 
-	case RADEON_CP_INDX_BUFFER:	/* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
-		return r300_emit_indx_buffer(dev_priv, cmdbuf);
-	case RADEON_CP_3D_DRAW_IMMD_2:	/* triggers drawing using in-packet vertex data */
-	case RADEON_CP_3D_DRAW_VBUF_2:	/* triggers drawing of vertex buffers setup elsewhere */
-	case RADEON_CP_3D_DRAW_INDX_2:	/* triggers drawing using indices to vertex buffer */
+	case RADEON_CP_INDX_BUFFER:
+		DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n");
+		return -EINVAL;
+	case RADEON_CP_3D_DRAW_IMMD_2:
+		/* triggers drawing using in-packet vertex data */
+	case RADEON_CP_3D_DRAW_VBUF_2:
+		/* triggers drawing of vertex buffers setup elsewhere */
+		dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
+					   RADEON_PURGE_EMITED);
+		break;
+	case RADEON_CP_3D_DRAW_INDX_2:
+		/* triggers drawing using indices to vertex buffer */
+		/* whenever we send vertex we clear flush & purge */
+		dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
+					   RADEON_PURGE_EMITED);
+		return r300_emit_draw_indx_2(dev_priv, cmdbuf);
 	case RADEON_WAIT_FOR_IDLE:
 	case RADEON_CP_NOP:
 		/* these packets are safe */
@@ -713,17 +806,53 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
  */
 static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
 {
+	uint32_t cache_z, cache_3d, cache_2d;
 	RING_LOCALS;
 
-	BEGIN_RING(6);
-	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
-	OUT_RING(R300_RB3D_DSTCACHE_UNKNOWN_0A);
+	cache_z = R300_ZC_FLUSH;
+	cache_2d = R300_RB2D_DC_FLUSH;
+	cache_3d = R300_RB3D_DC_FLUSH;
+	if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
+		/* we can purge, primitive where draw since last purge */
+		cache_z |= R300_ZC_FREE;
+		cache_2d |= R300_RB2D_DC_FREE;
+		cache_3d |= R300_RB3D_DC_FREE;
+	}
+
+	/* flush & purge zbuffer */
+	BEGIN_RING(2);
 	OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
-	OUT_RING(R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE|
-		R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE);
-	OUT_RING(CP_PACKET3(RADEON_CP_NOP, 0));
-	OUT_RING(0x0);
+	OUT_RING(cache_z);
+	ADVANCE_RING();
+	/* flush & purge 3d */
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(cache_3d);
+	ADVANCE_RING();
+	/* flush & purge texture */
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0));
+	OUT_RING(0);
+	ADVANCE_RING();
+	/* FIXME: is this one really needed ? */
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0));
+	OUT_RING(0);
+	ADVANCE_RING();
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+	ADVANCE_RING();
+	/* flush & purge 2d through E2 as RB2D will trigger lockup */
+	BEGIN_RING(4);
+	OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(cache_2d);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(RADEON_WAIT_2D_IDLECLEAN |
+		 RADEON_WAIT_HOST_IDLECLEAN);
 	ADVANCE_RING();
+	/* set flush & purge flags */
+	dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
 }
 
 /**
@@ -905,8 +1034,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
 
 	DRM_DEBUG("\n");
 
-	/* See the comment above r300_emit_begin3d for why this call must be here,
-	 * and what the cleanup gotos are for. */
+	/* pacify */
 	r300_pacify(dev_priv);
 
 	if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index a6802f26afc4..ee6f811599a3 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -317,7 +317,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
  * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
  * avoids bugs caused by still running shaders reading bad data from memory.
  */
-#define R300_VAP_PVS_WAITIDLE		0x2284 /* GUESS */
+#define R300_VAP_PVS_STATE_FLUSH_REG	0x2284
 
 /* Absolutely no clue what this register is about. */
 #define R300_VAP_UNKNOWN_2288		0x2288
@@ -513,7 +513,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 /* gap */
 
 /* Zero to flush caches. */
-#define R300_TX_CNTL			0x4100
+#define R300_TX_INVALTAGS		0x4100
 #define R300_TX_FLUSH			0x0
 
 /* The upper enable bits are guessed, based on fglrx reported limits. */
@@ -1362,6 +1362,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 #define R300_RB3D_COLORPITCH2		0x4E40 /* GUESS */
 #define R300_RB3D_COLORPITCH3		0x4E44 /* GUESS */
 
+#define R300_RB3D_AARESOLVE_CTL		0x4E88
 /* gap */
 
 /* Guess by Vladimir.
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index f0de81a5689d..3331f88dcfb6 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -40,6 +40,7 @@
 #define RADEON_FIFO_DEBUG	0
 
 static int radeon_do_cleanup_cp(struct drm_device * dev);
+static void radeon_do_cp_start(drm_radeon_private_t * dev_priv);
 
 static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
 {
@@ -198,23 +199,8 @@ static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
 			DRM_UDELAY(1);
 		}
 	} else {
-		/* 3D */
-		tmp = RADEON_READ(R300_RB3D_DSTCACHE_CTLSTAT);
-		tmp |= RADEON_RB3D_DC_FLUSH_ALL;
-		RADEON_WRITE(R300_RB3D_DSTCACHE_CTLSTAT, tmp);
-
-		/* 2D */
-		tmp = RADEON_READ(R300_DSTCACHE_CTLSTAT);
-		tmp |= RADEON_RB3D_DC_FLUSH_ALL;
-		RADEON_WRITE(R300_DSTCACHE_CTLSTAT, tmp);
-
-		for (i = 0; i < dev_priv->usec_timeout; i++) {
-			if (!(RADEON_READ(R300_DSTCACHE_CTLSTAT)
-			      & RADEON_RB3D_DC_BUSY)) {
-				return 0;
-			}
-			DRM_UDELAY(1);
-		}
+		/* don't flush or purge cache here or lockup */
+		return 0;
 	}
 
 #if RADEON_FIFO_DEBUG
@@ -237,6 +223,9 @@ static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
 			return 0;
 		DRM_UDELAY(1);
 	}
+	DRM_INFO("wait for fifo failed status : 0x%08X 0x%08X\n",
+		 RADEON_READ(RADEON_RBBM_STATUS),
+		 RADEON_READ(R300_VAP_CNTL_STATUS));
 
 #if RADEON_FIFO_DEBUG
 	DRM_ERROR("failed!\n");
@@ -263,6 +252,9 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
 		}
 		DRM_UDELAY(1);
 	}
+	DRM_INFO("wait idle failed status : 0x%08X 0x%08X\n",
+		 RADEON_READ(RADEON_RBBM_STATUS),
+		 RADEON_READ(R300_VAP_CNTL_STATUS));
 
 #if RADEON_FIFO_DEBUG
 	DRM_ERROR("failed!\n");
@@ -443,14 +435,20 @@ static void radeon_do_cp_start(drm_radeon_private_t * dev_priv)
 
 	dev_priv->cp_running = 1;
 
-	BEGIN_RING(6);
-
+	BEGIN_RING(8);
+	/* isync can only be written through cp on r5xx write it here */
+	OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0));
+	OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D |
+		 RADEON_ISYNC_ANY3D_IDLE2D |
+		 RADEON_ISYNC_WAIT_IDLEGUI |
+		 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
 	RADEON_PURGE_CACHE();
 	RADEON_PURGE_ZCACHE();
 	RADEON_WAIT_UNTIL_IDLE();
-
 	ADVANCE_RING();
 	COMMIT_RING();
+
+	dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
 }
 
 /* Reset the Command Processor.  This will not flush any pending
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 3f0eca957aa7..099381693175 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -220,6 +220,9 @@ struct radeon_virt_surface {
 	struct drm_file *file_priv;
 };
 
+#define RADEON_FLUSH_EMITED	(1 < 0)
+#define RADEON_PURGE_EMITED	(1 < 1)
+
 typedef struct drm_radeon_private {
 	drm_radeon_ring_buffer_t ring;
 	drm_radeon_sarea_t *sarea_priv;
@@ -311,6 +314,7 @@ typedef struct drm_radeon_private {
 	unsigned long fb_aper_offset;
 
 	int num_gb_pipes;
+	int track_flush;
 } drm_radeon_private_t;
 
 typedef struct drm_radeon_buf_priv {
@@ -693,7 +697,6 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
 #define R300_ZB_ZCACHE_CTLSTAT			0x4f18
 #	define R300_ZC_FLUSH			(1 << 0)
 #	define R300_ZC_FREE			(1 << 1)
-#	define R300_ZC_FLUSH_ALL		0x3
 #	define R300_ZC_BUSY			(1 << 31)
 #define RADEON_RB3D_DSTCACHE_CTLSTAT		0x325c
 #	define RADEON_RB3D_DC_FLUSH		(3 << 0)
@@ -701,6 +704,8 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
 #	define RADEON_RB3D_DC_FLUSH_ALL		0xf
 #	define RADEON_RB3D_DC_BUSY		(1 << 31)
 #define R300_RB3D_DSTCACHE_CTLSTAT		0x4e4c
+#	define R300_RB3D_DC_FLUSH		(2 << 0)
+#	define R300_RB3D_DC_FREE		(2 << 2)
 #	define R300_RB3D_DC_FINISH		(1 << 4)
 #define RADEON_RB3D_ZSTENCILCNTL		0x1c2c
 #	define RADEON_Z_TEST_MASK		(7 << 4)
@@ -1246,17 +1251,17 @@ do { \
 		OUT_RING(RADEON_RB3D_DC_FLUSH); \
 	} else { \
 		OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
-		OUT_RING(RADEON_RB3D_DC_FLUSH); \
+		OUT_RING(R300_RB3D_DC_FLUSH); \
 	} \
 } while (0)
 
 #define RADEON_PURGE_CACHE() do { \
 	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
 		OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
-		OUT_RING(RADEON_RB3D_DC_FLUSH_ALL); \
+		OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE); \
 	} else { \
 		OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
-		OUT_RING(RADEON_RB3D_DC_FLUSH_ALL); \
+		OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); \
 	} \
 } while (0)
 
@@ -1273,10 +1278,10 @@ do { \
 #define RADEON_PURGE_ZCACHE() do { \
 	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
 		OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \
-		OUT_RING(RADEON_RB3D_ZC_FLUSH_ALL); \
+		OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE); \
	} else { \
-		OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
-		OUT_RING(R300_ZC_FLUSH_ALL); \
+		OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \
+		OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE); \
 	} \
 } while (0)
 
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index b9f90a5d3d4d..213437d13154 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -208,7 +208,7 @@ struct e1000_option {
 	} r;
 	struct { /* list_option info */
 		int nr;
-		struct e1000_opt_list { int i; char *str; } *p;
+		const struct e1000_opt_list { int i; char *str; } *p;
 	} l;
 	} arg;
 };
@@ -242,7 +242,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
 		break;
 	case list_option: {
 		int i;
-		struct e1000_opt_list *ent;
+		const struct e1000_opt_list *ent;
 
 		for (i = 0; i < opt->arg.l.nr; i++) {
 			ent = &opt->arg.l.p[i];
@@ -279,7 +279,9 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter);
 
 void __devinit e1000_check_options(struct e1000_adapter *adapter)
 {
+	struct e1000_option opt;
 	int bd = adapter->bd_number;
+
 	if (bd >= E1000_MAX_NIC) {
 		DPRINTK(PROBE, NOTICE,
 			"Warning: no configuration for board #%i\n", bd);
@@ -287,19 +289,21 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
 	}
 
 	{ /* Transmit Descriptor Count */
-		struct e1000_option opt = {
+		struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+		int i;
+		e1000_mac_type mac_type = adapter->hw.mac_type;
+
+		opt = (struct e1000_option) {
 			.type = range_option,
 			.name = "Transmit Descriptors",
 			.err  = "using default of "
 				__MODULE_STRING(E1000_DEFAULT_TXD),
 			.def  = E1000_DEFAULT_TXD,
-			.arg  = { .r = { .min = E1000_MIN_TXD }}
+			.arg  = { .r = {
+				.min = E1000_MIN_TXD,
+				.max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD
+			}}
 		};
-		struct e1000_tx_ring *tx_ring = adapter->tx_ring;
-		int i;
-		e1000_mac_type mac_type = adapter->hw.mac_type;
-		opt.arg.r.max = mac_type < e1000_82544 ?
-			E1000_MAX_TXD : E1000_MAX_82544_TXD;
 
 		if (num_TxDescriptors > bd) {
 			tx_ring->count = TxDescriptors[bd];
@@ -313,19 +317,21 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
 			tx_ring[i].count = tx_ring->count;
 	}
 	{ /* Receive Descriptor Count */
-		struct e1000_option opt = {
+		struct e1000_rx_ring *rx_ring = adapter->rx_ring;
+		int i;
+		e1000_mac_type mac_type = adapter->hw.mac_type;
+
+		opt = (struct e1000_option) {
 			.type = range_option,
 			.name = "Receive Descriptors",
 			.err  = "using default of "
 				__MODULE_STRING(E1000_DEFAULT_RXD),
 			.def  = E1000_DEFAULT_RXD,
-			.arg  = { .r = { .min = E1000_MIN_RXD }}
+			.arg  = { .r = {
+				.min = E1000_MIN_RXD,
+				.max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD
+			}}
 		};
-		struct e1000_rx_ring *rx_ring = adapter->rx_ring;
-		int i;
-		e1000_mac_type mac_type = adapter->hw.mac_type;
-		opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD :
-			E1000_MAX_82544_RXD;
 
 		if (num_RxDescriptors > bd) {
 			rx_ring->count = RxDescriptors[bd];
@@ -339,7 +345,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
 			rx_ring[i].count = rx_ring->count;
 	}
 	{ /* Checksum Offload Enable/Disable */
-		struct e1000_option opt = {
+		opt = (struct e1000_option) {
 			.type = enable_option,
 			.name = "Checksum Offload",
 			.err  = "defaulting to Enabled",
@@ -363,7 +369,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
 			 { E1000_FC_FULL,    "Flow Control Enabled" },
 			 { E1000_FC_DEFAULT, "Flow Control Hardware Default" }};
 
-		struct e1000_option opt = {
+		opt = (struct e1000_option) {
 			.type = list_option,
 			.name = "Flow Control",
 			.err  = "reading default settings from EEPROM",
@@ -381,7 +387,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
 		}
 	}
 	{ /* Transmit Interrupt Delay */
-		struct e1000_option opt = {
+		opt = (struct e1000_option) {
 			.type = range_option,
 			.name = "Transmit Interrupt Delay",
 			.err  = "using default of " __MODULE_STRING(DEFAULT_TIDV),
@@ -399,7 +405,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
 		}
 	}
 	{ /* Transmit Absolute Interrupt Delay */
-		struct e1000_option opt = {
+		opt = (struct e1000_option) {
 			.type = range_option,
 			.name = "Transmit Absolute Interrupt Delay",
 			.err  = "using default of " __MODULE_STRING(DEFAULT_TADV),
@@ -417,7 +423,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
 		}
 	}
 	{ /* Receive Interrupt Delay */
-		struct e1000_option opt = {
+		opt = (struct e1000_option) {
 			.type = range_option,
 			.name = "Receive Interrupt Delay",
 			.err  = "using default of " __MODULE_STRING(DEFAULT_RDTR),
@@ -435,7 +441,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
 		}
 	}
 	{ /* Receive Absolute Interrupt Delay */
-		struct e1000_option opt = {
+		opt = (struct e1000_option) {
 			.type = range_option,
 			.name = "Receive Absolute Interrupt Delay",
 			.err  = "using default of " __MODULE_STRING(DEFAULT_RADV),
@@ -453,7 +459,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
 		}
 	}
 	{ /* Interrupt Throttling Rate */
-		struct e1000_option opt = {
+		opt = (struct e1000_option) {
 			.type = range_option,
 			.name = "Interrupt Throttling Rate (ints/sec)",
 			.err  = "using default of " __MODULE_STRING(DEFAULT_ITR),
@@ -497,7 +503,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
 		}
 	}
 	{ /* Smart Power Down */
-		struct e1000_option opt = {
+		opt = (struct e1000_option) {
 			.type = enable_option,
 			.name = "PHY Smart Power Down",
 			.err  = "defaulting to Disabled",
@@ -513,7 +519,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
 		}
 	}
 	{ /* Kumeran Lock Loss Workaround */
-		struct e1000_option opt = {
+		opt = (struct e1000_option) {
 			.type = enable_option,
 			.name = "Kumeran Lock Loss Workaround",
 			.err  = "defaulting to Enabled",
@@ -578,16 +584,18 @@ static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
 
 static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
 {
+	struct e1000_option opt;
 	unsigned int speed, dplx, an;
 	int bd = adapter->bd_number;
 
 	{ /* Speed */
-		struct e1000_opt_list speed_list[] = {{          0, "" },
-						      {   SPEED_10, "" },
-						      {  SPEED_100, "" },
-						      { SPEED_1000, "" }};
+		static const struct e1000_opt_list speed_list[] = {
+			{          0, "" },
+			{   SPEED_10, "" },
+			{  SPEED_100, "" },
+			{ SPEED_1000, "" }};
 
-		struct e1000_option opt = {
+		opt = (struct e1000_option) {
 			.type = list_option,
 			.name = "Speed",
 			.err  = "parameter ignored",
@@ -604,11 +612,12 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
 		}
 	}
 	{ /* Duplex */
-		struct e1000_opt_list dplx_list[] = {{           0, "" },
-						     { HALF_DUPLEX, "" },
-						     { FULL_DUPLEX, "" }};
+		static const struct e1000_opt_list dplx_list[] = {
+			{           0, "" },
+			{ HALF_DUPLEX, "" },
+			{ FULL_DUPLEX, "" }};
 
-		struct e1000_option opt = {
+		opt = (struct e1000_option) {
 			.type = list_option,
 			.name = "Duplex",
 			.err  = "parameter ignored",
@@ -637,7 +646,7 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
 			"parameter ignored\n");
 		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
 	} else { /* Autoneg */
-		struct e1000_opt_list an_list[] =
+		static const struct e1000_opt_list an_list[] =
 	#define AA "AutoNeg advertising "
 			{{ 0x01, AA "10/HD" },
 			 { 0x02, AA "10/FD" },
@@ -671,7 +680,7 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
 			 { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
 			 { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }};
 
-		struct e1000_option opt = {
+		opt = (struct e1000_option) {
 			.type = list_option,
 			.name = "AutoNeg",
 			.err  = "parameter ignored",
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index db85f1fb131e..711b3004b3e6 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -384,9 +384,10 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
 	 * get minor, add to list
 	 */
 	down_write(&dcssblk_devices_sem);
-	if (dcssblk_get_segment_by_name(local_buf)) {
+	if (dcssblk_get_device_by_name(local_buf)) {
+		up_write(&dcssblk_devices_sem);
 		rc = -EEXIST;
-		goto release_gd;
+		goto unload_seg;
 	}
 	rc = dcssblk_assign_free_minor(dev_info);
 	if (rc) {
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 3d36270a8b4d..661f9f21650a 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -217,6 +217,18 @@ static int sg_last_dev(void);
 #define SZ_SG_IOVEC sizeof(sg_iovec_t)
 #define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
 
+static int sg_allow_access(struct file *filp, unsigned char *cmd)
+{
+	struct sg_fd *sfp = (struct sg_fd *)filp->private_data;
+	struct request_queue *q = sfp->parentdp->device->request_queue;
+
+	if (sfp->parentdp->device->type == TYPE_SCANNER)
+		return 0;
+
+	return blk_verify_command(&q->cmd_filter,
+				  cmd, filp->f_mode & FMODE_WRITE);
+}
+
 static int
 sg_open(struct inode *inode, struct file *filp)
 {
@@ -689,7 +701,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
 		sg_remove_request(sfp, srp);
 		return -EFAULT;
 	}
-	if (read_only && !blk_verify_command(file, cmnd)) {
+	if (read_only && sg_allow_access(file, cmnd)) {
 		sg_remove_request(sfp, srp);
 		return -EPERM;
 	}
@@ -793,6 +805,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
 
 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
 		return -ENXIO;
+
 	SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
 				   sdp->disk->disk_name, (int) cmd_in));
 	read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
@@ -1061,7 +1074,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
 
 		if (copy_from_user(&opcode, siocp->data, 1))
 			return -EFAULT;
-		if (!blk_verify_command(filp, &opcode))
+		if (sg_allow_access(filp, &opcode))
 			return -EPERM;
 	}
 	return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p);