Diffstat (limited to 'drivers')
-rw-r--r--	drivers/block/pktcdvd.c	35
-rw-r--r--	drivers/gpu/drm/drm_irq.c	20
-rw-r--r--	drivers/gpu/drm/drm_lock.c	33
-rw-r--r--	drivers/gpu/drm/radeon/r300_cmdbuf.c	196
-rw-r--r--	drivers/gpu/drm/radeon/r300_reg.h	5
-rw-r--r--	drivers/gpu/drm/radeon/radeon_cp.c	38
-rw-r--r--	drivers/gpu/drm/radeon/radeon_drv.h	19
-rw-r--r--	drivers/lguest/lguest_device.c	8
-rw-r--r--	drivers/misc/acer-wmi.c	5
-rw-r--r--	drivers/net/bnx2x.h	2
-rw-r--r--	drivers/net/bnx2x_main.c	220
-rw-r--r--	drivers/net/e1000/e1000_param.c	81
-rw-r--r--	drivers/oprofile/cpu_buffer.c	4
-rw-r--r--	drivers/oprofile/event_buffer.c	2
-rw-r--r--	drivers/pci/hotplug/pciehp.h	1
-rw-r--r--	drivers/pci/hotplug/pciehp_core.c	21
-rw-r--r--	drivers/pci/hotplug/pciehp_hpc.c	11
-rw-r--r--	drivers/pci/hotplug/shpchp_core.c	34
-rw-r--r--	drivers/pci/search.c	2
-rw-r--r--	drivers/s390/block/dcssblk.c	5
-rw-r--r--	drivers/scsi/sg.c	17
-rw-r--r--	drivers/virtio/virtio_balloon.c	2
-rw-r--r--	drivers/xen/manage.c	2
23 files changed, 464 insertions(+), 299 deletions(-)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 158eed4d5161..29b7a648cc6e 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -49,7 +49,6 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/kthread.h>
-#include <linux/smp_lock.h>
 #include <linux/errno.h>
 #include <linux/spinlock.h>
 #include <linux/file.h>
@@ -2798,14 +2797,9 @@ out_mem:
 	return ret;
 }
 
-static long pkt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
 {
-	struct inode *inode = file->f_path.dentry->d_inode;
-	struct pktcdvd_device *pd;
-	long ret;
-
-	lock_kernel();
-	pd = inode->i_bdev->bd_disk->private_data;
+	struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;
 
 	VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode));
 
@@ -2818,8 +2812,7 @@ static long pkt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	case CDROM_LAST_WRITTEN:
 	case CDROM_SEND_PACKET:
 	case SCSI_IOCTL_SEND_COMMAND:
-		ret = blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
-		break;
+		return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
 
 	case CDROMEJECT:
 		/*
@@ -2828,15 +2821,14 @@ static long pkt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		 */
 		if (pd->refcnt == 1)
 			pkt_lock_door(pd, 0);
-		ret = blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
-		break;
+		return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
 
 	default:
 		VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
-		ret = -ENOTTY;
+		return -ENOTTY;
 	}
-	unlock_kernel();
-	return ret;
+
+	return 0;
 }
 
 static int pkt_media_changed(struct gendisk *disk)
@@ -2858,7 +2850,7 @@ static struct block_device_operations pktcdvd_ops = {
 	.owner =		THIS_MODULE,
 	.open =			pkt_open,
 	.release =		pkt_close,
-	.unlocked_ioctl =	pkt_ioctl,
+	.ioctl =		pkt_ioctl,
 	.media_changed =	pkt_media_changed,
 };
 
@@ -3023,8 +3015,7 @@ static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
 	mutex_unlock(&ctl_mutex);
 }
 
-static long pkt_ctl_ioctl(struct file *file, unsigned int cmd,
-				unsigned long arg)
+static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
 {
 	void __user *argp = (void __user *)arg;
 	struct pkt_ctrl_command ctrl_cmd;
@@ -3041,22 +3032,16 @@ static long pkt_ctl_ioctl(struct file *file, unsigned int cmd,
 	case PKT_CTRL_CMD_SETUP:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
-		lock_kernel();
 		ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
 		ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
-		unlock_kernel();
 		break;
 	case PKT_CTRL_CMD_TEARDOWN:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
-		lock_kernel();
 		ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
-		unlock_kernel();
 		break;
 	case PKT_CTRL_CMD_STATUS:
-		lock_kernel();
 		pkt_get_status(&ctrl_cmd);
-		unlock_kernel();
 		break;
 	default:
 		return -ENOTTY;
@@ -3069,7 +3054,7 @@ static long pkt_ctl_ioctl(struct file *file, unsigned int cmd,
 
 
 static const struct file_operations pkt_ctl_fops = {
-	.unlocked_ioctl	= pkt_ctl_ioctl,
+	.ioctl		= pkt_ctl_ioctl,
 	.owner		= THIS_MODULE,
 };
 
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 089c015c01d1..53f0e5af1cc8 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -400,27 +400,31 @@ static void drm_locked_tasklet_func(unsigned long data)
 {
 	struct drm_device *dev = (struct drm_device *)data;
 	unsigned long irqflags;
+	void (*tasklet_func)(struct drm_device *);
 
 	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+	tasklet_func = dev->locked_tasklet_func;
+	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
 
-	if (!dev->locked_tasklet_func ||
+	if (!tasklet_func ||
 	    !drm_lock_take(&dev->lock,
 			   DRM_KERNEL_CONTEXT)) {
-		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
 		return;
 	}
 
 	dev->lock.lock_time = jiffies;
 	atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
 
-	dev->locked_tasklet_func(dev);
+	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+	tasklet_func = dev->locked_tasklet_func;
+	dev->locked_tasklet_func = NULL;
+	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+
+	if (tasklet_func != NULL)
+		tasklet_func(dev);
 
 	drm_lock_free(&dev->lock,
 		      DRM_KERNEL_CONTEXT);
-
-	dev->locked_tasklet_func = NULL;
-
-	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
 }
 
 /**
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 0998723cde79..a4caf95485d7 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -105,14 +105,19 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 		  ret ? "interrupted" : "has lock");
 	if (ret) return ret;
 
-	sigemptyset(&dev->sigmask);
-	sigaddset(&dev->sigmask, SIGSTOP);
-	sigaddset(&dev->sigmask, SIGTSTP);
-	sigaddset(&dev->sigmask, SIGTTIN);
-	sigaddset(&dev->sigmask, SIGTTOU);
-	dev->sigdata.context = lock->context;
-	dev->sigdata.lock = dev->lock.hw_lock;
-	block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
+	/* don't set the block all signals on the master process for now
+	 * really probably not the correct answer but lets us debug xkb
+	 * xserver for now */
+	if (!file_priv->master) {
+		sigemptyset(&dev->sigmask);
+		sigaddset(&dev->sigmask, SIGSTOP);
+		sigaddset(&dev->sigmask, SIGTSTP);
+		sigaddset(&dev->sigmask, SIGTTIN);
+		sigaddset(&dev->sigmask, SIGTTOU);
+		dev->sigdata.context = lock->context;
+		dev->sigdata.lock = dev->lock.hw_lock;
+		block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
+	}
 
 	if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
 		dev->driver->dma_ready(dev);
@@ -150,6 +155,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	struct drm_lock *lock = data;
 	unsigned long irqflags;
+	void (*tasklet_func)(struct drm_device *);
 
 	if (lock->context == DRM_KERNEL_CONTEXT) {
 		DRM_ERROR("Process %d using kernel context %d\n",
@@ -158,14 +164,11 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	}
 
 	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
-
-	if (dev->locked_tasklet_func) {
-		dev->locked_tasklet_func(dev);
-
-		dev->locked_tasklet_func = NULL;
-	}
-
+	tasklet_func = dev->locked_tasklet_func;
+	dev->locked_tasklet_func = NULL;
 	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+	if (tasklet_func != NULL)
+		tasklet_func(dev);
 
 	atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
 
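[Editor's note] Both DRM hunks above replace the same unsafe pattern -- invoking
dev->locked_tasklet_func while tasklet_lock is held -- with a handoff: snapshot
the callback and clear it under the spinlock, then call the snapshot with the
lock dropped. A minimal self-contained sketch of that pattern (pthread stand-ins
and hypothetical names, not the DRM code):

	#include <pthread.h>
	#include <stddef.h>

	struct dev {
		pthread_mutex_t lock;              /* stands in for tasklet_lock */
		void (*locked_func)(struct dev *); /* stands in for locked_tasklet_func */
	};

	static void run_locked_func(struct dev *d)
	{
		void (*func)(struct dev *);

		pthread_mutex_lock(&d->lock);
		func = d->locked_func;   /* snapshot under the lock */
		d->locked_func = NULL;   /* clear so it cannot run twice */
		pthread_mutex_unlock(&d->lock);

		if (func != NULL)
			func(d);         /* invoked with no lock held */
	}

Calling through the snapshot outside the critical section keeps lock hold times
short and avoids running driver callbacks with the spinlock held and interrupts
disabled.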
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 702df45320f7..4b27d9abb7bc 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -77,6 +77,9 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
 			return -EFAULT;
 		}
 
+		box.x2--; /* Hardware expects inclusive bottom-right corner */
+		box.y2--;
+
 		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
 			box.x1 = (box.x1) &
 				R300_CLIPRECT_MASK;
@@ -95,8 +98,8 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
 				R300_CLIPRECT_MASK;
 			box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
 				R300_CLIPRECT_MASK;
-
 		}
+
 		OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
 			 (box.y1 << R300_CLIPRECT_Y_SHIFT));
 		OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
@@ -136,6 +139,18 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
 		ADVANCE_RING();
 	}
 
+	/* flush cache and wait idle clean after cliprect change */
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(R300_RB3D_DC_FLUSH);
+	ADVANCE_RING();
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+	ADVANCE_RING();
+	/* set flush flag */
+	dev_priv->track_flush |= RADEON_FLUSH_EMITED;
+
 	return 0;
 }
 
@@ -166,13 +181,13 @@ void r300_init_reg_flags(struct drm_device *dev)
 	ADD_RANGE(0x21DC, 1);
 	ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
 	ADD_RANGE(R300_VAP_CLIP_X_0, 4);
-	ADD_RANGE(R300_VAP_PVS_WAITIDLE, 1);
+	ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1);
 	ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
 	ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
 	ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
 	ADD_RANGE(R300_GB_ENABLE, 1);
 	ADD_RANGE(R300_GB_MSPOS0, 5);
-	ADD_RANGE(R300_TX_CNTL, 1);
+	ADD_RANGE(R300_TX_INVALTAGS, 1);
 	ADD_RANGE(R300_TX_ENABLE, 1);
 	ADD_RANGE(0x4200, 4);
 	ADD_RANGE(0x4214, 1);
@@ -388,15 +403,28 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
 	if (sz * 16 > cmdbuf->bufsz)
 		return -EINVAL;
 
-	BEGIN_RING(5 + sz * 4);
-	/* Wait for VAP to come to senses.. */
-	/* there is no need to emit it multiple times, (only once before VAP is programmed,
-	   but this optimization is for later */
-	OUT_RING_REG(R300_VAP_PVS_WAITIDLE, 0);
+	/* VAP is very sensitive so we purge cache before we program it
+	 * and we also flush its state before & after */
+	BEGIN_RING(6);
+	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(R300_RB3D_DC_FLUSH);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+	OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
+	OUT_RING(0);
+	ADVANCE_RING();
+	/* set flush flag */
+	dev_priv->track_flush |= RADEON_FLUSH_EMITED;
+
+	BEGIN_RING(3 + sz * 4);
 	OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
 	OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
 	OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
+	ADVANCE_RING();
 
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
+	OUT_RING(0);
 	ADVANCE_RING();
 
 	cmdbuf->buf += sz * 16;
@@ -424,6 +452,15 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
 	OUT_RING_TABLE((int *)cmdbuf->buf, 8);
 	ADVANCE_RING();
 
+	BEGIN_RING(4);
+	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(R300_RB3D_DC_FLUSH);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+	ADVANCE_RING();
+	/* set flush flag */
+	dev_priv->track_flush |= RADEON_FLUSH_EMITED;
+
 	cmdbuf->buf += 8 * 4;
 	cmdbuf->bufsz -= 8 * 4;
 
@@ -543,22 +580,23 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
 	return 0;
 }
 
-static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv,
+static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
 					    drm_radeon_kcmd_buffer_t *cmdbuf)
 {
-	u32 *cmd = (u32 *) cmdbuf->buf;
-	int count, ret;
+	u32 *cmd;
+	int count;
+	int expected_count;
 	RING_LOCALS;
 
-	count=(cmd[0]>>16) & 0x3fff;
+	cmd = (u32 *) cmdbuf->buf;
+	count = (cmd[0]>>16) & 0x3fff;
+	expected_count = cmd[1] >> 16;
+	if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
+		expected_count = (expected_count+1)/2;
 
-	if ((cmd[1] & 0x8000ffff) != 0x80000810) {
-		DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
-		return -EINVAL;
-	}
-	ret = !radeon_check_offset(dev_priv, cmd[2]);
-	if (ret) {
-		DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
+	if (count && count != expected_count) {
+		DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n",
+			  count, expected_count);
 		return -EINVAL;
 	}
 
@@ -570,6 +608,50 @@ static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv,
 	cmdbuf->buf += (count+2)*4;
 	cmdbuf->bufsz -= (count+2)*4;
 
+	if (!count) {
+		drm_r300_cmd_header_t header;
+
+		if (cmdbuf->bufsz < 4*4 + sizeof(header)) {
+			DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
+			return -EINVAL;
+		}
+
+		header.u = *(unsigned int *)cmdbuf->buf;
+
+		cmdbuf->buf += sizeof(header);
+		cmdbuf->bufsz -= sizeof(header);
+		cmd = (u32 *) cmdbuf->buf;
+
+		if (header.header.cmd_type != R300_CMD_PACKET3 ||
+		    header.packet3.packet != R300_CMD_PACKET3_RAW ||
+		    cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
+			DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
+			return -EINVAL;
+		}
+
+		if ((cmd[1] & 0x8000ffff) != 0x80000810) {
+			DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
+			return -EINVAL;
+		}
+		if (!radeon_check_offset(dev_priv, cmd[2])) {
+			DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
+			return -EINVAL;
+		}
+		if (cmd[3] != expected_count) {
+			DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
+				  cmd[3], expected_count);
+			return -EINVAL;
+		}
+
+		BEGIN_RING(4);
+		OUT_RING(cmd[0]);
+		OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
+		ADVANCE_RING();
+
+		cmdbuf->buf += 4*4;
+		cmdbuf->bufsz -= 4*4;
+	}
+
 	return 0;
 }
 
@@ -613,11 +695,22 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
 	case RADEON_CNTL_BITBLT_MULTI:
 		return r300_emit_bitblt_multi(dev_priv, cmdbuf);
 
-	case RADEON_CP_INDX_BUFFER: /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
-		return r300_emit_indx_buffer(dev_priv, cmdbuf);
-	case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */
-	case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */
-	case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */
+	case RADEON_CP_INDX_BUFFER:
+		DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n");
+		return -EINVAL;
+	case RADEON_CP_3D_DRAW_IMMD_2:
+		/* triggers drawing using in-packet vertex data */
+	case RADEON_CP_3D_DRAW_VBUF_2:
+		/* triggers drawing of vertex buffers setup elsewhere */
+		dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
+					   RADEON_PURGE_EMITED);
+		break;
+	case RADEON_CP_3D_DRAW_INDX_2:
+		/* triggers drawing using indices to vertex buffer */
+		/* whenever we send vertex we clear flush & purge */
+		dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
					   RADEON_PURGE_EMITED);
+		return r300_emit_draw_indx_2(dev_priv, cmdbuf);
 	case RADEON_WAIT_FOR_IDLE:
 	case RADEON_CP_NOP:
 		/* these packets are safe */
@@ -713,17 +806,53 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
  */
 static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
 {
+	uint32_t cache_z, cache_3d, cache_2d;
 	RING_LOCALS;
 
-	BEGIN_RING(6);
-	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
-	OUT_RING(R300_RB3D_DSTCACHE_UNKNOWN_0A);
+	cache_z = R300_ZC_FLUSH;
+	cache_2d = R300_RB2D_DC_FLUSH;
+	cache_3d = R300_RB3D_DC_FLUSH;
+	if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
+		/* we can purge; primitives were drawn since the last purge */
+		cache_z |= R300_ZC_FREE;
+		cache_2d |= R300_RB2D_DC_FREE;
+		cache_3d |= R300_RB3D_DC_FREE;
+	}
+
+	/* flush & purge zbuffer */
+	BEGIN_RING(2);
 	OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
-	OUT_RING(R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE|
-		 R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE);
-	OUT_RING(CP_PACKET3(RADEON_CP_NOP, 0));
-	OUT_RING(0x0);
+	OUT_RING(cache_z);
+	ADVANCE_RING();
+	/* flush & purge 3d */
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(cache_3d);
+	ADVANCE_RING();
+	/* flush & purge texture */
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0));
+	OUT_RING(0);
+	ADVANCE_RING();
+	/* FIXME: is this one really needed ? */
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0));
+	OUT_RING(0);
+	ADVANCE_RING();
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+	ADVANCE_RING();
+	/* flush & purge 2d through E2 as RB2D will trigger lockup */
+	BEGIN_RING(4);
+	OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(cache_2d);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(RADEON_WAIT_2D_IDLECLEAN |
+		 RADEON_WAIT_HOST_IDLECLEAN);
 	ADVANCE_RING();
+	/* set flush & purge flags */
+	dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
 }
 
 /**
@@ -905,8 +1034,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
 
 	DRM_DEBUG("\n");
 
-	/* See the comment above r300_emit_begin3d for why this call must be here,
-	 * and what the cleanup gotos are for. */
+	/* pacify */
 	r300_pacify(dev_priv);
 
 	if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
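[Editor's note] The new r300_emit_draw_indx_2() above enforces a size relation:
a 3D_DRAW_INDX_2 packet carrying n indices needs n dwords of payload with
32-bit indices, but only (n + 1) / 2 dwords when two 16-bit indices are packed
per dword. A small sketch of just that arithmetic (hypothetical helper, not
driver code):

	#include <assert.h>

	static int expected_dwords(int num_indices, int index_size_32bit)
	{
		/* 16-bit indices are packed two per dword, rounded up */
		return index_size_32bit ? num_indices : (num_indices + 1) / 2;
	}

	int main(void)
	{
		assert(expected_dwords(5, 1) == 5);	/* 32-bit: one index per dword */
		assert(expected_dwords(5, 0) == 3);	/* 16-bit: packed in pairs */
		return 0;
	}

The same expected count is checked again against the INDX_BUFFER packet that
must follow a zero-length draw, so userspace cannot point the GPU at an index
buffer of a different size.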
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index a6802f26afc4..ee6f811599a3 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -317,7 +317,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
  * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
  * avoids bugs caused by still running shaders reading bad data from memory.
  */
-#define R300_VAP_PVS_WAITIDLE               0x2284 /* GUESS */
+#define R300_VAP_PVS_STATE_FLUSH_REG        0x2284
 
 /* Absolutely no clue what this register is about. */
 #define R300_VAP_UNKNOWN_2288               0x2288
@@ -513,7 +513,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 /* gap */
 
 /* Zero to flush caches. */
-#define R300_TX_CNTL                        0x4100
+#define R300_TX_INVALTAGS                   0x4100
 #define R300_TX_FLUSH                       0x0
 
 /* The upper enable bits are guessed, based on fglrx reported limits. */
@@ -1362,6 +1362,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 #define R300_RB3D_COLORPITCH2               0x4E40 /* GUESS */
 #define R300_RB3D_COLORPITCH3               0x4E44 /* GUESS */
 
+#define R300_RB3D_AARESOLVE_CTL             0x4E88
 /* gap */
 
 /* Guess by Vladimir.
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index f0de81a5689d..3331f88dcfb6 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -40,6 +40,7 @@
 #define RADEON_FIFO_DEBUG	0
 
 static int radeon_do_cleanup_cp(struct drm_device * dev);
+static void radeon_do_cp_start(drm_radeon_private_t * dev_priv);
 
 static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
 {
@@ -198,23 +199,8 @@ static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
 			DRM_UDELAY(1);
 		}
 	} else {
-		/* 3D */
-		tmp = RADEON_READ(R300_RB3D_DSTCACHE_CTLSTAT);
-		tmp |= RADEON_RB3D_DC_FLUSH_ALL;
-		RADEON_WRITE(R300_RB3D_DSTCACHE_CTLSTAT, tmp);
-
-		/* 2D */
-		tmp = RADEON_READ(R300_DSTCACHE_CTLSTAT);
-		tmp |= RADEON_RB3D_DC_FLUSH_ALL;
-		RADEON_WRITE(R300_DSTCACHE_CTLSTAT, tmp);
-
-		for (i = 0; i < dev_priv->usec_timeout; i++) {
-			if (!(RADEON_READ(R300_DSTCACHE_CTLSTAT)
-			      & RADEON_RB3D_DC_BUSY)) {
-				return 0;
-			}
-			DRM_UDELAY(1);
-		}
+		/* don't flush or purge cache here or lockup */
+		return 0;
 	}
 
 #if RADEON_FIFO_DEBUG
@@ -237,6 +223,9 @@ static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
 			return 0;
 		DRM_UDELAY(1);
 	}
+	DRM_INFO("wait for fifo failed status : 0x%08X 0x%08X\n",
+		 RADEON_READ(RADEON_RBBM_STATUS),
+		 RADEON_READ(R300_VAP_CNTL_STATUS));
 
 #if RADEON_FIFO_DEBUG
 	DRM_ERROR("failed!\n");
@@ -263,6 +252,9 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
 		}
 		DRM_UDELAY(1);
 	}
+	DRM_INFO("wait idle failed status : 0x%08X 0x%08X\n",
+		 RADEON_READ(RADEON_RBBM_STATUS),
+		 RADEON_READ(R300_VAP_CNTL_STATUS));
 
 #if RADEON_FIFO_DEBUG
 	DRM_ERROR("failed!\n");
@@ -443,14 +435,20 @@ static void radeon_do_cp_start(drm_radeon_private_t * dev_priv)
 
 	dev_priv->cp_running = 1;
 
-	BEGIN_RING(6);
-
+	BEGIN_RING(8);
+	/* isync can only be written through cp on r5xx write it here */
+	OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0));
+	OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D |
+		 RADEON_ISYNC_ANY3D_IDLE2D |
+		 RADEON_ISYNC_WAIT_IDLEGUI |
+		 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
 	RADEON_PURGE_CACHE();
 	RADEON_PURGE_ZCACHE();
 	RADEON_WAIT_UNTIL_IDLE();
-
 	ADVANCE_RING();
 	COMMIT_RING();
+
+	dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
 }
 
 /* Reset the Command Processor.  This will not flush any pending
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 3f0eca957aa7..099381693175 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -220,6 +220,9 @@ struct radeon_virt_surface {
 	struct drm_file *file_priv;
 };
 
+#define RADEON_FLUSH_EMITED	(1 << 0)
+#define RADEON_PURGE_EMITED	(1 << 1)
+
 typedef struct drm_radeon_private {
 	drm_radeon_ring_buffer_t ring;
 	drm_radeon_sarea_t *sarea_priv;
@@ -311,6 +314,7 @@ typedef struct drm_radeon_private {
 	unsigned long fb_aper_offset;
 
 	int num_gb_pipes;
+	int track_flush;
 } drm_radeon_private_t;
 
 typedef struct drm_radeon_buf_priv {
@@ -693,7 +697,6 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
 #define R300_ZB_ZCACHE_CTLSTAT                  0x4f18
 #	define R300_ZC_FLUSH                    (1 << 0)
 #	define R300_ZC_FREE                     (1 << 1)
-#	define R300_ZC_FLUSH_ALL                0x3
 #	define R300_ZC_BUSY                     (1 << 31)
 #define RADEON_RB3D_DSTCACHE_CTLSTAT	0x325c
 #	define RADEON_RB3D_DC_FLUSH		(3 << 0)
@@ -701,6 +704,8 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
 #	define RADEON_RB3D_DC_FLUSH_ALL		0xf
 #	define RADEON_RB3D_DC_BUSY		(1 << 31)
 #define R300_RB3D_DSTCACHE_CTLSTAT              0x4e4c
+#	define R300_RB3D_DC_FLUSH		(2 << 0)
+#	define R300_RB3D_DC_FREE		(2 << 2)
 #	define R300_RB3D_DC_FINISH		(1 << 4)
 #define RADEON_RB3D_ZSTENCILCNTL	0x1c2c
 #	define RADEON_Z_TEST_MASK		(7 << 4)
@@ -1246,17 +1251,17 @@ do { \
 		OUT_RING(RADEON_RB3D_DC_FLUSH);				\
 	} else {							\
 		OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));	\
-		OUT_RING(RADEON_RB3D_DC_FLUSH);				\
+		OUT_RING(R300_RB3D_DC_FLUSH);				\
 	}								\
 } while (0)
 
 #define RADEON_PURGE_CACHE() do {					\
 	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
 		OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));	\
-		OUT_RING(RADEON_RB3D_DC_FLUSH_ALL);			\
+		OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE);	\
 	} else {							\
 		OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));	\
-		OUT_RING(RADEON_RB3D_DC_FLUSH_ALL);			\
+		OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);	\
 	}								\
 } while (0)
 
@@ -1273,10 +1278,10 @@ do { \
 #define RADEON_PURGE_ZCACHE() do {					\
 	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
 		OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));	\
-		OUT_RING(RADEON_RB3D_ZC_FLUSH_ALL);			\
+		OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE);	\
 	} else {							\
-		OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));	\
-		OUT_RING(R300_ZC_FLUSH_ALL);				\
+		OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));	\
+		OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE);			\
 	}								\
 } while (0)
 
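[Editor's note] The original hunk defines the two flags as (1 < 0) and (1 < 1),
i.e. comparisons that both evaluate to 0; the intent is clearly bit 0 and bit 1,
so the shifts are corrected above. These RADEON_FLUSH_EMITED/RADEON_PURGE_EMITED
bits drive the cache bookkeeping in r300_cmdbuf.c: draws clear them, emitted
flushes set them, and r300_pacify() only adds the FREE (purge) bits when
something was drawn since the last purge. A compilable sketch of that state
machine (simplified names, not the driver):

	#include <stdio.h>

	#define FLUSH_EMITTED (1 << 0)	/* mirrors RADEON_FLUSH_EMITED */
	#define PURGE_EMITTED (1 << 1)	/* mirrors RADEON_PURGE_EMITED */

	static unsigned int track_flush;

	static void emit_draw(void)
	{
		/* any draw dirties the caches again */
		track_flush &= ~(FLUSH_EMITTED | PURGE_EMITTED);
	}

	static void pacify(void)
	{
		int do_free = !(track_flush & PURGE_EMITTED);

		printf(do_free ? "flush + free caches\n" : "flush only\n");
		track_flush |= FLUSH_EMITTED | PURGE_EMITTED;
	}

	int main(void)
	{
		pacify();	/* flush + free */
		pacify();	/* flush only: nothing drawn in between */
		emit_draw();
		pacify();	/* flush + free again */
		return 0;
	}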
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index 37344aaee22f..a661bbdae3d6 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -98,6 +98,10 @@ static u32 lg_get_features(struct virtio_device *vdev)
 	return features;
 }
 
+/* The virtio core takes the features the Host offers, and copies the
+ * ones supported by the driver into the vdev->features array.  Once
+ * that's all sorted out, this routine is called so we can tell the
+ * Host which features we understand and accept. */
 static void lg_finalize_features(struct virtio_device *vdev)
 {
 	unsigned int i, bits;
@@ -108,6 +112,10 @@ static void lg_finalize_features(struct virtio_device *vdev)
 	/* Give virtio_ring a chance to accept features. */
 	vring_transport_features(vdev);
 
+	/* The vdev->features array is a Linux bitmask: this isn't the
+	 * same as the simple array of bits used by lguest devices
+	 * for features.  So we do this slow, manual conversion which is
+	 * completely general. */
 	memset(out_features, 0, desc->feature_len);
 	bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8;
 	for (i = 0; i < bits; i++) {
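[Editor's note] The second comment above describes a word-size-independent
bitmap walk. A self-contained sketch of the conversion it documents -- a
long-based Linux bitmap packed into a little-endian byte array, one bit at a
time (hypothetical names, not the lguest code):

	#include <stdio.h>
	#include <string.h>

	#define BITS_PER_LONG (8 * (unsigned int)sizeof(unsigned long))

	static int test_bit(unsigned int nr, const unsigned long *map)
	{
		return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
	}

	static void pack_features(const unsigned long *in, unsigned int nbits,
				  unsigned char *out, unsigned int outlen)
	{
		unsigned int i;

		memset(out, 0, outlen);
		for (i = 0; i < nbits && i / 8 < outlen; i++)
			if (test_bit(i, in))
				out[i / 8] |= 1u << (i % 8);
	}

	int main(void)
	{
		unsigned long features[1] = { 0x29 };	/* bits 0, 3, 5 set */
		unsigned char packed[4];

		pack_features(features, 32, packed, sizeof(packed));
		printf("%02x\n", packed[0]);		/* prints 29 */
		return 0;
	}

Going bit by bit is slow but, as the comment says, completely general: it works
for any word size or feature length.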
diff --git a/drivers/misc/acer-wmi.c b/drivers/misc/acer-wmi.c
index b2d9878dc3f0..c6c77a505ec1 100644
--- a/drivers/misc/acer-wmi.c
+++ b/drivers/misc/acer-wmi.c
@@ -192,6 +192,9 @@ static struct quirk_entry *quirks;
 
 static void set_quirks(void)
 {
+	if (!interface)
+		return;
+
 	if (quirks->mailled)
 		interface->capability |= ACER_CAP_MAILLED;
 
@@ -1237,6 +1240,8 @@ static int __init acer_wmi_init(void)
 		return -ENODEV;
 	}
 
+	set_quirks();
+
 	if (platform_driver_register(&acer_platform_driver)) {
 		printk(ACER_ERR "Unable to register platform driver.\n");
 		goto error_platform_register;
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index b468f904c7f8..a14dba1afcc5 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -271,7 +271,7 @@ struct bnx2x_fastpath {
 		(fp->tx_pkt_prod != fp->tx_pkt_cons))
 
 #define BNX2X_HAS_RX_WORK(fp) \
-			(fp->rx_comp_cons != le16_to_cpu(*fp->rx_cons_sb))
+			(fp->rx_comp_cons != rx_cons_sb)
 
 #define BNX2X_HAS_WORK(fp)	(BNX2X_HAS_RX_WORK(fp) || BNX2X_HAS_TX_WORK(fp))
 
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 971576b43687..82deea0a63f5 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -59,8 +59,8 @@
 #include "bnx2x.h"
 #include "bnx2x_init.h"
 
-#define DRV_MODULE_VERSION	"1.45.17"
-#define DRV_MODULE_RELDATE	"2008/08/13"
+#define DRV_MODULE_VERSION	"1.45.20"
+#define DRV_MODULE_RELDATE	"2008/08/25"
 #define BNX2X_BC_VER		0x040200
 
 /* Time in jiffies before concluding the transmitter is hung */
@@ -1717,8 +1717,8 @@ static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
 		return -EEXIST;
 	}
 
-	/* Try for 1 second every 5ms */
-	for (cnt = 0; cnt < 200; cnt++) {
+	/* Try for 5 seconds every 5ms */
+	for (cnt = 0; cnt < 1000; cnt++) {
 		/* Try to acquire the lock */
 		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
 		lock_status = REG_RD(bp, hw_lock_control_reg);
@@ -2550,6 +2550,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 		BNX2X_ERR("SPIO5 hw attention\n");
 
 		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 			/* Fan failure attention */
 
@@ -4605,6 +4606,17 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
 {
 	int i;
 
+	if (bp->flags & TPA_ENABLE_FLAG) {
+		struct tstorm_eth_tpa_exist tpa = {0};
+
+		tpa.tpa_exist = 1;
+
+		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
+		       ((u32 *)&tpa)[0]);
+		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
+		       ((u32 *)&tpa)[1]);
+	}
+
 	/* Zero this manually as its initialization is
 	   currently missing in the initTool */
 	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -5337,6 +5349,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
 	}
 
 	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 		/* Fan failure is indicated by SPIO 5 */
 		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
@@ -5363,17 +5376,6 @@ static int bnx2x_init_common(struct bnx2x *bp)
 
 	enable_blocks_attention(bp);
 
-	if (bp->flags & TPA_ENABLE_FLAG) {
-		struct tstorm_eth_tpa_exist tmp = {0};
-
-		tmp.tpa_exist = 1;
-
-		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
-		       ((u32 *)&tmp)[0]);
-		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
-		       ((u32 *)&tmp)[1]);
-	}
-
 	if (!BP_NOMCP(bp)) {
 		bnx2x_acquire_phy_lock(bp);
 		bnx2x_common_init_phy(bp, bp->common.shmem_base);
@@ -5531,6 +5533,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
 	/* Port DMAE comes here */
 
 	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 		/* add SPIO 5 to group 0 */
 		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
@@ -6055,6 +6058,44 @@ static int bnx2x_req_irq(struct bnx2x *bp)
 	return rc;
 }
 
+static void bnx2x_napi_enable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i)
+		napi_enable(&bnx2x_fp(bp, i, napi));
+}
+
+static void bnx2x_napi_disable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i)
+		napi_disable(&bnx2x_fp(bp, i, napi));
+}
+
+static void bnx2x_netif_start(struct bnx2x *bp)
+{
+	if (atomic_dec_and_test(&bp->intr_sem)) {
+		if (netif_running(bp->dev)) {
+			if (bp->state == BNX2X_STATE_OPEN)
+				netif_wake_queue(bp->dev);
+			bnx2x_napi_enable(bp);
+			bnx2x_int_enable(bp);
+		}
+	}
+}
+
+static void bnx2x_netif_stop(struct bnx2x *bp)
+{
+	bnx2x_int_disable_sync(bp);
+	if (netif_running(bp->dev)) {
+		bnx2x_napi_disable(bp);
+		netif_tx_disable(bp->dev);
+		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
+	}
+}
+
 /*
  * Init service functions
  */
@@ -6338,7 +6379,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	rc = bnx2x_init_hw(bp, load_code);
 	if (rc) {
 		BNX2X_ERR("HW init failed, aborting\n");
-		goto load_error;
+		goto load_int_disable;
 	}
 
 	/* Setup NIC internals and enable interrupts */
@@ -6350,7 +6391,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		if (!load_code) {
 			BNX2X_ERR("MCP response failure, aborting\n");
 			rc = -EBUSY;
-			goto load_int_disable;
+			goto load_rings_free;
 		}
 	}
 
@@ -6360,8 +6401,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	/* Enable Rx interrupt handling before sending the ramrod
 	   as it's completed on Rx FP queue */
-	for_each_queue(bp, i)
-		napi_enable(&bnx2x_fp(bp, i, napi));
+	bnx2x_napi_enable(bp);
 
 	/* Enable interrupt handling */
 	atomic_set(&bp->intr_sem, 0);
@@ -6369,7 +6409,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	rc = bnx2x_setup_leading(bp);
 	if (rc) {
 		BNX2X_ERR("Setup leading failed!\n");
-		goto load_stop_netif;
+		goto load_netif_stop;
 	}
 
 	if (CHIP_IS_E1H(bp))
@@ -6382,7 +6422,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		for_each_nondefault_queue(bp, i) {
 			rc = bnx2x_setup_multi(bp, i);
 			if (rc)
-				goto load_stop_netif;
+				goto load_netif_stop;
 		}
 
 		if (CHIP_IS_E1(bp))
@@ -6427,20 +6467,17 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	return 0;
 
-load_stop_netif:
+load_netif_stop:
+	bnx2x_napi_disable(bp);
+load_rings_free:
+	/* Free SKBs, SGEs, TPA pool and driver internals */
+	bnx2x_free_skbs(bp);
 	for_each_queue(bp, i)
-		napi_disable(&bnx2x_fp(bp, i, napi));
-
+		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 load_int_disable:
 	bnx2x_int_disable_sync(bp);
-
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
-
-	/* Free SKBs, SGEs, TPA pool and driver internals */
-	bnx2x_free_skbs(bp);
-	for_each_queue(bp, i)
-		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 load_error:
 	bnx2x_free_mem(bp);
 
@@ -6455,7 +6492,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
 
 	/* halt the connection */
 	bp->fp[index].state = BNX2X_FP_STATE_HALTING;
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
 
 	/* Wait for completion */
 	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
@@ -6613,11 +6650,9 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 	bp->rx_mode = BNX2X_RX_MODE_NONE;
 	bnx2x_set_storm_rx_mode(bp);
 
-	if (netif_running(bp->dev)) {
-		netif_tx_disable(bp->dev);
-		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
-	}
-
+	bnx2x_netif_stop(bp);
+	if (!netif_running(bp->dev))
+		bnx2x_napi_disable(bp);
 	del_timer_sync(&bp->timer);
 	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
 		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
@@ -6631,9 +6666,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 		smp_rmb();
 		while (BNX2X_HAS_TX_WORK(fp)) {
 
-			if (!netif_running(bp->dev))
-				bnx2x_tx_int(fp, 1000);
-
+			bnx2x_tx_int(fp, 1000);
 			if (!cnt) {
 				BNX2X_ERR("timeout waiting for queue[%d]\n",
 					  i);
@@ -6649,46 +6682,12 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 			smp_rmb();
 		}
 	}
-
 	/* Give HW time to discard old tx messages */
 	msleep(1);
 
-	for_each_queue(bp, i)
-		napi_disable(&bnx2x_fp(bp, i, napi));
-	/* Disable interrupts after Tx and Rx are disabled on stack level */
-	bnx2x_int_disable_sync(bp);
-
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
 
-	if (unload_mode == UNLOAD_NORMAL)
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
-	else if (bp->flags & NO_WOL_FLAG) {
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
-		if (CHIP_IS_E1H(bp))
-			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
-
-	} else if (bp->wol) {
-		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-		u8 *mac_addr = bp->dev->dev_addr;
-		u32 val;
-		/* The mac address is written to entries 1-4 to
-		   preserve entry 0 which is used by the PMF */
-		u8 entry = (BP_E1HVN(bp) + 1)*8;
-
-		val = (mac_addr[0] << 8) | mac_addr[1];
-		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
-
-		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
-		      (mac_addr[4] << 8) | mac_addr[5];
-		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
-
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
-
-	} else
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
 	if (CHIP_IS_E1(bp)) {
 		struct mac_configuration_cmd *config =
 			bnx2x_sp(bp, mcast_config);
@@ -6711,14 +6710,41 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 			U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
 
 	} else { /* E1H */
+		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
 		bnx2x_set_mac_addr_e1h(bp, 0);
 
 		for (i = 0; i < MC_HASH_SIZE; i++)
 			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
 	}
 
-	if (CHIP_IS_E1H(bp))
-		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+	if (unload_mode == UNLOAD_NORMAL)
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+	else if (bp->flags & NO_WOL_FLAG) {
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+		if (CHIP_IS_E1H(bp))
+			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
+
+	} else if (bp->wol) {
+		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+		u8 *mac_addr = bp->dev->dev_addr;
+		u32 val;
+		/* The mac address is written to entries 1-4 to
+		   preserve entry 0 which is used by the PMF */
+		u8 entry = (BP_E1HVN(bp) + 1)*8;
+
+		val = (mac_addr[0] << 8) | mac_addr[1];
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
+
+		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+		      (mac_addr[4] << 8) | mac_addr[5];
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
+
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
+
+	} else
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 
 	/* Close multi and leading connections
 	   Completions for ramrods are collected in a synchronous way */
@@ -6821,6 +6847,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 	 */
 	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 	val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
+	if (val == 0x7)
+		REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+
 	if (val == 0x7) {
 		u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 		/* save our func */
@@ -6898,7 +6928,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 			(SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
 			 DRV_MSG_SEQ_NUMBER_MASK);
 		}
-		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 	}
 }
 
@@ -8617,34 +8646,6 @@ test_mem_exit:
 	return rc;
 }
 
-static void bnx2x_netif_start(struct bnx2x *bp)
-{
-	int i;
-
-	if (atomic_dec_and_test(&bp->intr_sem)) {
-		if (netif_running(bp->dev)) {
-			bnx2x_int_enable(bp);
-			for_each_queue(bp, i)
-				napi_enable(&bnx2x_fp(bp, i, napi));
-			if (bp->state == BNX2X_STATE_OPEN)
-				netif_wake_queue(bp->dev);
-		}
-	}
-}
-
-static void bnx2x_netif_stop(struct bnx2x *bp)
-{
-	int i;
-
-	if (netif_running(bp->dev)) {
-		netif_tx_disable(bp->dev);
-		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
-		for_each_queue(bp, i)
-			napi_disable(&bnx2x_fp(bp, i, napi));
-	}
-	bnx2x_int_disable_sync(bp);
-}
-
 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
 {
 	int cnt = 1000;
@@ -9250,6 +9251,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 						 napi);
 	struct bnx2x *bp = fp->bp;
 	int work_done = 0;
+	u16 rx_cons_sb;
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
@@ -9265,10 +9267,16 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 	if (BNX2X_HAS_TX_WORK(fp))
 		bnx2x_tx_int(fp, budget);
 
+	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
+	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+		rx_cons_sb++;
 	if (BNX2X_HAS_RX_WORK(fp))
 		work_done = bnx2x_rx_int(fp, budget);
 
 	rmb(); /* BNX2X_HAS_WORK() reads the status block */
+	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
+	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+		rx_cons_sb++;
 
 	/* must not complete if we consumed full budget */
 	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
@@ -9484,8 +9492,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		fp_index = (smp_processor_id() % bp->num_queues);
 	fp = &bp->fp[fp_index];
 
-	if (unlikely(bnx2x_tx_avail(bp->fp) <
-					(skb_shinfo(skb)->nr_frags + 3))) {
+	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
 		bp->eth_stats.driver_xoff++,
 		netif_stop_queue(dev);
 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
@@ -9548,7 +9555,6 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_bd->vlan = cpu_to_le16(pkt_prod);
 
 	if (xmit_type) {
-
 		/* turn on parsing and get a BD */
 		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 		pbd = (void *)&fp->tx_desc_ring[bd_prod];
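[Editor's note] The bnx2x_poll() hunk above reads the status-block RX index
into a local rx_cons_sb and nudges it when it lands on a page boundary, because
the last entry of each receive-completion page is a "next page" link the
hardware skips. A sketch of that normalization (RCQ_DESC_PER_PAGE is an
assumption for illustration, not the driver's value):

	#include <stdint.h>

	#define RCQ_DESC_PER_PAGE	128	/* assumed page capacity */
	#define MAX_RCQ_DESC_CNT	(RCQ_DESC_PER_PAGE - 1)

	static uint16_t normalize_rcq_index(uint16_t idx)
	{
		/* the last slot of a page is a link entry: step over it */
		if ((idx & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
			idx++;
		return idx;
	}

With the index normalized once in the caller, the BNX2X_HAS_RX_WORK() macro in
bnx2x.h can compare against the plain local variable, which is why its
definition was changed earlier in this patch.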
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index b9f90a5d3d4d..213437d13154 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -208,7 +208,7 @@ struct e1000_option {
 		} r;
 		struct { /* list_option info */
 			int nr;
-			struct e1000_opt_list { int i; char *str; } *p;
+			const struct e1000_opt_list { int i; char *str; } *p;
 		} l;
 	} arg;
 };
@@ -242,7 +242,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
 		break;
 	case list_option: {
 		int i;
-		struct e1000_opt_list *ent;
+		const struct e1000_opt_list *ent;
 
 		for (i = 0; i < opt->arg.l.nr; i++) {
 			ent = &opt->arg.l.p[i];
@@ -279,7 +279,9 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter);
 
 void __devinit e1000_check_options(struct e1000_adapter *adapter)
 {
+	struct e1000_option opt;
 	int bd = adapter->bd_number;
+
 	if (bd >= E1000_MAX_NIC) {
 		DPRINTK(PROBE, NOTICE,
 			"Warning: no configuration for board #%i\n", bd);
@@ -287,19 +289,21 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
 	}
 
 	{ /* Transmit Descriptor Count */
-		struct e1000_option opt = {
+		struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+		int i;
+		e1000_mac_type mac_type = adapter->hw.mac_type;
+
+		opt = (struct e1000_option) {
 			.type = range_option,
 			.name = "Transmit Descriptors",
 			.err  = "using default of "
 				__MODULE_STRING(E1000_DEFAULT_TXD),
 			.def  = E1000_DEFAULT_TXD,
-			.arg  = { .r = { .min = E1000_MIN_TXD }}
+			.arg  = { .r = {
+				.min = E1000_MIN_TXD,
+				.max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD
+			}}
 		};
-		struct e1000_tx_ring *tx_ring = adapter->tx_ring;
-		int i;
-		e1000_mac_type mac_type = adapter->hw.mac_type;
-		opt.arg.r.max = mac_type < e1000_82544 ?
-			E1000_MAX_TXD : E1000_MAX_82544_TXD;
 
 		if (num_TxDescriptors > bd) {
 			tx_ring->count = TxDescriptors[bd];
@@ -313,19 +317,21 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
 			tx_ring[i].count = tx_ring->count;
 	}
 	{ /* Receive Descriptor Count */
-		struct e1000_option opt = {
+		struct e1000_rx_ring *rx_ring = adapter->rx_ring;
+		int i;
+		e1000_mac_type mac_type = adapter->hw.mac_type;
+
+		opt = (struct e1000_option) {
 			.type = range_option,
 			.name = "Receive Descriptors",
 			.err  = "using default of "
 				__MODULE_STRING(E1000_DEFAULT_RXD),
 			.def  = E1000_DEFAULT_RXD,
-			.arg  = { .r = { .min = E1000_MIN_RXD }}
+			.arg  = { .r = {
+				.min = E1000_MIN_RXD,
+				.max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD
+			}}
 		};
-		struct e1000_rx_ring *rx_ring = adapter->rx_ring;
-		int i;
-		e1000_mac_type mac_type = adapter->hw.mac_type;
-		opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD :
-			E1000_MAX_82544_RXD;
 
 		if (num_RxDescriptors > bd) {
 			rx_ring->count = RxDescriptors[bd];
@@ -339,7 +345,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
 			rx_ring[i].count = rx_ring->count;
 	}
 	{ /* Checksum Offload Enable/Disable */
-		struct e1000_option opt = {
+		opt = (struct e1000_option) {
 			.type = enable_option,
 			.name = "Checksum Offload",
 			.err  = "defaulting to Enabled",
@@ -363,7 +369,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
 			{ E1000_FC_FULL,    "Flow Control Enabled" },
 			{ E1000_FC_DEFAULT, "Flow Control Hardware Default" }};
 
-		struct e1000_option opt = {
+		opt = (struct e1000_option) {
 			.type = list_option,
 			.name = "Flow Control",
 			.err  = "reading default settings from EEPROM",
@@ -381,7 +387,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
 		}
 	}
 	{ /* Transmit Interrupt Delay */
-		struct e1000_option opt = {
+		opt = (struct e1000_option) {
 			.type = range_option,
 			.name = "Transmit Interrupt Delay",
 			.err  = "using default of " __MODULE_STRING(DEFAULT_TIDV),
@@ -399,7 +405,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
 		}
 	}
 	{ /* Transmit Absolute Interrupt Delay */
-		struct e1000_option opt = {
+		opt = (struct e1000_option) {
 			.type = range_option,
 			.name = "Transmit Absolute Interrupt Delay",
 			.err  = "using default of " __MODULE_STRING(DEFAULT_TADV),
@@ -417,7 +423,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
 		}
 	}
 	{ /* Receive Interrupt Delay */
-		struct e1000_option opt = {
+		opt = (struct e1000_option) {
			.type = range_option,
 			.name = "Receive Interrupt Delay",
 			.err  = "using default of " __MODULE_STRING(DEFAULT_RDTR),
@@ -435,7 +441,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
 		}
 	}
 	{ /* Receive Absolute Interrupt Delay */
-		struct e1000_option opt = {
+		opt = (struct e1000_option) {
 			.type = range_option,
 			.name = "Receive Absolute Interrupt Delay",
 			.err  = "using default of " __MODULE_STRING(DEFAULT_RADV),
@@ -453,7 +459,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
 		}
 	}
 	{ /* Interrupt Throttling Rate */
456 struct e1000_option opt = { 462 opt = (struct e1000_option) {
457 .type = range_option, 463 .type = range_option,
458 .name = "Interrupt Throttling Rate (ints/sec)", 464 .name = "Interrupt Throttling Rate (ints/sec)",
459 .err = "using default of " __MODULE_STRING(DEFAULT_ITR), 465 .err = "using default of " __MODULE_STRING(DEFAULT_ITR),
@@ -497,7 +503,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
497 } 503 }
498 } 504 }
499 { /* Smart Power Down */ 505 { /* Smart Power Down */
500 struct e1000_option opt = { 506 opt = (struct e1000_option) {
501 .type = enable_option, 507 .type = enable_option,
502 .name = "PHY Smart Power Down", 508 .name = "PHY Smart Power Down",
503 .err = "defaulting to Disabled", 509 .err = "defaulting to Disabled",
@@ -513,7 +519,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
513 } 519 }
514 } 520 }
515 { /* Kumeran Lock Loss Workaround */ 521 { /* Kumeran Lock Loss Workaround */
516 struct e1000_option opt = { 522 opt = (struct e1000_option) {
517 .type = enable_option, 523 .type = enable_option,
518 .name = "Kumeran Lock Loss Workaround", 524 .name = "Kumeran Lock Loss Workaround",
519 .err = "defaulting to Enabled", 525 .err = "defaulting to Enabled",
@@ -578,16 +584,18 @@ static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
578 584
579static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter) 585static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
580{ 586{
587 struct e1000_option opt;
581 unsigned int speed, dplx, an; 588 unsigned int speed, dplx, an;
582 int bd = adapter->bd_number; 589 int bd = adapter->bd_number;
583 590
584 { /* Speed */ 591 { /* Speed */
585 struct e1000_opt_list speed_list[] = {{ 0, "" }, 592 static const struct e1000_opt_list speed_list[] = {
586 { SPEED_10, "" }, 593 { 0, "" },
587 { SPEED_100, "" }, 594 { SPEED_10, "" },
588 { SPEED_1000, "" }}; 595 { SPEED_100, "" },
596 { SPEED_1000, "" }};
589 597
590 struct e1000_option opt = { 598 opt = (struct e1000_option) {
591 .type = list_option, 599 .type = list_option,
592 .name = "Speed", 600 .name = "Speed",
593 .err = "parameter ignored", 601 .err = "parameter ignored",
@@ -604,11 +612,12 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
604 } 612 }
605 } 613 }
606 { /* Duplex */ 614 { /* Duplex */
607 struct e1000_opt_list dplx_list[] = {{ 0, "" }, 615 static const struct e1000_opt_list dplx_list[] = {
608 { HALF_DUPLEX, "" }, 616 { 0, "" },
609 { FULL_DUPLEX, "" }}; 617 { HALF_DUPLEX, "" },
618 { FULL_DUPLEX, "" }};
610 619
611 struct e1000_option opt = { 620 opt = (struct e1000_option) {
612 .type = list_option, 621 .type = list_option,
613 .name = "Duplex", 622 .name = "Duplex",
614 .err = "parameter ignored", 623 .err = "parameter ignored",
@@ -637,7 +646,7 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
637 "parameter ignored\n"); 646 "parameter ignored\n");
638 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; 647 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
639 } else { /* Autoneg */ 648 } else { /* Autoneg */
640 struct e1000_opt_list an_list[] = 649 static const struct e1000_opt_list an_list[] =
641 #define AA "AutoNeg advertising " 650 #define AA "AutoNeg advertising "
642 {{ 0x01, AA "10/HD" }, 651 {{ 0x01, AA "10/HD" },
643 { 0x02, AA "10/FD" }, 652 { 0x02, AA "10/FD" },
@@ -671,7 +680,7 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
671 { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" }, 680 { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
672 { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }}; 681 { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }};
673 682
674 struct e1000_option opt = { 683 opt = (struct e1000_option) {
675 .type = list_option, 684 .type = list_option,
676 .name = "AutoNeg", 685 .name = "AutoNeg",
677 .err = "parameter ignored", 686 .err = "parameter ignored",
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 2450b3a393ff..7ba78e6d210e 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -38,8 +38,10 @@ void free_cpu_buffers(void)
38{ 38{
39 int i; 39 int i;
40 40
41 for_each_online_cpu(i) 41 for_each_online_cpu(i) {
42 vfree(per_cpu(cpu_buffer, i).buffer); 42 vfree(per_cpu(cpu_buffer, i).buffer);
43 per_cpu(cpu_buffer, i).buffer = NULL;
44 }
43} 45}
44 46
45int alloc_cpu_buffers(void) 47int alloc_cpu_buffers(void)
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index e7fbac529935..8d692a5c8e73 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -93,6 +93,8 @@ out:
93void free_event_buffer(void) 93void free_event_buffer(void)
94{ 94{
95 vfree(event_buffer); 95 vfree(event_buffer);
96
97 event_buffer = NULL;
96} 98}
97 99
98 100
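Both oprofile hunks apply the same free-then-clear idiom: vfree() tolerates NULL, so once the pointer is reset, a second teardown pass becomes a harmless no-op instead of a double free, and any stale user sees NULL rather than a dangling pointer. The same shape in userspace, with free() standing in for vfree():

    #include <stdlib.h>

    static char *event_buffer;

    static void free_event_buffer_sketch(void)
    {
        free(event_buffer);     /* free(NULL), like vfree(NULL), is a no-op */
        event_buffer = NULL;    /* later frees and lookups now fail safely */
    }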
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index e3a1e7e7dba2..9e6cec67e1cc 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -43,7 +43,6 @@ extern int pciehp_poll_mode;
43extern int pciehp_poll_time; 43extern int pciehp_poll_time;
44extern int pciehp_debug; 44extern int pciehp_debug;
45extern int pciehp_force; 45extern int pciehp_force;
46extern int pciehp_slot_with_bus;
47extern struct workqueue_struct *pciehp_wq; 46extern struct workqueue_struct *pciehp_wq;
48 47
49#define dbg(format, arg...) \ 48#define dbg(format, arg...) \
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 3677495c4f91..4fd5355bc3b5 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -41,7 +41,6 @@ int pciehp_debug;
41int pciehp_poll_mode; 41int pciehp_poll_mode;
42int pciehp_poll_time; 42int pciehp_poll_time;
43int pciehp_force; 43int pciehp_force;
44int pciehp_slot_with_bus;
45struct workqueue_struct *pciehp_wq; 44struct workqueue_struct *pciehp_wq;
46 45
47#define DRIVER_VERSION "0.4" 46#define DRIVER_VERSION "0.4"
@@ -56,12 +55,10 @@ module_param(pciehp_debug, bool, 0644);
56module_param(pciehp_poll_mode, bool, 0644); 55module_param(pciehp_poll_mode, bool, 0644);
57module_param(pciehp_poll_time, int, 0644); 56module_param(pciehp_poll_time, int, 0644);
58module_param(pciehp_force, bool, 0644); 57module_param(pciehp_force, bool, 0644);
59module_param(pciehp_slot_with_bus, bool, 0644);
60MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not"); 58MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
61MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not"); 59MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
62MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds"); 60MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
63MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing"); 61MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing");
64MODULE_PARM_DESC(pciehp_slot_with_bus, "Use bus number in the slot name");
65 62
66#define PCIE_MODULE_NAME "pciehp" 63#define PCIE_MODULE_NAME "pciehp"
67 64
@@ -194,6 +191,7 @@ static int init_slots(struct controller *ctrl)
194 struct slot *slot; 191 struct slot *slot;
195 struct hotplug_slot *hotplug_slot; 192 struct hotplug_slot *hotplug_slot;
196 struct hotplug_slot_info *info; 193 struct hotplug_slot_info *info;
194 int len, dup = 1;
197 int retval = -ENOMEM; 195 int retval = -ENOMEM;
198 196
199 list_for_each_entry(slot, &ctrl->slot_list, slot_list) { 197 list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
@@ -220,15 +218,24 @@ static int init_slots(struct controller *ctrl)
220 dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " 218 dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x "
221 "slot_device_offset=%x\n", slot->bus, slot->device, 219 "slot_device_offset=%x\n", slot->bus, slot->device,
222 slot->hp_slot, slot->number, ctrl->slot_device_offset); 220 slot->hp_slot, slot->number, ctrl->slot_device_offset);
221duplicate_name:
223 retval = pci_hp_register(hotplug_slot, 222 retval = pci_hp_register(hotplug_slot,
224 ctrl->pci_dev->subordinate, 223 ctrl->pci_dev->subordinate,
225 slot->device); 224 slot->device);
226 if (retval) { 225 if (retval) {
226 /*
227 * If slot N already exists, we'll try to create
228 * slot N-1, N-2 ... N-M, until we overflow.
229 */
230 if (retval == -EEXIST) {
231 len = snprintf(slot->name, SLOT_NAME_SIZE,
232 "%d-%d", slot->number, dup++);
233 if (len < SLOT_NAME_SIZE)
234 goto duplicate_name;
235 else
236 err("duplicate slot name overflow\n");
237 }
227 err("pci_hp_register failed with error %d\n", retval); 238 err("pci_hp_register failed with error %d\n", retval);
228 if (retval == -EEXIST)
229 err("Failed to register slot because of name "
230 "collision. Try \'pciehp_slot_with_bus\' "
231 "module option.\n");
232 goto error_info; 239 goto error_info;
233 } 240 }
234 /* create additional sysfs entries */ 241 /* create additional sysfs entries */
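The new registration path drops the pciehp_slot_with_bus escape hatch and instead retries on a name collision, appending "-1", "-2", ... until pci_hp_register() succeeds or the suffix no longer fits (snprintf() returning a length >= the buffer size signals truncation). A self-contained userspace sketch of that loop; register_name() is a toy stand-in for pci_hp_register(), not the kernel API:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define SLOT_NAME_SIZE 10

    static int register_name(const char *name)
    {
        static const char *taken[] = { "5", "5-1", "5-2" };
        for (size_t i = 0; i < sizeof(taken) / sizeof(taken[0]); i++)
            if (!strcmp(taken[i], name))
                return -EEXIST;    /* name already registered */
        return 0;
    }

    static int register_slot(int number)
    {
        char name[SLOT_NAME_SIZE];
        int len, dup = 1, ret;

        snprintf(name, SLOT_NAME_SIZE, "%d", number);
    duplicate_name:
        ret = register_name(name);
        if (ret == -EEXIST) {
            len = snprintf(name, SLOT_NAME_SIZE, "%d-%d", number, dup++);
            if (len < SLOT_NAME_SIZE)
                goto duplicate_name;    /* try the next suffix */
            fprintf(stderr, "duplicate slot name overflow\n");
        }
        return ret;
    }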
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index ad27e9e225a6..ab31f5ba665d 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -1030,15 +1030,6 @@ static void pcie_shutdown_notification(struct controller *ctrl)
1030 pciehp_free_irq(ctrl); 1030 pciehp_free_irq(ctrl);
1031} 1031}
1032 1032
1033static void make_slot_name(struct slot *slot)
1034{
1035 if (pciehp_slot_with_bus)
1036 snprintf(slot->name, SLOT_NAME_SIZE, "%04d_%04d",
1037 slot->bus, slot->number);
1038 else
1039 snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number);
1040}
1041
1042static int pcie_init_slot(struct controller *ctrl) 1033static int pcie_init_slot(struct controller *ctrl)
1043{ 1034{
1044 struct slot *slot; 1035 struct slot *slot;
@@ -1053,7 +1044,7 @@ static int pcie_init_slot(struct controller *ctrl)
1053 slot->device = ctrl->slot_device_offset + slot->hp_slot; 1044 slot->device = ctrl->slot_device_offset + slot->hp_slot;
1054 slot->hpc_ops = ctrl->hpc_ops; 1045 slot->hpc_ops = ctrl->hpc_ops;
1055 slot->number = ctrl->first_slot; 1046 slot->number = ctrl->first_slot;
1056 make_slot_name(slot); 1047 snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number);
1057 mutex_init(&slot->lock); 1048 mutex_init(&slot->lock);
1058 INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); 1049 INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
1059 list_add(&slot->slot_list, &ctrl->slot_list); 1050 list_add(&slot->slot_list, &ctrl->slot_list);
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index a8cbd039b85b..cc38615395f1 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -39,7 +39,6 @@
39int shpchp_debug; 39int shpchp_debug;
40int shpchp_poll_mode; 40int shpchp_poll_mode;
41int shpchp_poll_time; 41int shpchp_poll_time;
42static int shpchp_slot_with_bus;
43struct workqueue_struct *shpchp_wq; 42struct workqueue_struct *shpchp_wq;
44 43
45#define DRIVER_VERSION "0.4" 44#define DRIVER_VERSION "0.4"
@@ -53,11 +52,9 @@ MODULE_LICENSE("GPL");
53module_param(shpchp_debug, bool, 0644); 52module_param(shpchp_debug, bool, 0644);
54module_param(shpchp_poll_mode, bool, 0644); 53module_param(shpchp_poll_mode, bool, 0644);
55module_param(shpchp_poll_time, int, 0644); 54module_param(shpchp_poll_time, int, 0644);
56module_param(shpchp_slot_with_bus, bool, 0644);
57MODULE_PARM_DESC(shpchp_debug, "Debugging mode enabled or not"); 55MODULE_PARM_DESC(shpchp_debug, "Debugging mode enabled or not");
58MODULE_PARM_DESC(shpchp_poll_mode, "Using polling mechanism for hot-plug events or not"); 56MODULE_PARM_DESC(shpchp_poll_mode, "Using polling mechanism for hot-plug events or not");
59MODULE_PARM_DESC(shpchp_poll_time, "Polling mechanism frequency, in seconds"); 57MODULE_PARM_DESC(shpchp_poll_time, "Polling mechanism frequency, in seconds");
60MODULE_PARM_DESC(shpchp_slot_with_bus, "Use bus number in the slot name");
61 58
62#define SHPC_MODULE_NAME "shpchp" 59#define SHPC_MODULE_NAME "shpchp"
63 60
@@ -99,23 +96,13 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
99 kfree(slot); 96 kfree(slot);
100} 97}
101 98
102static void make_slot_name(struct slot *slot)
103{
104 if (shpchp_slot_with_bus)
105 snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%04d_%04d",
106 slot->bus, slot->number);
107 else
108 snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%d",
109 slot->number);
110}
111
112static int init_slots(struct controller *ctrl) 99static int init_slots(struct controller *ctrl)
113{ 100{
114 struct slot *slot; 101 struct slot *slot;
115 struct hotplug_slot *hotplug_slot; 102 struct hotplug_slot *hotplug_slot;
116 struct hotplug_slot_info *info; 103 struct hotplug_slot_info *info;
117 int retval = -ENOMEM; 104 int retval = -ENOMEM;
118 int i; 105 int i, len, dup = 1;
119 106
120 for (i = 0; i < ctrl->num_slots; i++) { 107 for (i = 0; i < ctrl->num_slots; i++) {
121 slot = kzalloc(sizeof(*slot), GFP_KERNEL); 108 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
@@ -146,7 +133,7 @@ static int init_slots(struct controller *ctrl)
146 /* register this slot with the hotplug pci core */ 133 /* register this slot with the hotplug pci core */
147 hotplug_slot->private = slot; 134 hotplug_slot->private = slot;
148 hotplug_slot->release = &release_slot; 135 hotplug_slot->release = &release_slot;
149 make_slot_name(slot); 136 snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number);
150 hotplug_slot->ops = &shpchp_hotplug_slot_ops; 137 hotplug_slot->ops = &shpchp_hotplug_slot_ops;
151 138
152 get_power_status(hotplug_slot, &info->power_status); 139 get_power_status(hotplug_slot, &info->power_status);
@@ -157,14 +144,23 @@ static int init_slots(struct controller *ctrl)
157 dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " 144 dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x "
158 "slot_device_offset=%x\n", slot->bus, slot->device, 145 "slot_device_offset=%x\n", slot->bus, slot->device,
159 slot->hp_slot, slot->number, ctrl->slot_device_offset); 146 slot->hp_slot, slot->number, ctrl->slot_device_offset);
147duplicate_name:
160 retval = pci_hp_register(slot->hotplug_slot, 148 retval = pci_hp_register(slot->hotplug_slot,
161 ctrl->pci_dev->subordinate, slot->device); 149 ctrl->pci_dev->subordinate, slot->device);
162 if (retval) { 150 if (retval) {
151 /*
152 * If slot N already exists, we'll try to create
153 * slot N-1, N-2 ... N-M, until we overflow.
154 */
155 if (retval == -EEXIST) {
156 len = snprintf(slot->name, SLOT_NAME_SIZE,
157 "%d-%d", slot->number, dup++);
158 if (len < SLOT_NAME_SIZE)
159 goto duplicate_name;
160 else
161 err("duplicate slot name overflow\n");
162 }
163 err("pci_hp_register failed with error %d\n", retval); 163 err("pci_hp_register failed with error %d\n", retval);
164 if (retval == -EEXIST)
165 err("Failed to register slot because of name "
166 "collision. Try \'shpchp_slot_with_bus\' "
167 "module option.\n");
168 goto error_info; 164 goto error_info;
169 } 165 }
170 166
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 217814fef4ef..3b3b5f178797 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -280,6 +280,8 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
280 match_pci_dev_by_id); 280 match_pci_dev_by_id);
281 if (dev) 281 if (dev)
282 pdev = to_pci_dev(dev); 282 pdev = to_pci_dev(dev);
283 if (from)
284 pci_dev_put(from);
283 return pdev; 285 return pdev;
284} 286}
285 287
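The added pci_dev_put() makes pci_get_dev_by_id() honor the usual pci_get_* iterator contract: each call consumes the caller's reference on `from` and returns a new reference, so a plain walk neither leaks the previous device nor needs an explicit put. A userspace model of that contract (toy types, not the kernel API; the table keeps its own reference, so the puts here never drop the count to zero):

    #include <stddef.h>

    struct dev { int refcnt; int id; };

    static struct dev *dev_get(struct dev *d) { if (d) d->refcnt++; return d; }
    static void dev_put(struct dev *d) { if (d) d->refcnt--; }

    static struct dev *get_next(struct dev *table[], size_t n, struct dev *from)
    {
        size_t start = 0;

        if (from) {
            while (start < n && table[start] != from)
                start++;
            start++;
            dev_put(from);    /* dropping this reference is what the patch adds */
        }
        return start < n ? dev_get(table[start]) : NULL;
    }

    /* usage: struct dev *d = NULL;
     *        while ((d = get_next(table, n, d)) != NULL)
     *                ... use d; the next call releases it ...  */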
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index db85f1fb131e..711b3004b3e6 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -384,9 +384,10 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
384 * get minor, add to list 384 * get minor, add to list
385 */ 385 */
386 down_write(&dcssblk_devices_sem); 386 down_write(&dcssblk_devices_sem);
387 if (dcssblk_get_segment_by_name(local_buf)) { 387 if (dcssblk_get_device_by_name(local_buf)) {
388 up_write(&dcssblk_devices_sem);
388 rc = -EEXIST; 389 rc = -EEXIST;
389 goto release_gd; 390 goto unload_seg;
390 } 391 }
391 rc = dcssblk_assign_free_minor(dev_info); 392 rc = dcssblk_assign_free_minor(dev_info);
392 if (rc) { 393 if (rc) {
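Besides switching the duplicate check to dcssblk_get_device_by_name(), the hunk makes the -EEXIST exit drop dcssblk_devices_sem itself and retargets the goto from release_gd to unload_seg, matching how far setup has progressed at that point. The locking half, sketched in userspace with a pthread rwlock standing in for the rw-semaphore and an assumed lookup helper:

    #include <errno.h>
    #include <pthread.h>

    static pthread_rwlock_t devices_sem = PTHREAD_RWLOCK_INITIALIZER;

    static int lookup_device(const char *name)
    {
        (void)name;
        return 1;    /* pretend the name already exists */
    }

    static int add_device(const char *name)
    {
        int rc;

        pthread_rwlock_wrlock(&devices_sem);
        if (lookup_device(name)) {
            pthread_rwlock_unlock(&devices_sem);    /* the added unlock */
            rc = -EEXIST;
            goto unload_seg;
        }
        /* ... assign minor, add to list ... */
        pthread_rwlock_unlock(&devices_sem);
        return 0;
    unload_seg:
        /* ... unwind the earlier segment load, as the retargeted goto does ... */
        return rc;
    }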
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 3d36270a8b4d..661f9f21650a 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -217,6 +217,18 @@ static int sg_last_dev(void);
217#define SZ_SG_IOVEC sizeof(sg_iovec_t) 217#define SZ_SG_IOVEC sizeof(sg_iovec_t)
218#define SZ_SG_REQ_INFO sizeof(sg_req_info_t) 218#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
219 219
220static int sg_allow_access(struct file *filp, unsigned char *cmd)
221{
222 struct sg_fd *sfp = (struct sg_fd *)filp->private_data;
223 struct request_queue *q = sfp->parentdp->device->request_queue;
224
225 if (sfp->parentdp->device->type == TYPE_SCANNER)
226 return 0;
227
228 return blk_verify_command(&q->cmd_filter,
229 cmd, filp->f_mode & FMODE_WRITE);
230}
231
220static int 232static int
221sg_open(struct inode *inode, struct file *filp) 233sg_open(struct inode *inode, struct file *filp)
222{ 234{
@@ -689,7 +701,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
689 sg_remove_request(sfp, srp); 701 sg_remove_request(sfp, srp);
690 return -EFAULT; 702 return -EFAULT;
691 } 703 }
692 if (read_only && !blk_verify_command(file, cmnd)) { 704 if (read_only && sg_allow_access(file, cmnd)) {
693 sg_remove_request(sfp, srp); 705 sg_remove_request(sfp, srp);
694 return -EPERM; 706 return -EPERM;
695 } 707 }
@@ -793,6 +805,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
793 805
794 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 806 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
795 return -ENXIO; 807 return -ENXIO;
808
796 SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n", 809 SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
797 sdp->disk->disk_name, (int) cmd_in)); 810 sdp->disk->disk_name, (int) cmd_in));
798 read_only = (O_RDWR != (filp->f_flags & O_ACCMODE)); 811 read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
@@ -1061,7 +1074,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
1061 1074
1062 if (copy_from_user(&opcode, siocp->data, 1)) 1075 if (copy_from_user(&opcode, siocp->data, 1))
1063 return -EFAULT; 1076 return -EFAULT;
1064 if (!blk_verify_command(filp, &opcode)) 1077 if (sg_allow_access(filp, &opcode))
1065 return -EPERM; 1078 return -EPERM;
1066 } 1079 }
1067 return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p); 1080 return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p);
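Two things change in sg.c: command vetting now goes through the request queue's per-device cmd_filter (blk_verify_command() takes the filter explicitly), and the new sg_allow_access() wrapper waives filtering for TYPE_SCANNER devices. Note the inverted sense at the call sites: the wrapper returns 0 when access is allowed and an error when it is not, so the old !blk_verify_command(...) tests become plain sg_allow_access(...). A toy model of the convention, with a stand-in verifier:

    #include <errno.h>

    #define TYPE_SCANNER 6    /* SCSI device type, as in scsi.h */

    /* stand-in for blk_verify_command(): 0 = permitted, -EPERM = not */
    static int verify_command(const unsigned char *cmd, int may_write)
    {
        (void)may_write;
        return cmd[0] == 0x00 ? 0 : -EPERM;    /* toy whitelist: TEST UNIT READY */
    }

    static int allow_access(int dev_type, const unsigned char *cmd, int may_write)
    {
        if (dev_type == TYPE_SCANNER)
            return 0;    /* scanners bypass the filter entirely */
        return verify_command(cmd, may_write);
    }

    /* caller, matching the patched sites:
     *     if (read_only && allow_access(type, cmd, w))
     *             return -EPERM;                        */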
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index bfef604160d1..62eab43152d2 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -158,7 +158,7 @@ static inline s64 towards_target(struct virtio_balloon *vb)
158 vb->vdev->config->get(vb->vdev, 158 vb->vdev->config->get(vb->vdev,
159 offsetof(struct virtio_balloon_config, num_pages), 159 offsetof(struct virtio_balloon_config, num_pages),
160 &v, sizeof(v)); 160 &v, sizeof(v));
161 return v - vb->num_pages; 161 return (s64)v - vb->num_pages;
162} 162}
163 163
164static void update_balloon_size(struct virtio_balloon *vb) 164static void update_balloon_size(struct virtio_balloon *vb)
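The balloon fix is pure C arithmetic: config->get() fills a u32, and num_pages is likewise unsigned, so without the cast the subtraction wraps in 32 bits before being widened to the s64 return type; a target below the current size then reads as a huge positive delta instead of a negative one. A runnable demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t target = 100, num_pages = 200;

        int64_t wrong = (int64_t)(target - num_pages);    /* wraps first: 4294967196 */
        int64_t right = (int64_t)target - num_pages;      /* widens first: -100 */

        printf("wrong=%lld right=%lld\n", (long long)wrong, (long long)right);
        return 0;
    }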
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index a5bc91ae6ff6..d0e87cbe157c 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -102,7 +102,7 @@ static void do_suspend(void)
102 /* XXX use normal device tree? */ 102 /* XXX use normal device tree? */
103 xenbus_suspend(); 103 xenbus_suspend();
104 104
105 err = stop_machine_run(xen_suspend, &cancelled, 0); 105 err = stop_machine(xen_suspend, &cancelled, &cpumask_of_cpu(0));
106 if (err) { 106 if (err) {
107 printk(KERN_ERR "failed to start xen_suspend: %d\n", err); 107 printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
108 goto out; 108 goto out;
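This is the mechanical half of the stop_machine API change: stop_machine_run() took a plain CPU number, while stop_machine() takes a cpumask naming the CPU(s) the callback must run on, and the suspend path pins xen_suspend to CPU 0 via cpumask_of_cpu(0). The call shape as a kernel-side sketch (for the API shape only, not standalone code; the callback body is a placeholder):

    #include <linux/cpumask.h>
    #include <linux/stop_machine.h>

    static int xen_suspend_sketch(void *data)
    {
        int *cancelled = data;

        *cancelled = 0;    /* placeholder for the real suspend work */
        return 0;
    }

    static int run_suspend(void)
    {
        int cancelled;

        /* old: stop_machine_run(xen_suspend_sketch, &cancelled, 0);
         * new: the last argument is a cpumask, not a CPU number */
        return stop_machine(xen_suspend_sketch, &cancelled,
                            &cpumask_of_cpu(0));
    }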