Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/cciss.c | 86
-rw-r--r--  drivers/bluetooth/hci_usb.c | 25
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c | 14
-rw-r--r--  drivers/char/synclink.c | 14
-rw-r--r--  drivers/char/synclink_gt.c | 14
-rw-r--r--  drivers/char/synclinkmp.c | 14
-rw-r--r--  drivers/cpufreq/cpufreq.c | 40
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 20
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c | 3
-rw-r--r--  drivers/dma/ioatdma.c | 2
-rw-r--r--  drivers/fc4/fc.c | 4
-rw-r--r--  drivers/ide/ide-disk.c | 2
-rw-r--r--  drivers/ide/ide-dma.c | 2
-rw-r--r--  drivers/ide/ide.c | 5
-rw-r--r--  drivers/ide/pci/it821x.c | 11
-rw-r--r--  drivers/infiniband/core/mad.c | 22
-rw-r--r--  drivers/infiniband/core/user_mad.c | 87
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 42
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c | 76
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_keys.c | 15
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c | 5
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c | 5
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c | 3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 23
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 8
-rw-r--r--  drivers/message/fusion/Kconfig | 2
-rw-r--r--  drivers/message/fusion/Makefile | 1
-rw-r--r--  drivers/message/fusion/mptbase.c | 99
-rw-r--r--  drivers/message/fusion/mptbase.h | 13
-rw-r--r--  drivers/message/fusion/mptctl.c | 4
-rw-r--r--  drivers/message/fusion/mptctl.h | 5
-rw-r--r--  drivers/message/fusion/mptfc.c | 14
-rw-r--r--  drivers/message/fusion/mptsas.c | 109
-rw-r--r--  drivers/message/fusion/mptscsih.c | 118
-rw-r--r--  drivers/message/fusion/mptspi.c | 10
-rw-r--r--  drivers/net/dummy.c | 1
-rw-r--r--  drivers/net/e1000/e1000.h | 3
-rw-r--r--  drivers/net/e1000/e1000_main.c | 52
-rw-r--r--  drivers/net/ifb.c | 1
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 2
-rw-r--r--  drivers/net/skge.c | 5
-rw-r--r--  drivers/net/sky2.c | 7
-rw-r--r--  drivers/net/spider_net.c | 580
-rw-r--r--  drivers/net/spider_net.h | 73
-rw-r--r--  drivers/net/sunhme.c | 9
-rw-r--r--  drivers/net/sunlance.c | 8
-rw-r--r--  drivers/net/tg3.c | 116
-rw-r--r--  drivers/net/wan/c101.c | 4
-rw-r--r--  drivers/net/wan/hdlc_ppp.c | 1
-rw-r--r--  drivers/net/wan/hdlc_raw.c | 1
-rw-r--r--  drivers/net/wan/hdlc_raw_eth.c | 1
-rw-r--r--  drivers/net/wan/hdlc_x25.c | 1
-rw-r--r--  drivers/net/wan/n2.c | 3
-rw-r--r--  drivers/net/wireless/Kconfig | 1
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c | 2
-rw-r--r--  drivers/net/wireless/orinoco.c | 4
-rw-r--r--  drivers/net/wireless/zd1201.c | 2
-rw-r--r--  drivers/s390/block/xpram.c | 17
-rw-r--r--  drivers/s390/char/raw3270.c | 52
-rw-r--r--  drivers/s390/char/tape_class.c | 10
-rw-r--r--  drivers/s390/char/tape_core.c | 18
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 10
-rw-r--r--  drivers/s390/cio/cmf.c | 1
-rw-r--r--  drivers/s390/cio/device_fsm.c | 3
-rw-r--r--  drivers/s390/net/ctcmain.c | 21
-rw-r--r--  drivers/s390/net/qeth_main.c | 7
-rw-r--r--  drivers/sbus/sbus.c | 2
-rw-r--r--  drivers/scsi/53c7xx.c | 8
-rw-r--r--  drivers/scsi/NCR53C9x.c | 18
-rw-r--r--  drivers/scsi/NCR_D700.c | 14
-rw-r--r--  drivers/scsi/aha152x.c | 43
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_core.c | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 21
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 1
-rw-r--r--  drivers/scsi/arm/fas216.c | 2
-rw-r--r--  drivers/scsi/ata_piix.c | 165
-rw-r--r--  drivers/scsi/atari_NCR5380.c | 2
-rw-r--r--  drivers/scsi/constants.c | 126
-rw-r--r--  drivers/scsi/esp.c | 16
-rw-r--r--  drivers/scsi/ibmvscsi/iseries_vscsi.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/rpa_vscsi.c | 1
-rw-r--r--  drivers/scsi/jazz_esp.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 9
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 89
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 65
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 59
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 11
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 64
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 55
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/mac53c94.c | 2
-rw-r--r--  drivers/scsi/mesh.c | 2
-rw-r--r--  drivers/scsi/pluto.c | 2
-rw-r--r--  drivers/scsi/qlogicpti.c | 4
-rw-r--r--  drivers/scsi/scsi.c | 11
-rw-r--r--  drivers/scsi/scsi_debug.c | 72
-rw-r--r--  drivers/scsi/scsi_error.c | 210
-rw-r--r--  drivers/scsi/scsi_ioctl.c | 5
-rw-r--r--  drivers/scsi/scsi_lib.c | 88
-rw-r--r--  drivers/scsi/scsi_priv.h | 1
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 64
-rw-r--r--  drivers/scsi/sd.c | 3
-rw-r--r--  drivers/scsi/seagate.c | 2
-rw-r--r--  drivers/scsi/sr.c | 5
-rw-r--r--  drivers/scsi/st.c | 7
-rw-r--r--  drivers/scsi/sun3_NCR5380.c | 2
-rw-r--r--  drivers/scsi/sun3x_esp.c | 2
-rw-r--r--  drivers/scsi/wd33c93.c | 2
-rw-r--r--  drivers/serial/sunsab.c | 7
-rw-r--r--  drivers/serial/sunzilog.c | 125
116 files changed, 1855 insertions, 1488 deletions
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 1c4df22dfd2a..7b0eca703a67 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1233,6 +1233,50 @@ static inline void complete_buffers(struct bio *bio, int status)
 	}
 }
 
+static void cciss_check_queues(ctlr_info_t *h)
+{
+	int start_queue = h->next_to_run;
+	int i;
+
+	/* check to see if we have maxed out the number of commands that can
+	 * be placed on the queue. If so then exit. We do this check here
+	 * in case the interrupt we serviced was from an ioctl and did not
+	 * free any new commands.
+	 */
+	if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
+		return;
+
+	/* We have room on the queue for more commands. Now we need to queue
+	 * them up. We will also keep track of the next queue to run so
+	 * that every queue gets a chance to be started first.
+	 */
+	for (i = 0; i < h->highest_lun + 1; i++) {
+		int curr_queue = (start_queue + i) % (h->highest_lun + 1);
+		/* make sure the disk has been added and the drive is real
+		 * because this can be called from the middle of init_one.
+		 */
+		if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
+			continue;
+		blk_start_queue(h->gendisk[curr_queue]->queue);
+
+		/* check to see if we have maxed out the number of commands
+		 * that can be placed on the queue.
+		 */
+		if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
+			if (curr_queue == start_queue) {
+				h->next_to_run =
+					(start_queue + 1) % (h->highest_lun + 1);
+				break;
+			} else {
+				h->next_to_run = curr_queue;
+				break;
+			}
+		} else {
+			curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
+		}
+	}
+}
+
 static void cciss_softirq_done(struct request *rq)
 {
 	CommandList_struct *cmd = rq->completion_data;
@@ -1264,6 +1308,7 @@ static void cciss_softirq_done(struct request *rq)
 	spin_lock_irqsave(&h->lock, flags);
 	end_that_request_last(rq, rq->errors);
 	cmd_free(h, cmd, 1);
+	cciss_check_queues(h);
 	spin_unlock_irqrestore(&h->lock, flags);
 }
 
@@ -2528,8 +2573,6 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
 	CommandList_struct *c;
 	unsigned long flags;
 	__u32 a, a1, a2;
-	int j;
-	int start_queue = h->next_to_run;
 
 	if (interrupt_not_for_us(h))
 		return IRQ_NONE;
@@ -2588,45 +2631,6 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
 		}
 	}
 
-	/* check to see if we have maxed out the number of commands that can
-	 * be placed on the queue. If so then exit. We do this check here
-	 * in case the interrupt we serviced was from an ioctl and did not
-	 * free any new commands.
-	 */
-	if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
-		goto cleanup;
-
-	/* We have room on the queue for more commands. Now we need to queue
-	 * them up. We will also keep track of the next queue to run so
-	 * that every queue gets a chance to be started first.
-	 */
-	for (j = 0; j < h->highest_lun + 1; j++) {
-		int curr_queue = (start_queue + j) % (h->highest_lun + 1);
-		/* make sure the disk has been added and the drive is real
-		 * because this can be called from the middle of init_one.
-		 */
-		if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
-			continue;
-		blk_start_queue(h->gendisk[curr_queue]->queue);
-
-		/* check to see if we have maxed out the number of commands
-		 * that can be placed on the queue.
-		 */
-		if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
-			if (curr_queue == start_queue) {
-				h->next_to_run =
-					(start_queue + 1) % (h->highest_lun + 1);
-				goto cleanup;
-			} else {
-				h->next_to_run = curr_queue;
-				goto cleanup;
-			}
-		} else {
-			curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
-		}
-	}
-
-cleanup:
 	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
 	return IRQ_HANDLED;
 }
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index 6a0c2230f82f..e2d4beac7420 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -67,6 +67,8 @@ static int ignore = 0;
 static int ignore_dga = 0;
 static int ignore_csr = 0;
 static int ignore_sniffer = 0;
+static int disable_scofix = 0;
+static int force_scofix = 0;
 static int reset = 0;
 
 #ifdef CONFIG_BT_HCIUSB_SCO
@@ -107,9 +109,12 @@ static struct usb_device_id blacklist_ids[] = {
 	{ USB_DEVICE(0x0a5c, 0x2033), .driver_info = HCI_IGNORE },
 
 	/* Broadcom BCM2035 */
-	{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = HCI_RESET | HCI_BROKEN_ISOC },
+	{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU },
 	{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = HCI_BCM92035 },
 
+	/* IBM/Lenovo ThinkPad with Broadcom chip */
+	{ USB_DEVICE(0x0a5c, 0x201e), .driver_info = HCI_WRONG_SCO_MTU },
+
 	/* Microsoft Wireless Transceiver for Bluetooth 2.0 */
 	{ USB_DEVICE(0x045e, 0x009c), .driver_info = HCI_RESET },
 
@@ -119,11 +124,13 @@ static struct usb_device_id blacklist_ids[] = {
 	/* ISSC Bluetooth Adapter v3.1 */
 	{ USB_DEVICE(0x1131, 0x1001), .driver_info = HCI_RESET },
 
-	/* RTX Telecom based adapter with buggy SCO support */
+	/* RTX Telecom based adapters with buggy SCO support */
 	{ USB_DEVICE(0x0400, 0x0807), .driver_info = HCI_BROKEN_ISOC },
+	{ USB_DEVICE(0x0400, 0x080a), .driver_info = HCI_BROKEN_ISOC },
 
-	/* Belkin F8T012 */
+	/* Belkin F8T012 and F8T013 devices */
 	{ USB_DEVICE(0x050d, 0x0012), .driver_info = HCI_WRONG_SCO_MTU },
+	{ USB_DEVICE(0x050d, 0x0013), .driver_info = HCI_WRONG_SCO_MTU },
 
 	/* Digianswer devices */
 	{ USB_DEVICE(0x08fd, 0x0001), .driver_info = HCI_DIGIANSWER },
@@ -990,8 +997,10 @@ static int hci_usb_probe(struct usb_interface *intf, const struct usb_device_id
 	if (reset || id->driver_info & HCI_RESET)
 		set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks);
 
-	if (id->driver_info & HCI_WRONG_SCO_MTU)
-		set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
+	if (force_scofix || id->driver_info & HCI_WRONG_SCO_MTU) {
+		if (!disable_scofix)
+			set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
+	}
 
 	if (id->driver_info & HCI_SNIFFER) {
 		if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997)
@@ -1161,6 +1170,12 @@ MODULE_PARM_DESC(ignore_csr, "Ignore devices with id 0a12:0001");
 module_param(ignore_sniffer, bool, 0644);
 MODULE_PARM_DESC(ignore_sniffer, "Ignore devices with id 0a12:0002");
 
+module_param(disable_scofix, bool, 0644);
+MODULE_PARM_DESC(disable_scofix, "Disable fixup of wrong SCO buffer size");
+
+module_param(force_scofix, bool, 0644);
+MODULE_PARM_DESC(force_scofix, "Force fixup of wrong SCO buffers size");
+
module_param(reset, bool, 0644);
 MODULE_PARM_DESC(reset, "Send HCI reset command on initialization");
 
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 17bc8abd5df5..00f574cbb0d4 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -1174,8 +1174,12 @@ static void dcd_change(MGSLPC_INFO *info)
 	else
 		info->input_signal_events.dcd_down++;
 #ifdef CONFIG_HDLC
-	if (info->netcount)
-		hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, info->netdev);
+	if (info->netcount) {
+		if (info->serial_signals & SerialSignal_DCD)
+			netif_carrier_on(info->netdev);
+		else
+			netif_carrier_off(info->netdev);
+	}
#endif
 	wake_up_interruptible(&info->status_event_wait_q);
 	wake_up_interruptible(&info->event_wait_q);
@@ -4251,8 +4255,10 @@ static int hdlcdev_open(struct net_device *dev)
 	spin_lock_irqsave(&info->lock, flags);
 	get_signals(info);
 	spin_unlock_irqrestore(&info->lock, flags);
-	hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev);
-
+	if (info->serial_signals & SerialSignal_DCD)
+		netif_carrier_on(dev);
+	else
+		netif_carrier_off(dev);
 	return 0;
 }
 
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index df782dd1098c..78b1b1a2732b 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -1344,8 +1344,12 @@ static void mgsl_isr_io_pin( struct mgsl_struct *info )
 	} else
 		info->input_signal_events.dcd_down++;
 #ifdef CONFIG_HDLC
-	if (info->netcount)
-		hdlc_set_carrier(status & MISCSTATUS_DCD, info->netdev);
+	if (info->netcount) {
+		if (status & MISCSTATUS_DCD)
+			netif_carrier_on(info->netdev);
+		else
+			netif_carrier_off(info->netdev);
+	}
#endif
 	}
 	if (status & MISCSTATUS_CTS_LATCHED)
@@ -7844,8 +7848,10 @@ static int hdlcdev_open(struct net_device *dev)
 	spin_lock_irqsave(&info->irq_spinlock, flags);
 	usc_get_serial_signals(info);
 	spin_unlock_irqrestore(&info->irq_spinlock, flags);
-	hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev);
-
+	if (info->serial_signals & SerialSignal_DCD)
+		netif_carrier_on(dev);
+	else
+		netif_carrier_off(dev);
 	return 0;
 }
 
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index e829594195c1..b2dbbdb1bf81 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -1497,8 +1497,10 @@ static int hdlcdev_open(struct net_device *dev)
 	spin_lock_irqsave(&info->lock, flags);
 	get_signals(info);
 	spin_unlock_irqrestore(&info->lock, flags);
-	hdlc_set_carrier(info->signals & SerialSignal_DCD, dev);
-
+	if (info->signals & SerialSignal_DCD)
+		netif_carrier_on(dev);
+	else
+		netif_carrier_off(dev);
 	return 0;
 }
 
@@ -1997,8 +1999,12 @@ static void dcd_change(struct slgt_info *info)
 		info->input_signal_events.dcd_down++;
 	}
 #ifdef CONFIG_HDLC
-	if (info->netcount)
-		hdlc_set_carrier(info->signals & SerialSignal_DCD, info->netdev);
+	if (info->netcount) {
+		if (info->signals & SerialSignal_DCD)
+			netif_carrier_on(info->netdev);
+		else
+			netif_carrier_off(info->netdev);
+	}
#endif
 	wake_up_interruptible(&info->status_event_wait_q);
 	wake_up_interruptible(&info->event_wait_q);
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index 1e443a233f51..66f3754fbbdf 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -1752,8 +1752,10 @@ static int hdlcdev_open(struct net_device *dev)
 	spin_lock_irqsave(&info->lock, flags);
 	get_signals(info);
 	spin_unlock_irqrestore(&info->lock, flags);
-	hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev);
-
+	if (info->serial_signals & SerialSignal_DCD)
+		netif_carrier_on(dev);
+	else
+		netif_carrier_off(dev);
 	return 0;
 }
 
@@ -2522,8 +2524,12 @@ void isr_io_pin( SLMP_INFO *info, u16 status )
 	} else
 		info->input_signal_events.dcd_down++;
 #ifdef CONFIG_HDLC
-	if (info->netcount)
-		hdlc_set_carrier(status & SerialSignal_DCD, info->netdev);
+	if (info->netcount) {
+		if (status & SerialSignal_DCD)
+			netif_carrier_on(info->netdev);
+		else
+			netif_carrier_off(info->netdev);
+	}
#endif
 	}
 	if (status & MISCSTATUS_CTS_LATCHED)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8d328186f774..bc1088d9b379 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -364,10 +364,12 @@ static ssize_t store_##file_name \
 	if (ret != 1) \
 		return -EINVAL; \
 \
+	lock_cpu_hotplug(); \
 	mutex_lock(&policy->lock); \
 	ret = __cpufreq_set_policy(policy, &new_policy); \
 	policy->user_policy.object = policy->object; \
 	mutex_unlock(&policy->lock); \
+	unlock_cpu_hotplug(); \
 \
 	return ret ? ret : count; \
 }
@@ -1197,20 +1199,18 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
 *********************************************************************/
 
 
+/* Must be called with lock_cpu_hotplug held */
 int __cpufreq_driver_target(struct cpufreq_policy *policy,
 			    unsigned int target_freq,
 			    unsigned int relation)
 {
 	int retval = -EINVAL;
 
-	lock_cpu_hotplug();
 	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
 		target_freq, relation);
 	if (cpu_online(policy->cpu) && cpufreq_driver->target)
 		retval = cpufreq_driver->target(policy, target_freq, relation);
 
-	unlock_cpu_hotplug();
-
 	return retval;
 }
 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
@@ -1225,17 +1225,23 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 	if (!policy)
 		return -EINVAL;
 
+	lock_cpu_hotplug();
 	mutex_lock(&policy->lock);
 
 	ret = __cpufreq_driver_target(policy, target_freq, relation);
 
 	mutex_unlock(&policy->lock);
+	unlock_cpu_hotplug();
 
 	cpufreq_cpu_put(policy);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
 
+/*
+ * Locking: Must be called with the lock_cpu_hotplug() lock held
+ * when "event" is CPUFREQ_GOV_LIMITS
+ */
 
 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
 {
@@ -1257,24 +1263,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
 }
 
 
-int cpufreq_governor(unsigned int cpu, unsigned int event)
-{
-	int ret = 0;
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-
-	if (!policy)
-		return -EINVAL;
-
-	mutex_lock(&policy->lock);
-	ret = __cpufreq_governor(policy, event);
-	mutex_unlock(&policy->lock);
-
-	cpufreq_cpu_put(policy);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(cpufreq_governor);
-
-
 int cpufreq_register_governor(struct cpufreq_governor *governor)
 {
 	struct cpufreq_governor *t;
@@ -1342,6 +1330,9 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
 EXPORT_SYMBOL(cpufreq_get_policy);
 
 
+/*
+ * Locking: Must be called with the lock_cpu_hotplug() lock held
+ */
 static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy)
 {
 	int ret = 0;
@@ -1436,6 +1427,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
 	if (!data)
 		return -EINVAL;
 
+	lock_cpu_hotplug();
+
 	/* lock this CPU */
 	mutex_lock(&data->lock);
 
@@ -1446,6 +1439,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
 	data->user_policy.governor = data->governor;
 
 	mutex_unlock(&data->lock);
+
+	unlock_cpu_hotplug();
 	cpufreq_cpu_put(data);
 
 	return ret;
@@ -1469,6 +1464,7 @@ int cpufreq_update_policy(unsigned int cpu)
 	if (!data)
 		return -ENODEV;
 
+	lock_cpu_hotplug();
 	mutex_lock(&data->lock);
 
 	dprintk("updating policy for CPU %u\n", cpu);
@@ -1494,7 +1490,7 @@ int cpufreq_update_policy(unsigned int cpu)
 	ret = __cpufreq_set_policy(data, &policy);
 
 	mutex_unlock(&data->lock);
-
+	unlock_cpu_hotplug();
 	cpufreq_cpu_put(data);
 	return ret;
 }
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index b3ebc8f01975..c4c578defabf 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -525,7 +525,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
-		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
@@ -536,7 +535,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				this_dbs_info->cur_policy,
 				policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
-		unlock_cpu_hotplug();
 		break;
 	}
 	return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 87299924e735..52cf1f021825 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -239,6 +239,8 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 	total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
 			this_dbs_info->prev_cpu_wall);
 	this_dbs_info->prev_cpu_wall = cur_jiffies;
+	if (!total_ticks)
+		return;
 	/*
 	 * Every sampling_rate, we check, if current idle time is less
 	 * than 20% (default), then we try to increase frequency
@@ -304,7 +306,12 @@ static void do_dbs_timer(void *data)
 	unsigned int cpu = smp_processor_id();
 	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
 
+	if (!dbs_info->enable)
+		return;
+
+	lock_cpu_hotplug();
 	dbs_check_cpu(dbs_info);
+	unlock_cpu_hotplug();
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 }
@@ -319,11 +326,11 @@ static inline void dbs_timer_init(unsigned int cpu)
 	return;
 }
 
-static inline void dbs_timer_exit(unsigned int cpu)
+static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
-	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
-
-	cancel_rearming_delayed_workqueue(kondemand_wq, &dbs_info->work);
+	dbs_info->enable = 0;
+	cancel_delayed_work(&dbs_info->work);
+	flush_workqueue(kondemand_wq);
 }
 
 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -396,8 +403,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 	case CPUFREQ_GOV_STOP:
 		mutex_lock(&dbs_mutex);
-		dbs_timer_exit(policy->cpu);
-		this_dbs_info->enable = 0;
+		dbs_timer_exit(this_dbs_info);
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
 		if (dbs_enable == 0)
@@ -408,7 +414,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
-		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(this_dbs_info->cur_policy,
@@ -419,7 +424,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 						policy->min,
 						CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
-		unlock_cpu_hotplug();
 		break;
 	}
 	return 0;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 44ae5e5b94cf..a06c204589cd 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -18,6 +18,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/cpufreq.h>
+#include <linux/cpu.h>
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/sysfs.h>
@@ -70,6 +71,7 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
 
 	dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
 
+	lock_cpu_hotplug();
 	mutex_lock(&userspace_mutex);
 	if (!cpu_is_managed[policy->cpu])
 		goto err;
@@ -92,6 +94,7 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
 
  err:
 	mutex_unlock(&userspace_mutex);
+	unlock_cpu_hotplug();
 	return ret;
 }
 
diff --git a/drivers/dma/ioatdma.c b/drivers/dma/ioatdma.c
index 78bf46d917b7..dbd4d6c3698e 100644
--- a/drivers/dma/ioatdma.c
+++ b/drivers/dma/ioatdma.c
@@ -828,7 +828,7 @@ static int __init ioat_init_module(void)
 	/* if forced, worst case is that rmmod hangs */
 	__unsafe(THIS_MODULE);
 
-	return pci_module_init(&ioat_pci_drv);
+	return pci_register_driver(&ioat_pci_drv);
 }
 
 module_init(ioat_init_module);
diff --git a/drivers/fc4/fc.c b/drivers/fc4/fc.c
index 66d03f242d3c..1a159e8843ca 100644
--- a/drivers/fc4/fc.c
+++ b/drivers/fc4/fc.c
@@ -429,7 +429,7 @@ static inline void fcp_scsi_receive(fc_channel *fc, int token, int status, fc_hd
 
 	if (fcmd->data) {
 		if (SCpnt->use_sg)
-			dma_unmap_sg(fc->dev, (struct scatterlist *)SCpnt->buffer,
+			dma_unmap_sg(fc->dev, (struct scatterlist *)SCpnt->request_buffer,
 				     SCpnt->use_sg,
 				     SCpnt->sc_data_direction);
 		else
@@ -810,7 +810,7 @@ static int fcp_scsi_queue_it(fc_channel *fc, Scsi_Cmnd *SCpnt, fcp_cmnd *fcmd, i
 				       SCpnt->request_bufflen,
 				       SCpnt->sc_data_direction);
 	} else {
-		struct scatterlist *sg = (struct scatterlist *)SCpnt->buffer;
+		struct scatterlist *sg = (struct scatterlist *)SCpnt->request_buffer;
 		int nents;
 
 		FCD(("XXX: Use_sg %d %d\n", SCpnt->use_sg, sg->length))
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index f712e4cfd9dc..7cf3eb023521 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -776,7 +776,7 @@ static void update_ordered(ide_drive_t *drive)
 	 * not available so we don't need to recheck that.
 	 */
 	capacity = idedisk_capacity(drive);
-	barrier = ide_id_has_flush_cache(id) &&
+	barrier = ide_id_has_flush_cache(id) && !drive->noflush &&
 		(drive->addressing == 0 || capacity <= (1ULL << 28) ||
 		 ide_id_has_flush_cache_ext(id));
 
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 98918fb6b2ce..7c3a13e1cf64 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -750,7 +750,7 @@ void ide_dma_verbose(ide_drive_t *drive)
 			goto bug_dma_off;
 		printk(", DMA");
 	} else if (id->field_valid & 1) {
-		printk(", BUG");
+		goto bug_dma_off;
 	}
 	return;
 bug_dma_off:
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 05fbd9298db7..defd4b4bd374 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -1539,7 +1539,7 @@ static int __init ide_setup(char *s)
 		const char *hd_words[] = {
 			"none", "noprobe", "nowerr", "cdrom", "serialize",
 			"autotune", "noautotune", "minus8", "swapdata", "bswap",
-			"minus11", "remap", "remap63", "scsi", NULL };
+			"noflush", "remap", "remap63", "scsi", NULL };
 		unit = s[2] - 'a';
 		hw = unit / MAX_DRIVES;
 		unit = unit % MAX_DRIVES;
@@ -1578,6 +1578,9 @@ static int __init ide_setup(char *s)
 			case -10: /* "bswap" */
 				drive->bswap = 1;
 				goto done;
+			case -11: /* noflush */
+				drive->noflush = 1;
+				goto done;
 			case -12: /* "remap" */
 				drive->remap_0_to_1 = 1;
 				goto done;
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index 3cb04424d351..e9bad185968a 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -498,9 +498,14 @@ static int config_chipset_for_dma (ide_drive_t *drive)
 {
 	u8 speed = ide_dma_speed(drive, it821x_ratemask(drive));
 
-	config_it821x_chipset_for_pio(drive, !speed);
-	it821x_tune_chipset(drive, speed);
-	return ide_dma_enable(drive);
+	if (speed) {
+		config_it821x_chipset_for_pio(drive, 0);
+		it821x_tune_chipset(drive, speed);
+
+		return ide_dma_enable(drive);
+	}
+
+	return 0;
 }
 
 /**
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 5ed4dab52a6f..1c3cfbbe6a97 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -167,6 +167,15 @@ static int is_vendor_method_in_use(
 	return 0;
 }
 
+int ib_response_mad(struct ib_mad *mad)
+{
+	return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
+		(mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
+		((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
+		 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
+}
+EXPORT_SYMBOL(ib_response_mad);
+
 /*
  * ib_register_mad_agent - Register to send/receive MADs
  */
@@ -570,13 +579,6 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
 }
 EXPORT_SYMBOL(ib_unregister_mad_agent);
 
-static inline int response_mad(struct ib_mad *mad)
-{
-	/* Trap represses are responses although response bit is reset */
-	return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
-		(mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
-}
-
 static void dequeue_mad(struct ib_mad_list_head *mad_list)
 {
 	struct ib_mad_queue *mad_queue;
@@ -723,7 +725,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	switch (ret)
 	{
 	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
-		if (response_mad(&mad_priv->mad.mad) &&
+		if (ib_response_mad(&mad_priv->mad.mad) &&
 		    mad_agent_priv->agent.recv_handler) {
 			local->mad_priv = mad_priv;
 			local->recv_mad_agent = mad_agent_priv;
@@ -1551,7 +1553,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
 	unsigned long flags;
 
 	spin_lock_irqsave(&port_priv->reg_lock, flags);
-	if (response_mad(mad)) {
+	if (ib_response_mad(mad)) {
 		u32 hi_tid;
 		struct ib_mad_agent_private *entry;
 
@@ -1799,7 +1801,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 	}
 
 	/* Complete corresponding request */
-	if (response_mad(mad_recv_wc->recv_buf.mad)) {
+	if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
 		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
 		if (!mad_send_wr) {
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index afe70a549c2f..1273f8807e84 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -112,8 +112,10 @@ struct ib_umad_device {
 struct ib_umad_file {
 	struct ib_umad_port *port;
 	struct list_head recv_list;
+	struct list_head send_list;
 	struct list_head port_list;
 	spinlock_t recv_lock;
+	spinlock_t send_lock;
 	wait_queue_head_t recv_wait;
 	struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
 	int agents_dead;
@@ -177,12 +179,21 @@ static int queue_packet(struct ib_umad_file *file,
 	return ret;
 }
 
+static void dequeue_send(struct ib_umad_file *file,
+			 struct ib_umad_packet *packet)
+{
+	spin_lock_irq(&file->send_lock);
+	list_del(&packet->list);
+	spin_unlock_irq(&file->send_lock);
+}
+
 static void send_handler(struct ib_mad_agent *agent,
 			 struct ib_mad_send_wc *send_wc)
 {
 	struct ib_umad_file *file = agent->context;
 	struct ib_umad_packet *packet = send_wc->send_buf->context[0];
 
+	dequeue_send(file, packet);
 	ib_destroy_ah(packet->msg->ah);
 	ib_free_send_mad(packet->msg);
 
@@ -370,6 +381,51 @@ static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
 	return 0;
 }
 
+static int same_destination(struct ib_user_mad_hdr *hdr1,
+			    struct ib_user_mad_hdr *hdr2)
+{
+	if (!hdr1->grh_present && !hdr2->grh_present)
+		return (hdr1->lid == hdr2->lid);
+
+	if (hdr1->grh_present && hdr2->grh_present)
+		return !memcmp(hdr1->gid, hdr2->gid, 16);
+
+	return 0;
+}
+
+static int is_duplicate(struct ib_umad_file *file,
+			struct ib_umad_packet *packet)
+{
+	struct ib_umad_packet *sent_packet;
+	struct ib_mad_hdr *sent_hdr, *hdr;
+
+	hdr = (struct ib_mad_hdr *) packet->mad.data;
+	list_for_each_entry(sent_packet, &file->send_list, list) {
+		sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data;
+
+		if ((hdr->tid != sent_hdr->tid) ||
+		    (hdr->mgmt_class != sent_hdr->mgmt_class))
+			continue;
+
+		/*
+		 * No need to be overly clever here. If two new operations have
+		 * the same TID, reject the second as a duplicate. This is more
+		 * restrictive than required by the spec.
+		 */
+		if (!ib_response_mad((struct ib_mad *) hdr)) {
+			if (!ib_response_mad((struct ib_mad *) sent_hdr))
+				return 1;
+			continue;
+		} else if (!ib_response_mad((struct ib_mad *) sent_hdr))
+			continue;
+
+		if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
+			return 1;
+	}
+
+	return 0;
+}
+
 static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 			     size_t count, loff_t *pos)
 {
@@ -379,7 +435,6 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	struct ib_ah_attr ah_attr;
 	struct ib_ah *ah;
 	struct ib_rmpp_mad *rmpp_mad;
-	u8 method;
 	__be64 *tid;
 	int ret, data_len, hdr_len, copy_offset, rmpp_active;
 
@@ -473,28 +528,36 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	}
 
 	/*
-	 * If userspace is generating a request that will generate a
-	 * response, we need to make sure the high-order part of the
-	 * transaction ID matches the agent being used to send the
-	 * MAD.
+	 * Set the high-order part of the transaction ID to make MADs from
+	 * different agents unique, and allow routing responses back to the
+	 * original requestor.
 	 */
-	method = ((struct ib_mad_hdr *) packet->msg->mad)->method;
-
-	if (!(method & IB_MGMT_METHOD_RESP) &&
-	    method != IB_MGMT_METHOD_TRAP_REPRESS &&
-	    method != IB_MGMT_METHOD_SEND) {
+	if (!ib_response_mad(packet->msg->mad)) {
 		tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
 		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
 				   (be64_to_cpup(tid) & 0xffffffff));
+		rmpp_mad->mad_hdr.tid = *tid;
+	}
+
+	spin_lock_irq(&file->send_lock);
+	ret = is_duplicate(file, packet);
+	if (!ret)
+		list_add_tail(&packet->list, &file->send_list);
+	spin_unlock_irq(&file->send_lock);
+	if (ret) {
+		ret = -EINVAL;
+		goto err_msg;
 	}
 
 	ret = ib_post_send_mad(packet->msg, NULL);
 	if (ret)
-		goto err_msg;
+		goto err_send;
 
 	up_read(&file->port->mutex);
 	return count;
 
+err_send:
+	dequeue_send(file, packet);
 err_msg:
 	ib_free_send_mad(packet->msg);
 err_ah:
@@ -657,7 +720,9 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
 	}
 
 	spin_lock_init(&file->recv_lock);
+	spin_lock_init(&file->send_lock);
 	INIT_LIST_HEAD(&file->recv_list);
+	INIT_LIST_HEAD(&file->send_list);
 	init_waitqueue_head(&file->recv_wait);
 
 	file->port = port;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index bdf5d5098190..30923eb68ec7 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -42,6 +42,13 @@
 
 #include "uverbs.h"
 
+static struct lock_class_key pd_lock_key;
+static struct lock_class_key mr_lock_key;
+static struct lock_class_key cq_lock_key;
+static struct lock_class_key qp_lock_key;
+static struct lock_class_key ah_lock_key;
+static struct lock_class_key srq_lock_key;
+
 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
 	do { \
 		(udata)->inbuf = (void __user *) (ibuf); \
@@ -76,12 +83,13 @@
  */
 
 static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
-		      struct ib_ucontext *context)
+		      struct ib_ucontext *context, struct lock_class_key *key)
 {
 	uobj->user_handle = user_handle;
 	uobj->context = context;
 	kref_init(&uobj->ref);
 	init_rwsem(&uobj->mutex);
+	lockdep_set_class(&uobj->mutex, key);
 	uobj->live = 0;
 }
 
@@ -470,7 +478,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
 	if (!uobj)
 		return -ENOMEM;
 
-	init_uobj(uobj, 0, file->ucontext);
+	init_uobj(uobj, 0, file->ucontext, &pd_lock_key);
 	down_write(&uobj->mutex);
 
 	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
@@ -591,7 +599,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
 	if (!obj)
 		return -ENOMEM;
 
-	init_uobj(&obj->uobject, 0, file->ucontext);
+	init_uobj(&obj->uobject, 0, file->ucontext, &mr_lock_key);
 	down_write(&obj->uobject.mutex);
 
 	/*
@@ -770,7 +778,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
 	if (!obj)
 		return -ENOMEM;
 
-	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext);
+	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key);
 	down_write(&obj->uobject.mutex);
 
 	if (cmd.comp_channel >= 0) {
@@ -1051,13 +1059,14 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	if (!obj)
 		return -ENOMEM;
 
-	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext);
+	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
 	down_write(&obj->uevent.uobject.mutex);
 
+	srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
 	pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
 	scq = idr_read_cq(cmd.send_cq_handle, file->ucontext);
-	rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext);
-	srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
+	rcq = cmd.recv_cq_handle == cmd.send_cq_handle ?
+		scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext);
 
 	if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) {
 		ret = -EINVAL;
@@ -1125,7 +1134,8 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 
 	put_pd_read(pd);
 	put_cq_read(scq);
-	put_cq_read(rcq);
+	if (rcq != scq)
+		put_cq_read(rcq);
 	if (srq)
 		put_srq_read(srq);
 
@@ -1150,7 +1160,7 @@ err_put:
 	put_pd_read(pd);
 	if (scq)
 		put_cq_read(scq);
-	if (rcq)
+	if (rcq && rcq != scq)
 		put_cq_read(rcq);
 	if (srq)
 		put_srq_read(srq);
@@ -1751,7 +1761,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	if (!uobj)
 		return -ENOMEM;
 
-	init_uobj(uobj, cmd.user_handle, file->ucontext);
+	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key);
 	down_write(&uobj->mutex);
 
 	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
@@ -1775,7 +1785,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	ah = ib_create_ah(pd, &attr);
 	if (IS_ERR(ah)) {
 		ret = PTR_ERR(ah);
-		goto err;
+		goto err_put;
 	}
 
 	ah->uobject = uobj;
@@ -1811,6 +1821,9 @@ err_copy:
 err_destroy:
 	ib_destroy_ah(ah);
 
+err_put:
+	put_pd_read(pd);
+
 err:
 	put_uobj_write(uobj);
 	return ret;
@@ -1963,7 +1976,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
 	if (!obj)
 		return -ENOMEM;
 
-	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext);
+	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &srq_lock_key);
 	down_write(&obj->uobject.mutex);
 
 	pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
@@ -1984,7 +1997,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
 	srq = pd->device->create_srq(pd, &attr, &udata);
 	if (IS_ERR(srq)) {
 		ret = PTR_ERR(srq);
-		goto err;
+		goto err_put;
 	}
 
 	srq->device = pd->device;
@@ -2029,6 +2042,9 @@ err_copy:
 err_destroy:
 	ib_destroy_srq(srq);
 
+err_put:
+	put_pd_read(pd);
+
 err:
 	put_uobj_write(&obj->uobject);
 	return ret;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 823131d58b34..f98518d912b5 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -859,6 +859,38 @@ static void ipath_rcv_layer(struct ipath_devdata *dd, u32 etail,
 		__ipath_layer_rcv_lid(dd, hdr);
 }
 
+static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
+			     u32 eflags,
+			     u32 l,
+			     u32 etail,
+			     u64 *rc)
+{
+	char emsg[128];
+	struct ipath_message_header *hdr;
+
+	get_rhf_errstring(eflags, emsg, sizeof emsg);
+	hdr = (struct ipath_message_header *)&rc[1];
+	ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
+		   "tlen=%x opcode=%x egridx=%x: %s\n",
+		   eflags, l,
+		   ipath_hdrget_rcv_type((__le32 *) rc),
+		   ipath_hdrget_length_in_bytes((__le32 *) rc),
+		   be32_to_cpu(hdr->bth[0]) >> 24,
+		   etail, emsg);
+
+	/* Count local link integrity errors. */
+	if (eflags & (INFINIPATH_RHF_H_ICRCERR | INFINIPATH_RHF_H_VCRCERR)) {
+		u8 n = (dd->ipath_ibcctrl >>
+			INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
+			INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
+
+		if (++dd->ipath_lli_counter > n) {
+			dd->ipath_lli_counter = 0;
+			dd->ipath_lli_errors++;
+		}
+	}
+}
+
 /*
  * ipath_kreceive - receive a packet
  * @dd: the infinipath device
@@ -875,7 +907,6 @@ void ipath_kreceive(struct ipath_devdata *dd)
 	struct ipath_message_header *hdr;
 	u32 eflags, i, etype, tlen, pkttot = 0, updegr=0, reloop=0;
 	static u64 totcalls; /* stats, may eventually remove */
-	char emsg[128];
 
 	if (!dd->ipath_hdrqtailptr) {
 		ipath_dev_err(dd,
@@ -938,26 +969,9 @@ reloop:
 				  "%x\n", etype);
 		}
 
-		if (eflags & ~(INFINIPATH_RHF_H_TIDERR |
-			       INFINIPATH_RHF_H_IHDRERR)) {
-			get_rhf_errstring(eflags, emsg, sizeof emsg);
-			ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
-				   "tlen=%x opcode=%x egridx=%x: %s\n",
-				   eflags, l, etype, tlen, bthbytes[0],
-				   ipath_hdrget_index((__le32 *) rc), emsg);
-			/* Count local link integrity errors. */
-			if (eflags & (INFINIPATH_RHF_H_ICRCERR |
-				      INFINIPATH_RHF_H_VCRCERR)) {
-				u8 n = (dd->ipath_ibcctrl >>
-					INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
-					INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
-
-				if (++dd->ipath_lli_counter > n) {
-					dd->ipath_lli_counter = 0;
-					dd->ipath_lli_errors++;
-				}
-			}
-		} else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
+		if (unlikely(eflags))
+			ipath_rcv_hdrerr(dd, eflags, l, etail, rc);
+		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
 			int ret = __ipath_verbs_rcv(dd, rc + 1,
 						    ebuf, tlen);
 			if (ret == -ENODEV)
@@ -981,25 +995,7 @@ reloop:
 		else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
 			ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
 				  be32_to_cpu(hdr->bth[0]) & 0xff);
-		else if (eflags & (INFINIPATH_RHF_H_TIDERR |
-				   INFINIPATH_RHF_H_IHDRERR)) {
-			/*
-			 * This is a type 3 packet, only the LRH is in the
-			 * rcvhdrq, the rest of the header is in the eager
-			 * buffer.
-			 */
-			u8 opcode;
-			if (ebuf) {
-				bthbytes = (u8 *) ebuf;
-				opcode = *bthbytes;
-			}
-			else
-				opcode = 0;
-			get_rhf_errstring(eflags, emsg, sizeof emsg);
-			ipath_dbg("Err %x (%s), opcode %x, egrbuf %x, "
-				  "len %x\n", eflags, emsg, opcode, etail,
-				  tlen);
-		} else {
+		else {
 			/*
 			 * error packet, type of error unknown.
 			 * Probably type 3, but we don't know, so don't
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
index 46773c673a1a..a5ca279370aa 100644
--- a/drivers/infiniband/hw/ipath/ipath_keys.c
+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
@@ -197,6 +197,21 @@ int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
 	size_t off;
 	int ret;
 
+	/*
+	 * We use RKEY == zero for physical addresses
+	 * (see ipath_get_dma_mr).
+	 */
+	if (rkey == 0) {
+		sge->mr = NULL;
+		sge->vaddr = phys_to_virt(vaddr);
+		sge->length = len;
+		sge->sge_length = len;
+		ss->sg_list = NULL;
+		ss->num_sge = 1;
+		ret = 1;
+		goto bail;
+	}
+
 	mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))];
 	if (unlikely(mr == NULL || mr->lkey != rkey)) {
 		ret = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 56ac336dd1ec..d70a9b6b5239 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -191,10 +191,6 @@ void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
 {
 	struct ipath_sge *sge = &ss->sge;
 
-	while (length > sge->sge_length) {
-		length -= sge->sge_length;
-		ss->sge = *ss->sg_list++;
-	}
 	while (length) {
 		u32 len = sge->length;
 
@@ -627,6 +623,7 @@ static int ipath_query_device(struct ib_device *ibdev,
 	props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
 		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
 		IB_DEVICE_SYS_IMAGE_GUID;
+	props->page_size_cap = PAGE_SIZE;
 	props->vendor_id = ipath_layer_get_vendorid(dev->dd);
 	props->vendor_part_id = ipath_layer_get_deviceid(dev->dd);
 	props->hw_ver = ipath_layer_get_pcirev(dev->dd);
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index d0f7731802c9..deabc14b4ea4 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -778,11 +778,12 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
778 ((dev->fw_ver & 0xffff0000ull) >> 16) | 778 ((dev->fw_ver & 0xffff0000ull) >> 16) |
779 ((dev->fw_ver & 0x0000ffffull) << 16); 779 ((dev->fw_ver & 0x0000ffffull) << 16);
780 780
781 MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
782 dev->cmd.max_cmds = 1 << lg;
783
781 mthca_dbg(dev, "FW version %012llx, max commands %d\n", 784 mthca_dbg(dev, "FW version %012llx, max commands %d\n",
782 (unsigned long long) dev->fw_ver, dev->cmd.max_cmds); 785 (unsigned long long) dev->fw_ver, dev->cmd.max_cmds);
783 786
784 MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
785 dev->cmd.max_cmds = 1 << lg;
786 MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET); 787 MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET);
787 MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET); 788 MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
788 789
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index fab417c5cf43..b60a9d79ae54 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -370,7 +370,8 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
370 return -EINVAL; 370 return -EINVAL;
371 371
372 if (attr_mask & IB_SRQ_LIMIT) { 372 if (attr_mask & IB_SRQ_LIMIT) {
373 if (attr->srq_limit > srq->max) 373 u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
374 if (attr->srq_limit > max_wr)
374 return -EINVAL; 375 return -EINVAL;
375 376
376 mutex_lock(&srq->mutex); 377 mutex_lock(&srq->mutex);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 3f89f5e19036..474aa214ab57 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -212,6 +212,7 @@ struct ipoib_path {
212 212
213struct ipoib_neigh { 213struct ipoib_neigh {
214 struct ipoib_ah *ah; 214 struct ipoib_ah *ah;
215 union ib_gid dgid;
215 struct sk_buff_head queue; 216 struct sk_buff_head queue;
216 217
217 struct neighbour *neighbour; 218 struct neighbour *neighbour;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 1c6ea1c682a5..cf71d2a5515c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -404,6 +404,8 @@ static void path_rec_completion(int status,
404 list_for_each_entry(neigh, &path->neigh_list, list) { 404 list_for_each_entry(neigh, &path->neigh_list, list) {
405 kref_get(&path->ah->ref); 405 kref_get(&path->ah->ref);
406 neigh->ah = path->ah; 406 neigh->ah = path->ah;
407 memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
408 sizeof(union ib_gid));
407 409
408 while ((skb = __skb_dequeue(&neigh->queue))) 410 while ((skb = __skb_dequeue(&neigh->queue)))
409 __skb_queue_tail(&skqueue, skb); 411 __skb_queue_tail(&skqueue, skb);
@@ -510,6 +512,8 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
510 if (path->ah) { 512 if (path->ah) {
511 kref_get(&path->ah->ref); 513 kref_get(&path->ah->ref);
512 neigh->ah = path->ah; 514 neigh->ah = path->ah;
515 memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
516 sizeof(union ib_gid));
513 517
514 ipoib_send(dev, skb, path->ah, 518 ipoib_send(dev, skb, path->ah,
515 be32_to_cpup((__be32 *) skb->dst->neighbour->ha)); 519 be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
@@ -633,6 +637,25 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
633 neigh = *to_ipoib_neigh(skb->dst->neighbour); 637 neigh = *to_ipoib_neigh(skb->dst->neighbour);
634 638
635 if (likely(neigh->ah)) { 639 if (likely(neigh->ah)) {
640 if (unlikely(memcmp(&neigh->dgid.raw,
641 skb->dst->neighbour->ha + 4,
642 sizeof(union ib_gid)))) {
643 spin_lock(&priv->lock);
644 /*
645 * It's safe to call ipoib_put_ah() inside
646 * priv->lock here, because we know that
647 * path->ah will always hold one more reference,
648 * so ipoib_put_ah() will never do more than
649 * decrement the ref count.
650 */
651 ipoib_put_ah(neigh->ah);
652 list_del(&neigh->list);
653 ipoib_neigh_free(neigh);
654 spin_unlock(&priv->lock);
655 ipoib_path_lookup(skb, dev);
656 goto out;
657 }
658
636 ipoib_send(dev, skb, neigh->ah, 659 ipoib_send(dev, skb, neigh->ah,
637 be32_to_cpup((__be32 *) skb->dst->neighbour->ha)); 660 be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
638 goto out; 661 goto out;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index ab40488182b3..b5e6a7be603d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -264,6 +264,10 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
264 if (!ah) { 264 if (!ah) {
265 ipoib_warn(priv, "ib_address_create failed\n"); 265 ipoib_warn(priv, "ib_address_create failed\n");
266 } else { 266 } else {
267 spin_lock_irq(&priv->lock);
268 mcast->ah = ah;
269 spin_unlock_irq(&priv->lock);
270
267 ipoib_dbg_mcast(priv, "MGID " IPOIB_GID_FMT 271 ipoib_dbg_mcast(priv, "MGID " IPOIB_GID_FMT
268 " AV %p, LID 0x%04x, SL %d\n", 272 " AV %p, LID 0x%04x, SL %d\n",
269 IPOIB_GID_ARG(mcast->mcmember.mgid), 273 IPOIB_GID_ARG(mcast->mcmember.mgid),
@@ -271,10 +275,6 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
271 be16_to_cpu(mcast->mcmember.mlid), 275 be16_to_cpu(mcast->mcmember.mlid),
272 mcast->mcmember.sl); 276 mcast->mcmember.sl);
273 } 277 }
274
275 spin_lock_irq(&priv->lock);
276 mcast->ah = ah;
277 spin_unlock_irq(&priv->lock);
278 } 278 }
279 279
280 /* actually send any queued packets */ 280 /* actually send any queued packets */
diff --git a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig
index bbc229852881..ea31d8470510 100644
--- a/drivers/message/fusion/Kconfig
+++ b/drivers/message/fusion/Kconfig
@@ -48,10 +48,8 @@ config FUSION_SAS
48 List of supported controllers: 48 List of supported controllers:
49 49
50 LSISAS1064 50 LSISAS1064
51 LSISAS1066
52 LSISAS1068 51 LSISAS1068
53 LSISAS1064E 52 LSISAS1064E
54 LSISAS1066E
55 LSISAS1068E 53 LSISAS1068E
56 54
57config FUSION_MAX_SGE 55config FUSION_MAX_SGE
diff --git a/drivers/message/fusion/Makefile b/drivers/message/fusion/Makefile
index b114236f4395..341691390e86 100644
--- a/drivers/message/fusion/Makefile
+++ b/drivers/message/fusion/Makefile
@@ -9,7 +9,6 @@
9#EXTRA_CFLAGS += -DMPT_DEBUG_EXIT 9#EXTRA_CFLAGS += -DMPT_DEBUG_EXIT
10#EXTRA_CFLAGS += -DMPT_DEBUG_FAIL 10#EXTRA_CFLAGS += -DMPT_DEBUG_FAIL
11 11
12
13# 12#
14# driver/module specifics... 13# driver/module specifics...
15# 14#
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 43308df64623..29d0635cce1d 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -436,8 +436,6 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
436 */ 436 */
437 if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) { 437 if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) {
438 freereq = 0; 438 freereq = 0;
439 devtverboseprintk((MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p does not return Request frame\n",
440 ioc->name, pEvReply));
441 } else { 439 } else {
442 devtverboseprintk((MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p returns Request frame\n", 440 devtverboseprintk((MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p returns Request frame\n",
443 ioc->name, pEvReply)); 441 ioc->name, pEvReply));
@@ -678,19 +676,19 @@ int
678mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, int cb_idx) 676mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, int cb_idx)
679{ 677{
680 MPT_ADAPTER *ioc; 678 MPT_ADAPTER *ioc;
679 const struct pci_device_id *id;
681 680
682 if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS) { 681 if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
683 return -EINVAL; 682 return -EINVAL;
684 }
685 683
686 MptDeviceDriverHandlers[cb_idx] = dd_cbfunc; 684 MptDeviceDriverHandlers[cb_idx] = dd_cbfunc;
687 685
688 /* call per pci device probe entry point */ 686 /* call per pci device probe entry point */
689 list_for_each_entry(ioc, &ioc_list, list) { 687 list_for_each_entry(ioc, &ioc_list, list) {
690 if(dd_cbfunc->probe) { 688 id = ioc->pcidev->driver ?
691 dd_cbfunc->probe(ioc->pcidev, 689 ioc->pcidev->driver->id_table : NULL;
692 ioc->pcidev->driver->id_table); 690 if (dd_cbfunc->probe)
693 } 691 dd_cbfunc->probe(ioc->pcidev, id);
694 } 692 }
695 693
696 return 0; 694 return 0;
@@ -1056,9 +1054,8 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
1056 1054
1057 dinitprintk((MYIOC_s_INFO_FMT 1055 dinitprintk((MYIOC_s_INFO_FMT
1058 "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n", 1056 "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
1059 ioc->name, 1057 ioc->name, ioc->HostPageBuffer,
1060 ioc->HostPageBuffer, 1058 (u32)ioc->HostPageBuffer_dma,
1061 ioc->HostPageBuffer_dma,
1062 host_page_buffer_sz)); 1059 host_page_buffer_sz));
1063 ioc->alloc_total += host_page_buffer_sz; 1060 ioc->alloc_total += host_page_buffer_sz;
1064 ioc->HostPageBuffer_sz = host_page_buffer_sz; 1061 ioc->HostPageBuffer_sz = host_page_buffer_sz;
@@ -1380,6 +1377,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1380 printk(KERN_WARNING MYNAM 1377 printk(KERN_WARNING MYNAM
1381 ": WARNING - %s did not initialize properly! (%d)\n", 1378 ": WARNING - %s did not initialize properly! (%d)\n",
1382 ioc->name, r); 1379 ioc->name, r);
1380
1383 list_del(&ioc->list); 1381 list_del(&ioc->list);
1384 if (ioc->alt_ioc) 1382 if (ioc->alt_ioc)
1385 ioc->alt_ioc->alt_ioc = NULL; 1383 ioc->alt_ioc->alt_ioc = NULL;
@@ -1762,9 +1760,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
1762 * chips (mpt_adapter_disable, 1760 * chips (mpt_adapter_disable,
1763 * mpt_diag_reset) 1761 * mpt_diag_reset)
1764 */ 1762 */
1765 ioc->cached_fw = NULL;
1766 ddlprintk((MYIOC_s_INFO_FMT ": mpt_upload: alt_%s has cached_fw=%p \n", 1763 ddlprintk((MYIOC_s_INFO_FMT ": mpt_upload: alt_%s has cached_fw=%p \n",
1767 ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw)); 1764 ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw));
1765 ioc->alt_ioc->cached_fw = NULL;
1768 } 1766 }
1769 } else { 1767 } else {
1770 printk(KERN_WARNING MYNAM ": firmware upload failure!\n"); 1768 printk(KERN_WARNING MYNAM ": firmware upload failure!\n");
@@ -1885,7 +1883,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
1885 /* FIXME? Examine results here? */ 1883 /* FIXME? Examine results here? */
1886 } 1884 }
1887 1885
1888out: 1886 out:
1889 if ((ret != 0) && irq_allocated) { 1887 if ((ret != 0) && irq_allocated) {
1890 free_irq(ioc->pci_irq, ioc); 1888 free_irq(ioc->pci_irq, ioc);
1891 if (mpt_msi_enable) 1889 if (mpt_msi_enable)
@@ -2670,6 +2668,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
2670 dinitprintk((MYIOC_s_INFO_FMT "INFO - Wait IOC_OPERATIONAL state (cnt=%d)\n", 2668 dinitprintk((MYIOC_s_INFO_FMT "INFO - Wait IOC_OPERATIONAL state (cnt=%d)\n",
2671 ioc->name, count)); 2669 ioc->name, count));
2672 2670
2671 ioc->aen_event_read_flag=0;
2673 return r; 2672 return r;
2674} 2673}
2675 2674
@@ -2737,6 +2736,8 @@ mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size)
2737 if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) { 2736 if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
2738 ioc->cached_fw = ioc->alt_ioc->cached_fw; /* use alt_ioc's memory */ 2737 ioc->cached_fw = ioc->alt_ioc->cached_fw; /* use alt_ioc's memory */
2739 ioc->cached_fw_dma = ioc->alt_ioc->cached_fw_dma; 2738 ioc->cached_fw_dma = ioc->alt_ioc->cached_fw_dma;
2739 ioc->alloc_total += size;
2740 ioc->alt_ioc->alloc_total -= size;
2740 } else { 2741 } else {
2741 if ( (ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma) ) ) 2742 if ( (ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma) ) )
2742 ioc->alloc_total += size; 2743 ioc->alloc_total += size;
@@ -3166,6 +3167,7 @@ KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag)
3166static int 3167static int
3167mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) 3168mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3168{ 3169{
3170 MPT_ADAPTER *iocp=NULL;
3169 u32 diag0val; 3171 u32 diag0val;
3170 u32 doorbell; 3172 u32 doorbell;
3171 int hard_reset_done = 0; 3173 int hard_reset_done = 0;
@@ -3301,17 +3303,23 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3301 /* FIXME? Examine results here? */ 3303 /* FIXME? Examine results here? */
3302 } 3304 }
3303 3305
3304 if (ioc->cached_fw) { 3306 if (ioc->cached_fw)
3307 iocp = ioc;
3308 else if (ioc->alt_ioc && ioc->alt_ioc->cached_fw)
3309 iocp = ioc->alt_ioc;
3310 if (iocp) {
3305 /* If the DownloadBoot operation fails, the 3311 /* If the DownloadBoot operation fails, the
3306 * IOC will be left unusable. This is a fatal error 3312 * IOC will be left unusable. This is a fatal error
3307 * case. _diag_reset will return < 0 3313 * case. _diag_reset will return < 0
3308 */ 3314 */
3309 for (count = 0; count < 30; count ++) { 3315 for (count = 0; count < 30; count ++) {
3310 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); 3316 diag0val = CHIPREG_READ32(&iocp->chip->Diagnostic);
3311 if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) { 3317 if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) {
3312 break; 3318 break;
3313 } 3319 }
3314 3320
3321 dprintk((MYIOC_s_INFO_FMT "cached_fw: diag0val=%x count=%d\n",
3322 iocp->name, diag0val, count));
3315 /* wait 1 sec */ 3323 /* wait 1 sec */
3316 if (sleepFlag == CAN_SLEEP) { 3324 if (sleepFlag == CAN_SLEEP) {
3317 msleep (1000); 3325 msleep (1000);
@@ -3320,7 +3328,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3320 } 3328 }
3321 } 3329 }
3322 if ((count = mpt_downloadboot(ioc, 3330 if ((count = mpt_downloadboot(ioc,
3323 (MpiFwHeader_t *)ioc->cached_fw, sleepFlag)) < 0) { 3331 (MpiFwHeader_t *)iocp->cached_fw, sleepFlag)) < 0) {
3324 printk(KERN_WARNING MYNAM 3332 printk(KERN_WARNING MYNAM
3325 ": firmware downloadboot failure (%d)!\n", count); 3333 ": firmware downloadboot failure (%d)!\n", count);
3326 } 3334 }
@@ -3907,18 +3915,18 @@ WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
3907 3915
3908 if (sleepFlag == CAN_SLEEP) { 3916 if (sleepFlag == CAN_SLEEP) {
3909 while (--cntdn) { 3917 while (--cntdn) {
3918 msleep (1);
3910 intstat = CHIPREG_READ32(&ioc->chip->IntStatus); 3919 intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
3911 if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS)) 3920 if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS))
3912 break; 3921 break;
3913 msleep (1);
3914 count++; 3922 count++;
3915 } 3923 }
3916 } else { 3924 } else {
3917 while (--cntdn) { 3925 while (--cntdn) {
3926 mdelay (1);
3918 intstat = CHIPREG_READ32(&ioc->chip->IntStatus); 3927 intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
3919 if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS)) 3928 if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS))
3920 break; 3929 break;
3921 mdelay (1);
3922 count++; 3930 count++;
3923 } 3931 }
3924 } 3932 }
@@ -4883,6 +4891,7 @@ mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
4883 pIoc4 = pci_alloc_consistent(ioc->pcidev, iocpage4sz, &ioc4_dma); 4891 pIoc4 = pci_alloc_consistent(ioc->pcidev, iocpage4sz, &ioc4_dma);
4884 if (!pIoc4) 4892 if (!pIoc4)
4885 return; 4893 return;
4894 ioc->alloc_total += iocpage4sz;
4886 } else { 4895 } else {
4887 ioc4_dma = ioc->spi_data.IocPg4_dma; 4896 ioc4_dma = ioc->spi_data.IocPg4_dma;
4888 iocpage4sz = ioc->spi_data.IocPg4Sz; 4897 iocpage4sz = ioc->spi_data.IocPg4Sz;
@@ -4899,6 +4908,7 @@ mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
4899 } else { 4908 } else {
4900 pci_free_consistent(ioc->pcidev, iocpage4sz, pIoc4, ioc4_dma); 4909 pci_free_consistent(ioc->pcidev, iocpage4sz, pIoc4, ioc4_dma);
4901 ioc->spi_data.pIocPg4 = NULL; 4910 ioc->spi_data.pIocPg4 = NULL;
4911 ioc->alloc_total -= iocpage4sz;
4902 } 4912 }
4903} 4913}
4904 4914
@@ -5030,19 +5040,18 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
5030 EventAck_t *pAck; 5040 EventAck_t *pAck;
5031 5041
5032 if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 5042 if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
5033 printk(MYIOC_s_WARN_FMT "Unable to allocate event ACK " 5043 dfailprintk((MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
5034 "request frame for Event=%x EventContext=%x EventData=%x!\n", 5044 ioc->name,__FUNCTION__));
5035 ioc->name, evnp->Event, le32_to_cpu(evnp->EventContext),
5036 le32_to_cpu(evnp->Data[0]));
5037 return -1; 5045 return -1;
5038 } 5046 }
5039 memset(pAck, 0, sizeof(*pAck));
5040 5047
5041 dprintk((MYIOC_s_INFO_FMT "Sending EventAck\n", ioc->name)); 5048 devtverboseprintk((MYIOC_s_INFO_FMT "Sending EventAck\n", ioc->name));
5042 5049
5043 pAck->Function = MPI_FUNCTION_EVENT_ACK; 5050 pAck->Function = MPI_FUNCTION_EVENT_ACK;
5044 pAck->ChainOffset = 0; 5051 pAck->ChainOffset = 0;
5052 pAck->Reserved[0] = pAck->Reserved[1] = 0;
5045 pAck->MsgFlags = 0; 5053 pAck->MsgFlags = 0;
5054 pAck->Reserved1[0] = pAck->Reserved1[1] = pAck->Reserved1[2] = 0;
5046 pAck->Event = evnp->Event; 5055 pAck->Event = evnp->Event;
5047 pAck->EventContext = evnp->EventContext; 5056 pAck->EventContext = evnp->EventContext;
5048 5057
@@ -5704,9 +5713,9 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
5704 break; 5713 break;
5705 case MPI_EVENT_EVENT_CHANGE: 5714 case MPI_EVENT_EVENT_CHANGE:
5706 if (evData0) 5715 if (evData0)
5707 ds = "Events(ON) Change"; 5716 ds = "Events ON";
5708 else 5717 else
5709 ds = "Events(OFF) Change"; 5718 ds = "Events OFF";
5710 break; 5719 break;
5711 case MPI_EVENT_INTEGRATED_RAID: 5720 case MPI_EVENT_INTEGRATED_RAID:
5712 { 5721 {
@@ -5777,8 +5786,27 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
5777 break; 5786 break;
5778 case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED: 5787 case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
5779 snprintf(evStr, EVENT_DESCR_STR_SZ, 5788 snprintf(evStr, EVENT_DESCR_STR_SZ,
5780 "SAS Device Status Change: No Persistancy " 5789 "SAS Device Status Change: No Persistancy: id=%d", id);
5781 "Added: id=%d", id); 5790 break;
5791 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
5792 snprintf(evStr, EVENT_DESCR_STR_SZ,
5793 "SAS Device Status Change: Internal Device Reset : id=%d", id);
5794 break;
5795 case MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
5796 snprintf(evStr, EVENT_DESCR_STR_SZ,
5797 "SAS Device Status Change: Internal Task Abort : id=%d", id);
5798 break;
5799 case MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
5800 snprintf(evStr, EVENT_DESCR_STR_SZ,
5801 "SAS Device Status Change: Internal Abort Task Set : id=%d", id);
5802 break;
5803 case MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
5804 snprintf(evStr, EVENT_DESCR_STR_SZ,
5805 "SAS Device Status Change: Internal Clear Task Set : id=%d", id);
5806 break;
5807 case MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
5808 snprintf(evStr, EVENT_DESCR_STR_SZ,
5809 "SAS Device Status Change: Internal Query Task : id=%d", id);
5782 break; 5810 break;
5783 default: 5811 default:
5784 snprintf(evStr, EVENT_DESCR_STR_SZ, 5812 snprintf(evStr, EVENT_DESCR_STR_SZ,
@@ -6034,7 +6062,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
6034 * @ioc: Pointer to MPT_ADAPTER structure 6062 * @ioc: Pointer to MPT_ADAPTER structure
6035 * @log_info: U32 LogInfo reply word from the IOC 6063 * @log_info: U32 LogInfo reply word from the IOC
6036 * 6064 *
6037 * Refer to lsi/fc_log.h. 6065 * Refer to lsi/mpi_log_fc.h.
6038 */ 6066 */
6039static void 6067static void
6040mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info) 6068mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info)
@@ -6131,8 +6159,10 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
6131 "Invalid SAS Address", /* 01h */ 6159 "Invalid SAS Address", /* 01h */
6132 NULL, /* 02h */ 6160 NULL, /* 02h */
6133 "Invalid Page", /* 03h */ 6161 "Invalid Page", /* 03h */
6134 NULL, /* 04h */ 6162 "Diag Message Error", /* 04h */
6135 "Task Terminated" /* 05h */ 6163 "Task Terminated", /* 05h */
6164 "Enclosure Management", /* 06h */
6165 "Target Mode" /* 07h */
6136 }; 6166 };
6137 static char *pl_code_str[] = { 6167 static char *pl_code_str[] = {
6138 NULL, /* 00h */ 6168 NULL, /* 00h */
@@ -6158,7 +6188,7 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
6158 "IO Executed", /* 14h */ 6188 "IO Executed", /* 14h */
6159 "Persistant Reservation Out Not Affiliation Owner", /* 15h */ 6189 "Persistant Reservation Out Not Affiliation Owner", /* 15h */
6160 "Open Transmit DMA Abort", /* 16h */ 6190 "Open Transmit DMA Abort", /* 16h */
6161 NULL, /* 17h */ 6191 "IO Device Missing Delay Retry", /* 17h */
6162 NULL, /* 18h */ 6192 NULL, /* 18h */
6163 NULL, /* 19h */ 6193 NULL, /* 19h */
6164 NULL, /* 1Ah */ 6194 NULL, /* 1Ah */
@@ -6238,7 +6268,7 @@ static void
6238mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf) 6268mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf)
6239{ 6269{
6240 u32 status = ioc_status & MPI_IOCSTATUS_MASK; 6270 u32 status = ioc_status & MPI_IOCSTATUS_MASK;
6241 char *desc = ""; 6271 char *desc = NULL;
6242 6272
6243 switch (status) { 6273 switch (status) {
6244 case MPI_IOCSTATUS_INVALID_FUNCTION: /* 0x0001 */ 6274 case MPI_IOCSTATUS_INVALID_FUNCTION: /* 0x0001 */
@@ -6348,7 +6378,7 @@ mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf)
6348 desc = "Others"; 6378 desc = "Others";
6349 break; 6379 break;
6350 } 6380 }
6351 if (desc != "") 6381 if (desc != NULL)
6352 printk(MYIOC_s_INFO_FMT "IOCStatus(0x%04x): %s\n", ioc->name, status, desc); 6382 printk(MYIOC_s_INFO_FMT "IOCStatus(0x%04x): %s\n", ioc->name, status, desc);
6353} 6383}
6354 6384
@@ -6386,7 +6416,6 @@ EXPORT_SYMBOL(mpt_alloc_fw_memory);
6386EXPORT_SYMBOL(mpt_free_fw_memory); 6416EXPORT_SYMBOL(mpt_free_fw_memory);
6387EXPORT_SYMBOL(mptbase_sas_persist_operation); 6417EXPORT_SYMBOL(mptbase_sas_persist_operation);
6388 6418
6389
6390/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6419/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
6391/* 6420/*
6392 * fusion_init - Fusion MPT base driver initialization routine. 6421 * fusion_init - Fusion MPT base driver initialization routine.
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index a5ce10b67d02..d4cb144ab402 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -75,8 +75,8 @@
75#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR 75#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR
76#endif 76#endif
77 77
78#define MPT_LINUX_VERSION_COMMON "3.04.00" 78#define MPT_LINUX_VERSION_COMMON "3.04.01"
79#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.00" 79#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.01"
80#define WHAT_MAGIC_STRING "@" "(" "#" ")" 80#define WHAT_MAGIC_STRING "@" "(" "#" ")"
81 81
82#define show_mptmod_ver(s,ver) \ 82#define show_mptmod_ver(s,ver) \
@@ -307,8 +307,8 @@ typedef struct _SYSIF_REGS
307 u32 HostIndex; /* 50 Host Index register */ 307 u32 HostIndex; /* 50 Host Index register */
308 u32 Reserved4[15]; /* 54-8F */ 308 u32 Reserved4[15]; /* 54-8F */
309 u32 Fubar; /* 90 For Fubar usage */ 309 u32 Fubar; /* 90 For Fubar usage */
310 u32 Reserved5[1050];/* 94-10F8 */ 310 u32 Reserved5[1050];/* 94-10F8 */
311 u32 Reset_1078; /* 10FC Reset 1078 */ 311 u32 Reset_1078; /* 10FC Reset 1078 */
312} SYSIF_REGS; 312} SYSIF_REGS;
313 313
314/* 314/*
@@ -363,6 +363,7 @@ typedef struct _VirtDevice {
363#define MPT_TARGET_FLAGS_VALID_56 0x10 363#define MPT_TARGET_FLAGS_VALID_56 0x10
364#define MPT_TARGET_FLAGS_SAF_TE_ISSUED 0x20 364#define MPT_TARGET_FLAGS_SAF_TE_ISSUED 0x20
365#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x40 365#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x40
366#define MPT_TARGET_FLAGS_LED_ON 0x80
366 367
367/* 368/*
368 * /proc/mpt interface 369 * /proc/mpt interface
@@ -634,7 +635,6 @@ typedef struct _MPT_ADAPTER
634 u16 handle; 635 u16 handle;
635 int sas_index; /* index refrencing */ 636 int sas_index; /* index refrencing */
636 MPT_SAS_MGMT sas_mgmt; 637 MPT_SAS_MGMT sas_mgmt;
637 int num_ports;
638 struct work_struct sas_persist_task; 638 struct work_struct sas_persist_task;
639 639
640 struct work_struct fc_setup_reset_work; 640 struct work_struct fc_setup_reset_work;
@@ -644,7 +644,6 @@ typedef struct _MPT_ADAPTER
644 struct work_struct fc_rescan_work; 644 struct work_struct fc_rescan_work;
645 char fc_rescan_work_q_name[KOBJ_NAME_LEN]; 645 char fc_rescan_work_q_name[KOBJ_NAME_LEN];
646 struct workqueue_struct *fc_rescan_work_q; 646 struct workqueue_struct *fc_rescan_work_q;
647 u8 port_serial_number;
648} MPT_ADAPTER; 647} MPT_ADAPTER;
649 648
650/* 649/*
@@ -982,7 +981,7 @@ typedef struct _MPT_SCSI_HOST {
982 wait_queue_head_t scandv_waitq; 981 wait_queue_head_t scandv_waitq;
983 int scandv_wait_done; 982 int scandv_wait_done;
984 long last_queue_full; 983 long last_queue_full;
985 u8 mpt_pq_filter; 984 u16 tm_iocstatus;
986} MPT_SCSI_HOST; 985} MPT_SCSI_HOST;
987 986
988/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 987/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index b4967bb8a7d6..30975ccd9947 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2332,7 +2332,7 @@ done_free_mem:
2332} 2332}
2333 2333
2334/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2334/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2335/* Prototype Routine for the HP HOST INFO command. 2335/* Prototype Routine for the HOST INFO command.
2336 * 2336 *
2337 * Outputs: None. 2337 * Outputs: None.
2338 * Return: 0 if successful 2338 * Return: 0 if successful
@@ -2568,7 +2568,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2568} 2568}
2569 2569
2570/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2570/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2571/* Prototype Routine for the HP TARGET INFO command. 2571/* Prototype Routine for the TARGET INFO command.
2572 * 2572 *
2573 * Outputs: None. 2573 * Outputs: None.
2574 * Return: 0 if successful 2574 * Return: 0 if successful
diff --git a/drivers/message/fusion/mptctl.h b/drivers/message/fusion/mptctl.h
index a2f8a97992e6..043941882c6e 100644
--- a/drivers/message/fusion/mptctl.h
+++ b/drivers/message/fusion/mptctl.h
@@ -354,9 +354,6 @@ struct mpt_ioctl_command32 {
354 354
355 355
356/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 356/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
357/*
358 * HP Specific IOCTL Defines and Structures
359 */
360 357
361#define CPQFCTS_IOC_MAGIC 'Z' 358#define CPQFCTS_IOC_MAGIC 'Z'
362#define HP_IOC_MAGIC 'Z' 359#define HP_IOC_MAGIC 'Z'
@@ -364,8 +361,6 @@ struct mpt_ioctl_command32 {
364#define HP_GETHOSTINFO1 _IOR(HP_IOC_MAGIC, 20, hp_host_info_rev0_t) 361#define HP_GETHOSTINFO1 _IOR(HP_IOC_MAGIC, 20, hp_host_info_rev0_t)
365#define HP_GETTARGETINFO _IOR(HP_IOC_MAGIC, 21, hp_target_info_t) 362#define HP_GETTARGETINFO _IOR(HP_IOC_MAGIC, 21, hp_target_info_t)
366 363
367/* All HP IOCTLs must include this header
368 */
369typedef struct _hp_header { 364typedef struct _hp_header {
370 unsigned int iocnum; 365 unsigned int iocnum;
371 unsigned int host; 366 unsigned int host;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index a8f2fa985455..90da7d63b08e 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -77,10 +77,6 @@ MODULE_DESCRIPTION(my_NAME);
77MODULE_LICENSE("GPL"); 77MODULE_LICENSE("GPL");
78 78
79/* Command line args */ 79/* Command line args */
80static int mpt_pq_filter = 0;
81module_param(mpt_pq_filter, int, 0);
82MODULE_PARM_DESC(mpt_pq_filter, " Enable peripheral qualifier filter: enable=1 (default=0)");
83
84#define MPTFC_DEV_LOSS_TMO (60) 80#define MPTFC_DEV_LOSS_TMO (60)
85static int mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO; /* reasonable default */ 81static int mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO; /* reasonable default */
86module_param(mptfc_dev_loss_tmo, int, 0); 82module_param(mptfc_dev_loss_tmo, int, 0);
@@ -513,8 +509,7 @@ mptfc_slave_alloc(struct scsi_device *sdev)
513 509
514 if (vtarget->num_luns == 0) { 510 if (vtarget->num_luns == 0) {
515 vtarget->ioc_id = hd->ioc->id; 511 vtarget->ioc_id = hd->ioc->id;
516 vtarget->tflags = MPT_TARGET_FLAGS_Q_YES | 512 vtarget->tflags = MPT_TARGET_FLAGS_Q_YES;
517 MPT_TARGET_FLAGS_VALID_INQUIRY;
518 hd->Targets[sdev->id] = vtarget; 513 hd->Targets[sdev->id] = vtarget;
519 } 514 }
520 515
@@ -1129,13 +1124,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1129 hd->timer.data = (unsigned long) hd; 1124 hd->timer.data = (unsigned long) hd;
1130 hd->timer.function = mptscsih_timer_expired; 1125 hd->timer.function = mptscsih_timer_expired;
1131 1126
1132 hd->mpt_pq_filter = mpt_pq_filter;
1133
1134 ddvprintk((MYIOC_s_INFO_FMT
1135 "mpt_pq_filter %x\n",
1136 ioc->name,
1137 mpt_pq_filter));
1138
1139 init_waitqueue_head(&hd->scandv_waitq); 1127 init_waitqueue_head(&hd->scandv_waitq);
1140 hd->scandv_wait_done = 0; 1128 hd->scandv_wait_done = 0;
1141 hd->last_queue_full = 0; 1129 hd->last_queue_full = 0;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index f7bd8b11ed3b..f66f2203143a 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -67,20 +67,19 @@
67#define my_VERSION MPT_LINUX_VERSION_COMMON 67#define my_VERSION MPT_LINUX_VERSION_COMMON
68#define MYNAM "mptsas" 68#define MYNAM "mptsas"
69 69
70/*
71 * Reserved channel for integrated raid
72 */
73#define MPTSAS_RAID_CHANNEL 1
74
70MODULE_AUTHOR(MODULEAUTHOR); 75MODULE_AUTHOR(MODULEAUTHOR);
71MODULE_DESCRIPTION(my_NAME); 76MODULE_DESCRIPTION(my_NAME);
72MODULE_LICENSE("GPL"); 77MODULE_LICENSE("GPL");
73 78
74static int mpt_pq_filter;
75module_param(mpt_pq_filter, int, 0);
76MODULE_PARM_DESC(mpt_pq_filter,
77 "Enable peripheral qualifier filter: enable=1 "
78 "(default=0)");
79
80static int mpt_pt_clear; 79static int mpt_pt_clear;
81module_param(mpt_pt_clear, int, 0); 80module_param(mpt_pt_clear, int, 0);
82MODULE_PARM_DESC(mpt_pt_clear, 81MODULE_PARM_DESC(mpt_pt_clear,
83 "Clear persistency table: enable=1 " 82 " Clear persistency table: enable=1 "
84 "(default=MPTSCSIH_PT_CLEAR=0)"); 83 "(default=MPTSCSIH_PT_CLEAR=0)");
85 84
86static int mptsasDoneCtx = -1; 85static int mptsasDoneCtx = -1;
@@ -144,7 +143,6 @@ struct mptsas_devinfo {
144 * Specific details on ports, wide/narrow 143 * Specific details on ports, wide/narrow
145 */ 144 */
146struct mptsas_portinfo_details{ 145struct mptsas_portinfo_details{
147 u8 port_id; /* port number provided to transport */
148 u16 num_phys; /* number of phys belong to this port */ 146 u16 num_phys; /* number of phys belong to this port */
149 u64 phy_bitmask; /* TODO, extend support for 255 phys */ 147 u64 phy_bitmask; /* TODO, extend support for 255 phys */
150 struct sas_rphy *rphy; /* transport layer rphy object */ 148 struct sas_rphy *rphy; /* transport layer rphy object */
@@ -350,10 +348,10 @@ mptsas_port_delete(struct mptsas_portinfo_details * port_details)
350 port_info = port_details->port_info; 348 port_info = port_details->port_info;
351 phy_info = port_info->phy_info; 349 phy_info = port_info->phy_info;
352 350
353 dsaswideprintk((KERN_DEBUG "%s: [%p]: port=%02d num_phys=%02d " 351 dsaswideprintk((KERN_DEBUG "%s: [%p]: num_phys=%02d "
354 "bitmask=0x%016llX\n", 352 "bitmask=0x%016llX\n",
355 __FUNCTION__, port_details, port_details->port_id, 353 __FUNCTION__, port_details, port_details->num_phys,
356 port_details->num_phys, port_details->phy_bitmask)); 354 port_details->phy_bitmask));
357 355
358 for (i = 0; i < port_info->num_phys; i++, phy_info++) { 356 for (i = 0; i < port_info->num_phys; i++, phy_info++) {
359 if(phy_info->port_details != port_details) 357 if(phy_info->port_details != port_details)
@@ -462,9 +460,8 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
462 * phy be removed by firmware events. 460 * phy be removed by firmware events.
463 */ 461 */
464 dsaswideprintk((KERN_DEBUG 462 dsaswideprintk((KERN_DEBUG
465 "%s: [%p]: port=%d deleting phy = %d\n", 463 "%s: [%p]: deleting phy = %d\n",
466 __FUNCTION__, port_details, 464 __FUNCTION__, port_details, i));
467 port_details->port_id, i));
468 port_details->num_phys--; 465 port_details->num_phys--;
469 port_details->phy_bitmask &= ~ (1 << phy_info->phy_id); 466 port_details->phy_bitmask &= ~ (1 << phy_info->phy_id);
470 memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo)); 467 memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
@@ -493,7 +490,6 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
493 goto out; 490 goto out;
494 port_details->num_phys = 1; 491 port_details->num_phys = 1;
495 port_details->port_info = port_info; 492 port_details->port_info = port_info;
496 port_details->port_id = ioc->port_serial_number++;
497 if (phy_info->phy_id < 64 ) 493 if (phy_info->phy_id < 64 )
498 port_details->phy_bitmask |= 494 port_details->phy_bitmask |=
499 (1 << phy_info->phy_id); 495 (1 << phy_info->phy_id);
@@ -525,12 +521,8 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
525 mptsas_get_port(phy_info_cmp); 521 mptsas_get_port(phy_info_cmp);
526 port_details->starget = 522 port_details->starget =
527 mptsas_get_starget(phy_info_cmp); 523 mptsas_get_starget(phy_info_cmp);
528 port_details->port_id =
529 phy_info_cmp->port_details->port_id;
530 port_details->num_phys = 524 port_details->num_phys =
531 phy_info_cmp->port_details->num_phys; 525 phy_info_cmp->port_details->num_phys;
532// port_info->port_serial_number--;
533 ioc->port_serial_number--;
534 if (!phy_info_cmp->port_details->num_phys) 526 if (!phy_info_cmp->port_details->num_phys)
535 kfree(phy_info_cmp->port_details); 527 kfree(phy_info_cmp->port_details);
536 } else 528 } else
@@ -554,11 +546,11 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
554 if (!port_details) 546 if (!port_details)
555 continue; 547 continue;
556 dsaswideprintk((KERN_DEBUG 548 dsaswideprintk((KERN_DEBUG
557 "%s: [%p]: phy_id=%02d port_id=%02d num_phys=%02d " 549 "%s: [%p]: phy_id=%02d num_phys=%02d "
558 "bitmask=0x%016llX\n", 550 "bitmask=0x%016llX\n",
559 __FUNCTION__, 551 __FUNCTION__,
560 port_details, i, port_details->port_id, 552 port_details, i, port_details->num_phys,
561 port_details->num_phys, port_details->phy_bitmask)); 553 port_details->phy_bitmask));
562 dsaswideprintk((KERN_DEBUG"\t\tport = %p rphy=%p\n", 554 dsaswideprintk((KERN_DEBUG"\t\tport = %p rphy=%p\n",
563 port_details->port, port_details->rphy)); 555 port_details->port, port_details->rphy));
564 } 556 }
@@ -651,16 +643,13 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
651static int 643static int
652mptsas_slave_configure(struct scsi_device *sdev) 644mptsas_slave_configure(struct scsi_device *sdev)
653{ 645{
654 struct Scsi_Host *host = sdev->host;
655 MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
656 646
657 /* 647 if (sdev->channel == MPTSAS_RAID_CHANNEL)
658 * RAID volumes placed beyond the last expected port. 648 goto out;
659 * Ignore sending sas mode pages in that case.. 649
660 */ 650 sas_read_port_mode_page(sdev);
661 if (sdev->channel < hd->ioc->num_ports)
662 sas_read_port_mode_page(sdev);
663 651
652 out:
664 return mptscsih_slave_configure(sdev); 653 return mptscsih_slave_configure(sdev);
665} 654}
666 655
@@ -689,10 +678,7 @@ mptsas_target_alloc(struct scsi_target *starget)
689 678
690 hd->Targets[target_id] = vtarget; 679 hd->Targets[target_id] = vtarget;
691 680
692 /* 681 if (starget->channel == MPTSAS_RAID_CHANNEL)
693 * RAID volumes placed beyond the last expected port.
694 */
695 if (starget->channel == hd->ioc->num_ports)
696 goto out; 682 goto out;
697 683
698 rphy = dev_to_rphy(starget->dev.parent); 684 rphy = dev_to_rphy(starget->dev.parent);
@@ -743,7 +729,7 @@ mptsas_target_destroy(struct scsi_target *starget)
743 if (!starget->hostdata) 729 if (!starget->hostdata)
744 return; 730 return;
745 731
746 if (starget->channel == hd->ioc->num_ports) 732 if (starget->channel == MPTSAS_RAID_CHANNEL)
747 goto out; 733 goto out;
748 734
749 rphy = dev_to_rphy(starget->dev.parent); 735 rphy = dev_to_rphy(starget->dev.parent);
@@ -783,10 +769,7 @@ mptsas_slave_alloc(struct scsi_device *sdev)
783 starget = scsi_target(sdev); 769 starget = scsi_target(sdev);
784 vdev->vtarget = starget->hostdata; 770 vdev->vtarget = starget->hostdata;
785 771
786 /* 772 if (sdev->channel == MPTSAS_RAID_CHANNEL)
787 * RAID volumes placed beyond the last expected port.
788 */
789 if (sdev->channel == hd->ioc->num_ports)
790 goto out; 773 goto out;
791 774
792 rphy = dev_to_rphy(sdev->sdev_target->dev.parent); 775 rphy = dev_to_rphy(sdev->sdev_target->dev.parent);
@@ -1608,11 +1591,7 @@ static int mptsas_probe_one_phy(struct device *dev,
1608 if (phy_info->sas_port_add_phy) { 1591 if (phy_info->sas_port_add_phy) {
1609 1592
1610 if (!port) { 1593 if (!port) {
1611 port = sas_port_alloc(dev, 1594 port = sas_port_alloc_num(dev);
1612 phy_info->port_details->port_id);
1613 dsaswideprintk((KERN_DEBUG
1614 "sas_port_alloc: port=%p dev=%p port_id=%d\n",
1615 port, dev, phy_info->port_details->port_id));
1616 if (!port) { 1595 if (!port) {
1617 error = -ENOMEM; 1596 error = -ENOMEM;
1618 goto out; 1597 goto out;
@@ -1625,6 +1604,9 @@ static int mptsas_probe_one_phy(struct device *dev,
1625 goto out; 1604 goto out;
1626 } 1605 }
1627 mptsas_set_port(phy_info, port); 1606 mptsas_set_port(phy_info, port);
1607 dsaswideprintk((KERN_DEBUG
1608 "sas_port_alloc: port=%p dev=%p port_id=%d\n",
1609 port, dev, port->port_identifier));
1628 } 1610 }
1629 dsaswideprintk((KERN_DEBUG "sas_port_add_phy: phy_id=%d\n", 1611 dsaswideprintk((KERN_DEBUG "sas_port_add_phy: phy_id=%d\n",
1630 phy_info->phy_id)); 1612 phy_info->phy_id));
@@ -1736,7 +1718,6 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
1736 hba = NULL; 1718 hba = NULL;
1737 } 1719 }
1738 mutex_unlock(&ioc->sas_topology_mutex); 1720 mutex_unlock(&ioc->sas_topology_mutex);
1739 ioc->num_ports = port_info->num_phys;
1740 1721
1741 for (i = 0; i < port_info->num_phys; i++) { 1722 for (i = 0; i < port_info->num_phys; i++) {
1742 mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i], 1723 mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
@@ -1939,7 +1920,8 @@ mptsas_delete_expander_phys(MPT_ADAPTER *ioc)
1939 expander_sas_address) 1920 expander_sas_address)
1940 continue; 1921 continue;
1941#ifdef MPT_DEBUG_SAS_WIDE 1922#ifdef MPT_DEBUG_SAS_WIDE
1942 dev_printk(KERN_DEBUG, &port->dev, "delete\n"); 1923 dev_printk(KERN_DEBUG, &port->dev,
1924 "delete port (%d)\n", port->port_identifier);
1943#endif 1925#endif
1944 sas_port_delete(port); 1926 sas_port_delete(port);
1945 mptsas_port_delete(phy_info->port_details); 1927 mptsas_port_delete(phy_info->port_details);
@@ -1984,7 +1966,7 @@ mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
1984 if (!ioc->raid_data.pIocPg2->NumActiveVolumes) 1966 if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
1985 goto out; 1967 goto out;
1986 for (i=0; i<ioc->raid_data.pIocPg2->NumActiveVolumes; i++) { 1968 for (i=0; i<ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
1987 scsi_add_device(ioc->sh, ioc->num_ports, 1969 scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
1988 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0); 1970 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
1989 } 1971 }
1990 out: 1972 out:
@@ -2185,7 +2167,8 @@ mptsas_hotplug_work(void *arg)
2185 ioc->name, ds, ev->channel, ev->id, phy_info->phy_id); 2167 ioc->name, ds, ev->channel, ev->id, phy_info->phy_id);
2186 2168
2187#ifdef MPT_DEBUG_SAS_WIDE 2169#ifdef MPT_DEBUG_SAS_WIDE
2188 dev_printk(KERN_DEBUG, &port->dev, "delete\n"); 2170 dev_printk(KERN_DEBUG, &port->dev,
2171 "delete port (%d)\n", port->port_identifier);
2189#endif 2172#endif
2190 sas_port_delete(port); 2173 sas_port_delete(port);
2191 mptsas_port_delete(phy_info->port_details); 2174 mptsas_port_delete(phy_info->port_details);
@@ -2289,35 +2272,26 @@ mptsas_hotplug_work(void *arg)
2289 mptsas_set_rphy(phy_info, rphy); 2272 mptsas_set_rphy(phy_info, rphy);
2290 break; 2273 break;
2291 case MPTSAS_ADD_RAID: 2274 case MPTSAS_ADD_RAID:
2292 sdev = scsi_device_lookup( 2275 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
2293 ioc->sh, 2276 ev->id, 0);
2294 ioc->num_ports,
2295 ev->id,
2296 0);
2297 if (sdev) { 2277 if (sdev) {
2298 scsi_device_put(sdev); 2278 scsi_device_put(sdev);
2299 break; 2279 break;
2300 } 2280 }
2301 printk(MYIOC_s_INFO_FMT 2281 printk(MYIOC_s_INFO_FMT
2302 "attaching raid volume, channel %d, id %d\n", 2282 "attaching raid volume, channel %d, id %d\n",
2303 ioc->name, ioc->num_ports, ev->id); 2283 ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
2304 scsi_add_device(ioc->sh, 2284 scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, ev->id, 0);
2305 ioc->num_ports,
2306 ev->id,
2307 0);
2308 mpt_findImVolumes(ioc); 2285 mpt_findImVolumes(ioc);
2309 break; 2286 break;
2310 case MPTSAS_DEL_RAID: 2287 case MPTSAS_DEL_RAID:
2311 sdev = scsi_device_lookup( 2288 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
2312 ioc->sh, 2289 ev->id, 0);
2313 ioc->num_ports,
2314 ev->id,
2315 0);
2316 if (!sdev) 2290 if (!sdev)
2317 break; 2291 break;
2318 printk(MYIOC_s_INFO_FMT 2292 printk(MYIOC_s_INFO_FMT
2319 "removing raid volume, channel %d, id %d\n", 2293 "removing raid volume, channel %d, id %d\n",
2320 ioc->name, ioc->num_ports, ev->id); 2294 ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
2321 vdevice = sdev->hostdata; 2295 vdevice = sdev->hostdata;
2322 vdevice->vtarget->deleted = 1; 2296 vdevice->vtarget->deleted = 1;
2323 mptsas_target_reset(ioc, vdevice->vtarget); 2297 mptsas_target_reset(ioc, vdevice->vtarget);
@@ -2723,7 +2697,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2723 hd->timer.data = (unsigned long) hd; 2697 hd->timer.data = (unsigned long) hd;
2724 hd->timer.function = mptscsih_timer_expired; 2698 hd->timer.function = mptscsih_timer_expired;
2725 2699
2726 hd->mpt_pq_filter = mpt_pq_filter;
2727 ioc->sas_data.ptClear = mpt_pt_clear; 2700 ioc->sas_data.ptClear = mpt_pt_clear;
2728 2701
2729 if (ioc->sas_data.ptClear==1) { 2702 if (ioc->sas_data.ptClear==1) {
@@ -2731,12 +2704,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2731 ioc, MPI_SAS_OP_CLEAR_ALL_PERSISTENT); 2704 ioc, MPI_SAS_OP_CLEAR_ALL_PERSISTENT);
2732 } 2705 }
2733 2706
2734 ddvprintk((MYIOC_s_INFO_FMT
2735 "mpt_pq_filter %x mpt_pq_filter %x\n",
2736 ioc->name,
2737 mpt_pq_filter,
2738 mpt_pq_filter));
2739
2740 init_waitqueue_head(&hd->scandv_waitq); 2707 init_waitqueue_head(&hd->scandv_waitq);
2741 hd->scandv_wait_done = 0; 2708 hd->scandv_wait_done = 0;
2742 hd->last_queue_full = 0; 2709 hd->last_queue_full = 0;
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 8242b16e3168..30524dc54b16 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -66,6 +66,7 @@
66 66
67#include "mptbase.h" 67#include "mptbase.h"
68#include "mptscsih.h" 68#include "mptscsih.h"
69#include "lsi/mpi_log_sas.h"
69 70
70/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 71/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
71#define my_NAME "Fusion MPT SCSI Host driver" 72#define my_NAME "Fusion MPT SCSI Host driver"
@@ -127,7 +128,7 @@ static void mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx);
127static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply); 128static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply);
128static int mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd); 129static int mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd);
129static int mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout ); 130static int mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout );
130static u32 SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc); 131static int SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc);
131 132
132static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout); 133static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout);
133 134
@@ -497,6 +498,34 @@ nextSGEset:
497 return SUCCESS; 498 return SUCCESS;
498} /* mptscsih_AddSGE() */ 499} /* mptscsih_AddSGE() */
499 500
501static void
502mptscsih_issue_sep_command(MPT_ADAPTER *ioc, VirtTarget *vtarget,
503 U32 SlotStatus)
504{
505 MPT_FRAME_HDR *mf;
506 SEPRequest_t *SEPMsg;
507
508 if (ioc->bus_type == FC)
509 return;
510
511 if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
512 dfailprintk((MYIOC_s_WARN_FMT "%s: no msg frames!!\n",
513 ioc->name,__FUNCTION__));
514 return;
515 }
516
517 SEPMsg = (SEPRequest_t *)mf;
518 SEPMsg->Function = MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
519 SEPMsg->Bus = vtarget->bus_id;
520 SEPMsg->TargetID = vtarget->target_id;
521 SEPMsg->Action = MPI_SEP_REQ_ACTION_WRITE_STATUS;
522 SEPMsg->SlotStatus = SlotStatus;
523 devtverboseprintk((MYIOC_s_WARN_FMT
524 "Sending SEP cmd=%x id=%d bus=%d\n",
525 ioc->name, SlotStatus, SEPMsg->TargetID, SEPMsg->Bus));
526 mpt_put_msg_frame(ioc->DoneCtx, ioc, mf);
527}
528
500/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 529/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
501/* 530/*
502 * mptscsih_io_done - Main SCSI IO callback routine registered to 531 * mptscsih_io_done - Main SCSI IO callback routine registered to
@@ -520,6 +549,8 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
520 SCSIIORequest_t *pScsiReq; 549 SCSIIORequest_t *pScsiReq;
521 SCSIIOReply_t *pScsiReply; 550 SCSIIOReply_t *pScsiReply;
522 u16 req_idx, req_idx_MR; 551 u16 req_idx, req_idx_MR;
552 VirtDevice *vdev;
553 VirtTarget *vtarget;
523 554
524 hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; 555 hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
525 556
@@ -538,6 +569,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
538 } 569 }
539 570
540 sc = hd->ScsiLookup[req_idx]; 571 sc = hd->ScsiLookup[req_idx];
572 hd->ScsiLookup[req_idx] = NULL;
541 if (sc == NULL) { 573 if (sc == NULL) {
542 MPIHeader_t *hdr = (MPIHeader_t *)mf; 574 MPIHeader_t *hdr = (MPIHeader_t *)mf;
543 575
@@ -553,6 +585,12 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
553 return 1; 585 return 1;
554 } 586 }
555 587
588 if ((unsigned char *)mf != sc->host_scribble) {
589 mptscsih_freeChainBuffers(ioc, req_idx);
590 return 1;
591 }
592
593 sc->host_scribble = NULL;
556 sc->result = DID_OK << 16; /* Set default reply as OK */ 594 sc->result = DID_OK << 16; /* Set default reply as OK */
557 pScsiReq = (SCSIIORequest_t *) mf; 595 pScsiReq = (SCSIIORequest_t *) mf;
558 pScsiReply = (SCSIIOReply_t *) mr; 596 pScsiReply = (SCSIIOReply_t *) mr;
@@ -640,10 +678,36 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
640 678
641 if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF) 679 if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF)
642 hd->sel_timeout[pScsiReq->TargetID]++; 680 hd->sel_timeout[pScsiReq->TargetID]++;
681
682 vdev = sc->device->hostdata;
683 if (!vdev)
684 break;
685 vtarget = vdev->vtarget;
686 if (vtarget->tflags & MPT_TARGET_FLAGS_LED_ON) {
687 mptscsih_issue_sep_command(ioc, vtarget,
688 MPI_SEP_REQ_SLOTSTATUS_UNCONFIGURED);
689 vtarget->tflags &= ~MPT_TARGET_FLAGS_LED_ON;
690 }
643 break; 691 break;
644 692
645 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
646 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */ 693 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
694 if ( ioc->bus_type == SAS ) {
695 u16 ioc_status = le16_to_cpu(pScsiReply->IOCStatus);
696 if (ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
697 u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
698 log_info &=SAS_LOGINFO_MASK;
699 if (log_info == SAS_LOGINFO_NEXUS_LOSS) {
700 sc->result = (DID_BUS_BUSY << 16);
701 break;
702 }
703 }
704 }
705
706 /*
707 * Allow non-SAS & non-NEXUS_LOSS to drop into below code
708 */
709
710 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
647 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */ 711 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
648 /* Linux handles an unsolicited DID_RESET better 712 /* Linux handles an unsolicited DID_RESET better
649 * than an unsolicited DID_ABORT. 713 * than an unsolicited DID_ABORT.
@@ -658,7 +722,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
658 sc->result=DID_SOFT_ERROR << 16; 722 sc->result=DID_SOFT_ERROR << 16;
659 else /* Sufficient data transfer occurred */ 723 else /* Sufficient data transfer occurred */
660 sc->result = (DID_OK << 16) | scsi_status; 724 sc->result = (DID_OK << 16) | scsi_status;
661 dreplyprintk((KERN_NOTICE 725 dreplyprintk((KERN_NOTICE
662 "RESIDUAL_MISMATCH: result=%x on id=%d\n", sc->result, sc->device->id)); 726 "RESIDUAL_MISMATCH: result=%x on id=%d\n", sc->result, sc->device->id));
663 break; 727 break;
664 728
@@ -784,8 +848,6 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
784 sc->request_bufflen, sc->sc_data_direction); 848 sc->request_bufflen, sc->sc_data_direction);
785 } 849 }
786 850
787 hd->ScsiLookup[req_idx] = NULL;
788
789 sc->scsi_done(sc); /* Issue the command callback */ 851 sc->scsi_done(sc); /* Issue the command callback */
790 852
791 /* Free Chain buffers */ 853 /* Free Chain buffers */
@@ -827,9 +889,17 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
827 dmfprintk(( "flush: ScsiDone (mf=%p,sc=%p)\n", 889 dmfprintk(( "flush: ScsiDone (mf=%p,sc=%p)\n",
828 mf, SCpnt)); 890 mf, SCpnt));
829 891
892 /* Free Chain buffers */
893 mptscsih_freeChainBuffers(ioc, ii);
894
895 /* Free Message frames */
896 mpt_free_msg_frame(ioc, mf);
897
898 if ((unsigned char *)mf != SCpnt->host_scribble)
899 continue;
900
830 /* Set status, free OS resources (SG DMA buffers) 901 /* Set status, free OS resources (SG DMA buffers)
831 * Do OS callback 902 * Do OS callback
832 * Free driver resources (chain, msg buffers)
833 */ 903 */
834 if (SCpnt->use_sg) { 904 if (SCpnt->use_sg) {
835 pci_unmap_sg(ioc->pcidev, 905 pci_unmap_sg(ioc->pcidev,
@@ -845,12 +915,6 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
845 SCpnt->result = DID_RESET << 16; 915 SCpnt->result = DID_RESET << 16;
846 SCpnt->host_scribble = NULL; 916 SCpnt->host_scribble = NULL;
847 917
848 /* Free Chain buffers */
849 mptscsih_freeChainBuffers(ioc, ii);
850
851 /* Free Message frames */
852 mpt_free_msg_frame(ioc, mf);
853
854 SCpnt->scsi_done(SCpnt); /* Issue the command callback */ 918 SCpnt->scsi_done(SCpnt); /* Issue the command callback */
855 } 919 }
856 } 920 }
@@ -887,10 +951,10 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
887 if ((sc = hd->ScsiLookup[ii]) != NULL) { 951 if ((sc = hd->ScsiLookup[ii]) != NULL) {
888 952
889 mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(hd->ioc, ii); 953 mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(hd->ioc, ii);
890 954 if (mf == NULL)
955 continue;
891 dsprintk(( "search_running: found (sc=%p, mf = %p) target %d, lun %d \n", 956 dsprintk(( "search_running: found (sc=%p, mf = %p) target %d, lun %d \n",
892 hd->ScsiLookup[ii], mf, mf->TargetID, mf->LUN[1])); 957 hd->ScsiLookup[ii], mf, mf->TargetID, mf->LUN[1]));
893
894 if ((mf->TargetID != ((u8)vdevice->vtarget->target_id)) || (mf->LUN[1] != ((u8) vdevice->lun))) 958 if ((mf->TargetID != ((u8)vdevice->vtarget->target_id)) || (mf->LUN[1] != ((u8) vdevice->lun)))
895 continue; 959 continue;
896 960
@@ -899,6 +963,8 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
899 hd->ScsiLookup[ii] = NULL; 963 hd->ScsiLookup[ii] = NULL;
900 mptscsih_freeChainBuffers(hd->ioc, ii); 964 mptscsih_freeChainBuffers(hd->ioc, ii);
901 mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf); 965 mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf);
966 if ((unsigned char *)mf != sc->host_scribble)
967 continue;
902 if (sc->use_sg) { 968 if (sc->use_sg) {
903 pci_unmap_sg(hd->ioc->pcidev, 969 pci_unmap_sg(hd->ioc->pcidev,
904 (struct scatterlist *) sc->request_buffer, 970 (struct scatterlist *) sc->request_buffer,
@@ -1341,8 +1407,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1341 goto fail; 1407 goto fail;
1342 } 1408 }
1343 1409
1410 SCpnt->host_scribble = (unsigned char *)mf;
1344 hd->ScsiLookup[my_idx] = SCpnt; 1411 hd->ScsiLookup[my_idx] = SCpnt;
1345 SCpnt->host_scribble = NULL;
1346 1412
1347 mpt_put_msg_frame(hd->ioc->DoneCtx, hd->ioc, mf); 1413 mpt_put_msg_frame(hd->ioc->DoneCtx, hd->ioc, mf);
1348 dmfprintk((MYIOC_s_INFO_FMT "Issued SCSI cmd (%p) mf=%p idx=%d\n", 1414 dmfprintk((MYIOC_s_INFO_FMT "Issued SCSI cmd (%p) mf=%p idx=%d\n",
@@ -1529,6 +1595,12 @@ mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, in
1529 rc = mpt_HardResetHandler(hd->ioc, CAN_SLEEP); 1595 rc = mpt_HardResetHandler(hd->ioc, CAN_SLEEP);
1530 } 1596 }
1531 1597
1598 /*
1599 * Check IOCStatus from TM reply message
1600 */
1601 if (hd->tm_iocstatus != MPI_IOCSTATUS_SUCCESS)
1602 rc = FAILED;
1603
1532 dtmprintk((MYIOC_s_INFO_FMT "TMHandler rc = %d!\n", hd->ioc->name, rc)); 1604 dtmprintk((MYIOC_s_INFO_FMT "TMHandler rc = %d!\n", hd->ioc->name, rc));
1533 1605
1534 return rc; 1606 return rc;
@@ -1654,6 +1726,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1654 int scpnt_idx; 1726 int scpnt_idx;
1655 int retval; 1727 int retval;
1656 VirtDevice *vdev; 1728 VirtDevice *vdev;
1729 ulong sn = SCpnt->serial_number;
1657 1730
1658 /* If we can't locate our host adapter structure, return FAILED status. 1731 /* If we can't locate our host adapter structure, return FAILED status.
1659 */ 1732 */
@@ -1707,6 +1780,11 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1707 vdev->vtarget->bus_id, vdev->vtarget->target_id, vdev->lun, 1780 vdev->vtarget->bus_id, vdev->vtarget->target_id, vdev->lun,
1708 ctx2abort, mptscsih_get_tm_timeout(hd->ioc)); 1781 ctx2abort, mptscsih_get_tm_timeout(hd->ioc));
1709 1782
1783 if (SCPNT_TO_LOOKUP_IDX(SCpnt) == scpnt_idx &&
1784 SCpnt->serial_number == sn) {
1785 retval = FAILED;
1786 }
1787
1710 printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n", 1788 printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n",
1711 hd->ioc->name, 1789 hd->ioc->name,
1712 ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt); 1790 ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
@@ -2023,6 +2101,7 @@ mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *m
2023 DBG_DUMP_TM_REPLY_FRAME((u32 *)pScsiTmReply); 2101 DBG_DUMP_TM_REPLY_FRAME((u32 *)pScsiTmReply);
2024 2102
2025 iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK; 2103 iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
2104 hd->tm_iocstatus = iocstatus;
2026 dtmprintk((MYIOC_s_WARN_FMT " SCSI TaskMgmt (%d) IOCStatus=%04x IOCLogInfo=%08x\n", 2105 dtmprintk((MYIOC_s_WARN_FMT " SCSI TaskMgmt (%d) IOCStatus=%04x IOCLogInfo=%08x\n",
2027 ioc->name, tmType, iocstatus, le32_to_cpu(pScsiTmReply->IOCLogInfo))); 2106 ioc->name, tmType, iocstatus, le32_to_cpu(pScsiTmReply->IOCLogInfo)));
2028 /* Error? (anything non-zero?) */ 2107 /* Error? (anything non-zero?) */
@@ -2401,6 +2480,13 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
2401 ioc->events[idx].data[1] = (sense_data[13] << 8) || sense_data[12]; 2480 ioc->events[idx].data[1] = (sense_data[13] << 8) || sense_data[12];
2402 2481
2403 ioc->eventContext++; 2482 ioc->eventContext++;
2483 if (hd->ioc->pcidev->vendor ==
2484 PCI_VENDOR_ID_IBM) {
2485 mptscsih_issue_sep_command(hd->ioc,
2486 vdev->vtarget, MPI_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
2487 vdev->vtarget->tflags |=
2488 MPT_TARGET_FLAGS_LED_ON;
2489 }
2404 } 2490 }
2405 } 2491 }
2406 } else { 2492 } else {
@@ -2409,7 +2495,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
2409 } 2495 }
2410} 2496}
2411 2497
2412static u32 2498static int
2413SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc) 2499SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc)
2414{ 2500{
2415 MPT_SCSI_HOST *hd; 2501 MPT_SCSI_HOST *hd;
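The abort path now re-reads the lookup index and the saved serial number after the task-management call returns: if both still match, the command was never completed, so the abort is reported as failed. A compact sketch of that decision with placeholder types (cmd, lookup_slot):

struct cmd { unsigned long serial; int slot; };

static int abort_succeeded(struct cmd *c, int saved_slot,
			   unsigned long saved_serial,
			   int (*lookup_slot)(struct cmd *))
{
	/* still in the same slot with the same serial number: the
	 * firmware never completed it, so the abort did not work */
	if (lookup_slot(c) == saved_slot && c->serial == saved_serial)
		return -1;	/* FAILED */
	return 0;		/* SUCCESS */
}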
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 0a1ff762205f..e4cc3dd5fc9f 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -83,10 +83,6 @@ static int mpt_saf_te = MPTSCSIH_SAF_TE;
83module_param(mpt_saf_te, int, 0); 83module_param(mpt_saf_te, int, 0);
84MODULE_PARM_DESC(mpt_saf_te, " Force enabling SEP Processor: enable=1 (default=MPTSCSIH_SAF_TE=0)"); 84MODULE_PARM_DESC(mpt_saf_te, " Force enabling SEP Processor: enable=1 (default=MPTSCSIH_SAF_TE=0)");
85 85
86static int mpt_pq_filter = 0;
87module_param(mpt_pq_filter, int, 0);
88MODULE_PARM_DESC(mpt_pq_filter, " Enable peripheral qualifier filter: enable=1 (default=0)");
89
90static void mptspi_write_offset(struct scsi_target *, int); 86static void mptspi_write_offset(struct scsi_target *, int);
91static void mptspi_write_width(struct scsi_target *, int); 87static void mptspi_write_width(struct scsi_target *, int);
92static int mptspi_write_spi_device_pg1(struct scsi_target *, 88static int mptspi_write_spi_device_pg1(struct scsi_target *,
@@ -1047,14 +1043,12 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1047 hd->timer.function = mptscsih_timer_expired; 1043 hd->timer.function = mptscsih_timer_expired;
1048 1044
1049 ioc->spi_data.Saf_Te = mpt_saf_te; 1045 ioc->spi_data.Saf_Te = mpt_saf_te;
1050 hd->mpt_pq_filter = mpt_pq_filter;
1051 1046
1052 hd->negoNvram = MPT_SCSICFG_USE_NVRAM; 1047 hd->negoNvram = MPT_SCSICFG_USE_NVRAM;
1053 ddvprintk((MYIOC_s_INFO_FMT 1048 ddvprintk((MYIOC_s_INFO_FMT
1054 "saf_te %x mpt_pq_filter %x\n", 1049 "saf_te %x\n",
1055 ioc->name, 1050 ioc->name,
1056 mpt_saf_te, 1051 mpt_saf_te));
1057 mpt_pq_filter));
1058 ioc->spi_data.noQas = 0; 1052 ioc->spi_data.noQas = 0;
1059 1053
1060 init_waitqueue_head(&hd->scandv_waitq); 1054 init_waitqueue_head(&hd->scandv_waitq);
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 36d511729f71..2146cf74425e 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -132,6 +132,7 @@ static int __init dummy_init_module(void)
132 for (i = 0; i < numdummies && !err; i++) 132 for (i = 0; i < numdummies && !err; i++)
133 err = dummy_init_one(i); 133 err = dummy_init_one(i);
134 if (err) { 134 if (err) {
135 i--;
135 while (--i >= 0) 136 while (--i >= 0)
136 dummy_free_one(i); 137 dummy_free_one(i);
137 } 138 }
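The extra i-- matters because the for loop increments i once more after the failing iteration; without it the cleanup loop would also free the unit that never initialized. A self-contained userspace sketch of the corrected unwind (init_one and free_one are stand-ins, and index 3 is made to fail):

#include <stdio.h>

static int init_one(int i)  { return (i == 3) ? -1 : 0; }
static void free_one(int i) { printf("freeing %d\n", i); }

int main(void)
{
	int err = 0, i, n = 5;

	for (i = 0; i < n && !err; i++)
		err = init_one(i);
	if (err) {
		i--;			/* i already stepped past the failure */
		while (--i >= 0)	/* free only units that really exist */
			free_one(i);
	}
	return err ? 1 : 0;
}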
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index f411bbb44f86..d304297c496c 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -110,6 +110,9 @@ struct e1000_adapter;
110#define E1000_MIN_RXD 80 110#define E1000_MIN_RXD 80
111#define E1000_MAX_82544_RXD 4096 111#define E1000_MAX_82544_RXD 4096
112 112
113/* this is the size past which hardware will drop packets when setting LPE=0 */
114#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
115
113/* Supported Rx Buffer Sizes */ 116/* Supported Rx Buffer Sizes */
114#define E1000_RXBUFFER_128 128 /* Used for packet split */ 117#define E1000_RXBUFFER_128 128 /* Used for packet split */
115#define E1000_RXBUFFER_256 256 /* Used for packet split */ 118#define E1000_RXBUFFER_256 256 /* Used for packet split */
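The 1522 figure is the standard arithmetic for a VLAN-tagged frame: 1500-byte payload plus 14-byte Ethernet header, 4-byte VLAN tag, and 4-byte FCS. Quick check:

#include <stdio.h>

int main(void)
{
	int mtu = 1500, eth_hdr = 14, vlan_tag = 4, fcs = 4;

	/* prints 1522, the LPE=0 drop threshold mentioned in the comment */
	printf("max VLAN-tagged frame = %d\n", mtu + eth_hdr + vlan_tag + fcs);
	return 0;
}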
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 6d3d41934503..da62db897426 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
36#else 36#else
37#define DRIVERNAPI "-NAPI" 37#define DRIVERNAPI "-NAPI"
38#endif 38#endif
39#define DRV_VERSION "7.1.9-k2"DRIVERNAPI 39#define DRV_VERSION "7.1.9-k4"DRIVERNAPI
40char e1000_driver_version[] = DRV_VERSION; 40char e1000_driver_version[] = DRV_VERSION;
41static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 41static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
42 42
@@ -1068,7 +1068,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
1068 1068
1069 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); 1069 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
1070 1070
1071 adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE; 1071 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1072 adapter->rx_ps_bsize0 = E1000_RXBUFFER_128; 1072 adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
1073 hw->max_frame_size = netdev->mtu + 1073 hw->max_frame_size = netdev->mtu +
1074 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 1074 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
@@ -3148,7 +3148,6 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
3148 adapter->rx_buffer_len = E1000_RXBUFFER_16384; 3148 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3149 3149
3150 /* adjust allocation if LPE protects us, and we aren't using SBP */ 3150 /* adjust allocation if LPE protects us, and we aren't using SBP */
3151#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
3152 if (!adapter->hw.tbi_compatibility_on && 3151 if (!adapter->hw.tbi_compatibility_on &&
3153 ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) || 3152 ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
3154 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) 3153 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
@@ -3387,8 +3386,8 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
3387 E1000_WRITE_REG(hw, IMC, ~0); 3386 E1000_WRITE_REG(hw, IMC, ~0);
3388 E1000_WRITE_FLUSH(hw); 3387 E1000_WRITE_FLUSH(hw);
3389 } 3388 }
3390 if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0]))) 3389 if (likely(netif_rx_schedule_prep(netdev)))
3391 __netif_rx_schedule(&adapter->polling_netdev[0]); 3390 __netif_rx_schedule(netdev);
3392 else 3391 else
3393 e1000_irq_enable(adapter); 3392 e1000_irq_enable(adapter);
3394#else 3393#else
@@ -3431,34 +3430,26 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3431{ 3430{
3432 struct e1000_adapter *adapter; 3431 struct e1000_adapter *adapter;
3433 int work_to_do = min(*budget, poll_dev->quota); 3432 int work_to_do = min(*budget, poll_dev->quota);
3434 int tx_cleaned = 0, i = 0, work_done = 0; 3433 int tx_cleaned = 0, work_done = 0;
3435 3434
3436 /* Must NOT use netdev_priv macro here. */ 3435 /* Must NOT use netdev_priv macro here. */
3437 adapter = poll_dev->priv; 3436 adapter = poll_dev->priv;
3438 3437
3439 /* Keep link state information with original netdev */ 3438 /* Keep link state information with original netdev */
3440 if (!netif_carrier_ok(adapter->netdev)) 3439 if (!netif_carrier_ok(poll_dev))
3441 goto quit_polling; 3440 goto quit_polling;
3442 3441
3443 while (poll_dev != &adapter->polling_netdev[i]) { 3442 /* e1000_clean is called per-cpu. This lock protects
3444 i++; 3443 * tx_ring[0] from being cleaned by multiple cpus
3445 BUG_ON(i == adapter->num_rx_queues); 3444 * simultaneously. A failure obtaining the lock means
3445 * tx_ring[0] is currently being cleaned anyway. */
3446 if (spin_trylock(&adapter->tx_queue_lock)) {
3447 tx_cleaned = e1000_clean_tx_irq(adapter,
3448 &adapter->tx_ring[0]);
3449 spin_unlock(&adapter->tx_queue_lock);
3446 } 3450 }
3447 3451
3448 if (likely(adapter->num_tx_queues == 1)) { 3452 adapter->clean_rx(adapter, &adapter->rx_ring[0],
3449 /* e1000_clean is called per-cpu. This lock protects
3450 * tx_ring[0] from being cleaned by multiple cpus
3451 * simultaneously. A failure obtaining the lock means
3452 * tx_ring[0] is currently being cleaned anyway. */
3453 if (spin_trylock(&adapter->tx_queue_lock)) {
3454 tx_cleaned = e1000_clean_tx_irq(adapter,
3455 &adapter->tx_ring[0]);
3456 spin_unlock(&adapter->tx_queue_lock);
3457 }
3458 } else
3459 tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
3460
3461 adapter->clean_rx(adapter, &adapter->rx_ring[i],
3462 &work_done, work_to_do); 3453 &work_done, work_to_do);
3463 3454
3464 *budget -= work_done; 3455 *budget -= work_done;
@@ -3466,7 +3457,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3466 3457
3467 /* If no Tx and not enough Rx work done, exit the polling mode */ 3458 /* If no Tx and not enough Rx work done, exit the polling mode */
3468 if ((!tx_cleaned && (work_done == 0)) || 3459 if ((!tx_cleaned && (work_done == 0)) ||
3469 !netif_running(adapter->netdev)) { 3460 !netif_running(poll_dev)) {
3470quit_polling: 3461quit_polling:
3471 netif_rx_complete(poll_dev); 3462 netif_rx_complete(poll_dev);
3472 e1000_irq_enable(adapter); 3463 e1000_irq_enable(adapter);
@@ -3681,6 +3672,9 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3681 3672
3682 length = le16_to_cpu(rx_desc->length); 3673 length = le16_to_cpu(rx_desc->length);
3683 3674
3675 /* adjust length to remove Ethernet CRC */
3676 length -= 4;
3677
3684 if (unlikely(!(status & E1000_RXD_STAT_EOP))) { 3678 if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
3685 /* All receives must fit into a single buffer */ 3679 /* All receives must fit into a single buffer */
3686 E1000_DBG("%s: Receive packet consumed multiple" 3680 E1000_DBG("%s: Receive packet consumed multiple"
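Trimming 4 bytes here reflects that the hardware hands the frame up with its Ethernet FCS still attached. A tiny sketch of the same adjustment on an invented frame type:

struct frame { unsigned char *data; int len; };

static void strip_fcs(struct frame *f)
{
	if (f->len >= 4)
		f->len -= 4;	/* drop the trailing CRC; payload is untouched */
}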
@@ -3885,8 +3879,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3885 pci_dma_sync_single_for_device(pdev, 3879 pci_dma_sync_single_for_device(pdev,
3886 ps_page_dma->ps_page_dma[0], 3880 ps_page_dma->ps_page_dma[0],
3887 PAGE_SIZE, PCI_DMA_FROMDEVICE); 3881 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3882 /* remove the CRC */
3883 l1 -= 4;
3888 skb_put(skb, l1); 3884 skb_put(skb, l1);
3889 length += l1;
3890 goto copydone; 3885 goto copydone;
3891 } /* if */ 3886 } /* if */
3892 } 3887 }
@@ -3905,6 +3900,10 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3905 skb->truesize += length; 3900 skb->truesize += length;
3906 } 3901 }
3907 3902
3903 /* strip the ethernet crc, problem is we're using pages now so
3904 * this whole operation can get a little cpu intensive */
3905 pskb_trim(skb, skb->len - 4);
3906
3908copydone: 3907copydone:
3909 e1000_rx_checksum(adapter, staterr, 3908 e1000_rx_checksum(adapter, staterr,
3910 le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); 3909 le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
@@ -4752,6 +4751,7 @@ static void
4752e1000_netpoll(struct net_device *netdev) 4751e1000_netpoll(struct net_device *netdev)
4753{ 4752{
4754 struct e1000_adapter *adapter = netdev_priv(netdev); 4753 struct e1000_adapter *adapter = netdev_priv(netdev);
4754
4755 disable_irq(adapter->pdev->irq); 4755 disable_irq(adapter->pdev->irq);
4756 e1000_intr(adapter->pdev->irq, netdev, NULL); 4756 e1000_intr(adapter->pdev->irq, netdev, NULL);
4757 e1000_clean_tx_irq(adapter, adapter->tx_ring); 4757 e1000_clean_tx_irq(adapter, adapter->tx_ring);
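The single-queue special case disappears because tx_ring[0] is now always cleaned under a trylock: whichever CPU fails to take the lock simply skips TX cleanup, since the current holder is doing it anyway. A userspace analogue of that pattern with pthreads (names are illustrative):

#include <pthread.h>

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

static int clean_tx_ring(void) { /* reclaim finished descriptors */ return 1; }

static int poll_once(void)
{
	int tx_cleaned = 0;

	/* failing to get the lock is fine: another context is cleaning
	 * the ring right now, so this poll can go straight to RX work */
	if (pthread_mutex_trylock(&tx_lock) == 0) {
		tx_cleaned = clean_tx_ring();
		pthread_mutex_unlock(&tx_lock);
	}
	return tx_cleaned;
}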
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 3a42afab5036..43e3f33ed5e2 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -271,6 +271,7 @@ static int __init ifb_init_module(void)
271 for (i = 0; i < numifbs && !err; i++) 271 for (i = 0; i < numifbs && !err; i++)
272 err = ifb_init_one(i); 272 err = ifb_init_one(i);
273 if (err) { 273 if (err) {
274 i--;
274 while (--i >= 0) 275 while (--i >= 0)
275 ifb_free_one(i); 276 ifb_free_one(i);
276 } 277 }
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 07ca9480a6fe..c3e52c806b13 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -620,7 +620,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
620 return -ENXIO; 620 return -ENXIO;
621 } 621 }
622 dev_info(&mgp->pdev->dev, "handoff confirmed\n"); 622 dev_info(&mgp->pdev->dev, "handoff confirmed\n");
623 myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096); 623 myri10ge_dummy_rdma(mgp, 1);
624 624
625 return 0; 625 return 0;
626} 626}
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 82200bfaa8ed..7de9a07b2ac2 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -516,10 +516,7 @@ static int skge_set_pauseparam(struct net_device *dev,
516/* Chip internal frequency for clock calculations */ 516/* Chip internal frequency for clock calculations */
517static inline u32 hwkhz(const struct skge_hw *hw) 517static inline u32 hwkhz(const struct skge_hw *hw)
518{ 518{
519 if (hw->chip_id == CHIP_ID_GENESIS) 519 return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125;
520 return 53215; /* or: 53.125 MHz */
521 else
522 return 78215; /* or: 78.125 MHz */
523} 520}
524 521
525/* Chip HZ to microseconds */ 522/* Chip HZ to microseconds */
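The rewrite also fixes the constants: 53.125 MHz and 78.125 MHz are 53125 and 78125 kHz, not the 53215/78215 the old code returned. Equivalent one-liner, with a plain flag standing in for the chip-ID test:

static unsigned int hw_khz(int is_genesis)
{
	return is_genesis ? 53125 : 78125;	/* kHz, matching the comments */
}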
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index d98f28c34e5c..de91609ca112 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -50,7 +50,7 @@
50#include "sky2.h" 50#include "sky2.h"
51 51
52#define DRV_NAME "sky2" 52#define DRV_NAME "sky2"
53#define DRV_VERSION "1.4" 53#define DRV_VERSION "1.5"
54#define PFX DRV_NAME " " 54#define PFX DRV_NAME " "
55 55
56/* 56/*
@@ -2204,9 +2204,6 @@ static int sky2_poll(struct net_device *dev0, int *budget)
2204 int work_done = 0; 2204 int work_done = 0;
2205 u32 status = sky2_read32(hw, B0_Y2_SP_EISR); 2205 u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
2206 2206
2207 if (!~status)
2208 goto out;
2209
2210 if (status & Y2_IS_HW_ERR) 2207 if (status & Y2_IS_HW_ERR)
2211 sky2_hw_intr(hw); 2208 sky2_hw_intr(hw);
2212 2209
@@ -2243,7 +2240,7 @@ static int sky2_poll(struct net_device *dev0, int *budget)
2243 2240
2244 if (sky2_more_work(hw)) 2241 if (sky2_more_work(hw))
2245 return 1; 2242 return 1;
2246out: 2243
2247 netif_rx_complete(dev0); 2244 netif_rx_complete(dev0);
2248 2245
2249 sky2_read32(hw, B0_Y2_SP_LISR); 2246 sky2_read32(hw, B0_Y2_SP_LISR);
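The dropped early exit relied on the !~status idiom: a 32-bit register that reads back as all ones (typically a surprise-removed PCI device) makes ~status zero. A small demonstration of the test itself:

#include <assert.h>

int main(void)
{
	unsigned int dead = ~0u, live = 0x2u;

	assert(!~dead);		/* ~0xffffffff == 0, so the test fires */
	assert(!(!~live));	/* any other value leaves work to do   */
	return 0;
}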
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index fb1d5a8a45cf..647f62e9707d 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -84,7 +84,7 @@ MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
84 * 84 *
85 * returns the content of the specified SMMIO register. 85 * returns the content of the specified SMMIO register.
86 */ 86 */
87static u32 87static inline u32
88spider_net_read_reg(struct spider_net_card *card, u32 reg) 88spider_net_read_reg(struct spider_net_card *card, u32 reg)
89{ 89{
90 u32 value; 90 u32 value;
@@ -101,7 +101,7 @@ spider_net_read_reg(struct spider_net_card *card, u32 reg)
101 * @reg: register to write to 101 * @reg: register to write to
102 * @value: value to write into the specified SMMIO register 102 * @value: value to write into the specified SMMIO register
103 */ 103 */
104static void 104static inline void
105spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value) 105spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
106{ 106{
107 value = cpu_to_le32(value); 107 value = cpu_to_le32(value);
@@ -259,39 +259,10 @@ spider_net_get_mac_address(struct net_device *netdev)
259 * 259 *
260 * returns the status as in the dmac_cmd_status field of the descriptor 260 * returns the status as in the dmac_cmd_status field of the descriptor
261 */ 261 */
262static enum spider_net_descr_status 262static inline int
263spider_net_get_descr_status(struct spider_net_descr *descr) 263spider_net_get_descr_status(struct spider_net_descr *descr)
264{ 264{
265 u32 cmd_status; 265 return descr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
266
267 cmd_status = descr->dmac_cmd_status;
268 cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT;
269 /* no need to mask out any bits, as cmd_status is 32 bits wide only
270 * (and unsigned) */
271 return cmd_status;
272}
273
274/**
275 * spider_net_set_descr_status -- sets the status of a descriptor
276 * @descr: descriptor to change
277 * @status: status to set in the descriptor
278 *
279 * changes the status to the specified value. Doesn't change other bits
280 * in the status
281 */
282static void
283spider_net_set_descr_status(struct spider_net_descr *descr,
284 enum spider_net_descr_status status)
285{
286 u32 cmd_status;
287 /* read the status */
288 cmd_status = descr->dmac_cmd_status;
289 /* clean the upper 4 bits */
290 cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO;
291 /* add the status to it */
292 cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT;
293 /* and write it back */
294 descr->dmac_cmd_status = cmd_status;
295} 266}
296 267
297/** 268/**
@@ -328,24 +299,23 @@ spider_net_free_chain(struct spider_net_card *card,
328static int 299static int
329spider_net_init_chain(struct spider_net_card *card, 300spider_net_init_chain(struct spider_net_card *card,
330 struct spider_net_descr_chain *chain, 301 struct spider_net_descr_chain *chain,
331 struct spider_net_descr *start_descr, int no) 302 struct spider_net_descr *start_descr,
303 int direction, int no)
332{ 304{
333 int i; 305 int i;
334 struct spider_net_descr *descr; 306 struct spider_net_descr *descr;
335 dma_addr_t buf; 307 dma_addr_t buf;
336 308
337 atomic_set(&card->rx_chain_refill,0);
338
339 descr = start_descr; 309 descr = start_descr;
340 memset(descr, 0, sizeof(*descr) * no); 310 memset(descr, 0, sizeof(*descr) * no);
341 311
342 /* set up the hardware pointers in each descriptor */ 312 /* set up the hardware pointers in each descriptor */
343 for (i=0; i<no; i++, descr++) { 313 for (i=0; i<no; i++, descr++) {
344 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 314 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
345 315
346 buf = pci_map_single(card->pdev, descr, 316 buf = pci_map_single(card->pdev, descr,
347 SPIDER_NET_DESCR_SIZE, 317 SPIDER_NET_DESCR_SIZE,
348 PCI_DMA_BIDIRECTIONAL); 318 direction);
349 319
350 if (buf == DMA_ERROR_CODE) 320 if (buf == DMA_ERROR_CODE)
351 goto iommu_error; 321 goto iommu_error;
@@ -360,10 +330,11 @@ spider_net_init_chain(struct spider_net_card *card,
360 start_descr->prev = descr-1; 330 start_descr->prev = descr-1;
361 331
362 descr = start_descr; 332 descr = start_descr;
363 for (i=0; i < no; i++, descr++) { 333 if (direction == PCI_DMA_FROMDEVICE)
364 descr->next_descr_addr = descr->next->bus_addr; 334 for (i=0; i < no; i++, descr++)
365 } 335 descr->next_descr_addr = descr->next->bus_addr;
366 336
337 spin_lock_init(&chain->lock);
367 chain->head = start_descr; 338 chain->head = start_descr;
368 chain->tail = start_descr; 339 chain->tail = start_descr;
369 340
@@ -375,7 +346,7 @@ iommu_error:
375 if (descr->bus_addr) 346 if (descr->bus_addr)
376 pci_unmap_single(card->pdev, descr->bus_addr, 347 pci_unmap_single(card->pdev, descr->bus_addr,
377 SPIDER_NET_DESCR_SIZE, 348 SPIDER_NET_DESCR_SIZE,
378 PCI_DMA_BIDIRECTIONAL); 349 direction);
379 return -ENOMEM; 350 return -ENOMEM;
380} 351}
381 352
@@ -396,7 +367,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
396 dev_kfree_skb(descr->skb); 367 dev_kfree_skb(descr->skb);
397 pci_unmap_single(card->pdev, descr->buf_addr, 368 pci_unmap_single(card->pdev, descr->buf_addr,
398 SPIDER_NET_MAX_FRAME, 369 SPIDER_NET_MAX_FRAME,
399 PCI_DMA_BIDIRECTIONAL); 370 PCI_DMA_FROMDEVICE);
400 } 371 }
401 descr = descr->next; 372 descr = descr->next;
402 } 373 }
@@ -446,15 +417,16 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
446 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); 417 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
447 /* io-mmu-map the skb */ 418 /* io-mmu-map the skb */
448 buf = pci_map_single(card->pdev, descr->skb->data, 419 buf = pci_map_single(card->pdev, descr->skb->data,
449 SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL); 420 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
450 descr->buf_addr = buf; 421 descr->buf_addr = buf;
451 if (buf == DMA_ERROR_CODE) { 422 if (buf == DMA_ERROR_CODE) {
452 dev_kfree_skb_any(descr->skb); 423 dev_kfree_skb_any(descr->skb);
453 if (netif_msg_rx_err(card) && net_ratelimit()) 424 if (netif_msg_rx_err(card) && net_ratelimit())
454 pr_err("Could not iommu-map rx buffer\n"); 425 pr_err("Could not iommu-map rx buffer\n");
455 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 426 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
456 } else { 427 } else {
457 descr->dmac_cmd_status = SPIDER_NET_DMAC_RX_CARDOWNED; 428 descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
429 SPIDER_NET_DMAC_NOINTR_COMPLETE;
458 } 430 }
459 431
460 return error; 432 return error;
@@ -468,7 +440,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
468 * chip by writing to the appropriate register. DMA is enabled in 440 * chip by writing to the appropriate register. DMA is enabled in
469 * spider_net_enable_rxdmac. 441 * spider_net_enable_rxdmac.
470 */ 442 */
471static void 443static inline void
472spider_net_enable_rxchtails(struct spider_net_card *card) 444spider_net_enable_rxchtails(struct spider_net_card *card)
473{ 445{
474 /* assume chain is aligned correctly */ 446 /* assume chain is aligned correctly */
@@ -483,7 +455,7 @@ spider_net_enable_rxchtails(struct spider_net_card *card)
483 * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN 455 * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
484 * in the GDADMACCNTR register 456 * in the GDADMACCNTR register
485 */ 457 */
486static void 458static inline void
487spider_net_enable_rxdmac(struct spider_net_card *card) 459spider_net_enable_rxdmac(struct spider_net_card *card)
488{ 460{
489 wmb(); 461 wmb();
@@ -500,23 +472,24 @@ spider_net_enable_rxdmac(struct spider_net_card *card)
500static void 472static void
501spider_net_refill_rx_chain(struct spider_net_card *card) 473spider_net_refill_rx_chain(struct spider_net_card *card)
502{ 474{
503 struct spider_net_descr_chain *chain; 475 struct spider_net_descr_chain *chain = &card->rx_chain;
504 476 unsigned long flags;
505 chain = &card->rx_chain;
506 477
507 /* one context doing the refill (and a second context seeing that 478 /* one context doing the refill (and a second context seeing that
508 * and omitting it) is ok. If called by NAPI, we'll be called again 479 * and omitting it) is ok. If called by NAPI, we'll be called again
509 * as spider_net_decode_one_descr is called several times. If some 480 * as spider_net_decode_one_descr is called several times. If some
510 * interrupt calls us, the NAPI is about to clean up anyway. */ 481 * interrupt calls us, the NAPI is about to clean up anyway. */
511 if (atomic_inc_return(&card->rx_chain_refill) == 1) 482 if (!spin_trylock_irqsave(&chain->lock, flags))
512 while (spider_net_get_descr_status(chain->head) == 483 return;
513 SPIDER_NET_DESCR_NOT_IN_USE) { 484
514 if (spider_net_prepare_rx_descr(card, chain->head)) 485 while (spider_net_get_descr_status(chain->head) ==
515 break; 486 SPIDER_NET_DESCR_NOT_IN_USE) {
516 chain->head = chain->head->next; 487 if (spider_net_prepare_rx_descr(card, chain->head))
517 } 488 break;
489 chain->head = chain->head->next;
490 }
518 491
519 atomic_dec(&card->rx_chain_refill); 492 spin_unlock_irqrestore(&chain->lock, flags);
520} 493}
521 494
522/** 495/**
@@ -554,111 +527,6 @@ error:
554} 527}
555 528
556/** 529/**
557 * spider_net_release_tx_descr - processes a used tx descriptor
558 * @card: card structure
559 * @descr: descriptor to release
560 *
561 * releases a used tx descriptor (unmapping, freeing of skb)
562 */
563static void
564spider_net_release_tx_descr(struct spider_net_card *card,
565 struct spider_net_descr *descr)
566{
567 struct sk_buff *skb;
568
569 /* unmap the skb */
570 skb = descr->skb;
571 pci_unmap_single(card->pdev, descr->buf_addr, skb->len,
572 PCI_DMA_BIDIRECTIONAL);
573
574 dev_kfree_skb_any(skb);
575
576 /* set status to not used */
577 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
578}
579
580/**
581 * spider_net_release_tx_chain - processes sent tx descriptors
582 * @card: adapter structure
583 * @brutal: if set, don't care about whether descriptor seems to be in use
584 *
585 * returns 0 if the tx ring is empty, otherwise 1.
586 *
587 * spider_net_release_tx_chain releases the tx descriptors that spider has
588 * finished with (if non-brutal) or simply release tx descriptors (if brutal).
589 * If some other context is calling this function, we return 1 so that we're
590 * scheduled again (if we were scheduled) and will not loose initiative.
591 */
592static int
593spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
594{
595 struct spider_net_descr_chain *tx_chain = &card->tx_chain;
596 enum spider_net_descr_status status;
597
598 if (atomic_inc_return(&card->tx_chain_release) != 1) {
599 atomic_dec(&card->tx_chain_release);
600 return 1;
601 }
602
603 for (;;) {
604 status = spider_net_get_descr_status(tx_chain->tail);
605 switch (status) {
606 case SPIDER_NET_DESCR_CARDOWNED:
607 if (!brutal)
608 goto out;
609 /* fallthrough, if we release the descriptors
610 * brutally (then we don't care about
611 * SPIDER_NET_DESCR_CARDOWNED) */
612 case SPIDER_NET_DESCR_RESPONSE_ERROR:
613 case SPIDER_NET_DESCR_PROTECTION_ERROR:
614 case SPIDER_NET_DESCR_FORCE_END:
615 if (netif_msg_tx_err(card))
616 pr_err("%s: forcing end of tx descriptor "
617 "with status x%02x\n",
618 card->netdev->name, status);
619 card->netdev_stats.tx_dropped++;
620 break;
621
622 case SPIDER_NET_DESCR_COMPLETE:
623 card->netdev_stats.tx_packets++;
624 card->netdev_stats.tx_bytes +=
625 tx_chain->tail->skb->len;
626 break;
627
628 default: /* any other value (== SPIDER_NET_DESCR_NOT_IN_USE) */
629 goto out;
630 }
631 spider_net_release_tx_descr(card, tx_chain->tail);
632 tx_chain->tail = tx_chain->tail->next;
633 }
634out:
635 atomic_dec(&card->tx_chain_release);
636
637 netif_wake_queue(card->netdev);
638
639 if (status == SPIDER_NET_DESCR_CARDOWNED)
640 return 1;
641 return 0;
642}
643
644/**
645 * spider_net_cleanup_tx_ring - cleans up the TX ring
646 * @card: card structure
647 *
648 * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
649 * interrupts to cleanup our TX ring) and returns sent packets to the stack
650 * by freeing them
651 */
652static void
653spider_net_cleanup_tx_ring(struct spider_net_card *card)
654{
655 if ( (spider_net_release_tx_chain(card, 0)) &&
656 (card->netdev->flags & IFF_UP) ) {
657 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
658 }
659}
660
661/**
662 * spider_net_get_multicast_hash - generates hash for multicast filter table 530 * spider_net_get_multicast_hash - generates hash for multicast filter table
663 * @addr: multicast address 531 * @addr: multicast address
664 * 532 *
@@ -761,97 +629,6 @@ spider_net_disable_rxdmac(struct spider_net_card *card)
761} 629}
762 630
763/** 631/**
764 * spider_net_stop - called upon ifconfig down
765 * @netdev: interface device structure
766 *
767 * always returns 0
768 */
769int
770spider_net_stop(struct net_device *netdev)
771{
772 struct spider_net_card *card = netdev_priv(netdev);
773
774 tasklet_kill(&card->rxram_full_tl);
775 netif_poll_disable(netdev);
776 netif_carrier_off(netdev);
777 netif_stop_queue(netdev);
778 del_timer_sync(&card->tx_timer);
779
780 /* disable/mask all interrupts */
781 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
782 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
783 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
784
785 /* free_irq(netdev->irq, netdev);*/
786 free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev);
787
788 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
789 SPIDER_NET_DMA_TX_FEND_VALUE);
790
791 /* turn off DMA, force end */
792 spider_net_disable_rxdmac(card);
793
794 /* release chains */
795 spider_net_release_tx_chain(card, 1);
796
797 spider_net_free_chain(card, &card->tx_chain);
798 spider_net_free_chain(card, &card->rx_chain);
799
800 return 0;
801}
802
803/**
804 * spider_net_get_next_tx_descr - returns the next available tx descriptor
805 * @card: device structure to get descriptor from
806 *
807 * returns the address of the next descriptor, or NULL if not available.
808 */
809static struct spider_net_descr *
810spider_net_get_next_tx_descr(struct spider_net_card *card)
811{
812 /* check, if head points to not-in-use descr */
813 if ( spider_net_get_descr_status(card->tx_chain.head) ==
814 SPIDER_NET_DESCR_NOT_IN_USE ) {
815 return card->tx_chain.head;
816 } else {
817 return NULL;
818 }
819}
820
821/**
822 * spider_net_set_txdescr_cmdstat - sets the tx descriptor command field
823 * @descr: descriptor structure to fill out
824 * @skb: packet to consider
825 *
826 * fills out the command and status field of the descriptor structure,
827 * depending on hardware checksum settings.
828 */
829static void
830spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
831 struct sk_buff *skb)
832{
833 /* make sure the other fields in the descriptor are written */
834 wmb();
835
836 if (skb->ip_summed != CHECKSUM_HW) {
837 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
838 return;
839 }
840
841 /* is packet ip?
842 * if yes: tcp? udp? */
843 if (skb->protocol == htons(ETH_P_IP)) {
844 if (skb->nh.iph->protocol == IPPROTO_TCP)
845 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS;
846 else if (skb->nh.iph->protocol == IPPROTO_UDP)
847 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS;
848 else /* the stack should checksum non-tcp and non-udp
849 packets on his own: NETIF_F_IP_CSUM */
850 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
851 }
852}
853
854/**
855 * spider_net_prepare_tx_descr - fill tx descriptor with skb data 632 * spider_net_prepare_tx_descr - fill tx descriptor with skb data
856 * @card: card structure 633 * @card: card structure
857 * @descr: descriptor structure to fill out 634 * @descr: descriptor structure to fill out
@@ -864,13 +641,12 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
864 */ 641 */
865static int 642static int
866spider_net_prepare_tx_descr(struct spider_net_card *card, 643spider_net_prepare_tx_descr(struct spider_net_card *card,
867 struct spider_net_descr *descr,
868 struct sk_buff *skb) 644 struct sk_buff *skb)
869{ 645{
646 struct spider_net_descr *descr = card->tx_chain.head;
870 dma_addr_t buf; 647 dma_addr_t buf;
871 648
872 buf = pci_map_single(card->pdev, skb->data, 649 buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
873 skb->len, PCI_DMA_BIDIRECTIONAL);
874 if (buf == DMA_ERROR_CODE) { 650 if (buf == DMA_ERROR_CODE) {
875 if (netif_msg_tx_err(card) && net_ratelimit()) 651 if (netif_msg_tx_err(card) && net_ratelimit())
876 pr_err("could not iommu-map packet (%p, %i). " 652 pr_err("could not iommu-map packet (%p, %i). "
@@ -880,10 +656,101 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
880 656
881 descr->buf_addr = buf; 657 descr->buf_addr = buf;
882 descr->buf_size = skb->len; 658 descr->buf_size = skb->len;
659 descr->next_descr_addr = 0;
883 descr->skb = skb; 660 descr->skb = skb;
884 descr->data_status = 0; 661 descr->data_status = 0;
885 662
886 spider_net_set_txdescr_cmdstat(descr,skb); 663 descr->dmac_cmd_status =
664 SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
665 if (skb->protocol == htons(ETH_P_IP))
666 switch (skb->nh.iph->protocol) {
667 case IPPROTO_TCP:
668 descr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
669 break;
670 case IPPROTO_UDP:
671 descr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
672 break;
673 }
674
675 descr->prev->next_descr_addr = descr->bus_addr;
676
677 return 0;
678}
679
680/**
681 * spider_net_release_tx_descr - processes a used tx descriptor
682 * @card: card structure
683 * @descr: descriptor to release
684 *
685 * releases a used tx descriptor (unmapping, freeing of skb)
686 */
687static inline void
688spider_net_release_tx_descr(struct spider_net_card *card)
689{
690 struct spider_net_descr *descr = card->tx_chain.tail;
691 struct sk_buff *skb;
692
693 card->tx_chain.tail = card->tx_chain.tail->next;
694 descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
695
696 /* unmap the skb */
697 skb = descr->skb;
698 pci_unmap_single(card->pdev, descr->buf_addr, skb->len,
699 PCI_DMA_TODEVICE);
700 dev_kfree_skb_any(skb);
701}
702
703/**
704 * spider_net_release_tx_chain - processes sent tx descriptors
705 * @card: adapter structure
706 * @brutal: if set, don't care about whether descriptor seems to be in use
707 *
708 * returns 0 if the tx ring is empty, otherwise 1.
709 *
710 * spider_net_release_tx_chain releases the tx descriptors that spider has
711 * finished with (if non-brutal) or simply release tx descriptors (if brutal).
712 * If some other context is calling this function, we return 1 so that we're
713 * scheduled again (if we were scheduled) and will not loose initiative.
714 */
715static int
716spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
717{
718 struct spider_net_descr_chain *chain = &card->tx_chain;
719 int status;
720
721 spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR);
722
723 while (chain->tail != chain->head) {
724 status = spider_net_get_descr_status(chain->tail);
725 switch (status) {
726 case SPIDER_NET_DESCR_COMPLETE:
727 card->netdev_stats.tx_packets++;
728 card->netdev_stats.tx_bytes += chain->tail->skb->len;
729 break;
730
731 case SPIDER_NET_DESCR_CARDOWNED:
732 if (!brutal)
733 return 1;
734 /* fallthrough, if we release the descriptors
735 * brutally (then we don't care about
736 * SPIDER_NET_DESCR_CARDOWNED) */
737
738 case SPIDER_NET_DESCR_RESPONSE_ERROR:
739 case SPIDER_NET_DESCR_PROTECTION_ERROR:
740 case SPIDER_NET_DESCR_FORCE_END:
741 if (netif_msg_tx_err(card))
742 pr_err("%s: forcing end of tx descriptor "
743 "with status x%02x\n",
744 card->netdev->name, status);
745 card->netdev_stats.tx_errors++;
746 break;
747
748 default:
749 card->netdev_stats.tx_dropped++;
750 return 1;
751 }
752 spider_net_release_tx_descr(card);
753 }
887 754
888 return 0; 755 return 0;
889} 756}
@@ -896,18 +763,32 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
896 * spider_net_kick_tx_dma writes the current tx chain head as start address 763 * spider_net_kick_tx_dma writes the current tx chain head as start address
897 * of the tx descriptor chain and enables the transmission DMA engine 764 * of the tx descriptor chain and enables the transmission DMA engine
898 */ 765 */
899static void 766static inline void
900spider_net_kick_tx_dma(struct spider_net_card *card, 767spider_net_kick_tx_dma(struct spider_net_card *card)
901 struct spider_net_descr *descr)
902{ 768{
903 /* this is the only descriptor in the output chain. 769 struct spider_net_descr *descr;
904 * Enable TX DMA */
905 770
906 spider_net_write_reg(card, SPIDER_NET_GDTDCHA, 771 if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
907 descr->bus_addr); 772 SPIDER_NET_TX_DMA_EN)
773 goto out;
908 774
909 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, 775 descr = card->tx_chain.tail;
910 SPIDER_NET_DMA_TX_VALUE); 776 for (;;) {
777 if (spider_net_get_descr_status(descr) ==
778 SPIDER_NET_DESCR_CARDOWNED) {
779 spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
780 descr->bus_addr);
781 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
782 SPIDER_NET_DMA_TX_VALUE);
783 break;
784 }
785 if (descr == card->tx_chain.head)
786 break;
787 descr = descr->next;
788 }
789
790out:
791 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
911} 792}
912 793
913/** 794/**
@@ -915,47 +796,69 @@ spider_net_kick_tx_dma(struct spider_net_card *card,
915 * @skb: packet to send out 796 * @skb: packet to send out
916 * @netdev: interface device structure 797 * @netdev: interface device structure
917 * 798 *
918 * returns 0 on success, <0 on failure 799 * returns 0 on success, !0 on failure
919 */ 800 */
920static int 801static int
921spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) 802spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
922{ 803{
923 struct spider_net_card *card = netdev_priv(netdev); 804 struct spider_net_card *card = netdev_priv(netdev);
924 struct spider_net_descr *descr; 805 struct spider_net_descr_chain *chain = &card->tx_chain;
806 struct spider_net_descr *descr = chain->head;
807 unsigned long flags;
925 int result; 808 int result;
926 809
810 spin_lock_irqsave(&chain->lock, flags);
811
927 spider_net_release_tx_chain(card, 0); 812 spider_net_release_tx_chain(card, 0);
928 813
929 descr = spider_net_get_next_tx_descr(card); 814 if (chain->head->next == chain->tail->prev) {
815 card->netdev_stats.tx_dropped++;
816 result = NETDEV_TX_LOCKED;
817 goto out;
818 }
930 819
931 if (!descr) 820 if (spider_net_get_descr_status(descr) != SPIDER_NET_DESCR_NOT_IN_USE) {
932 goto error; 821 result = NETDEV_TX_LOCKED;
822 goto out;
823 }
933 824
934 result = spider_net_prepare_tx_descr(card, descr, skb); 825 if (spider_net_prepare_tx_descr(card, skb) != 0) {
935 if (result) 826 card->netdev_stats.tx_dropped++;
936 goto error; 827 result = NETDEV_TX_BUSY;
828 goto out;
829 }
830
831 result = NETDEV_TX_OK;
937 832
833 spider_net_kick_tx_dma(card);
938 card->tx_chain.head = card->tx_chain.head->next; 834 card->tx_chain.head = card->tx_chain.head->next;
939 835
940 if (spider_net_get_descr_status(descr->prev) != 836out:
941 SPIDER_NET_DESCR_CARDOWNED) { 837 spin_unlock_irqrestore(&chain->lock, flags);
942 /* make sure the current descriptor is in memory. Then 838 netif_wake_queue(netdev);
943 * kicking it on again makes sense, if the previous is not 839 return result;
944 * card-owned anymore. Check the previous descriptor twice 840}
945 * to omit an mb() in heavy traffic cases */
946 mb();
947 if (spider_net_get_descr_status(descr->prev) !=
948 SPIDER_NET_DESCR_CARDOWNED)
949 spider_net_kick_tx_dma(card, descr);
950 }
951 841
952 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); 842/**
843 * spider_net_cleanup_tx_ring - cleans up the TX ring
844 * @card: card structure
845 *
846 * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
847 * interrupts to cleanup our TX ring) and returns sent packets to the stack
848 * by freeing them
849 */
850static void
851spider_net_cleanup_tx_ring(struct spider_net_card *card)
852{
853 unsigned long flags;
953 854
954 return NETDEV_TX_OK; 855 spin_lock_irqsave(&card->tx_chain.lock, flags);
955 856
956error: 857 if ((spider_net_release_tx_chain(card, 0) != 0) &&
957 card->netdev_stats.tx_dropped++; 858 (card->netdev->flags & IFF_UP))
958 return NETDEV_TX_BUSY; 859 spider_net_kick_tx_dma(card);
860
861 spin_unlock_irqrestore(&card->tx_chain.lock, flags);
959} 862}
960 863
961/** 864/**
@@ -1002,7 +905,7 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1002 905
1003 /* unmap descriptor */ 906 /* unmap descriptor */
1004 pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME, 907 pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME,
1005 PCI_DMA_BIDIRECTIONAL); 908 PCI_DMA_FROMDEVICE);
1006 909
1007 /* the cases we'll throw away the packet immediately */ 910 /* the cases we'll throw away the packet immediately */
1008 if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) { 911 if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
@@ -1067,14 +970,11 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1067static int 970static int
1068spider_net_decode_one_descr(struct spider_net_card *card, int napi) 971spider_net_decode_one_descr(struct spider_net_card *card, int napi)
1069{ 972{
1070 enum spider_net_descr_status status; 973 struct spider_net_descr_chain *chain = &card->rx_chain;
1071 struct spider_net_descr *descr; 974 struct spider_net_descr *descr = chain->tail;
1072 struct spider_net_descr_chain *chain; 975 int status;
1073 int result; 976 int result;
1074 977
1075 chain = &card->rx_chain;
1076 descr = chain->tail;
1077
1078 status = spider_net_get_descr_status(descr); 978 status = spider_net_get_descr_status(descr);
1079 979
1080 if (status == SPIDER_NET_DESCR_CARDOWNED) { 980 if (status == SPIDER_NET_DESCR_CARDOWNED) {
@@ -1103,7 +1003,7 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi)
1103 card->netdev->name, status); 1003 card->netdev->name, status);
1104 card->netdev_stats.rx_dropped++; 1004 card->netdev_stats.rx_dropped++;
1105 pci_unmap_single(card->pdev, descr->buf_addr, 1005 pci_unmap_single(card->pdev, descr->buf_addr,
1106 SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL); 1006 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
1107 dev_kfree_skb_irq(descr->skb); 1007 dev_kfree_skb_irq(descr->skb);
1108 goto refill; 1008 goto refill;
1109 } 1009 }
@@ -1119,7 +1019,7 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi)
1119 /* ok, we've got a packet in descr */ 1019 /* ok, we've got a packet in descr */
1120 result = spider_net_pass_skb_up(descr, card, napi); 1020 result = spider_net_pass_skb_up(descr, card, napi);
1121refill: 1021refill:
1122 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 1022 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1123 /* change the descriptor state: */ 1023 /* change the descriptor state: */
1124 if (!napi) 1024 if (!napi)
1125 spider_net_refill_rx_chain(card); 1025 spider_net_refill_rx_chain(card);
@@ -1291,21 +1191,6 @@ spider_net_set_mac(struct net_device *netdev, void *p)
1291} 1191}
1292 1192
1293/** 1193/**
1294 * spider_net_enable_txdmac - enables a TX DMA controller
1295 * @card: card structure
1296 *
1297 * spider_net_enable_txdmac enables the TX DMA controller by setting the
1298 * descriptor chain tail address
1299 */
1300static void
1301spider_net_enable_txdmac(struct spider_net_card *card)
1302{
1303 /* assume chain is aligned correctly */
1304 spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
1305 card->tx_chain.tail->bus_addr);
1306}
1307
1308/**
1309 * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt 1194 * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt
1310 * @card: card structure 1195 * @card: card structure
1311 * 1196 *
@@ -1653,7 +1538,6 @@ spider_net_enable_card(struct spider_net_card *card)
1653 { SPIDER_NET_GMRWOLCTRL, 0 }, 1538 { SPIDER_NET_GMRWOLCTRL, 0 },
1654 { SPIDER_NET_GTESTMD, 0x10000000 }, 1539 { SPIDER_NET_GTESTMD, 0x10000000 },
1655 { SPIDER_NET_GTTQMSK, 0x00400040 }, 1540 { SPIDER_NET_GTTQMSK, 0x00400040 },
1656 { SPIDER_NET_GTESTMD, 0 },
1657 1541
1658 { SPIDER_NET_GMACINTEN, 0 }, 1542 { SPIDER_NET_GMACINTEN, 0 },
1659 1543
@@ -1692,9 +1576,6 @@ spider_net_enable_card(struct spider_net_card *card)
1692 1576
1693 spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE); 1577 spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);
1694 1578
1695 /* set chain tail adress for TX chain */
1696 spider_net_enable_txdmac(card);
1697
1698 spider_net_write_reg(card, SPIDER_NET_GMACLENLMT, 1579 spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
1699 SPIDER_NET_LENLMT_VALUE); 1580 SPIDER_NET_LENLMT_VALUE);
1700 spider_net_write_reg(card, SPIDER_NET_GMACMODE, 1581 spider_net_write_reg(card, SPIDER_NET_GMACMODE,
@@ -1709,6 +1590,9 @@ spider_net_enable_card(struct spider_net_card *card)
1709 SPIDER_NET_INT1_MASK_VALUE); 1590 SPIDER_NET_INT1_MASK_VALUE);
1710 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 1591 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
1711 SPIDER_NET_INT2_MASK_VALUE); 1592 SPIDER_NET_INT2_MASK_VALUE);
1593
1594 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
1595 SPIDER_NET_GDTDCEIDIS);
1712} 1596}
1713 1597
1714/** 1598/**
@@ -1728,10 +1612,12 @@ spider_net_open(struct net_device *netdev)
1728 1612
1729 result = -ENOMEM; 1613 result = -ENOMEM;
1730 if (spider_net_init_chain(card, &card->tx_chain, 1614 if (spider_net_init_chain(card, &card->tx_chain,
1731 card->descr, tx_descriptors)) 1615 card->descr,
1616 PCI_DMA_TODEVICE, tx_descriptors))
1732 goto alloc_tx_failed; 1617 goto alloc_tx_failed;
1733 if (spider_net_init_chain(card, &card->rx_chain, 1618 if (spider_net_init_chain(card, &card->rx_chain,
1734 card->descr + tx_descriptors, rx_descriptors)) 1619 card->descr + tx_descriptors,
1620 PCI_DMA_FROMDEVICE, rx_descriptors))
1735 goto alloc_rx_failed; 1621 goto alloc_rx_failed;
1736 1622
1737 /* allocate rx skbs */ 1623 /* allocate rx skbs */
@@ -1938,7 +1824,7 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
1938 /* empty sequencer data */ 1824 /* empty sequencer data */
1939 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS; 1825 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
1940 sequencer++) { 1826 sequencer++) {
1941 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 1827 spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
1942 sequencer * 8, 0x0); 1828 sequencer * 8, 0x0);
1943 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) { 1829 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
1944 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 1830 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
@@ -1955,6 +1841,49 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
1955} 1841}
1956 1842
1957/** 1843/**
1844 * spider_net_stop - called upon ifconfig down
1845 * @netdev: interface device structure
1846 *
1847 * always returns 0
1848 */
1849int
1850spider_net_stop(struct net_device *netdev)
1851{
1852 struct spider_net_card *card = netdev_priv(netdev);
1853
1854 tasklet_kill(&card->rxram_full_tl);
1855 netif_poll_disable(netdev);
1856 netif_carrier_off(netdev);
1857 netif_stop_queue(netdev);
1858 del_timer_sync(&card->tx_timer);
1859
1860 /* disable/mask all interrupts */
1861 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
1862 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
1863 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
1864
1865 /* free_irq(netdev->irq, netdev);*/
1866 free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev);
1867
1868 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
1869 SPIDER_NET_DMA_TX_FEND_VALUE);
1870
1871 /* turn off DMA, force end */
1872 spider_net_disable_rxdmac(card);
1873
1874 /* release chains */
1875 if (spin_trylock(&card->tx_chain.lock)) {
1876 spider_net_release_tx_chain(card, 1);
1877 spin_unlock(&card->tx_chain.lock);
1878 }
1879
1880 spider_net_free_chain(card, &card->tx_chain);
1881 spider_net_free_chain(card, &card->rx_chain);
1882
1883 return 0;
1884}
1885
1886/**
1958 * spider_net_tx_timeout_task - task scheduled by the watchdog timeout 1887 * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
1959 * function (to be called not under interrupt status) 1888 * function (to be called not under interrupt status)
1960 * @data: data, is interface device structure 1889 * @data: data, is interface device structure
@@ -1982,7 +1911,7 @@ spider_net_tx_timeout_task(void *data)
1982 goto out; 1911 goto out;
1983 1912
1984 spider_net_open(netdev); 1913 spider_net_open(netdev);
1985 spider_net_kick_tx_dma(card, card->tx_chain.head); 1914 spider_net_kick_tx_dma(card);
1986 netif_device_attach(netdev); 1915 netif_device_attach(netdev);
1987 1916
1988out: 1917out:
@@ -2065,7 +1994,6 @@ spider_net_setup_netdev(struct spider_net_card *card)
2065 1994
2066 pci_set_drvdata(card->pdev, netdev); 1995 pci_set_drvdata(card->pdev, netdev);
2067 1996
2068 atomic_set(&card->tx_chain_release,0);
2069 card->rxram_full_tl.data = (unsigned long) card; 1997 card->rxram_full_tl.data = (unsigned long) card;
2070 card->rxram_full_tl.func = 1998 card->rxram_full_tl.func =
2071 (void (*)(unsigned long)) spider_net_handle_rxram_full; 1999 (void (*)(unsigned long)) spider_net_handle_rxram_full;
@@ -2079,7 +2007,7 @@ spider_net_setup_netdev(struct spider_net_card *card)
2079 2007
2080 spider_net_setup_netdev_ops(netdev); 2008 spider_net_setup_netdev_ops(netdev);
2081 2009
2082 netdev->features = NETIF_F_HW_CSUM; 2010 netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX;
2083 /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 2011 /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2084 * NETIF_F_HW_VLAN_FILTER */ 2012 * NETIF_F_HW_VLAN_FILTER */
2085 2013
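The reworked TX reclaim walks the ring from tail to head and stops at the first descriptor the card still owns; everything behind that point is finished and can be unmapped and freed. A reduced sketch of that loop with invented types (descr, free_one), using the same status nibble as the driver:

#include <stdint.h>

#define CARDOWNED	0xA0000000u
#define STATUS(d)	((d)->cmd_status & 0xF0000000u)

struct descr { uint32_t cmd_status; struct descr *next; };

/* returns 1 if work remains (the card still owns the tail), else 0 */
static int release_tx_chain(struct descr **tail, struct descr *head,
			    void (*free_one)(struct descr *))
{
	while (*tail != head) {
		if (STATUS(*tail) == CARDOWNED)
			return 1;	/* card not finished with it yet */
		free_one(*tail);	/* unmap buffer, free skb, count stats */
		*tail = (*tail)->next;
	}
	return 0;
}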
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 3b8d951cf73c..f6dcf180ae3d 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -208,7 +208,10 @@ extern char spider_net_driver_name[];
208#define SPIDER_NET_DMA_RX_VALUE 0x80000000 208#define SPIDER_NET_DMA_RX_VALUE 0x80000000
209#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003 209#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003
210/* to set TX_DMA_EN */ 210/* to set TX_DMA_EN */
211#define SPIDER_NET_DMA_TX_VALUE 0x80000000 211#define SPIDER_NET_TX_DMA_EN 0x80000000
212#define SPIDER_NET_GDTDCEIDIS 0x00000002
213#define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \
214 SPIDER_NET_GDTDCEIDIS
212#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003 215#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003
213 216
214/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */ 217/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */
@@ -329,55 +332,23 @@ enum spider_net_int2_status {
329 (~SPIDER_NET_TXINT) & \ 332 (~SPIDER_NET_TXINT) & \
330 (~SPIDER_NET_RXINT) ) 333 (~SPIDER_NET_RXINT) )
331 334
332#define SPIDER_NET_GPREXEC 0x80000000 335#define SPIDER_NET_GPREXEC 0x80000000
333#define SPIDER_NET_GPRDAT_MASK 0x0000ffff 336#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
334 337
335/* descriptor bits 338#define SPIDER_NET_DMAC_NOINTR_COMPLETE 0x00800000
336 * 339#define SPIDER_NET_DMAC_NOCS 0x00040000
337 * 1010 descriptor ready 340#define SPIDER_NET_DMAC_TCP 0x00020000
338 * 0 descr in middle of chain 341#define SPIDER_NET_DMAC_UDP 0x00030000
339 * 000 fixed to 0 342#define SPIDER_NET_TXDCEST 0x08000000
340 * 343
341 * 0 no interrupt on completion 344#define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000
342 * 000 fixed to 0 345#define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */
343 * 1 no ipsec processing 346#define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */
344 * 1 last descriptor for this frame 347#define SPIDER_NET_DESCR_PROTECTION_ERROR 0x20000000 /* used in rx and tx */
345 * 00 no checksum 348#define SPIDER_NET_DESCR_FRAME_END 0x40000000 /* used in rx */
346 * 10 tcp checksum 349#define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */
347 * 11 udp checksum 350#define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */
348 * 351#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000
349 * 00 fixed to 0
350 * 0 fixed to 0
351 * 0 no interrupt on response errors
352 * 0 no interrupt on invalid descr
353 * 0 no interrupt on dma process termination
354 * 0 no interrupt on descr chain end
355 * 0 no interrupt on descr complete
356 *
357 * 000 fixed to 0
358 * 0 response error interrupt status
359 * 0 invalid descr status
360 * 0 dma termination status
361 * 0 descr chain end status
362 * 0 descr complete status */
363#define SPIDER_NET_DMAC_CMDSTAT_NOCS 0xa00c0000
364#define SPIDER_NET_DMAC_CMDSTAT_TCPCS 0xa00e0000
365#define SPIDER_NET_DMAC_CMDSTAT_UDPCS 0xa00f0000
366#define SPIDER_NET_DESCR_IND_PROC_SHIFT 28
367#define SPIDER_NET_DESCR_IND_PROC_MASKO 0x0fffffff
368
369/* descr ready, descr is in middle of chain, get interrupt on completion */
370#define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000
371
372enum spider_net_descr_status {
373 SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */
374 SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */
375 SPIDER_NET_DESCR_PROTECTION_ERROR = 0x02, /* used in rx and tx */
376 SPIDER_NET_DESCR_FRAME_END = 0x04, /* used in rx */
377 SPIDER_NET_DESCR_FORCE_END = 0x05, /* used in rx and tx */
378 SPIDER_NET_DESCR_CARDOWNED = 0x0a, /* used in rx and tx */
379 SPIDER_NET_DESCR_NOT_IN_USE /* any other value */
380};
381 352
382struct spider_net_descr { 353struct spider_net_descr {
383 /* as defined by the hardware */ 354 /* as defined by the hardware */
@@ -398,7 +369,7 @@ struct spider_net_descr {
398} __attribute__((aligned(32))); 369} __attribute__((aligned(32)));
399 370
400struct spider_net_descr_chain { 371struct spider_net_descr_chain {
401 /* we walk from tail to head */ 372 spinlock_t lock;
402 struct spider_net_descr *head; 373 struct spider_net_descr *head;
403 struct spider_net_descr *tail; 374 struct spider_net_descr *tail;
404}; 375};
@@ -453,8 +424,6 @@ struct spider_net_card {
453 424
454 struct spider_net_descr_chain tx_chain; 425 struct spider_net_descr_chain tx_chain;
455 struct spider_net_descr_chain rx_chain; 426 struct spider_net_descr_chain rx_chain;
456 atomic_t rx_chain_refill;
457 atomic_t tx_chain_release;
458 427
459 struct net_device_stats netdev_stats; 428 struct net_device_stats netdev_stats;
460 429
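With the enum gone, the descriptor state lives directly in the top nibble of dmac_cmd_status and is read with a plain mask, so extra flag bits such as the no-interrupt bit do not disturb the comparison. An illustration using the constants defined above:

#include <assert.h>
#include <stdint.h>

#define IND_PROC_MASK	0xF0000000u
#define DESCR_CARDOWNED	0xA0000000u
#define DMAC_NOINTR	0x00800000u

int main(void)
{
	uint32_t cmd_status = DESCR_CARDOWNED | DMAC_NOINTR;

	/* masking leaves only the state bits, whatever else is set */
	assert((cmd_status & IND_PROC_MASK) == DESCR_CARDOWNED);
	return 0;
}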
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 8673fd4c08c7..c6f5bc3c042f 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -3255,12 +3255,7 @@ static void __devexit happy_meal_pci_remove(struct pci_dev *pdev)
3255} 3255}
3256 3256
3257static struct pci_device_id happymeal_pci_ids[] = { 3257static struct pci_device_id happymeal_pci_ids[] = {
3258 { 3258 { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
3259 .vendor = PCI_VENDOR_ID_SUN,
3260 .device = PCI_DEVICE_ID_SUN_HAPPYMEAL,
3261 .subvendor = PCI_ANY_ID,
3262 .subdevice = PCI_ANY_ID,
3263 },
3264 { } /* Terminating entry */ 3259 { } /* Terminating entry */
3265}; 3260};
3266 3261
@@ -3275,7 +3270,7 @@ static struct pci_driver hme_pci_driver = {
3275 3270
3276static int __init happy_meal_pci_init(void) 3271static int __init happy_meal_pci_init(void)
3277{ 3272{
3278 return pci_module_init(&hme_pci_driver); 3273 return pci_register_driver(&hme_pci_driver);
3279} 3274}
3280 3275
3281static void happy_meal_pci_exit(void) 3276static void happy_meal_pci_exit(void)
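PCI_DEVICE() is shorthand for exactly what the open-coded initializer spelled out: vendor and device set, subvendor and subdevice wildcarded. Approximate expansion, paraphrased rather than quoted from linux/pci.h:

#define PCI_ANY_ID (~0)
#define PCI_DEVICE(vend, dev) \
	.vendor = (vend), .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID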
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 1ef9fd39a79a..0e3fdf7c6dd3 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1537,7 +1537,7 @@ static int __init sparc_lance_init(void)
1537{ 1537{
1538 if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) || 1538 if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) ||
1539 (idprom->id_machtype == (SM_SUN4|SM_4_470))) { 1539 (idprom->id_machtype == (SM_SUN4|SM_4_470))) {
1540 memset(&sun4_sdev, 0, sizeof(sdev)); 1540 memset(&sun4_sdev, 0, sizeof(struct sbus_dev));
1541 sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr; 1541 sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr;
1542 sun4_sdev.irqs[0] = 6; 1542 sun4_sdev.irqs[0] = 6;
1543 return sparc_lance_probe_one(&sun4_sdev, NULL, NULL); 1543 return sparc_lance_probe_one(&sun4_sdev, NULL, NULL);
@@ -1547,16 +1547,16 @@ static int __init sparc_lance_init(void)
1547 1547
1548static int __exit sunlance_sun4_remove(void) 1548static int __exit sunlance_sun4_remove(void)
1549{ 1549{
1550 struct lance_private *lp = dev_get_drvdata(&sun4_sdev->dev); 1550 struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev);
1551 struct net_device *net_dev = lp->dev; 1551 struct net_device *net_dev = lp->dev;
1552 1552
1553 unregister_netdevice(net_dev); 1553 unregister_netdevice(net_dev);
1554 1554
1555 lance_free_hwresources(root_lance_dev); 1555 lance_free_hwresources(lp);
1556 1556
1557 free_netdev(net_dev); 1557 free_netdev(net_dev);
1558 1558
1559 dev_set_drvdata(&sun4_sdev->dev, NULL); 1559 dev_set_drvdata(&sun4_sdev.ofdev.dev, NULL);
1560 1560
1561 return 0; 1561 return 0;
1562} 1562}
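The memset change is the usual sizeof pitfall: measuring the wrong identifier (here effectively a pointer-sized object) zeroes only a few bytes instead of the whole structure. Minimal illustration with a made-up structure size, typically printing 8 vs 256 on a 64-bit build:

#include <stdio.h>
#include <string.h>

struct sbus_dev_like { char pad[256]; };

int main(void)
{
	struct sbus_dev_like dev, *p = &dev;

	printf("%zu vs %zu\n", sizeof(p), sizeof(struct sbus_dev_like));
	memset(&dev, 0, sizeof(struct sbus_dev_like));	/* zeroes all 256 bytes */
	return 0;
}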
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ce6f3be86da0..1b8138f641e3 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define PFX DRV_MODULE_NAME ": " 70#define PFX DRV_MODULE_NAME ": "
71#define DRV_MODULE_VERSION "3.62" 71#define DRV_MODULE_VERSION "3.63"
72#define DRV_MODULE_RELDATE "June 30, 2006" 72#define DRV_MODULE_RELDATE "July 25, 2006"
73 73
74#define TG3_DEF_MAC_MODE 0 74#define TG3_DEF_MAC_MODE 0
75#define TG3_DEF_RX_MODE 0 75#define TG3_DEF_RX_MODE 0
@@ -3590,6 +3590,28 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3590static int tg3_init_hw(struct tg3 *, int); 3590static int tg3_init_hw(struct tg3 *, int);
3591static int tg3_halt(struct tg3 *, int, int); 3591static int tg3_halt(struct tg3 *, int, int);
3592 3592
3593/* Restart hardware after configuration changes, self-test, etc.
3594 * Invoked with tp->lock held.
3595 */
3596static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
3597{
3598 int err;
3599
3600 err = tg3_init_hw(tp, reset_phy);
3601 if (err) {
3602 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
3603 "aborting.\n", tp->dev->name);
3604 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3605 tg3_full_unlock(tp);
3606 del_timer_sync(&tp->timer);
3607 tp->irq_sync = 0;
3608 netif_poll_enable(tp->dev);
3609 dev_close(tp->dev);
3610 tg3_full_lock(tp, 0);
3611 }
3612 return err;
3613}
3614
3593#ifdef CONFIG_NET_POLL_CONTROLLER 3615#ifdef CONFIG_NET_POLL_CONTROLLER
3594static void tg3_poll_controller(struct net_device *dev) 3616static void tg3_poll_controller(struct net_device *dev)
3595{ 3617{
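The helper added above centralizes what the callers changed later in this patch now rely on: re-run tg3_init_hw(), and only restart I/O when it succeeded; on failure, halt and close the device rather than leaving it half-configured. A minimal standalone sketch of that control flow, with stub functions standing in for the tg3 routines (the real ones take a struct tg3 * and hold tp->lock):

#include <stdio.h>
#include <errno.h>

/* stand-ins for the tg3 routines; flip init_hw_ok to simulate success */
static int  init_hw_ok = 0;
static int  init_hw(void)     { return init_hw_ok ? 0 : -EIO; }
static void halt(void)        { puts("halt controller"); }
static void netif_start(void) { puts("restart tx queues"); }
static void dev_close(void)   { puts("close device, leave it down"); }

/* mirrors the shape of tg3_restart_hw(): try to bring the hardware
 * back up; if that fails, shut everything down instead of running
 * with a half-configured chip. */
static int restart_hw(void)
{
	int err = init_hw();

	if (err) {
		fprintf(stderr, "re-init failed (%d), aborting\n", err);
		halt();
		dev_close();
	}
	return err;
}

int main(void)
{
	if (!restart_hw())	/* callers only restart I/O on success */
		netif_start();
	return 0;
}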
@@ -3630,13 +3652,15 @@ static void tg3_reset_task(void *_data)
3630 } 3652 }
3631 3653
3632 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 3654 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3633 tg3_init_hw(tp, 1); 3655 if (tg3_init_hw(tp, 1))
3656 goto out;
3634 3657
3635 tg3_netif_start(tp); 3658 tg3_netif_start(tp);
3636 3659
3637 if (restart_timer) 3660 if (restart_timer)
3638 mod_timer(&tp->timer, jiffies + 1); 3661 mod_timer(&tp->timer, jiffies + 1);
3639 3662
3663out:
3640 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK; 3664 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3641 3665
3642 tg3_full_unlock(tp); 3666 tg3_full_unlock(tp);
@@ -4124,6 +4148,7 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4124static int tg3_change_mtu(struct net_device *dev, int new_mtu) 4148static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4125{ 4149{
4126 struct tg3 *tp = netdev_priv(dev); 4150 struct tg3 *tp = netdev_priv(dev);
4151 int err;
4127 4152
4128 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) 4153 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4129 return -EINVAL; 4154 return -EINVAL;
@@ -4144,13 +4169,14 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4144 4169
4145 tg3_set_mtu(dev, tp, new_mtu); 4170 tg3_set_mtu(dev, tp, new_mtu);
4146 4171
4147 tg3_init_hw(tp, 0); 4172 err = tg3_restart_hw(tp, 0);
4148 4173
4149 tg3_netif_start(tp); 4174 if (!err)
4175 tg3_netif_start(tp);
4150 4176
4151 tg3_full_unlock(tp); 4177 tg3_full_unlock(tp);
4152 4178
4153 return 0; 4179 return err;
4154} 4180}
4155 4181
4156/* Free up pending packets in all rx/tx rings. 4182/* Free up pending packets in all rx/tx rings.
@@ -4232,7 +4258,7 @@ static void tg3_free_rings(struct tg3 *tp)
4232 * end up in the driver. tp->{tx,}lock are held and thus 4258 * end up in the driver. tp->{tx,}lock are held and thus
4233 * we may not sleep. 4259 * we may not sleep.
4234 */ 4260 */
4235static void tg3_init_rings(struct tg3 *tp) 4261static int tg3_init_rings(struct tg3 *tp)
4236{ 4262{
4237 u32 i; 4263 u32 i;
4238 4264
@@ -4281,18 +4307,38 @@ static void tg3_init_rings(struct tg3 *tp)
4281 4307
4282 /* Now allocate fresh SKBs for each rx ring. */ 4308 /* Now allocate fresh SKBs for each rx ring. */
4283 for (i = 0; i < tp->rx_pending; i++) { 4309 for (i = 0; i < tp->rx_pending; i++) {
4284 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, 4310 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4285 -1, i) < 0) 4311 printk(KERN_WARNING PFX
4312 "%s: Using a smaller RX standard ring, "
4313 "only %d out of %d buffers were allocated "
4314 "successfully.\n",
4315 tp->dev->name, i, tp->rx_pending);
4316 if (i == 0)
4317 return -ENOMEM;
4318 tp->rx_pending = i;
4286 break; 4319 break;
4320 }
4287 } 4321 }
4288 4322
4289 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { 4323 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4290 for (i = 0; i < tp->rx_jumbo_pending; i++) { 4324 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4291 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO, 4325 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4292 -1, i) < 0) 4326 -1, i) < 0) {
4327 printk(KERN_WARNING PFX
4328 "%s: Using a smaller RX jumbo ring, "
4329 "only %d out of %d buffers were "
4330 "allocated successfully.\n",
4331 tp->dev->name, i, tp->rx_jumbo_pending);
4332 if (i == 0) {
4333 tg3_free_rings(tp);
4334 return -ENOMEM;
4335 }
4336 tp->rx_jumbo_pending = i;
4293 break; 4337 break;
4338 }
4294 } 4339 }
4295 } 4340 }
4341 return 0;
4296} 4342}
4297 4343
4298/* 4344/*
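The rewritten allocation loops above no longer treat a partial RX-ring fill as silent success: if no buffer at all could be allocated the function now returns -ENOMEM, and on a partial fill it shrinks rx_pending (or rx_jumbo_pending) to the count that did succeed and warns. A small standalone sketch of that shrink-or-fail policy, with plain malloc standing in for tg3_alloc_rx_skb():

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Allocate up to *pending buffers; on partial failure shrink *pending
 * to what actually succeeded, and fail outright only if nothing did.
 * Mirrors the policy of the reworked tg3_init_rings(). */
static int fill_ring(void **ring, unsigned int *pending, size_t bufsz)
{
	unsigned int i;

	for (i = 0; i < *pending; i++) {
		ring[i] = malloc(bufsz);
		if (!ring[i]) {
			fprintf(stderr,
				"using a smaller ring: %u of %u buffers allocated\n",
				i, *pending);
			if (i == 0)
				return -ENOMEM;
			*pending = i;
			break;
		}
	}
	return 0;
}

int main(void)
{
	void *ring[8];
	unsigned int pending = 8;

	if (fill_ring(ring, &pending, 1536))
		return 1;
	printf("ring usable with %u buffers\n", pending);
	while (pending)
		free(ring[--pending]);
	return 0;
}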
@@ -5815,6 +5861,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
5815{ 5861{
5816 struct tg3 *tp = netdev_priv(dev); 5862 struct tg3 *tp = netdev_priv(dev);
5817 struct sockaddr *addr = p; 5863 struct sockaddr *addr = p;
5864 int err = 0;
5818 5865
5819 if (!is_valid_ether_addr(addr->sa_data)) 5866 if (!is_valid_ether_addr(addr->sa_data))
5820 return -EINVAL; 5867 return -EINVAL;
@@ -5832,9 +5879,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
5832 tg3_full_lock(tp, 1); 5879 tg3_full_lock(tp, 1);
5833 5880
5834 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 5881 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5835 tg3_init_hw(tp, 0); 5882 err = tg3_restart_hw(tp, 0);
5836 5883 if (!err)
5837 tg3_netif_start(tp); 5884 tg3_netif_start(tp);
5838 tg3_full_unlock(tp); 5885 tg3_full_unlock(tp);
5839 } else { 5886 } else {
5840 spin_lock_bh(&tp->lock); 5887 spin_lock_bh(&tp->lock);
@@ -5842,7 +5889,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
5842 spin_unlock_bh(&tp->lock); 5889 spin_unlock_bh(&tp->lock);
5843 } 5890 }
5844 5891
5845 return 0; 5892 return err;
5846} 5893}
5847 5894
5848/* tp->lock is held. */ 5895/* tp->lock is held. */
@@ -5942,7 +5989,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5942 * can only do this after the hardware has been 5989 * can only do this after the hardware has been
5943 * successfully reset. 5990 * successfully reset.
5944 */ 5991 */
5945 tg3_init_rings(tp); 5992 err = tg3_init_rings(tp);
5993 if (err)
5994 return err;
5946 5995
5947 /* This value is determined during the probe time DMA 5996 /* This value is determined during the probe time DMA
5948 * engine test, tg3_test_dma. 5997 * engine test, tg3_test_dma.
@@ -7956,7 +8005,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
7956static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 8005static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7957{ 8006{
7958 struct tg3 *tp = netdev_priv(dev); 8007 struct tg3 *tp = netdev_priv(dev);
7959 int irq_sync = 0; 8008 int irq_sync = 0, err = 0;
7960 8009
7961 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || 8010 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7962 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || 8011 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
@@ -7980,13 +8029,14 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
7980 8029
7981 if (netif_running(dev)) { 8030 if (netif_running(dev)) {
7982 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8031 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7983 tg3_init_hw(tp, 1); 8032 err = tg3_restart_hw(tp, 1);
7984 tg3_netif_start(tp); 8033 if (!err)
8034 tg3_netif_start(tp);
7985 } 8035 }
7986 8036
7987 tg3_full_unlock(tp); 8037 tg3_full_unlock(tp);
7988 8038
7989 return 0; 8039 return err;
7990} 8040}
7991 8041
7992static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 8042static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
@@ -8001,7 +8051,7 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
8001static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 8051static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8002{ 8052{
8003 struct tg3 *tp = netdev_priv(dev); 8053 struct tg3 *tp = netdev_priv(dev);
8004 int irq_sync = 0; 8054 int irq_sync = 0, err = 0;
8005 8055
8006 if (netif_running(dev)) { 8056 if (netif_running(dev)) {
8007 tg3_netif_stop(tp); 8057 tg3_netif_stop(tp);
@@ -8025,13 +8075,14 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
8025 8075
8026 if (netif_running(dev)) { 8076 if (netif_running(dev)) {
8027 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8077 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8028 tg3_init_hw(tp, 1); 8078 err = tg3_restart_hw(tp, 1);
8029 tg3_netif_start(tp); 8079 if (!err)
8080 tg3_netif_start(tp);
8030 } 8081 }
8031 8082
8032 tg3_full_unlock(tp); 8083 tg3_full_unlock(tp);
8033 8084
8034 return 0; 8085 return err;
8035} 8086}
8036 8087
8037static u32 tg3_get_rx_csum(struct net_device *dev) 8088static u32 tg3_get_rx_csum(struct net_device *dev)
@@ -8666,7 +8717,9 @@ static int tg3_test_loopback(struct tg3 *tp)
8666 if (!netif_running(tp->dev)) 8717 if (!netif_running(tp->dev))
8667 return TG3_LOOPBACK_FAILED; 8718 return TG3_LOOPBACK_FAILED;
8668 8719
8669 tg3_reset_hw(tp, 1); 8720 err = tg3_reset_hw(tp, 1);
8721 if (err)
8722 return TG3_LOOPBACK_FAILED;
8670 8723
8671 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) 8724 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8672 err |= TG3_MAC_LOOPBACK_FAILED; 8725 err |= TG3_MAC_LOOPBACK_FAILED;
@@ -8740,8 +8793,8 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8740 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8793 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8741 if (netif_running(dev)) { 8794 if (netif_running(dev)) {
8742 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 8795 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8743 tg3_init_hw(tp, 1); 8796 if (!tg3_restart_hw(tp, 1))
8744 tg3_netif_start(tp); 8797 tg3_netif_start(tp);
8745 } 8798 }
8746 8799
8747 tg3_full_unlock(tp); 8800 tg3_full_unlock(tp);
@@ -11699,7 +11752,8 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11699 tg3_full_lock(tp, 0); 11752 tg3_full_lock(tp, 0);
11700 11753
11701 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 11754 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11702 tg3_init_hw(tp, 1); 11755 if (tg3_restart_hw(tp, 1))
11756 goto out;
11703 11757
11704 tp->timer.expires = jiffies + tp->timer_offset; 11758 tp->timer.expires = jiffies + tp->timer_offset;
11705 add_timer(&tp->timer); 11759 add_timer(&tp->timer);
@@ -11707,6 +11761,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11707 netif_device_attach(dev); 11761 netif_device_attach(dev);
11708 tg3_netif_start(tp); 11762 tg3_netif_start(tp);
11709 11763
11764out:
11710 tg3_full_unlock(tp); 11765 tg3_full_unlock(tp);
11711 } 11766 }
11712 11767
@@ -11733,16 +11788,19 @@ static int tg3_resume(struct pci_dev *pdev)
11733 tg3_full_lock(tp, 0); 11788 tg3_full_lock(tp, 0);
11734 11789
11735 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 11790 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11736 tg3_init_hw(tp, 1); 11791 err = tg3_restart_hw(tp, 1);
11792 if (err)
11793 goto out;
11737 11794
11738 tp->timer.expires = jiffies + tp->timer_offset; 11795 tp->timer.expires = jiffies + tp->timer_offset;
11739 add_timer(&tp->timer); 11796 add_timer(&tp->timer);
11740 11797
11741 tg3_netif_start(tp); 11798 tg3_netif_start(tp);
11742 11799
11800out:
11743 tg3_full_unlock(tp); 11801 tg3_full_unlock(tp);
11744 11802
11745 return 0; 11803 return err;
11746} 11804}
11747 11805
11748static struct pci_driver tg3_driver = { 11806static struct pci_driver tg3_driver = {
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 2c09ec908a3f..435e91ec4620 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -197,7 +197,6 @@ static int c101_open(struct net_device *dev)
197 sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port); 197 sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port);
198 198
199 set_carrier(port); 199 set_carrier(port);
200 printk(KERN_DEBUG "0x%X\n", sca_in(MSCI1_OFFSET + ST3, port));
201 200
202 /* enable MSCI1 CDCD interrupt */ 201 /* enable MSCI1 CDCD interrupt */
203 sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port); 202 sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port);
@@ -449,4 +448,5 @@ module_exit(c101_cleanup);
449MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); 448MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
450MODULE_DESCRIPTION("Moxa C101 serial port driver"); 449MODULE_DESCRIPTION("Moxa C101 serial port driver");
451MODULE_LICENSE("GPL v2"); 450MODULE_LICENSE("GPL v2");
452module_param(hw, charp, 0444); /* hw=irq,ram:irq,... */ 451module_param(hw, charp, 0444);
452MODULE_PARM_DESC(hw, "irq,ram:irq,...");
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index b81263eaede0..fbaab5bf71eb 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -107,6 +107,7 @@ int hdlc_ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
107 dev->hard_header = NULL; 107 dev->hard_header = NULL;
108 dev->type = ARPHRD_PPP; 108 dev->type = ARPHRD_PPP;
109 dev->addr_len = 0; 109 dev->addr_len = 0;
110 netif_dormant_off(dev);
110 return 0; 111 return 0;
111 } 112 }
112 113
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index 9456d31cb1c1..f15aa6ba77f1 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -82,6 +82,7 @@ int hdlc_raw_ioctl(struct net_device *dev, struct ifreq *ifr)
82 dev->type = ARPHRD_RAWHDLC; 82 dev->type = ARPHRD_RAWHDLC;
83 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 83 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
84 dev->addr_len = 0; 84 dev->addr_len = 0;
85 netif_dormant_off(dev);
85 return 0; 86 return 0;
86 } 87 }
87 88
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index b1285cc8fee6..d1884987f94e 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -100,6 +100,7 @@ int hdlc_raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
100 dev->tx_queue_len = old_qlen; 100 dev->tx_queue_len = old_qlen;
101 memcpy(dev->dev_addr, "\x00\x01", 2); 101 memcpy(dev->dev_addr, "\x00\x01", 2);
102 get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2); 102 get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
103 netif_dormant_off(dev);
103 return 0; 104 return 0;
104 } 105 }
105 106
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index 07e5eef1fe0f..a867fb411f89 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -212,6 +212,7 @@ int hdlc_x25_ioctl(struct net_device *dev, struct ifreq *ifr)
212 dev->hard_header = NULL; 212 dev->hard_header = NULL;
213 dev->type = ARPHRD_X25; 213 dev->type = ARPHRD_X25;
214 dev->addr_len = 0; 214 dev->addr_len = 0;
215 netif_dormant_off(dev);
215 return 0; 216 return 0;
216 } 217 }
217 218
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index e013b817cab8..dcf46add3adf 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -564,4 +564,5 @@ module_exit(n2_cleanup);
564MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); 564MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
565MODULE_DESCRIPTION("RISCom/N2 serial port driver"); 565MODULE_DESCRIPTION("RISCom/N2 serial port driver");
566MODULE_LICENSE("GPL v2"); 566MODULE_LICENSE("GPL v2");
567module_param(hw, charp, 0444); /* hw=io,irq,ram,ports:io,irq,... */ 567module_param(hw, charp, 0444);
568MODULE_PARM_DESC(hw, "io,irq,ram,ports:io,irq,...");
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index fa9d2c4edc93..2e8ac995d56f 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -447,6 +447,7 @@ config AIRO_CS
447 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" 447 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
448 depends on NET_RADIO && PCMCIA && (BROKEN || !M32R) 448 depends on NET_RADIO && PCMCIA && (BROKEN || !M32R)
449 select CRYPTO 449 select CRYPTO
450 select CRYPTO_AES
450 ---help--- 451 ---help---
451 This is the standard Linux driver to support Cisco/Aironet PCMCIA 452 This is the standard Linux driver to support Cisco/Aironet PCMCIA
452 802.11 wireless cards. This driver is the same as the Aironet 453 802.11 wireless cards. This driver is the same as the Aironet
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 3889f79e7128..df317c1e12a8 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -3701,7 +3701,7 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
3701 } 3701 }
3702 if (sec->flags & SEC_AUTH_MODE) { 3702 if (sec->flags & SEC_AUTH_MODE) {
3703 secinfo->auth_mode = sec->auth_mode; 3703 secinfo->auth_mode = sec->auth_mode;
3704 dprintk(", .auth_mode = %d\n", sec->auth_mode); 3704 dprintk(", .auth_mode = %d", sec->auth_mode);
3705 } 3705 }
3706 dprintk("\n"); 3706 dprintk("\n");
3707 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED && 3707 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED &&
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index d6ed5781b93a..317ace7f9aae 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -2875,7 +2875,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
2875 if (orinoco_lock(priv, &flags) != 0) 2875 if (orinoco_lock(priv, &flags) != 0)
2876 return -EBUSY; 2876 return -EBUSY;
2877 2877
2878 if (erq->pointer) { 2878 if (erq->length > 0) {
2879 if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) 2879 if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
2880 index = priv->tx_key; 2880 index = priv->tx_key;
2881 2881
@@ -2918,7 +2918,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
2918 if (erq->flags & IW_ENCODE_RESTRICTED) 2918 if (erq->flags & IW_ENCODE_RESTRICTED)
2919 restricted = 1; 2919 restricted = 1;
2920 2920
2921 if (erq->pointer) { 2921 if (erq->pointer && erq->length > 0) {
2922 priv->keys[index].len = cpu_to_le16(xlen); 2922 priv->keys[index].len = cpu_to_le16(xlen);
2923 memset(priv->keys[index].data, 0, 2923 memset(priv->keys[index].data, 0,
2924 sizeof(priv->keys[index].data)); 2924 sizeof(priv->keys[index].data));
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 662ecc8a33ff..c52e9bcf8d02 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -1820,6 +1820,8 @@ static int zd1201_probe(struct usb_interface *interface,
1820 zd->dev->name); 1820 zd->dev->name);
1821 1821
1822 usb_set_intfdata(interface, zd); 1822 usb_set_intfdata(interface, zd);
1823 zd1201_enable(zd); /* zd1201 likes to startup enabled, */
1824 zd1201_disable(zd); /* interfering with all the wifis in range */
1823 return 0; 1825 return 0;
1824 1826
1825err_net: 1827err_net:
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 4cd879cb9bdd..1140302ff11d 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -304,6 +304,7 @@ static int __init xpram_setup_sizes(unsigned long pages)
304{ 304{
305 unsigned long mem_needed; 305 unsigned long mem_needed;
306 unsigned long mem_auto; 306 unsigned long mem_auto;
307 unsigned long long size;
307 int mem_auto_no; 308 int mem_auto_no;
308 int i; 309 int i;
309 310
@@ -321,9 +322,19 @@ static int __init xpram_setup_sizes(unsigned long pages)
321 mem_needed = 0; 322 mem_needed = 0;
322 mem_auto_no = 0; 323 mem_auto_no = 0;
323 for (i = 0; i < xpram_devs; i++) { 324 for (i = 0; i < xpram_devs; i++) {
324 if (sizes[i]) 325 if (sizes[i]) {
325 xpram_sizes[i] = 326 size = simple_strtoull(sizes[i], &sizes[i], 0);
326 (memparse(sizes[i], &sizes[i]) + 3) & -4UL; 327 switch (sizes[i][0]) {
328 case 'g':
329 case 'G':
330 size <<= 20;
331 break;
332 case 'm':
333 case 'M':
334 size <<= 10;
335 }
336 xpram_sizes[i] = (size + 3) & -4UL;
337 }
327 if (xpram_sizes[i]) 338 if (xpram_sizes[i])
328 mem_needed += xpram_sizes[i]; 339 mem_needed += xpram_sizes[i];
329 else 340 else
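The replacement parser above drops memparse(), whose K/M/G suffixes are byte-based, in favour of simple_strtoull() plus an explicit suffix switch; the shift amounts imply the xpram sizes are kept in KiB, so 'M' shifts by 10 and 'G' by 20, and the result is still rounded up to a 4 KiB multiple with (size + 3) & -4UL. A standalone sketch of the same arithmetic, with strtoull standing in for the kernel's simple_strtoull() (the KiB interpretation is an assumption stated here, not spelled out in the hunk):

#include <stdio.h>
#include <stdlib.h>

/* Parse an xpram-style size string expressed in KiB, with optional
 * m/M (MiB) or g/G (GiB) suffix, rounded up to a 4 KiB multiple --
 * the same arithmetic as the new xpram_setup_sizes() code. */
static unsigned long long parse_size_kb(const char *s)
{
	char *end;
	unsigned long long size = strtoull(s, &end, 0);

	switch (*end) {
	case 'g': case 'G':
		size <<= 20;		/* GiB -> KiB */
		break;
	case 'm': case 'M':
		size <<= 10;		/* MiB -> KiB */
	}
	return (size + 3) & ~3ULL;	/* round up to 4 KiB */
}

int main(void)
{
	printf("%llu\n", parse_size_kb("2G"));   /* 2097152 KiB */
	printf("%llu\n", parse_size_kb("513"));  /* rounded up to 516 KiB */
	return 0;
}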
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 95e285b2e25c..7a84014f2037 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -1106,10 +1106,10 @@ raw3270_delete_device(struct raw3270 *rp)
1106 1106
1107 /* Remove from device chain. */ 1107 /* Remove from device chain. */
1108 mutex_lock(&raw3270_mutex); 1108 mutex_lock(&raw3270_mutex);
1109 if (rp->clttydev) 1109 if (rp->clttydev && !IS_ERR(rp->clttydev))
1110 class_device_destroy(class3270, 1110 class_device_destroy(class3270,
1111 MKDEV(IBM_TTY3270_MAJOR, rp->minor)); 1111 MKDEV(IBM_TTY3270_MAJOR, rp->minor));
1112 if (rp->cltubdev) 1112 if (rp->cltubdev && !IS_ERR(rp->cltubdev))
1113 class_device_destroy(class3270, 1113 class_device_destroy(class3270,
1114 MKDEV(IBM_FS3270_MAJOR, rp->minor)); 1114 MKDEV(IBM_FS3270_MAJOR, rp->minor));
1115 list_del_init(&rp->list); 1115 list_del_init(&rp->list);
@@ -1173,21 +1173,37 @@ static struct attribute_group raw3270_attr_group = {
1173 .attrs = raw3270_attrs, 1173 .attrs = raw3270_attrs,
1174}; 1174};
1175 1175
1176static void 1176static int raw3270_create_attributes(struct raw3270 *rp)
1177raw3270_create_attributes(struct raw3270 *rp)
1178{ 1177{
1179 //FIXME: check return code 1178 int rc;
1180 sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group); 1179
1181 rp->clttydev = 1180 rc = sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
1182 class_device_create(class3270, NULL, 1181 if (rc)
1183 MKDEV(IBM_TTY3270_MAJOR, rp->minor), 1182 goto out;
1184 &rp->cdev->dev, "tty%s", 1183
1185 rp->cdev->dev.bus_id); 1184 rp->clttydev = class_device_create(class3270, NULL,
1186 rp->cltubdev = 1185 MKDEV(IBM_TTY3270_MAJOR, rp->minor),
1187 class_device_create(class3270, NULL, 1186 &rp->cdev->dev, "tty%s",
1188 MKDEV(IBM_FS3270_MAJOR, rp->minor), 1187 rp->cdev->dev.bus_id);
1189 &rp->cdev->dev, "tub%s", 1188 if (IS_ERR(rp->clttydev)) {
1190 rp->cdev->dev.bus_id); 1189 rc = PTR_ERR(rp->clttydev);
1190 goto out_ttydev;
1191 }
1192
1193 rp->cltubdev = class_device_create(class3270, NULL,
1194 MKDEV(IBM_FS3270_MAJOR, rp->minor),
1195 &rp->cdev->dev, "tub%s",
1196 rp->cdev->dev.bus_id);
1197 if (!IS_ERR(rp->cltubdev))
1198 goto out;
1199
1200 rc = PTR_ERR(rp->cltubdev);
1201 class_device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor));
1202
1203out_ttydev:
1204 sysfs_remove_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
1205out:
1206 return rc;
1191} 1207}
1192 1208
1193/* 1209/*
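The old FIXME is resolved above with the usual create-then-unwind pattern: each step that can fail is checked, and a failing step undoes everything created before it via the goto labels at the bottom, so raw3270_set_online() can simply propagate the return code. A minimal standalone illustration of that goto-unwind shape, with stubs in place of sysfs_create_group()/class_device_create():

#include <stdio.h>

static int  create_group(void) { puts("create sysfs group"); return 0; }
static void remove_group(void) { puts("remove sysfs group"); }
static int  create_tty(void)   { puts("create tty class device"); return 0; }
static void destroy_tty(void)  { puts("destroy tty class device"); }
static int  create_tub(void)   { puts("create tub class device"); return -1; }

/* Same shape as raw3270_create_attributes(): a failing step unwinds
 * whatever the earlier steps created, in reverse order. */
static int create_attributes(void)
{
	int rc;

	rc = create_group();
	if (rc)
		goto out;
	rc = create_tty();
	if (rc)
		goto out_group;
	rc = create_tub();
	if (!rc)
		goto out;

	destroy_tty();
out_group:
	remove_group();
out:
	return rc;
}

int main(void)
{
	printf("create_attributes() = %d\n", create_attributes());
	return 0;
}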
@@ -1255,7 +1271,9 @@ raw3270_set_online (struct ccw_device *cdev)
1255 rc = raw3270_reset_device(rp); 1271 rc = raw3270_reset_device(rp);
1256 if (rc) 1272 if (rc)
1257 goto failure; 1273 goto failure;
1258 raw3270_create_attributes(rp); 1274 rc = raw3270_create_attributes(rp);
1275 if (rc)
1276 goto failure;
1259 set_bit(RAW3270_FLAGS_READY, &rp->flags); 1277 set_bit(RAW3270_FLAGS_READY, &rp->flags);
1260 mutex_lock(&raw3270_mutex); 1278 mutex_lock(&raw3270_mutex);
1261 list_for_each_entry(np, &raw3270_notifier, list) 1279 list_for_each_entry(np, &raw3270_notifier, list)
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index a5c68e60fcf4..643b6d078563 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -76,14 +76,22 @@ struct tape_class_device *register_tape_dev(
76 device, 76 device,
77 "%s", tcd->device_name 77 "%s", tcd->device_name
78 ); 78 );
79 sysfs_create_link( 79 rc = PTR_ERR(tcd->class_device);
80 if (rc)
81 goto fail_with_cdev;
82 rc = sysfs_create_link(
80 &device->kobj, 83 &device->kobj,
81 &tcd->class_device->kobj, 84 &tcd->class_device->kobj,
82 tcd->mode_name 85 tcd->mode_name
83 ); 86 );
87 if (rc)
88 goto fail_with_class_device;
84 89
85 return tcd; 90 return tcd;
86 91
92fail_with_class_device:
93 class_device_destroy(tape_class, tcd->char_device->dev);
94
87fail_with_cdev: 95fail_with_cdev:
88 cdev_del(tcd->char_device); 96 cdev_del(tcd->char_device);
89 97
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 122b4d8965c3..2826aed91043 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -543,20 +543,24 @@ int
543tape_generic_probe(struct ccw_device *cdev) 543tape_generic_probe(struct ccw_device *cdev)
544{ 544{
545 struct tape_device *device; 545 struct tape_device *device;
546 int ret;
546 547
547 device = tape_alloc_device(); 548 device = tape_alloc_device();
548 if (IS_ERR(device)) 549 if (IS_ERR(device))
549 return -ENODEV; 550 return -ENODEV;
550 PRINT_INFO("tape device %s found\n", cdev->dev.bus_id); 551 ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
552 ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
553 if (ret) {
554 tape_put_device(device);
555 PRINT_ERR("probe failed for tape device %s\n", cdev->dev.bus_id);
556 return ret;
557 }
551 cdev->dev.driver_data = device; 558 cdev->dev.driver_data = device;
559 cdev->handler = __tape_do_irq;
552 device->cdev = cdev; 560 device->cdev = cdev;
553 device->cdev_id = busid_to_int(cdev->dev.bus_id); 561 device->cdev_id = busid_to_int(cdev->dev.bus_id);
554 cdev->handler = __tape_do_irq; 562 PRINT_INFO("tape device %s found\n", cdev->dev.bus_id);
555 563 return ret;
556 ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
557 sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
558
559 return 0;
560} 564}
561 565
562static inline void 566static inline void
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index f26a2ee3aad8..3cba6c9fab11 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -152,7 +152,6 @@ ccwgroup_create(struct device *root,
152 struct ccwgroup_device *gdev; 152 struct ccwgroup_device *gdev;
153 int i; 153 int i;
154 int rc; 154 int rc;
155 int del_drvdata;
156 155
157 if (argc > 256) /* disallow dumb users */ 156 if (argc > 256) /* disallow dumb users */
158 return -EINVAL; 157 return -EINVAL;
@@ -163,7 +162,6 @@ ccwgroup_create(struct device *root,
163 162
164 atomic_set(&gdev->onoff, 0); 163 atomic_set(&gdev->onoff, 0);
165 164
166 del_drvdata = 0;
167 for (i = 0; i < argc; i++) { 165 for (i = 0; i < argc; i++) {
168 gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); 166 gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]);
169 167
@@ -180,10 +178,8 @@ ccwgroup_create(struct device *root,
180 rc = -EINVAL; 178 rc = -EINVAL;
181 goto free_dev; 179 goto free_dev;
182 } 180 }
183 }
184 for (i = 0; i < argc; i++)
185 gdev->cdev[i]->dev.driver_data = gdev; 181 gdev->cdev[i]->dev.driver_data = gdev;
186 del_drvdata = 1; 182 }
187 183
188 gdev->creator_id = creator_id; 184 gdev->creator_id = creator_id;
189 gdev->count = argc; 185 gdev->count = argc;
@@ -226,9 +222,9 @@ error:
226free_dev: 222free_dev:
227 for (i = 0; i < argc; i++) 223 for (i = 0; i < argc; i++)
228 if (gdev->cdev[i]) { 224 if (gdev->cdev[i]) {
229 put_device(&gdev->cdev[i]->dev); 225 if (gdev->cdev[i]->dev.driver_data == gdev)
230 if (del_drvdata)
231 gdev->cdev[i]->dev.driver_data = NULL; 226 gdev->cdev[i]->dev.driver_data = NULL;
227 put_device(&gdev->cdev[i]->dev);
232 } 228 }
233 kfree(gdev); 229 kfree(gdev);
234 return rc; 230 return rc;
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 0df3af1f08de..828b2d334f0a 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1068,6 +1068,7 @@ cmb_show_avg_sample_interval(struct device *dev, struct device_attribute *attr,
1068 if (count) { 1068 if (count) {
1069 interval = cmb_data->last_update - 1069 interval = cmb_data->last_update -
1070 cdev->private->cmb_start_time; 1070 cdev->private->cmb_start_time;
1071 interval = (interval * 1000) >> 12;
1071 interval /= count; 1072 interval /= count;
1072 } else 1073 } else
1073 interval = -1; 1074 interval = -1;
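The added line converts the raw interval, which up to this point is an s390 TOD-clock delta, before it is averaged over the sample count. Assuming the usual TOD convention that 4096 clock units equal one microsecond, multiplying by 1000 and shifting right by 12 yields nanoseconds (x * 1000 / 4096). A worked sketch of that conversion:

#include <stdio.h>

/* Convert an s390 TOD-clock delta to nanoseconds, assuming 4096 TOD
 * units per microsecond:
 *   ns = tod * 1000 / 4096  ==  (tod * 1000) >> 12
 * which is what the added line computes before dividing by count. */
static unsigned long long tod_to_ns(unsigned long long tod)
{
	return (tod * 1000) >> 12;
}

int main(void)
{
	unsigned long long one_second_tod = 4096ULL * 1000 * 1000;

	printf("%llu ns\n", tod_to_ns(one_second_tod)); /* 1000000000 */
	return 0;
}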
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index ac6e0c7e43d9..7a39e0b0386c 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -152,7 +152,8 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
152 if (cdev->private->iretry) { 152 if (cdev->private->iretry) {
153 cdev->private->iretry--; 153 cdev->private->iretry--;
154 ret = cio_halt(sch); 154 ret = cio_halt(sch);
155 return (ret == 0) ? -EBUSY : ret; 155 if (ret != -EBUSY)
156 return (ret == 0) ? -EBUSY : ret;
156 } 157 }
157 /* halt io unsuccessful. */ 158 /* halt io unsuccessful. */
158 cdev->private->iretry = 255; /* 255 clear retries. */ 159 cdev->private->iretry = 255; /* 255 clear retries. */
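The two-line change above alters the halt retry logic: previously every cio_halt() result, including -EBUSY, was returned to the caller, so the clear I/O path below was never reached while halts kept coming back busy; now only a non-busy result is returned and -EBUSY falls through to the clear retries. A compact standalone sketch of that fall-through, with stubbed cio_halt()/cio_clear() and simplified return handling:

#include <stdio.h>
#include <errno.h>

static int halt_result = -EBUSY;	/* simulate a busy subchannel */

static int cio_halt(void)  { return halt_result; }
static int cio_clear(void) { puts("issuing clear I/O"); return 0; }

/* Shape of the fixed logic: a successful or hard-failed halt is
 * reported to the caller, but -EBUSY now drops through to clear I/O
 * instead of being returned. */
static int cancel_halt_clear(int *halt_retries, int *clear_retries)
{
	if (*halt_retries) {
		int ret;

		(*halt_retries)--;
		ret = cio_halt();
		if (ret != -EBUSY)
			return (ret == 0) ? -EBUSY : ret;
	}
	/* halt unsuccessful or busy: fall back to clear I/O */
	*clear_retries = 255;
	return cio_clear();
}

int main(void)
{
	int halt_retries = 255, clear_retries = 0;

	printf("result %d, clear retries %d\n",
	       cancel_halt_clear(&halt_retries, &clear_retries),
	       clear_retries);
	return 0;
}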
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index 20c8eb16f464..8a4b58120146 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -2686,9 +2686,17 @@ static struct attribute_group ctc_attr_group = {
2686static int 2686static int
2687ctc_add_attributes(struct device *dev) 2687ctc_add_attributes(struct device *dev)
2688{ 2688{
2689 device_create_file(dev, &dev_attr_loglevel); 2689 int rc;
2690 device_create_file(dev, &dev_attr_stats); 2690
2691 return 0; 2691 rc = device_create_file(dev, &dev_attr_loglevel);
2692 if (rc)
2693 goto out;
2694 rc = device_create_file(dev, &dev_attr_stats);
2695 if (!rc)
2696 goto out;
2697 device_remove_file(dev, &dev_attr_loglevel);
2698out:
2699 return rc;
2692} 2700}
2693 2701
2694static void 2702static void
@@ -2901,7 +2909,12 @@ ctc_new_device(struct ccwgroup_device *cgdev)
2901 goto out; 2909 goto out;
2902 } 2910 }
2903 2911
2904 ctc_add_attributes(&cgdev->dev); 2912 if (ctc_add_attributes(&cgdev->dev)) {
2913 ctc_netdev_unregister(dev);
2914 dev->priv = NULL;
2915 ctc_free_netdevice(dev, 1);
2916 goto out;
2917 }
2905 2918
2906 strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name)); 2919 strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name));
2907 2920
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 103c41470bd2..5fff1f93973a 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -8451,10 +8451,11 @@ __qeth_reboot_event_card(struct device *dev, void *data)
8451static int 8451static int
8452qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) 8452qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
8453{ 8453{
8454 int ret;
8454 8455
8455 driver_for_each_device(&qeth_ccwgroup_driver.driver, NULL, NULL, 8456 ret = driver_for_each_device(&qeth_ccwgroup_driver.driver, NULL, NULL,
8456 __qeth_reboot_event_card); 8457 __qeth_reboot_event_card);
8457 return NOTIFY_DONE; 8458 return ret ? NOTIFY_BAD : NOTIFY_DONE;
8458} 8459}
8459 8460
8460 8461
diff --git a/drivers/sbus/sbus.c b/drivers/sbus/sbus.c
index 16b59773c0bb..935952ef88f1 100644
--- a/drivers/sbus/sbus.c
+++ b/drivers/sbus/sbus.c
@@ -233,7 +233,7 @@ static void __init build_one_sbus(struct device_node *dp, int num_sbus)
233 sbus->ofdev.node = dp; 233 sbus->ofdev.node = dp;
234 sbus->ofdev.dev.parent = NULL; 234 sbus->ofdev.dev.parent = NULL;
235 sbus->ofdev.dev.bus = &sbus_bus_type; 235 sbus->ofdev.dev.bus = &sbus_bus_type;
236 strcpy(sbus->ofdev.dev.bus_id, dp->path_component_name); 236 sprintf(sbus->ofdev.dev.bus_id, "sbus%d", num_sbus);
237 237
238 if (of_device_register(&sbus->ofdev) != 0) 238 if (of_device_register(&sbus->ofdev) != 0)
239 printk(KERN_DEBUG "sbus: device registration error for %s!\n", 239 printk(KERN_DEBUG "sbus: device registration error for %s!\n",
diff --git a/drivers/scsi/53c7xx.c b/drivers/scsi/53c7xx.c
index c690c2b89e41..acf292736b4e 100644
--- a/drivers/scsi/53c7xx.c
+++ b/drivers/scsi/53c7xx.c
@@ -3451,12 +3451,12 @@ create_cmd (Scsi_Cmnd *cmd) {
3451 for (i = 0; cmd->use_sg ? (i < cmd->use_sg) : !i; cmd_datain += 4, 3451 for (i = 0; cmd->use_sg ? (i < cmd->use_sg) : !i; cmd_datain += 4,
3452 cmd_dataout += 4, ++i) { 3452 cmd_dataout += 4, ++i) {
3453 u32 vbuf = cmd->use_sg 3453 u32 vbuf = cmd->use_sg
3454 ? (u32)page_address(((struct scatterlist *)cmd->buffer)[i].page)+ 3454 ? (u32)page_address(((struct scatterlist *)cmd->request_buffer)[i].page)+
3455 ((struct scatterlist *)cmd->buffer)[i].offset 3455 ((struct scatterlist *)cmd->request_buffer)[i].offset
3456 : (u32)(cmd->request_buffer); 3456 : (u32)(cmd->request_buffer);
3457 u32 bbuf = virt_to_bus((void *)vbuf); 3457 u32 bbuf = virt_to_bus((void *)vbuf);
3458 u32 count = cmd->use_sg ? 3458 u32 count = cmd->use_sg ?
3459 ((struct scatterlist *)cmd->buffer)[i].length : 3459 ((struct scatterlist *)cmd->request_buffer)[i].length :
3460 cmd->request_bufflen; 3460 cmd->request_bufflen;
3461 3461
3462 /* 3462 /*
@@ -5417,7 +5417,7 @@ insn_to_offset (Scsi_Cmnd *cmd, u32 *insn) {
5417 5417
5418 if ((buffers = cmd->use_sg)) { 5418 if ((buffers = cmd->use_sg)) {
5419 for (offset = 0, 5419 for (offset = 0,
5420 segment = (struct scatterlist *) cmd->buffer; 5420 segment = (struct scatterlist *) cmd->request_buffer;
5421 buffers && !((found = ((ptr >= (char *)page_address(segment->page)+segment->offset) && 5421 buffers && !((found = ((ptr >= (char *)page_address(segment->page)+segment->offset) &&
5422 (ptr < ((char *)page_address(segment->page)+segment->offset+segment->length))))); 5422 (ptr < ((char *)page_address(segment->page)+segment->offset+segment->length)))));
5423 --buffers, offset += segment->length, ++segment) 5423 --buffers, offset += segment->length, ++segment)
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c
index 8a4659e94105..bdc6bb262bce 100644
--- a/drivers/scsi/NCR53C9x.c
+++ b/drivers/scsi/NCR53C9x.c
@@ -911,7 +911,7 @@ static void esp_get_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp)
911 sp->SCp.ptr = 911 sp->SCp.ptr =
912 (char *) virt_to_phys(sp->request_buffer); 912 (char *) virt_to_phys(sp->request_buffer);
913 } else { 913 } else {
914 sp->SCp.buffer = (struct scatterlist *) sp->buffer; 914 sp->SCp.buffer = (struct scatterlist *) sp->request_buffer;
915 sp->SCp.buffers_residual = sp->use_sg - 1; 915 sp->SCp.buffers_residual = sp->use_sg - 1;
916 sp->SCp.this_residual = sp->SCp.buffer->length; 916 sp->SCp.this_residual = sp->SCp.buffer->length;
917 if (esp->dma_mmu_get_scsi_sgl) 917 if (esp->dma_mmu_get_scsi_sgl)
@@ -2152,29 +2152,23 @@ static int esp_do_data_finale(struct NCR_ESP *esp,
2152 */ 2152 */
2153static int esp_should_clear_sync(Scsi_Cmnd *sp) 2153static int esp_should_clear_sync(Scsi_Cmnd *sp)
2154{ 2154{
2155 unchar cmd1 = sp->cmnd[0]; 2155 unchar cmd = sp->cmnd[0];
2156 unchar cmd2 = sp->data_cmnd[0];
2157 2156
2158 /* These cases are for spinning up a disk and 2157 /* These cases are for spinning up a disk and
2159 * waiting for that spinup to complete. 2158 * waiting for that spinup to complete.
2160 */ 2159 */
2161 if(cmd1 == START_STOP || 2160 if(cmd == START_STOP)
2162 cmd2 == START_STOP)
2163 return 0; 2161 return 0;
2164 2162
2165 if(cmd1 == TEST_UNIT_READY || 2163 if(cmd == TEST_UNIT_READY)
2166 cmd2 == TEST_UNIT_READY)
2167 return 0; 2164 return 0;
2168 2165
2169 /* One more special case for SCSI tape drives, 2166 /* One more special case for SCSI tape drives,
2170 * this is what is used to probe the device for 2167 * this is what is used to probe the device for
2171 * completion of a rewind or tape load operation. 2168 * completion of a rewind or tape load operation.
2172 */ 2169 */
2173 if(sp->device->type == TYPE_TAPE) { 2170 if(sp->device->type == TYPE_TAPE && cmd == MODE_SENSE)
2174 if(cmd1 == MODE_SENSE || 2171 return 0;
2175 cmd2 == MODE_SENSE)
2176 return 0;
2177 }
2178 2172
2179 return 1; 2173 return 1;
2180} 2174}
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index a06f547e87f7..d05681f9d81a 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -114,7 +114,7 @@ MODULE_DESCRIPTION("NCR Dual700 SCSI Driver");
114MODULE_LICENSE("GPL"); 114MODULE_LICENSE("GPL");
115module_param(NCR_D700, charp, 0); 115module_param(NCR_D700, charp, 0);
116 116
117static __u8 __initdata id_array[2*(MCA_MAX_SLOT_NR + 1)] = 117static __u8 __devinitdata id_array[2*(MCA_MAX_SLOT_NR + 1)] =
118 { [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 }; 118 { [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 };
119 119
120#ifdef MODULE 120#ifdef MODULE
@@ -173,7 +173,7 @@ struct NCR_D700_private {
173 char pad; 173 char pad;
174}; 174};
175 175
176static int 176static int __devinit
177NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq, 177NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
178 int slot, u32 region, int differential) 178 int slot, u32 region, int differential)
179{ 179{
@@ -243,7 +243,7 @@ NCR_D700_intr(int irq, void *data, struct pt_regs *regs)
 243 * essentially connected to the MCA bus independently, it is easier 243 * essentially connected to the MCA bus independently, it is easier

244 * to set them up as two separate host adapters, rather than one 244 * to set them up as two separate host adapters, rather than one
245 * adapter with two channels */ 245 * adapter with two channels */
246static int 246static int __devinit
247NCR_D700_probe(struct device *dev) 247NCR_D700_probe(struct device *dev)
248{ 248{
249 struct NCR_D700_private *p; 249 struct NCR_D700_private *p;
@@ -329,7 +329,7 @@ NCR_D700_probe(struct device *dev)
329 for (i = 0; i < 2; i++) { 329 for (i = 0; i < 2; i++) {
330 int err; 330 int err;
331 331
332 if ((err = NCR_D700_probe_one(p, i, slot, irq, 332 if ((err = NCR_D700_probe_one(p, i, irq, slot,
333 offset_addr + (0x80 * i), 333 offset_addr + (0x80 * i),
334 differential)) != 0) 334 differential)) != 0)
335 printk("D700: SIOP%d: probe failed, error = %d\n", 335 printk("D700: SIOP%d: probe failed, error = %d\n",
@@ -349,7 +349,7 @@ NCR_D700_probe(struct device *dev)
349 return 0; 349 return 0;
350} 350}
351 351
352static void 352static void __devexit
353NCR_D700_remove_one(struct Scsi_Host *host) 353NCR_D700_remove_one(struct Scsi_Host *host)
354{ 354{
355 scsi_remove_host(host); 355 scsi_remove_host(host);
@@ -359,7 +359,7 @@ NCR_D700_remove_one(struct Scsi_Host *host)
359 release_region(host->base, 64); 359 release_region(host->base, 64);
360} 360}
361 361
362static int 362static int __devexit
363NCR_D700_remove(struct device *dev) 363NCR_D700_remove(struct device *dev)
364{ 364{
365 struct NCR_D700_private *p = dev_get_drvdata(dev); 365 struct NCR_D700_private *p = dev_get_drvdata(dev);
@@ -380,7 +380,7 @@ static struct mca_driver NCR_D700_driver = {
380 .name = "NCR_D700", 380 .name = "NCR_D700",
381 .bus = &mca_bus_type, 381 .bus = &mca_bus_type,
382 .probe = NCR_D700_probe, 382 .probe = NCR_D700_probe,
383 .remove = NCR_D700_remove, 383 .remove = __devexit_p(NCR_D700_remove),
384 }, 384 },
385}; 385};
386 386
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 36e63f82d9f8..f974869ea323 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -551,6 +551,11 @@ struct aha152x_hostdata {
551struct aha152x_scdata { 551struct aha152x_scdata {
552 Scsi_Cmnd *next; /* next sc in queue */ 552 Scsi_Cmnd *next; /* next sc in queue */
553 struct semaphore *sem; /* semaphore to block on */ 553 struct semaphore *sem; /* semaphore to block on */
554 unsigned char cmd_len;
555 unsigned char cmnd[MAX_COMMAND_SIZE];
556 unsigned short use_sg;
557 unsigned request_bufflen;
558 void *request_buffer;
554}; 559};
555 560
556 561
@@ -1006,11 +1011,20 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct semaphore *sem, int p
1006 return FAILED; 1011 return FAILED;
1007 } 1012 }
1008 } else { 1013 } else {
1014 struct aha152x_scdata *sc;
1015
1009 SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC); 1016 SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC);
1010 if(SCpnt->host_scribble==0) { 1017 if(SCpnt->host_scribble==0) {
1011 printk(ERR_LEAD "allocation failed\n", CMDINFO(SCpnt)); 1018 printk(ERR_LEAD "allocation failed\n", CMDINFO(SCpnt));
1012 return FAILED; 1019 return FAILED;
1013 } 1020 }
1021
1022 sc = SCDATA(SCpnt);
1023 memcpy(sc->cmnd, SCpnt->cmnd, sizeof(sc->cmnd));
1024 sc->request_buffer = SCpnt->request_buffer;
1025 sc->request_bufflen = SCpnt->request_bufflen;
1026 sc->use_sg = SCpnt->use_sg;
1027 sc->cmd_len = SCpnt->cmd_len;
1014 } 1028 }
1015 1029
1016 SCNEXT(SCpnt) = NULL; 1030 SCNEXT(SCpnt) = NULL;
@@ -1165,6 +1179,10 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
1165 DECLARE_MUTEX_LOCKED(sem); 1179 DECLARE_MUTEX_LOCKED(sem);
1166 struct timer_list timer; 1180 struct timer_list timer;
1167 int ret, issued, disconnected; 1181 int ret, issued, disconnected;
1182 unsigned char old_cmd_len = SCpnt->cmd_len;
1183 unsigned short old_use_sg = SCpnt->use_sg;
1184 void *old_buffer = SCpnt->request_buffer;
1185 unsigned old_bufflen = SCpnt->request_bufflen;
1168 unsigned long flags; 1186 unsigned long flags;
1169 1187
1170#if defined(AHA152X_DEBUG) 1188#if defined(AHA152X_DEBUG)
@@ -1198,11 +1216,11 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
1198 add_timer(&timer); 1216 add_timer(&timer);
1199 down(&sem); 1217 down(&sem);
1200 del_timer(&timer); 1218 del_timer(&timer);
1201 1219
1202 SCpnt->cmd_len = SCpnt->old_cmd_len; 1220 SCpnt->cmd_len = old_cmd_len;
1203 SCpnt->use_sg = SCpnt->old_use_sg; 1221 SCpnt->use_sg = old_use_sg;
1204 SCpnt->request_buffer = SCpnt->buffer; 1222 SCpnt->request_buffer = old_buffer;
1205 SCpnt->request_bufflen = SCpnt->bufflen; 1223 SCpnt->request_bufflen = old_bufflen;
1206 1224
1207 DO_LOCK(flags); 1225 DO_LOCK(flags);
1208 1226
@@ -1565,6 +1583,9 @@ static void busfree_run(struct Scsi_Host *shpnt)
1565#endif 1583#endif
1566 1584
1567 if(DONE_SC->SCp.phase & check_condition) { 1585 if(DONE_SC->SCp.phase & check_condition) {
1586 struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC;
1587 struct aha152x_scdata *sc = SCDATA(cmd);
1588
1568#if 0 1589#if 0
1569 if(HOSTDATA(shpnt)->debug & debug_eh) { 1590 if(HOSTDATA(shpnt)->debug & debug_eh) {
1570 printk(ERR_LEAD "received sense: ", CMDINFO(DONE_SC)); 1591 printk(ERR_LEAD "received sense: ", CMDINFO(DONE_SC));
@@ -1573,13 +1594,13 @@ static void busfree_run(struct Scsi_Host *shpnt)
1573#endif 1594#endif
1574 1595
1575 /* restore old command */ 1596 /* restore old command */
1576 memcpy((void *) DONE_SC->cmnd, (void *) DONE_SC->data_cmnd, sizeof(DONE_SC->data_cmnd)); 1597 memcpy(cmd->cmnd, sc->cmnd, sizeof(sc->cmnd));
1577 DONE_SC->request_buffer = DONE_SC->buffer; 1598 cmd->request_buffer = sc->request_buffer;
1578 DONE_SC->request_bufflen = DONE_SC->bufflen; 1599 cmd->request_bufflen = sc->request_bufflen;
1579 DONE_SC->use_sg = DONE_SC->old_use_sg; 1600 cmd->use_sg = sc->use_sg;
1580 DONE_SC->cmd_len = DONE_SC->old_cmd_len; 1601 cmd->cmd_len = sc->cmd_len;
1581 1602
1582 DONE_SC->SCp.Status = 0x02; 1603 cmd->SCp.Status = 0x02;
1583 1604
1584 HOSTDATA(shpnt)->commands--; 1605 HOSTDATA(shpnt)->commands--;
1585 if (!HOSTDATA(shpnt)->commands) 1606 if (!HOSTDATA(shpnt)->commands)
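The fields copied here correspond to the additions to aha152x_scdata at the top of this patch: the mid-layer's Scsi_Cmnd no longer carries the old_* / buffer / bufflen shadow copies, so the driver now stashes the original command, buffer and length itself before reusing the command for autosense, and restores them from its own per-command data when the check condition completes. A tiny standalone sketch of that stash/restore idea (structure names and opcodes here are illustrative, not the driver's):

#include <stdio.h>
#include <string.h>

#define MAX_CDB 16

/* Cut-down command structure plus the private per-command area used
 * to remember the original request while the command is reused for a
 * REQUEST SENSE. */
struct cmd {
	unsigned char cdb[MAX_CDB];
	unsigned char cdb_len;
	void *buffer;
	unsigned int bufflen;
};

struct scdata {
	unsigned char cdb[MAX_CDB];
	unsigned char cdb_len;
	void *buffer;
	unsigned int bufflen;
};

static void stash(struct scdata *sc, const struct cmd *c)
{
	memcpy(sc->cdb, c->cdb, sizeof(sc->cdb));
	sc->cdb_len = c->cdb_len;
	sc->buffer  = c->buffer;
	sc->bufflen = c->bufflen;
}

static void restore(struct cmd *c, const struct scdata *sc)
{
	memcpy(c->cdb, sc->cdb, sizeof(sc->cdb));
	c->cdb_len = sc->cdb_len;
	c->buffer  = sc->buffer;
	c->bufflen = sc->bufflen;
}

int main(void)
{
	unsigned char data[512], sense[18];
	struct cmd c = { { 0x28 /* READ(10) */ }, 10, data, sizeof(data) };
	struct scdata sc;

	stash(&sc, &c);			/* before rewriting c for autosense */
	c.cdb[0] = 0x03;		/* REQUEST SENSE */
	c.cdb_len = 6;
	c.buffer  = sense;
	c.bufflen = sizeof(sense);
	restore(&c, &sc);		/* when the check condition completes */
	printf("restored opcode 0x%02x, %u byte buffer\n",
	       c.cdb[0], c.bufflen);
	return 0;
}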
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index a1e8ca758594..653818d2f802 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -7289,7 +7289,7 @@ ahd_reset_cmds_pending(struct ahd_softc *ahd)
7289 ahd->flags &= ~AHD_UPDATE_PEND_CMDS; 7289 ahd->flags &= ~AHD_UPDATE_PEND_CMDS;
7290} 7290}
7291 7291
7292void 7292static void
7293ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status) 7293ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status)
7294{ 7294{
7295 cam_status ostat; 7295 cam_status ostat;
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index b244c7124179..998999c0a972 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -243,25 +243,6 @@ ahd_print_path(struct ahd_softc *ahd, struct scb *scb)
243static uint32_t aic79xx_no_reset; 243static uint32_t aic79xx_no_reset;
244 244
245/* 245/*
246 * Certain PCI motherboards will scan PCI devices from highest to lowest,
247 * others scan from lowest to highest, and they tend to do all kinds of
248 * strange things when they come into contact with PCI bridge chips. The
249 * net result of all this is that the PCI card that is actually used to boot
250 * the machine is very hard to detect. Most motherboards go from lowest
251 * PCI slot number to highest, and the first SCSI controller found is the
252 * one you boot from. The only exceptions to this are when a controller
253 * has its BIOS disabled. So, we by default sort all of our SCSI controllers
254 * from lowest PCI slot number to highest PCI slot number. We also force
255 * all controllers with their BIOS disabled to the end of the list. This
256 * works on *almost* all computers. Where it doesn't work, we have this
257 * option. Setting this option to non-0 will reverse the order of the sort
258 * to highest first, then lowest, but will still leave cards with their BIOS
259 * disabled at the very end. That should fix everyone up unless there are
260 * really strange cirumstances.
261 */
262static uint32_t aic79xx_reverse_scan;
263
264/*
265 * Should we force EXTENDED translation on a controller. 246 * Should we force EXTENDED translation on a controller.
266 * 0 == Use whatever is in the SEEPROM or default to off 247 * 0 == Use whatever is in the SEEPROM or default to off
267 * 1 == Use whatever is in the SEEPROM or default to on 248 * 1 == Use whatever is in the SEEPROM or default to on
@@ -350,7 +331,6 @@ MODULE_PARM_DESC(aic79xx,
350" periodically to prevent tag starvation.\n" 331" periodically to prevent tag starvation.\n"
351" This may be required by some older disk\n" 332" This may be required by some older disk\n"
352" or drives/RAID arrays.\n" 333" or drives/RAID arrays.\n"
353" reverse_scan Sort PCI devices highest Bus/Slot to lowest\n"
354" tag_info:<tag_str> Set per-target tag depth\n" 334" tag_info:<tag_str> Set per-target tag depth\n"
355" global_tag_depth:<int> Global tag depth for all targets on all buses\n" 335" global_tag_depth:<int> Global tag depth for all targets on all buses\n"
356" slewrate:<slewrate_list>Set the signal slew rate (0-15).\n" 336" slewrate:<slewrate_list>Set the signal slew rate (0-15).\n"
@@ -1031,7 +1011,6 @@ aic79xx_setup(char *s)
1031#ifdef AHD_DEBUG 1011#ifdef AHD_DEBUG
1032 { "debug", &ahd_debug }, 1012 { "debug", &ahd_debug },
1033#endif 1013#endif
1034 { "reverse_scan", &aic79xx_reverse_scan },
1035 { "periodic_otag", &aic79xx_periodic_otag }, 1014 { "periodic_otag", &aic79xx_periodic_otag },
1036 { "pci_parity", &aic79xx_pci_parity }, 1015 { "pci_parity", &aic79xx_pci_parity },
1037 { "seltime", &aic79xx_seltime }, 1016 { "seltime", &aic79xx_seltime },
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index debf3e2a0798..aa4be8a31415 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -353,7 +353,6 @@ MODULE_PARM_DESC(aic7xxx,
353" periodically to prevent tag starvation.\n" 353" periodically to prevent tag starvation.\n"
354" This may be required by some older disk\n" 354" This may be required by some older disk\n"
355" drives or RAID arrays.\n" 355" drives or RAID arrays.\n"
356" reverse_scan Sort PCI devices highest Bus/Slot to lowest\n"
357" tag_info:<tag_str> Set per-target tag depth\n" 356" tag_info:<tag_str> Set per-target tag depth\n"
358" global_tag_depth:<int> Global tag depth for every target\n" 357" global_tag_depth:<int> Global tag depth for every target\n"
359" on every bus\n" 358" on every bus\n"
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 3e1053f111dc..4cf7afc31cc7 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2427,7 +2427,7 @@ int fas216_eh_abort(Scsi_Cmnd *SCpnt)
2427 info->stats.aborts += 1; 2427 info->stats.aborts += 1;
2428 2428
2429 printk(KERN_WARNING "scsi%d: abort command ", info->host->host_no); 2429 printk(KERN_WARNING "scsi%d: abort command ", info->host->host_no);
2430 __scsi_print_command(SCpnt->data_cmnd); 2430 __scsi_print_command(SCpnt->cmnd);
2431 2431
2432 print_debug_list(); 2432 print_debug_list();
2433 fas216_dumpstate(info); 2433 fas216_dumpstate(info);
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 94b1261a259d..19745a31072b 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -105,9 +105,6 @@ enum {
105 PIIX_FLAG_SCR = (1 << 26), /* SCR available */ 105 PIIX_FLAG_SCR = (1 << 26), /* SCR available */
106 PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */ 106 PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */
107 PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */ 107 PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */
108 PIIX_FLAG_COMBINED = (1 << 29), /* combined mode possible */
109 /* ICH6/7 use different scheme for map value */
110 PIIX_FLAG_COMBINED_ICH6 = PIIX_FLAG_COMBINED | (1 << 30),
111 108
112 /* combined mode. if set, PATA is channel 0. 109 /* combined mode. if set, PATA is channel 0.
113 * if clear, PATA is channel 1. 110 * if clear, PATA is channel 1.
@@ -126,6 +123,7 @@ enum {
126 ich6_sata = 4, 123 ich6_sata = 4,
127 ich6_sata_ahci = 5, 124 ich6_sata_ahci = 5,
128 ich6m_sata_ahci = 6, 125 ich6m_sata_ahci = 6,
126 ich8_sata_ahci = 7,
129 127
130 /* constants for mapping table */ 128 /* constants for mapping table */
131 P0 = 0, /* port 0 */ 129 P0 = 0, /* port 0 */
@@ -141,11 +139,19 @@ enum {
141 139
142struct piix_map_db { 140struct piix_map_db {
143 const u32 mask; 141 const u32 mask;
142 const u16 port_enable;
143 const int present_shift;
144 const int map[][4]; 144 const int map[][4];
145}; 145};
146 146
147struct piix_host_priv {
148 const int *map;
149 const struct piix_map_db *map_db;
150};
151
147static int piix_init_one (struct pci_dev *pdev, 152static int piix_init_one (struct pci_dev *pdev,
148 const struct pci_device_id *ent); 153 const struct pci_device_id *ent);
154static void piix_host_stop(struct ata_host_set *host_set);
149static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev); 155static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
150static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev); 156static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
151static void piix_pata_error_handler(struct ata_port *ap); 157static void piix_pata_error_handler(struct ata_port *ap);
@@ -186,11 +192,11 @@ static const struct pci_device_id piix_pci_tbl[] = {
186 /* Enterprise Southbridge 2 (where's the datasheet?) */ 192 /* Enterprise Southbridge 2 (where's the datasheet?) */
187 { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 193 { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
188 /* SATA Controller 1 IDE (ICH8, no datasheet yet) */ 194 /* SATA Controller 1 IDE (ICH8, no datasheet yet) */
189 { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 195 { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
190 /* SATA Controller 2 IDE (ICH8, ditto) */ 196 /* SATA Controller 2 IDE (ICH8, ditto) */
191 { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, 197 { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
192 /* Mobile SATA Controller IDE (ICH8M, ditto) */ 198 /* Mobile SATA Controller IDE (ICH8M, ditto) */
193 { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci }, 199 { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
194 200
195 { } /* terminate list */ 201 { } /* terminate list */
196}; 202};
@@ -254,7 +260,7 @@ static const struct ata_port_operations piix_pata_ops = {
254 260
255 .port_start = ata_port_start, 261 .port_start = ata_port_start,
256 .port_stop = ata_port_stop, 262 .port_stop = ata_port_stop,
257 .host_stop = ata_host_stop, 263 .host_stop = piix_host_stop,
258}; 264};
259 265
260static const struct ata_port_operations piix_sata_ops = { 266static const struct ata_port_operations piix_sata_ops = {
@@ -284,11 +290,13 @@ static const struct ata_port_operations piix_sata_ops = {
284 290
285 .port_start = ata_port_start, 291 .port_start = ata_port_start,
286 .port_stop = ata_port_stop, 292 .port_stop = ata_port_stop,
287 .host_stop = ata_host_stop, 293 .host_stop = piix_host_stop,
288}; 294};
289 295
290static struct piix_map_db ich5_map_db = { 296static const struct piix_map_db ich5_map_db = {
291 .mask = 0x7, 297 .mask = 0x7,
298 .port_enable = 0x3,
299 .present_shift = 4,
292 .map = { 300 .map = {
293 /* PM PS SM SS MAP */ 301 /* PM PS SM SS MAP */
294 { P0, NA, P1, NA }, /* 000b */ 302 { P0, NA, P1, NA }, /* 000b */
@@ -302,8 +310,10 @@ static struct piix_map_db ich5_map_db = {
302 }, 310 },
303}; 311};
304 312
305static struct piix_map_db ich6_map_db = { 313static const struct piix_map_db ich6_map_db = {
306 .mask = 0x3, 314 .mask = 0x3,
315 .port_enable = 0xf,
316 .present_shift = 4,
307 .map = { 317 .map = {
308 /* PM PS SM SS MAP */ 318 /* PM PS SM SS MAP */
309 { P0, P2, P1, P3 }, /* 00b */ 319 { P0, P2, P1, P3 }, /* 00b */
@@ -313,8 +323,10 @@ static struct piix_map_db ich6_map_db = {
313 }, 323 },
314}; 324};
315 325
316static struct piix_map_db ich6m_map_db = { 326static const struct piix_map_db ich6m_map_db = {
317 .mask = 0x3, 327 .mask = 0x3,
328 .port_enable = 0x5,
329 .present_shift = 4,
318 .map = { 330 .map = {
319 /* PM PS SM SS MAP */ 331 /* PM PS SM SS MAP */
320 { P0, P2, RV, RV }, /* 00b */ 332 { P0, P2, RV, RV }, /* 00b */
@@ -324,6 +336,28 @@ static struct piix_map_db ich6m_map_db = {
324 }, 336 },
325}; 337};
326 338
339static const struct piix_map_db ich8_map_db = {
340 .mask = 0x3,
341 .port_enable = 0x3,
342 .present_shift = 8,
343 .map = {
344 /* PM PS SM SS MAP */
345 { P0, NA, P1, NA }, /* 00b (hardwired) */
346 { RV, RV, RV, RV },
347 { RV, RV, RV, RV }, /* 10b (never) */
348 { RV, RV, RV, RV },
349 },
350};
351
352static const struct piix_map_db *piix_map_db_table[] = {
353 [ich5_sata] = &ich5_map_db,
354 [esb_sata] = &ich5_map_db,
355 [ich6_sata] = &ich6_map_db,
356 [ich6_sata_ahci] = &ich6_map_db,
357 [ich6m_sata_ahci] = &ich6m_map_db,
358 [ich8_sata_ahci] = &ich8_map_db,
359};
360
327static struct ata_port_info piix_port_info[] = { 361static struct ata_port_info piix_port_info[] = {
328 /* piix4_pata */ 362 /* piix4_pata */
329 { 363 {
@@ -356,63 +390,69 @@ static struct ata_port_info piix_port_info[] = {
356 /* ich5_sata */ 390 /* ich5_sata */
357 { 391 {
358 .sht = &piix_sht, 392 .sht = &piix_sht,
359 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED | 393 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR,
360 PIIX_FLAG_CHECKINTR,
361 .pio_mask = 0x1f, /* pio0-4 */ 394 .pio_mask = 0x1f, /* pio0-4 */
362 .mwdma_mask = 0x07, /* mwdma0-2 */ 395 .mwdma_mask = 0x07, /* mwdma0-2 */
363 .udma_mask = 0x7f, /* udma0-6 */ 396 .udma_mask = 0x7f, /* udma0-6 */
364 .port_ops = &piix_sata_ops, 397 .port_ops = &piix_sata_ops,
365 .private_data = &ich5_map_db,
366 }, 398 },
367 399
368 /* i6300esb_sata */ 400 /* i6300esb_sata */
369 { 401 {
370 .sht = &piix_sht, 402 .sht = &piix_sht,
371 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED | 403 .host_flags = ATA_FLAG_SATA |
372 PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS, 404 PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS,
373 .pio_mask = 0x1f, /* pio0-4 */ 405 .pio_mask = 0x1f, /* pio0-4 */
374 .mwdma_mask = 0x07, /* mwdma0-2 */ 406 .mwdma_mask = 0x07, /* mwdma0-2 */
375 .udma_mask = 0x7f, /* udma0-6 */ 407 .udma_mask = 0x7f, /* udma0-6 */
376 .port_ops = &piix_sata_ops, 408 .port_ops = &piix_sata_ops,
377 .private_data = &ich5_map_db,
378 }, 409 },
379 410
380 /* ich6_sata */ 411 /* ich6_sata */
381 { 412 {
382 .sht = &piix_sht, 413 .sht = &piix_sht,
383 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 | 414 .host_flags = ATA_FLAG_SATA |
384 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR, 415 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR,
385 .pio_mask = 0x1f, /* pio0-4 */ 416 .pio_mask = 0x1f, /* pio0-4 */
386 .mwdma_mask = 0x07, /* mwdma0-2 */ 417 .mwdma_mask = 0x07, /* mwdma0-2 */
387 .udma_mask = 0x7f, /* udma0-6 */ 418 .udma_mask = 0x7f, /* udma0-6 */
388 .port_ops = &piix_sata_ops, 419 .port_ops = &piix_sata_ops,
389 .private_data = &ich6_map_db,
390 }, 420 },
391 421
392 /* ich6_sata_ahci */ 422 /* ich6_sata_ahci */
393 { 423 {
394 .sht = &piix_sht, 424 .sht = &piix_sht,
395 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 | 425 .host_flags = ATA_FLAG_SATA |
396 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | 426 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
397 PIIX_FLAG_AHCI, 427 PIIX_FLAG_AHCI,
398 .pio_mask = 0x1f, /* pio0-4 */ 428 .pio_mask = 0x1f, /* pio0-4 */
399 .mwdma_mask = 0x07, /* mwdma0-2 */ 429 .mwdma_mask = 0x07, /* mwdma0-2 */
400 .udma_mask = 0x7f, /* udma0-6 */ 430 .udma_mask = 0x7f, /* udma0-6 */
401 .port_ops = &piix_sata_ops, 431 .port_ops = &piix_sata_ops,
402 .private_data = &ich6_map_db,
403 }, 432 },
404 433
405 /* ich6m_sata_ahci */ 434 /* ich6m_sata_ahci */
406 { 435 {
407 .sht = &piix_sht, 436 .sht = &piix_sht,
408 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 | 437 .host_flags = ATA_FLAG_SATA |
438 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
439 PIIX_FLAG_AHCI,
440 .pio_mask = 0x1f, /* pio0-4 */
441 .mwdma_mask = 0x07, /* mwdma0-2 */
442 .udma_mask = 0x7f, /* udma0-6 */
443 .port_ops = &piix_sata_ops,
444 },
445
446 /* ich8_sata_ahci */
447 {
448 .sht = &piix_sht,
449 .host_flags = ATA_FLAG_SATA |
409 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | 450 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
410 PIIX_FLAG_AHCI, 451 PIIX_FLAG_AHCI,
411 .pio_mask = 0x1f, /* pio0-4 */ 452 .pio_mask = 0x1f, /* pio0-4 */
412 .mwdma_mask = 0x07, /* mwdma0-2 */ 453 .mwdma_mask = 0x07, /* mwdma0-2 */
413 .udma_mask = 0x7f, /* udma0-6 */ 454 .udma_mask = 0x7f, /* udma0-6 */
414 .port_ops = &piix_sata_ops, 455 .port_ops = &piix_sata_ops,
415 .private_data = &ich6m_map_db,
416 }, 456 },
417}; 457};
418 458
@@ -508,46 +548,29 @@ static void piix_pata_error_handler(struct ata_port *ap)
508static int piix_sata_prereset(struct ata_port *ap) 548static int piix_sata_prereset(struct ata_port *ap)
509{ 549{
510 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 550 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
511 const unsigned int *map = ap->host_set->private_data; 551 struct piix_host_priv *hpriv = ap->host_set->private_data;
552 const unsigned int *map = hpriv->map;
512 int base = 2 * ap->hard_port_no; 553 int base = 2 * ap->hard_port_no;
513 unsigned int present_mask = 0; 554 unsigned int present = 0;
514 int port, i; 555 int port, i;
515 u8 pcs; 556 u16 pcs;
516 557
517 pci_read_config_byte(pdev, ICH5_PCS, &pcs); 558 pci_read_config_word(pdev, ICH5_PCS, &pcs);
518 DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base); 559 DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base);
519 560
520 /* enable all ports on this ap and wait for them to settle */
521 for (i = 0; i < 2; i++) {
522 port = map[base + i];
523 if (port >= 0)
524 pcs |= 1 << port;
525 }
526
527 pci_write_config_byte(pdev, ICH5_PCS, pcs);
528 msleep(100);
529
530 /* let's see which devices are present */
531 pci_read_config_byte(pdev, ICH5_PCS, &pcs);
532
533 for (i = 0; i < 2; i++) { 561 for (i = 0; i < 2; i++) {
534 port = map[base + i]; 562 port = map[base + i];
535 if (port < 0) 563 if (port < 0)
536 continue; 564 continue;
537 if (ap->flags & PIIX_FLAG_IGNORE_PCS || pcs & 1 << (4 + port)) 565 if ((ap->flags & PIIX_FLAG_IGNORE_PCS) ||
538 present_mask |= 1 << i; 566 (pcs & 1 << (hpriv->map_db->present_shift + port)))
539 else 567 present = 1;
540 pcs &= ~(1 << port);
541 } 568 }
542 569
543 /* disable offline ports on non-AHCI controllers */
544 if (!(ap->flags & PIIX_FLAG_AHCI))
545 pci_write_config_byte(pdev, ICH5_PCS, pcs);
546
 547 DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n", 570 DPRINTK("ata%u: LEAVE, pcs=0x%x present=%d\n",
 548 ap->id, pcs, present_mask); 571 ap->id, pcs, present);
549 572
550 if (!present_mask) { 573 if (!present) {
551 ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n"); 574 ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n");
552 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK; 575 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
553 return 0; 576 return 0;
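
The rewritten piix_sata_prereset() above reads the PCS register once and tests per-port present bits through the controller description instead of enabling and disabling ports around every reset. A minimal sketch of the host-private data it dereferences follows; the field names mirror the hunks above, but the exact layout is an assumption, not taken from the full ata_piix.c:

	/* sketch only -- layout assumed from the hunks above */
	struct piix_map_db {
		const u16 port_enable;		/* PCS bits set once at init */
		const int present_shift;	/* first port-present bit in PCS */
		const unsigned int map[][4];	/* MAP value -> port layout */
	};

	struct piix_host_priv {
		const unsigned int *map;		/* row chosen from map_db->map */
		const struct piix_map_db *map_db;	/* controller description */
	};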
@@ -761,10 +784,27 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
761 return no_piix_dma; 784 return no_piix_dma;
762} 785}
763 786
787static void __devinit piix_init_pcs(struct pci_dev *pdev,
788 const struct piix_map_db *map_db)
789{
790 u16 pcs, new_pcs;
791
792 pci_read_config_word(pdev, ICH5_PCS, &pcs);
793
794 new_pcs = pcs | map_db->port_enable;
795
796 if (new_pcs != pcs) {
797 DPRINTK("updating PCS from 0x%x to 0x%x\n", pcs, new_pcs);
798 pci_write_config_word(pdev, ICH5_PCS, new_pcs);
799 msleep(150);
800 }
801}
802
764static void __devinit piix_init_sata_map(struct pci_dev *pdev, 803static void __devinit piix_init_sata_map(struct pci_dev *pdev,
765 struct ata_port_info *pinfo) 804 struct ata_port_info *pinfo,
805 const struct piix_map_db *map_db)
766{ 806{
767 struct piix_map_db *map_db = pinfo[0].private_data; 807 struct piix_host_priv *hpriv = pinfo[0].private_data;
768 const unsigned int *map; 808 const unsigned int *map;
769 int i, invalid_map = 0; 809 int i, invalid_map = 0;
770 u8 map_value; 810 u8 map_value;
@@ -805,8 +845,8 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
805 dev_printk(KERN_ERR, &pdev->dev, 845 dev_printk(KERN_ERR, &pdev->dev,
806 "invalid MAP value %u\n", map_value); 846 "invalid MAP value %u\n", map_value);
807 847
808 pinfo[0].private_data = (void *)map; 848 hpriv->map = map;
809 pinfo[1].private_data = (void *)map; 849 hpriv->map_db = map_db;
810} 850}
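
piix_init_sata_map() now records both the decoded map row and the controller's map_db in the shared piix_host_priv rather than overloading private_data with a bare pointer. The map_db itself is selected per controller in piix_init_one() (see the hunk further below) through a lookup table indexed by ent->driver_data; an illustrative, partial shape of that table, with index names assumed from the /* ... */ labels in piix_port_info[]:

	/* illustrative only -- index names and entries are assumptions */
	static const struct piix_map_db *piix_map_db_table[] = {
		[ich5_sata]	  = &ich5_map_db,
		[i6300esb_sata]	  = &ich5_map_db,
		[ich6_sata]	  = &ich6_map_db,
		[ich6_sata_ahci]  = &ich6_map_db,
		[ich6m_sata_ahci] = &ich6m_map_db,
	};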
811 851
812/** 852/**
@@ -829,6 +869,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
829 static int printed_version; 869 static int printed_version;
830 struct ata_port_info port_info[2]; 870 struct ata_port_info port_info[2];
831 struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] }; 871 struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] };
872 struct piix_host_priv *hpriv;
832 unsigned long host_flags; 873 unsigned long host_flags;
833 874
834 if (!printed_version++) 875 if (!printed_version++)
@@ -839,8 +880,14 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
839 if (!in_module_init) 880 if (!in_module_init)
840 return -ENODEV; 881 return -ENODEV;
841 882
883 hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
884 if (!hpriv)
885 return -ENOMEM;
886
842 port_info[0] = piix_port_info[ent->driver_data]; 887 port_info[0] = piix_port_info[ent->driver_data];
843 port_info[1] = piix_port_info[ent->driver_data]; 888 port_info[1] = piix_port_info[ent->driver_data];
889 port_info[0].private_data = hpriv;
890 port_info[1].private_data = hpriv;
844 891
845 host_flags = port_info[0].host_flags; 892 host_flags = port_info[0].host_flags;
846 893
@@ -855,8 +902,11 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
855 } 902 }
856 903
857 /* Initialize SATA map */ 904 /* Initialize SATA map */
858 if (host_flags & ATA_FLAG_SATA) 905 if (host_flags & ATA_FLAG_SATA) {
859 piix_init_sata_map(pdev, port_info); 906 piix_init_sata_map(pdev, port_info,
907 piix_map_db_table[ent->driver_data]);
908 piix_init_pcs(pdev, piix_map_db_table[ent->driver_data]);
909 }
860 910
861 /* On ICH5, some BIOSen disable the interrupt using the 911 /* On ICH5, some BIOSen disable the interrupt using the
862 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3. 912 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
@@ -879,6 +929,13 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
879 return ata_pci_init_one(pdev, ppinfo, 2); 929 return ata_pci_init_one(pdev, ppinfo, 2);
880} 930}
881 931
932static void piix_host_stop(struct ata_host_set *host_set)
933{
934 if (host_set->next == NULL)
935 kfree(host_set->private_data);
936 ata_host_stop(host_set);
937}
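
piix_init_one() hands the same kzalloc'd piix_host_priv to both port_info slots, so every port ends up sharing one allocation. The host_set->next check above is what keeps the kfree() to a single call; the note below spells out that reading of the hunk (an interpretation of this excerpt, not verified against the rest of libata):

	/* hpriv is shared: allocated once in piix_init_one() and referenced
	 * by both ports' private_data.  Freeing it only when host_set->next
	 * is NULL lets the last ->host_stop() in the chain release it
	 * exactly once before the generic ata_host_stop() teardown. */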
938
882static int __init piix_init(void) 939static int __init piix_init(void)
883{ 940{
884 int rc; 941 int rc;
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index 007a14e5c3fd..e397129c90d1 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -507,7 +507,7 @@ static __inline__ void initialize_SCp(Scsi_Cmnd *cmd)
507 */ 507 */
508 508
509 if (cmd->use_sg) { 509 if (cmd->use_sg) {
510 cmd->SCp.buffer = (struct scatterlist *) cmd->buffer; 510 cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
511 cmd->SCp.buffers_residual = cmd->use_sg - 1; 511 cmd->SCp.buffers_residual = cmd->use_sg - 1;
512 cmd->SCp.ptr = (char *)page_address(cmd->SCp.buffer->page)+ 512 cmd->SCp.ptr = (char *)page_address(cmd->SCp.buffer->page)+
513 cmd->SCp.buffer->offset; 513 cmd->SCp.buffer->offset;
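
The one-line change above is the same old-field/new-field swap that recurs in esp.c and jazz_esp.c further down: when use_sg is set, the scatterlist is reached through cmd->request_buffer, the former cmd->buffer alias having gone away. A compact sketch of the resulting access pattern, using only fields visible in these hunks:

	if (cmd->use_sg) {
		struct scatterlist *sg =
			(struct scatterlist *) cmd->request_buffer;

		cmd->SCp.buffer = sg;
		cmd->SCp.buffers_residual = cmd->use_sg - 1;
		cmd->SCp.ptr = (char *) page_address(sg->page) + sg->offset;
	}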
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index dddd2acce76f..61f6024b61ba 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -5,6 +5,7 @@
5 * Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002) 5 * Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002)
6 * by D. Gilbert and aeb (20020609) 6 * by D. Gilbert and aeb (20020609)
7 * Additions for SPC-3 T10/1416-D Rev 21 22 Sept 2004, D. Gilbert 20041025 7 * Additions for SPC-3 T10/1416-D Rev 21 22 Sept 2004, D. Gilbert 20041025
8 * Update to SPC-4 T10/1713-D Rev 5a, 14 June 2006, D. Gilbert 20060702
8 */ 9 */
9 10
10#include <linux/blkdev.h> 11#include <linux/blkdev.h>
@@ -36,55 +37,56 @@ static const char * cdb_byte0_names[] = {
36/* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense", 37/* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense",
37/* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL, 38/* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL,
38 "Reasssign Blocks", 39 "Reasssign Blocks",
39/* 08-0d */ "Read (6)", NULL, "Write (6)", "Seek (6)", NULL, NULL, 40/* 08-0d */ "Read(6)", NULL, "Write(6)", "Seek(6)", NULL, NULL,
40/* 0e-12 */ NULL, "Read Reverse", "Write Filemarks", "Space", "Inquiry", 41/* 0e-12 */ NULL, "Read Reverse", "Write Filemarks", "Space", "Inquiry",
41/* 13-16 */ "Verify (6)", "Recover Buffered Data", "Mode Select (6)", 42/* 13-16 */ "Verify(6)", "Recover Buffered Data", "Mode Select(6)",
42 "Reserve (6)", 43 "Reserve(6)",
43/* 17-1a */ "Release (6)", "Copy", "Erase", "Mode Sense (6)", 44/* 17-1a */ "Release(6)", "Copy", "Erase", "Mode Sense(6)",
44/* 1b-1d */ "Start/Stop Unit", "Receive Diagnostic", "Send Diagnostic", 45/* 1b-1d */ "Start/Stop Unit", "Receive Diagnostic", "Send Diagnostic",
45/* 1e-1f */ "Prevent/Allow Medium Removal", NULL, 46/* 1e-1f */ "Prevent/Allow Medium Removal", NULL,
46/* 20-22 */ NULL, NULL, NULL, 47/* 20-22 */ NULL, NULL, NULL,
47/* 23-28 */ "Read Format Capacities", "Set Window", 48/* 23-28 */ "Read Format Capacities", "Set Window",
48 "Read Capacity (10)", NULL, NULL, "Read (10)", 49 "Read Capacity(10)", NULL, NULL, "Read(10)",
49/* 29-2d */ "Read Generation", "Write (10)", "Seek (10)", "Erase (10)", 50/* 29-2d */ "Read Generation", "Write(10)", "Seek(10)", "Erase(10)",
50 "Read updated block", 51 "Read updated block",
51/* 2e-31 */ "Write Verify (10)", "Verify (10)", "Search High", "Search Equal", 52/* 2e-31 */ "Write Verify(10)", "Verify(10)", "Search High", "Search Equal",
52/* 32-34 */ "Search Low", "Set Limits", "Prefetch/Read Position", 53/* 32-34 */ "Search Low", "Set Limits", "Prefetch/Read Position",
53/* 35-37 */ "Synchronize Cache (10)", "Lock/Unlock Cache (10)", 54/* 35-37 */ "Synchronize Cache(10)", "Lock/Unlock Cache(10)",
54 "Read Defect Data(10)", 55 "Read Defect Data(10)",
55/* 38-3c */ "Medium Scan", "Compare", "Copy Verify", "Write Buffer", 56/* 38-3c */ "Medium Scan", "Compare", "Copy Verify", "Write Buffer",
56 "Read Buffer", 57 "Read Buffer",
57/* 3d-3f */ "Update Block", "Read Long (10)", "Write Long (10)", 58/* 3d-3f */ "Update Block", "Read Long(10)", "Write Long(10)",
58/* 40-41 */ "Change Definition", "Write Same (10)", 59/* 40-41 */ "Change Definition", "Write Same(10)",
59/* 42-48 */ "Read sub-channel", "Read TOC/PMA/ATIP", "Read density support", 60/* 42-48 */ "Read sub-channel", "Read TOC/PMA/ATIP", "Read density support",
60 "Play audio (10)", "Get configuration", "Play audio msf", 61 "Play audio(10)", "Get configuration", "Play audio msf",
61 "Play audio track/index", 62 "Play audio track/index",
62/* 49-4f */ "Play track relative (10)", "Get event status notification", 63/* 49-4f */ "Play track relative(10)", "Get event status notification",
63 "Pause/resume", "Log Select", "Log Sense", "Stop play/scan", 64 "Pause/resume", "Log Select", "Log Sense", "Stop play/scan",
64 NULL, 65 NULL,
65/* 50-55 */ "Xdwrite", "Xpwrite, Read disk info", "Xdread, Read track info", 66/* 50-55 */ "Xdwrite", "Xpwrite, Read disk info", "Xdread, Read track info",
66 "Reserve track", "Send OPC info", "Mode Select (10)", 67 "Reserve track", "Send OPC info", "Mode Select(10)",
67/* 56-5b */ "Reserve (10)", "Release (10)", "Repair track", "Read master cue", 68/* 56-5b */ "Reserve(10)", "Release(10)", "Repair track", "Read master cue",
68 "Mode Sense (10)", "Close track/session", 69 "Mode Sense(10)", "Close track/session",
69/* 5c-5f */ "Read buffer capacity", "Send cue sheet", "Persistent reserve in", 70/* 5c-5f */ "Read buffer capacity", "Send cue sheet", "Persistent reserve in",
70 "Persistent reserve out", 71 "Persistent reserve out",
71/* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 72/* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
72/* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 73/* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
73/* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 74/* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
74/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Variable length", 75/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Variable length",
75/* 80-84 */ "Xdwrite (16)", "Rebuild (16)", "Regenerate (16)", "Extended copy", 76/* 80-84 */ "Xdwrite(16)", "Rebuild(16)", "Regenerate(16)", "Extended copy",
76 "Receive copy results", 77 "Receive copy results",
77/* 85-89 */ "Memory Export In (16)", "Access control in", "Access control out", 78/* 85-89 */ "ATA command pass through(16)", "Access control in",
78 "Read (16)", "Memory Export Out (16)", 79 "Access control out", "Read(16)", "Memory Export Out(16)",
79/* 8a-8f */ "Write (16)", NULL, "Read attributes", "Write attributes", 80/* 8a-8f */ "Write(16)", NULL, "Read attributes", "Write attributes",
80 "Write and verify (16)", "Verify (16)", 81 "Write and verify(16)", "Verify(16)",
81/* 90-94 */ "Pre-fetch (16)", "Synchronize cache (16)", 82/* 90-94 */ "Pre-fetch(16)", "Synchronize cache(16)",
82 "Lock/unlock cache (16)", "Write same (16)", NULL, 83 "Lock/unlock cache(16)", "Write same(16)", NULL,
83/* 95-99 */ NULL, NULL, NULL, NULL, NULL, 84/* 95-99 */ NULL, NULL, NULL, NULL, NULL,
84/* 9a-9f */ NULL, NULL, NULL, NULL, "Service action in (16)", 85/* 9a-9f */ NULL, NULL, NULL, NULL, "Service action in(16)",
85 "Service action out (16)", 86 "Service action out(16)",
86/* a0-a5 */ "Report luns", "Blank", "Send event", "Maintenance in", 87/* a0-a5 */ "Report luns", "ATA command pass through(12)/Blank",
87 "Maintenance out", "Move medium/play audio(12)", 88 "Security protocol in", "Maintenance in", "Maintenance out",
89 "Move medium/play audio(12)",
88/* a6-a9 */ "Exchange medium", "Move medium attached", "Read(12)", 90/* a6-a9 */ "Exchange medium", "Move medium attached", "Read(12)",
89 "Play track relative(12)", 91 "Play track relative(12)",
90/* aa-ae */ "Write(12)", NULL, "Erase(12), Get Performance", 92/* aa-ae */ "Write(12)", NULL, "Erase(12), Get Performance",
@@ -92,12 +94,12 @@ static const char * cdb_byte0_names[] = {
92/* af-b1 */ "Verify(12)", "Search data high(12)", "Search data equal(12)", 94/* af-b1 */ "Verify(12)", "Search data high(12)", "Search data equal(12)",
93/* b2-b4 */ "Search data low(12)", "Set limits(12)", 95/* b2-b4 */ "Search data low(12)", "Set limits(12)",
94 "Read element status attached", 96 "Read element status attached",
95/* b5-b6 */ "Request volume element address", "Send volume tag, set streaming", 97/* b5-b6 */ "Security protocol out", "Send volume tag, set streaming",
96/* b7-b9 */ "Read defect data(12)", "Read element status", "Read CD msf", 98/* b7-b9 */ "Read defect data(12)", "Read element status", "Read CD msf",
97/* ba-bc */ "Redundancy group (in), Scan", 99/* ba-bc */ "Redundancy group (in), Scan",
98 "Redundancy group (out), Set cd-rom speed", "Spare in, Play cd", 100 "Redundancy group (out), Set cd-rom speed", "Spare (in), Play cd",
99/* bd-bf */ "Spare out, Mechanism status", "Volume set in, Read cd", 101/* bd-bf */ "Spare (out), Mechanism status", "Volume set (in), Read cd",
100 "Volume set out, Send DVD structure", 102 "Volume set (out), Send DVD structure",
101}; 103};
102 104
103struct value_name_pair { 105struct value_name_pair {
@@ -112,6 +114,7 @@ static const struct value_name_pair maint_in_arr[] = {
112 {0xc, "Report supported operation codes"}, 114 {0xc, "Report supported operation codes"},
113 {0xd, "Report supported task management functions"}, 115 {0xd, "Report supported task management functions"},
114 {0xe, "Report priority"}, 116 {0xe, "Report priority"},
117 {0xf, "Report timestamp"},
115}; 118};
116#define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr) 119#define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr)
117 120
@@ -120,6 +123,7 @@ static const struct value_name_pair maint_out_arr[] = {
120 {0xa, "Set target port groups"}, 123 {0xa, "Set target port groups"},
121 {0xb, "Change aliases"}, 124 {0xb, "Change aliases"},
122 {0xe, "Set priority"}, 125 {0xe, "Set priority"},
126 {0xe, "Set timestamp"},
123}; 127};
124#define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr) 128#define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr)
125 129
@@ -427,6 +431,7 @@ static struct error_info additional[] =
427 {0x001A, "Rewind operation in progress"}, 431 {0x001A, "Rewind operation in progress"},
428 {0x001B, "Set capacity operation in progress"}, 432 {0x001B, "Set capacity operation in progress"},
429 {0x001C, "Verify operation in progress"}, 433 {0x001C, "Verify operation in progress"},
434 {0x001D, "ATA pass through information available"},
430 435
431 {0x0100, "No index/sector signal"}, 436 {0x0100, "No index/sector signal"},
432 437
@@ -438,7 +443,7 @@ static struct error_info additional[] =
438 443
439 {0x0400, "Logical unit not ready, cause not reportable"}, 444 {0x0400, "Logical unit not ready, cause not reportable"},
440 {0x0401, "Logical unit is in process of becoming ready"}, 445 {0x0401, "Logical unit is in process of becoming ready"},
441 {0x0402, "Logical unit not ready, initializing cmd. required"}, 446 {0x0402, "Logical unit not ready, initializing command required"},
442 {0x0403, "Logical unit not ready, manual intervention required"}, 447 {0x0403, "Logical unit not ready, manual intervention required"},
443 {0x0404, "Logical unit not ready, format in progress"}, 448 {0x0404, "Logical unit not ready, format in progress"},
444 {0x0405, "Logical unit not ready, rebuild in progress"}, 449 {0x0405, "Logical unit not ready, rebuild in progress"},
@@ -478,6 +483,9 @@ static struct error_info additional[] =
478 {0x0B00, "Warning"}, 483 {0x0B00, "Warning"},
479 {0x0B01, "Warning - specified temperature exceeded"}, 484 {0x0B01, "Warning - specified temperature exceeded"},
480 {0x0B02, "Warning - enclosure degraded"}, 485 {0x0B02, "Warning - enclosure degraded"},
486 {0x0B03, "Warning - background self-test failed"},
487 {0x0B04, "Warning - background pre-scan detected medium error"},
488 {0x0B05, "Warning - background medium scan detected medium error"},
481 489
482 {0x0C00, "Write error"}, 490 {0x0C00, "Write error"},
483 {0x0C01, "Write error - recovered with auto reallocation"}, 491 {0x0C01, "Write error - recovered with auto reallocation"},
@@ -493,6 +501,7 @@ static struct error_info additional[] =
493 {0x0C0B, "Auxiliary memory write error"}, 501 {0x0C0B, "Auxiliary memory write error"},
494 {0x0C0C, "Write error - unexpected unsolicited data"}, 502 {0x0C0C, "Write error - unexpected unsolicited data"},
495 {0x0C0D, "Write error - not enough unsolicited data"}, 503 {0x0C0D, "Write error - not enough unsolicited data"},
504 {0x0C0F, "Defects in error window"},
496 505
497 {0x0D00, "Error detected by third party temporary initiator"}, 506 {0x0D00, "Error detected by third party temporary initiator"},
498 {0x0D01, "Third party device failure"}, 507 {0x0D01, "Third party device failure"},
@@ -504,11 +513,12 @@ static struct error_info additional[] =
504 {0x0E00, "Invalid information unit"}, 513 {0x0E00, "Invalid information unit"},
505 {0x0E01, "Information unit too short"}, 514 {0x0E01, "Information unit too short"},
506 {0x0E02, "Information unit too long"}, 515 {0x0E02, "Information unit too long"},
516 {0x0E03, "Invalid field in command information unit"},
507 517
508 {0x1000, "Id CRC or ECC error"}, 518 {0x1000, "Id CRC or ECC error"},
509 {0x1001, "Data block guard check failed"}, 519 {0x1001, "Logical block guard check failed"},
510 {0x1002, "Data block application tag check failed"}, 520 {0x1002, "Logical block application tag check failed"},
511 {0x1003, "Data block reference tag check failed"}, 521 {0x1003, "Logical block reference tag check failed"},
512 522
513 {0x1100, "Unrecovered read error"}, 523 {0x1100, "Unrecovered read error"},
514 {0x1101, "Read retries exhausted"}, 524 {0x1101, "Read retries exhausted"},
@@ -530,6 +540,7 @@ static struct error_info additional[] =
530 {0x1111, "Read error - loss of streaming"}, 540 {0x1111, "Read error - loss of streaming"},
531 {0x1112, "Auxiliary memory read error"}, 541 {0x1112, "Auxiliary memory read error"},
532 {0x1113, "Read error - failed retransmission request"}, 542 {0x1113, "Read error - failed retransmission request"},
543 {0x1114, "Read error - lba marked bad by application client"},
533 544
534 {0x1200, "Address mark not found for id field"}, 545 {0x1200, "Address mark not found for id field"},
535 546
@@ -610,11 +621,14 @@ static struct error_info additional[] =
610 {0x2100, "Logical block address out of range"}, 621 {0x2100, "Logical block address out of range"},
611 {0x2101, "Invalid element address"}, 622 {0x2101, "Invalid element address"},
612 {0x2102, "Invalid address for write"}, 623 {0x2102, "Invalid address for write"},
624 {0x2103, "Invalid write crossing layer jump"},
613 625
614 {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"}, 626 {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"},
615 627
616 {0x2400, "Invalid field in cdb"}, 628 {0x2400, "Invalid field in cdb"},
617 {0x2401, "CDB decryption error"}, 629 {0x2401, "CDB decryption error"},
630 {0x2402, "Obsolete"},
631 {0x2403, "Obsolete"},
618 {0x2404, "Security audit value frozen"}, 632 {0x2404, "Security audit value frozen"},
619 {0x2405, "Security working key frozen"}, 633 {0x2405, "Security working key frozen"},
620 {0x2406, "Nonce not unique"}, 634 {0x2406, "Nonce not unique"},
@@ -637,7 +651,10 @@ static struct error_info additional[] =
637 {0x260C, "Invalid operation for copy source or destination"}, 651 {0x260C, "Invalid operation for copy source or destination"},
638 {0x260D, "Copy segment granularity violation"}, 652 {0x260D, "Copy segment granularity violation"},
639 {0x260E, "Invalid parameter while port is enabled"}, 653 {0x260E, "Invalid parameter while port is enabled"},
640 {0x260F, "Invalid data-out buffer integrity"}, 654 {0x260F, "Invalid data-out buffer integrity check value"},
655 {0x2610, "Data decryption key fail limit reached"},
656 {0x2611, "Incomplete key-associated data set"},
657 {0x2612, "Vendor specific key reference not found"},
641 658
642 {0x2700, "Write protected"}, 659 {0x2700, "Write protected"},
643 {0x2701, "Hardware write protected"}, 660 {0x2701, "Hardware write protected"},
@@ -649,6 +666,7 @@ static struct error_info additional[] =
649 666
650 {0x2800, "Not ready to ready change, medium may have changed"}, 667 {0x2800, "Not ready to ready change, medium may have changed"},
651 {0x2801, "Import or export element accessed"}, 668 {0x2801, "Import or export element accessed"},
669 {0x2802, "Format-layer may have changed"},
652 670
653 {0x2900, "Power on, reset, or bus device reset occurred"}, 671 {0x2900, "Power on, reset, or bus device reset occurred"},
654 {0x2901, "Power on occurred"}, 672 {0x2901, "Power on occurred"},
@@ -669,6 +687,11 @@ static struct error_info additional[] =
669 {0x2A07, "Implicit asymmetric access state transition failed"}, 687 {0x2A07, "Implicit asymmetric access state transition failed"},
670 {0x2A08, "Priority changed"}, 688 {0x2A08, "Priority changed"},
671 {0x2A09, "Capacity data has changed"}, 689 {0x2A09, "Capacity data has changed"},
690 {0x2A10, "Timestamp changed"},
691 {0x2A11, "Data encryption parameters changed by another i_t nexus"},
692 {0x2A12, "Data encryption parameters changed by vendor specific "
693 "event"},
694 {0x2A13, "Data encryption key instance counter has changed"},
672 695
673 {0x2B00, "Copy cannot execute since host cannot disconnect"}, 696 {0x2B00, "Copy cannot execute since host cannot disconnect"},
674 697
@@ -690,6 +713,7 @@ static struct error_info additional[] =
690 {0x2E00, "Insufficient time for operation"}, 713 {0x2E00, "Insufficient time for operation"},
691 714
692 {0x2F00, "Commands cleared by another initiator"}, 715 {0x2F00, "Commands cleared by another initiator"},
716 {0x2F01, "Commands cleared by power loss notification"},
693 717
694 {0x3000, "Incompatible medium installed"}, 718 {0x3000, "Incompatible medium installed"},
695 {0x3001, "Cannot read medium - unknown format"}, 719 {0x3001, "Cannot read medium - unknown format"},
@@ -702,7 +726,8 @@ static struct error_info additional[] =
702 {0x3008, "Cannot write - application code mismatch"}, 726 {0x3008, "Cannot write - application code mismatch"},
703 {0x3009, "Current session not fixated for append"}, 727 {0x3009, "Current session not fixated for append"},
704 {0x300A, "Cleaning request rejected"}, 728 {0x300A, "Cleaning request rejected"},
705 {0x300C, "WORM medium, overwrite attempted"}, 729 {0x300C, "WORM medium - overwrite attempted"},
730 {0x300D, "WORM medium - integrity check"},
706 {0x3010, "Medium not formatted"}, 731 {0x3010, "Medium not formatted"},
707 732
708 {0x3100, "Medium format corrupted"}, 733 {0x3100, "Medium format corrupted"},
@@ -790,6 +815,9 @@ static struct error_info additional[] =
790 {0x3F0F, "Echo buffer overwritten"}, 815 {0x3F0F, "Echo buffer overwritten"},
791 {0x3F10, "Medium loadable"}, 816 {0x3F10, "Medium loadable"},
792 {0x3F11, "Medium auxiliary memory accessible"}, 817 {0x3F11, "Medium auxiliary memory accessible"},
818 {0x3F12, "iSCSI IP address added"},
819 {0x3F13, "iSCSI IP address removed"},
820 {0x3F14, "iSCSI IP address changed"},
793/* 821/*
794 * {0x40NN, "Ram failure"}, 822 * {0x40NN, "Ram failure"},
795 * {0x40NN, "Diagnostic failure on component nn"}, 823 * {0x40NN, "Diagnostic failure on component nn"},
@@ -799,6 +827,7 @@ static struct error_info additional[] =
799 {0x4300, "Message error"}, 827 {0x4300, "Message error"},
800 828
801 {0x4400, "Internal target failure"}, 829 {0x4400, "Internal target failure"},
830 {0x4471, "ATA device failed set features"},
802 831
803 {0x4500, "Select or reselect failure"}, 832 {0x4500, "Select or reselect failure"},
804 833
@@ -807,9 +836,10 @@ static struct error_info additional[] =
807 {0x4700, "Scsi parity error"}, 836 {0x4700, "Scsi parity error"},
808 {0x4701, "Data phase CRC error detected"}, 837 {0x4701, "Data phase CRC error detected"},
809 {0x4702, "Scsi parity error detected during st data phase"}, 838 {0x4702, "Scsi parity error detected during st data phase"},
810 {0x4703, "Information unit CRC error detected"}, 839 {0x4703, "Information unit iuCRC error detected"},
811 {0x4704, "Asynchronous information protection error detected"}, 840 {0x4704, "Asynchronous information protection error detected"},
812 {0x4705, "Protocol service CRC error"}, 841 {0x4705, "Protocol service CRC error"},
842 {0x4706, "Phy test function in progress"},
813 {0x477f, "Some commands cleared by iSCSI Protocol event"}, 843 {0x477f, "Some commands cleared by iSCSI Protocol event"},
814 844
815 {0x4800, "Initiator detected error message received"}, 845 {0x4800, "Initiator detected error message received"},
@@ -844,6 +874,8 @@ static struct error_info additional[] =
844 {0x5300, "Media load or eject failed"}, 874 {0x5300, "Media load or eject failed"},
845 {0x5301, "Unload tape failure"}, 875 {0x5301, "Unload tape failure"},
846 {0x5302, "Medium removal prevented"}, 876 {0x5302, "Medium removal prevented"},
877 {0x5303, "Medium removal prevented by data transfer element"},
878 {0x5304, "Medium thread or unthread failure"},
847 879
848 {0x5400, "Scsi to host system interface failure"}, 880 {0x5400, "Scsi to host system interface failure"},
849 881
@@ -855,6 +887,7 @@ static struct error_info additional[] =
855 {0x5505, "Insufficient access control resources"}, 887 {0x5505, "Insufficient access control resources"},
856 {0x5506, "Auxiliary memory out of space"}, 888 {0x5506, "Auxiliary memory out of space"},
857 {0x5507, "Quota error"}, 889 {0x5507, "Quota error"},
890 {0x5508, "Maximum number of supplemental decryption keys exceeded"},
858 891
859 {0x5700, "Unable to recover table-of-contents"}, 892 {0x5700, "Unable to recover table-of-contents"},
860 893
@@ -1004,6 +1037,7 @@ static struct error_info additional[] =
1004 {0x6708, "Assign failure occurred"}, 1037 {0x6708, "Assign failure occurred"},
1005 {0x6709, "Multiply assigned logical unit"}, 1038 {0x6709, "Multiply assigned logical unit"},
1006 {0x670A, "Set target port groups command failed"}, 1039 {0x670A, "Set target port groups command failed"},
1040 {0x670B, "ATA device feature not enabled"},
1007 1041
1008 {0x6800, "Logical unit not configured"}, 1042 {0x6800, "Logical unit not configured"},
1009 1043
@@ -1030,6 +1064,8 @@ static struct error_info additional[] =
1030 {0x6F03, "Read of scrambled sector without authentication"}, 1064 {0x6F03, "Read of scrambled sector without authentication"},
1031 {0x6F04, "Media region code is mismatched to logical unit region"}, 1065 {0x6F04, "Media region code is mismatched to logical unit region"},
1032 {0x6F05, "Drive region must be permanent/region reset count error"}, 1066 {0x6F05, "Drive region must be permanent/region reset count error"},
1067 {0x6F06, "Insufficient block count for binding nonce recording"},
1068 {0x6F07, "Conflict in binding nonce recording"},
1033/* 1069/*
1034 * {0x70NN, "Decompression exception short algorithm id of nn"}, 1070 * {0x70NN, "Decompression exception short algorithm id of nn"},
1035 */ 1071 */
@@ -1041,6 +1077,8 @@ static struct error_info additional[] =
1041 {0x7203, "Session fixation error - incomplete track in session"}, 1077 {0x7203, "Session fixation error - incomplete track in session"},
1042 {0x7204, "Empty or partially written reserved track"}, 1078 {0x7204, "Empty or partially written reserved track"},
1043 {0x7205, "No more track reservations allowed"}, 1079 {0x7205, "No more track reservations allowed"},
1080 {0x7206, "RMZ extension is not allowed"},
1081 {0x7207, "No more test zone extensions are allowed"},
1044 1082
1045 {0x7300, "Cd control error"}, 1083 {0x7300, "Cd control error"},
1046 {0x7301, "Power calibration area almost full"}, 1084 {0x7301, "Power calibration area almost full"},
@@ -1049,6 +1087,18 @@ static struct error_info additional[] =
1049 {0x7304, "Program memory area update failure"}, 1087 {0x7304, "Program memory area update failure"},
1050 {0x7305, "Program memory area is full"}, 1088 {0x7305, "Program memory area is full"},
1051 {0x7306, "RMA/PMA is almost full"}, 1089 {0x7306, "RMA/PMA is almost full"},
1090 {0x7310, "Current power calibration area almost full"},
1091 {0x7311, "Current power calibration area is full"},
1092 {0x7317, "RDZ is full"},
1093
1094 {0x7400, "Security error"},
1095 {0x7401, "Unable to decrypt data"},
1096 {0x7402, "Unencrypted data encountered while decrypting"},
1097 {0x7403, "Incorrect data encryption key"},
1098 {0x7404, "Cryptographic integrity validation failed"},
1099 {0x7405, "Error decrypting data"},
1100 {0x7471, "Logical unit access not authorized"},
1101
1052 {0, NULL} 1102 {0, NULL}
1053}; 1103};
1054 1104
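
Each additional[] entry keys an ASC/ASCQ pair to its sense-code description, and the table is terminated by the {0, NULL} sentinel above. A sketch of a linear lookup over it, assuming the conventional (asc << 8) | ascq packing and a code12/text field pair (neither field name appears in this excerpt):

	static const char *additional_text(unsigned char asc, unsigned char ascq)
	{
		int i, code = (asc << 8) | ascq;

		for (i = 0; additional[i].text; i++)
			if (additional[i].code12 == code)
				return additional[i].text;
		return NULL;	/* unknown or vendor-specific combination */
	}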
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c
index 10573c24a50b..98bd22714d0d 100644
--- a/drivers/scsi/esp.c
+++ b/drivers/scsi/esp.c
@@ -1397,7 +1397,7 @@ static void esp_get_dmabufs(struct esp *esp, struct scsi_cmnd *sp)
1397 sp->SCp.ptr = NULL; 1397 sp->SCp.ptr = NULL;
1398 } 1398 }
1399 } else { 1399 } else {
1400 sp->SCp.buffer = (struct scatterlist *) sp->buffer; 1400 sp->SCp.buffer = (struct scatterlist *) sp->request_buffer;
1401 sp->SCp.buffers_residual = sbus_map_sg(esp->sdev, 1401 sp->SCp.buffers_residual = sbus_map_sg(esp->sdev,
1402 sp->SCp.buffer, 1402 sp->SCp.buffer,
1403 sp->use_sg, 1403 sp->use_sg,
@@ -1410,7 +1410,7 @@ static void esp_get_dmabufs(struct esp *esp, struct scsi_cmnd *sp)
1410static void esp_release_dmabufs(struct esp *esp, struct scsi_cmnd *sp) 1410static void esp_release_dmabufs(struct esp *esp, struct scsi_cmnd *sp)
1411{ 1411{
1412 if (sp->use_sg) { 1412 if (sp->use_sg) {
1413 sbus_unmap_sg(esp->sdev, sp->buffer, sp->use_sg, 1413 sbus_unmap_sg(esp->sdev, sp->request_buffer, sp->use_sg,
1414 sp->sc_data_direction); 1414 sp->sc_data_direction);
1415 } else if (sp->request_bufflen) { 1415 } else if (sp->request_bufflen) {
1416 sbus_unmap_single(esp->sdev, 1416 sbus_unmap_single(esp->sdev,
@@ -2754,18 +2754,15 @@ static int esp_do_data_finale(struct esp *esp)
2754 */ 2754 */
2755static int esp_should_clear_sync(struct scsi_cmnd *sp) 2755static int esp_should_clear_sync(struct scsi_cmnd *sp)
2756{ 2756{
2757 u8 cmd1 = sp->cmnd[0]; 2757 u8 cmd = sp->cmnd[0];
2758 u8 cmd2 = sp->data_cmnd[0];
2759 2758
2760 /* These cases are for spinning up a disk and 2759 /* These cases are for spinning up a disk and
2761 * waiting for that spinup to complete. 2760 * waiting for that spinup to complete.
2762 */ 2761 */
2763 if (cmd1 == START_STOP || 2762 if (cmd == START_STOP)
2764 cmd2 == START_STOP)
2765 return 0; 2763 return 0;
2766 2764
2767 if (cmd1 == TEST_UNIT_READY || 2765 if (cmd == TEST_UNIT_READY)
2768 cmd2 == TEST_UNIT_READY)
2769 return 0; 2766 return 0;
2770 2767
2771 /* One more special case for SCSI tape drives, 2768 /* One more special case for SCSI tape drives,
@@ -2773,8 +2770,7 @@ static int esp_should_clear_sync(struct scsi_cmnd *sp)
2773 * completion of a rewind or tape load operation. 2770 * completion of a rewind or tape load operation.
2774 */ 2771 */
2775 if (sp->device->type == TYPE_TAPE) { 2772 if (sp->device->type == TYPE_TAPE) {
2776 if (cmd1 == MODE_SENSE || 2773 if (cmd == MODE_SENSE)
2777 cmd2 == MODE_SENSE)
2778 return 0; 2774 return 0;
2779 } 2775 }
2780 2776
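
With the removed data_cmnd mirror gone, the sync-negotiation clearing decision keys off the single live command byte. Read end to end, the helper reduces to roughly the following; the trailing return falls outside the hunk and is assumed from context:

	static int esp_should_clear_sync(struct scsi_cmnd *sp)
	{
		u8 cmd = sp->cmnd[0];

		/* spin-up / ready polling must not renegotiate sync */
		if (cmd == START_STOP || cmd == TEST_UNIT_READY)
			return 0;

		/* tape drives poll rewind/load completion with MODE SENSE */
		if (sp->device->type == TYPE_TAPE && cmd == MODE_SENSE)
			return 0;

		return 1;	/* assumed: everything else clears sync */
	}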
diff --git a/drivers/scsi/ibmvscsi/iseries_vscsi.c b/drivers/scsi/ibmvscsi/iseries_vscsi.c
index 7eed0b098171..6aeb5f003c3c 100644
--- a/drivers/scsi/ibmvscsi/iseries_vscsi.c
+++ b/drivers/scsi/ibmvscsi/iseries_vscsi.c
@@ -81,7 +81,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
81 int rc; 81 int rc;
82 82
83 single_host_data = hostdata; 83 single_host_data = hostdata;
84 rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, 0); 84 rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, max_requests);
85 if (rc < 0) { 85 if (rc < 0) {
86 printk("viopath_open failed with rc %d in open_event_path\n", 86 printk("viopath_open failed with rc %d in open_event_path\n",
87 rc); 87 rc);
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index 242b8873b333..ed22b96580c6 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -238,6 +238,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
238 if (rc == 2) { 238 if (rc == 2) {
239 /* Adapter is good, but other end is not ready */ 239 /* Adapter is good, but other end is not ready */
240 printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n"); 240 printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n");
241 retrc = 0;
241 } else if (rc != 0) { 242 } else if (rc != 0) {
242 printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc); 243 printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc);
243 goto reg_crq_failed; 244 goto reg_crq_failed;
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 3fd8a96f2af3..bfac4441d89f 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -257,7 +257,7 @@ static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp)
257static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp) 257static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp)
258{ 258{
259 int sz = sp->use_sg - 1; 259 int sz = sp->use_sg - 1;
260 struct scatterlist *sg = (struct scatterlist *)sp->buffer; 260 struct scatterlist *sg = (struct scatterlist *)sp->request_buffer;
261 261
262 while(sz >= 0) { 262 while(sz >= 0) {
263 vdma_free(sg[sz].dma_address); 263 vdma_free(sg[sz].dma_address);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index f81691fcf177..d44f9aac6b8f 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -21,10 +21,12 @@
21 21
22struct lpfc_sli2_slim; 22struct lpfc_sli2_slim;
23 23
24#define LPFC_MAX_TARGET 256 /* max targets supported */
25#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els req */
26#define LPFC_MAX_NS_RETRY 3 /* max NameServer retries */
27 24
25#define LPFC_MAX_TARGET 256 /* max number of targets supported */
26#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els
27 requests */
28#define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact
29 the NameServer before giving up. */
28#define LPFC_DFT_HBA_Q_DEPTH 2048 /* max cmds per hba */ 30#define LPFC_DFT_HBA_Q_DEPTH 2048 /* max cmds per hba */
29#define LPFC_LC_HBA_Q_DEPTH 1024 /* max cmds per low cost hba */ 31#define LPFC_LC_HBA_Q_DEPTH 1024 /* max cmds per low cost hba */
30#define LPFC_LP101_HBA_Q_DEPTH 128 /* max cmds per low cost hba */ 32#define LPFC_LP101_HBA_Q_DEPTH 128 /* max cmds per low cost hba */
@@ -41,7 +43,6 @@ struct lpfc_sli2_slim;
41 (( (u64)(high)<<16 ) << 16)|( (u64)(low)))) 43 (( (u64)(high)<<16 ) << 16)|( (u64)(low))))
42/* Provide maximum configuration definitions. */ 44/* Provide maximum configuration definitions. */
43#define LPFC_DRVR_TIMEOUT 16 /* driver iocb timeout value in sec */ 45#define LPFC_DRVR_TIMEOUT 16 /* driver iocb timeout value in sec */
44#define MAX_FCP_TARGET 256 /* max num of FCP targets supported */
45#define FC_MAX_ADPTMSG 64 46#define FC_MAX_ADPTMSG 64
46 47
47#define MAX_HBAEVT 32 48#define MAX_HBAEVT 32
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index b62a72dfab29..5c68cdd8736f 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -219,9 +219,19 @@ lpfc_issue_lip(struct Scsi_Host *host)
219 return -ENOMEM; 219 return -ENOMEM;
220 220
221 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 221 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
222 lpfc_init_link(phba, pmboxq, phba->cfg_topology, phba->cfg_link_speed); 222 pmboxq->mb.mbxCommand = MBX_DOWN_LINK;
223 pmboxq->mb.mbxOwner = OWN_HOST;
224
223 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 225 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
224 226
227 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) {
228 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
229 lpfc_init_link(phba, pmboxq, phba->cfg_topology,
230 phba->cfg_link_speed);
231 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
232 phba->fc_ratov * 2);
233 }
234
225 if (mbxstatus == MBX_TIMEOUT) 235 if (mbxstatus == MBX_TIMEOUT)
226 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 236 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
227 else 237 else
@@ -233,51 +243,53 @@ lpfc_issue_lip(struct Scsi_Host *host)
233 return 0; 243 return 0;
234} 244}
235 245
236static ssize_t 246static int
237lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf) 247lpfc_selective_reset(struct lpfc_hba *phba)
238{ 248{
239 struct Scsi_Host *host = class_to_shost(cdev); 249 struct completion online_compl;
240 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 250 int status = 0;
241 return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); 251
252 init_completion(&online_compl);
253 lpfc_workq_post_event(phba, &status, &online_compl,
254 LPFC_EVT_OFFLINE);
255 wait_for_completion(&online_compl);
256
257 if (status != 0)
258 return -EIO;
259
260 init_completion(&online_compl);
261 lpfc_workq_post_event(phba, &status, &online_compl,
262 LPFC_EVT_ONLINE);
263 wait_for_completion(&online_compl);
264
265 if (status != 0)
266 return -EIO;
267
268 return 0;
242} 269}
243 270
244static ssize_t 271static ssize_t
245lpfc_board_online_show(struct class_device *cdev, char *buf) 272lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count)
246{ 273{
247 struct Scsi_Host *host = class_to_shost(cdev); 274 struct Scsi_Host *host = class_to_shost(cdev);
248 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 275 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
276 int status = -EINVAL;
249 277
250 if (phba->fc_flag & FC_OFFLINE_MODE) 278 if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
251 return snprintf(buf, PAGE_SIZE, "0\n"); 279 status = lpfc_selective_reset(phba);
280
281 if (status == 0)
282 return strlen(buf);
252 else 283 else
253 return snprintf(buf, PAGE_SIZE, "1\n"); 284 return status;
254} 285}
255 286
256static ssize_t 287static ssize_t
257lpfc_board_online_store(struct class_device *cdev, const char *buf, 288lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf)
258 size_t count)
259{ 289{
260 struct Scsi_Host *host = class_to_shost(cdev); 290 struct Scsi_Host *host = class_to_shost(cdev);
261 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 291 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
262 struct completion online_compl; 292 return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
263 int val=0, status=0;
264
265 if (sscanf(buf, "%d", &val) != 1)
266 return -EINVAL;
267
268 init_completion(&online_compl);
269
270 if (val)
271 lpfc_workq_post_event(phba, &status, &online_compl,
272 LPFC_EVT_ONLINE);
273 else
274 lpfc_workq_post_event(phba, &status, &online_compl,
275 LPFC_EVT_OFFLINE);
276 wait_for_completion(&online_compl);
277 if (!status)
278 return strlen(buf);
279 else
280 return -EIO;
281} 293}
282 294
283static ssize_t 295static ssize_t
@@ -532,10 +544,9 @@ static CLASS_DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show,
532 NULL); 544 NULL);
533static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show, 545static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,
534 NULL); 546 NULL);
535static CLASS_DEVICE_ATTR(board_online, S_IRUGO | S_IWUSR,
536 lpfc_board_online_show, lpfc_board_online_store);
537static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, 547static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
538 lpfc_board_mode_show, lpfc_board_mode_store); 548 lpfc_board_mode_show, lpfc_board_mode_store);
549static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
539 550
540static int lpfc_poll = 0; 551static int lpfc_poll = 0;
541module_param(lpfc_poll, int, 0); 552module_param(lpfc_poll, int, 0);
@@ -695,12 +706,12 @@ LPFC_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
695 "during discovery"); 706 "during discovery");
696 707
697/* 708/*
698# lpfc_max_luns: maximum number of LUNs per target driver will support 709# lpfc_max_luns: maximum allowed LUN.
699# Value range is [1,32768]. Default value is 256. 710# Value range is [0,65535]. Default value is 255.
 700# NOTE: The SCSI layer will scan each target for this many luns 711# NOTE: The SCSI layer might probe all allowed LUNs on some old targets.
701*/ 712*/
702LPFC_ATTR_R(max_luns, 256, 1, 32768, 713LPFC_ATTR_R(max_luns, 255, 0, 65535,
703 "Maximum number of LUNs per target driver will support"); 714 "Maximum allowed LUN");
704 715
705/* 716/*
 706# lpfc_poll_tmo: Milliseconds driver will wait between polling FCP ring. 717# lpfc_poll_tmo: Milliseconds driver will wait between polling FCP ring.
@@ -739,8 +750,8 @@ struct class_device_attribute *lpfc_host_attrs[] = {
739 &class_device_attr_lpfc_max_luns, 750 &class_device_attr_lpfc_max_luns,
740 &class_device_attr_nport_evt_cnt, 751 &class_device_attr_nport_evt_cnt,
741 &class_device_attr_management_version, 752 &class_device_attr_management_version,
742 &class_device_attr_board_online,
743 &class_device_attr_board_mode, 753 &class_device_attr_board_mode,
754 &class_device_attr_issue_reset,
744 &class_device_attr_lpfc_poll, 755 &class_device_attr_lpfc_poll,
745 &class_device_attr_lpfc_poll_tmo, 756 &class_device_attr_lpfc_poll_tmo,
746 NULL, 757 NULL,
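
The board_online attribute is replaced by a write-only issue_reset attribute whose only recognised payload is the word "selective". An illustrative invocation, kept as a comment because the exact sysfs path depends on the host instance and is an assumption rather than something shown in this diff:

	/* illustrative usage (host number assumed):
	 *
	 *	echo selective > /sys/class/scsi_host/host0/issue_reset
	 *
	 * lpfc_issue_reset() compares the buffer against "selective"; any
	 * other string leaves status at -EINVAL and the write fails. */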
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index ee22173fce43..517e9e4dd461 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -147,6 +147,7 @@ int lpfc_sli_hba_setup(struct lpfc_hba *);
147int lpfc_sli_hba_down(struct lpfc_hba *); 147int lpfc_sli_hba_down(struct lpfc_hba *);
148int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 148int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
149int lpfc_sli_handle_mb_event(struct lpfc_hba *); 149int lpfc_sli_handle_mb_event(struct lpfc_hba *);
150int lpfc_sli_flush_mbox_queue(struct lpfc_hba *);
150int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, 151int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
151 struct lpfc_sli_ring *, uint32_t); 152 struct lpfc_sli_ring *, uint32_t);
152void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 153void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 4126fd87956f..b89f6cb641e6 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -648,33 +648,32 @@ lpfc_more_plogi(struct lpfc_hba * phba)
648} 648}
649 649
650static struct lpfc_nodelist * 650static struct lpfc_nodelist *
651lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 651lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_dmabuf *prsp,
652 struct lpfc_nodelist *ndlp) 652 struct lpfc_nodelist *ndlp)
653{ 653{
654 struct lpfc_nodelist *new_ndlp; 654 struct lpfc_nodelist *new_ndlp;
655 struct lpfc_dmabuf *pcmd, *prsp;
656 uint32_t *lp; 655 uint32_t *lp;
657 struct serv_parm *sp; 656 struct serv_parm *sp;
658 uint8_t name[sizeof (struct lpfc_name)]; 657 uint8_t name[sizeof (struct lpfc_name)];
659 uint32_t rc; 658 uint32_t rc;
660 659
661 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
662 prsp = (struct lpfc_dmabuf *) pcmd->list.next;
663 lp = (uint32_t *) prsp->virt; 660 lp = (uint32_t *) prsp->virt;
664 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); 661 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
662 memset(name, 0, sizeof (struct lpfc_name));
665 663
 666 /* Now we find out if the NPort we are logging into matches the WWPN 664 /* Now we find out if the NPort we are logging into matches the WWPN
667 * we have for that ndlp. If not, we have some work to do. 665 * we have for that ndlp. If not, we have some work to do.
668 */ 666 */
669 new_ndlp = lpfc_findnode_wwpn(phba, NLP_SEARCH_ALL, &sp->portName); 667 new_ndlp = lpfc_findnode_wwpn(phba, NLP_SEARCH_ALL, &sp->portName);
670 668
671 memset(name, 0, sizeof (struct lpfc_name)); 669 if (new_ndlp == ndlp)
672 rc = memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name));
673 if (!rc || (new_ndlp == ndlp)) {
674 return ndlp; 670 return ndlp;
675 }
676 671
677 if (!new_ndlp) { 672 if (!new_ndlp) {
673 rc =
674 memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name));
675 if (!rc)
676 return ndlp;
678 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); 677 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
679 if (!new_ndlp) 678 if (!new_ndlp)
680 return ndlp; 679 return ndlp;
@@ -683,17 +682,21 @@ lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
683 } 682 }
684 683
685 lpfc_unreg_rpi(phba, new_ndlp); 684 lpfc_unreg_rpi(phba, new_ndlp);
686 new_ndlp->nlp_prev_state = ndlp->nlp_state;
687 new_ndlp->nlp_DID = ndlp->nlp_DID; 685 new_ndlp->nlp_DID = ndlp->nlp_DID;
688 new_ndlp->nlp_state = NLP_STE_PLOGI_ISSUE; 686 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
689 lpfc_nlp_list(phba, new_ndlp, NLP_PLOGI_LIST); 687 new_ndlp->nlp_state = ndlp->nlp_state;
688 lpfc_nlp_list(phba, new_ndlp, ndlp->nlp_flag & NLP_LIST_MASK);
690 689
691 /* Move this back to NPR list */ 690 /* Move this back to NPR list */
692 lpfc_unreg_rpi(phba, ndlp); 691 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
693 ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */ 692 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
694 ndlp->nlp_state = NLP_STE_NPR_NODE; 693 }
695 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); 694 else {
696 695 lpfc_unreg_rpi(phba, ndlp);
696 ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */
697 ndlp->nlp_state = NLP_STE_NPR_NODE;
698 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
699 }
697 return new_ndlp; 700 return new_ndlp;
698} 701}
699 702
@@ -703,6 +706,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
703{ 706{
704 IOCB_t *irsp; 707 IOCB_t *irsp;
705 struct lpfc_nodelist *ndlp; 708 struct lpfc_nodelist *ndlp;
709 struct lpfc_dmabuf *prsp;
706 int disc, rc, did, type; 710 int disc, rc, did, type;
707 711
708 712
@@ -769,7 +773,10 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
769 } 773 }
770 } else { 774 } else {
771 /* Good status, call state machine */ 775 /* Good status, call state machine */
772 ndlp = lpfc_plogi_confirm_nport(phba, cmdiocb, ndlp); 776 prsp = list_entry(((struct lpfc_dmabuf *)
777 cmdiocb->context2)->list.next,
778 struct lpfc_dmabuf, list);
779 ndlp = lpfc_plogi_confirm_nport(phba, prsp, ndlp);
773 rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb, 780 rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
774 NLP_EVT_CMPL_PLOGI); 781 NLP_EVT_CMPL_PLOGI);
775 } 782 }
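
lpfc_plogi_confirm_nport() now takes the response dmabuf directly, so the PLOGI completion handler has to pull it off the command context itself. The command and response buffers hang off cmdiocb->context2 as a short lpfc_dmabuf list; a minimal sketch of that extraction, using the same types as the hunk above:

	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	struct lpfc_dmabuf *prsp = list_entry(pcmd->list.next,
					      struct lpfc_dmabuf, list);
	/* prsp->virt then holds the service parameters checked against
	 * the ndlp's recorded WWPN */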
@@ -3282,10 +3289,9 @@ lpfc_els_timeout_handler(struct lpfc_hba *phba)
3282 } else 3289 } else
3283 lpfc_sli_release_iocbq(phba, piocb); 3290 lpfc_sli_release_iocbq(phba, piocb);
3284 } 3291 }
3285 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) { 3292 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
3286 phba->els_tmofunc.expires = jiffies + HZ * timeout; 3293 mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout);
3287 add_timer(&phba->els_tmofunc); 3294
3288 }
3289 spin_unlock_irq(phba->host->host_lock); 3295 spin_unlock_irq(phba->host->host_lock);
3290} 3296}
3291 3297
@@ -3442,6 +3448,8 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3442 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) { 3448 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
3443 ndlp->nlp_type |= NLP_FABRIC; 3449 ndlp->nlp_type |= NLP_FABRIC;
3444 } 3450 }
3451 ndlp->nlp_state = NLP_STE_UNUSED_NODE;
3452 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
3445 } 3453 }
3446 3454
3447 phba->fc_stat.elsRcvFrame++; 3455 phba->fc_stat.elsRcvFrame++;
@@ -3463,13 +3471,14 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3463 rjt_err = 1; 3471 rjt_err = 1;
3464 break; 3472 break;
3465 } 3473 }
3474 ndlp = lpfc_plogi_confirm_nport(phba, mp, ndlp);
3466 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI); 3475 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI);
3467 break; 3476 break;
3468 case ELS_CMD_FLOGI: 3477 case ELS_CMD_FLOGI:
3469 phba->fc_stat.elsRcvFLOGI++; 3478 phba->fc_stat.elsRcvFLOGI++;
3470 lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode); 3479 lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode);
3471 if (newnode) { 3480 if (newnode) {
3472 mempool_free( ndlp, phba->nlp_mem_pool); 3481 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3473 } 3482 }
3474 break; 3483 break;
3475 case ELS_CMD_LOGO: 3484 case ELS_CMD_LOGO:
@@ -3492,7 +3501,7 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3492 phba->fc_stat.elsRcvRSCN++; 3501 phba->fc_stat.elsRcvRSCN++;
3493 lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode); 3502 lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode);
3494 if (newnode) { 3503 if (newnode) {
3495 mempool_free( ndlp, phba->nlp_mem_pool); 3504 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3496 } 3505 }
3497 break; 3506 break;
3498 case ELS_CMD_ADISC: 3507 case ELS_CMD_ADISC:
@@ -3535,28 +3544,28 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3535 phba->fc_stat.elsRcvLIRR++; 3544 phba->fc_stat.elsRcvLIRR++;
3536 lpfc_els_rcv_lirr(phba, elsiocb, ndlp); 3545 lpfc_els_rcv_lirr(phba, elsiocb, ndlp);
3537 if (newnode) { 3546 if (newnode) {
3538 mempool_free( ndlp, phba->nlp_mem_pool); 3547 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3539 } 3548 }
3540 break; 3549 break;
3541 case ELS_CMD_RPS: 3550 case ELS_CMD_RPS:
3542 phba->fc_stat.elsRcvRPS++; 3551 phba->fc_stat.elsRcvRPS++;
3543 lpfc_els_rcv_rps(phba, elsiocb, ndlp); 3552 lpfc_els_rcv_rps(phba, elsiocb, ndlp);
3544 if (newnode) { 3553 if (newnode) {
3545 mempool_free( ndlp, phba->nlp_mem_pool); 3554 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3546 } 3555 }
3547 break; 3556 break;
3548 case ELS_CMD_RPL: 3557 case ELS_CMD_RPL:
3549 phba->fc_stat.elsRcvRPL++; 3558 phba->fc_stat.elsRcvRPL++;
3550 lpfc_els_rcv_rpl(phba, elsiocb, ndlp); 3559 lpfc_els_rcv_rpl(phba, elsiocb, ndlp);
3551 if (newnode) { 3560 if (newnode) {
3552 mempool_free( ndlp, phba->nlp_mem_pool); 3561 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3553 } 3562 }
3554 break; 3563 break;
3555 case ELS_CMD_RNID: 3564 case ELS_CMD_RNID:
3556 phba->fc_stat.elsRcvRNID++; 3565 phba->fc_stat.elsRcvRNID++;
3557 lpfc_els_rcv_rnid(phba, elsiocb, ndlp); 3566 lpfc_els_rcv_rnid(phba, elsiocb, ndlp);
3558 if (newnode) { 3567 if (newnode) {
3559 mempool_free( ndlp, phba->nlp_mem_pool); 3568 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3560 } 3569 }
3561 break; 3570 break;
3562 default: 3571 default:
@@ -3568,7 +3577,7 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
3568 "%d:0115 Unknown ELS command x%x received from " 3577 "%d:0115 Unknown ELS command x%x received from "
3569 "NPORT x%x\n", phba->brd_no, cmd, did); 3578 "NPORT x%x\n", phba->brd_no, cmd, did);
3570 if (newnode) { 3579 if (newnode) {
3571 mempool_free( ndlp, phba->nlp_mem_pool); 3580 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
3572 } 3581 }
3573 break; 3582 break;
3574 } 3583 }
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index adb086009ae0..4d6cf990c4fc 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1084,7 +1084,7 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
1084 fc_remote_port_rolechg(rport, rport_ids.roles); 1084 fc_remote_port_rolechg(rport, rport_ids.roles);
1085 1085
1086 if ((rport->scsi_target_id != -1) && 1086 if ((rport->scsi_target_id != -1) &&
1087 (rport->scsi_target_id < MAX_FCP_TARGET)) { 1087 (rport->scsi_target_id < LPFC_MAX_TARGET)) {
1088 ndlp->nlp_sid = rport->scsi_target_id; 1088 ndlp->nlp_sid = rport->scsi_target_id;
1089 } 1089 }
1090 1090
@@ -1313,7 +1313,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1313 if ((rport_add == mapped) && 1313 if ((rport_add == mapped) &&
1314 ((!nlp->rport) || 1314 ((!nlp->rport) ||
1315 (nlp->rport->scsi_target_id == -1) || 1315 (nlp->rport->scsi_target_id == -1) ||
1316 (nlp->rport->scsi_target_id >= MAX_FCP_TARGET))) { 1316 (nlp->rport->scsi_target_id >= LPFC_MAX_TARGET))) {
1317 nlp->nlp_state = NLP_STE_UNMAPPED_NODE; 1317 nlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1318 spin_lock_irq(phba->host->host_lock); 1318 spin_lock_irq(phba->host->host_lock);
1319 nlp->nlp_flag |= NLP_TGT_NO_SCSIID; 1319 nlp->nlp_flag |= NLP_TGT_NO_SCSIID;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 81755a3f7c68..ef47b824cbed 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -71,6 +71,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
71 uint16_t offset = 0; 71 uint16_t offset = 0;
72 static char licensed[56] = 72 static char licensed[56] =
73 "key unlock for use with gnu public licensed code only\0"; 73 "key unlock for use with gnu public licensed code only\0";
74 static int init_key = 1;
74 75
75 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 76 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
76 if (!pmb) { 77 if (!pmb) {
@@ -82,10 +83,13 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
82 phba->hba_state = LPFC_INIT_MBX_CMDS; 83 phba->hba_state = LPFC_INIT_MBX_CMDS;
83 84
84 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 85 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
85 uint32_t *ptext = (uint32_t *) licensed; 86 if (init_key) {
87 uint32_t *ptext = (uint32_t *) licensed;
86 88
87 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 89 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
88 *ptext = cpu_to_be32(*ptext); 90 *ptext = cpu_to_be32(*ptext);
91 init_key = 0;
92 }
89 93
90 lpfc_read_nv(phba, pmb); 94 lpfc_read_nv(phba, pmb);
91 memset((char*)mb->un.varRDnvp.rsvd3, 0, 95 memset((char*)mb->un.varRDnvp.rsvd3, 0,
@@ -405,19 +409,26 @@ lpfc_config_port_post(struct lpfc_hba * phba)
405 } 409 }
406 /* MBOX buffer will be freed in mbox compl */ 410 /* MBOX buffer will be freed in mbox compl */
407 411
408 i = 0; 412 return (0);
413}
414
415static int
416lpfc_discovery_wait(struct lpfc_hba *phba)
417{
418 int i = 0;
419
409 while ((phba->hba_state != LPFC_HBA_READY) || 420 while ((phba->hba_state != LPFC_HBA_READY) ||
410 (phba->num_disc_nodes) || (phba->fc_prli_sent) || 421 (phba->num_disc_nodes) || (phba->fc_prli_sent) ||
411 ((phba->fc_map_cnt == 0) && (i<2)) || 422 ((phba->fc_map_cnt == 0) && (i<2)) ||
412 (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) { 423 (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE)) {
413 /* Check every second for 30 retries. */ 424 /* Check every second for 30 retries. */
414 i++; 425 i++;
415 if (i > 30) { 426 if (i > 30) {
416 break; 427 return -ETIMEDOUT;
417 } 428 }
418 if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) { 429 if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) {
419 /* The link is down. Set linkdown timeout */ 430 /* The link is down. Set linkdown timeout */
420 break; 431 return -ETIMEDOUT;
421 } 432 }
422 433
423 /* Delay for 1 second to give discovery time to complete. */ 434 /* Delay for 1 second to give discovery time to complete. */
@@ -425,12 +436,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
425 436
426 } 437 }
427 438
428 /* Since num_disc_nodes keys off of PLOGI, delay a bit to let 439 return 0;
429 * any potential PRLIs to flush thru the SLI sub-system.
430 */
431 msleep(50);
432
433 return (0);
434} 440}
435 441
436/************************************************************************/ 442/************************************************************************/
@@ -1339,7 +1345,8 @@ lpfc_offline(struct lpfc_hba * phba)
1339 struct lpfc_sli_ring *pring; 1345 struct lpfc_sli_ring *pring;
1340 struct lpfc_sli *psli; 1346 struct lpfc_sli *psli;
1341 unsigned long iflag; 1347 unsigned long iflag;
1342 int i = 0; 1348 int i;
1349 int cnt = 0;
1343 1350
1344 if (!phba) 1351 if (!phba)
1345 return 0; 1352 return 0;
@@ -1348,17 +1355,27 @@ lpfc_offline(struct lpfc_hba * phba)
1348 return 0; 1355 return 0;
1349 1356
1350 psli = &phba->sli; 1357 psli = &phba->sli;
1351 pring = &psli->ring[psli->fcp_ring];
1352 1358
1353 lpfc_linkdown(phba); 1359 lpfc_linkdown(phba);
1360 lpfc_sli_flush_mbox_queue(phba);
1354 1361
1355 /* The linkdown event takes 30 seconds to timeout. */ 1362 for (i = 0; i < psli->num_rings; i++) {
1356 while (pring->txcmplq_cnt) { 1363 pring = &psli->ring[i];
1357 mdelay(10); 1364 /* The linkdown event takes 30 seconds to timeout. */
1358 if (i++ > 3000) 1365 while (pring->txcmplq_cnt) {
1359 break; 1366 mdelay(10);
1367 if (cnt++ > 3000) {
1368 lpfc_printf_log(phba,
1369 KERN_WARNING, LOG_INIT,
1370 "%d:0466 Outstanding IO when "
1371 "bringing Adapter offline\n",
1372 phba->brd_no);
1373 break;
1374 }
1375 }
1360 } 1376 }
1361 1377
1378
1362 /* stop all timers associated with this hba */ 1379 /* stop all timers associated with this hba */
1363 lpfc_stop_timer(phba); 1380 lpfc_stop_timer(phba);
1364 phba->work_hba_events = 0; 1381 phba->work_hba_events = 0;
@@ -1639,6 +1656,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1639 goto out_free_irq; 1656 goto out_free_irq;
1640 } 1657 }
1641 1658
1659 lpfc_discovery_wait(phba);
1660
1642 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 1661 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1643 spin_lock_irq(phba->host->host_lock); 1662 spin_lock_irq(phba->host->host_lock);
1644 lpfc_poll_start_timer(phba); 1663 lpfc_poll_start_timer(phba);
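Note: the lpfc_init.c hunks above pull the post-init discovery wait out of lpfc_config_port_post() into a new lpfc_discovery_wait() that polls once per second and gives up after 30 tries with -ETIMEDOUT. A minimal standalone sketch of that bounded-poll shape follows; hba_discovery_done() and one_second_sleep() are hypothetical stand-ins for the driver's real state checks and msleep(1000), not lpfc symbols.

#include <errno.h>
#include <stdbool.h>

/* Hypothetical predicate standing in for the hba_state/num_disc_nodes/
 * fc_prli_sent checks in lpfc_discovery_wait(); true once discovery settles. */
extern bool hba_discovery_done(void);
extern void one_second_sleep(void);

static int discovery_wait_sketch(void)
{
        int i;

        for (i = 0; i < 30; i++) {              /* check every second, 30 tries */
                if (hba_discovery_done())
                        return 0;
                one_second_sleep();
        }
        return -ETIMEDOUT;                      /* probe path may ignore this, as it does here */
}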
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 07017658ac56..066292d3995a 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -133,6 +133,11 @@ lpfc_mem_free(struct lpfc_hba * phba)
133 133
134 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); 134 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
135 pci_pool_destroy(phba->lpfc_mbuf_pool); 135 pci_pool_destroy(phba->lpfc_mbuf_pool);
136
137 /* Free the iocb lookup array */
138 kfree(psli->iocbq_lookup);
139 psli->iocbq_lookup = NULL;
140
136} 141}
137 142
138void * 143void *
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 27d60ad897cd..bd0b0e293d63 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1110,6 +1110,17 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
1110 phba->brd_no, 1110 phba->brd_no,
1111 did, mb->mbxStatus, phba->hba_state); 1111 did, mb->mbxStatus, phba->hba_state);
1112 1112
1113 /*
1114 * If RegLogin failed due to lack of HBA resources do not
1115 * retry discovery.
1116 */
1117 if (mb->mbxStatus == MBXERR_RPI_FULL) {
1118 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
1119 ndlp->nlp_state = NLP_STE_UNUSED_NODE;
1120 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
1121 return ndlp->nlp_state;
1122 }
1123
1113 /* Put ndlp in npr list set plogi timer for 1 sec */ 1124 /* Put ndlp in npr list set plogi timer for 1 sec */
1114 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 1125 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
1115 spin_lock_irq(phba->host->host_lock); 1126 spin_lock_irq(phba->host->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index aea1ee472f3d..a760a44173df 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -153,22 +153,6 @@ static void
153lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) 153lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
154{ 154{
155 unsigned long iflag = 0; 155 unsigned long iflag = 0;
156 /*
157 * There are only two special cases to consider. (1) the scsi command
158 * requested scatter-gather usage or (2) the scsi command allocated
159 * a request buffer, but did not request use_sg. There is a third
160 * case, but it does not require resource deallocation.
161 */
162 if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
163 dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
164 psb->seg_cnt, psb->pCmd->sc_data_direction);
165 } else {
166 if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
167 dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
168 psb->pCmd->request_bufflen,
169 psb->pCmd->sc_data_direction);
170 }
171 }
172 156
173 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 157 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
174 psb->pCmd = NULL; 158 psb->pCmd = NULL;
@@ -282,6 +266,27 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
282} 266}
283 267
284static void 268static void
269lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
270{
271 /*
272 * There are only two special cases to consider. (1) the scsi command
273 * requested scatter-gather usage or (2) the scsi command allocated
274 * a request buffer, but did not request use_sg. There is a third
275 * case, but it does not require resource deallocation.
276 */
277 if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
278 dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
279 psb->seg_cnt, psb->pCmd->sc_data_direction);
280 } else {
281 if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
282 dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
283 psb->pCmd->request_bufflen,
284 psb->pCmd->sc_data_direction);
285 }
286 }
287}
288
289static void
285lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd) 290lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
286{ 291{
287 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 292 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
@@ -454,6 +459,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
454 cmd->scsi_done(cmd); 459 cmd->scsi_done(cmd);
455 460
456 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 461 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
462 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
457 lpfc_release_scsi_buf(phba, lpfc_cmd); 463 lpfc_release_scsi_buf(phba, lpfc_cmd);
458 return; 464 return;
459 } 465 }
@@ -511,6 +517,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
511 } 517 }
512 } 518 }
513 519
520 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
514 lpfc_release_scsi_buf(phba, lpfc_cmd); 521 lpfc_release_scsi_buf(phba, lpfc_cmd);
515} 522}
516 523
@@ -609,6 +616,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
609static int 616static int
610lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, 617lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
611 struct lpfc_scsi_buf *lpfc_cmd, 618 struct lpfc_scsi_buf *lpfc_cmd,
619 unsigned int lun,
612 uint8_t task_mgmt_cmd) 620 uint8_t task_mgmt_cmd)
613{ 621{
614 struct lpfc_sli *psli; 622 struct lpfc_sli *psli;
@@ -627,8 +635,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
627 piocb = &piocbq->iocb; 635 piocb = &piocbq->iocb;
628 636
629 fcp_cmnd = lpfc_cmd->fcp_cmnd; 637 fcp_cmnd = lpfc_cmd->fcp_cmnd;
630 int_to_scsilun(lpfc_cmd->pCmd->device->lun, 638 int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
631 &lpfc_cmd->fcp_cmnd->fcp_lun);
632 fcp_cmnd->fcpCntl2 = task_mgmt_cmd; 639 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
633 640
634 piocb->ulpCommand = CMD_FCP_ICMND64_CR; 641 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
@@ -655,14 +662,16 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
655 662
656static int 663static int
657lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba, 664lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
658 unsigned tgt_id, struct lpfc_rport_data *rdata) 665 unsigned tgt_id, unsigned int lun,
666 struct lpfc_rport_data *rdata)
659{ 667{
660 struct lpfc_iocbq *iocbq; 668 struct lpfc_iocbq *iocbq;
661 struct lpfc_iocbq *iocbqrsp; 669 struct lpfc_iocbq *iocbqrsp;
662 int ret; 670 int ret;
663 671
664 lpfc_cmd->rdata = rdata; 672 lpfc_cmd->rdata = rdata;
665 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET); 673 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun,
674 FCP_TARGET_RESET);
666 if (!ret) 675 if (!ret)
667 return FAILED; 676 return FAILED;
668 677
@@ -822,6 +831,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
822 return 0; 831 return 0;
823 832
824 out_host_busy_free_buf: 833 out_host_busy_free_buf:
834 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
825 lpfc_release_scsi_buf(phba, lpfc_cmd); 835 lpfc_release_scsi_buf(phba, lpfc_cmd);
826 out_host_busy: 836 out_host_busy:
827 return SCSI_MLQUEUE_HOST_BUSY; 837 return SCSI_MLQUEUE_HOST_BUSY;
@@ -969,12 +979,12 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
969 if (lpfc_cmd == NULL) 979 if (lpfc_cmd == NULL)
970 goto out; 980 goto out;
971 981
972 lpfc_cmd->pCmd = cmnd;
973 lpfc_cmd->timeout = 60; 982 lpfc_cmd->timeout = 60;
974 lpfc_cmd->scsi_hba = phba; 983 lpfc_cmd->scsi_hba = phba;
975 lpfc_cmd->rdata = rdata; 984 lpfc_cmd->rdata = rdata;
976 985
977 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET); 986 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun,
987 FCP_LUN_RESET);
978 if (!ret) 988 if (!ret)
979 goto out_free_scsi_buf; 989 goto out_free_scsi_buf;
980 990
@@ -1001,7 +1011,6 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
1001 cmd_status = iocbqrsp->iocb.ulpStatus; 1011 cmd_status = iocbqrsp->iocb.ulpStatus;
1002 1012
1003 lpfc_sli_release_iocbq(phba, iocbqrsp); 1013 lpfc_sli_release_iocbq(phba, iocbqrsp);
1004 lpfc_release_scsi_buf(phba, lpfc_cmd);
1005 1014
1006 /* 1015 /*
1007 * All outstanding txcmplq I/Os should have been aborted by the device. 1016 * All outstanding txcmplq I/Os should have been aborted by the device.
@@ -1040,6 +1049,8 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
1040 } 1049 }
1041 1050
1042out_free_scsi_buf: 1051out_free_scsi_buf:
1052 lpfc_release_scsi_buf(phba, lpfc_cmd);
1053
1043 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1054 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1044 "%d:0713 SCSI layer issued LUN reset (%d, %d) " 1055 "%d:0713 SCSI layer issued LUN reset (%d, %d) "
1045 "Data: x%x x%x x%x\n", 1056 "Data: x%x x%x x%x\n",
@@ -1070,7 +1081,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1070 1081
1071 /* The lpfc_cmd storage is reused. Set all loop invariants. */ 1082 /* The lpfc_cmd storage is reused. Set all loop invariants. */
1072 lpfc_cmd->timeout = 60; 1083 lpfc_cmd->timeout = 60;
1073 lpfc_cmd->pCmd = cmnd;
1074 lpfc_cmd->scsi_hba = phba; 1084 lpfc_cmd->scsi_hba = phba;
1075 1085
1076 /* 1086 /*
@@ -1078,7 +1088,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1078 * targets known to the driver. Should any target reset 1088 * targets known to the driver. Should any target reset
1079 * fail, this routine returns failure to the midlayer. 1089 * fail, this routine returns failure to the midlayer.
1080 */ 1090 */
1081 for (i = 0; i < MAX_FCP_TARGET; i++) { 1091 for (i = 0; i < LPFC_MAX_TARGET; i++) {
1082 /* Search the mapped list for this target ID */ 1092 /* Search the mapped list for this target ID */
1083 match = 0; 1093 match = 0;
1084 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) { 1094 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
@@ -1090,8 +1100,8 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1090 if (!match) 1100 if (!match)
1091 continue; 1101 continue;
1092 1102
1093 ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, 1103 ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun,
1094 i, ndlp->rport->dd_data); 1104 ndlp->rport->dd_data);
1095 if (ret != SUCCESS) { 1105 if (ret != SUCCESS) {
1096 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1106 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1097 "%d:0713 Bus Reset on target %d failed\n", 1107 "%d:0713 Bus Reset on target %d failed\n",
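Note: in lpfc_scsi.c the DMA unmapping is split out of lpfc_release_scsi_buf() into lpfc_scsi_unprep_dma_buf(), so every completion and error path unmaps first and only then recycles the buffer. A rough sketch of that split under a hypothetical struct io_buf (the real driver uses struct lpfc_scsi_buf and dma_unmap_sg()/dma_unmap_single()):

/* Hypothetical, much-reduced buffer type. */
struct io_buf {
        int   mapped;           /* non-zero while DMA mappings are live */
        void *cmd;              /* owning SCSI command, if any */
};

/* Undo only the DMA mappings set up at prepare time. */
static void unprep_dma_buf(struct io_buf *buf)
{
        if (buf->mapped) {
                /* dma_unmap_sg()/dma_unmap_single() would go here */
                buf->mapped = 0;
        }
}

/* Return the buffer to the free list; no DMA work happens here any more. */
static void release_io_buf(struct io_buf *buf)
{
        buf->cmd = NULL;
        /* list_add_tail(&buf->list, &free_list) in the real driver */
}

/* Completion and error paths now pair the two calls explicitly. */
static void complete_io(struct io_buf *buf)
{
        unprep_dma_buf(buf);
        release_io_buf(buf);
}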
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index bb69a7a1ec59..350a625fa224 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -191,35 +191,12 @@ static int
191lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba, 191lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,
192 struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb) 192 struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)
193{ 193{
194 uint16_t iotag;
195
196 list_add_tail(&piocb->list, &pring->txcmplq); 194 list_add_tail(&piocb->list, &pring->txcmplq);
197 pring->txcmplq_cnt++; 195 pring->txcmplq_cnt++;
198 if (unlikely(pring->ringno == LPFC_ELS_RING)) 196 if (unlikely(pring->ringno == LPFC_ELS_RING))
199 mod_timer(&phba->els_tmofunc, 197 mod_timer(&phba->els_tmofunc,
200 jiffies + HZ * (phba->fc_ratov << 1)); 198 jiffies + HZ * (phba->fc_ratov << 1));
201 199
202 if (pring->fast_lookup) {
203 /* Setup fast lookup based on iotag for completion */
204 iotag = piocb->iocb.ulpIoTag;
205 if (iotag && (iotag < pring->fast_iotag))
206 *(pring->fast_lookup + iotag) = piocb;
207 else {
208
209 /* Cmd ring <ringno> put: iotag <iotag> greater then
210 configured max <fast_iotag> wd0 <icmd> */
211 lpfc_printf_log(phba,
212 KERN_ERR,
213 LOG_SLI,
214 "%d:0316 Cmd ring %d put: iotag x%x "
215 "greater then configured max x%x "
216 "wd0 x%x\n",
217 phba->brd_no,
218 pring->ringno, iotag,
219 pring->fast_iotag,
220 *(((uint32_t *)(&piocb->iocb)) + 7));
221 }
222 }
223 return (0); 200 return (0);
224} 201}
225 202
@@ -601,7 +578,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
601 /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus 578 /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
602 <status> */ 579 <status> */
603 lpfc_printf_log(phba, 580 lpfc_printf_log(phba,
604 KERN_ERR, 581 KERN_WARNING,
605 LOG_MBOX | LOG_SLI, 582 LOG_MBOX | LOG_SLI,
606 "%d:0304 Stray Mailbox Interrupt " 583 "%d:0304 Stray Mailbox Interrupt "
607 "mbxCommand x%x mbxStatus x%x\n", 584 "mbxCommand x%x mbxStatus x%x\n",
@@ -1570,8 +1547,8 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
1570 1547
1571void lpfc_reset_barrier(struct lpfc_hba * phba) 1548void lpfc_reset_barrier(struct lpfc_hba * phba)
1572{ 1549{
1573 uint32_t * resp_buf; 1550 uint32_t __iomem *resp_buf;
1574 uint32_t * mbox_buf; 1551 uint32_t __iomem *mbox_buf;
1575 volatile uint32_t mbox; 1552 volatile uint32_t mbox;
1576 uint32_t hc_copy; 1553 uint32_t hc_copy;
1577 int i; 1554 int i;
@@ -1587,7 +1564,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba)
1587 * Tell the other part of the chip to suspend temporarily all 1564 * Tell the other part of the chip to suspend temporarily all
1588 * its DMA activity. 1565 * its DMA activity.
1589 */ 1566 */
1590 resp_buf = (uint32_t *)phba->MBslimaddr; 1567 resp_buf = phba->MBslimaddr;
1591 1568
1592 /* Disable the error attention */ 1569 /* Disable the error attention */
1593 hc_copy = readl(phba->HCregaddr); 1570 hc_copy = readl(phba->HCregaddr);
@@ -1605,7 +1582,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba)
1605 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 1582 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
1606 1583
1607 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 1584 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
1608 mbox_buf = (uint32_t *)phba->MBslimaddr; 1585 mbox_buf = phba->MBslimaddr;
1609 writel(mbox, mbox_buf); 1586 writel(mbox, mbox_buf);
1610 1587
1611 for (i = 0; 1588 for (i = 0;
@@ -1805,7 +1782,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
1805 skip_post = 0; 1782 skip_post = 0;
1806 word0 = 0; /* This is really setting up word1 */ 1783 word0 = 0; /* This is really setting up word1 */
1807 } 1784 }
1808 to_slim = (uint8_t *) phba->MBslimaddr + sizeof (uint32_t); 1785 to_slim = phba->MBslimaddr + sizeof (uint32_t);
1809 writel(*(uint32_t *) mb, to_slim); 1786 writel(*(uint32_t *) mb, to_slim);
1810 readl(to_slim); /* flush */ 1787 readl(to_slim); /* flush */
1811 1788
@@ -2659,8 +2636,6 @@ lpfc_sli_hba_down(struct lpfc_hba * phba)
2659 2636
2660 INIT_LIST_HEAD(&(pring->txq)); 2637 INIT_LIST_HEAD(&(pring->txq));
2661 2638
2662 kfree(pring->fast_lookup);
2663 pring->fast_lookup = NULL;
2664 } 2639 }
2665 2640
2666 spin_unlock_irqrestore(phba->host->host_lock, flags); 2641 spin_unlock_irqrestore(phba->host->host_lock, flags);
@@ -3110,6 +3085,24 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
3110 return retval; 3085 return retval;
3111} 3086}
3112 3087
3088int
3089lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
3090{
3091 int i = 0;
3092
3093 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !phba->stopped) {
3094 if (i++ > LPFC_MBOX_TMO * 1000)
3095 return 1;
3096
3097 if (lpfc_sli_handle_mb_event(phba) == 0)
3098 i = 0;
3099
3100 msleep(1);
3101 }
3102
3103 return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
3104}
3105
3113irqreturn_t 3106irqreturn_t
3114lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs) 3107lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs)
3115{ 3108{
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index a52d6c6cf083..d8ef0d2894d4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -135,8 +135,6 @@ struct lpfc_sli_ring {
135 uint32_t fast_iotag; /* max fastlookup based iotag */ 135 uint32_t fast_iotag; /* max fastlookup based iotag */
136 uint32_t iotag_ctr; /* keeps track of the next iotag to use */ 136 uint32_t iotag_ctr; /* keeps track of the next iotag to use */
137 uint32_t iotag_max; /* max iotag value to use */ 137 uint32_t iotag_max; /* max iotag value to use */
138 struct lpfc_iocbq ** fast_lookup; /* array of IOCB ptrs indexed by
139 iotag */
140 struct list_head txq; 138 struct list_head txq;
141 uint16_t txq_cnt; /* current length of queue */ 139 uint16_t txq_cnt; /* current length of queue */
142 uint16_t txq_max; /* max length */ 140 uint16_t txq_max; /* max length */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 6b737568b831..10e89c6ae823 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.1.6" 21#define LPFC_DRIVER_VERSION "8.1.7"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24 24
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 93edaa8696cf..89ef34df5a1d 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -378,7 +378,7 @@ static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd)
378 int nseg; 378 int nseg;
379 379
380 total = 0; 380 total = 0;
381 scl = (struct scatterlist *) cmd->buffer; 381 scl = (struct scatterlist *) cmd->request_buffer;
382 nseg = pci_map_sg(state->pdev, scl, cmd->use_sg, 382 nseg = pci_map_sg(state->pdev, scl, cmd->use_sg,
383 cmd->sc_data_direction); 383 cmd->sc_data_direction);
384 for (i = 0; i < nseg; ++i) { 384 for (i = 0; i < nseg; ++i) {
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index c88717727be8..5572981a9f92 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1268,7 +1268,7 @@ static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
1268 if (cmd->use_sg > 0) { 1268 if (cmd->use_sg > 0) {
1269 int nseg; 1269 int nseg;
1270 total = 0; 1270 total = 0;
1271 scl = (struct scatterlist *) cmd->buffer; 1271 scl = (struct scatterlist *) cmd->request_buffer;
1272 off = ms->data_ptr; 1272 off = ms->data_ptr;
1273 nseg = pci_map_sg(ms->pdev, scl, cmd->use_sg, 1273 nseg = pci_map_sg(ms->pdev, scl, cmd->use_sg,
1274 cmd->sc_data_direction); 1274 cmd->sc_data_direction);
diff --git a/drivers/scsi/pluto.c b/drivers/scsi/pluto.c
index 7abf64d1bfc9..0bd9c60e6455 100644
--- a/drivers/scsi/pluto.c
+++ b/drivers/scsi/pluto.c
@@ -169,8 +169,6 @@ int __init pluto_detect(struct scsi_host_template *tpnt)
169 SCpnt->request->rq_status = RQ_SCSI_BUSY; 169 SCpnt->request->rq_status = RQ_SCSI_BUSY;
170 170
171 SCpnt->done = pluto_detect_done; 171 SCpnt->done = pluto_detect_done;
172 SCpnt->bufflen = 256;
173 SCpnt->buffer = fcs[i].inquiry;
174 SCpnt->request_bufflen = 256; 172 SCpnt->request_bufflen = 256;
175 SCpnt->request_buffer = fcs[i].inquiry; 173 SCpnt->request_buffer = fcs[i].inquiry;
176 PLD(("set up %d %08lx\n", i, (long)SCpnt)) 174 PLD(("set up %d %08lx\n", i, (long)SCpnt))
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 69e0551a81d2..5b2f0741a55b 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -874,7 +874,7 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
874 if (Cmnd->use_sg) { 874 if (Cmnd->use_sg) {
875 int sg_count; 875 int sg_count;
876 876
877 sg = (struct scatterlist *) Cmnd->buffer; 877 sg = (struct scatterlist *) Cmnd->request_buffer;
878 sg_count = sbus_map_sg(qpti->sdev, sg, Cmnd->use_sg, Cmnd->sc_data_direction); 878 sg_count = sbus_map_sg(qpti->sdev, sg, Cmnd->use_sg, Cmnd->sc_data_direction);
879 879
880 ds = cmd->dataseg; 880 ds = cmd->dataseg;
@@ -1278,7 +1278,7 @@ static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
1278 1278
1279 if (Cmnd->use_sg) { 1279 if (Cmnd->use_sg) {
1280 sbus_unmap_sg(qpti->sdev, 1280 sbus_unmap_sg(qpti->sdev,
1281 (struct scatterlist *)Cmnd->buffer, 1281 (struct scatterlist *)Cmnd->request_buffer,
1282 Cmnd->use_sg, 1282 Cmnd->use_sg,
1283 Cmnd->sc_data_direction); 1283 Cmnd->sc_data_direction);
1284 } else { 1284 } else {
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 2ab7df0dcfe8..b332caddd5b3 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -346,7 +346,7 @@ void scsi_log_send(struct scsi_cmnd *cmd)
346 if (level > 3) { 346 if (level > 3) {
347 printk(KERN_INFO "buffer = 0x%p, bufflen = %d," 347 printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
348 " done = 0x%p, queuecommand 0x%p\n", 348 " done = 0x%p, queuecommand 0x%p\n",
349 cmd->buffer, cmd->bufflen, 349 cmd->request_buffer, cmd->request_bufflen,
350 cmd->done, 350 cmd->done,
351 sdev->host->hostt->queuecommand); 351 sdev->host->hostt->queuecommand);
352 352
@@ -661,11 +661,6 @@ void __scsi_done(struct scsi_cmnd *cmd)
661 */ 661 */
662int scsi_retry_command(struct scsi_cmnd *cmd) 662int scsi_retry_command(struct scsi_cmnd *cmd)
663{ 663{
664 /*
665 * Restore the SCSI command state.
666 */
667 scsi_setup_cmd_retry(cmd);
668
669 /* 664 /*
670 * Zero the sense information from the last time we tried 665 * Zero the sense information from the last time we tried
671 * this command. 666 * this command.
@@ -711,10 +706,6 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
711 "Notifying upper driver of completion " 706 "Notifying upper driver of completion "
712 "(result %x)\n", cmd->result)); 707 "(result %x)\n", cmd->result));
713 708
714 /*
715 * We can get here with use_sg=0, causing a panic in the upper level
716 */
717 cmd->use_sg = cmd->old_use_sg;
718 cmd->done(cmd); 709 cmd->done(cmd);
719} 710}
720EXPORT_SYMBOL(scsi_finish_command); 711EXPORT_SYMBOL(scsi_finish_command);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 9c63b00773c4..a80303c6b3fd 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -286,7 +286,7 @@ static int inquiry_evpd_83(unsigned char * arr, int target_dev_id,
286 int dev_id_num, const char * dev_id_str, 286 int dev_id_num, const char * dev_id_str,
287 int dev_id_str_len); 287 int dev_id_str_len);
288static int inquiry_evpd_88(unsigned char * arr, int target_dev_id); 288static int inquiry_evpd_88(unsigned char * arr, int target_dev_id);
289static void do_create_driverfs_files(void); 289static int do_create_driverfs_files(void);
290static void do_remove_driverfs_files(void); 290static void do_remove_driverfs_files(void);
291 291
292static int sdebug_add_adapter(void); 292static int sdebug_add_adapter(void);
@@ -2487,19 +2487,22 @@ static ssize_t sdebug_add_host_store(struct device_driver * ddp,
2487DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show, 2487DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
2488 sdebug_add_host_store); 2488 sdebug_add_host_store);
2489 2489
2490static void do_create_driverfs_files(void) 2490static int do_create_driverfs_files(void)
2491{ 2491{
2492 driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host); 2492 int ret;
2493 driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay); 2493
2494 driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb); 2494 ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
2495 driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense); 2495 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
2496 driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth); 2496 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
2497 driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns); 2497 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
2498 driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); 2498 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
2499 driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts); 2499 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
2500 driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype); 2500 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
2501 driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts); 2501 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
2502 driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); 2502 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
2503 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
2504 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
2505 return ret;
2503} 2506}
2504 2507
2505static void do_remove_driverfs_files(void) 2508static void do_remove_driverfs_files(void)
@@ -2522,6 +2525,7 @@ static int __init scsi_debug_init(void)
2522 unsigned int sz; 2525 unsigned int sz;
2523 int host_to_add; 2526 int host_to_add;
2524 int k; 2527 int k;
2528 int ret;
2525 2529
2526 if (scsi_debug_dev_size_mb < 1) 2530 if (scsi_debug_dev_size_mb < 1)
2527 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ 2531 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
@@ -2560,12 +2564,32 @@ static int __init scsi_debug_init(void)
2560 if (scsi_debug_num_parts > 0) 2564 if (scsi_debug_num_parts > 0)
2561 sdebug_build_parts(fake_storep); 2565 sdebug_build_parts(fake_storep);
2562 2566
2563 init_all_queued(); 2567 ret = device_register(&pseudo_primary);
2568 if (ret < 0) {
2569 printk(KERN_WARNING "scsi_debug: device_register error: %d\n",
2570 ret);
2571 goto free_vm;
2572 }
2573 ret = bus_register(&pseudo_lld_bus);
2574 if (ret < 0) {
2575 printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
2576 ret);
2577 goto dev_unreg;
2578 }
2579 ret = driver_register(&sdebug_driverfs_driver);
2580 if (ret < 0) {
2581 printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
2582 ret);
2583 goto bus_unreg;
2584 }
2585 ret = do_create_driverfs_files();
2586 if (ret < 0) {
2587 printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
2588 ret);
2589 goto del_files;
2590 }
2564 2591
2565 device_register(&pseudo_primary); 2592 init_all_queued();
2566 bus_register(&pseudo_lld_bus);
2567 driver_register(&sdebug_driverfs_driver);
2568 do_create_driverfs_files();
2569 2593
2570 sdebug_driver_template.proc_name = (char *)sdebug_proc_name; 2594 sdebug_driver_template.proc_name = (char *)sdebug_proc_name;
2571 2595
@@ -2585,6 +2609,18 @@ static int __init scsi_debug_init(void)
2585 scsi_debug_add_host); 2609 scsi_debug_add_host);
2586 } 2610 }
2587 return 0; 2611 return 0;
2612
2613del_files:
2614 do_remove_driverfs_files();
2615 driver_unregister(&sdebug_driverfs_driver);
2616bus_unreg:
2617 bus_unregister(&pseudo_lld_bus);
2618dev_unreg:
2619 device_unregister(&pseudo_primary);
2620free_vm:
2621 vfree(fake_storep);
2622
2623 return ret;
2588} 2624}
2589 2625
2590static void __exit scsi_debug_exit(void) 2626static void __exit scsi_debug_exit(void)
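Note: scsi_debug_init() now checks each registration call and unwinds the already-completed steps in reverse order through goto labels instead of ignoring the return values. A generic sketch of that init/unwind ladder; the step_*()/undo_*() functions are placeholders, not scsi_debug symbols.

extern int step_a(void), step_b(void), step_c(void);
extern void undo_a(void), undo_b(void);

static int init_sketch(void)
{
        int ret;

        ret = step_a();
        if (ret < 0)
                return ret;             /* nothing to undo yet */

        ret = step_b();
        if (ret < 0)
                goto err_a;             /* only A succeeded so far */

        ret = step_c();
        if (ret < 0)
                goto err_b;             /* unwind in reverse order */

        return 0;

err_b:
        undo_b();
err_a:
        undo_a();
        return ret;
}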
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 6683d596234a..6a5b731bd5ba 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -460,19 +460,67 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
460 * Return value: 460 * Return value:
461 * SUCCESS or FAILED or NEEDS_RETRY 461 * SUCCESS or FAILED or NEEDS_RETRY
462 **/ 462 **/
463static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout) 463static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout, int copy_sense)
464{ 464{
465 struct scsi_device *sdev = scmd->device; 465 struct scsi_device *sdev = scmd->device;
466 struct Scsi_Host *shost = sdev->host; 466 struct Scsi_Host *shost = sdev->host;
467 int old_result = scmd->result;
467 DECLARE_COMPLETION(done); 468 DECLARE_COMPLETION(done);
468 unsigned long timeleft; 469 unsigned long timeleft;
469 unsigned long flags; 470 unsigned long flags;
471 unsigned char old_cmnd[MAX_COMMAND_SIZE];
472 enum dma_data_direction old_data_direction;
473 unsigned short old_use_sg;
474 unsigned char old_cmd_len;
475 unsigned old_bufflen;
476 void *old_buffer;
470 int rtn; 477 int rtn;
471 478
479 /*
480 * We need saved copies of a number of fields - this is because
481 * error handling may need to overwrite these with different values
482 * to run different commands, and once error handling is complete,
483 * we will need to restore these values prior to running the actual
484 * command.
485 */
486 old_buffer = scmd->request_buffer;
487 old_bufflen = scmd->request_bufflen;
488 memcpy(old_cmnd, scmd->cmnd, sizeof(scmd->cmnd));
489 old_data_direction = scmd->sc_data_direction;
490 old_cmd_len = scmd->cmd_len;
491 old_use_sg = scmd->use_sg;
492
493 if (copy_sense) {
494 int gfp_mask = GFP_ATOMIC;
495
496 if (shost->hostt->unchecked_isa_dma)
497 gfp_mask |= __GFP_DMA;
498
499 scmd->sc_data_direction = DMA_FROM_DEVICE;
500 scmd->request_bufflen = 252;
501 scmd->request_buffer = kzalloc(scmd->request_bufflen, gfp_mask);
502 if (!scmd->request_buffer)
503 return FAILED;
504 } else {
505 scmd->request_buffer = NULL;
506 scmd->request_bufflen = 0;
507 scmd->sc_data_direction = DMA_NONE;
508 }
509
510 scmd->underflow = 0;
511 scmd->use_sg = 0;
512 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
513
472 if (sdev->scsi_level <= SCSI_2) 514 if (sdev->scsi_level <= SCSI_2)
473 scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) | 515 scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
474 (sdev->lun << 5 & 0xe0); 516 (sdev->lun << 5 & 0xe0);
475 517
518 /*
519 * Zero the sense buffer. The scsi spec mandates that any
520 * untransferred sense data should be interpreted as being zero.
521 */
522 memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
523
476 shost->eh_action = &done; 524 shost->eh_action = &done;
477 525
478 spin_lock_irqsave(shost->host_lock, flags); 526 spin_lock_irqsave(shost->host_lock, flags);
@@ -522,6 +570,29 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
522 rtn = FAILED; 570 rtn = FAILED;
523 } 571 }
524 572
573
574 /*
575 * Last chance to have valid sense data.
576 */
577 if (copy_sense) {
578 if (!SCSI_SENSE_VALID(scmd)) {
579 memcpy(scmd->sense_buffer, scmd->request_buffer,
580 sizeof(scmd->sense_buffer));
581 }
582 kfree(scmd->request_buffer);
583 }
584
585
586 /*
587 * Restore original data
588 */
589 scmd->request_buffer = old_buffer;
590 scmd->request_bufflen = old_bufflen;
591 memcpy(scmd->cmnd, old_cmnd, sizeof(scmd->cmnd));
592 scmd->sc_data_direction = old_data_direction;
593 scmd->cmd_len = old_cmd_len;
594 scmd->use_sg = old_use_sg;
595 scmd->result = old_result;
525 return rtn; 596 return rtn;
526} 597}
527 598
@@ -537,56 +608,10 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
537static int scsi_request_sense(struct scsi_cmnd *scmd) 608static int scsi_request_sense(struct scsi_cmnd *scmd)
538{ 609{
539 static unsigned char generic_sense[6] = 610 static unsigned char generic_sense[6] =
540 {REQUEST_SENSE, 0, 0, 0, 252, 0}; 611 {REQUEST_SENSE, 0, 0, 0, 252, 0};
541 unsigned char *scsi_result;
542 int saved_result;
543 int rtn;
544 612
545 memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense)); 613 memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense));
546 614 return scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 1);
547 scsi_result = kmalloc(252, GFP_ATOMIC | ((scmd->device->host->hostt->unchecked_isa_dma) ? __GFP_DMA : 0));
548
549
550 if (unlikely(!scsi_result)) {
551 printk(KERN_ERR "%s: cannot allocate scsi_result.\n",
552 __FUNCTION__);
553 return FAILED;
554 }
555
556 /*
557 * zero the sense buffer. some host adapters automatically always
558 * request sense, so it is not a good idea that
559 * scmd->request_buffer and scmd->sense_buffer point to the same
560 * address (db). 0 is not a valid sense code.
561 */
562 memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
563 memset(scsi_result, 0, 252);
564
565 saved_result = scmd->result;
566 scmd->request_buffer = scsi_result;
567 scmd->request_bufflen = 252;
568 scmd->use_sg = 0;
569 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
570 scmd->sc_data_direction = DMA_FROM_DEVICE;
571 scmd->underflow = 0;
572
573 rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT);
574
575 /* last chance to have valid sense data */
576 if(!SCSI_SENSE_VALID(scmd)) {
577 memcpy(scmd->sense_buffer, scmd->request_buffer,
578 sizeof(scmd->sense_buffer));
579 }
580
581 kfree(scsi_result);
582
583 /*
584 * when we eventually call scsi_finish, we really wish to complete
585 * the original request, so let's restore the original data. (db)
586 */
587 scsi_setup_cmd_retry(scmd);
588 scmd->result = saved_result;
589 return rtn;
590} 615}
591 616
592/** 617/**
@@ -605,12 +630,6 @@ void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
605{ 630{
606 scmd->device->host->host_failed--; 631 scmd->device->host->host_failed--;
607 scmd->eh_eflags = 0; 632 scmd->eh_eflags = 0;
608
609 /*
610 * set this back so that the upper level can correctly free up
611 * things.
612 */
613 scsi_setup_cmd_retry(scmd);
614 list_move_tail(&scmd->eh_entry, done_q); 633 list_move_tail(&scmd->eh_entry, done_q);
615} 634}
616EXPORT_SYMBOL(scsi_eh_finish_cmd); 635EXPORT_SYMBOL(scsi_eh_finish_cmd);
@@ -715,47 +734,26 @@ static int scsi_eh_tur(struct scsi_cmnd *scmd)
715{ 734{
716 static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0}; 735 static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
717 int retry_cnt = 1, rtn; 736 int retry_cnt = 1, rtn;
718 int saved_result;
719 737
720retry_tur: 738retry_tur:
721 memcpy(scmd->cmnd, tur_command, sizeof(tur_command)); 739 memcpy(scmd->cmnd, tur_command, sizeof(tur_command));
722 740
723 /*
724 * zero the sense buffer. the scsi spec mandates that any
725 * untransferred sense data should be interpreted as being zero.
726 */
727 memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
728
729 saved_result = scmd->result;
730 scmd->request_buffer = NULL;
731 scmd->request_bufflen = 0;
732 scmd->use_sg = 0;
733 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
734 scmd->underflow = 0;
735 scmd->sc_data_direction = DMA_NONE;
736 741
737 rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT); 742 rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 0);
738 743
739 /*
740 * when we eventually call scsi_finish, we really wish to complete
741 * the original request, so let's restore the original data. (db)
742 */
743 scsi_setup_cmd_retry(scmd);
744 scmd->result = saved_result;
745
746 /*
747 * hey, we are done. let's look to see what happened.
748 */
749 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", 744 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
750 __FUNCTION__, scmd, rtn)); 745 __FUNCTION__, scmd, rtn));
751 if (rtn == SUCCESS) 746
752 return 0; 747 switch (rtn) {
753 else if (rtn == NEEDS_RETRY) { 748 case NEEDS_RETRY:
754 if (retry_cnt--) 749 if (retry_cnt--)
755 goto retry_tur; 750 goto retry_tur;
751 /*FALLTHRU*/
752 case SUCCESS:
756 return 0; 753 return 0;
754 default:
755 return 1;
757 } 756 }
758 return 1;
759} 757}
760 758
761/** 759/**
@@ -837,44 +835,16 @@ static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
837static int scsi_eh_try_stu(struct scsi_cmnd *scmd) 835static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
838{ 836{
839 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0}; 837 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
840 int rtn;
841 int saved_result;
842 838
843 if (!scmd->device->allow_restart) 839 if (scmd->device->allow_restart) {
844 return 1; 840 int rtn;
845
846 memcpy(scmd->cmnd, stu_command, sizeof(stu_command));
847
848 /*
849 * zero the sense buffer. the scsi spec mandates that any
850 * untransferred sense data should be interpreted as being zero.
851 */
852 memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
853
854 saved_result = scmd->result;
855 scmd->request_buffer = NULL;
856 scmd->request_bufflen = 0;
857 scmd->use_sg = 0;
858 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
859 scmd->underflow = 0;
860 scmd->sc_data_direction = DMA_NONE;
861 841
862 rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT); 842 memcpy(scmd->cmnd, stu_command, sizeof(stu_command));
863 843 rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT, 0);
864 /* 844 if (rtn == SUCCESS)
865 * when we eventually call scsi_finish, we really wish to complete 845 return 0;
866 * the original request, so let's restore the original data. (db) 846 }
867 */
868 scsi_setup_cmd_retry(scmd);
869 scmd->result = saved_result;
870 847
871 /*
872 * hey, we are done. let's look to see what happened.
873 */
874 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
875 __FUNCTION__, scmd, rtn));
876 if (rtn == SUCCESS)
877 return 0;
878 return 1; 848 return 1;
879} 849}
880 850
@@ -1684,8 +1654,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
1684 1654
1685 scmd->scsi_done = scsi_reset_provider_done_command; 1655 scmd->scsi_done = scsi_reset_provider_done_command;
1686 scmd->done = NULL; 1656 scmd->done = NULL;
1687 scmd->buffer = NULL;
1688 scmd->bufflen = 0;
1689 scmd->request_buffer = NULL; 1657 scmd->request_buffer = NULL;
1690 scmd->request_bufflen = 0; 1658 scmd->request_bufflen = 0;
1691 1659
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index a89c4115cfba..32293f451669 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -110,11 +110,8 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
110 sshdr.asc, sshdr.ascq); 110 sshdr.asc, sshdr.ascq);
111 break; 111 break;
112 case NOT_READY: /* This happens if there is no disc in drive */ 112 case NOT_READY: /* This happens if there is no disc in drive */
113 if (sdev->removable && (cmd[0] != TEST_UNIT_READY)) { 113 if (sdev->removable)
114 printk(KERN_INFO "Device not ready. Make sure"
115 " there is a disc in the drive.\n");
116 break; 114 break;
117 }
118 case UNIT_ATTENTION: 115 case UNIT_ATTENTION:
119 if (sdev->removable) { 116 if (sdev->removable) {
120 sdev->changed = 1; 117 sdev->changed = 1;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 08af9aae7df3..077c1c691210 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -436,60 +436,16 @@ EXPORT_SYMBOL_GPL(scsi_execute_async);
436 * 436 *
437 * Arguments: cmd - command that is ready to be queued. 437 * Arguments: cmd - command that is ready to be queued.
438 * 438 *
439 * Returns: Nothing
440 *
441 * Notes: This function has the job of initializing a number of 439 * Notes: This function has the job of initializing a number of
442 * fields related to error handling. Typically this will 440 * fields related to error handling. Typically this will
443 * be called once for each command, as required. 441 * be called once for each command, as required.
444 */ 442 */
445static int scsi_init_cmd_errh(struct scsi_cmnd *cmd) 443static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
446{ 444{
447 cmd->serial_number = 0; 445 cmd->serial_number = 0;
448
449 memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer); 446 memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
450
451 if (cmd->cmd_len == 0) 447 if (cmd->cmd_len == 0)
452 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]); 448 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
453
454 /*
455 * We need saved copies of a number of fields - this is because
456 * error handling may need to overwrite these with different values
457 * to run different commands, and once error handling is complete,
458 * we will need to restore these values prior to running the actual
459 * command.
460 */
461 cmd->old_use_sg = cmd->use_sg;
462 cmd->old_cmd_len = cmd->cmd_len;
463 cmd->sc_old_data_direction = cmd->sc_data_direction;
464 cmd->old_underflow = cmd->underflow;
465 memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
466 cmd->buffer = cmd->request_buffer;
467 cmd->bufflen = cmd->request_bufflen;
468
469 return 1;
470}
471
472/*
473 * Function: scsi_setup_cmd_retry()
474 *
475 * Purpose: Restore the command state for a retry
476 *
477 * Arguments: cmd - command to be restored
478 *
479 * Returns: Nothing
480 *
481 * Notes: Immediately prior to retrying a command, we need
482 * to restore certain fields that we saved above.
483 */
484void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
485{
486 memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
487 cmd->request_buffer = cmd->buffer;
488 cmd->request_bufflen = cmd->bufflen;
489 cmd->use_sg = cmd->old_use_sg;
490 cmd->cmd_len = cmd->old_cmd_len;
491 cmd->sc_data_direction = cmd->sc_old_data_direction;
492 cmd->underflow = cmd->old_underflow;
493} 449}
494 450
495void scsi_device_unbusy(struct scsi_device *sdev) 451void scsi_device_unbusy(struct scsi_device *sdev)
@@ -807,22 +763,13 @@ static void scsi_free_sgtable(struct scatterlist *sgl, int index)
807 */ 763 */
808static void scsi_release_buffers(struct scsi_cmnd *cmd) 764static void scsi_release_buffers(struct scsi_cmnd *cmd)
809{ 765{
810 struct request *req = cmd->request;
811
812 /*
813 * Free up any indirection buffers we allocated for DMA purposes.
814 */
815 if (cmd->use_sg) 766 if (cmd->use_sg)
816 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len); 767 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
817 else if (cmd->request_buffer != req->buffer)
818 kfree(cmd->request_buffer);
819 768
820 /* 769 /*
821 * Zero these out. They now point to freed memory, and it is 770 * Zero these out. They now point to freed memory, and it is
822 * dangerous to hang onto the pointers. 771 * dangerous to hang onto the pointers.
823 */ 772 */
824 cmd->buffer = NULL;
825 cmd->bufflen = 0;
826 cmd->request_buffer = NULL; 773 cmd->request_buffer = NULL;
827 cmd->request_bufflen = 0; 774 cmd->request_bufflen = 0;
828} 775}
@@ -858,7 +805,7 @@ static void scsi_release_buffers(struct scsi_cmnd *cmd)
858void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) 805void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
859{ 806{
860 int result = cmd->result; 807 int result = cmd->result;
861 int this_count = cmd->bufflen; 808 int this_count = cmd->request_bufflen;
862 request_queue_t *q = cmd->device->request_queue; 809 request_queue_t *q = cmd->device->request_queue;
863 struct request *req = cmd->request; 810 struct request *req = cmd->request;
864 int clear_errors = 1; 811 int clear_errors = 1;
@@ -866,28 +813,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
866 int sense_valid = 0; 813 int sense_valid = 0;
867 int sense_deferred = 0; 814 int sense_deferred = 0;
868 815
869 /* 816 scsi_release_buffers(cmd);
870 * Free up any indirection buffers we allocated for DMA purposes.
871 * For the case of a READ, we need to copy the data out of the
872 * bounce buffer and into the real buffer.
873 */
874 if (cmd->use_sg)
875 scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
876 else if (cmd->buffer != req->buffer) {
877 if (rq_data_dir(req) == READ) {
878 unsigned long flags;
879 char *to = bio_kmap_irq(req->bio, &flags);
880 memcpy(to, cmd->buffer, cmd->bufflen);
881 bio_kunmap_irq(to, &flags);
882 }
883 kfree(cmd->buffer);
884 }
885 817
886 if (result) { 818 if (result) {
887 sense_valid = scsi_command_normalize_sense(cmd, &sshdr); 819 sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
888 if (sense_valid) 820 if (sense_valid)
889 sense_deferred = scsi_sense_is_deferred(&sshdr); 821 sense_deferred = scsi_sense_is_deferred(&sshdr);
890 } 822 }
823
891 if (blk_pc_request(req)) { /* SG_IO ioctl from block level */ 824 if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
892 req->errors = result; 825 req->errors = result;
893 if (result) { 826 if (result) {
@@ -908,15 +841,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
908 } 841 }
909 842
910 /* 843 /*
911 * Zero these out. They now point to freed memory, and it is
912 * dangerous to hang onto the pointers.
913 */
914 cmd->buffer = NULL;
915 cmd->bufflen = 0;
916 cmd->request_buffer = NULL;
917 cmd->request_bufflen = 0;
918
919 /*
920 * Next deal with any sectors which we were able to correctly 844 * Next deal with any sectors which we were able to correctly
921 * handle. 845 * handle.
922 */ 846 */
@@ -1012,7 +936,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
1012 if (!(req->flags & REQ_QUIET)) { 936 if (!(req->flags & REQ_QUIET)) {
1013 scmd_printk(KERN_INFO, cmd, 937 scmd_printk(KERN_INFO, cmd,
1014 "Volume overflow, CDB: "); 938 "Volume overflow, CDB: ");
1015 __scsi_print_command(cmd->data_cmnd); 939 __scsi_print_command(cmd->cmnd);
1016 scsi_print_sense("", cmd); 940 scsi_print_sense("", cmd);
1017 } 941 }
1018 /* See SSC3rXX or current. */ 942 /* See SSC3rXX or current. */
@@ -1143,7 +1067,7 @@ static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
1143 * successfully. Since this is a REQ_BLOCK_PC command the 1067 * successfully. Since this is a REQ_BLOCK_PC command the
1144 * caller should check the request's errors value 1068 * caller should check the request's errors value
1145 */ 1069 */
1146 scsi_io_completion(cmd, cmd->bufflen); 1070 scsi_io_completion(cmd, cmd->request_bufflen);
1147} 1071}
1148 1072
1149static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd) 1073static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
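Note: the scsi_lib.c hunks finish removing the buffer/bufflen aliases: scsi_release_buffers() only frees the scatter-gather table, and scsi_io_completion() reads request_bufflen, releases the buffers up front, and never touches the old shadow fields again. A tiny sketch of the resulting ordering, with free_sg_table() and finish_request() as hypothetical helpers.

/* Reduced stand-in for struct scsi_cmnd after the patch: only the
 * request_* fields remain, the buffer/bufflen aliases are gone. */
struct cmd_fields_sketch {
        void    *request_buffer;
        unsigned request_bufflen;
        int      use_sg;
};

extern void free_sg_table(void *sglist);         /* stands in for scsi_free_sgtable() */
extern void finish_request(unsigned bytes_done); /* hypothetical end-of-request step */

/* scsi_release_buffers() now only drops the SG table and forgets the pointers. */
static void release_buffers_sketch(struct cmd_fields_sketch *cmd)
{
        if (cmd->use_sg)
                free_sg_table(cmd->request_buffer);

        cmd->request_buffer = NULL;     /* would otherwise point at freed memory */
        cmd->request_bufflen = 0;
}

/* scsi_io_completion() reads the length first, then releases buffers up front. */
static void io_completion_sketch(struct cmd_fields_sketch *cmd)
{
        unsigned this_count = cmd->request_bufflen;     /* was cmd->bufflen */

        release_buffers_sketch(cmd);
        finish_request(this_count);     /* sense/result handling omitted */
}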
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index e2fbe9a9d5a9..ae24c85aaeea 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -57,7 +57,6 @@ extern int scsi_eh_scmd_add(struct scsi_cmnd *, int);
57 57
58/* scsi_lib.c */ 58/* scsi_lib.c */
59extern int scsi_maybe_unblock_host(struct scsi_device *sdev); 59extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
60extern void scsi_setup_cmd_retry(struct scsi_cmnd *cmd);
61extern void scsi_device_unbusy(struct scsi_device *sdev); 60extern void scsi_device_unbusy(struct scsi_device *sdev);
62extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason); 61extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
63extern void scsi_next_command(struct scsi_cmnd *cmd); 62extern void scsi_next_command(struct scsi_cmnd *cmd);
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index dd075627e605..5a625c3fddae 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -41,6 +41,7 @@ struct sas_host_attrs {
41 struct mutex lock; 41 struct mutex lock;
42 u32 next_target_id; 42 u32 next_target_id;
43 u32 next_expander_id; 43 u32 next_expander_id;
44 int next_port_id;
44}; 45};
45#define to_sas_host_attrs(host) ((struct sas_host_attrs *)(host)->shost_data) 46#define to_sas_host_attrs(host) ((struct sas_host_attrs *)(host)->shost_data)
46 47
@@ -146,6 +147,7 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
146 mutex_init(&sas_host->lock); 147 mutex_init(&sas_host->lock);
147 sas_host->next_target_id = 0; 148 sas_host->next_target_id = 0;
148 sas_host->next_expander_id = 0; 149 sas_host->next_expander_id = 0;
150 sas_host->next_port_id = 0;
149 return 0; 151 return 0;
150} 152}
151 153
@@ -327,7 +329,7 @@ sas_phy_protocol_attr(identify.target_port_protocols,
327sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n", 329sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
328 unsigned long long); 330 unsigned long long);
329sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8); 331sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
330//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", u8); 332//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", int);
331sas_phy_linkspeed_attr(negotiated_linkrate); 333sas_phy_linkspeed_attr(negotiated_linkrate);
332sas_phy_linkspeed_attr(minimum_linkrate_hw); 334sas_phy_linkspeed_attr(minimum_linkrate_hw);
333sas_phy_linkspeed_attr(minimum_linkrate); 335sas_phy_linkspeed_attr(minimum_linkrate);
@@ -590,6 +592,38 @@ struct sas_port *sas_port_alloc(struct device *parent, int port_id)
590} 592}
591EXPORT_SYMBOL(sas_port_alloc); 593EXPORT_SYMBOL(sas_port_alloc);
592 594
595/** sas_port_alloc_num - allocate and initialize a SAS port structure
596 *
597 * @parent: parent device
598 *
599 * Allocates a SAS port structure and a number to go with it. This
600 * interface is really for adapters where the port number has no
601 * meaning, so the sas class should manage them. It will be added to
602 * the device tree below the device specified by @parent which must be
603 * either a Scsi_Host or a sas_expander_device.
604 *
605 * Returns %NULL on error
606 */
607struct sas_port *sas_port_alloc_num(struct device *parent)
608{
609 int index;
610 struct Scsi_Host *shost = dev_to_shost(parent);
611 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
612
613 /* FIXME: use idr for this eventually */
614 mutex_lock(&sas_host->lock);
615 if (scsi_is_sas_expander_device(parent)) {
616 struct sas_rphy *rphy = dev_to_rphy(parent);
617 struct sas_expander_device *exp = rphy_to_expander_device(rphy);
618
619 index = exp->next_port_id++;
620 } else
621 index = sas_host->next_port_id++;
622 mutex_unlock(&sas_host->lock);
623 return sas_port_alloc(parent, index);
624}
625EXPORT_SYMBOL(sas_port_alloc_num);
626
593/** 627/**
594 * sas_port_add - add a SAS port to the device hierarchy 628 * sas_port_add - add a SAS port to the device hierarchy
595 * 629 *
@@ -658,6 +692,13 @@ void sas_port_delete(struct sas_port *port)
658 } 692 }
659 mutex_unlock(&port->phy_list_mutex); 693 mutex_unlock(&port->phy_list_mutex);
660 694
695 if (port->is_backlink) {
696 struct device *parent = port->dev.parent;
697
698 sysfs_remove_link(&port->dev.kobj, parent->bus_id);
699 port->is_backlink = 0;
700 }
701
661 transport_remove_device(dev); 702 transport_remove_device(dev);
662 device_del(dev); 703 device_del(dev);
663 transport_destroy_device(dev); 704 transport_destroy_device(dev);
@@ -733,6 +774,19 @@ void sas_port_delete_phy(struct sas_port *port, struct sas_phy *phy)
733} 774}
734EXPORT_SYMBOL(sas_port_delete_phy); 775EXPORT_SYMBOL(sas_port_delete_phy);
735 776
777void sas_port_mark_backlink(struct sas_port *port)
778{
779 struct device *parent = port->dev.parent->parent->parent;
780
781 if (port->is_backlink)
782 return;
783 port->is_backlink = 1;
784 sysfs_create_link(&port->dev.kobj, &parent->kobj,
785 parent->bus_id);
786
787}
788EXPORT_SYMBOL(sas_port_mark_backlink);
789
736/* 790/*
737 * SAS remote PHY attributes. 791 * SAS remote PHY attributes.
738 */ 792 */
@@ -1140,7 +1194,7 @@ int sas_rphy_add(struct sas_rphy *rphy)
1140 1194
1141 if (identify->device_type == SAS_END_DEVICE && 1195 if (identify->device_type == SAS_END_DEVICE &&
1142 rphy->scsi_target_id != -1) { 1196 rphy->scsi_target_id != -1) {
1143 scsi_scan_target(&rphy->dev, parent->port_identifier, 1197 scsi_scan_target(&rphy->dev, 0,
1144 rphy->scsi_target_id, ~0, 0); 1198 rphy->scsi_target_id, ~0, 0);
1145 } 1199 }
1146 1200
@@ -1242,15 +1296,13 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
1242 1296
1243 mutex_lock(&sas_host->lock); 1297 mutex_lock(&sas_host->lock);
1244 list_for_each_entry(rphy, &sas_host->rphy_list, list) { 1298 list_for_each_entry(rphy, &sas_host->rphy_list, list) {
1245 struct sas_port *parent = dev_to_sas_port(rphy->dev.parent);
1246
1247 if (rphy->identify.device_type != SAS_END_DEVICE || 1299 if (rphy->identify.device_type != SAS_END_DEVICE ||
1248 rphy->scsi_target_id == -1) 1300 rphy->scsi_target_id == -1)
1249 continue; 1301 continue;
1250 1302
1251 if ((channel == SCAN_WILD_CARD || channel == parent->port_identifier) && 1303 if ((channel == SCAN_WILD_CARD || channel == 0) &&
1252 (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) { 1304 (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) {
1253 scsi_scan_target(&rphy->dev, parent->port_identifier, 1305 scsi_scan_target(&rphy->dev, 0,
1254 rphy->scsi_target_id, lun, 1); 1306 rphy->scsi_target_id, lun, 1);
1255 } 1307 }
1256 } 1308 }
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3225d31449e1..98bd3aab9739 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -502,8 +502,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
502 SCpnt->cmnd[4] = (unsigned char) this_count; 502 SCpnt->cmnd[4] = (unsigned char) this_count;
503 SCpnt->cmnd[5] = 0; 503 SCpnt->cmnd[5] = 0;
504 } 504 }
505 SCpnt->request_bufflen = SCpnt->bufflen = 505 SCpnt->request_bufflen = this_count * sdp->sector_size;
506 this_count * sdp->sector_size;
507 506
508 /* 507 /*
509 * We shouldn't disconnect in the middle of a sector, so with a dumb 508 * We shouldn't disconnect in the middle of a sector, so with a dumb
diff --git a/drivers/scsi/seagate.c b/drivers/scsi/seagate.c
index 3f312a84c6a7..2679ea8bff1a 100644
--- a/drivers/scsi/seagate.c
+++ b/drivers/scsi/seagate.c
@@ -1002,7 +1002,7 @@ connect_loop:
1002 } 1002 }
1003#endif 1003#endif
1004 1004
1005 buffer = (struct scatterlist *) SCint->buffer; 1005 buffer = (struct scatterlist *) SCint->request_buffer;
1006 len = buffer->length; 1006 len = buffer->length;
1007 data = page_address(buffer->page) + buffer->offset; 1007 data = page_address(buffer->page) + buffer->offset;
1008 } else { 1008 } else {
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index fd94408577e5..fae6e95a6298 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -360,7 +360,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt)
360 "mismatch count %d, bytes %d\n", 360 "mismatch count %d, bytes %d\n",
361 size, SCpnt->request_bufflen); 361 size, SCpnt->request_bufflen);
362 if (SCpnt->request_bufflen > size) 362 if (SCpnt->request_bufflen > size)
363 SCpnt->request_bufflen = SCpnt->bufflen = size; 363 SCpnt->request_bufflen = size;
364 } 364 }
365 } 365 }
366 366
@@ -387,8 +387,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt)
387 387
388 if (this_count > 0xffff) { 388 if (this_count > 0xffff) {
389 this_count = 0xffff; 389 this_count = 0xffff;
390 SCpnt->request_bufflen = SCpnt->bufflen = 390 SCpnt->request_bufflen = this_count * s_size;
391 this_count * s_size;
392 } 391 }
393 392
394 SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; 393 SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 756ceb93ddc8..7f669b600677 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -368,7 +368,7 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
 			       SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2],
 			       SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]);
 		if (cmdstatp->have_sense)
-			__scsi_print_sense("st", SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
+			__scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
 	} ) /* end DEB */
 	if (!debugging) { /* Abnormal conditions for tape */
 		if (!cmdstatp->have_sense)
@@ -384,9 +384,8 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
 		     scode != VOLUME_OVERFLOW &&
 		     SRpnt->cmd[0] != MODE_SENSE &&
 		     SRpnt->cmd[0] != TEST_UNIT_READY) {
-			printk(KERN_WARNING "%s: Error with sense data: ", name);
-			__scsi_print_sense("st", SRpnt->sense,
-					   SCSI_SENSE_BUFFERSIZE);
+
+			__scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
 		}
 	}
 
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
index 2ebe0d663899..2f8073b73bf3 100644
--- a/drivers/scsi/sun3_NCR5380.c
+++ b/drivers/scsi/sun3_NCR5380.c
@@ -517,7 +517,7 @@ static __inline__ void initialize_SCp(Scsi_Cmnd *cmd)
 	 */
 
 	if (cmd->use_sg) {
-		cmd->SCp.buffer = (struct scatterlist *) cmd->buffer;
+		cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
 		cmd->SCp.buffers_residual = cmd->use_sg - 1;
 		cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer);
 		cmd->SCp.this_residual = cmd->SCp.buffer->length;
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 1f328cae5c05..6b60536ac92b 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -347,7 +347,7 @@ static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp)
 static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp)
 {
 	int sz = sp->use_sg - 1;
-	struct scatterlist *sg = (struct scatterlist *)sp->buffer;
+	struct scatterlist *sg = (struct scatterlist *)sp->request_buffer;
 
 	while(sz >= 0) {
 		dvma_unmap((char *)sg[sz].dma_address);
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 680f38ab60d8..2083454db511 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -373,7 +373,7 @@ wd33c93_queuecommand(struct scsi_cmnd *cmd,
 	 */
 
 	if (cmd->use_sg) {
-		cmd->SCp.buffer = (struct scatterlist *) cmd->buffer;
+		cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
 		cmd->SCp.buffers_residual = cmd->use_sg - 1;
 		cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
 			cmd->SCp.buffer->offset;
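
The seagate, sun3_NCR5380, sun3x_esp and wd33c93 hunks above all replace the old cmd->buffer alias with cmd->request_buffer, and the sd.c/sr.c hunks drop the matching bufflen alias. A minimal user-space sketch of the access pattern these conversions converge on, using simplified stand-in types rather than the kernel's real scsi_cmnd/scatterlist definitions: when use_sg is non-zero, request_buffer points to an array of scatterlist entries, otherwise it is the linear data buffer itself.

/* Sketch only: simplified stand-ins, not the kernel's struct layouts. */
#include <stdio.h>
#include <stddef.h>

struct sg_entry {                      /* stand-in for struct scatterlist */
	void   *addr;
	size_t  length;
};

struct cmd {                            /* stand-in for struct scsi_cmnd */
	unsigned short use_sg;          /* number of sg entries, 0 = linear */
	void          *request_buffer;  /* sg array when use_sg, else data */
	size_t         request_bufflen; /* total transfer length in bytes */
};

static void walk_transfer(const struct cmd *cmd)
{
	if (cmd->use_sg) {
		const struct sg_entry *sg = cmd->request_buffer;
		size_t i;

		for (i = 0; i < cmd->use_sg; i++)
			printf("segment %zu: %zu bytes\n", i, sg[i].length);
	} else {
		printf("linear buffer of %zu bytes\n", cmd->request_bufflen);
	}
}

int main(void)
{
	char data[512];
	struct sg_entry sg[2] = { { data, 256 }, { data + 256, 256 } };
	struct cmd linear = { 0, data, sizeof(data) };
	struct cmd sgcmd  = { 2, sg,   sizeof(data) };

	walk_transfer(&linear);
	walk_transfer(&sgcmd);
	return 0;
}
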
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index 979497f108c8..dc673e1b6fd9 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -1047,12 +1047,13 @@ static int __devinit sab_probe(struct of_device *op, const struct of_device_id *
 	up = &sunsab_ports[inst * 2];
 
 	err = sunsab_init_one(&up[0], op,
-			      sizeof(union sab82532_async_regs),
+			      0,
 			      (inst * 2) + 0);
 	if (err)
 		return err;
 
-	err = sunsab_init_one(&up[1], op, 0,
+	err = sunsab_init_one(&up[1], op,
+			      sizeof(union sab82532_async_regs),
 			      (inst * 2) + 1);
 	if (err) {
 		of_iounmap(up[0].port.membase,
@@ -1117,7 +1118,7 @@ static int __init sunsab_init(void)
 	int err;
 
 	num_channels = 0;
-	for_each_node_by_name(dp, "su")
+	for_each_node_by_name(dp, "se")
 		num_channels += 2;
 	for_each_node_by_name(dp, "serial") {
 		if (of_device_is_compatible(dp, "sab82532"))
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index a1456d9352cb..47bc3d57e019 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -68,9 +68,6 @@ static int num_sunzilog;
 #define NUM_SUNZILOG	num_sunzilog
 #define NUM_CHANNELS	(NUM_SUNZILOG * 2)
 
-#define KEYBOARD_LINE 0x2
-#define MOUSE_LINE    0x3
-
 #define ZS_CLOCK		4915200 /* Zilog input clock rate. */
 #define ZS_CLOCK_DIVISOR	16      /* Divisor this driver uses. */
 
@@ -1225,12 +1222,10 @@ static void __init sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channe
 {
 	int baud, brg;
 
-	if (channel == KEYBOARD_LINE) {
-		up->flags |= SUNZILOG_FLAG_CONS_KEYB;
+	if (up->flags & SUNZILOG_FLAG_CONS_KEYB) {
 		up->cflag = B1200 | CS8 | CLOCAL | CREAD;
 		baud = 1200;
 	} else {
-		up->flags |= SUNZILOG_FLAG_CONS_MOUSE;
 		up->cflag = B4800 | CS8 | CLOCAL | CREAD;
 		baud = 4800;
 	}
@@ -1243,14 +1238,14 @@ static void __init sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channe
 }
 
 #ifdef CONFIG_SERIO
-static void __init sunzilog_register_serio(struct uart_sunzilog_port *up, int channel)
+static void __init sunzilog_register_serio(struct uart_sunzilog_port *up)
 {
 	struct serio *serio = &up->serio;
 
 	serio->port_data = up;
 
 	serio->id.type = SERIO_RS232;
-	if (channel == KEYBOARD_LINE) {
+	if (up->flags & SUNZILOG_FLAG_CONS_KEYB) {
 		serio->id.proto = SERIO_SUNKBD;
 		strlcpy(serio->name, "zskbd", sizeof(serio->name));
 	} else {
@@ -1259,7 +1254,8 @@ static void __init sunzilog_register_serio(struct uart_sunzilog_port *up, int ch
 		strlcpy(serio->name, "zsms", sizeof(serio->name));
 	}
 	strlcpy(serio->phys,
-		(channel == KEYBOARD_LINE ? "zs/serio0" : "zs/serio1"),
+		((up->flags & SUNZILOG_FLAG_CONS_KEYB) ?
+		 "zs/serio0" : "zs/serio1"),
 		sizeof(serio->phys));
 
 	serio->write = sunzilog_serio_write;
@@ -1286,8 +1282,8 @@ static void __init sunzilog_init_hw(struct uart_sunzilog_port *up)
 		(void) read_zsreg(channel, R0);
 	}
 
-	if (up->port.line == KEYBOARD_LINE ||
-	    up->port.line == MOUSE_LINE) {
+	if (up->flags & (SUNZILOG_FLAG_CONS_KEYB |
+			 SUNZILOG_FLAG_CONS_MOUSE)) {
 		sunzilog_init_kbdms(up, up->port.line);
 		up->curregs[R9] |= (NV | MIE);
 		write_zsreg(channel, R9, up->curregs[R9]);
@@ -1313,37 +1309,26 @@ static void __init sunzilog_init_hw(struct uart_sunzilog_port *up)
 	spin_unlock_irqrestore(&up->port.lock, flags);
 
 #ifdef CONFIG_SERIO
-	if (up->port.line == KEYBOARD_LINE || up->port.line == MOUSE_LINE)
-		sunzilog_register_serio(up, up->port.line);
+	if (up->flags & (SUNZILOG_FLAG_CONS_KEYB |
+			 SUNZILOG_FLAG_CONS_MOUSE))
+		sunzilog_register_serio(up);
 #endif
 }
 
-static int __devinit zs_get_instance(struct device_node *dp)
-{
-	int ret;
-
-	ret = of_getintprop_default(dp, "slave", -1);
-	if (ret != -1)
-		return ret;
-
-	if (of_find_property(dp, "keyboard", NULL))
-		ret = 1;
-	else
-		ret = 0;
-
-	return ret;
-}
-
 static int zilog_irq = -1;
 
-static int __devinit zs_probe(struct of_device *dev, const struct of_device_id *match)
+static int __devinit zs_probe(struct of_device *op, const struct of_device_id *match)
 {
-	struct of_device *op = to_of_device(&dev->dev);
+	static int inst;
 	struct uart_sunzilog_port *up;
 	struct zilog_layout __iomem *rp;
-	int inst = zs_get_instance(dev->node);
+	int keyboard_mouse;
 	int err;
 
+	keyboard_mouse = 0;
+	if (of_find_property(op->node, "keyboard", NULL))
+		keyboard_mouse = 1;
+
 	sunzilog_chip_regs[inst] = of_ioremap(&op->resource[0], 0,
 					      sizeof(struct zilog_layout),
 					      "zs");
@@ -1352,16 +1337,8 @@ static int __devinit zs_probe(struct of_device *dev, const struct of_device_id *
 
 	rp = sunzilog_chip_regs[inst];
 
-	if (zilog_irq == -1) {
+	if (zilog_irq == -1)
 		zilog_irq = op->irqs[0];
-		err = request_irq(zilog_irq, sunzilog_interrupt, IRQF_SHARED,
-				  "zs", sunzilog_irq_chain);
-		if (err) {
-			of_iounmap(rp, sizeof(struct zilog_layout));
-
-			return err;
-		}
-	}
 
 	up = &sunzilog_port_table[inst * 2];
 
@@ -1378,7 +1355,7 @@ static int __devinit zs_probe(struct of_device *dev, const struct of_device_id *
 	up[0].port.line = (inst * 2) + 0;
 	up[0].port.dev = &op->dev;
 	up[0].flags |= SUNZILOG_FLAG_IS_CHANNEL_A;
-	if (inst == 1)
+	if (keyboard_mouse)
 		up[0].flags |= SUNZILOG_FLAG_CONS_KEYB;
 	sunzilog_init_hw(&up[0]);
 
@@ -1395,11 +1372,11 @@ static int __devinit zs_probe(struct of_device *dev, const struct of_device_id *
 	up[1].port.line = (inst * 2) + 1;
 	up[1].port.dev = &op->dev;
 	up[1].flags |= 0;
-	if (inst == 1)
+	if (keyboard_mouse)
 		up[1].flags |= SUNZILOG_FLAG_CONS_MOUSE;
 	sunzilog_init_hw(&up[1]);
 
-	if (inst != 1) {
+	if (!keyboard_mouse) {
 		err = uart_add_one_port(&sunzilog_reg, &up[0].port);
 		if (err) {
 			of_iounmap(rp, sizeof(struct zilog_layout));
@@ -1411,9 +1388,18 @@ static int __devinit zs_probe(struct of_device *dev, const struct of_device_id *
 			of_iounmap(rp, sizeof(struct zilog_layout));
 			return err;
 		}
+	} else {
+		printk(KERN_INFO "%s: Keyboard at MMIO %lx (irq = %d) "
+		       "is a zs\n",
+		       op->dev.bus_id, up[0].port.mapbase, op->irqs[0]);
+		printk(KERN_INFO "%s: Mouse at MMIO %lx (irq = %d) "
+		       "is a zs\n",
+		       op->dev.bus_id, up[1].port.mapbase, op->irqs[0]);
 	}
 
-	dev_set_drvdata(&dev->dev, &up[0]);
+	dev_set_drvdata(&op->dev, &up[0]);
+
+	inst++;
 
 	return 0;
 }
@@ -1462,36 +1448,65 @@ static struct of_platform_driver zs_driver = {
 static int __init sunzilog_init(void)
 {
 	struct device_node *dp;
-	int err;
+	int err, uart_count;
+	int num_keybms;
 
 	NUM_SUNZILOG = 0;
-	for_each_node_by_name(dp, "zs")
+	num_keybms = 0;
+	for_each_node_by_name(dp, "zs") {
 		NUM_SUNZILOG++;
+		if (of_find_property(dp, "keyboard", NULL))
+			num_keybms++;
+	}
 
+	uart_count = 0;
 	if (NUM_SUNZILOG) {
 		int uart_count;
 
 		err = sunzilog_alloc_tables();
 		if (err)
-			return err;
+			goto out;
 
-		/* Subtract 1 for keyboard, 1 for mouse. */
-		uart_count = (NUM_SUNZILOG * 2) - 2;
+		uart_count = (NUM_SUNZILOG * 2) - (2 * num_keybms);
 
 		sunzilog_reg.nr = uart_count;
 		sunzilog_reg.minor = sunserial_current_minor;
 		err = uart_register_driver(&sunzilog_reg);
-		if (err) {
-			sunzilog_free_tables();
-			return err;
-		}
+		if (err)
+			goto out_free_tables;
+
 		sunzilog_reg.tty_driver->name_base = sunzilog_reg.minor - 64;
 		sunzilog_reg.cons = SUNZILOG_CONSOLE();
 
 		sunserial_current_minor += uart_count;
 	}
 
-	return of_register_driver(&zs_driver, &of_bus_type);
+	err = of_register_driver(&zs_driver, &of_bus_type);
+	if (err)
+		goto out_unregister_uart;
+
+	if (zilog_irq != -1) {
+		err = request_irq(zilog_irq, sunzilog_interrupt, IRQF_SHARED,
+				  "zs", sunzilog_irq_chain);
+		if (err)
+			goto out_unregister_driver;
+	}
+
+out:
+	return err;
+
+out_unregister_driver:
+	of_unregister_driver(&zs_driver);
+
+out_unregister_uart:
+	if (NUM_SUNZILOG) {
+		uart_unregister_driver(&sunzilog_reg);
+		sunzilog_reg.cons = NULL;
+	}
+
+out_free_tables:
+	sunzilog_free_tables();
+	goto out;
 }
 
 static void __exit sunzilog_exit(void)
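
The reworked sunzilog_init() above switches to goto-based unwinding for its error paths: each failing step jumps to a label that undoes only the steps that already succeeded, in reverse order, and then falls back to the common return. A self-contained sketch of that pattern, with hypothetical step/undo helpers standing in for the real table allocation, uart_register_driver(), of_register_driver() and request_irq() calls:

/* Sketch only: hypothetical step/undo helpers, not the driver's real calls. */
#include <stdio.h>

static int step_a(void) { return 0; }   /* e.g. allocate tables */
static int step_b(void) { return 0; }   /* e.g. register uart driver */
static int step_c(void) { return -1; }  /* e.g. request IRQ; fails here */
static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

static int init(void)
{
	int err;

	err = step_a();
	if (err)
		goto out;

	err = step_b();
	if (err)
		goto out_undo_a;

	err = step_c();
	if (err)
		goto out_undo_b;

out:
	return err;

out_undo_b:
	undo_b();
out_undo_a:
	undo_a();
	goto out;
}

int main(void)
{
	printf("init() returned %d\n", init());
	return 0;
}
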