Diffstat (limited to 'drivers')
-rw-r--r--  drivers/md/dm-mpath.c | 13
-rw-r--r--  drivers/message/fusion/mptbase.c | 24
-rw-r--r--  drivers/message/fusion/mptctl.c | 4
-rw-r--r--  drivers/message/fusion/mptfc.c | 8
-rw-r--r--  drivers/message/fusion/mptlan.c | 26
-rw-r--r--  drivers/message/fusion/mptsas.c | 54
-rw-r--r--  drivers/message/fusion/mptscsih.c | 4
-rw-r--r--  drivers/scsi/3w-9xxx.c | 40
-rw-r--r--  drivers/scsi/3w-9xxx.h | 9
-rw-r--r--  drivers/scsi/Kconfig | 1
-rw-r--r--  drivers/scsi/Makefile | 2
-rw-r--r--  drivers/scsi/advansys.c | 2
-rw-r--r--  drivers/scsi/aha152x.c | 12
-rw-r--r--  drivers/scsi/aic94xx/aic94xx.h | 4
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_hwi.c | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_scb.c | 46
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_task.c | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_tmf.c | 18
-rw-r--r--  drivers/scsi/arm/fas216.c | 4
-rw-r--r--  drivers/scsi/ch.c | 1
-rw-r--r--  drivers/scsi/device_handler/Kconfig | 8
-rw-r--r--  drivers/scsi/device_handler/Makefile | 1
-rw-r--r--  drivers/scsi/device_handler/scsi_dh.c | 446
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 802
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c | 644
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c | 348
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 262
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 204
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h | 44
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvstgt.c | 2
-rw-r--r--  drivers/scsi/imm.c | 2
-rw-r--r--  drivers/scsi/ipr.h | 6
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 16
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 12
-rw-r--r--  drivers/scsi/libsas/sas_port.c | 4
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 30
-rw-r--r--  drivers/scsi/libsrp.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 6
-rw-r--r--  drivers/scsi/megaraid/mega_common.h | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 16
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.c | 4
-rw-r--r--  drivers/scsi/nsp32.c | 4
-rw-r--r--  drivers/scsi/nsp32_debug.c | 2
-rw-r--r--  drivers/scsi/pcmcia/nsp_cs.c | 4
-rw-r--r--  drivers/scsi/pcmcia/nsp_debug.c | 2
-rw-r--r--  drivers/scsi/ppa.c | 2
-rw-r--r--  drivers/scsi/qla1280.c | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 118
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 133
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 14
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 9
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 16
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 94
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 48
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 4
-rw-r--r--  drivers/scsi/scsi.c | 55
-rw-r--r--  drivers/scsi/scsi_debug.c | 12
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 6
-rw-r--r--  drivers/scsi/scsi_error.c | 34
-rw-r--r--  drivers/scsi/scsi_lib.c | 55
-rw-r--r--  drivers/scsi/scsi_netlink.c | 8
-rw-r--r--  drivers/scsi/scsi_priv.h | 1
-rw-r--r--  drivers/scsi/scsi_proc.c | 4
-rw-r--r--  drivers/scsi/scsi_scan.c | 13
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 4
-rw-r--r--  drivers/scsi/scsi_tgt_priv.h | 2
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 12
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 4
-rw-r--r--  drivers/scsi/sd.c | 291
-rw-r--r--  drivers/scsi/sd.h | 54
-rw-r--r--  drivers/scsi/sd_dif.c | 538
-rw-r--r--  drivers/scsi/st.c | 11
-rw-r--r--  drivers/scsi/stex.c | 2
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_hipd.c | 2
-rw-r--r--  drivers/scsi/tmscsim.c | 8
-rw-r--r--  drivers/scsi/wd7000.c | 8
-rw-r--r--  drivers/scsi/zalon.c | 8
85 files changed, 3743 insertions, 1025 deletions
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index fea966d66f98..71dd65aa31b6 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -147,9 +147,12 @@ static struct priority_group *alloc_priority_group(void)
 static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 {
 	struct pgpath *pgpath, *tmp;
+	struct multipath *m = ti->private;
 
 	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
 		list_del(&pgpath->list);
+		if (m->hw_handler_name)
+			scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
 		dm_put_device(ti, pgpath->path.dev);
 		free_pgpath(pgpath);
 	}
@@ -548,6 +551,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
 {
 	int r;
 	struct pgpath *p;
+	struct multipath *m = ti->private;
 
 	/* we need at least a path arg */
 	if (as->argc < 1) {
@@ -566,6 +570,15 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
 		goto bad;
 	}
 
+	if (m->hw_handler_name) {
+		r = scsi_dh_attach(bdev_get_queue(p->path.dev->bdev),
+				   m->hw_handler_name);
+		if (r < 0) {
+			dm_put_device(ti, p->path.dev);
+			goto bad;
+		}
+	}
+
 	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
 	if (r) {
 		dm_put_device(ti, p->path.dev);
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 34402c47027e..d6a0074b9dc3 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -273,12 +273,12 @@ mpt_fault_reset_work(struct work_struct *work)
 	ioc_raw_state = mpt_GetIocState(ioc, 0);
 	if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
 		printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n",
 		    ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
 		printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
-		    ioc->name, __FUNCTION__);
+		    ioc->name, __func__);
 		rc = mpt_HardResetHandler(ioc, CAN_SLEEP);
 		printk(MYIOC_s_WARN_FMT "%s: HardReset: %s\n", ioc->name,
-		    __FUNCTION__, (rc == 0) ? "success" : "failed");
+		    __func__, (rc == 0) ? "success" : "failed");
 		ioc_raw_state = mpt_GetIocState(ioc, 0);
 		if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT)
 			printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
@@ -356,7 +356,7 @@ mpt_turbo_reply(MPT_ADAPTER *ioc, u32 pa)
 	if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
 		MptCallbacks[cb_idx] == NULL) {
 		printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
-				__FUNCTION__, ioc->name, cb_idx);
+				__func__, ioc->name, cb_idx);
 		goto out;
 	}
 
@@ -420,7 +420,7 @@ mpt_reply(MPT_ADAPTER *ioc, u32 pa)
 	if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
 		MptCallbacks[cb_idx] == NULL) {
 		printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
-				__FUNCTION__, ioc->name, cb_idx);
+				__func__, ioc->name, cb_idx);
 		freeme = 0;
 		goto out;
 	}
@@ -2434,7 +2434,7 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
 
 	if (ioc->cached_fw != NULL) {
 		ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: Pushing FW onto "
-		    "adapter\n", __FUNCTION__, ioc->name));
+		    "adapter\n", __func__, ioc->name));
 		if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)
 		    ioc->cached_fw, CAN_SLEEP)) < 0) {
 			printk(MYIOC_s_WARN_FMT
@@ -3693,7 +3693,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
 
 	if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) {
 		drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset "
-			"address=%p\n", ioc->name, __FUNCTION__,
+			"address=%p\n", ioc->name, __func__,
 			&ioc->chip->Doorbell, &ioc->chip->Reset_1078));
 		CHIPREG_WRITE32(&ioc->chip->Reset_1078, 0x07);
 		if (sleepFlag == CAN_SLEEP)
@@ -4742,12 +4742,12 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
 		break;
 	}
 
-	printk("%s: persist_opcode=%x\n",__FUNCTION__, persist_opcode);
+	printk("%s: persist_opcode=%x\n",__func__, persist_opcode);
 
 	/* Get a MF for this command.
 	 */
 	if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
-		printk("%s: no msg frames!\n",__FUNCTION__);
+		printk("%s: no msg frames!\n",__func__);
 		return -1;
 	}
 
@@ -4771,13 +4771,13 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
 		(SasIoUnitControlReply_t *)ioc->persist_reply_frame;
 	if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) {
 		printk("%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
-		    __FUNCTION__,
+		    __func__,
 		    sasIoUnitCntrReply->IOCStatus,
 		    sasIoUnitCntrReply->IOCLogInfo);
 		return -1;
 	}
 
-	printk("%s: success\n",__FUNCTION__);
+	printk("%s: success\n",__func__);
 	return 0;
 }
 
@@ -5784,7 +5784,7 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
 
 	if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
 		dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
-		    ioc->name,__FUNCTION__));
+		    ioc->name,__func__));
 		return -1;
 	}
 
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index a5920423e2b2..f5233f3d9eff 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -505,7 +505,7 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
 	event = le32_to_cpu(pEvReply->Event) & 0xFF;
 
 	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s() called\n",
-	    ioc->name, __FUNCTION__));
+	    ioc->name, __func__));
 	if(async_queue == NULL)
 		return 1;
 
@@ -2482,7 +2482,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
 	 */
 	if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) {
 		dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
-		    ioc->name,__FUNCTION__));
+		    ioc->name,__func__));
 		goto out;
 	}
 
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index b36cae9ec6db..c3c24fdf9fb6 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -231,28 +231,28 @@ static int
 mptfc_abort(struct scsi_cmnd *SCpnt)
 {
 	return
-	    mptfc_block_error_handler(SCpnt, mptscsih_abort, __FUNCTION__);
+	    mptfc_block_error_handler(SCpnt, mptscsih_abort, __func__);
 }
 
 static int
 mptfc_dev_reset(struct scsi_cmnd *SCpnt)
 {
 	return
-	    mptfc_block_error_handler(SCpnt, mptscsih_dev_reset, __FUNCTION__);
+	    mptfc_block_error_handler(SCpnt, mptscsih_dev_reset, __func__);
 }
 
 static int
 mptfc_bus_reset(struct scsi_cmnd *SCpnt)
 {
 	return
-	    mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __FUNCTION__);
+	    mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __func__);
 }
 
 static int
 mptfc_host_reset(struct scsi_cmnd *SCpnt)
 {
 	return
-	    mptfc_block_error_handler(SCpnt, mptscsih_host_reset, __FUNCTION__);
+	    mptfc_block_error_handler(SCpnt, mptscsih_host_reset, __func__);
 }
 
 static void
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index d709d92b7b30..a1abf95cf751 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -610,7 +610,7 @@ mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
 
 	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
 			IOC_AND_NETDEV_NAMES_s_s(dev),
-			__FUNCTION__, sent));
+			__func__, sent));
 
 	priv->SendCtl[ctx].skb = NULL;
 	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
@@ -676,7 +676,7 @@ mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
 
 	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
 			IOC_AND_NETDEV_NAMES_s_s(dev),
-			__FUNCTION__, sent));
+			__func__, sent));
 
 	priv->SendCtl[ctx].skb = NULL;
 	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
@@ -715,7 +715,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
 	u16 cur_naa = 0x1000;
 
 	dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
-			__FUNCTION__, skb));
+			__func__, skb));
 
 	spin_lock_irqsave(&priv->txfidx_lock, flags);
 	if (priv->mpt_txfidx_tail < 0) {
@@ -723,7 +723,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
 		spin_unlock_irqrestore(&priv->txfidx_lock, flags);
 
 		printk (KERN_ERR "%s: no tx context available: %u\n",
-			__FUNCTION__, priv->mpt_txfidx_tail);
+			__func__, priv->mpt_txfidx_tail);
 		return 1;
 	}
 
@@ -733,7 +733,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
 		spin_unlock_irqrestore(&priv->txfidx_lock, flags);
 
 		printk (KERN_ERR "%s: Unable to alloc request frame\n",
-			__FUNCTION__);
+			__func__);
 		return 1;
 	}
 
@@ -1208,7 +1208,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
 
 	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
 			IOC_AND_NETDEV_NAMES_s_s(dev),
-			__FUNCTION__, buckets, curr));
+			__func__, buckets, curr));
 
 	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
 		(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
@@ -1217,9 +1217,9 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
 		mf = mpt_get_msg_frame(LanCtx, mpt_dev);
 		if (mf == NULL) {
 			printk (KERN_ERR "%s: Unable to alloc request frame\n",
-				__FUNCTION__);
+				__func__);
 			dioprintk((KERN_ERR "%s: %u buckets remaining\n",
-				__FUNCTION__, buckets));
+				__func__, buckets));
 			goto out;
 		}
 		pRecvReq = (LANReceivePostRequest_t *) mf;
@@ -1244,7 +1244,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
 			spin_lock_irqsave(&priv->rxfidx_lock, flags);
 			if (priv->mpt_rxfidx_tail < 0) {
 				printk (KERN_ERR "%s: Can't alloc context\n",
-					__FUNCTION__);
+					__func__);
 				spin_unlock_irqrestore(&priv->rxfidx_lock,
 						       flags);
 				break;
@@ -1267,7 +1267,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
 			if (skb == NULL) {
 				printk (KERN_WARNING
 					MYNAM "/%s: Can't alloc skb\n",
-					__FUNCTION__);
+					__func__);
 				priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
 				spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
 				break;
@@ -1305,7 +1305,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
 
 		if (pSimple == NULL) {
 /**/			printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
-/**/				__FUNCTION__);
+/**/				__func__);
 			mpt_free_msg_frame(mpt_dev, mf);
 			goto out;
 		}
@@ -1329,9 +1329,9 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
 
 out:
 	dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
-		  __FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
+		  __func__, buckets, atomic_read(&priv->buckets_out)));
 	dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
-		__FUNCTION__, priv->total_posted, priv->total_received));
+		__func__, priv->total_posted, priv->total_received));
 
 	clear_bit(0, &priv->post_buckets_active);
 }
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index b1147aa7afde..12b732512e57 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -300,7 +300,7 @@ mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_detai
 	phy_info = port_info->phy_info;
 
 	dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: [%p]: num_phys=%02d "
-	    "bitmask=0x%016llX\n", ioc->name, __FUNCTION__, port_details,
+	    "bitmask=0x%016llX\n", ioc->name, __func__, port_details,
 	    port_details->num_phys, (unsigned long long)
 	    port_details->phy_bitmask));
 
@@ -411,7 +411,7 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
 			 */
 			dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 				"%s: [%p]: deleting phy = %d\n",
-				ioc->name, __FUNCTION__, port_details, i));
+				ioc->name, __func__, port_details, i));
 			port_details->num_phys--;
 			port_details->phy_bitmask &= ~ (1 << phy_info->phy_id);
 			memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
@@ -497,7 +497,7 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
 			continue;
 		dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 		    "%s: [%p]: phy_id=%02d num_phys=%02d "
-		    "bitmask=0x%016llX\n", ioc->name, __FUNCTION__,
+		    "bitmask=0x%016llX\n", ioc->name, __func__,
 		    port_details, i, port_details->num_phys,
 		    (unsigned long long)port_details->phy_bitmask));
 		dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\t\tport = %p rphy=%p\n",
@@ -553,7 +553,7 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
 
 	if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
 		dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames @%d!!\n",
-		    ioc->name,__FUNCTION__, __LINE__));
+		    ioc->name,__func__, __LINE__));
 		return 0;
 	}
 
@@ -606,7 +606,7 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
 	    GFP_ATOMIC);
 	if (!target_reset_list) {
 		dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
-		    ioc->name,__FUNCTION__, __LINE__));
+		    ioc->name,__func__, __LINE__));
 		return;
 	}
 
@@ -673,7 +673,7 @@ mptsas_dev_reset_complete(MPT_ADAPTER *ioc)
 	ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
 	if (!ev) {
 		dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
-		    ioc->name,__FUNCTION__, __LINE__));
+		    ioc->name,__func__, __LINE__));
 		return;
 	}
 
@@ -1183,7 +1183,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
 	reply = (SasIoUnitControlReply_t *)ioc->sas_mgmt.reply;
 	if (reply->IOCStatus != MPI_IOCSTATUS_SUCCESS) {
 		printk(MYIOC_s_INFO_FMT "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
-		    ioc->name, __FUNCTION__, reply->IOCStatus, reply->IOCLogInfo);
+		    ioc->name, __func__, reply->IOCStatus, reply->IOCLogInfo);
 		error = -ENXIO;
 		goto out_unlock;
 	}
@@ -1270,14 +1270,14 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 
 	if (!rsp) {
 		printk(MYIOC_s_ERR_FMT "%s: the smp response space is missing\n",
-		    ioc->name, __FUNCTION__);
+		    ioc->name, __func__);
 		return -EINVAL;
 	}
 
 	/* do we need to support multiple segments? */
 	if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
 		printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
-		    ioc->name, __FUNCTION__, req->bio->bi_vcnt, req->data_len,
+		    ioc->name, __func__, req->bio->bi_vcnt, req->data_len,
 		    rsp->bio->bi_vcnt, rsp->data_len);
 		return -EINVAL;
 	}
@@ -1343,7 +1343,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 
 	timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
 	if (!timeleft) {
-		printk(MYIOC_s_ERR_FMT "%s: smp timeout!\n", ioc->name, __FUNCTION__);
+		printk(MYIOC_s_ERR_FMT "%s: smp timeout!\n", ioc->name, __func__);
 		/* On timeout reset the board */
 		mpt_HardResetHandler(ioc, CAN_SLEEP);
 		ret = -ETIMEDOUT;
@@ -1361,7 +1361,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 		rsp->data_len -= smprep->ResponseDataLength;
 	} else {
 		printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n",
-		    ioc->name, __FUNCTION__);
+		    ioc->name, __func__);
 		ret = -ENXIO;
 	}
 unmap:
@@ -2006,7 +2006,7 @@ static int mptsas_probe_one_phy(struct device *dev,
 	if (error) {
 		dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 			"%s: exit at line=%d\n", ioc->name,
-			__FUNCTION__, __LINE__));
+			__func__, __LINE__));
 		goto out;
 	}
 	mptsas_set_port(ioc, phy_info, port);
@@ -2076,7 +2076,7 @@ static int mptsas_probe_one_phy(struct device *dev,
 		if (!rphy) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			goto out;
 		}
 
@@ -2085,7 +2085,7 @@ static int mptsas_probe_one_phy(struct device *dev,
 		if (error) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			sas_rphy_free(rphy);
 			goto out;
 		}
@@ -2613,7 +2613,7 @@ mptsas_hotplug_work(struct work_struct *work)
 		    (ev->channel << 8) + ev->id)) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break;
 		}
 		phy_info = mptsas_find_phyinfo_by_sas_address(
@@ -2633,20 +2633,20 @@ mptsas_hotplug_work(struct work_struct *work)
 		if (!phy_info){
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break;
 		}
 		if (!phy_info->port_details) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break;
 		}
 		rphy = mptsas_get_rphy(phy_info);
 		if (!rphy) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break;
 		}
 
@@ -2654,7 +2654,7 @@ mptsas_hotplug_work(struct work_struct *work)
 		if (!port) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break;
 		}
 
@@ -2665,7 +2665,7 @@ mptsas_hotplug_work(struct work_struct *work)
 		if (!vtarget) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break;
 		}
 
@@ -2720,7 +2720,7 @@ mptsas_hotplug_work(struct work_struct *work)
 		    (ev->channel << 8) + ev->id)) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break;
 		}
 
@@ -2732,7 +2732,7 @@ mptsas_hotplug_work(struct work_struct *work)
 		if (!phy_info || !phy_info->port_details) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break;
 		}
 
@@ -2744,7 +2744,7 @@ mptsas_hotplug_work(struct work_struct *work)
 		if (!vtarget) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break;
 		}
 		/*
@@ -2767,7 +2767,7 @@ mptsas_hotplug_work(struct work_struct *work)
 		if (mptsas_get_rphy(phy_info)) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			if (ev->channel) printk("%d\n", __LINE__);
 			break;
 		}
@@ -2776,7 +2776,7 @@ mptsas_hotplug_work(struct work_struct *work)
 		if (!port) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break;
 		}
 		memcpy(&phy_info->attached, &sas_device,
@@ -2801,7 +2801,7 @@ mptsas_hotplug_work(struct work_struct *work)
 		if (!rphy) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			break; /* non-fatal: an rphy can be added later */
 		}
 
@@ -2809,7 +2809,7 @@ mptsas_hotplug_work(struct work_struct *work)
 		if (sas_rphy_add(rphy)) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 				"%s: exit at line=%d\n", ioc->name,
-				__FUNCTION__, __LINE__));
+				__func__, __LINE__));
 			sas_rphy_free(rphy);
 			break;
 		}
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index d142b6b4b976..9f9354fd3516 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -461,7 +461,7 @@ mptscsih_issue_sep_command(MPT_ADAPTER *ioc, VirtTarget *vtarget,
 
 	if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
 		dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: no msg frames!!\n",
-		    ioc->name,__FUNCTION__));
+		    ioc->name,__func__));
 		return;
 	}
 
@@ -2187,7 +2187,7 @@ mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *m
 	    (ioc->debug_level & MPT_DEBUG_TM ))
 		printk("%s: ha=%d [%d:%d:0] task_type=0x%02X "
 		"iocstatus=0x%04X\n\tloginfo=0x%08X response_code=0x%02X "
-		"term_cmnds=%d\n", __FUNCTION__, ioc->id, pScsiTmReply->Bus,
+		"term_cmnds=%d\n", __func__, ioc->id, pScsiTmReply->Bus,
 		pScsiTmReply->TargetID, pScsiTmReq->TaskType,
 		le16_to_cpu(pScsiTmReply->IOCStatus),
 		le32_to_cpu(pScsiTmReply->IOCLogInfo),pScsiTmReply->ResponseCode,
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 7045511f9ad2..b92c19bb6876 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -4,7 +4,7 @@
    Written By: Adam Radford <linuxraid@amcc.com>
    Modifications By: Tom Couch <linuxraid@amcc.com>
 
-   Copyright (C) 2004-2007 Applied Micro Circuits Corporation.
+   Copyright (C) 2004-2008 Applied Micro Circuits Corporation.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -71,6 +71,10 @@
                  Add support for 9650SE controllers.
    2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
    2.26.02.010 - Add support for 9690SA controllers.
+   2.26.02.011 - Increase max AENs drained to 256.
+                 Add MSI support and "use_msi" module parameter.
+                 Fix bug in twa_get_param() on 4GB+.
+                 Use pci_resource_len() for ioremap().
 */
 
 #include <linux/module.h>
@@ -95,7 +99,7 @@
 #include "3w-9xxx.h"
 
 /* Globals */
-#define TW_DRIVER_VERSION "2.26.02.010"
+#define TW_DRIVER_VERSION "2.26.02.011"
 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
 static unsigned int twa_device_extension_count;
 static int twa_major = -1;
@@ -107,6 +111,10 @@ MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(TW_DRIVER_VERSION);
 
+static int use_msi = 0;
+module_param(use_msi, int, S_IRUGO);
+MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
+
 /* Function prototypes */
 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
@@ -1038,7 +1046,6 @@ static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int tabl
 	TW_Command_Full *full_command_packet;
 	TW_Command *command_packet;
 	TW_Param_Apache *param;
-	unsigned long param_value;
 	void *retval = NULL;
 
 	/* Setup the command packet */
@@ -1057,9 +1064,8 @@ static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int tabl
 	param->table_id = cpu_to_le16(table_id | 0x8000);
 	param->parameter_id = cpu_to_le16(parameter_id);
 	param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
-	param_value = tw_dev->generic_buffer_phys[request_id];
 
-	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(param_value);
+	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
 	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
 
 	/* Post the command packet to the board */
@@ -2000,7 +2006,7 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
 {
 	struct Scsi_Host *host = NULL;
 	TW_Device_Extension *tw_dev;
-	u32 mem_addr;
+	unsigned long mem_addr, mem_len;
 	int retval = -ENODEV;
 
 	retval = pci_enable_device(pdev);
@@ -2045,13 +2051,16 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
 		goto out_free_device_extension;
 	}
 
-	if (pdev->device == PCI_DEVICE_ID_3WARE_9000)
+	if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
 		mem_addr = pci_resource_start(pdev, 1);
-	else
+		mem_len = pci_resource_len(pdev, 1);
+	} else {
 		mem_addr = pci_resource_start(pdev, 2);
+		mem_len = pci_resource_len(pdev, 2);
+	}
 
 	/* Save base address */
-	tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE);
+	tw_dev->base_addr = ioremap(mem_addr, mem_len);
 	if (!tw_dev->base_addr) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
 		goto out_release_mem_region;
@@ -2086,7 +2095,7 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
 
 	pci_set_drvdata(pdev, host);
 
-	printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%x, IRQ: %d.\n",
+	printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
 	       host->host_no, mem_addr, pdev->irq);
 	printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
 	       host->host_no,
@@ -2097,6 +2106,11 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
 	       le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
 	       TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
 
+	/* Try to enable MSI */
+	if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
+	    !pci_enable_msi(pdev))
+		set_bit(TW_USING_MSI, &tw_dev->flags);
+
 	/* Now setup the interrupt handler */
 	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
 	if (retval) {
@@ -2120,6 +2134,8 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
 	return 0;
 
 out_remove_host:
+	if (test_bit(TW_USING_MSI, &tw_dev->flags))
+		pci_disable_msi(pdev);
 	scsi_remove_host(host);
 out_iounmap:
 	iounmap(tw_dev->base_addr);
@@ -2151,6 +2167,10 @@ static void twa_remove(struct pci_dev *pdev)
 	/* Shutdown the card */
 	__twa_shutdown(tw_dev);
 
+	/* Disable MSI if enabled */
+	if (test_bit(TW_USING_MSI, &tw_dev->flags))
+		pci_disable_msi(pdev);
+
 	/* Free IO remapping */
 	iounmap(tw_dev->base_addr);
 
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index d14a9479e389..1729a8785fea 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -4,7 +4,7 @@
    Written By: Adam Radford <linuxraid@amcc.com>
    Modifications By: Tom Couch <linuxraid@amcc.com>
 
-   Copyright (C) 2004-2007 Applied Micro Circuits Corporation.
+   Copyright (C) 2004-2008 Applied Micro Circuits Corporation.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -319,8 +319,8 @@ static twa_message_type twa_error_table[] = {
 
 /* Compatibility defines */
 #define TW_9000_ARCH_ID 0x5
-#define TW_CURRENT_DRIVER_SRL 30
-#define TW_CURRENT_DRIVER_BUILD 80
+#define TW_CURRENT_DRIVER_SRL 35
+#define TW_CURRENT_DRIVER_BUILD 0
 #define TW_CURRENT_DRIVER_BRANCH 0
 
 /* Phase defines */
@@ -352,8 +352,9 @@ static twa_message_type twa_error_table[] = {
 #define TW_MAX_RESET_TRIES 2
 #define TW_MAX_CMDS_PER_LUN 254
 #define TW_MAX_RESPONSE_DRAIN 256
-#define TW_MAX_AEN_DRAIN 40
+#define TW_MAX_AEN_DRAIN 255
 #define TW_IN_RESET 2
+#define TW_USING_MSI 3
 #define TW_IN_ATTENTION_LOOP 4
 #define TW_MAX_SECTORS 256
 #define TW_AEN_WAIT_TIME 1000
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 26be540d1dd3..c7f06298bd3c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -63,6 +63,7 @@ comment "SCSI support type (disk, tape, CD-ROM)"
63config BLK_DEV_SD 63config BLK_DEV_SD
64 tristate "SCSI disk support" 64 tristate "SCSI disk support"
65 depends on SCSI 65 depends on SCSI
66 select CRC_T10DIF
66 ---help--- 67 ---help---
67 If you want to use SCSI hard disks, Fibre Channel disks, 68 If you want to use SCSI hard disks, Fibre Channel disks,
68 Serial ATA (SATA) or Parallel ATA (PATA) hard disks, 69 Serial ATA (SATA) or Parallel ATA (PATA) hard disks,
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index a8149677de23..72fd5043cfa1 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -151,6 +151,8 @@ scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
151scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o 151scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o
152 152
153sd_mod-objs := sd.o 153sd_mod-objs := sd.o
154sd_mod-$(CONFIG_BLK_DEV_INTEGRITY) += sd_dif.o
155
154sr_mod-objs := sr.o sr_ioctl.o sr_vendor.o 156sr_mod-objs := sr.o sr_ioctl.o sr_vendor.o
155ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \ 157ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
156 := -DCONFIG_NCR53C8XX_PREFETCH -DSCSI_NCR_BIG_ENDIAN \ 158 := -DCONFIG_NCR53C8XX_PREFETCH -DSCSI_NCR_BIG_ENDIAN \
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 8591585e5cc5..218777bfc143 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2278,7 +2278,7 @@ do { \
2278#define ASC_DBG(lvl, format, arg...) { \ 2278#define ASC_DBG(lvl, format, arg...) { \
2279 if (asc_dbglvl >= (lvl)) \ 2279 if (asc_dbglvl >= (lvl)) \
2280 printk(KERN_DEBUG "%s: %s: " format, DRV_NAME, \ 2280 printk(KERN_DEBUG "%s: %s: " format, DRV_NAME, \
2281 __FUNCTION__ , ## arg); \ 2281 __func__ , ## arg); \
2282} 2282}
2283 2283
2284#define ASC_DBG_PRT_SCSI_HOST(lvl, s) \ 2284#define ASC_DBG_PRT_SCSI_HOST(lvl, s) \
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 0899cb61e3dd..b5a868d85eb4 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -288,20 +288,20 @@ static LIST_HEAD(aha152x_host_list);
 #define DO_LOCK(flags) \
 	do { \
 		if(spin_is_locked(&QLOCK)) { \
-			DPRINTK(debug_intr, DEBUG_LEAD "(%s:%d) already locked at %s:%d\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__, QLOCKER, QLOCKERL); \
+			DPRINTK(debug_intr, DEBUG_LEAD "(%s:%d) already locked at %s:%d\n", CMDINFO(CURRENT_SC), __func__, __LINE__, QLOCKER, QLOCKERL); \
 		} \
-		DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locking\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \
+		DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locking\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \
 		spin_lock_irqsave(&QLOCK,flags); \
-		DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locked\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \
-		QLOCKER=__FUNCTION__; \
+		DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locked\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \
+		QLOCKER=__func__; \
 		QLOCKERL=__LINE__; \
 	} while(0)
 
 #define DO_UNLOCK(flags) \
 	do { \
-		DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocking (locked at %s:%d)\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__, QLOCKER, QLOCKERL); \
+		DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocking (locked at %s:%d)\n", CMDINFO(CURRENT_SC), __func__, __LINE__, QLOCKER, QLOCKERL); \
 		spin_unlock_irqrestore(&QLOCK,flags); \
-		DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocked\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \
+		DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocked\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \
 		QLOCKER="(not locked)"; \
 		QLOCKERL=0; \
 	} while(0)
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
index 2ef459e9cda1..2863a9d22851 100644
--- a/drivers/scsi/aic94xx/aic94xx.h
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -39,9 +39,9 @@
 
 #ifdef ASD_ENTER_EXIT
 #define ENTER printk(KERN_NOTICE "%s: ENTER %s\n", ASD_DRIVER_NAME, \
-		__FUNCTION__)
+		__func__)
 #define EXIT printk(KERN_NOTICE "%s: --EXIT %s\n", ASD_DRIVER_NAME, \
-		__FUNCTION__)
+		__func__)
 #else
 #define ENTER
 #define EXIT
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 83a78222896d..eb9dc3195fdf 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -1359,7 +1359,7 @@ int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask)
1359 struct asd_ascb *ascb_list; 1359 struct asd_ascb *ascb_list;
1360 1360
1361 if (!phy_mask) { 1361 if (!phy_mask) {
1362 asd_printk("%s called with phy_mask of 0!?\n", __FUNCTION__); 1362 asd_printk("%s called with phy_mask of 0!?\n", __func__);
1363 return 0; 1363 return 0;
1364 } 1364 }
1365 1365
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 46643319c520..ca55013b6ae5 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -211,7 +211,7 @@ static void asd_form_port(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
211 phy->asd_port = port; 211 phy->asd_port = port;
212 } 212 }
213 ASD_DPRINTK("%s: updating phy_mask 0x%x for phy%d\n", 213 ASD_DPRINTK("%s: updating phy_mask 0x%x for phy%d\n",
214 __FUNCTION__, phy->asd_port->phy_mask, sas_phy->id); 214 __func__, phy->asd_port->phy_mask, sas_phy->id);
215 asd_update_port_links(asd_ha, phy); 215 asd_update_port_links(asd_ha, phy);
216 spin_unlock_irqrestore(&asd_ha->asd_ports_lock, flags); 216 spin_unlock_irqrestore(&asd_ha->asd_ports_lock, flags);
217} 217}
@@ -294,7 +294,7 @@ static void asd_link_reset_err_tasklet(struct asd_ascb *ascb,
294 struct asd_ascb *cp = asd_ascb_alloc_list(ascb->ha, &num, 294 struct asd_ascb *cp = asd_ascb_alloc_list(ascb->ha, &num,
295 GFP_ATOMIC); 295 GFP_ATOMIC);
296 if (!cp) { 296 if (!cp) {
297 asd_printk("%s: out of memory\n", __FUNCTION__); 297 asd_printk("%s: out of memory\n", __func__);
298 goto out; 298 goto out;
299 } 299 }
300 ASD_DPRINTK("phy%d: retries:0 performing link reset seq\n", 300 ASD_DPRINTK("phy%d: retries:0 performing link reset seq\n",
@@ -446,7 +446,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
446 struct domain_device *failed_dev = NULL; 446 struct domain_device *failed_dev = NULL;
447 447
448 ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n", 448 ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n",
449 __FUNCTION__, dl->status_block[3]); 449 __func__, dl->status_block[3]);
450 450
451 /* 451 /*
452 * Find the task that caused the abort and abort it first. 452 * Find the task that caused the abort and abort it first.
@@ -474,7 +474,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
474 474
475 if (!failed_dev) { 475 if (!failed_dev) {
476 ASD_DPRINTK("%s: Can't find task (tc=%d) to abort!\n", 476 ASD_DPRINTK("%s: Can't find task (tc=%d) to abort!\n",
477 __FUNCTION__, tc_abort); 477 __func__, tc_abort);
478 goto out; 478 goto out;
479 } 479 }
480 480
@@ -502,7 +502,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
502 conn_handle = *((u16*)(&dl->status_block[1])); 502 conn_handle = *((u16*)(&dl->status_block[1]));
503 conn_handle = le16_to_cpu(conn_handle); 503 conn_handle = le16_to_cpu(conn_handle);
504 504
505 ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __FUNCTION__, 505 ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __func__,
506 dl->status_block[3]); 506 dl->status_block[3]);
507 507
508 /* Find the last pending task for the device... */ 508 /* Find the last pending task for the device... */
@@ -522,7 +522,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
522 522
523 if (!last_dev_task) { 523 if (!last_dev_task) {
524 ASD_DPRINTK("%s: Device reset for idle device %d?\n", 524 ASD_DPRINTK("%s: Device reset for idle device %d?\n",
525 __FUNCTION__, conn_handle); 525 __func__, conn_handle);
526 goto out; 526 goto out;
527 } 527 }
528 528
@@ -549,10 +549,10 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
549 goto out; 549 goto out;
550 } 550 }
551 case SIGNAL_NCQ_ERROR: 551 case SIGNAL_NCQ_ERROR:
552 ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __FUNCTION__); 552 ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __func__);
553 goto out; 553 goto out;
554 case CLEAR_NCQ_ERROR: 554 case CLEAR_NCQ_ERROR:
555 ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __FUNCTION__); 555 ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __func__);
556 goto out; 556 goto out;
557 } 557 }
558 558
@@ -560,26 +560,26 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
560 560
561 switch (sb_opcode) { 561 switch (sb_opcode) {
562 case BYTES_DMAED: 562 case BYTES_DMAED:
563 ASD_DPRINTK("%s: phy%d: BYTES_DMAED\n", __FUNCTION__, phy_id); 563 ASD_DPRINTK("%s: phy%d: BYTES_DMAED\n", __func__, phy_id);
564 asd_bytes_dmaed_tasklet(ascb, dl, edb, phy_id); 564 asd_bytes_dmaed_tasklet(ascb, dl, edb, phy_id);
565 break; 565 break;
566 case PRIMITIVE_RECVD: 566 case PRIMITIVE_RECVD:
567 ASD_DPRINTK("%s: phy%d: PRIMITIVE_RECVD\n", __FUNCTION__, 567 ASD_DPRINTK("%s: phy%d: PRIMITIVE_RECVD\n", __func__,
568 phy_id); 568 phy_id);
569 asd_primitive_rcvd_tasklet(ascb, dl, phy_id); 569 asd_primitive_rcvd_tasklet(ascb, dl, phy_id);
570 break; 570 break;
571 case PHY_EVENT: 571 case PHY_EVENT:
572 ASD_DPRINTK("%s: phy%d: PHY_EVENT\n", __FUNCTION__, phy_id); 572 ASD_DPRINTK("%s: phy%d: PHY_EVENT\n", __func__, phy_id);
573 asd_phy_event_tasklet(ascb, dl); 573 asd_phy_event_tasklet(ascb, dl);
574 break; 574 break;
575 case LINK_RESET_ERROR: 575 case LINK_RESET_ERROR:
576 ASD_DPRINTK("%s: phy%d: LINK_RESET_ERROR\n", __FUNCTION__, 576 ASD_DPRINTK("%s: phy%d: LINK_RESET_ERROR\n", __func__,
577 phy_id); 577 phy_id);
578 asd_link_reset_err_tasklet(ascb, dl, phy_id); 578 asd_link_reset_err_tasklet(ascb, dl, phy_id);
579 break; 579 break;
580 case TIMER_EVENT: 580 case TIMER_EVENT:
581 ASD_DPRINTK("%s: phy%d: TIMER_EVENT, lost dw sync\n", 581 ASD_DPRINTK("%s: phy%d: TIMER_EVENT, lost dw sync\n",
582 __FUNCTION__, phy_id); 582 __func__, phy_id);
583 asd_turn_led(asd_ha, phy_id, 0); 583 asd_turn_led(asd_ha, phy_id, 0);
584 /* the device is gone */ 584 /* the device is gone */
585 sas_phy_disconnected(sas_phy); 585 sas_phy_disconnected(sas_phy);
@@ -587,7 +587,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
587 sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT); 587 sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT);
588 break; 588 break;
589 default: 589 default:
590 ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __FUNCTION__, 590 ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __func__,
591 phy_id, sb_opcode); 591 phy_id, sb_opcode);
592 ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n", 592 ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n",
593 edb, dl->opcode); 593 edb, dl->opcode);
@@ -654,7 +654,7 @@ static void control_phy_tasklet_complete(struct asd_ascb *ascb,
654 654
655 if (status != 0) { 655 if (status != 0) {
656 ASD_DPRINTK("%s: phy%d status block opcode:0x%x\n", 656 ASD_DPRINTK("%s: phy%d status block opcode:0x%x\n",
657 __FUNCTION__, phy_id, status); 657 __func__, phy_id, status);
658 goto out; 658 goto out;
659 } 659 }
660 660
@@ -663,7 +663,7 @@ static void control_phy_tasklet_complete(struct asd_ascb *ascb,
663 asd_ha->hw_prof.enabled_phys &= ~(1 << phy_id); 663 asd_ha->hw_prof.enabled_phys &= ~(1 << phy_id);
664 asd_turn_led(asd_ha, phy_id, 0); 664 asd_turn_led(asd_ha, phy_id, 0);
665 asd_control_led(asd_ha, phy_id, 0); 665 asd_control_led(asd_ha, phy_id, 0);
666 ASD_DPRINTK("%s: disable phy%d\n", __FUNCTION__, phy_id); 666 ASD_DPRINTK("%s: disable phy%d\n", __func__, phy_id);
667 break; 667 break;
668 668
669 case ENABLE_PHY: 669 case ENABLE_PHY:
@@ -673,40 +673,40 @@ static void control_phy_tasklet_complete(struct asd_ascb *ascb,
673 get_lrate_mode(phy, oob_mode); 673 get_lrate_mode(phy, oob_mode);
674 asd_turn_led(asd_ha, phy_id, 1); 674 asd_turn_led(asd_ha, phy_id, 1);
675 ASD_DPRINTK("%s: phy%d, lrate:0x%x, proto:0x%x\n", 675 ASD_DPRINTK("%s: phy%d, lrate:0x%x, proto:0x%x\n",
676 __FUNCTION__, phy_id,phy->sas_phy.linkrate, 676 __func__, phy_id,phy->sas_phy.linkrate,
677 phy->sas_phy.iproto); 677 phy->sas_phy.iproto);
678 } else if (oob_status & CURRENT_SPINUP_HOLD) { 678 } else if (oob_status & CURRENT_SPINUP_HOLD) {
679 asd_ha->hw_prof.enabled_phys |= (1 << phy_id); 679 asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
680 asd_turn_led(asd_ha, phy_id, 1); 680 asd_turn_led(asd_ha, phy_id, 1);
681 ASD_DPRINTK("%s: phy%d, spinup hold\n", __FUNCTION__, 681 ASD_DPRINTK("%s: phy%d, spinup hold\n", __func__,
682 phy_id); 682 phy_id);
683 } else if (oob_status & CURRENT_ERR_MASK) { 683 } else if (oob_status & CURRENT_ERR_MASK) {
684 asd_turn_led(asd_ha, phy_id, 0); 684 asd_turn_led(asd_ha, phy_id, 0);
685 ASD_DPRINTK("%s: phy%d: error: oob status:0x%02x\n", 685 ASD_DPRINTK("%s: phy%d: error: oob status:0x%02x\n",
686 __FUNCTION__, phy_id, oob_status); 686 __func__, phy_id, oob_status);
687 } else if (oob_status & (CURRENT_HOT_PLUG_CNCT 687 } else if (oob_status & (CURRENT_HOT_PLUG_CNCT
688 | CURRENT_DEVICE_PRESENT)) { 688 | CURRENT_DEVICE_PRESENT)) {
689 asd_ha->hw_prof.enabled_phys |= (1 << phy_id); 689 asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
690 asd_turn_led(asd_ha, phy_id, 1); 690 asd_turn_led(asd_ha, phy_id, 1);
691 ASD_DPRINTK("%s: phy%d: hot plug or device present\n", 691 ASD_DPRINTK("%s: phy%d: hot plug or device present\n",
692 __FUNCTION__, phy_id); 692 __func__, phy_id);
693 } else { 693 } else {
694 asd_ha->hw_prof.enabled_phys |= (1 << phy_id); 694 asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
695 asd_turn_led(asd_ha, phy_id, 0); 695 asd_turn_led(asd_ha, phy_id, 0);
696 ASD_DPRINTK("%s: phy%d: no device present: " 696 ASD_DPRINTK("%s: phy%d: no device present: "
697 "oob_status:0x%x\n", 697 "oob_status:0x%x\n",
698 __FUNCTION__, phy_id, oob_status); 698 __func__, phy_id, oob_status);
699 } 699 }
700 break; 700 break;
701 case RELEASE_SPINUP_HOLD: 701 case RELEASE_SPINUP_HOLD:
702 case PHY_NO_OP: 702 case PHY_NO_OP:
703 case EXECUTE_HARD_RESET: 703 case EXECUTE_HARD_RESET:
704 ASD_DPRINTK("%s: phy%d: sub_func:0x%x\n", __FUNCTION__, 704 ASD_DPRINTK("%s: phy%d: sub_func:0x%x\n", __func__,
705 phy_id, control_phy->sub_func); 705 phy_id, control_phy->sub_func);
706 /* XXX finish */ 706 /* XXX finish */
707 break; 707 break;
708 default: 708 default:
709 ASD_DPRINTK("%s: phy%d: sub_func:0x%x?\n", __FUNCTION__, 709 ASD_DPRINTK("%s: phy%d: sub_func:0x%x?\n", __func__,
710 phy_id, control_phy->sub_func); 710 phy_id, control_phy->sub_func);
711 break; 711 break;
712 } 712 }
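
The aic94xx hunks above, and the similar ones in the files that follow, are a mechanical substitution of the GNU-specific __FUNCTION__ identifier with the C99 predefined identifier __func__; the messages themselves are unchanged. A minimal standalone sketch of the pattern, using a stand-in DPRINTK macro (the driver's real ASD_DPRINTK definition is not part of this diff):

    #include <stdio.h>

    /* Stand-in for ASD_DPRINTK; assumed for illustration, not taken from the driver. */
    #define DPRINTK(fmt, ...) printf("aic94xx: " fmt, __VA_ARGS__)

    static void report_phy_event(int phy_id)
    {
            /* __func__ expands to "report_phy_event"; __FUNCTION__ is the
             * older GNU spelling that these hunks replace. */
            DPRINTK("%s: phy%d: PHY_EVENT\n", __func__, phy_id);
    }

    int main(void)
    {
            report_phy_event(3);
            return 0;
    }
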
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 326765c9caf8..75d20f72501f 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -320,7 +320,7 @@ Again:
320 case TC_RESUME: 320 case TC_RESUME:
321 case TC_PARTIAL_SG_LIST: 321 case TC_PARTIAL_SG_LIST:
322 default: 322 default:
323 ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __FUNCTION__, opcode); 323 ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __func__, opcode);
324 break; 324 break;
325 } 325 }
326 326
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index 633ff40c736a..d4640ef6d44f 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -75,12 +75,12 @@ static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
75 struct done_list_struct *dl) 75 struct done_list_struct *dl)
76{ 76{
77 struct tasklet_completion_status *tcs = ascb->uldd_task; 77 struct tasklet_completion_status *tcs = ascb->uldd_task;
78 ASD_DPRINTK("%s: here\n", __FUNCTION__); 78 ASD_DPRINTK("%s: here\n", __func__);
79 if (!del_timer(&ascb->timer)) { 79 if (!del_timer(&ascb->timer)) {
80 ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__); 80 ASD_DPRINTK("%s: couldn't delete timer\n", __func__);
81 return; 81 return;
82 } 82 }
83 ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode); 83 ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode);
84 tcs->dl_opcode = dl->opcode; 84 tcs->dl_opcode = dl->opcode;
85 complete(ascb->completion); 85 complete(ascb->completion);
86 asd_ascb_free(ascb); 86 asd_ascb_free(ascb);
@@ -91,7 +91,7 @@ static void asd_clear_nexus_timedout(unsigned long data)
91 struct asd_ascb *ascb = (void *)data; 91 struct asd_ascb *ascb = (void *)data;
92 struct tasklet_completion_status *tcs = ascb->uldd_task; 92 struct tasklet_completion_status *tcs = ascb->uldd_task;
93 93
94 ASD_DPRINTK("%s: here\n", __FUNCTION__); 94 ASD_DPRINTK("%s: here\n", __func__);
95 tcs->dl_opcode = TMF_RESP_FUNC_FAILED; 95 tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
96 complete(ascb->completion); 96 complete(ascb->completion);
97} 97}
@@ -103,7 +103,7 @@ static void asd_clear_nexus_timedout(unsigned long data)
103 DECLARE_COMPLETION_ONSTACK(completion); \ 103 DECLARE_COMPLETION_ONSTACK(completion); \
104 DECLARE_TCS(tcs); \ 104 DECLARE_TCS(tcs); \
105 \ 105 \
106 ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \ 106 ASD_DPRINTK("%s: PRE\n", __func__); \
107 res = 1; \ 107 res = 1; \
108 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \ 108 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
109 if (!ascb) \ 109 if (!ascb) \
@@ -115,12 +115,12 @@ static void asd_clear_nexus_timedout(unsigned long data)
115 scb->header.opcode = CLEAR_NEXUS 115 scb->header.opcode = CLEAR_NEXUS
116 116
117#define CLEAR_NEXUS_POST \ 117#define CLEAR_NEXUS_POST \
118 ASD_DPRINTK("%s: POST\n", __FUNCTION__); \ 118 ASD_DPRINTK("%s: POST\n", __func__); \
119 res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \ 119 res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
120 asd_clear_nexus_timedout); \ 120 asd_clear_nexus_timedout); \
121 if (res) \ 121 if (res) \
122 goto out_err; \ 122 goto out_err; \
123 ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \ 123 ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__); \
124 wait_for_completion(&completion); \ 124 wait_for_completion(&completion); \
125 res = tcs.dl_opcode; \ 125 res = tcs.dl_opcode; \
126 if (res == TC_NO_ERROR) \ 126 if (res == TC_NO_ERROR) \
@@ -417,7 +417,7 @@ int asd_abort_task(struct sas_task *task)
417 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 417 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
418 spin_unlock_irqrestore(&task->task_state_lock, flags); 418 spin_unlock_irqrestore(&task->task_state_lock, flags);
419 res = TMF_RESP_FUNC_COMPLETE; 419 res = TMF_RESP_FUNC_COMPLETE;
420 ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task); 420 ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
421 goto out_done; 421 goto out_done;
422 } 422 }
423 spin_unlock_irqrestore(&task->task_state_lock, flags); 423 spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -481,7 +481,7 @@ int asd_abort_task(struct sas_task *task)
481 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 481 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
482 spin_unlock_irqrestore(&task->task_state_lock, flags); 482 spin_unlock_irqrestore(&task->task_state_lock, flags);
483 res = TMF_RESP_FUNC_COMPLETE; 483 res = TMF_RESP_FUNC_COMPLETE;
484 ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task); 484 ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
485 goto out_done; 485 goto out_done;
486 } 486 }
487 spin_unlock_irqrestore(&task->task_state_lock, flags); 487 spin_unlock_irqrestore(&task->task_state_lock, flags);
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index a715632e19d4..477542602284 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -240,7 +240,7 @@ static void __fas216_checkmagic(FAS216_Info *info, const char *func)
240 panic("scsi memory space corrupted in %s", func); 240 panic("scsi memory space corrupted in %s", func);
241 } 241 }
242} 242}
243#define fas216_checkmagic(info) __fas216_checkmagic((info), __FUNCTION__) 243#define fas216_checkmagic(info) __fas216_checkmagic((info), __func__)
244#else 244#else
245#define fas216_checkmagic(info) 245#define fas216_checkmagic(info)
246#endif 246#endif
@@ -2658,7 +2658,7 @@ int fas216_eh_host_reset(struct scsi_cmnd *SCpnt)
2658 fas216_checkmagic(info); 2658 fas216_checkmagic(info);
2659 2659
2660 printk("scsi%d.%c: %s: resetting host\n", 2660 printk("scsi%d.%c: %s: resetting host\n",
2661 info->host->host_no, '0' + SCpnt->device->id, __FUNCTION__); 2661 info->host->host_no, '0' + SCpnt->device->id, __func__);
2662 2662
2663 /* 2663 /*
2664 * Reset the SCSI chip. 2664 * Reset the SCSI chip.
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index aa2011b64683..3c257fe0893e 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -930,6 +930,7 @@ static int ch_probe(struct device *dev)
930 if (init) 930 if (init)
931 ch_init_elem(ch); 931 ch_init_elem(ch);
932 932
933 dev_set_drvdata(dev, ch);
933 sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name); 934 sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name);
934 935
935 return 0; 936 return 0;
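
The one-line ch.c change stores the changer's private data on the struct device during probe, so that later callbacks which only receive the struct device can recover it with dev_get_drvdata(). A rough sketch of that pairing, with a hypothetical probe/remove pair and data structure that are not taken from ch.c:

    #include <linux/device.h>
    #include <linux/slab.h>

    struct my_changer {
            char name[16];
    };

    static int my_probe(struct device *dev)
    {
            struct my_changer *ch = kzalloc(sizeof(*ch), GFP_KERNEL);

            if (!ch)
                    return -ENOMEM;
            /* Without this, my_remove() below has no way to find 'ch' again. */
            dev_set_drvdata(dev, ch);
            return 0;
    }

    static int my_remove(struct device *dev)
    {
            struct my_changer *ch = dev_get_drvdata(dev);

            kfree(ch);
            return 0;
    }
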
diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
index 2adc0f666b68..67070257919f 100644
--- a/drivers/scsi/device_handler/Kconfig
+++ b/drivers/scsi/device_handler/Kconfig
@@ -30,3 +30,11 @@ config SCSI_DH_EMC
30 depends on SCSI_DH 30 depends on SCSI_DH
31 help 31 help
32 If you have a EMC CLARiiON select y. Otherwise, say N. 32 If you have a EMC CLARiiON select y. Otherwise, say N.
33
34config SCSI_DH_ALUA
35 tristate "SPC-3 ALUA Device Handler (EXPERIMENTAL)"
36 depends on SCSI_DH && EXPERIMENTAL
37 help
38 SCSI Device handler for generic SPC-3 Asymmetric Logical Unit
39 Access (ALUA).
40
diff --git a/drivers/scsi/device_handler/Makefile b/drivers/scsi/device_handler/Makefile
index 35272e93b1c8..e1d2ea083e15 100644
--- a/drivers/scsi/device_handler/Makefile
+++ b/drivers/scsi/device_handler/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_SCSI_DH) += scsi_dh.o
5obj-$(CONFIG_SCSI_DH_RDAC) += scsi_dh_rdac.o 5obj-$(CONFIG_SCSI_DH_RDAC) += scsi_dh_rdac.o
6obj-$(CONFIG_SCSI_DH_HP_SW) += scsi_dh_hp_sw.o 6obj-$(CONFIG_SCSI_DH_HP_SW) += scsi_dh_hp_sw.o
7obj-$(CONFIG_SCSI_DH_EMC) += scsi_dh_emc.o 7obj-$(CONFIG_SCSI_DH_EMC) += scsi_dh_emc.o
8obj-$(CONFIG_SCSI_DH_ALUA) += scsi_dh_alua.o
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index ab6c21cd9689..a518f2eff19a 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -24,8 +24,16 @@
24#include <scsi/scsi_dh.h> 24#include <scsi/scsi_dh.h>
25#include "../scsi_priv.h" 25#include "../scsi_priv.h"
26 26
27struct scsi_dh_devinfo_list {
28 struct list_head node;
29 char vendor[9];
30 char model[17];
31 struct scsi_device_handler *handler;
32};
33
27static DEFINE_SPINLOCK(list_lock); 34static DEFINE_SPINLOCK(list_lock);
28static LIST_HEAD(scsi_dh_list); 35static LIST_HEAD(scsi_dh_list);
36static LIST_HEAD(scsi_dh_dev_list);
29 37
30static struct scsi_device_handler *get_device_handler(const char *name) 38static struct scsi_device_handler *get_device_handler(const char *name)
31{ 39{
@@ -33,7 +41,7 @@ static struct scsi_device_handler *get_device_handler(const char *name)
33 41
34 spin_lock(&list_lock); 42 spin_lock(&list_lock);
35 list_for_each_entry(tmp, &scsi_dh_list, list) { 43 list_for_each_entry(tmp, &scsi_dh_list, list) {
36 if (!strcmp(tmp->name, name)) { 44 if (!strncmp(tmp->name, name, strlen(tmp->name))) {
37 found = tmp; 45 found = tmp;
38 break; 46 break;
39 } 47 }
@@ -42,11 +50,307 @@ static struct scsi_device_handler *get_device_handler(const char *name)
42 return found; 50 return found;
43} 51}
44 52
53
54static struct scsi_device_handler *
55scsi_dh_cache_lookup(struct scsi_device *sdev)
56{
57 struct scsi_dh_devinfo_list *tmp;
58 struct scsi_device_handler *found_dh = NULL;
59
60 spin_lock(&list_lock);
61 list_for_each_entry(tmp, &scsi_dh_dev_list, node) {
62 if (!strncmp(sdev->vendor, tmp->vendor, strlen(tmp->vendor)) &&
63 !strncmp(sdev->model, tmp->model, strlen(tmp->model))) {
64 found_dh = tmp->handler;
65 break;
66 }
67 }
68 spin_unlock(&list_lock);
69
70 return found_dh;
71}
72
73static int scsi_dh_handler_lookup(struct scsi_device_handler *scsi_dh,
74 struct scsi_device *sdev)
75{
76 int i, found = 0;
77
78 for(i = 0; scsi_dh->devlist[i].vendor; i++) {
79 if (!strncmp(sdev->vendor, scsi_dh->devlist[i].vendor,
80 strlen(scsi_dh->devlist[i].vendor)) &&
81 !strncmp(sdev->model, scsi_dh->devlist[i].model,
82 strlen(scsi_dh->devlist[i].model))) {
83 found = 1;
84 break;
85 }
86 }
87 return found;
88}
89
90/*
91 * device_handler_match - Match a device handler to a device
92 * @scsi_dh - The device handler to match against or NULL
93 * @sdev - SCSI device to be tested against @scsi_dh
94 *
95 * Tests @sdev against the device handler @scsi_dh or against
96 * all registered device handlers if @scsi_dh == NULL.
97 * Returns the found device handler or NULL if not found.
98 */
99static struct scsi_device_handler *
100device_handler_match(struct scsi_device_handler *scsi_dh,
101 struct scsi_device *sdev)
102{
103 struct scsi_device_handler *found_dh = NULL;
104 struct scsi_dh_devinfo_list *tmp;
105
106 found_dh = scsi_dh_cache_lookup(sdev);
107 if (found_dh)
108 return found_dh;
109
110 if (scsi_dh) {
111 if (scsi_dh_handler_lookup(scsi_dh, sdev))
112 found_dh = scsi_dh;
113 } else {
114 struct scsi_device_handler *tmp_dh;
115
116 spin_lock(&list_lock);
117 list_for_each_entry(tmp_dh, &scsi_dh_list, list) {
118 if (scsi_dh_handler_lookup(tmp_dh, sdev))
119 found_dh = tmp_dh;
120 }
121 spin_unlock(&list_lock);
122 }
123
124 if (found_dh) { /* If device is found, add it to the cache */
125 tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
126 if (tmp) {
127 strncpy(tmp->vendor, sdev->vendor, 8);
128 strncpy(tmp->model, sdev->model, 16);
129 tmp->vendor[8] = '\0';
130 tmp->model[16] = '\0';
131 tmp->handler = found_dh;
132 spin_lock(&list_lock);
133 list_add(&tmp->node, &scsi_dh_dev_list);
134 spin_unlock(&list_lock);
135 } else {
136 found_dh = NULL;
137 }
138 }
139
140 return found_dh;
141}
142
143/*
144 * scsi_dh_handler_attach - Attach a device handler to a device
145 * @sdev - SCSI device the device handler should attach to
146 * @scsi_dh - The device handler to attach
147 */
148static int scsi_dh_handler_attach(struct scsi_device *sdev,
149 struct scsi_device_handler *scsi_dh)
150{
151 int err = 0;
152
153 if (sdev->scsi_dh_data) {
154 if (sdev->scsi_dh_data->scsi_dh != scsi_dh)
155 err = -EBUSY;
156 } else if (scsi_dh->attach)
157 err = scsi_dh->attach(sdev);
158
159 return err;
160}
161
162/*
163 * scsi_dh_handler_detach - Detach a device handler from a device
164 * @sdev - SCSI device the device handler should be detached from
165 * @scsi_dh - Device handler to be detached
166 *
167 * Detach from a device handler. If a device handler is specified,
168 * only detach if the currently attached handler matches @scsi_dh.
169 */
170static void scsi_dh_handler_detach(struct scsi_device *sdev,
171 struct scsi_device_handler *scsi_dh)
172{
173 if (!sdev->scsi_dh_data)
174 return;
175
176 if (scsi_dh && scsi_dh != sdev->scsi_dh_data->scsi_dh)
177 return;
178
179 if (!scsi_dh)
180 scsi_dh = sdev->scsi_dh_data->scsi_dh;
181
182 if (scsi_dh && scsi_dh->detach)
183 scsi_dh->detach(sdev);
184}
185
186/*
187 * Functions for sysfs attribute 'dh_state'
188 */
189static ssize_t
190store_dh_state(struct device *dev, struct device_attribute *attr,
191 const char *buf, size_t count)
192{
193 struct scsi_device *sdev = to_scsi_device(dev);
194 struct scsi_device_handler *scsi_dh;
195 int err = -EINVAL;
196
197 if (!sdev->scsi_dh_data) {
198 /*
199 * Attach to a device handler
200 */
201 if (!(scsi_dh = get_device_handler(buf)))
202 return err;
203 err = scsi_dh_handler_attach(sdev, scsi_dh);
204 } else {
205 scsi_dh = sdev->scsi_dh_data->scsi_dh;
206 if (!strncmp(buf, "detach", 6)) {
207 /*
208 * Detach from a device handler
209 */
210 scsi_dh_handler_detach(sdev, scsi_dh);
211 err = 0;
212 } else if (!strncmp(buf, "activate", 8)) {
213 /*
214 * Activate a device handler
215 */
216 if (scsi_dh->activate)
217 err = scsi_dh->activate(sdev);
218 else
219 err = 0;
220 }
221 }
222
223 return err<0?err:count;
224}
225
226static ssize_t
227show_dh_state(struct device *dev, struct device_attribute *attr, char *buf)
228{
229 struct scsi_device *sdev = to_scsi_device(dev);
230
231 if (!sdev->scsi_dh_data)
232 return snprintf(buf, 20, "detached\n");
233
234 return snprintf(buf, 20, "%s\n", sdev->scsi_dh_data->scsi_dh->name);
235}
236
237static struct device_attribute scsi_dh_state_attr =
238 __ATTR(dh_state, S_IRUGO | S_IWUSR, show_dh_state,
239 store_dh_state);
240
241/*
242 * scsi_dh_sysfs_attr_add - Callback for scsi_init_dh
243 */
244static int scsi_dh_sysfs_attr_add(struct device *dev, void *data)
245{
246 struct scsi_device *sdev;
247 int err;
248
249 if (!scsi_is_sdev_device(dev))
250 return 0;
251
252 sdev = to_scsi_device(dev);
253
254 err = device_create_file(&sdev->sdev_gendev,
255 &scsi_dh_state_attr);
256
257 return 0;
258}
259
260/*
261 * scsi_dh_sysfs_attr_remove - Callback for scsi_exit_dh
262 */
263static int scsi_dh_sysfs_attr_remove(struct device *dev, void *data)
264{
265 struct scsi_device *sdev;
266
267 if (!scsi_is_sdev_device(dev))
268 return 0;
269
270 sdev = to_scsi_device(dev);
271
272 device_remove_file(&sdev->sdev_gendev,
273 &scsi_dh_state_attr);
274
275 return 0;
276}
277
278/*
279 * scsi_dh_notifier - notifier chain callback
280 */
281static int scsi_dh_notifier(struct notifier_block *nb,
282 unsigned long action, void *data)
283{
284 struct device *dev = data;
285 struct scsi_device *sdev;
286 int err = 0;
287 struct scsi_device_handler *devinfo = NULL;
288
289 if (!scsi_is_sdev_device(dev))
290 return 0;
291
292 sdev = to_scsi_device(dev);
293
294 if (action == BUS_NOTIFY_ADD_DEVICE) {
295 devinfo = device_handler_match(NULL, sdev);
296 if (!devinfo)
297 goto out;
298
299 err = scsi_dh_handler_attach(sdev, devinfo);
300 if (!err)
301 err = device_create_file(dev, &scsi_dh_state_attr);
302 } else if (action == BUS_NOTIFY_DEL_DEVICE) {
303 device_remove_file(dev, &scsi_dh_state_attr);
304 scsi_dh_handler_detach(sdev, NULL);
305 }
306out:
307 return err;
308}
309
310/*
311 * scsi_dh_notifier_add - Callback for scsi_register_device_handler
312 */
45static int scsi_dh_notifier_add(struct device *dev, void *data) 313static int scsi_dh_notifier_add(struct device *dev, void *data)
46{ 314{
47 struct scsi_device_handler *scsi_dh = data; 315 struct scsi_device_handler *scsi_dh = data;
316 struct scsi_device *sdev;
317
318 if (!scsi_is_sdev_device(dev))
319 return 0;
320
321 if (!get_device(dev))
322 return 0;
323
324 sdev = to_scsi_device(dev);
325
326 if (device_handler_match(scsi_dh, sdev))
327 scsi_dh_handler_attach(sdev, scsi_dh);
328
329 put_device(dev);
330
331 return 0;
332}
333
334/*
335 * scsi_dh_notifier_remove - Callback for scsi_unregister_device_handler
336 */
337static int scsi_dh_notifier_remove(struct device *dev, void *data)
338{
339 struct scsi_device_handler *scsi_dh = data;
340 struct scsi_device *sdev;
341
342 if (!scsi_is_sdev_device(dev))
343 return 0;
344
345 if (!get_device(dev))
346 return 0;
347
348 sdev = to_scsi_device(dev);
349
350 scsi_dh_handler_detach(sdev, scsi_dh);
351
352 put_device(dev);
48 353
49 scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_ADD_DEVICE, dev);
50 return 0; 354 return 0;
51} 355}
52 356
@@ -59,33 +363,19 @@ static int scsi_dh_notifier_add(struct device *dev, void *data)
59 */ 363 */
60int scsi_register_device_handler(struct scsi_device_handler *scsi_dh) 364int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
61{ 365{
62 int ret = -EBUSY; 366 if (get_device_handler(scsi_dh->name))
63 struct scsi_device_handler *tmp; 367 return -EBUSY;
64 368
65 tmp = get_device_handler(scsi_dh->name);
66 if (tmp)
67 goto done;
68
69 ret = bus_register_notifier(&scsi_bus_type, &scsi_dh->nb);
70
71 bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
72 spin_lock(&list_lock); 369 spin_lock(&list_lock);
73 list_add(&scsi_dh->list, &scsi_dh_list); 370 list_add(&scsi_dh->list, &scsi_dh_list);
74 spin_unlock(&list_lock); 371 spin_unlock(&list_lock);
372 bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
373 printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
75 374
76done: 375 return SCSI_DH_OK;
77 return ret;
78} 376}
79EXPORT_SYMBOL_GPL(scsi_register_device_handler); 377EXPORT_SYMBOL_GPL(scsi_register_device_handler);
80 378
81static int scsi_dh_notifier_remove(struct device *dev, void *data)
82{
83 struct scsi_device_handler *scsi_dh = data;
84
85 scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_DEL_DEVICE, dev);
86 return 0;
87}
88
89/* 379/*
90 * scsi_unregister_device_handler - register a device handler personality 380 * scsi_unregister_device_handler - register a device handler personality
91 * module. 381 * module.
@@ -95,23 +385,26 @@ static int scsi_dh_notifier_remove(struct device *dev, void *data)
95 */ 385 */
96int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh) 386int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
97{ 387{
98 int ret = -ENODEV; 388 struct scsi_dh_devinfo_list *tmp, *pos;
99 struct scsi_device_handler *tmp;
100
101 tmp = get_device_handler(scsi_dh->name);
102 if (!tmp)
103 goto done;
104 389
105 ret = bus_unregister_notifier(&scsi_bus_type, &scsi_dh->nb); 390 if (!get_device_handler(scsi_dh->name))
391 return -ENODEV;
106 392
107 bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, 393 bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
108 scsi_dh_notifier_remove); 394 scsi_dh_notifier_remove);
395
109 spin_lock(&list_lock); 396 spin_lock(&list_lock);
110 list_del(&scsi_dh->list); 397 list_del(&scsi_dh->list);
398 list_for_each_entry_safe(pos, tmp, &scsi_dh_dev_list, node) {
399 if (pos->handler == scsi_dh) {
400 list_del(&pos->node);
401 kfree(pos);
402 }
403 }
111 spin_unlock(&list_lock); 404 spin_unlock(&list_lock);
405 printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name);
112 406
113done: 407 return SCSI_DH_OK;
114 return ret;
115} 408}
116EXPORT_SYMBOL_GPL(scsi_unregister_device_handler); 409EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
117 410
@@ -157,6 +450,97 @@ int scsi_dh_handler_exist(const char *name)
157} 450}
158EXPORT_SYMBOL_GPL(scsi_dh_handler_exist); 451EXPORT_SYMBOL_GPL(scsi_dh_handler_exist);
159 452
453/*
454 * scsi_dh_attach - Attach device handler
455 * @q - Request queue that is associated with the scsi_device
456 * @name - name of the handler to attach
457 */
458int scsi_dh_attach(struct request_queue *q, const char *name)
459{
460 unsigned long flags;
461 struct scsi_device *sdev;
462 struct scsi_device_handler *scsi_dh;
463 int err = 0;
464
465 scsi_dh = get_device_handler(name);
466 if (!scsi_dh)
467 return -EINVAL;
468
469 spin_lock_irqsave(q->queue_lock, flags);
470 sdev = q->queuedata;
471 if (!sdev || !get_device(&sdev->sdev_gendev))
472 err = -ENODEV;
473 spin_unlock_irqrestore(q->queue_lock, flags);
474
475 if (!err) {
476 err = scsi_dh_handler_attach(sdev, scsi_dh);
477
478 put_device(&sdev->sdev_gendev);
479 }
480 return err;
481}
482EXPORT_SYMBOL_GPL(scsi_dh_attach);
483
484/*
485 * scsi_dh_detach - Detach device handler
486 * @q - Request queue that is associated with the scsi_device
487 *
488 * This function will detach the device handler only
489 * if the sdev is not part of the internal list, i.e.
490 * if it has been attached manually.
491 */
492void scsi_dh_detach(struct request_queue *q)
493{
494 unsigned long flags;
495 struct scsi_device *sdev;
496 struct scsi_device_handler *scsi_dh = NULL;
497
498 spin_lock_irqsave(q->queue_lock, flags);
499 sdev = q->queuedata;
500 if (!sdev || !get_device(&sdev->sdev_gendev))
501 sdev = NULL;
502 spin_unlock_irqrestore(q->queue_lock, flags);
503
504 if (!sdev)
505 return;
506
507 if (sdev->scsi_dh_data) {
508 /* if sdev is not on internal list, detach */
509 scsi_dh = sdev->scsi_dh_data->scsi_dh;
510 if (!device_handler_match(scsi_dh, sdev))
511 scsi_dh_handler_detach(sdev, scsi_dh);
512 }
513 put_device(&sdev->sdev_gendev);
514}
515EXPORT_SYMBOL_GPL(scsi_dh_detach);
516
517static struct notifier_block scsi_dh_nb = {
518 .notifier_call = scsi_dh_notifier
519};
520
521static int __init scsi_dh_init(void)
522{
523 int r;
524
525 r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
526
527 if (!r)
528 bus_for_each_dev(&scsi_bus_type, NULL, NULL,
529 scsi_dh_sysfs_attr_add);
530
531 return r;
532}
533
534static void __exit scsi_dh_exit(void)
535{
536 bus_for_each_dev(&scsi_bus_type, NULL, NULL,
537 scsi_dh_sysfs_attr_remove);
538 bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
539}
540
541module_init(scsi_dh_init);
542module_exit(scsi_dh_exit);
543
160MODULE_DESCRIPTION("SCSI device handler"); 544MODULE_DESCRIPTION("SCSI device handler");
161MODULE_AUTHOR("Chandra Seetharaman <sekharan@us.ibm.com>"); 545MODULE_AUTHOR("Chandra Seetharaman <sekharan@us.ibm.com>");
162MODULE_LICENSE("GPL"); 546MODULE_LICENSE("GPL");
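
With the rework above, scsi_dh.c registers a single bus notifier of its own and exports scsi_dh_attach()/scsi_dh_detach(), which take a request queue and a handler name instead of relying on per-handler notifier blocks. A caller-side sketch (the function name and the choice of the "alua" handler are illustrative only):

    #include <linux/blkdev.h>
    #include <scsi/scsi_dh.h>

    static int attach_alua_by_name(struct request_queue *q)
    {
            int err;

            /* -EINVAL if no handler named "alua" is registered,
             * -ENODEV if the queue has no SCSI device behind it. */
            err = scsi_dh_attach(q, "alua");
            if (err)
                    return err;

            /* ... the handler's prep_fn/check_sense now apply to this queue ... */

            /* Detaches only when the handler was attached manually, i.e. the
             * device does not match the handler's internal device list. */
            scsi_dh_detach(q);
            return 0;
    }
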
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
new file mode 100644
index 000000000000..fcdd73f25625
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -0,0 +1,802 @@
1/*
2 * Generic SCSI-3 ALUA SCSI Device Handler
3 *
4 * Copyright (C) 2007, 2008 Hannes Reinecke, SUSE Linux Products GmbH.
5 * All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 */
22#include <scsi/scsi.h>
23#include <scsi/scsi_eh.h>
24#include <scsi/scsi_dh.h>
25
26#define ALUA_DH_NAME "alua"
27#define ALUA_DH_VER "1.2"
28
29#define TPGS_STATE_OPTIMIZED 0x0
30#define TPGS_STATE_NONOPTIMIZED 0x1
31#define TPGS_STATE_STANDBY 0x2
32#define TPGS_STATE_UNAVAILABLE 0x3
33#define TPGS_STATE_OFFLINE 0xe
34#define TPGS_STATE_TRANSITIONING 0xf
35
36#define TPGS_SUPPORT_NONE 0x00
37#define TPGS_SUPPORT_OPTIMIZED 0x01
38#define TPGS_SUPPORT_NONOPTIMIZED 0x02
39#define TPGS_SUPPORT_STANDBY 0x04
40#define TPGS_SUPPORT_UNAVAILABLE 0x08
41#define TPGS_SUPPORT_OFFLINE 0x40
42#define TPGS_SUPPORT_TRANSITION 0x80
43
44#define TPGS_MODE_UNINITIALIZED -1
45#define TPGS_MODE_NONE 0x0
46#define TPGS_MODE_IMPLICIT 0x1
47#define TPGS_MODE_EXPLICIT 0x2
48
49#define ALUA_INQUIRY_SIZE 36
50#define ALUA_FAILOVER_TIMEOUT (60 * HZ)
51#define ALUA_FAILOVER_RETRIES 5
52
53struct alua_dh_data {
54 int group_id;
55 int rel_port;
56 int tpgs;
57 int state;
58 unsigned char inq[ALUA_INQUIRY_SIZE];
59 unsigned char *buff;
60 int bufflen;
61 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
62 int senselen;
63};
64
65#define ALUA_POLICY_SWITCH_CURRENT 0
66#define ALUA_POLICY_SWITCH_ALL 1
67
68static inline struct alua_dh_data *get_alua_data(struct scsi_device *sdev)
69{
70 struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
71 BUG_ON(scsi_dh_data == NULL);
72 return ((struct alua_dh_data *) scsi_dh_data->buf);
73}
74
75static int realloc_buffer(struct alua_dh_data *h, unsigned len)
76{
77 if (h->buff && h->buff != h->inq)
78 kfree(h->buff);
79
80 h->buff = kmalloc(len, GFP_NOIO);
81 if (!h->buff) {
82 h->buff = h->inq;
83 h->bufflen = ALUA_INQUIRY_SIZE;
84 return 1;
85 }
86 h->bufflen = len;
87 return 0;
88}
89
90static struct request *get_alua_req(struct scsi_device *sdev,
91 void *buffer, unsigned buflen, int rw)
92{
93 struct request *rq;
94 struct request_queue *q = sdev->request_queue;
95
96 rq = blk_get_request(q, rw, GFP_NOIO);
97
98 if (!rq) {
99 sdev_printk(KERN_INFO, sdev,
100 "%s: blk_get_request failed\n", __func__);
101 return NULL;
102 }
103
104 if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
105 blk_put_request(rq);
106 sdev_printk(KERN_INFO, sdev,
107 "%s: blk_rq_map_kern failed\n", __func__);
108 return NULL;
109 }
110
111 rq->cmd_type = REQ_TYPE_BLOCK_PC;
112 rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
113 rq->retries = ALUA_FAILOVER_RETRIES;
114 rq->timeout = ALUA_FAILOVER_TIMEOUT;
115
116 return rq;
117}
118
119/*
120 * submit_std_inquiry - Issue a standard INQUIRY command
121 * @sdev: sdev the command should be send to
122 */
123static int submit_std_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
124{
125 struct request *rq;
126 int err = SCSI_DH_RES_TEMP_UNAVAIL;
127
128 rq = get_alua_req(sdev, h->inq, ALUA_INQUIRY_SIZE, READ);
129 if (!rq)
130 goto done;
131
132 /* Prepare the command. */
133 rq->cmd[0] = INQUIRY;
134 rq->cmd[1] = 0;
135 rq->cmd[2] = 0;
136 rq->cmd[4] = ALUA_INQUIRY_SIZE;
137 rq->cmd_len = COMMAND_SIZE(INQUIRY);
138
139 rq->sense = h->sense;
140 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
141 rq->sense_len = h->senselen = 0;
142
143 err = blk_execute_rq(rq->q, NULL, rq, 1);
144 if (err == -EIO) {
145 sdev_printk(KERN_INFO, sdev,
146 "%s: std inquiry failed with %x\n",
147 ALUA_DH_NAME, rq->errors);
148 h->senselen = rq->sense_len;
149 err = SCSI_DH_IO;
150 }
151 blk_put_request(rq);
152done:
153 return err;
154}
155
156/*
157 * submit_vpd_inquiry - Issue an INQUIRY VPD page 0x83 command
158 * @sdev: sdev the command should be sent to
159 */
160static int submit_vpd_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
161{
162 struct request *rq;
163 int err = SCSI_DH_RES_TEMP_UNAVAIL;
164
165 rq = get_alua_req(sdev, h->buff, h->bufflen, READ);
166 if (!rq)
167 goto done;
168
169 /* Prepare the command. */
170 rq->cmd[0] = INQUIRY;
171 rq->cmd[1] = 1;
172 rq->cmd[2] = 0x83;
173 rq->cmd[4] = h->bufflen;
174 rq->cmd_len = COMMAND_SIZE(INQUIRY);
175
176 rq->sense = h->sense;
177 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
178 rq->sense_len = h->senselen = 0;
179
180 err = blk_execute_rq(rq->q, NULL, rq, 1);
181 if (err == -EIO) {
182 sdev_printk(KERN_INFO, sdev,
183 "%s: evpd inquiry failed with %x\n",
184 ALUA_DH_NAME, rq->errors);
185 h->senselen = rq->sense_len;
186 err = SCSI_DH_IO;
187 }
188 blk_put_request(rq);
189done:
190 return err;
191}
192
193/*
194 * submit_rtpg - Issue a REPORT TARGET GROUP STATES command
195 * @sdev: sdev the command should be sent to
196 */
197static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
198{
199 struct request *rq;
200 int err = SCSI_DH_RES_TEMP_UNAVAIL;
201
202 rq = get_alua_req(sdev, h->buff, h->bufflen, READ);
203 if (!rq)
204 goto done;
205
206 /* Prepare the command. */
207 rq->cmd[0] = MAINTENANCE_IN;
208 rq->cmd[1] = MI_REPORT_TARGET_PGS;
209 rq->cmd[6] = (h->bufflen >> 24) & 0xff;
210 rq->cmd[7] = (h->bufflen >> 16) & 0xff;
211 rq->cmd[8] = (h->bufflen >> 8) & 0xff;
212 rq->cmd[9] = h->bufflen & 0xff;
213 rq->cmd_len = COMMAND_SIZE(MAINTENANCE_IN);
214
215 rq->sense = h->sense;
216 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
217 rq->sense_len = h->senselen = 0;
218
219 err = blk_execute_rq(rq->q, NULL, rq, 1);
220 if (err == -EIO) {
221 sdev_printk(KERN_INFO, sdev,
222 "%s: rtpg failed with %x\n",
223 ALUA_DH_NAME, rq->errors);
224 h->senselen = rq->sense_len;
225 err = SCSI_DH_IO;
226 }
227 blk_put_request(rq);
228done:
229 return err;
230}
231
232/*
233 * submit_stpg - Issue a SET TARGET GROUP STATES command
234 * @sdev: sdev the command should be sent to
235 *
236 * Currently we only set the current target port group state
237 * to 'active/optimized' and let the array firmware figure out
238 * the states of the remaining groups.
239 */
240static unsigned submit_stpg(struct scsi_device *sdev, struct alua_dh_data *h)
241{
242 struct request *rq;
243 int err = SCSI_DH_RES_TEMP_UNAVAIL;
244 int stpg_len = 8;
245
246 /* Prepare the data buffer */
247 memset(h->buff, 0, stpg_len);
248 h->buff[4] = TPGS_STATE_OPTIMIZED & 0x0f;
249 h->buff[6] = (h->group_id >> 8) & 0x0f;
250 h->buff[7] = h->group_id & 0x0f;
251
252 rq = get_alua_req(sdev, h->buff, stpg_len, WRITE);
253 if (!rq)
254 goto done;
255
256 /* Prepare the command. */
257 rq->cmd[0] = MAINTENANCE_OUT;
258 rq->cmd[1] = MO_SET_TARGET_PGS;
259 rq->cmd[6] = (stpg_len >> 24) & 0xff;
260 rq->cmd[7] = (stpg_len >> 16) & 0xff;
261 rq->cmd[8] = (stpg_len >> 8) & 0xff;
262 rq->cmd[9] = stpg_len & 0xff;
263 rq->cmd_len = COMMAND_SIZE(MAINTENANCE_OUT);
264
265 rq->sense = h->sense;
266 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
267 rq->sense_len = h->senselen = 0;
268
269 err = blk_execute_rq(rq->q, NULL, rq, 1);
270 if (err == -EIO) {
271 sdev_printk(KERN_INFO, sdev,
272 "%s: stpg failed with %x\n",
273 ALUA_DH_NAME, rq->errors);
274 h->senselen = rq->sense_len;
275 err = SCSI_DH_IO;
276 }
277 blk_put_request(rq);
278done:
279 return err;
280}
281
282/*
283 * alua_std_inquiry - Evaluate standard INQUIRY command
284 * @sdev: device to be checked
285 *
286 * Just extract the TPGS setting to find out if ALUA
287 * is supported.
288 */
289static int alua_std_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
290{
291 int err;
292
293 err = submit_std_inquiry(sdev, h);
294
295 if (err != SCSI_DH_OK)
296 return err;
297
298 /* Check TPGS setting */
299 h->tpgs = (h->inq[5] >> 4) & 0x3;
300 switch (h->tpgs) {
301 case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT:
302 sdev_printk(KERN_INFO, sdev,
303 "%s: supports implicit and explicit TPGS\n",
304 ALUA_DH_NAME);
305 break;
306 case TPGS_MODE_EXPLICIT:
307 sdev_printk(KERN_INFO, sdev, "%s: supports explicit TPGS\n",
308 ALUA_DH_NAME);
309 break;
310 case TPGS_MODE_IMPLICIT:
311 sdev_printk(KERN_INFO, sdev, "%s: supports implicit TPGS\n",
312 ALUA_DH_NAME);
313 break;
314 default:
315 h->tpgs = TPGS_MODE_NONE;
316 sdev_printk(KERN_INFO, sdev, "%s: not supported\n",
317 ALUA_DH_NAME);
318 err = SCSI_DH_DEV_UNSUPP;
319 break;
320 }
321
322 return err;
323}
324
325/*
326 * alua_vpd_inquiry - Evaluate INQUIRY vpd page 0x83
327 * @sdev: device to be checked
328 *
329 * Extract the relative target port and the target port group
330 * descriptor from the list of identifiers.
331 */
332static int alua_vpd_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
333{
334 int len;
335 unsigned err;
336 unsigned char *d;
337
338 retry:
339 err = submit_vpd_inquiry(sdev, h);
340
341 if (err != SCSI_DH_OK)
342 return err;
343
344 /* Check if vpd page exceeds initial buffer */
345 len = (h->buff[2] << 8) + h->buff[3] + 4;
346 if (len > h->bufflen) {
347 /* Resubmit with the correct length */
348 if (realloc_buffer(h, len)) {
349 sdev_printk(KERN_WARNING, sdev,
350 "%s: kmalloc buffer failed\n",
351 ALUA_DH_NAME);
352 /* Temporary failure, bypass */
353 return SCSI_DH_DEV_TEMP_BUSY;
354 }
355 goto retry;
356 }
357
358 /*
359 * Now look for the correct descriptor.
360 */
361 d = h->buff + 4;
362 while (d < h->buff + len) {
363 switch (d[1] & 0xf) {
364 case 0x4:
365 /* Relative target port */
366 h->rel_port = (d[6] << 8) + d[7];
367 break;
368 case 0x5:
369 /* Target port group */
370 h->group_id = (d[6] << 8) + d[7];
371 break;
372 default:
373 break;
374 }
375 d += d[3] + 4;
376 }
377
378 if (h->group_id == -1) {
379 /*
380 * Internal error; TPGS supported but required
381 * VPD identification descriptors not present.
382 * Disable ALUA support
383 */
384 sdev_printk(KERN_INFO, sdev,
385 "%s: No target port descriptors found\n",
386 ALUA_DH_NAME);
387 h->state = TPGS_STATE_OPTIMIZED;
388 h->tpgs = TPGS_MODE_NONE;
389 err = SCSI_DH_DEV_UNSUPP;
390 } else {
391 sdev_printk(KERN_INFO, sdev,
392 "%s: port group %02x rel port %02x\n",
393 ALUA_DH_NAME, h->group_id, h->rel_port);
394 }
395
396 return err;
397}
398
399static char print_alua_state(int state)
400{
401 switch (state) {
402 case TPGS_STATE_OPTIMIZED:
403 return 'A';
404 case TPGS_STATE_NONOPTIMIZED:
405 return 'N';
406 case TPGS_STATE_STANDBY:
407 return 'S';
408 case TPGS_STATE_UNAVAILABLE:
409 return 'U';
410 case TPGS_STATE_OFFLINE:
411 return 'O';
412 case TPGS_STATE_TRANSITIONING:
413 return 'T';
414 default:
415 return 'X';
416 }
417}
418
419static int alua_check_sense(struct scsi_device *sdev,
420 struct scsi_sense_hdr *sense_hdr)
421{
422 switch (sense_hdr->sense_key) {
423 case NOT_READY:
424 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a)
425 /*
426 * LUN Not Accessible - ALUA state transition
427 */
428 return NEEDS_RETRY;
429 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0b)
430 /*
431 * LUN Not Accessible -- Target port in standby state
432 */
433 return SUCCESS;
434 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0c)
435 /*
436 * LUN Not Accessible -- Target port in unavailable state
437 */
438 return SUCCESS;
439 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x12)
440 /*
441 * LUN Not Ready -- Offline
442 */
443 return SUCCESS;
444 break;
445 case UNIT_ATTENTION:
446 if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
447 /*
448 * Power On, Reset, or Bus Device Reset, just retry.
449 */
450 return NEEDS_RETRY;
451 if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) {
452 /*
453 * ALUA state changed
454 */
455 return NEEDS_RETRY;
456 }
457 if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) {
458 /*
459 * Implicit ALUA state transition failed
460 */
461 return NEEDS_RETRY;
462 }
463 break;
464 }
465
466 return SCSI_RETURN_NOT_HANDLED;
467}
468
469/*
470 * alua_stpg - Evaluate SET TARGET GROUP STATES
471 * @sdev: the device to be evaluated
472 * @state: the new target group state
473 *
474 * Send a SET TARGET GROUP STATES command to the device.
475 * We only have to test here if we should resubmit the command;
476 * any other error is assumed as a failure.
477 */
478static int alua_stpg(struct scsi_device *sdev, int state,
479 struct alua_dh_data *h)
480{
481 struct scsi_sense_hdr sense_hdr;
482 unsigned err;
483 int retry = ALUA_FAILOVER_RETRIES;
484
485 retry:
486 err = submit_stpg(sdev, h);
487 if (err == SCSI_DH_IO && h->senselen > 0) {
488 err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
489 &sense_hdr);
490 if (!err)
491 return SCSI_DH_IO;
492 err = alua_check_sense(sdev, &sense_hdr);
493 if (retry > 0 && err == NEEDS_RETRY) {
494 retry--;
495 goto retry;
496 }
497 sdev_printk(KERN_INFO, sdev,
498 "%s: stpg sense code: %02x/%02x/%02x\n",
499 ALUA_DH_NAME, sense_hdr.sense_key,
500 sense_hdr.asc, sense_hdr.ascq);
501 err = SCSI_DH_IO;
502 }
503 if (err == SCSI_DH_OK) {
504 h->state = state;
505 sdev_printk(KERN_INFO, sdev,
506 "%s: port group %02x switched to state %c\n",
507 ALUA_DH_NAME, h->group_id,
508 print_alua_state(h->state) );
509 }
510 return err;
511}
512
513/*
514 * alua_rtpg - Evaluate REPORT TARGET GROUP STATES
515 * @sdev: the device to be evaluated.
516 *
517 * Evaluate the Target Port Group State.
518 * Returns SCSI_DH_DEV_OFFLINED if the path is
519 * found to be unusable.
520 */
521static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
522{
523 struct scsi_sense_hdr sense_hdr;
524 int len, k, off, valid_states = 0;
525 char *ucp;
526 unsigned err;
527
528 retry:
529 err = submit_rtpg(sdev, h);
530
531 if (err == SCSI_DH_IO && h->senselen > 0) {
532 err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
533 &sense_hdr);
534 if (!err)
535 return SCSI_DH_IO;
536
537 err = alua_check_sense(sdev, &sense_hdr);
538 if (err == NEEDS_RETRY)
539 goto retry;
540 sdev_printk(KERN_INFO, sdev,
541 "%s: rtpg sense code %02x/%02x/%02x\n",
542 ALUA_DH_NAME, sense_hdr.sense_key,
543 sense_hdr.asc, sense_hdr.ascq);
544 err = SCSI_DH_IO;
545 }
546 if (err != SCSI_DH_OK)
547 return err;
548
549 len = (h->buff[0] << 24) + (h->buff[1] << 16) +
550 (h->buff[2] << 8) + h->buff[3] + 4;
551
552 if (len > h->bufflen) {
553 /* Resubmit with the correct length */
554 if (realloc_buffer(h, len)) {
555 sdev_printk(KERN_WARNING, sdev,
556 "%s: kmalloc buffer failed\n",__func__);
557 /* Temporary failure, bypass */
558 return SCSI_DH_DEV_TEMP_BUSY;
559 }
560 goto retry;
561 }
562
563 for (k = 4, ucp = h->buff + 4; k < len; k += off, ucp += off) {
564 if (h->group_id == (ucp[2] << 8) + ucp[3]) {
565 h->state = ucp[0] & 0x0f;
566 valid_states = ucp[1];
567 }
568 off = 8 + (ucp[7] * 4);
569 }
570
571 sdev_printk(KERN_INFO, sdev,
572 "%s: port group %02x state %c supports %c%c%c%c%c%c\n",
573 ALUA_DH_NAME, h->group_id, print_alua_state(h->state),
574 valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
575 valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
576 valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
577 valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
578 valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
579 valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
580
581 if (h->tpgs & TPGS_MODE_EXPLICIT) {
582 switch (h->state) {
583 case TPGS_STATE_TRANSITIONING:
584 /* State transition, retry */
585 goto retry;
586 break;
587 case TPGS_STATE_OFFLINE:
588 /* Path is offline, fail */
589 err = SCSI_DH_DEV_OFFLINED;
590 break;
591 default:
592 break;
593 }
594 } else {
595 /* Only Implicit ALUA support */
596 if (h->state == TPGS_STATE_OPTIMIZED ||
597 h->state == TPGS_STATE_NONOPTIMIZED ||
598 h->state == TPGS_STATE_STANDBY)
599 /* Usable path if active */
600 err = SCSI_DH_OK;
601 else
602 /* Path unusable for unavailable/offline */
603 err = SCSI_DH_DEV_OFFLINED;
604 }
605 return err;
606}
607
608/*
609 * alua_initialize - Initialize ALUA state
610 * @sdev: the device to be initialized
611 *
612 * For the prep_fn to work correctly we have
613 * to initialize the ALUA state for the device.
614 */
615static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
616{
617 int err;
618
619 err = alua_std_inquiry(sdev, h);
620 if (err != SCSI_DH_OK)
621 goto out;
622
623 err = alua_vpd_inquiry(sdev, h);
624 if (err != SCSI_DH_OK)
625 goto out;
626
627 err = alua_rtpg(sdev, h);
628 if (err != SCSI_DH_OK)
629 goto out;
630
631out:
632 return err;
633}
634
635/*
636 * alua_activate - activate a path
637 * @sdev: device on the path to be activated
638 *
639 * We currently switch only the port group to be activated and
640 * let the array figure out the rest.
641 * There may be other arrays which require us to switch all port groups
642 * based on a certain policy. But until we actually encounter them it
643 * should be okay.
644 */
645static int alua_activate(struct scsi_device *sdev)
646{
647 struct alua_dh_data *h = get_alua_data(sdev);
648 int err = SCSI_DH_OK;
649
650 if (h->group_id != -1) {
651 err = alua_rtpg(sdev, h);
652 if (err != SCSI_DH_OK)
653 goto out;
654 }
655
656 if (h->tpgs == TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED)
657 err = alua_stpg(sdev, TPGS_STATE_OPTIMIZED, h);
658
659out:
660 return err;
661}
662
663/*
664 * alua_prep_fn - request callback
665 *
666 * Fail I/O to all paths not in state
667 * active/optimized or active/non-optimized.
668 */
669static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
670{
671 struct alua_dh_data *h = get_alua_data(sdev);
672 int ret = BLKPREP_OK;
673
674 if (h->state != TPGS_STATE_OPTIMIZED &&
675 h->state != TPGS_STATE_NONOPTIMIZED) {
676 ret = BLKPREP_KILL;
677 req->cmd_flags |= REQ_QUIET;
678 }
679 return ret;
680
681}
682
683const struct scsi_dh_devlist alua_dev_list[] = {
684 {"HP", "MSA VOLUME" },
685 {"HP", "HSV101" },
686 {"HP", "HSV111" },
687 {"HP", "HSV200" },
688 {"HP", "HSV210" },
689 {"HP", "HSV300" },
690 {"IBM", "2107900" },
691 {"IBM", "2145" },
692 {"Pillar", "Axiom" },
693 {NULL, NULL}
694};
695
696static int alua_bus_attach(struct scsi_device *sdev);
697static void alua_bus_detach(struct scsi_device *sdev);
698
699static struct scsi_device_handler alua_dh = {
700 .name = ALUA_DH_NAME,
701 .module = THIS_MODULE,
702 .devlist = alua_dev_list,
703 .attach = alua_bus_attach,
704 .detach = alua_bus_detach,
705 .prep_fn = alua_prep_fn,
706 .check_sense = alua_check_sense,
707 .activate = alua_activate,
708};
709
710/*
711 * alua_bus_attach - Attach device handler
712 * @sdev: device to be attached to
713 */
714static int alua_bus_attach(struct scsi_device *sdev)
715{
716 struct scsi_dh_data *scsi_dh_data;
717 struct alua_dh_data *h;
718 unsigned long flags;
719 int err = SCSI_DH_OK;
720
721 scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
722 + sizeof(*h) , GFP_KERNEL);
723 if (!scsi_dh_data) {
724 sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
725 ALUA_DH_NAME);
726 return -ENOMEM;
727 }
728
729 scsi_dh_data->scsi_dh = &alua_dh;
730 h = (struct alua_dh_data *) scsi_dh_data->buf;
731 h->tpgs = TPGS_MODE_UNINITIALIZED;
732 h->state = TPGS_STATE_OPTIMIZED;
733 h->group_id = -1;
734 h->rel_port = -1;
735 h->buff = h->inq;
736 h->bufflen = ALUA_INQUIRY_SIZE;
737
738 err = alua_initialize(sdev, h);
739 if (err != SCSI_DH_OK)
740 goto failed;
741
742 if (!try_module_get(THIS_MODULE))
743 goto failed;
744
745 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
746 sdev->scsi_dh_data = scsi_dh_data;
747 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
748
749 return 0;
750
751failed:
752 kfree(scsi_dh_data);
753 sdev_printk(KERN_ERR, sdev, "%s: not attached\n", ALUA_DH_NAME);
754 return -EINVAL;
755}
756
757/*
758 * alua_bus_detach - Detach device handler
759 * @sdev: device to be detached from
760 */
761static void alua_bus_detach(struct scsi_device *sdev)
762{
763 struct scsi_dh_data *scsi_dh_data;
764 struct alua_dh_data *h;
765 unsigned long flags;
766
767 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
768 scsi_dh_data = sdev->scsi_dh_data;
769 sdev->scsi_dh_data = NULL;
770 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
771
772 h = (struct alua_dh_data *) scsi_dh_data->buf;
773 if (h->buff && h->inq != h->buff)
774 kfree(h->buff);
775 kfree(scsi_dh_data);
776 module_put(THIS_MODULE);
777 sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", ALUA_DH_NAME);
778}
779
780static int __init alua_init(void)
781{
782 int r;
783
784 r = scsi_register_device_handler(&alua_dh);
785 if (r != 0)
786 printk(KERN_ERR "%s: Failed to register scsi device handler",
787 ALUA_DH_NAME);
788 return r;
789}
790
791static void __exit alua_exit(void)
792{
793 scsi_unregister_device_handler(&alua_dh);
794}
795
796module_init(alua_init);
797module_exit(alua_exit);
798
799MODULE_DESCRIPTION("DM Multipath ALUA support");
800MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
801MODULE_LICENSE("GPL");
802MODULE_VERSION(ALUA_DH_VER);
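
Two small parsing steps carry most of the new handler's logic: reading the TPGS field from byte 5 of the standard INQUIRY data and walking the REPORT TARGET PORT GROUPS descriptors. A standalone worked example with made-up sample buffers (the values are illustrative, not from a real array):

    #include <stdio.h>

    int main(void)
    {
            unsigned char inq_byte5 = 0x30;         /* TPGS bits 5:4 = 0x3 */
            unsigned char rtpg[] = {
                    0x00, 0x00, 0x00, 0x08,         /* returned data length = 8 */
                    0x00, 0x07, 0x00, 0x02,         /* state, supported states, group 2 */
                    0x00, 0x00, 0x00, 0x00,         /* reserved, status, vendor, 0 ports */
            };
            int tpgs = (inq_byte5 >> 4) & 0x3;      /* 0x3: implicit and explicit */
            int len = (rtpg[0] << 24) + (rtpg[1] << 16) +
                      (rtpg[2] << 8) + rtpg[3] + 4;
            int k, off;

            printf("tpgs mode 0x%x, rtpg data length %d\n", tpgs, len);
            for (k = 4; k < len; k += off) {
                    unsigned char *ucp = rtpg + k;

                    printf("group %d state 0x%x\n",
                           (ucp[2] << 8) + ucp[3], ucp[0] & 0x0f);
                    off = 8 + ucp[7] * 4;           /* 8-byte header plus 4 bytes per port */
            }
            return 0;
    }
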
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index f2467e936e55..aa46b131b20e 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -25,28 +25,31 @@
25#include <scsi/scsi_dh.h> 25#include <scsi/scsi_dh.h>
26#include <scsi/scsi_device.h> 26#include <scsi/scsi_device.h>
27 27
28#define CLARIION_NAME "emc_clariion" 28#define CLARIION_NAME "emc"
29 29
30#define CLARIION_TRESPASS_PAGE 0x22 30#define CLARIION_TRESPASS_PAGE 0x22
31#define CLARIION_BUFFER_SIZE 0x80 31#define CLARIION_BUFFER_SIZE 0xFC
32#define CLARIION_TIMEOUT (60 * HZ) 32#define CLARIION_TIMEOUT (60 * HZ)
33#define CLARIION_RETRIES 3 33#define CLARIION_RETRIES 3
34#define CLARIION_UNBOUND_LU -1 34#define CLARIION_UNBOUND_LU -1
35#define CLARIION_SP_A 0
36#define CLARIION_SP_B 1
35 37
36static unsigned char long_trespass[] = { 38/* Flags */
37 0, 0, 0, 0, 39#define CLARIION_SHORT_TRESPASS 1
38 CLARIION_TRESPASS_PAGE, /* Page code */ 40#define CLARIION_HONOR_RESERVATIONS 2
39 0x09, /* Page length - 2 */
40 0x81, /* Trespass code + Honor reservation bit */
41 0xff, 0xff, /* Trespass target */
42 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */
43};
44 41
45static unsigned char long_trespass_hr[] = { 42/* LUN states */
46 0, 0, 0, 0, 43#define CLARIION_LUN_UNINITIALIZED -1
44#define CLARIION_LUN_UNBOUND 0
45#define CLARIION_LUN_BOUND 1
46#define CLARIION_LUN_OWNED 2
47
48static unsigned char long_trespass[] = {
49 0, 0, 0, 0, 0, 0, 0, 0,
47 CLARIION_TRESPASS_PAGE, /* Page code */ 50 CLARIION_TRESPASS_PAGE, /* Page code */
48 0x09, /* Page length - 2 */ 51 0x09, /* Page length - 2 */
49 0x01, /* Trespass code + Honor reservation bit */ 52 0x01, /* Trespass code */
50 0xff, 0xff, /* Trespass target */ 53 0xff, 0xff, /* Trespass target */
51 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */ 54 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */
52}; 55};
@@ -55,39 +58,56 @@ static unsigned char short_trespass[] = {
55 0, 0, 0, 0, 58 0, 0, 0, 0,
56 CLARIION_TRESPASS_PAGE, /* Page code */ 59 CLARIION_TRESPASS_PAGE, /* Page code */
57 0x02, /* Page length - 2 */ 60 0x02, /* Page length - 2 */
58 0x81, /* Trespass code + Honor reservation bit */ 61 0x01, /* Trespass code */
59 0xff, /* Trespass target */ 62 0xff, /* Trespass target */
60}; 63};
61 64
62static unsigned char short_trespass_hr[] = { 65static const char * lun_state[] =
63 0, 0, 0, 0, 66{
64 CLARIION_TRESPASS_PAGE, /* Page code */ 67 "not bound",
65 0x02, /* Page length - 2 */ 68 "bound",
66 0x01, /* Trespass code + Honor reservation bit */ 69 "owned",
67 0xff, /* Trespass target */
68}; 70};
69 71
70struct clariion_dh_data { 72struct clariion_dh_data {
71 /* 73 /*
74 * Flags:
75 * CLARIION_SHORT_TRESPASS
72 * Use short trespass command (FC-series) or the long version 76 * Use short trespass command (FC-series) or the long version
73 * (default for AX/CX CLARiiON arrays). 77 * (default for AX/CX CLARiiON arrays).
74 */ 78 *
75 unsigned short_trespass; 79 * CLARIION_HONOR_RESERVATIONS
76 /*
77 * Whether or not (default) to honor SCSI reservations when 80 * Whether or not (default) to honor SCSI reservations when
78 * initiating a switch-over. 81 * initiating a switch-over.
79 */ 82 */
80 unsigned hr; 83 unsigned flags;
81 /* I/O buffer for both MODE_SELECT and INQUIRY commands. */ 84 /*
85 * I/O buffer for both MODE_SELECT and INQUIRY commands.
86 */
82 char buffer[CLARIION_BUFFER_SIZE]; 87 char buffer[CLARIION_BUFFER_SIZE];
83 /* 88 /*
84 * SCSI sense buffer for commands -- assumes serial issuance 89 * SCSI sense buffer for commands -- assumes serial issuance
85 * and completion sequence of all commands for same multipath. 90 * and completion sequence of all commands for same multipath.
86 */ 91 */
87 unsigned char sense[SCSI_SENSE_BUFFERSIZE]; 92 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
88 /* which SP (A=0,B=1,UNBOUND=-1) is dflt SP for path's mapped dev */ 93 unsigned int senselen;
94 /*
95 * LUN state
96 */
97 int lun_state;
98 /*
99 * SP Port number
100 */
101 int port;
102 /*
103 * which SP (A=0,B=1,UNBOUND=-1) is the default SP for this
104 * path's mapped LUN
105 */
89 int default_sp; 106 int default_sp;
90 /* which SP (A=0,B=1,UNBOUND=-1) is active for path's mapped dev */ 107 /*
108 * which SP (A=0,B=1,UNBOUND=-1) is the active SP for this
109 * path's mapped LUN
110 */
91 int current_sp; 111 int current_sp;
92}; 112};
93 113
@@ -102,19 +122,16 @@ static inline struct clariion_dh_data
102/* 122/*
103 * Parse MODE_SELECT cmd reply. 123 * Parse MODE_SELECT cmd reply.
104 */ 124 */
105static int trespass_endio(struct scsi_device *sdev, int result) 125static int trespass_endio(struct scsi_device *sdev, char *sense)
106{ 126{
107 int err = SCSI_DH_OK; 127 int err = SCSI_DH_IO;
108 struct scsi_sense_hdr sshdr; 128 struct scsi_sense_hdr sshdr;
109 struct clariion_dh_data *csdev = get_clariion_data(sdev);
110 char *sense = csdev->sense;
111 129
112 if (status_byte(result) == CHECK_CONDITION && 130 if (!scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) {
113 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) { 131 sdev_printk(KERN_ERR, sdev, "%s: Found valid sense data 0x%2x, "
114 sdev_printk(KERN_ERR, sdev, "Found valid sense data 0x%2x, "
115 "0x%2x, 0x%2x while sending CLARiiON trespass " 132 "0x%2x, 0x%2x while sending CLARiiON trespass "
116 "command.\n", sshdr.sense_key, sshdr.asc, 133 "command.\n", CLARIION_NAME, sshdr.sense_key,
117 sshdr.ascq); 134 sshdr.asc, sshdr.ascq);
118 135
119 if ((sshdr.sense_key == 0x05) && (sshdr.asc == 0x04) && 136 if ((sshdr.sense_key == 0x05) && (sshdr.asc == 0x04) &&
120 (sshdr.ascq == 0x00)) { 137 (sshdr.ascq == 0x00)) {
@@ -122,9 +139,9 @@ static int trespass_endio(struct scsi_device *sdev, int result)
122 * Array based copy in progress -- do not send 139 * Array based copy in progress -- do not send
123 * mode_select or copy will be aborted mid-stream. 140 * mode_select or copy will be aborted mid-stream.
124 */ 141 */
125 sdev_printk(KERN_INFO, sdev, "Array Based Copy in " 142 sdev_printk(KERN_INFO, sdev, "%s: Array Based Copy in "
126 "progress while sending CLARiiON trespass " 143 "progress while sending CLARiiON trespass "
127 "command.\n"); 144 "command.\n", CLARIION_NAME);
128 err = SCSI_DH_DEV_TEMP_BUSY; 145 err = SCSI_DH_DEV_TEMP_BUSY;
129 } else if ((sshdr.sense_key == 0x02) && (sshdr.asc == 0x04) && 146 } else if ((sshdr.sense_key == 0x02) && (sshdr.asc == 0x04) &&
130 (sshdr.ascq == 0x03)) { 147 (sshdr.ascq == 0x03)) {
@@ -132,160 +149,153 @@ static int trespass_endio(struct scsi_device *sdev, int result)
132 * LUN Not Ready - Manual Intervention Required 149 * LUN Not Ready - Manual Intervention Required
133 * indicates in-progress ucode upgrade (NDU). 150 * indicates in-progress ucode upgrade (NDU).
134 */ 151 */
135 sdev_printk(KERN_INFO, sdev, "Detected in-progress " 152 sdev_printk(KERN_INFO, sdev, "%s: Detected in-progress "
136 "ucode upgrade NDU operation while sending " 153 "ucode upgrade NDU operation while sending "
137 "CLARiiON trespass command.\n"); 154 "CLARiiON trespass command.\n", CLARIION_NAME);
138 err = SCSI_DH_DEV_TEMP_BUSY; 155 err = SCSI_DH_DEV_TEMP_BUSY;
139 } else 156 } else
140 err = SCSI_DH_DEV_FAILED; 157 err = SCSI_DH_DEV_FAILED;
141 } else if (result) { 158 } else {
142 sdev_printk(KERN_ERR, sdev, "Error 0x%x while sending " 159 sdev_printk(KERN_INFO, sdev,
143 "CLARiiON trespass command.\n", result); 160 "%s: failed to send MODE SELECT, no sense available\n",
144 err = SCSI_DH_IO; 161 CLARIION_NAME);
145 } 162 }
146
147 return err; 163 return err;
148} 164}
149 165
150static int parse_sp_info_reply(struct scsi_device *sdev, int result, 166static int parse_sp_info_reply(struct scsi_device *sdev,
151 int *default_sp, int *current_sp, int *new_current_sp) 167 struct clariion_dh_data *csdev)
152{ 168{
153 int err = SCSI_DH_OK; 169 int err = SCSI_DH_OK;
154 struct clariion_dh_data *csdev = get_clariion_data(sdev);
155 170
156 if (result == 0) { 171 /* check for in-progress ucode upgrade (NDU) */
157 /* check for in-progress ucode upgrade (NDU) */ 172 if (csdev->buffer[48] != 0) {
158 if (csdev->buffer[48] != 0) { 173 sdev_printk(KERN_NOTICE, sdev, "%s: Detected in-progress "
159 sdev_printk(KERN_NOTICE, sdev, "Detected in-progress " 174 "ucode upgrade NDU operation while finding "
160 "ucode upgrade NDU operation while finding " 175 "current active SP.", CLARIION_NAME);
161 "current active SP."); 176 err = SCSI_DH_DEV_TEMP_BUSY;
162 err = SCSI_DH_DEV_TEMP_BUSY; 177 goto out;
163 } else { 178 }
164 *default_sp = csdev->buffer[5]; 179 if (csdev->buffer[4] < 0 || csdev->buffer[4] > 2) {
165 180 /* Invalid buffer format */
166 if (csdev->buffer[4] == 2) 181 sdev_printk(KERN_NOTICE, sdev,
167 /* SP for path is current */ 182 "%s: invalid VPD page 0xC0 format\n",
168 *current_sp = csdev->buffer[8]; 183 CLARIION_NAME);
169 else { 184 err = SCSI_DH_NOSYS;
170 if (csdev->buffer[4] == 1) 185 goto out;
171 /* SP for this path is NOT current */ 186 }
172 if (csdev->buffer[8] == 0) 187 switch (csdev->buffer[28] & 0x0f) {
173 *current_sp = 1; 188 case 6:
174 else 189 sdev_printk(KERN_NOTICE, sdev,
175 *current_sp = 0; 190 "%s: ALUA failover mode detected\n",
176 else 191 CLARIION_NAME);
177 /* unbound LU or LUNZ */ 192 break;
178 *current_sp = CLARIION_UNBOUND_LU; 193 case 4:
179 } 194 /* Linux failover */
180 *new_current_sp = csdev->buffer[8]; 195 break;
181 } 196 default:
182 } else { 197 sdev_printk(KERN_WARNING, sdev,
183 struct scsi_sense_hdr sshdr; 198 "%s: Invalid failover mode %d\n",
184 199 CLARIION_NAME, csdev->buffer[28] & 0x0f);
185 err = SCSI_DH_IO; 200 err = SCSI_DH_NOSYS;
186 201 goto out;
187 if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
188 &sshdr))
189 sdev_printk(KERN_ERR, sdev, "Found valid sense data "
190 "0x%2x, 0x%2x, 0x%2x while finding current "
191 "active SP.", sshdr.sense_key, sshdr.asc,
192 sshdr.ascq);
193 else
194 sdev_printk(KERN_ERR, sdev, "Error 0x%x finding "
195 "current active SP.", result);
196 } 202 }
197 203
204 csdev->default_sp = csdev->buffer[5];
205 csdev->lun_state = csdev->buffer[4];
206 csdev->current_sp = csdev->buffer[8];
207 csdev->port = csdev->buffer[7];
208
209out:
198 return err; 210 return err;
199} 211}
200 212
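The SP-info parser above reads the CLARiiON VPD page 0xC0 at fixed byte offsets. A standalone sketch of that layout follows; only the offsets are taken from the hunk, while the struct and field names are invented here for illustration:

#include <stdio.h>

struct sp_info { int default_sp, current_sp, lun_state, port, mode; };

static int parse_vpd_c0(const unsigned char *buf, struct sp_info *out)
{
	if (buf[48] != 0)
		return -1;			/* NDU (ucode upgrade) in progress */
	if (buf[4] > 2)
		return -2;			/* unexpected LUN state */
	out->mode       = buf[28] & 0x0f;	/* 4 = "Linux" failover, 6 = ALUA */
	out->default_sp = buf[5];
	out->lun_state  = buf[4];
	out->current_sp = buf[8];
	out->port       = buf[7];
	return 0;
}

int main(void)
{
	unsigned char page[64] = { 0 };
	struct sp_info info;

	page[4] = 1; page[5] = 0; page[7] = 2; page[8] = 1; page[28] = 4;
	if (!parse_vpd_c0(page, &info))
		printf("current SP %c, default SP %c, port %d\n",
		       'A' + info.current_sp, 'A' + info.default_sp, info.port);
	return 0;
}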
201static int sp_info_endio(struct scsi_device *sdev, int result, 213#define emc_default_str "FC (Legacy)"
202 int mode_select_sent, int *done) 214
215static char * parse_sp_model(struct scsi_device *sdev, unsigned char *buffer)
203{ 216{
204 struct clariion_dh_data *csdev = get_clariion_data(sdev); 217 unsigned char len = buffer[4] + 5;
205 int err_flags, default_sp, current_sp, new_current_sp; 218 char *sp_model = NULL;
219 unsigned char sp_len, serial_len;
220
221 if (len < 160) {
222 sdev_printk(KERN_WARNING, sdev,
223 "%s: Invalid information section length %d\n",
224 CLARIION_NAME, len);
225 /* Check for old FC arrays */
226 if (!strncmp(buffer + 8, "DGC", 3)) {
227 /* Old FC array, not supporting extended information */
228 sp_model = emc_default_str;
229 }
230 goto out;
231 }
206 232
207 err_flags = parse_sp_info_reply(sdev, result, &default_sp, 233 /*
208 &current_sp, &new_current_sp); 234 * Parse extended information for SP model number
235 */
236 serial_len = buffer[160];
237 if (serial_len == 0 || serial_len + 161 > len) {
238 sdev_printk(KERN_WARNING, sdev,
239 "%s: Invalid array serial number length %d\n",
240 CLARIION_NAME, serial_len);
241 goto out;
242 }
243 sp_len = buffer[99];
244 if (sp_len == 0 || serial_len + sp_len + 161 > len) {
245 sdev_printk(KERN_WARNING, sdev,
246 "%s: Invalid model number length %d\n",
247 CLARIION_NAME, sp_len);
248 goto out;
249 }
250 sp_model = &buffer[serial_len + 161];
251 /* Strip whitespace at the end */
252 while (sp_len > 1 && sp_model[sp_len - 1] == ' ')
253 sp_len--;
209 254
210 if (err_flags != SCSI_DH_OK) 255 sp_model[sp_len] = '\0';
211 goto done;
212 256
213 if (mode_select_sent) { 257out:
214 csdev->default_sp = default_sp; 258 return sp_model;
215 csdev->current_sp = current_sp;
216 } else {
217 /*
 218		 * Issue the actual mode_select request IFF either
219 * (1) we do not know the identity of the current SP OR
220 * (2) what we think we know is actually correct.
221 */
222 if ((current_sp != CLARIION_UNBOUND_LU) &&
223 (new_current_sp != current_sp)) {
224
225 csdev->default_sp = default_sp;
226 csdev->current_sp = current_sp;
227
228 sdev_printk(KERN_INFO, sdev, "Ignoring path group "
229 "switch-over command for CLARiiON SP%s since "
230 " mapped device is already initialized.",
231 current_sp ? "B" : "A");
232 if (done)
233 *done = 1; /* as good as doing it */
234 }
235 }
236done:
237 return err_flags;
238} 259}
239 260
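parse_sp_model() above walks the vendor-specific tail of the standard INQUIRY data: total length from byte 4, array serial-number length at byte 160, SP model length at byte 99, and the model string following the serial number at byte 161. A simplified, bounds-checked userspace sketch of that indexing; the layout is assumed from the hunk, and the trailing-blank stripping mirrors the handler:

#include <stdio.h>
#include <string.h>

static const char *sp_model_from_inquiry(unsigned char *buf, size_t bufsz)
{
	unsigned int len = buf[4] + 5;		/* additional length + header */
	unsigned int serial_len, model_len;

	if (len < 161 || len > bufsz)
		return NULL;			/* no extended information */
	serial_len = buf[160];
	model_len  = buf[99];
	if (!serial_len || !model_len || 161 + serial_len + model_len > len)
		return NULL;
	/* strip trailing blanks and terminate in place, as the handler does */
	while (model_len > 1 && buf[161 + serial_len + model_len - 1] == ' ')
		model_len--;
	buf[161 + serial_len + model_len] = '\0';
	return (const char *)&buf[161 + serial_len];
}

int main(void)
{
	unsigned char inq[256] = { 0 };
	const char *model;

	inq[4] = 200;				/* total length 205 bytes */
	inq[160] = 12;				/* serial number length */
	inq[99] = 8;				/* SP model length */
	memcpy(&inq[173], "CX3-40  ", 8);
	model = sp_model_from_inquiry(inq, sizeof(inq));
	printf("model: %s\n", model ? model : "(none)");
	return 0;
}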
240/* 261/*
241* Get block request for REQ_BLOCK_PC command issued to path. Currently 262 * Get block request for REQ_BLOCK_PC command issued to path. Currently
242* limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands. 263 * limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands.
243* 264 *
244* Uses data and sense buffers in hardware handler context structure and 265 * Uses data and sense buffers in hardware handler context structure and
245* assumes serial servicing of commands, both issuance and completion. 266 * assumes serial servicing of commands, both issuance and completion.
246*/ 267 */
247static struct request *get_req(struct scsi_device *sdev, int cmd) 268static struct request *get_req(struct scsi_device *sdev, int cmd,
269 unsigned char *buffer)
248{ 270{
249 struct clariion_dh_data *csdev = get_clariion_data(sdev);
250 struct request *rq; 271 struct request *rq;
251 unsigned char *page22;
252 int len = 0; 272 int len = 0;
253 273
254 rq = blk_get_request(sdev->request_queue, 274 rq = blk_get_request(sdev->request_queue,
255 (cmd == MODE_SELECT) ? WRITE : READ, GFP_ATOMIC); 275 (cmd == MODE_SELECT) ? WRITE : READ, GFP_NOIO);
256 if (!rq) { 276 if (!rq) {
257 sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed"); 277 sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
258 return NULL; 278 return NULL;
259 } 279 }
260 280
261 memset(&rq->cmd, 0, BLK_MAX_CDB); 281 memset(rq->cmd, 0, BLK_MAX_CDB);
282 rq->cmd_len = COMMAND_SIZE(cmd);
262 rq->cmd[0] = cmd; 283 rq->cmd[0] = cmd;
263 rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
264 284
265 switch (cmd) { 285 switch (cmd) {
266 case MODE_SELECT: 286 case MODE_SELECT:
267 if (csdev->short_trespass) { 287 len = sizeof(short_trespass);
268 page22 = csdev->hr ? short_trespass_hr : short_trespass; 288 rq->cmd_flags |= REQ_RW;
269 len = sizeof(short_trespass); 289 rq->cmd[1] = 0x10;
270 } else { 290 break;
271 page22 = csdev->hr ? long_trespass_hr : long_trespass; 291 case MODE_SELECT_10:
272 len = sizeof(long_trespass); 292 len = sizeof(long_trespass);
273 }
274 /*
275 * Can't DMA from kernel BSS -- must copy selected trespass
276 * command mode page contents to context buffer which is
277 * allocated by kmalloc.
278 */
279 BUG_ON((len > CLARIION_BUFFER_SIZE));
280 memcpy(csdev->buffer, page22, len);
281 rq->cmd_flags |= REQ_RW; 293 rq->cmd_flags |= REQ_RW;
282 rq->cmd[1] = 0x10; 294 rq->cmd[1] = 0x10;
283 break; 295 break;
284 case INQUIRY: 296 case INQUIRY:
285 rq->cmd[1] = 0x1;
286 rq->cmd[2] = 0xC0;
287 len = CLARIION_BUFFER_SIZE; 297 len = CLARIION_BUFFER_SIZE;
288 memset(csdev->buffer, 0, CLARIION_BUFFER_SIZE); 298 memset(buffer, 0, len);
289 break; 299 break;
290 default: 300 default:
291 BUG_ON(1); 301 BUG_ON(1);
@@ -298,47 +308,94 @@ static struct request *get_req(struct scsi_device *sdev, int cmd)
298 rq->timeout = CLARIION_TIMEOUT; 308 rq->timeout = CLARIION_TIMEOUT;
299 rq->retries = CLARIION_RETRIES; 309 rq->retries = CLARIION_RETRIES;
300 310
301 rq->sense = csdev->sense; 311 if (blk_rq_map_kern(rq->q, rq, buffer, len, GFP_NOIO)) {
302 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); 312 blk_put_request(rq);
303 rq->sense_len = 0;
304
305 if (blk_rq_map_kern(sdev->request_queue, rq, csdev->buffer,
306 len, GFP_ATOMIC)) {
307 __blk_put_request(rq->q, rq);
308 return NULL; 313 return NULL;
309 } 314 }
310 315
311 return rq; 316 return rq;
312} 317}
313 318
314static int send_cmd(struct scsi_device *sdev, int cmd) 319static int send_inquiry_cmd(struct scsi_device *sdev, int page,
320 struct clariion_dh_data *csdev)
315{ 321{
316 struct request *rq = get_req(sdev, cmd); 322 struct request *rq = get_req(sdev, INQUIRY, csdev->buffer);
323 int err;
317 324
318 if (!rq) 325 if (!rq)
319 return SCSI_DH_RES_TEMP_UNAVAIL; 326 return SCSI_DH_RES_TEMP_UNAVAIL;
320 327
321 return blk_execute_rq(sdev->request_queue, NULL, rq, 1); 328 rq->sense = csdev->sense;
329 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
330 rq->sense_len = csdev->senselen = 0;
331
332 rq->cmd[0] = INQUIRY;
333 if (page != 0) {
334 rq->cmd[1] = 1;
335 rq->cmd[2] = page;
336 }
337 err = blk_execute_rq(sdev->request_queue, NULL, rq, 1);
338 if (err == -EIO) {
339 sdev_printk(KERN_INFO, sdev,
340 "%s: failed to send %s INQUIRY: %x\n",
341 CLARIION_NAME, page?"EVPD":"standard",
342 rq->errors);
343 csdev->senselen = rq->sense_len;
344 err = SCSI_DH_IO;
345 }
346
347 blk_put_request(rq);
348
349 return err;
322} 350}
323 351
324static int clariion_activate(struct scsi_device *sdev) 352static int send_trespass_cmd(struct scsi_device *sdev,
353 struct clariion_dh_data *csdev)
325{ 354{
326 int result, done = 0; 355 struct request *rq;
356 unsigned char *page22;
357 int err, len, cmd;
358
359 if (csdev->flags & CLARIION_SHORT_TRESPASS) {
360 page22 = short_trespass;
361 if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS))
362 /* Set Honor Reservations bit */
363 page22[6] |= 0x80;
364 len = sizeof(short_trespass);
365 cmd = MODE_SELECT;
366 } else {
367 page22 = long_trespass;
368 if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS))
369 /* Set Honor Reservations bit */
370 page22[10] |= 0x80;
371 len = sizeof(long_trespass);
372 cmd = MODE_SELECT_10;
373 }
374 BUG_ON((len > CLARIION_BUFFER_SIZE));
375 memcpy(csdev->buffer, page22, len);
327 376
328 result = send_cmd(sdev, INQUIRY); 377 rq = get_req(sdev, cmd, csdev->buffer);
329 result = sp_info_endio(sdev, result, 0, &done); 378 if (!rq)
330 if (result || done) 379 return SCSI_DH_RES_TEMP_UNAVAIL;
331 goto done;
332 380
333 result = send_cmd(sdev, MODE_SELECT); 381 rq->sense = csdev->sense;
334 result = trespass_endio(sdev, result); 382 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
335 if (result) 383 rq->sense_len = csdev->senselen = 0;
336 goto done;
337 384
338 result = send_cmd(sdev, INQUIRY); 385 err = blk_execute_rq(sdev->request_queue, NULL, rq, 1);
339 result = sp_info_endio(sdev, result, 1, NULL); 386 if (err == -EIO) {
340done: 387 if (rq->sense_len) {
341 return result; 388 err = trespass_endio(sdev, csdev->sense);
389 } else {
390 sdev_printk(KERN_INFO, sdev,
391 "%s: failed to send MODE SELECT: %x\n",
392 CLARIION_NAME, rq->errors);
393 }
394 }
395
396 blk_put_request(rq);
397
398 return err;
342} 399}
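send_trespass_cmd() above selects the short or long trespass mode page and flips one bit before copying the page into the handler buffer. A small sketch of just that selection, mirroring the byte offsets used in the hunk (byte 6 of the short page, byte 10 of the long one, set when the honor-reservations flag is clear); the page contents here are zero-filled placeholders, not the real trespass pages:

#include <stdio.h>
#include <string.h>

static unsigned char short_trespass[8];		/* placeholder contents */
static unsigned char long_trespass[12];		/* placeholder contents */

static void build_trespass(unsigned char *dst, int want_short, int honor_resv)
{
	if (want_short) {
		memcpy(dst, short_trespass, sizeof(short_trespass));
		if (!honor_resv)
			dst[6] |= 0x80;
	} else {
		memcpy(dst, long_trespass, sizeof(long_trespass));
		if (!honor_resv)
			dst[10] |= 0x80;
	}
}

int main(void)
{
	unsigned char buf[16] = { 0 };

	build_trespass(buf, 1, 0);
	printf("short page byte 6 = 0x%02x\n", buf[6]);
	return 0;
}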
343 400
344static int clariion_check_sense(struct scsi_device *sdev, 401static int clariion_check_sense(struct scsi_device *sdev,
@@ -386,99 +443,215 @@ static int clariion_check_sense(struct scsi_device *sdev,
386 break; 443 break;
387 } 444 }
388 445
389 /* success just means we do not care what scsi-ml does */ 446 return SCSI_RETURN_NOT_HANDLED;
390 return SUCCESS; 447}
448
449static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
450{
451 struct clariion_dh_data *h = get_clariion_data(sdev);
452 int ret = BLKPREP_OK;
453
454 if (h->lun_state != CLARIION_LUN_OWNED) {
455 ret = BLKPREP_KILL;
456 req->cmd_flags |= REQ_QUIET;
457 }
458 return ret;
459
460}
461
462static int clariion_std_inquiry(struct scsi_device *sdev,
463 struct clariion_dh_data *csdev)
464{
465 int err;
466 char *sp_model;
467
468 err = send_inquiry_cmd(sdev, 0, csdev);
469 if (err != SCSI_DH_OK && csdev->senselen) {
470 struct scsi_sense_hdr sshdr;
471
472 if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
473 &sshdr)) {
474 sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code "
475 "%02x/%02x/%02x\n", CLARIION_NAME,
476 sshdr.sense_key, sshdr.asc, sshdr.ascq);
477 }
478 err = SCSI_DH_IO;
479 goto out;
480 }
481
482 sp_model = parse_sp_model(sdev, csdev->buffer);
483 if (!sp_model) {
484 err = SCSI_DH_DEV_UNSUPP;
485 goto out;
486 }
487
488 /*
489 * FC Series arrays do not support long trespass
490 */
491 if (!strlen(sp_model) || !strncmp(sp_model, "FC",2))
492 csdev->flags |= CLARIION_SHORT_TRESPASS;
493
494 sdev_printk(KERN_INFO, sdev,
495 "%s: detected Clariion %s, flags %x\n",
496 CLARIION_NAME, sp_model, csdev->flags);
497out:
498 return err;
391} 499}
392 500
393static const struct { 501static int clariion_send_inquiry(struct scsi_device *sdev,
394 char *vendor; 502 struct clariion_dh_data *csdev)
395 char *model; 503{
396} clariion_dev_list[] = { 504 int err, retry = CLARIION_RETRIES;
505
506retry:
507 err = send_inquiry_cmd(sdev, 0xC0, csdev);
508 if (err != SCSI_DH_OK && csdev->senselen) {
509 struct scsi_sense_hdr sshdr;
510
511 err = scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
512 &sshdr);
513 if (!err)
514 return SCSI_DH_IO;
515
516 err = clariion_check_sense(sdev, &sshdr);
517 if (retry > 0 && err == NEEDS_RETRY) {
518 retry--;
519 goto retry;
520 }
521 sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code "
522 "%02x/%02x/%02x\n", CLARIION_NAME,
523 sshdr.sense_key, sshdr.asc, sshdr.ascq);
524 err = SCSI_DH_IO;
525 } else {
526 err = parse_sp_info_reply(sdev, csdev);
527 }
528 return err;
529}
530
531static int clariion_activate(struct scsi_device *sdev)
532{
533 struct clariion_dh_data *csdev = get_clariion_data(sdev);
534 int result;
535
536 result = clariion_send_inquiry(sdev, csdev);
537 if (result != SCSI_DH_OK)
538 goto done;
539
540 if (csdev->lun_state == CLARIION_LUN_OWNED)
541 goto done;
542
543 result = send_trespass_cmd(sdev, csdev);
544 if (result != SCSI_DH_OK)
545 goto done;
546 sdev_printk(KERN_INFO, sdev,"%s: %s trespass command sent\n",
547 CLARIION_NAME,
548 csdev->flags&CLARIION_SHORT_TRESPASS?"short":"long" );
549
550 /* Update status */
551 result = clariion_send_inquiry(sdev, csdev);
552 if (result != SCSI_DH_OK)
553 goto done;
554
555done:
556 sdev_printk(KERN_INFO, sdev,
557 "%s: at SP %c Port %d (%s, default SP %c)\n",
558 CLARIION_NAME, csdev->current_sp + 'A',
559 csdev->port, lun_state[csdev->lun_state],
560 csdev->default_sp + 'A');
561
562 return result;
563}
564
565const struct scsi_dh_devlist clariion_dev_list[] = {
397 {"DGC", "RAID"}, 566 {"DGC", "RAID"},
398 {"DGC", "DISK"}, 567 {"DGC", "DISK"},
568 {"DGC", "VRAID"},
399 {NULL, NULL}, 569 {NULL, NULL},
400}; 570};
401 571
402static int clariion_bus_notify(struct notifier_block *, unsigned long, void *); 572static int clariion_bus_attach(struct scsi_device *sdev);
573static void clariion_bus_detach(struct scsi_device *sdev);
403 574
404static struct scsi_device_handler clariion_dh = { 575static struct scsi_device_handler clariion_dh = {
405 .name = CLARIION_NAME, 576 .name = CLARIION_NAME,
406 .module = THIS_MODULE, 577 .module = THIS_MODULE,
407 .nb.notifier_call = clariion_bus_notify, 578 .devlist = clariion_dev_list,
579 .attach = clariion_bus_attach,
580 .detach = clariion_bus_detach,
408 .check_sense = clariion_check_sense, 581 .check_sense = clariion_check_sense,
409 .activate = clariion_activate, 582 .activate = clariion_activate,
583 .prep_fn = clariion_prep_fn,
410}; 584};
411 585
412/* 586/*
413 * TODO: need some interface so we can set trespass values 587 * TODO: need some interface so we can set trespass values
414 */ 588 */
415static int clariion_bus_notify(struct notifier_block *nb, 589static int clariion_bus_attach(struct scsi_device *sdev)
416 unsigned long action, void *data)
417{ 590{
418 struct device *dev = data;
419 struct scsi_device *sdev;
420 struct scsi_dh_data *scsi_dh_data; 591 struct scsi_dh_data *scsi_dh_data;
421 struct clariion_dh_data *h; 592 struct clariion_dh_data *h;
422 int i, found = 0;
423 unsigned long flags; 593 unsigned long flags;
594 int err;
424 595
425 if (!scsi_is_sdev_device(dev)) 596 scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
426 return 0; 597 + sizeof(*h) , GFP_KERNEL);
598 if (!scsi_dh_data) {
599 sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
600 CLARIION_NAME);
601 return -ENOMEM;
602 }
427 603
428 sdev = to_scsi_device(dev); 604 scsi_dh_data->scsi_dh = &clariion_dh;
605 h = (struct clariion_dh_data *) scsi_dh_data->buf;
606 h->lun_state = CLARIION_LUN_UNINITIALIZED;
607 h->default_sp = CLARIION_UNBOUND_LU;
608 h->current_sp = CLARIION_UNBOUND_LU;
429 609
430 if (action == BUS_NOTIFY_ADD_DEVICE) { 610 err = clariion_std_inquiry(sdev, h);
431 for (i = 0; clariion_dev_list[i].vendor; i++) { 611 if (err != SCSI_DH_OK)
432 if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor, 612 goto failed;
433 strlen(clariion_dev_list[i].vendor)) &&
434 !strncmp(sdev->model, clariion_dev_list[i].model,
435 strlen(clariion_dev_list[i].model))) {
436 found = 1;
437 break;
438 }
439 }
440 if (!found)
441 goto out;
442
443 scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
444 + sizeof(*h) , GFP_KERNEL);
445 if (!scsi_dh_data) {
446 sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
447 CLARIION_NAME);
448 goto out;
449 }
450 613
451 scsi_dh_data->scsi_dh = &clariion_dh; 614 err = clariion_send_inquiry(sdev, h);
452 h = (struct clariion_dh_data *) scsi_dh_data->buf; 615 if (err != SCSI_DH_OK)
453 h->default_sp = CLARIION_UNBOUND_LU; 616 goto failed;
454 h->current_sp = CLARIION_UNBOUND_LU;
455 617
456 spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 618 if (!try_module_get(THIS_MODULE))
457 sdev->scsi_dh_data = scsi_dh_data; 619 goto failed;
458 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
459 620
460 sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", CLARIION_NAME); 621 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
461 try_module_get(THIS_MODULE); 622 sdev->scsi_dh_data = scsi_dh_data;
623 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
462 624
463 } else if (action == BUS_NOTIFY_DEL_DEVICE) { 625 sdev_printk(KERN_INFO, sdev,
464 if (sdev->scsi_dh_data == NULL || 626 "%s: connected to SP %c Port %d (%s, default SP %c)\n",
465 sdev->scsi_dh_data->scsi_dh != &clariion_dh) 627 CLARIION_NAME, h->current_sp + 'A',
466 goto out; 628 h->port, lun_state[h->lun_state],
629 h->default_sp + 'A');
467 630
468 spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 631 return 0;
469 scsi_dh_data = sdev->scsi_dh_data;
470 sdev->scsi_dh_data = NULL;
471 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
472 632
473 sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", 633failed:
474 CLARIION_NAME); 634 kfree(scsi_dh_data);
635 sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
636 CLARIION_NAME);
637 return -EINVAL;
638}
475 639
476 kfree(scsi_dh_data); 640static void clariion_bus_detach(struct scsi_device *sdev)
477 module_put(THIS_MODULE); 641{
478 } 642 struct scsi_dh_data *scsi_dh_data;
643 unsigned long flags;
479 644
480out: 645 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
481 return 0; 646 scsi_dh_data = sdev->scsi_dh_data;
647 sdev->scsi_dh_data = NULL;
648 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
649
650 sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n",
651 CLARIION_NAME);
652
653 kfree(scsi_dh_data);
654 module_put(THIS_MODULE);
482} 655}
483 656
484static int __init clariion_init(void) 657static int __init clariion_init(void)
@@ -487,7 +660,8 @@ static int __init clariion_init(void)
487 660
488 r = scsi_register_device_handler(&clariion_dh); 661 r = scsi_register_device_handler(&clariion_dh);
489 if (r != 0) 662 if (r != 0)
490 printk(KERN_ERR "Failed to register scsi device handler."); 663 printk(KERN_ERR "%s: Failed to register scsi device handler.",
664 CLARIION_NAME);
491 return r; 665 return r;
492} 666}
493 667
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index ae6be87d6a83..9c7a1f8ebb72 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -4,6 +4,7 @@
4 * 4 *
5 * Copyright (C) 2006 Red Hat, Inc. All rights reserved. 5 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
6 * Copyright (C) 2006 Mike Christie 6 * Copyright (C) 2006 Mike Christie
7 * Copyright (C) 2008 Hannes Reinecke <hare@suse.de>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
@@ -25,13 +26,18 @@
25#include <scsi/scsi_eh.h> 26#include <scsi/scsi_eh.h>
26#include <scsi/scsi_dh.h> 27#include <scsi/scsi_dh.h>
27 28
28#define HP_SW_NAME "hp_sw" 29#define HP_SW_NAME "hp_sw"
29 30
30#define HP_SW_TIMEOUT (60 * HZ) 31#define HP_SW_TIMEOUT (60 * HZ)
31#define HP_SW_RETRIES 3 32#define HP_SW_RETRIES 3
33
34#define HP_SW_PATH_UNINITIALIZED -1
35#define HP_SW_PATH_ACTIVE 0
36#define HP_SW_PATH_PASSIVE 1
32 37
33struct hp_sw_dh_data { 38struct hp_sw_dh_data {
34 unsigned char sense[SCSI_SENSE_BUFFERSIZE]; 39 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
40 int path_state;
35 int retries; 41 int retries;
36}; 42};
37 43
@@ -42,51 +48,161 @@ static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev)
42 return ((struct hp_sw_dh_data *) scsi_dh_data->buf); 48 return ((struct hp_sw_dh_data *) scsi_dh_data->buf);
43} 49}
44 50
45static int hp_sw_done(struct scsi_device *sdev) 51/*
52 * tur_done - Handle TEST UNIT READY return status
53 * @sdev: sdev the command has been sent to
54 * @errors: blk error code
55 *
56 * Returns SCSI_DH_DEV_OFFLINED if the sdev is on the passive path
57 */
58static int tur_done(struct scsi_device *sdev, unsigned char *sense)
46{ 59{
47 struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
48 struct scsi_sense_hdr sshdr; 60 struct scsi_sense_hdr sshdr;
49 int rc; 61 int ret;
50
51 sdev_printk(KERN_INFO, sdev, "hp_sw_done\n");
52 62
53 rc = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sshdr); 63 ret = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
54 if (!rc) 64 if (!ret) {
65 sdev_printk(KERN_WARNING, sdev,
66 "%s: sending tur failed, no sense available\n",
67 HP_SW_NAME);
68 ret = SCSI_DH_IO;
55 goto done; 69 goto done;
70 }
56 switch (sshdr.sense_key) { 71 switch (sshdr.sense_key) {
72 case UNIT_ATTENTION:
73 ret = SCSI_DH_IMM_RETRY;
74 break;
57 case NOT_READY: 75 case NOT_READY:
58 if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) { 76 if ((sshdr.asc == 0x04) && (sshdr.ascq == 2)) {
59 rc = SCSI_DH_RETRY; 77 /*
60 h->retries++; 78 * LUN not ready - Initialization command required
79 *
80 * This is the passive path
81 */
82 ret = SCSI_DH_DEV_OFFLINED;
61 break; 83 break;
62 } 84 }
63 /* fall through */ 85 /* Fallthrough */
64 default: 86 default:
65 h->retries++; 87 sdev_printk(KERN_WARNING, sdev,
66 rc = SCSI_DH_IMM_RETRY; 88 "%s: sending tur failed, sense %x/%x/%x\n",
89 HP_SW_NAME, sshdr.sense_key, sshdr.asc,
90 sshdr.ascq);
91 break;
67 } 92 }
68 93
69done: 94done:
70 if (rc == SCSI_DH_OK || rc == SCSI_DH_IO) 95 return ret;
71 h->retries = 0; 96}
72 else if (h->retries > HP_SW_RETRIES) { 97
73 h->retries = 0; 98/*
99 * hp_sw_tur - Send TEST UNIT READY
100 * @sdev: sdev command should be sent to
101 *
102 * Use the TEST UNIT READY command to determine
103 * the path state.
104 */
105static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
106{
107 struct request *req;
108 int ret;
109
110 req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
111 if (!req)
112 return SCSI_DH_RES_TEMP_UNAVAIL;
113
114 req->cmd_type = REQ_TYPE_BLOCK_PC;
115 req->cmd_flags |= REQ_FAILFAST;
116 req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
117 memset(req->cmd, 0, MAX_COMMAND_SIZE);
118 req->cmd[0] = TEST_UNIT_READY;
119 req->timeout = HP_SW_TIMEOUT;
120 req->sense = h->sense;
121 memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
122 req->sense_len = 0;
123
124retry:
125 ret = blk_execute_rq(req->q, NULL, req, 1);
126 if (ret == -EIO) {
127 if (req->sense_len > 0) {
128 ret = tur_done(sdev, h->sense);
129 } else {
130 sdev_printk(KERN_WARNING, sdev,
131 "%s: sending tur failed with %x\n",
132 HP_SW_NAME, req->errors);
133 ret = SCSI_DH_IO;
134 }
135 } else {
136 h->path_state = HP_SW_PATH_ACTIVE;
137 ret = SCSI_DH_OK;
138 }
139 if (ret == SCSI_DH_IMM_RETRY)
140 goto retry;
141 if (ret == SCSI_DH_DEV_OFFLINED) {
142 h->path_state = HP_SW_PATH_PASSIVE;
143 ret = SCSI_DH_OK;
144 }
145
146 blk_put_request(req);
147
148 return ret;
149}
150
151/*
152 * start_done - Handle START STOP UNIT return status
153 * @sdev: sdev the command has been sent to
154 * @errors: blk error code
155 */
156static int start_done(struct scsi_device *sdev, unsigned char *sense)
157{
158 struct scsi_sense_hdr sshdr;
159 int rc;
160
161 rc = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
162 if (!rc) {
163 sdev_printk(KERN_WARNING, sdev,
164 "%s: sending start_stop_unit failed, "
165 "no sense available\n",
166 HP_SW_NAME);
167 return SCSI_DH_IO;
168 }
169 switch (sshdr.sense_key) {
170 case NOT_READY:
171 if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) {
172 /*
173 * LUN not ready - manual intervention required
174 *
175 * Switch-over in progress, retry.
176 */
177 rc = SCSI_DH_RETRY;
178 break;
179 }
180 /* fall through */
181 default:
182 sdev_printk(KERN_WARNING, sdev,
183 "%s: sending start_stop_unit failed, sense %x/%x/%x\n",
184 HP_SW_NAME, sshdr.sense_key, sshdr.asc,
185 sshdr.ascq);
74 rc = SCSI_DH_IO; 186 rc = SCSI_DH_IO;
75 } 187 }
188
76 return rc; 189 return rc;
77} 190}
78 191
79static int hp_sw_activate(struct scsi_device *sdev) 192/*
193 * hp_sw_start_stop - Send START STOP UNIT command
194 * @sdev: sdev command should be sent to
195 *
196 * Sending START STOP UNIT activates the SP.
197 */
198static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
80{ 199{
81 struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
82 struct request *req; 200 struct request *req;
83 int ret = SCSI_DH_RES_TEMP_UNAVAIL; 201 int ret, retry;
84 202
85 req = blk_get_request(sdev->request_queue, WRITE, GFP_ATOMIC); 203 req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
86 if (!req) 204 if (!req)
87 goto done; 205 return SCSI_DH_RES_TEMP_UNAVAIL;
88
89 sdev_printk(KERN_INFO, sdev, "sending START_STOP.");
90 206
91 req->cmd_type = REQ_TYPE_BLOCK_PC; 207 req->cmd_type = REQ_TYPE_BLOCK_PC;
92 req->cmd_flags |= REQ_FAILFAST; 208 req->cmd_flags |= REQ_FAILFAST;
@@ -98,95 +214,153 @@ static int hp_sw_activate(struct scsi_device *sdev)
98 req->sense = h->sense; 214 req->sense = h->sense;
99 memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE); 215 memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
100 req->sense_len = 0; 216 req->sense_len = 0;
217 retry = h->retries;
101 218
219retry:
102 ret = blk_execute_rq(req->q, NULL, req, 1); 220 ret = blk_execute_rq(req->q, NULL, req, 1);
103 if (!ret) /* SUCCESS */ 221 if (ret == -EIO) {
104 ret = hp_sw_done(sdev); 222 if (req->sense_len > 0) {
105 else 223 ret = start_done(sdev, h->sense);
224 } else {
225 sdev_printk(KERN_WARNING, sdev,
226 "%s: sending start_stop_unit failed with %x\n",
227 HP_SW_NAME, req->errors);
228 ret = SCSI_DH_IO;
229 }
230 } else
231 ret = SCSI_DH_OK;
232
233 if (ret == SCSI_DH_RETRY) {
234 if (--retry)
235 goto retry;
106 ret = SCSI_DH_IO; 236 ret = SCSI_DH_IO;
107done: 237 }
238
239 blk_put_request(req);
240
241 return ret;
242}
243
244static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
245{
246 struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
247 int ret = BLKPREP_OK;
248
249 if (h->path_state != HP_SW_PATH_ACTIVE) {
250 ret = BLKPREP_KILL;
251 req->cmd_flags |= REQ_QUIET;
252 }
253 return ret;
254
255}
256
257/*
258 * hp_sw_activate - Activate a path
259 * @sdev: sdev on the path to be activated
260 *
261 * The HP Active/Passive firmware is pretty simple;
262 * the passive path reports NOT READY with sense codes
263 * 0x04/0x02; a START STOP UNIT command will then
264 * activate the passive path (and deactivate the
265 * previously active one).
266 */
267static int hp_sw_activate(struct scsi_device *sdev)
268{
269 int ret = SCSI_DH_OK;
270 struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
271
272 ret = hp_sw_tur(sdev, h);
273
274 if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) {
275 ret = hp_sw_start_stop(sdev, h);
276 if (ret == SCSI_DH_OK)
277 sdev_printk(KERN_INFO, sdev,
278 "%s: activated path\n",
279 HP_SW_NAME);
280 }
281
108 return ret; 282 return ret;
109} 283}
110 284
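The TEST UNIT READY handling above boils down to: UNIT ATTENTION means retry immediately, NOT READY 04/02 marks the passive path, anything else is treated as a failure, and only a passive path is sent the START STOP UNIT. A userspace sketch of that classification under the same fixed-format-sense assumption; the enum and function names are illustrative, not the driver's:

#include <stdio.h>

enum path_state { PATH_ACTIVE, PATH_PASSIVE, PATH_UNKNOWN };

static enum path_state classify_tur(const unsigned char *sense, int *retry)
{
	unsigned char key = sense[2] & 0x0f, asc = sense[12], ascq = sense[13];

	*retry = 0;
	if (key == 0x06) {		/* UNIT ATTENTION: try again */
		*retry = 1;
		return PATH_UNKNOWN;
	}
	if (key == 0x02 && asc == 0x04 && ascq == 0x02)
		return PATH_PASSIVE;	/* "initializing command required" */
	return PATH_UNKNOWN;		/* anything else is a failure */
}

int main(void)
{
	unsigned char s[18] = { [0] = 0x70, [2] = 0x02, [12] = 0x04, [13] = 0x02 };
	int retry;

	printf("passive? %d\n", classify_tur(s, &retry) == PATH_PASSIVE);
	return 0;
}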
111static const struct { 285const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
112 char *vendor; 286 {"COMPAQ", "MSA1000 VOLUME"},
113 char *model; 287 {"COMPAQ", "HSV110"},
114} hp_sw_dh_data_list[] = { 288 {"HP", "HSV100"},
115 {"COMPAQ", "MSA"},
116 {"HP", "HSV"},
117 {"DEC", "HSG80"}, 289 {"DEC", "HSG80"},
118 {NULL, NULL}, 290 {NULL, NULL},
119}; 291};
120 292
121static int hp_sw_bus_notify(struct notifier_block *, unsigned long, void *); 293static int hp_sw_bus_attach(struct scsi_device *sdev);
294static void hp_sw_bus_detach(struct scsi_device *sdev);
122 295
123static struct scsi_device_handler hp_sw_dh = { 296static struct scsi_device_handler hp_sw_dh = {
124 .name = HP_SW_NAME, 297 .name = HP_SW_NAME,
125 .module = THIS_MODULE, 298 .module = THIS_MODULE,
126 .nb.notifier_call = hp_sw_bus_notify, 299 .devlist = hp_sw_dh_data_list,
300 .attach = hp_sw_bus_attach,
301 .detach = hp_sw_bus_detach,
127 .activate = hp_sw_activate, 302 .activate = hp_sw_activate,
303 .prep_fn = hp_sw_prep_fn,
128}; 304};
129 305
130static int hp_sw_bus_notify(struct notifier_block *nb, 306static int hp_sw_bus_attach(struct scsi_device *sdev)
131 unsigned long action, void *data)
132{ 307{
133 struct device *dev = data;
134 struct scsi_device *sdev;
135 struct scsi_dh_data *scsi_dh_data; 308 struct scsi_dh_data *scsi_dh_data;
136 int i, found = 0; 309 struct hp_sw_dh_data *h;
137 unsigned long flags; 310 unsigned long flags;
311 int ret;
138 312
139 if (!scsi_is_sdev_device(dev)) 313 scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
314 + sizeof(struct hp_sw_dh_data) , GFP_KERNEL);
315 if (!scsi_dh_data) {
316 sdev_printk(KERN_ERR, sdev, "%s: Attach Failed\n",
317 HP_SW_NAME);
140 return 0; 318 return 0;
319 }
141 320
142 sdev = to_scsi_device(dev); 321 scsi_dh_data->scsi_dh = &hp_sw_dh;
143 322 h = (struct hp_sw_dh_data *) scsi_dh_data->buf;
144 if (action == BUS_NOTIFY_ADD_DEVICE) { 323 h->path_state = HP_SW_PATH_UNINITIALIZED;
145 for (i = 0; hp_sw_dh_data_list[i].vendor; i++) { 324 h->retries = HP_SW_RETRIES;
146 if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
147 strlen(hp_sw_dh_data_list[i].vendor)) &&
148 !strncmp(sdev->model, hp_sw_dh_data_list[i].model,
149 strlen(hp_sw_dh_data_list[i].model))) {
150 found = 1;
151 break;
152 }
153 }
154 if (!found)
155 goto out;
156 325
157 scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *) 326 ret = hp_sw_tur(sdev, h);
158 + sizeof(struct hp_sw_dh_data) , GFP_KERNEL); 327 if (ret != SCSI_DH_OK || h->path_state == HP_SW_PATH_UNINITIALIZED)
159 if (!scsi_dh_data) { 328 goto failed;
160 sdev_printk(KERN_ERR, sdev, "Attach Failed %s.\n",
161 HP_SW_NAME);
162 goto out;
163 }
164 329
165 scsi_dh_data->scsi_dh = &hp_sw_dh; 330 if (!try_module_get(THIS_MODULE))
166 spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 331 goto failed;
167 sdev->scsi_dh_data = scsi_dh_data;
168 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
169 try_module_get(THIS_MODULE);
170 332
171 sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", HP_SW_NAME); 333 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
172 } else if (action == BUS_NOTIFY_DEL_DEVICE) { 334 sdev->scsi_dh_data = scsi_dh_data;
173 if (sdev->scsi_dh_data == NULL || 335 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
174 sdev->scsi_dh_data->scsi_dh != &hp_sw_dh)
175 goto out;
176 336
177 spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 337 sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n",
178 scsi_dh_data = sdev->scsi_dh_data; 338 HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE?
179 sdev->scsi_dh_data = NULL; 339 "active":"passive");
180 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
181 module_put(THIS_MODULE);
182 340
183 sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", HP_SW_NAME); 341 return 0;
184 342
185 kfree(scsi_dh_data); 343failed:
186 } 344 kfree(scsi_dh_data);
345 sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
346 HP_SW_NAME);
347 return -EINVAL;
348}
187 349
188out: 350static void hp_sw_bus_detach( struct scsi_device *sdev )
189 return 0; 351{
352 struct scsi_dh_data *scsi_dh_data;
353 unsigned long flags;
354
355 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
356 scsi_dh_data = sdev->scsi_dh_data;
357 sdev->scsi_dh_data = NULL;
358 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
359 module_put(THIS_MODULE);
360
361 sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", HP_SW_NAME);
362
363 kfree(scsi_dh_data);
190} 364}
191 365
192static int __init hp_sw_init(void) 366static int __init hp_sw_init(void)
@@ -202,6 +376,6 @@ static void __exit hp_sw_exit(void)
202module_init(hp_sw_init); 376module_init(hp_sw_init);
203module_exit(hp_sw_exit); 377module_exit(hp_sw_exit);
204 378
205MODULE_DESCRIPTION("HP MSA 1000"); 379MODULE_DESCRIPTION("HP Active/Passive driver");
206 MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>"); 380 MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>");
207MODULE_LICENSE("GPL"); 381MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index fdf34b0ec6e1..b093a501f8ae 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -173,6 +173,11 @@ struct rdac_dh_data {
173#define RDAC_STATE_ACTIVE 0 173#define RDAC_STATE_ACTIVE 0
174#define RDAC_STATE_PASSIVE 1 174#define RDAC_STATE_PASSIVE 1
175 unsigned char state; 175 unsigned char state;
176
177#define RDAC_LUN_UNOWNED 0
178#define RDAC_LUN_OWNED 1
179#define RDAC_LUN_AVT 2
180 char lun_state;
176 unsigned char sense[SCSI_SENSE_BUFFERSIZE]; 181 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
177 union { 182 union {
178 struct c2_inquiry c2; 183 struct c2_inquiry c2;
@@ -182,6 +187,13 @@ struct rdac_dh_data {
182 } inq; 187 } inq;
183}; 188};
184 189
190static const char *lun_state[] =
191{
192 "unowned",
193 "owned",
194 "owned (AVT mode)",
195};
196
185static LIST_HEAD(ctlr_list); 197static LIST_HEAD(ctlr_list);
186static DEFINE_SPINLOCK(list_lock); 198static DEFINE_SPINLOCK(list_lock);
187 199
@@ -197,9 +209,8 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
197{ 209{
198 struct request *rq; 210 struct request *rq;
199 struct request_queue *q = sdev->request_queue; 211 struct request_queue *q = sdev->request_queue;
200 struct rdac_dh_data *h = get_rdac_data(sdev);
201 212
202 rq = blk_get_request(q, rw, GFP_KERNEL); 213 rq = blk_get_request(q, rw, GFP_NOIO);
203 214
204 if (!rq) { 215 if (!rq) {
205 sdev_printk(KERN_INFO, sdev, 216 sdev_printk(KERN_INFO, sdev,
@@ -207,17 +218,14 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
207 return NULL; 218 return NULL;
208 } 219 }
209 220
210 if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) { 221 if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
211 blk_put_request(rq); 222 blk_put_request(rq);
212 sdev_printk(KERN_INFO, sdev, 223 sdev_printk(KERN_INFO, sdev,
213 "get_rdac_req: blk_rq_map_kern failed.\n"); 224 "get_rdac_req: blk_rq_map_kern failed.\n");
214 return NULL; 225 return NULL;
215 } 226 }
216 227
217 memset(&rq->cmd, 0, BLK_MAX_CDB); 228 memset(rq->cmd, 0, BLK_MAX_CDB);
218 rq->sense = h->sense;
219 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
220 rq->sense_len = 0;
221 229
222 rq->cmd_type = REQ_TYPE_BLOCK_PC; 230 rq->cmd_type = REQ_TYPE_BLOCK_PC;
223 rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE; 231 rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
@@ -227,12 +235,12 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
227 return rq; 235 return rq;
228} 236}
229 237
230static struct request *rdac_failover_get(struct scsi_device *sdev) 238static struct request *rdac_failover_get(struct scsi_device *sdev,
239 struct rdac_dh_data *h)
231{ 240{
232 struct request *rq; 241 struct request *rq;
233 struct rdac_mode_common *common; 242 struct rdac_mode_common *common;
234 unsigned data_size; 243 unsigned data_size;
235 struct rdac_dh_data *h = get_rdac_data(sdev);
236 244
237 if (h->ctlr->use_ms10) { 245 if (h->ctlr->use_ms10) {
238 struct rdac_pg_expanded *rdac_pg; 246 struct rdac_pg_expanded *rdac_pg;
@@ -277,6 +285,10 @@ static struct request *rdac_failover_get(struct scsi_device *sdev)
277 } 285 }
278 rq->cmd_len = COMMAND_SIZE(rq->cmd[0]); 286 rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
279 287
288 rq->sense = h->sense;
289 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
290 rq->sense_len = 0;
291
280 return rq; 292 return rq;
281} 293}
282 294
@@ -321,11 +333,10 @@ done:
321} 333}
322 334
323static int submit_inquiry(struct scsi_device *sdev, int page_code, 335static int submit_inquiry(struct scsi_device *sdev, int page_code,
324 unsigned int len) 336 unsigned int len, struct rdac_dh_data *h)
325{ 337{
326 struct request *rq; 338 struct request *rq;
327 struct request_queue *q = sdev->request_queue; 339 struct request_queue *q = sdev->request_queue;
328 struct rdac_dh_data *h = get_rdac_data(sdev);
329 int err = SCSI_DH_RES_TEMP_UNAVAIL; 340 int err = SCSI_DH_RES_TEMP_UNAVAIL;
330 341
331 rq = get_rdac_req(sdev, &h->inq, len, READ); 342 rq = get_rdac_req(sdev, &h->inq, len, READ);
@@ -338,59 +349,68 @@ static int submit_inquiry(struct scsi_device *sdev, int page_code,
338 rq->cmd[2] = page_code; 349 rq->cmd[2] = page_code;
339 rq->cmd[4] = len; 350 rq->cmd[4] = len;
340 rq->cmd_len = COMMAND_SIZE(INQUIRY); 351 rq->cmd_len = COMMAND_SIZE(INQUIRY);
352
353 rq->sense = h->sense;
354 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
355 rq->sense_len = 0;
356
341 err = blk_execute_rq(q, NULL, rq, 1); 357 err = blk_execute_rq(q, NULL, rq, 1);
342 if (err == -EIO) 358 if (err == -EIO)
343 err = SCSI_DH_IO; 359 err = SCSI_DH_IO;
360
361 blk_put_request(rq);
344done: 362done:
345 return err; 363 return err;
346} 364}
347 365
348static int get_lun(struct scsi_device *sdev) 366static int get_lun(struct scsi_device *sdev, struct rdac_dh_data *h)
349{ 367{
350 int err; 368 int err;
351 struct c8_inquiry *inqp; 369 struct c8_inquiry *inqp;
352 struct rdac_dh_data *h = get_rdac_data(sdev);
353 370
354 err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry)); 371 err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
355 if (err == SCSI_DH_OK) { 372 if (err == SCSI_DH_OK) {
356 inqp = &h->inq.c8; 373 inqp = &h->inq.c8;
357 h->lun = inqp->lun[7]; /* currently it uses only one byte */ 374 if (inqp->page_code != 0xc8)
375 return SCSI_DH_NOSYS;
376 if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
377 inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
378 return SCSI_DH_NOSYS;
379 h->lun = scsilun_to_int((struct scsi_lun *)inqp->lun);
358 } 380 }
359 return err; 381 return err;
360} 382}
361 383
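get_lun() above now sanity-checks the 0xC8 page before trusting it: the page code must be 0xC8 and the page must carry the "edid" signature. A hypothetical mirror of just those two checks; the struct below stands in for the driver's struct c8_inquiry and does not reproduce its real layout:

#include <stdio.h>
#include <string.h>

struct c8_page {
	unsigned char page_code;
	unsigned char page_id[4];	/* expected to read "edid" */
	unsigned char lun[8];
};

static int c8_valid(const struct c8_page *p)
{
	return p->page_code == 0xc8 && memcmp(p->page_id, "edid", 4) == 0;
}

int main(void)
{
	struct c8_page p = { .page_code = 0xc8, .page_id = "edid" };

	printf("valid=%d\n", c8_valid(&p));
	return 0;
}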
362#define RDAC_OWNED 0 384static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
363#define RDAC_UNOWNED 1
364#define RDAC_FAILED 2
365static int check_ownership(struct scsi_device *sdev)
366{ 385{
367 int err; 386 int err;
368 struct c9_inquiry *inqp; 387 struct c9_inquiry *inqp;
369 struct rdac_dh_data *h = get_rdac_data(sdev);
370 388
371 err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry)); 389 err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
372 if (err == SCSI_DH_OK) { 390 if (err == SCSI_DH_OK) {
373 err = RDAC_UNOWNED;
374 inqp = &h->inq.c9; 391 inqp = &h->inq.c9;
375 /* 392 if ((inqp->avte_cvp >> 7) == 0x1) {
376 * If in AVT mode or if the path already owns the LUN, 393 /* LUN in AVT mode */
377 * return RDAC_OWNED; 394 sdev_printk(KERN_NOTICE, sdev,
378 */ 395 "%s: AVT mode detected\n",
379 if (((inqp->avte_cvp >> 7) == 0x1) || 396 RDAC_NAME);
380 ((inqp->avte_cvp & 0x1) != 0)) 397 h->lun_state = RDAC_LUN_AVT;
381 err = RDAC_OWNED; 398 } else if ((inqp->avte_cvp & 0x1) != 0) {
382 } else 399 /* LUN was owned by the controller */
383 err = RDAC_FAILED; 400 h->lun_state = RDAC_LUN_OWNED;
401 }
402 }
403
384 return err; 404 return err;
385} 405}
386 406
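check_ownership() above reduces to decoding one byte of the 0xC9 page: bit 7 set means the array is running in AVT mode, bit 0 set means this controller owns the LUN, otherwise the LUN is unowned on this path. A standalone sketch of that decode, with the enum names invented here:

#include <stdio.h>

enum rdac_lun_state { LUN_UNOWNED, LUN_OWNED, LUN_AVT };

static enum rdac_lun_state decode_avte_cvp(unsigned char avte_cvp)
{
	if (avte_cvp >> 7)		/* AVT mode enabled on the array */
		return LUN_AVT;
	if (avte_cvp & 0x1)		/* this controller owns the LUN */
		return LUN_OWNED;
	return LUN_UNOWNED;
}

int main(void)
{
	printf("%d %d %d\n", decode_avte_cvp(0x80), decode_avte_cvp(0x01),
	       decode_avte_cvp(0x00));
	return 0;
}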
387static int initialize_controller(struct scsi_device *sdev) 407static int initialize_controller(struct scsi_device *sdev,
408 struct rdac_dh_data *h)
388{ 409{
389 int err; 410 int err;
390 struct c4_inquiry *inqp; 411 struct c4_inquiry *inqp;
391 struct rdac_dh_data *h = get_rdac_data(sdev);
392 412
393 err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry)); 413 err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
394 if (err == SCSI_DH_OK) { 414 if (err == SCSI_DH_OK) {
395 inqp = &h->inq.c4; 415 inqp = &h->inq.c4;
396 h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id); 416 h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id);
@@ -400,13 +420,12 @@ static int initialize_controller(struct scsi_device *sdev)
400 return err; 420 return err;
401} 421}
402 422
403static int set_mode_select(struct scsi_device *sdev) 423static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
404{ 424{
405 int err; 425 int err;
406 struct c2_inquiry *inqp; 426 struct c2_inquiry *inqp;
407 struct rdac_dh_data *h = get_rdac_data(sdev);
408 427
409 err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry)); 428 err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h);
410 if (err == SCSI_DH_OK) { 429 if (err == SCSI_DH_OK) {
411 inqp = &h->inq.c2; 430 inqp = &h->inq.c2;
412 /* 431 /*
@@ -421,13 +440,13 @@ static int set_mode_select(struct scsi_device *sdev)
421 return err; 440 return err;
422} 441}
423 442
424static int mode_select_handle_sense(struct scsi_device *sdev) 443static int mode_select_handle_sense(struct scsi_device *sdev,
444 unsigned char *sensebuf)
425{ 445{
426 struct scsi_sense_hdr sense_hdr; 446 struct scsi_sense_hdr sense_hdr;
427 struct rdac_dh_data *h = get_rdac_data(sdev);
428 int sense, err = SCSI_DH_IO, ret; 447 int sense, err = SCSI_DH_IO, ret;
429 448
430 ret = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sense_hdr); 449 ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
431 if (!ret) 450 if (!ret)
432 goto done; 451 goto done;
433 452
@@ -451,14 +470,13 @@ done:
451 return err; 470 return err;
452} 471}
453 472
454static int send_mode_select(struct scsi_device *sdev) 473static int send_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
455{ 474{
456 struct request *rq; 475 struct request *rq;
457 struct request_queue *q = sdev->request_queue; 476 struct request_queue *q = sdev->request_queue;
458 struct rdac_dh_data *h = get_rdac_data(sdev);
459 int err = SCSI_DH_RES_TEMP_UNAVAIL; 477 int err = SCSI_DH_RES_TEMP_UNAVAIL;
460 478
461 rq = rdac_failover_get(sdev); 479 rq = rdac_failover_get(sdev, h);
462 if (!rq) 480 if (!rq)
463 goto done; 481 goto done;
464 482
@@ -466,9 +484,11 @@ static int send_mode_select(struct scsi_device *sdev)
466 484
467 err = blk_execute_rq(q, NULL, rq, 1); 485 err = blk_execute_rq(q, NULL, rq, 1);
468 if (err != SCSI_DH_OK) 486 if (err != SCSI_DH_OK)
469 err = mode_select_handle_sense(sdev); 487 err = mode_select_handle_sense(sdev, h->sense);
470 if (err == SCSI_DH_OK) 488 if (err == SCSI_DH_OK)
471 h->state = RDAC_STATE_ACTIVE; 489 h->state = RDAC_STATE_ACTIVE;
490
491 blk_put_request(rq);
472done: 492done:
473 return err; 493 return err;
474} 494}
@@ -478,38 +498,23 @@ static int rdac_activate(struct scsi_device *sdev)
478 struct rdac_dh_data *h = get_rdac_data(sdev); 498 struct rdac_dh_data *h = get_rdac_data(sdev);
479 int err = SCSI_DH_OK; 499 int err = SCSI_DH_OK;
480 500
481 if (h->lun == UNINITIALIZED_LUN) { 501 err = check_ownership(sdev, h);
482 err = get_lun(sdev); 502 if (err != SCSI_DH_OK)
483 if (err != SCSI_DH_OK)
484 goto done;
485 }
486
487 err = check_ownership(sdev);
488 switch (err) {
489 case RDAC_UNOWNED:
490 break;
491 case RDAC_OWNED:
492 err = SCSI_DH_OK;
493 goto done;
494 case RDAC_FAILED:
495 default:
496 err = SCSI_DH_IO;
497 goto done; 503 goto done;
498 }
499 504
500 if (!h->ctlr) { 505 if (!h->ctlr) {
501 err = initialize_controller(sdev); 506 err = initialize_controller(sdev, h);
502 if (err != SCSI_DH_OK) 507 if (err != SCSI_DH_OK)
503 goto done; 508 goto done;
504 } 509 }
505 510
506 if (h->ctlr->use_ms10 == -1) { 511 if (h->ctlr->use_ms10 == -1) {
507 err = set_mode_select(sdev); 512 err = set_mode_select(sdev, h);
508 if (err != SCSI_DH_OK) 513 if (err != SCSI_DH_OK)
509 goto done; 514 goto done;
510 } 515 }
511 516 if (h->lun_state == RDAC_LUN_UNOWNED)
512 err = send_mode_select(sdev); 517 err = send_mode_select(sdev, h);
513done: 518done:
514 return err; 519 return err;
515} 520}
@@ -569,10 +574,7 @@ static int rdac_check_sense(struct scsi_device *sdev,
569 return SCSI_RETURN_NOT_HANDLED; 574 return SCSI_RETURN_NOT_HANDLED;
570} 575}
571 576
572static const struct { 577const struct scsi_dh_devlist rdac_dev_list[] = {
573 char *vendor;
574 char *model;
575} rdac_dev_list[] = {
576 {"IBM", "1722"}, 578 {"IBM", "1722"},
577 {"IBM", "1724"}, 579 {"IBM", "1724"},
578 {"IBM", "1726"}, 580 {"IBM", "1726"},
@@ -590,89 +592,89 @@ static const struct {
590 {NULL, NULL}, 592 {NULL, NULL},
591}; 593};
592 594
593static int rdac_bus_notify(struct notifier_block *, unsigned long, void *); 595static int rdac_bus_attach(struct scsi_device *sdev);
596static void rdac_bus_detach(struct scsi_device *sdev);
594 597
595static struct scsi_device_handler rdac_dh = { 598static struct scsi_device_handler rdac_dh = {
596 .name = RDAC_NAME, 599 .name = RDAC_NAME,
597 .module = THIS_MODULE, 600 .module = THIS_MODULE,
598 .nb.notifier_call = rdac_bus_notify, 601 .devlist = rdac_dev_list,
599 .prep_fn = rdac_prep_fn, 602 .prep_fn = rdac_prep_fn,
600 .check_sense = rdac_check_sense, 603 .check_sense = rdac_check_sense,
604 .attach = rdac_bus_attach,
605 .detach = rdac_bus_detach,
601 .activate = rdac_activate, 606 .activate = rdac_activate,
602}; 607};
603 608
604/* 609static int rdac_bus_attach(struct scsi_device *sdev)
605 * TODO: need some interface so we can set trespass values
606 */
607static int rdac_bus_notify(struct notifier_block *nb,
608 unsigned long action, void *data)
609{ 610{
610 struct device *dev = data;
611 struct scsi_device *sdev;
612 struct scsi_dh_data *scsi_dh_data; 611 struct scsi_dh_data *scsi_dh_data;
613 struct rdac_dh_data *h; 612 struct rdac_dh_data *h;
614 int i, found = 0;
615 unsigned long flags; 613 unsigned long flags;
614 int err;
616 615
617 if (!scsi_is_sdev_device(dev)) 616 scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
617 + sizeof(*h) , GFP_KERNEL);
618 if (!scsi_dh_data) {
619 sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
620 RDAC_NAME);
618 return 0; 621 return 0;
622 }
619 623
620 sdev = to_scsi_device(dev); 624 scsi_dh_data->scsi_dh = &rdac_dh;
621 625 h = (struct rdac_dh_data *) scsi_dh_data->buf;
622 if (action == BUS_NOTIFY_ADD_DEVICE) { 626 h->lun = UNINITIALIZED_LUN;
623 for (i = 0; rdac_dev_list[i].vendor; i++) { 627 h->state = RDAC_STATE_ACTIVE;
624 if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
625 strlen(rdac_dev_list[i].vendor)) &&
626 !strncmp(sdev->model, rdac_dev_list[i].model,
627 strlen(rdac_dev_list[i].model))) {
628 found = 1;
629 break;
630 }
631 }
632 if (!found)
633 goto out;
634 628
635 scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *) 629 err = get_lun(sdev, h);
636 + sizeof(*h) , GFP_KERNEL); 630 if (err != SCSI_DH_OK)
637 if (!scsi_dh_data) { 631 goto failed;
638 sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
639 RDAC_NAME);
640 goto out;
641 }
642 632
643 scsi_dh_data->scsi_dh = &rdac_dh; 633 err = check_ownership(sdev, h);
644 h = (struct rdac_dh_data *) scsi_dh_data->buf; 634 if (err != SCSI_DH_OK)
645 h->lun = UNINITIALIZED_LUN; 635 goto failed;
646 h->state = RDAC_STATE_ACTIVE; 636
647 spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 637 if (!try_module_get(THIS_MODULE))
648 sdev->scsi_dh_data = scsi_dh_data; 638 goto failed;
649 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); 639
650 try_module_get(THIS_MODULE); 640 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
651 641 sdev->scsi_dh_data = scsi_dh_data;
652 sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", RDAC_NAME); 642 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
653 643
654 } else if (action == BUS_NOTIFY_DEL_DEVICE) { 644 sdev_printk(KERN_NOTICE, sdev,
655 if (sdev->scsi_dh_data == NULL || 645 "%s: LUN %d (%s)\n",
656 sdev->scsi_dh_data->scsi_dh != &rdac_dh) 646 RDAC_NAME, h->lun, lun_state[(int)h->lun_state]);
657 goto out;
658
659 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
660 scsi_dh_data = sdev->scsi_dh_data;
661 sdev->scsi_dh_data = NULL;
662 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
663
664 h = (struct rdac_dh_data *) scsi_dh_data->buf;
665 if (h->ctlr)
666 kref_put(&h->ctlr->kref, release_controller);
667 kfree(scsi_dh_data);
668 module_put(THIS_MODULE);
669 sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", RDAC_NAME);
670 }
671 647
672out:
673 return 0; 648 return 0;
649
650failed:
651 kfree(scsi_dh_data);
652 sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
653 RDAC_NAME);
654 return -EINVAL;
655}
656
657static void rdac_bus_detach( struct scsi_device *sdev )
658{
659 struct scsi_dh_data *scsi_dh_data;
660 struct rdac_dh_data *h;
661 unsigned long flags;
662
663 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
664 scsi_dh_data = sdev->scsi_dh_data;
665 sdev->scsi_dh_data = NULL;
666 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
667
668 h = (struct rdac_dh_data *) scsi_dh_data->buf;
669 if (h->ctlr)
670 kref_put(&h->ctlr->kref, release_controller);
671 kfree(scsi_dh_data);
672 module_put(THIS_MODULE);
673 sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", RDAC_NAME);
674} 674}
675 675
676
677
676static int __init rdac_init(void) 678static int __init rdac_init(void)
677{ 679{
678 int r; 680 int r;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 61f8fdea2d96..ae560bc04f9d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -521,9 +521,10 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
521static void ibmvfc_reinit_host(struct ibmvfc_host *vhost) 521static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
522{ 522{
523 if (vhost->action == IBMVFC_HOST_ACTION_NONE) { 523 if (vhost->action == IBMVFC_HOST_ACTION_NONE) {
524 scsi_block_requests(vhost->host); 524 if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
525 ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING); 525 scsi_block_requests(vhost->host);
526 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); 526 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
527 }
527 } else 528 } else
528 vhost->reinit = 1; 529 vhost->reinit = 1;
529 530
@@ -854,39 +855,41 @@ static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
854} 855}
855 856
856/** 857/**
857 * __ibmvfc_find_target - Find the specified scsi_target (no locking) 858 * __ibmvfc_get_target - Find the specified scsi_target (no locking)
858 * @starget: scsi target struct 859 * @starget: scsi target struct
859 * 860 *
860 * Return value: 861 * Return value:
861 * ibmvfc_target struct / NULL if not found 862 * ibmvfc_target struct / NULL if not found
862 **/ 863 **/
863static struct ibmvfc_target *__ibmvfc_find_target(struct scsi_target *starget) 864static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
864{ 865{
865 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 866 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
866 struct ibmvfc_host *vhost = shost_priv(shost); 867 struct ibmvfc_host *vhost = shost_priv(shost);
867 struct ibmvfc_target *tgt; 868 struct ibmvfc_target *tgt;
868 869
869 list_for_each_entry(tgt, &vhost->targets, queue) 870 list_for_each_entry(tgt, &vhost->targets, queue)
870 if (tgt->target_id == starget->id) 871 if (tgt->target_id == starget->id) {
872 kref_get(&tgt->kref);
871 return tgt; 873 return tgt;
874 }
872 return NULL; 875 return NULL;
873} 876}
874 877
875/** 878/**
876 * ibmvfc_find_target - Find the specified scsi_target 879 * ibmvfc_get_target - Find the specified scsi_target
877 * @starget: scsi target struct 880 * @starget: scsi target struct
878 * 881 *
879 * Return value: 882 * Return value:
880 * ibmvfc_target struct / NULL if not found 883 * ibmvfc_target struct / NULL if not found
881 **/ 884 **/
882static struct ibmvfc_target *ibmvfc_find_target(struct scsi_target *starget) 885static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
883{ 886{
884 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 887 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
885 struct ibmvfc_target *tgt; 888 struct ibmvfc_target *tgt;
886 unsigned long flags; 889 unsigned long flags;
887 890
888 spin_lock_irqsave(shost->host_lock, flags); 891 spin_lock_irqsave(shost->host_lock, flags);
889 tgt = __ibmvfc_find_target(starget); 892 tgt = __ibmvfc_get_target(starget);
890 spin_unlock_irqrestore(shost->host_lock, flags); 893 spin_unlock_irqrestore(shost->host_lock, flags);
891 return tgt; 894 return tgt;
892} 895}
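The lookup above is now reference-counted: __ibmvfc_get_target() takes a reference under the host lock and every caller drops it with kref_put(), using ibmvfc_release_tgt() as the release function. A minimal userspace sketch of that get/put pattern, with a plain counter standing in for struct kref (no locking shown):

#include <stdio.h>
#include <stdlib.h>

struct target {
	int id;
	int refcount;			/* the driver uses struct kref */
};

static struct target *target_get(struct target *t)
{
	t->refcount++;			/* kref_get() */
	return t;
}

static void target_put(struct target *t)
{
	if (--t->refcount == 0) {	/* kref_put(..., release) */
		printf("releasing target %d\n", t->id);
		free(t);
	}
}

int main(void)
{
	struct target *t = calloc(1, sizeof(*t));

	if (!t)
		return 1;
	t->refcount = 1;		/* creation reference */
	target_get(t);			/* lookup path takes a reference */
	target_put(t);			/* caller done with it */
	target_put(t);			/* final put frees the target */
	return 0;
}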
@@ -963,6 +966,9 @@ static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
963 case IBMVFC_HALTED: 966 case IBMVFC_HALTED:
964 fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED; 967 fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
965 break; 968 break;
969 case IBMVFC_NO_CRQ:
970 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
971 break;
966 default: 972 default:
967 ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state); 973 ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
968 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; 974 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
@@ -988,6 +994,17 @@ static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
988} 994}
989 995
990/** 996/**
997 * ibmvfc_release_tgt - Free memory allocated for a target
998 * @kref: kref struct
999 *
1000 **/
1001static void ibmvfc_release_tgt(struct kref *kref)
1002{
1003 struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
1004 kfree(tgt);
1005}
1006
1007/**
991 * ibmvfc_get_starget_node_name - Get SCSI target's node name 1008 * ibmvfc_get_starget_node_name - Get SCSI target's node name
992 * @starget: scsi target struct 1009 * @starget: scsi target struct
993 * 1010 *
@@ -996,8 +1013,10 @@ static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
996 **/ 1013 **/
997static void ibmvfc_get_starget_node_name(struct scsi_target *starget) 1014static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
998{ 1015{
999 struct ibmvfc_target *tgt = ibmvfc_find_target(starget); 1016 struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1000 fc_starget_port_name(starget) = tgt ? tgt->ids.node_name : 0; 1017 fc_starget_port_name(starget) = tgt ? tgt->ids.node_name : 0;
1018 if (tgt)
1019 kref_put(&tgt->kref, ibmvfc_release_tgt);
1001} 1020}
1002 1021
1003/** 1022/**
@@ -1009,8 +1028,10 @@ static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
1009 **/ 1028 **/
1010static void ibmvfc_get_starget_port_name(struct scsi_target *starget) 1029static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
1011{ 1030{
1012 struct ibmvfc_target *tgt = ibmvfc_find_target(starget); 1031 struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1013 fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0; 1032 fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
1033 if (tgt)
1034 kref_put(&tgt->kref, ibmvfc_release_tgt);
1014} 1035}
1015 1036
1016/** 1037/**
@@ -1022,8 +1043,10 @@ static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
1022 **/ 1043 **/
1023static void ibmvfc_get_starget_port_id(struct scsi_target *starget) 1044static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
1024{ 1045{
1025 struct ibmvfc_target *tgt = ibmvfc_find_target(starget); 1046 struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1026 fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1; 1047 fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
1048 if (tgt)
1049 kref_put(&tgt->kref, ibmvfc_release_tgt);
1027} 1050}
1028 1051
1029/** 1052/**
@@ -1113,7 +1136,7 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1113 login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ; 1136 login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
1114 login_info->capabilities = IBMVFC_CAN_MIGRATE; 1137 login_info->capabilities = IBMVFC_CAN_MIGRATE;
1115 login_info->async.va = vhost->async_crq.msg_token; 1138 login_info->async.va = vhost->async_crq.msg_token;
1116 login_info->async.len = vhost->async_crq.size; 1139 login_info->async.len = vhost->async_crq.size * sizeof(*vhost->async_crq.msgs);
1117 strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME); 1140 strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
1118 strncpy(login_info->device_name, 1141 strncpy(login_info->device_name,
1119 vhost->host->shost_gendev.bus_id, IBMVFC_MAX_NAME); 1142 vhost->host->shost_gendev.bus_id, IBMVFC_MAX_NAME);
@@ -1404,7 +1427,7 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
1404 err = cmd_status[index].name; 1427 err = cmd_status[index].name;
1405 } 1428 }
1406 1429
1407 if (!logerr && (vhost->log_level <= IBMVFC_DEFAULT_LOG_LEVEL)) 1430 if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
1408 return; 1431 return;
1409 1432
1410 if (rsp->flags & FCP_RSP_LEN_VALID) 1433 if (rsp->flags & FCP_RSP_LEN_VALID)
@@ -2054,7 +2077,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2054{ 2077{
2055 const char *desc = ibmvfc_get_ae_desc(crq->event); 2078 const char *desc = ibmvfc_get_ae_desc(crq->event);
2056 2079
2057 ibmvfc_log(vhost, 2, "%s event received\n", desc); 2080 ibmvfc_log(vhost, 3, "%s event received\n", desc);
2058 2081
2059 switch (crq->event) { 2082 switch (crq->event) {
2060 case IBMVFC_AE_LINK_UP: 2083 case IBMVFC_AE_LINK_UP:
@@ -2648,17 +2671,6 @@ static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
2648} 2671}
2649 2672
2650/** 2673/**
2651 * ibmvfc_release_tgt - Free memory allocated for a target
2652 * @kref: kref struct
2653 *
2654 **/
2655static void ibmvfc_release_tgt(struct kref *kref)
2656{
2657 struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
2658 kfree(tgt);
2659}
2660
2661/**
2662 * ibmvfc_tgt_prli_done - Completion handler for Process Login 2674 * ibmvfc_tgt_prli_done - Completion handler for Process Login
2663 * @evt: ibmvfc event struct 2675 * @evt: ibmvfc event struct
2664 * 2676 *
@@ -2902,6 +2914,139 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
2902} 2914}
2903 2915
2904/** 2916/**
2917 * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
2918 * @mad: ibmvfc passthru mad struct
2919 * @tgt: ibmvfc target struct
2920 *
2921 * Returns:
2922 * 1 if PLOGI needed / 0 if PLOGI not needed
2923 **/
2924static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
2925 struct ibmvfc_target *tgt)
2926{
2927 if (memcmp(&mad->fc_iu.response[2], &tgt->ids.port_name,
2928 sizeof(tgt->ids.port_name)))
2929 return 1;
2930 if (memcmp(&mad->fc_iu.response[4], &tgt->ids.node_name,
2931 sizeof(tgt->ids.node_name)))
2932 return 1;
2933 if (mad->fc_iu.response[6] != tgt->scsi_id)
2934 return 1;
2935 return 0;
2936}
2937
2938/**
2939 * ibmvfc_tgt_adisc_done - Completion handler for ADISC
2940 * @evt: ibmvfc event struct
2941 *
2942 **/
2943static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
2944{
2945 struct ibmvfc_target *tgt = evt->tgt;
2946 struct ibmvfc_host *vhost = evt->vhost;
2947 struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
2948 u32 status = mad->common.status;
2949 u8 fc_reason, fc_explain;
2950
2951 vhost->discovery_threads--;
2952 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2953
2954 switch (status) {
2955 case IBMVFC_MAD_SUCCESS:
2956 tgt_dbg(tgt, "ADISC succeeded\n");
2957 if (ibmvfc_adisc_needs_plogi(mad, tgt))
2958 tgt->need_login = 1;
2959 break;
2960 case IBMVFC_MAD_DRIVER_FAILED:
2961 break;
2962 case IBMVFC_MAD_FAILED:
2963 default:
2964 tgt->need_login = 1;
2965 fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16;
2966 fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8;
2967 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
2968 ibmvfc_get_cmd_error(mad->iu.status, mad->iu.error),
2969 mad->iu.status, mad->iu.error,
2970 ibmvfc_get_fc_type(fc_reason), fc_reason,
2971 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
2972 break;
2973 };
2974
2975 kref_put(&tgt->kref, ibmvfc_release_tgt);
2976 ibmvfc_free_event(evt);
2977 wake_up(&vhost->work_wait_q);
2978}
2979
2980/**
2981 * ibmvfc_init_passthru - Initialize an event struct for FC passthru
2982 * @evt: ibmvfc event struct
2983 *
2984 **/
2985static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
2986{
2987 struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
2988
2989 memset(mad, 0, sizeof(*mad));
2990 mad->common.version = 1;
2991 mad->common.opcode = IBMVFC_PASSTHRU;
2992 mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu);
2993 mad->cmd_ioba.va = (u64)evt->crq.ioba +
2994 offsetof(struct ibmvfc_passthru_mad, iu);
2995 mad->cmd_ioba.len = sizeof(mad->iu);
2996 mad->iu.cmd_len = sizeof(mad->fc_iu.payload);
2997 mad->iu.rsp_len = sizeof(mad->fc_iu.response);
2998 mad->iu.cmd.va = (u64)evt->crq.ioba +
2999 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
3000 offsetof(struct ibmvfc_passthru_fc_iu, payload);
3001 mad->iu.cmd.len = sizeof(mad->fc_iu.payload);
3002 mad->iu.rsp.va = (u64)evt->crq.ioba +
3003 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
3004 offsetof(struct ibmvfc_passthru_fc_iu, response);
3005 mad->iu.rsp.len = sizeof(mad->fc_iu.response);
3006}
3007
3008/**
3009 * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
3010 * @tgt: ibmvfc target struct
3011 *
3012 **/
3013static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
3014{
3015 struct ibmvfc_passthru_mad *mad;
3016 struct ibmvfc_host *vhost = tgt->vhost;
3017 struct ibmvfc_event *evt;
3018
3019 if (vhost->discovery_threads >= disc_threads)
3020 return;
3021
3022 kref_get(&tgt->kref);
3023 evt = ibmvfc_get_event(vhost);
3024 vhost->discovery_threads++;
3025 ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
3026 evt->tgt = tgt;
3027
3028 ibmvfc_init_passthru(evt);
3029 mad = &evt->iu.passthru;
3030 mad->iu.flags = IBMVFC_FC_ELS;
3031 mad->iu.scsi_id = tgt->scsi_id;
3032
3033 mad->fc_iu.payload[0] = IBMVFC_ADISC;
3034 memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
3035 sizeof(vhost->login_buf->resp.port_name));
3036 memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
3037 sizeof(vhost->login_buf->resp.node_name));
3038 mad->fc_iu.payload[6] = vhost->login_buf->resp.scsi_id & 0x00ffffff;
3039
3040 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3041 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3042 vhost->discovery_threads--;
3043 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3044 kref_put(&tgt->kref, ibmvfc_release_tgt);
3045 } else
3046 tgt_dbg(tgt, "Sent ADISC\n");
3047}
3048
3049/**
2905 * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD 3050 * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
2906 * @evt: ibmvfc event struct 3051 * @evt: ibmvfc event struct
2907 * 3052 *
@@ -2921,6 +3066,8 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
2921 tgt->new_scsi_id = rsp->scsi_id; 3066 tgt->new_scsi_id = rsp->scsi_id;
2922 if (rsp->scsi_id != tgt->scsi_id) 3067 if (rsp->scsi_id != tgt->scsi_id)
2923 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout); 3068 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
3069 else
3070 ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
2924 break; 3071 break;
2925 case IBMVFC_MAD_DRIVER_FAILED: 3072 case IBMVFC_MAD_DRIVER_FAILED:
2926 break; 3073 break;
@@ -3336,6 +3483,7 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
3336 tgt_dbg(tgt, "rport add succeeded\n"); 3483 tgt_dbg(tgt, "rport add succeeded\n");
3337 rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff; 3484 rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
3338 rport->supported_classes = 0; 3485 rport->supported_classes = 0;
3486 tgt->target_id = rport->scsi_target_id;
3339 if (tgt->service_parms.class1_parms[0] & 0x80000000) 3487 if (tgt->service_parms.class1_parms[0] & 0x80000000)
3340 rport->supported_classes |= FC_COS_CLASS1; 3488 rport->supported_classes |= FC_COS_CLASS1;
3341 if (tgt->service_parms.class2_parms[0] & 0x80000000) 3489 if (tgt->service_parms.class2_parms[0] & 0x80000000)
@@ -3800,10 +3948,12 @@ static int ibmvfc_remove(struct vio_dev *vdev)
3800 3948
3801 ENTER; 3949 ENTER;
3802 ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr); 3950 ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
3951 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
3952 ibmvfc_wait_while_resetting(vhost);
3953 ibmvfc_release_crq_queue(vhost);
3803 kthread_stop(vhost->work_thread); 3954 kthread_stop(vhost->work_thread);
3804 fc_remove_host(vhost->host); 3955 fc_remove_host(vhost->host);
3805 scsi_remove_host(vhost->host); 3956 scsi_remove_host(vhost->host);
3806 ibmvfc_release_crq_queue(vhost);
3807 3957
3808 spin_lock_irqsave(vhost->host->host_lock, flags); 3958 spin_lock_irqsave(vhost->host->host_lock, flags);
3809 ibmvfc_purge_requests(vhost, DID_ERROR); 3959 ibmvfc_purge_requests(vhost, DID_ERROR);
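The ibmvfc.c hunks above change the fc_host attribute getters from a bare target lookup to a reference-counted one: the lookup takes a kref and the getter drops it with kref_put(&tgt->kref, ibmvfc_release_tgt) after copying the value out, so a target being torn down cannot be freed underneath a concurrent sysfs read. A minimal sketch of that pattern against the stock <linux/kref.h> API follows; the names demo_tgt, demo_get_tgt and demo_release_tgt are hypothetical stand-ins, not the driver's own symbols.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_tgt {
	struct list_head queue;
	u64 scsi_id;
	struct kref kref;
};

/* Called by kref_put() only when the last reference is dropped. */
static void demo_release_tgt(struct kref *kref)
{
	kfree(container_of(kref, struct demo_tgt, kref));
}

/* Look up a target and take a reference under the lock;
 * the caller must kref_put() the result when done with it. */
static struct demo_tgt *demo_get_tgt(struct list_head *targets,
				     spinlock_t *lock, u64 scsi_id)
{
	struct demo_tgt *tgt, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry(tgt, targets, queue) {
		if (tgt->scsi_id == scsi_id) {
			kref_get(&tgt->kref);
			found = tgt;
			break;
		}
	}
	spin_unlock_irqrestore(lock, flags);

	return found;
}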
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 057f3c01ed61..4bf6e374f076 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
29#include "viosrp.h" 29#include "viosrp.h"
30 30
31#define IBMVFC_NAME "ibmvfc" 31#define IBMVFC_NAME "ibmvfc"
32#define IBMVFC_DRIVER_VERSION "1.0.0" 32#define IBMVFC_DRIVER_VERSION "1.0.1"
33#define IBMVFC_DRIVER_DATE "(July 1, 2008)" 33#define IBMVFC_DRIVER_DATE "(July 11, 2008)"
34 34
35#define IBMVFC_DEFAULT_TIMEOUT 15 35#define IBMVFC_DEFAULT_TIMEOUT 15
36#define IBMVFC_INIT_TIMEOUT 30 36#define IBMVFC_INIT_TIMEOUT 30
@@ -119,6 +119,7 @@ enum ibmvfc_mad_types {
119 IBMVFC_PROCESS_LOGIN = 0x0008, 119 IBMVFC_PROCESS_LOGIN = 0x0008,
120 IBMVFC_QUERY_TARGET = 0x0010, 120 IBMVFC_QUERY_TARGET = 0x0010,
121 IBMVFC_IMPLICIT_LOGOUT = 0x0040, 121 IBMVFC_IMPLICIT_LOGOUT = 0x0040,
122 IBMVFC_PASSTHRU = 0x0200,
122 IBMVFC_TMF_MAD = 0x0100, 123 IBMVFC_TMF_MAD = 0x0100,
123}; 124};
124 125
@@ -439,6 +440,37 @@ struct ibmvfc_cmd {
439 struct ibmvfc_fcp_rsp rsp; 440 struct ibmvfc_fcp_rsp rsp;
440}__attribute__((packed, aligned (8))); 441}__attribute__((packed, aligned (8)));
441 442
443struct ibmvfc_passthru_fc_iu {
444 u32 payload[7];
445#define IBMVFC_ADISC 0x52000000
446 u32 response[7];
447};
448
449struct ibmvfc_passthru_iu {
450 u64 task_tag;
451 u32 cmd_len;
452 u32 rsp_len;
453 u16 status;
454 u16 error;
455 u32 flags;
456#define IBMVFC_FC_ELS 0x01
457 u32 cancel_key;
458 u32 reserved;
459 struct srp_direct_buf cmd;
460 struct srp_direct_buf rsp;
461 u64 correlation;
462 u64 scsi_id;
463 u64 tag;
464 u64 reserved2[2];
465}__attribute__((packed, aligned (8)));
466
467struct ibmvfc_passthru_mad {
468 struct ibmvfc_mad_common common;
469 struct srp_direct_buf cmd_ioba;
470 struct ibmvfc_passthru_iu iu;
471 struct ibmvfc_passthru_fc_iu fc_iu;
472}__attribute__((packed, aligned (8)));
473
442struct ibmvfc_trace_start_entry { 474struct ibmvfc_trace_start_entry {
443 u32 xfer_len; 475 u32 xfer_len;
444}__attribute__((packed)); 476}__attribute__((packed));
@@ -531,6 +563,7 @@ union ibmvfc_iu {
531 struct ibmvfc_implicit_logout implicit_logout; 563 struct ibmvfc_implicit_logout implicit_logout;
532 struct ibmvfc_tmf tmf; 564 struct ibmvfc_tmf tmf;
533 struct ibmvfc_cmd cmd; 565 struct ibmvfc_cmd cmd;
566 struct ibmvfc_passthru_mad passthru;
534}__attribute__((packed, aligned (8))); 567}__attribute__((packed, aligned (8)));
535 568
536enum ibmvfc_target_action { 569enum ibmvfc_target_action {
@@ -656,6 +689,9 @@ struct ibmvfc_host {
656#define tgt_dbg(t, fmt, ...) \ 689#define tgt_dbg(t, fmt, ...) \
657 DBG_CMD(dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)) 690 DBG_CMD(dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__))
658 691
692#define tgt_info(t, fmt, ...) \
693 dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
694
659#define tgt_err(t, fmt, ...) \ 695#define tgt_err(t, fmt, ...) \
660 dev_err((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__) 696 dev_err((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
661 697
@@ -668,8 +704,8 @@ struct ibmvfc_host {
668 dev_err((vhost)->dev, ##__VA_ARGS__); \ 704 dev_err((vhost)->dev, ##__VA_ARGS__); \
669 } while (0) 705 } while (0)
670 706
671#define ENTER DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Entering %s\n", __FUNCTION__)) 707#define ENTER DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Entering %s\n", __func__))
672#define LEAVE DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Leaving %s\n", __FUNCTION__)) 708#define LEAVE DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Leaving %s\n", __func__))
673 709
674#ifdef CONFIG_SCSI_IBMVFC_TRACE 710#ifdef CONFIG_SCSI_IBMVFC_TRACE
675#define ibmvfc_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr) 711#define ibmvfc_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr)
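The passthru MAD added to ibmvfc.h above is self-describing: it carries srp_direct_buf descriptors whose I/O addresses are computed as the event's mapped address plus offsetof() into the structure, which is how ibmvfc_init_passthru() fills them in the .c hunk earlier. A reduced sketch of that addressing idiom, using hypothetical structures rather than the driver's:

#include <linux/stddef.h>
#include <linux/types.h>

struct demo_fc_iu {
	u32 payload[7];
	u32 response[7];
};

struct demo_mad {
	u64 cmd_va;	/* I/O address of the command payload */
	u32 cmd_len;
	u64 rsp_va;	/* I/O address of the response buffer */
	u32 rsp_len;
	struct demo_fc_iu fc_iu;
};

/* ioba is the DMA/IO address at which the whole MAD was mapped. */
static void demo_init_mad(struct demo_mad *mad, u64 ioba)
{
	mad->cmd_va  = ioba + offsetof(struct demo_mad, fc_iu) +
		       offsetof(struct demo_fc_iu, payload);
	mad->cmd_len = sizeof(mad->fc_iu.payload);
	mad->rsp_va  = ioba + offsetof(struct demo_mad, fc_iu) +
		       offsetof(struct demo_fc_iu, response);
	mad->rsp_len = sizeof(mad->fc_iu.response);
}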
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index 2e13ec00172a..2a5b29d12172 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -55,7 +55,7 @@
55/* tmp - will replace with SCSI logging stuff */ 55/* tmp - will replace with SCSI logging stuff */
56#define eprintk(fmt, args...) \ 56#define eprintk(fmt, args...) \
57do { \ 57do { \
58 printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \ 58 printk("%s(%d) " fmt, __func__, __LINE__, ##args); \
59} while (0) 59} while (0)
60/* #define dprintk eprintk */ 60/* #define dprintk eprintk */
61#define dprintk(fmt, args...) 61#define dprintk(fmt, args...)
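The ibmvstgt.c hunk above, like many of the hunks that follow (imm, ipr, libsas, libsrp, lpfc, megaraid, nsp32, nsp_cs, ppa, qla1280), is a mechanical substitution of the GCC-specific __FUNCTION__ spelling with __func__, the predefined identifier standardized by C99. __func__ behaves as a static const char array holding the enclosing function's name, not a macro, so it works unchanged as a %s argument in the printk()-style call sites touched here. A trivial stand-alone illustration, not driver code:

#include <stdio.h>

static void probe_one(void)
{
	/* __func__ expands to the enclosing function's name, here "probe_one". */
	printf("%s: entered\n", __func__);
}

int main(void)
{
	probe_one();
	return 0;
}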
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index f97d172844be..c2a9a13d788f 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -163,7 +163,7 @@ static int imm_proc_info(struct Scsi_Host *host, char *buffer, char **start,
163 163
164#if IMM_DEBUG > 0 164#if IMM_DEBUG > 0
165#define imm_fail(x,y) printk("imm: imm_fail(%i) from %s at line %d\n",\ 165#define imm_fail(x,y) printk("imm: imm_fail(%i) from %s at line %d\n",\
166 y, __FUNCTION__, __LINE__); imm_fail_func(x,y); 166 y, __func__, __LINE__); imm_fail_func(x,y);
167static inline void 167static inline void
168imm_fail_func(imm_struct *dev, int error_code) 168imm_fail_func(imm_struct *dev, int error_code)
169#else 169#else
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index d93156671e93..4871dd1f2582 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1403,10 +1403,10 @@ struct ipr_ucode_image_header {
1403} 1403}
1404 1404
1405#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\ 1405#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\
1406 __FILE__, __FUNCTION__, __LINE__) 1406 __FILE__, __func__, __LINE__)
1407 1407
1408#define ENTER IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Entering %s\n", __FUNCTION__)) 1408#define ENTER IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Entering %s\n", __func__))
1409#define LEAVE IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Leaving %s\n", __FUNCTION__)) 1409#define LEAVE IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Leaving %s\n", __func__))
1410 1410
1411#define ipr_err_separator \ 1411#define ipr_err_separator \
1412ipr_err("----------------------------------------------------------\n") 1412ipr_err("----------------------------------------------------------\n")
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 744f06d04a36..48ee8c7f5bdd 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -74,7 +74,7 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
74 case SAS_OPEN_TO: 74 case SAS_OPEN_TO:
75 case SAS_OPEN_REJECT: 75 case SAS_OPEN_REJECT:
76 SAS_DPRINTK("%s: Saw error %d. What to do?\n", 76 SAS_DPRINTK("%s: Saw error %d. What to do?\n",
77 __FUNCTION__, ts->stat); 77 __func__, ts->stat);
78 return AC_ERR_OTHER; 78 return AC_ERR_OTHER;
79 79
80 case SAS_ABORTED_TASK: 80 case SAS_ABORTED_TASK:
@@ -115,7 +115,7 @@ static void sas_ata_task_done(struct sas_task *task)
115 } else if (stat->stat != SAM_STAT_GOOD) { 115 } else if (stat->stat != SAM_STAT_GOOD) {
116 ac = sas_to_ata_err(stat); 116 ac = sas_to_ata_err(stat);
117 if (ac) { 117 if (ac) {
118 SAS_DPRINTK("%s: SAS error %x\n", __FUNCTION__, 118 SAS_DPRINTK("%s: SAS error %x\n", __func__,
119 stat->stat); 119 stat->stat);
120 /* We saw a SAS error. Send a vague error. */ 120 /* We saw a SAS error. Send a vague error. */
121 qc->err_mask = ac; 121 qc->err_mask = ac;
@@ -244,20 +244,20 @@ static void sas_ata_phy_reset(struct ata_port *ap)
244 res = i->dft->lldd_I_T_nexus_reset(dev); 244 res = i->dft->lldd_I_T_nexus_reset(dev);
245 245
246 if (res != TMF_RESP_FUNC_COMPLETE) 246 if (res != TMF_RESP_FUNC_COMPLETE)
247 SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __FUNCTION__); 247 SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __func__);
248 248
249 switch (dev->sata_dev.command_set) { 249 switch (dev->sata_dev.command_set) {
250 case ATA_COMMAND_SET: 250 case ATA_COMMAND_SET:
251 SAS_DPRINTK("%s: Found ATA device.\n", __FUNCTION__); 251 SAS_DPRINTK("%s: Found ATA device.\n", __func__);
252 ap->link.device[0].class = ATA_DEV_ATA; 252 ap->link.device[0].class = ATA_DEV_ATA;
253 break; 253 break;
254 case ATAPI_COMMAND_SET: 254 case ATAPI_COMMAND_SET:
255 SAS_DPRINTK("%s: Found ATAPI device.\n", __FUNCTION__); 255 SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
256 ap->link.device[0].class = ATA_DEV_ATAPI; 256 ap->link.device[0].class = ATA_DEV_ATAPI;
257 break; 257 break;
258 default: 258 default:
259 SAS_DPRINTK("%s: Unknown SATA command set: %d.\n", 259 SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
260 __FUNCTION__, 260 __func__,
261 dev->sata_dev.command_set); 261 dev->sata_dev.command_set);
262 ap->link.device[0].class = ATA_DEV_UNKNOWN; 262 ap->link.device[0].class = ATA_DEV_UNKNOWN;
263 break; 263 break;
@@ -299,7 +299,7 @@ static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
299{ 299{
300 struct domain_device *dev = ap->private_data; 300 struct domain_device *dev = ap->private_data;
301 301
302 SAS_DPRINTK("STUB %s\n", __FUNCTION__); 302 SAS_DPRINTK("STUB %s\n", __func__);
303 switch (sc_reg_in) { 303 switch (sc_reg_in) {
304 case SCR_STATUS: 304 case SCR_STATUS:
305 dev->sata_dev.sstatus = val; 305 dev->sata_dev.sstatus = val;
@@ -324,7 +324,7 @@ static int sas_ata_scr_read(struct ata_port *ap, unsigned int sc_reg_in,
324{ 324{
325 struct domain_device *dev = ap->private_data; 325 struct domain_device *dev = ap->private_data;
326 326
327 SAS_DPRINTK("STUB %s\n", __FUNCTION__); 327 SAS_DPRINTK("STUB %s\n", __func__);
328 switch (sc_reg_in) { 328 switch (sc_reg_in) {
329 case SCR_STATUS: 329 case SCR_STATUS:
330 *val = dev->sata_dev.sstatus; 330 *val = dev->sata_dev.sstatus;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index aefd865a5788..3da02e436788 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -121,7 +121,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
121 break; 121 break;
122 } else { 122 } else {
123 SAS_DPRINTK("%s: task to dev %016llx response: 0x%x " 123 SAS_DPRINTK("%s: task to dev %016llx response: 0x%x "
124 "status 0x%x\n", __FUNCTION__, 124 "status 0x%x\n", __func__,
125 SAS_ADDR(dev->sas_addr), 125 SAS_ADDR(dev->sas_addr),
126 task->task_status.resp, 126 task->task_status.resp,
127 task->task_status.stat); 127 task->task_status.stat);
@@ -1279,7 +1279,7 @@ static int sas_configure_present(struct domain_device *dev, int phy_id,
1279 goto out; 1279 goto out;
1280 } else if (res != SMP_RESP_FUNC_ACC) { 1280 } else if (res != SMP_RESP_FUNC_ACC) {
1281 SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x " 1281 SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x "
1282 "result 0x%x\n", __FUNCTION__, 1282 "result 0x%x\n", __func__,
1283 SAS_ADDR(dev->sas_addr), phy_id, i, res); 1283 SAS_ADDR(dev->sas_addr), phy_id, i, res);
1284 goto out; 1284 goto out;
1285 } 1285 }
@@ -1901,7 +1901,7 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1901 1901
1902 if (!rsp) { 1902 if (!rsp) {
1903 printk("%s: space for a smp response is missing\n", 1903 printk("%s: space for a smp response is missing\n",
1904 __FUNCTION__); 1904 __func__);
1905 return -EINVAL; 1905 return -EINVAL;
1906 } 1906 }
1907 1907
@@ -1914,20 +1914,20 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1914 if (type != SAS_EDGE_EXPANDER_DEVICE && 1914 if (type != SAS_EDGE_EXPANDER_DEVICE &&
1915 type != SAS_FANOUT_EXPANDER_DEVICE) { 1915 type != SAS_FANOUT_EXPANDER_DEVICE) {
1916 printk("%s: can we send a smp request to a device?\n", 1916 printk("%s: can we send a smp request to a device?\n",
1917 __FUNCTION__); 1917 __func__);
1918 return -EINVAL; 1918 return -EINVAL;
1919 } 1919 }
1920 1920
1921 dev = sas_find_dev_by_rphy(rphy); 1921 dev = sas_find_dev_by_rphy(rphy);
1922 if (!dev) { 1922 if (!dev) {
1923 printk("%s: fail to find a domain_device?\n", __FUNCTION__); 1923 printk("%s: fail to find a domain_device?\n", __func__);
1924 return -EINVAL; 1924 return -EINVAL;
1925 } 1925 }
1926 1926
1927 /* do we need to support multiple segments? */ 1927 /* do we need to support multiple segments? */
1928 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1928 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1929 printk("%s: multiple segments req %u %u, rsp %u %u\n", 1929 printk("%s: multiple segments req %u %u, rsp %u %u\n",
1930 __FUNCTION__, req->bio->bi_vcnt, req->data_len, 1930 __func__, req->bio->bi_vcnt, req->data_len,
1931 rsp->bio->bi_vcnt, rsp->data_len); 1931 rsp->bio->bi_vcnt, rsp->data_len);
1932 return -EINVAL; 1932 return -EINVAL;
1933 } 1933 }
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 39ae68a3b0ef..139935a121b4 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -50,7 +50,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
50 sas_deform_port(phy); 50 sas_deform_port(phy);
51 else { 51 else {
52 SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n", 52 SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
53 __FUNCTION__, phy->id, phy->port->id, 53 __func__, phy->id, phy->port->id,
54 phy->port->num_phys); 54 phy->port->num_phys);
55 return; 55 return;
56 } 56 }
@@ -78,7 +78,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
78 78
79 if (i >= sas_ha->num_phys) { 79 if (i >= sas_ha->num_phys) {
80 printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n", 80 printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n",
81 __FUNCTION__); 81 __func__);
82 spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags); 82 spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
83 return; 83 return;
84 } 84 }
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 601ec5b6a7f6..a8e3ef309070 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -343,7 +343,7 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
343 flags); 343 flags);
344 SAS_DPRINTK("%s: task 0x%p aborted from " 344 SAS_DPRINTK("%s: task 0x%p aborted from "
345 "task_queue\n", 345 "task_queue\n",
346 __FUNCTION__, task); 346 __func__, task);
347 return TASK_IS_ABORTED; 347 return TASK_IS_ABORTED;
348 } 348 }
349 } 349 }
@@ -351,13 +351,13 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
351 } 351 }
352 352
353 for (i = 0; i < 5; i++) { 353 for (i = 0; i < 5; i++) {
354 SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task); 354 SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task);
355 res = si->dft->lldd_abort_task(task); 355 res = si->dft->lldd_abort_task(task);
356 356
357 spin_lock_irqsave(&task->task_state_lock, flags); 357 spin_lock_irqsave(&task->task_state_lock, flags);
358 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 358 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
359 spin_unlock_irqrestore(&task->task_state_lock, flags); 359 spin_unlock_irqrestore(&task->task_state_lock, flags);
360 SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, 360 SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
361 task); 361 task);
362 return TASK_IS_DONE; 362 return TASK_IS_DONE;
363 } 363 }
@@ -365,24 +365,24 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
365 365
366 if (res == TMF_RESP_FUNC_COMPLETE) { 366 if (res == TMF_RESP_FUNC_COMPLETE) {
367 SAS_DPRINTK("%s: task 0x%p is aborted\n", 367 SAS_DPRINTK("%s: task 0x%p is aborted\n",
368 __FUNCTION__, task); 368 __func__, task);
369 return TASK_IS_ABORTED; 369 return TASK_IS_ABORTED;
370 } else if (si->dft->lldd_query_task) { 370 } else if (si->dft->lldd_query_task) {
371 SAS_DPRINTK("%s: querying task 0x%p\n", 371 SAS_DPRINTK("%s: querying task 0x%p\n",
372 __FUNCTION__, task); 372 __func__, task);
373 res = si->dft->lldd_query_task(task); 373 res = si->dft->lldd_query_task(task);
374 switch (res) { 374 switch (res) {
375 case TMF_RESP_FUNC_SUCC: 375 case TMF_RESP_FUNC_SUCC:
376 SAS_DPRINTK("%s: task 0x%p at LU\n", 376 SAS_DPRINTK("%s: task 0x%p at LU\n",
377 __FUNCTION__, task); 377 __func__, task);
378 return TASK_IS_AT_LU; 378 return TASK_IS_AT_LU;
379 case TMF_RESP_FUNC_COMPLETE: 379 case TMF_RESP_FUNC_COMPLETE:
380 SAS_DPRINTK("%s: task 0x%p not at LU\n", 380 SAS_DPRINTK("%s: task 0x%p not at LU\n",
381 __FUNCTION__, task); 381 __func__, task);
382 return TASK_IS_NOT_AT_LU; 382 return TASK_IS_NOT_AT_LU;
383 case TMF_RESP_FUNC_FAILED: 383 case TMF_RESP_FUNC_FAILED:
384 SAS_DPRINTK("%s: task 0x%p failed to abort\n", 384 SAS_DPRINTK("%s: task 0x%p failed to abort\n",
385 __FUNCTION__, task); 385 __func__, task);
386 return TASK_ABORT_FAILED; 386 return TASK_ABORT_FAILED;
387 } 387 }
388 388
@@ -545,7 +545,7 @@ Again:
545 545
546 if (need_reset) { 546 if (need_reset) {
547 SAS_DPRINTK("%s: task 0x%p requests reset\n", 547 SAS_DPRINTK("%s: task 0x%p requests reset\n",
548 __FUNCTION__, task); 548 __func__, task);
549 goto reset; 549 goto reset;
550 } 550 }
551 551
@@ -556,13 +556,13 @@ Again:
556 556
557 switch (res) { 557 switch (res) {
558 case TASK_IS_DONE: 558 case TASK_IS_DONE:
559 SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, 559 SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
560 task); 560 task);
561 sas_eh_finish_cmd(cmd); 561 sas_eh_finish_cmd(cmd);
562 continue; 562 continue;
563 case TASK_IS_ABORTED: 563 case TASK_IS_ABORTED:
564 SAS_DPRINTK("%s: task 0x%p is aborted\n", 564 SAS_DPRINTK("%s: task 0x%p is aborted\n",
565 __FUNCTION__, task); 565 __func__, task);
566 sas_eh_finish_cmd(cmd); 566 sas_eh_finish_cmd(cmd);
567 continue; 567 continue;
568 case TASK_IS_AT_LU: 568 case TASK_IS_AT_LU:
@@ -633,7 +633,7 @@ Again:
633 } 633 }
634 return list_empty(work_q); 634 return list_empty(work_q);
635clear_q: 635clear_q:
636 SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__); 636 SAS_DPRINTK("--- Exit %s -- clear_q\n", __func__);
637 list_for_each_entry_safe(cmd, n, work_q, eh_entry) 637 list_for_each_entry_safe(cmd, n, work_q, eh_entry)
638 sas_eh_finish_cmd(cmd); 638 sas_eh_finish_cmd(cmd);
639 639
@@ -650,7 +650,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
650 list_splice_init(&shost->eh_cmd_q, &eh_work_q); 650 list_splice_init(&shost->eh_cmd_q, &eh_work_q);
651 spin_unlock_irqrestore(shost->host_lock, flags); 651 spin_unlock_irqrestore(shost->host_lock, flags);
652 652
653 SAS_DPRINTK("Enter %s\n", __FUNCTION__); 653 SAS_DPRINTK("Enter %s\n", __func__);
654 /* 654 /*
655 * Deal with commands that still have SAS tasks (i.e. they didn't 655 * Deal with commands that still have SAS tasks (i.e. they didn't
656 * complete via the normal sas_task completion mechanism) 656 * complete via the normal sas_task completion mechanism)
@@ -669,7 +669,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
669 669
670out: 670out:
671 scsi_eh_flush_done_q(&ha->eh_done_q); 671 scsi_eh_flush_done_q(&ha->eh_done_q);
672 SAS_DPRINTK("--- Exit %s\n", __FUNCTION__); 672 SAS_DPRINTK("--- Exit %s\n", __func__);
673 return; 673 return;
674} 674}
675 675
@@ -990,7 +990,7 @@ int __sas_task_abort(struct sas_task *task)
990 if (task->task_state_flags & SAS_TASK_STATE_ABORTED || 990 if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
991 task->task_state_flags & SAS_TASK_STATE_DONE) { 991 task->task_state_flags & SAS_TASK_STATE_DONE) {
992 spin_unlock_irqrestore(&task->task_state_lock, flags); 992 spin_unlock_irqrestore(&task->task_state_lock, flags);
993 SAS_DPRINTK("%s: Task %p already finished.\n", __FUNCTION__, 993 SAS_DPRINTK("%s: Task %p already finished.\n", __func__,
994 task); 994 task);
995 return 0; 995 return 0;
996 } 996 }
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index 6d6a76e65a6c..15e2d132e8b9 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -39,7 +39,7 @@ enum srp_task_attributes {
39/* tmp - will replace with SCSI logging stuff */ 39/* tmp - will replace with SCSI logging stuff */
40#define eprintk(fmt, args...) \ 40#define eprintk(fmt, args...) \
41do { \ 41do { \
42 printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \ 42 printk("%s(%d) " fmt, __func__, __LINE__, ##args); \
43} while (0) 43} while (0)
44/* #define dprintk eprintk */ 44/* #define dprintk eprintk */
45#define dprintk(fmt, args...) 45#define dprintk(fmt, args...)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 5b6e5395c8eb..d51a2a4b43eb 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2083,7 +2083,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2083 if (iocbq_entry == NULL) { 2083 if (iocbq_entry == NULL) {
2084 printk(KERN_ERR "%s: only allocated %d iocbs of " 2084 printk(KERN_ERR "%s: only allocated %d iocbs of "
2085 "expected %d count. Unloading driver.\n", 2085 "expected %d count. Unloading driver.\n",
2086 __FUNCTION__, i, LPFC_IOCB_LIST_CNT); 2086 __func__, i, LPFC_IOCB_LIST_CNT);
2087 error = -ENOMEM; 2087 error = -ENOMEM;
2088 goto out_free_iocbq; 2088 goto out_free_iocbq;
2089 } 2089 }
@@ -2093,7 +2093,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2093 kfree (iocbq_entry); 2093 kfree (iocbq_entry);
2094 printk(KERN_ERR "%s: failed to allocate IOTAG. " 2094 printk(KERN_ERR "%s: failed to allocate IOTAG. "
2095 "Unloading driver.\n", 2095 "Unloading driver.\n",
2096 __FUNCTION__); 2096 __func__);
2097 error = -ENOMEM; 2097 error = -ENOMEM;
2098 goto out_free_iocbq; 2098 goto out_free_iocbq;
2099 } 2099 }
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c94da4f2b8a6..1bcebbd3dfac 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -341,7 +341,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
341 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 341 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
342 printk(KERN_ERR "%s: Too many sg segments from " 342 printk(KERN_ERR "%s: Too many sg segments from "
343 "dma_map_sg. Config %d, seg_cnt %d", 343 "dma_map_sg. Config %d, seg_cnt %d",
344 __FUNCTION__, phba->cfg_sg_seg_cnt, 344 __func__, phba->cfg_sg_seg_cnt,
345 lpfc_cmd->seg_cnt); 345 lpfc_cmd->seg_cnt);
346 scsi_dma_unmap(scsi_cmnd); 346 scsi_dma_unmap(scsi_cmnd);
347 return 1; 347 return 1;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f40aa7b905f7..50fe07646738 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -219,7 +219,7 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
219 case CMD_IOCB_LOGENTRY_CN: 219 case CMD_IOCB_LOGENTRY_CN:
220 case CMD_IOCB_LOGENTRY_ASYNC_CN: 220 case CMD_IOCB_LOGENTRY_ASYNC_CN:
221 printk("%s - Unhandled SLI-3 Command x%x\n", 221 printk("%s - Unhandled SLI-3 Command x%x\n",
222 __FUNCTION__, iocb_cmnd); 222 __func__, iocb_cmnd);
223 type = LPFC_UNKNOWN_IOCB; 223 type = LPFC_UNKNOWN_IOCB;
224 break; 224 break;
225 default: 225 default:
@@ -1715,7 +1715,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1715 rspiocbp = __lpfc_sli_get_iocbq(phba); 1715 rspiocbp = __lpfc_sli_get_iocbq(phba);
1716 if (rspiocbp == NULL) { 1716 if (rspiocbp == NULL) {
1717 printk(KERN_ERR "%s: out of buffers! Failing " 1717 printk(KERN_ERR "%s: out of buffers! Failing "
1718 "completion.\n", __FUNCTION__); 1718 "completion.\n", __func__);
1719 break; 1719 break;
1720 } 1720 }
1721 1721
@@ -3793,7 +3793,7 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
3793 break; 3793 break;
3794 default: 3794 default:
3795 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 3795 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
3796 __FUNCTION__, ctx_cmd); 3796 __func__, ctx_cmd);
3797 break; 3797 break;
3798 } 3798 }
3799 3799
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
index f62ed468ada0..5ead1283a844 100644
--- a/drivers/scsi/megaraid/mega_common.h
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -265,7 +265,7 @@ typedef struct {
265#define ASSERT(expression) \ 265#define ASSERT(expression) \
266 if (!(expression)) { \ 266 if (!(expression)) { \
267 ASSERT_ACTION("assertion failed:(%s), file: %s, line: %d:%s\n", \ 267 ASSERT_ACTION("assertion failed:(%s), file: %s, line: %d:%s\n", \
268 #expression, __FILE__, __LINE__, __FUNCTION__); \ 268 #expression, __FILE__, __LINE__, __func__); \
269 } 269 }
270#else 270#else
271#define ASSERT(expression) 271#define ASSERT(expression)
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 70a0f11f48b2..805bb61dde18 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -458,7 +458,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
458 458
459 if (adapter == NULL) { 459 if (adapter == NULL) {
460 con_log(CL_ANN, (KERN_WARNING 460 con_log(CL_ANN, (KERN_WARNING
461 "megaraid: out of memory, %s %d.\n", __FUNCTION__, __LINE__)); 461 "megaraid: out of memory, %s %d.\n", __func__, __LINE__));
462 462
463 goto out_probe_one; 463 goto out_probe_one;
464 } 464 }
@@ -1002,7 +1002,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
1002 1002
1003 if (!raid_dev->una_mbox64) { 1003 if (!raid_dev->una_mbox64) {
1004 con_log(CL_ANN, (KERN_WARNING 1004 con_log(CL_ANN, (KERN_WARNING
1005 "megaraid: out of memory, %s %d\n", __FUNCTION__, 1005 "megaraid: out of memory, %s %d\n", __func__,
1006 __LINE__)); 1006 __LINE__));
1007 return -1; 1007 return -1;
1008 } 1008 }
@@ -1030,7 +1030,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
1030 if (!adapter->ibuf) { 1030 if (!adapter->ibuf) {
1031 1031
1032 con_log(CL_ANN, (KERN_WARNING 1032 con_log(CL_ANN, (KERN_WARNING
1033 "megaraid: out of memory, %s %d\n", __FUNCTION__, 1033 "megaraid: out of memory, %s %d\n", __func__,
1034 __LINE__)); 1034 __LINE__));
1035 1035
1036 goto out_free_common_mbox; 1036 goto out_free_common_mbox;
@@ -1052,7 +1052,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
1052 1052
1053 if (adapter->kscb_list == NULL) { 1053 if (adapter->kscb_list == NULL) {
1054 con_log(CL_ANN, (KERN_WARNING 1054 con_log(CL_ANN, (KERN_WARNING
1055 "megaraid: out of memory, %s %d\n", __FUNCTION__, 1055 "megaraid: out of memory, %s %d\n", __func__,
1056 __LINE__)); 1056 __LINE__));
1057 goto out_free_ibuf; 1057 goto out_free_ibuf;
1058 } 1058 }
@@ -1060,7 +1060,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
1060 // memory allocation for our command packets 1060 // memory allocation for our command packets
1061 if (megaraid_mbox_setup_dma_pools(adapter) != 0) { 1061 if (megaraid_mbox_setup_dma_pools(adapter) != 0) {
1062 con_log(CL_ANN, (KERN_WARNING 1062 con_log(CL_ANN, (KERN_WARNING
1063 "megaraid: out of memory, %s %d\n", __FUNCTION__, 1063 "megaraid: out of memory, %s %d\n", __func__,
1064 __LINE__)); 1064 __LINE__));
1065 goto out_free_scb_list; 1065 goto out_free_scb_list;
1066 } 1066 }
@@ -2981,7 +2981,7 @@ megaraid_mbox_product_info(adapter_t *adapter)
2981 2981
2982 if (pinfo == NULL) { 2982 if (pinfo == NULL) {
2983 con_log(CL_ANN, (KERN_WARNING 2983 con_log(CL_ANN, (KERN_WARNING
2984 "megaraid: out of memory, %s %d\n", __FUNCTION__, 2984 "megaraid: out of memory, %s %d\n", __func__,
2985 __LINE__)); 2985 __LINE__));
2986 2986
2987 return -1; 2987 return -1;
@@ -3508,7 +3508,7 @@ megaraid_cmm_register(adapter_t *adapter)
3508 3508
3509 if (adapter->uscb_list == NULL) { 3509 if (adapter->uscb_list == NULL) {
3510 con_log(CL_ANN, (KERN_WARNING 3510 con_log(CL_ANN, (KERN_WARNING
3511 "megaraid: out of memory, %s %d\n", __FUNCTION__, 3511 "megaraid: out of memory, %s %d\n", __func__,
3512 __LINE__)); 3512 __LINE__));
3513 return -1; 3513 return -1;
3514 } 3514 }
@@ -3879,7 +3879,7 @@ megaraid_sysfs_alloc_resources(adapter_t *adapter)
3879 !raid_dev->sysfs_buffer) { 3879 !raid_dev->sysfs_buffer) {
3880 3880
3881 con_log(CL_ANN, (KERN_WARNING 3881 con_log(CL_ANN, (KERN_WARNING
3882 "megaraid: out of memory, %s %d\n", __FUNCTION__, 3882 "megaraid: out of memory, %s %d\n", __func__,
3883 __LINE__)); 3883 __LINE__));
3884 3884
3885 rval = -ENOMEM; 3885 rval = -ENOMEM;
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index ac3b280c2a72..f680561d2c6f 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -929,7 +929,7 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
929 !adapter->pthru_dma_pool) { 929 !adapter->pthru_dma_pool) {
930 930
931 con_log(CL_ANN, (KERN_WARNING 931 con_log(CL_ANN, (KERN_WARNING
932 "megaraid cmm: out of memory, %s %d\n", __FUNCTION__, 932 "megaraid cmm: out of memory, %s %d\n", __func__,
933 __LINE__)); 933 __LINE__));
934 934
935 rval = (-ENOMEM); 935 rval = (-ENOMEM);
@@ -957,7 +957,7 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
957 957
958 con_log(CL_ANN, (KERN_WARNING 958 con_log(CL_ANN, (KERN_WARNING
959 "megaraid cmm: out of memory, %s %d\n", 959 "megaraid cmm: out of memory, %s %d\n",
960 __FUNCTION__, __LINE__)); 960 __func__, __LINE__));
961 961
962 rval = (-ENOMEM); 962 rval = (-ENOMEM);
963 963
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 7fed35372150..edf9fdb3cb3c 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -299,9 +299,9 @@ static struct scsi_host_template nsp32_template = {
299#else 299#else
300# define NSP32_DEBUG_MASK 0xffffff 300# define NSP32_DEBUG_MASK 0xffffff
301# define nsp32_msg(type, args...) \ 301# define nsp32_msg(type, args...) \
302 nsp32_message (__FUNCTION__, __LINE__, (type), args) 302 nsp32_message (__func__, __LINE__, (type), args)
303# define nsp32_dbg(mask, args...) \ 303# define nsp32_dbg(mask, args...) \
304 nsp32_dmessage(__FUNCTION__, __LINE__, (mask), args) 304 nsp32_dmessage(__func__, __LINE__, (mask), args)
305#endif 305#endif
306 306
307#define NSP32_DEBUG_QUEUECOMMAND BIT(0) 307#define NSP32_DEBUG_QUEUECOMMAND BIT(0)
diff --git a/drivers/scsi/nsp32_debug.c b/drivers/scsi/nsp32_debug.c
index ef3c59cbcff6..2fb3fb58858d 100644
--- a/drivers/scsi/nsp32_debug.c
+++ b/drivers/scsi/nsp32_debug.c
@@ -88,7 +88,7 @@ static void print_commandk (unsigned char *command)
88 int i,s; 88 int i,s;
89// printk(KERN_DEBUG); 89// printk(KERN_DEBUG);
90 print_opcodek(command[0]); 90 print_opcodek(command[0]);
91 /*printk(KERN_DEBUG "%s ", __FUNCTION__);*/ 91 /*printk(KERN_DEBUG "%s ", __func__);*/
92 if ((command[0] >> 5) == 6 || 92 if ((command[0] >> 5) == 6 ||
93 (command[0] >> 5) == 7 ) { 93 (command[0] >> 5) == 7 ) {
94 s = 12; /* vender specific */ 94 s = 12; /* vender specific */
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 5082ca3c6876..a221b6ef9fa9 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -107,9 +107,9 @@ static nsp_hw_data nsp_data_base; /* attach <-> detect glue */
107#else 107#else
108# define NSP_DEBUG_MASK 0xffffff 108# define NSP_DEBUG_MASK 0xffffff
109# define nsp_msg(type, args...) \ 109# define nsp_msg(type, args...) \
110 nsp_cs_message (__FUNCTION__, __LINE__, (type), args) 110 nsp_cs_message (__func__, __LINE__, (type), args)
111# define nsp_dbg(mask, args...) \ 111# define nsp_dbg(mask, args...) \
112 nsp_cs_dmessage(__FUNCTION__, __LINE__, (mask), args) 112 nsp_cs_dmessage(__func__, __LINE__, (mask), args)
113#endif 113#endif
114 114
115#define NSP_DEBUG_QUEUECOMMAND BIT(0) 115#define NSP_DEBUG_QUEUECOMMAND BIT(0)
diff --git a/drivers/scsi/pcmcia/nsp_debug.c b/drivers/scsi/pcmcia/nsp_debug.c
index 2f75fe6e35a7..3c6ef64fcbff 100644
--- a/drivers/scsi/pcmcia/nsp_debug.c
+++ b/drivers/scsi/pcmcia/nsp_debug.c
@@ -90,7 +90,7 @@ static void print_commandk (unsigned char *command)
90 int i, s; 90 int i, s;
91 printk(KERN_DEBUG); 91 printk(KERN_DEBUG);
92 print_opcodek(command[0]); 92 print_opcodek(command[0]);
93 /*printk(KERN_DEBUG "%s ", __FUNCTION__);*/ 93 /*printk(KERN_DEBUG "%s ", __func__);*/
94 if ((command[0] >> 5) == 6 || 94 if ((command[0] >> 5) == 6 ||
95 (command[0] >> 5) == 7 ) { 95 (command[0] >> 5) == 7 ) {
96 s = 12; /* vender specific */ 96 s = 12; /* vender specific */
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index f655ae320b48..8aa0bd987e29 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -171,7 +171,7 @@ static int device_check(ppa_struct *dev);
171 171
172#if PPA_DEBUG > 0 172#if PPA_DEBUG > 0
173#define ppa_fail(x,y) printk("ppa: ppa_fail(%i) from %s at line %d\n",\ 173#define ppa_fail(x,y) printk("ppa: ppa_fail(%i) from %s at line %d\n",\
174 y, __FUNCTION__, __LINE__); ppa_fail_func(x,y); 174 y, __func__, __LINE__); ppa_fail_func(x,y);
175static inline void ppa_fail_func(ppa_struct *dev, int error_code) 175static inline void ppa_fail_func(ppa_struct *dev, int error_code)
176#else 176#else
177static inline void ppa_fail(ppa_struct *dev, int error_code) 177static inline void ppa_fail(ppa_struct *dev, int error_code)
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 3754ab87f89a..37f9ba0cd798 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1695,7 +1695,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1695 risc_code_size = *ql1280_board_tbl[ha->devnum].fwlen; 1695 risc_code_size = *ql1280_board_tbl[ha->devnum].fwlen;
1696 1696
1697 dprintk(1, "%s: DMA RISC code (%i) words\n", 1697 dprintk(1, "%s: DMA RISC code (%i) words\n",
1698 __FUNCTION__, risc_code_size); 1698 __func__, risc_code_size);
1699 1699
1700 num = 0; 1700 num = 0;
1701 while (risc_code_size > 0) { 1701 while (risc_code_size > 0) {
@@ -1721,7 +1721,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1721 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff; 1721 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1722 mb[6] = pci_dma_hi32(ha->request_dma) >> 16; 1722 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
1723 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n", 1723 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
1724 __FUNCTION__, mb[0], 1724 __func__, mb[0],
1725 (void *)(long)ha->request_dma, 1725 (void *)(long)ha->request_dma,
1726 mb[6], mb[7], mb[2], mb[3]); 1726 mb[6], mb[7], mb[2], mb[3]);
1727 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 | 1727 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
@@ -1753,10 +1753,10 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1753 if (tbuf[i] != sp[i] && warn++ < 10) { 1753 if (tbuf[i] != sp[i] && warn++ < 10) {
1754 printk(KERN_ERR "%s: FW compare error @ " 1754 printk(KERN_ERR "%s: FW compare error @ "
1755 "byte(0x%x) loop#=%x\n", 1755 "byte(0x%x) loop#=%x\n",
1756 __FUNCTION__, i, num); 1756 __func__, i, num);
1757 printk(KERN_ERR "%s: FWbyte=%x " 1757 printk(KERN_ERR "%s: FWbyte=%x "
1758 "FWfromChip=%x\n", 1758 "FWfromChip=%x\n",
1759 __FUNCTION__, sp[i], tbuf[i]); 1759 __func__, sp[i], tbuf[i]);
1760 /*break; */ 1760 /*break; */
1761 } 1761 }
1762 } 1762 }
@@ -1781,7 +1781,7 @@ qla1280_start_firmware(struct scsi_qla_host *ha)
1781 int err; 1781 int err;
1782 1782
1783 dprintk(1, "%s: Verifying checksum of loaded RISC code.\n", 1783 dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
1784 __FUNCTION__); 1784 __func__);
1785 1785
1786 /* Verify checksum of loaded RISC code. */ 1786 /* Verify checksum of loaded RISC code. */
1787 mb[0] = MBC_VERIFY_CHECKSUM; 1787 mb[0] = MBC_VERIFY_CHECKSUM;
@@ -1794,7 +1794,7 @@ qla1280_start_firmware(struct scsi_qla_host *ha)
1794 } 1794 }
1795 1795
1796 /* Start firmware execution. */ 1796 /* Start firmware execution. */
1797 dprintk(1, "%s: start firmware running.\n", __FUNCTION__); 1797 dprintk(1, "%s: start firmware running.\n", __func__);
1798 mb[0] = MBC_EXECUTE_FIRMWARE; 1798 mb[0] = MBC_EXECUTE_FIRMWARE;
1799 mb[1] = *ql1280_board_tbl[ha->devnum].fwstart; 1799 mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
1800 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); 1800 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 8dd88fc1244a..7a4409ab30ea 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -20,18 +20,12 @@ qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
20{ 20{
21 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 21 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
22 struct device, kobj))); 22 struct device, kobj)));
23 char *rbuf = (char *)ha->fw_dump;
24 23
25 if (ha->fw_dump_reading == 0) 24 if (ha->fw_dump_reading == 0)
26 return 0; 25 return 0;
27 if (off > ha->fw_dump_len)
28 return 0;
29 if (off + count > ha->fw_dump_len)
30 count = ha->fw_dump_len - off;
31 26
32 memcpy(buf, &rbuf[off], count); 27 return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
33 28 ha->fw_dump_len);
34 return (count);
35} 29}
36 30
37static ssize_t 31static ssize_t
@@ -94,20 +88,13 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
94{ 88{
95 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 89 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
96 struct device, kobj))); 90 struct device, kobj)));
97 int size = ha->nvram_size;
98 char *nvram_cache = ha->nvram;
99 91
100 if (!capable(CAP_SYS_ADMIN) || off > size || count == 0) 92 if (!capable(CAP_SYS_ADMIN))
101 return 0; 93 return 0;
102 if (off + count > size) {
103 size -= off;
104 count = size;
105 }
106 94
107 /* Read NVRAM data from cache. */ 95 /* Read NVRAM data from cache. */
108 memcpy(buf, &nvram_cache[off], count); 96 return memory_read_from_buffer(buf, count, &off, ha->nvram,
109 97 ha->nvram_size);
110 return count;
111} 98}
112 99
113static ssize_t 100static ssize_t
@@ -175,14 +162,9 @@ qla2x00_sysfs_read_optrom(struct kobject *kobj,
175 162
176 if (ha->optrom_state != QLA_SREADING) 163 if (ha->optrom_state != QLA_SREADING)
177 return 0; 164 return 0;
178 if (off > ha->optrom_region_size)
179 return 0;
180 if (off + count > ha->optrom_region_size)
181 count = ha->optrom_region_size - off;
182
183 memcpy(buf, &ha->optrom_buffer[off], count);
184 165
185 return count; 166 return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
167 ha->optrom_region_size);
186} 168}
187 169
188static ssize_t 170static ssize_t
@@ -374,20 +356,12 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
374{ 356{
375 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 357 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
376 struct device, kobj))); 358 struct device, kobj)));
377 int size = ha->vpd_size;
378 char *vpd_cache = ha->vpd;
379 359
380 if (!capable(CAP_SYS_ADMIN) || off > size || count == 0) 360 if (!capable(CAP_SYS_ADMIN))
381 return 0; 361 return 0;
382 if (off + count > size) {
383 size -= off;
384 count = size;
385 }
386 362
387 /* Read NVRAM data from cache. */ 363 /* Read NVRAM data from cache. */
388 memcpy(buf, &vpd_cache[off], count); 364 return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
389
390 return count;
391} 365}
392 366
393static ssize_t 367static ssize_t
@@ -557,8 +531,10 @@ qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
557 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 531 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
558 uint32_t sn; 532 uint32_t sn;
559 533
560 if (IS_FWI2_CAPABLE(ha)) 534 if (IS_FWI2_CAPABLE(ha)) {
561 return snprintf(buf, PAGE_SIZE, "\n"); 535 qla2xxx_get_vpd_field(ha, "SN", buf, PAGE_SIZE);
536 return snprintf(buf, PAGE_SIZE, "%s\n", buf);
537 }
562 538
563 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1; 539 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
564 return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000, 540 return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
@@ -809,6 +785,16 @@ qla2x00_optrom_fw_version_show(struct device *dev,
809 ha->fw_revision[3]); 785 ha->fw_revision[3]);
810} 786}
811 787
788static ssize_t
789qla2x00_total_isp_aborts_show(struct device *dev,
790 struct device_attribute *attr, char *buf)
791{
792 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
793
794 return snprintf(buf, PAGE_SIZE, "%d\n",
795 ha->qla_stats.total_isp_aborts);
796}
797
812static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); 798static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
813static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); 799static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
814static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); 800static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -831,6 +817,8 @@ static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
831 qla2x00_optrom_fcode_version_show, NULL); 817 qla2x00_optrom_fcode_version_show, NULL);
832static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show, 818static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
833 NULL); 819 NULL);
820static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
821 NULL);
834 822
835struct device_attribute *qla2x00_host_attrs[] = { 823struct device_attribute *qla2x00_host_attrs[] = {
836 &dev_attr_driver_version, 824 &dev_attr_driver_version,
@@ -849,6 +837,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
849 &dev_attr_optrom_efi_version, 837 &dev_attr_optrom_efi_version,
850 &dev_attr_optrom_fcode_version, 838 &dev_attr_optrom_fcode_version,
851 &dev_attr_optrom_fw_version, 839 &dev_attr_optrom_fw_version,
840 &dev_attr_total_isp_aborts,
852 NULL, 841 NULL,
853}; 842};
854 843
@@ -972,26 +961,39 @@ qla2x00_get_starget_port_id(struct scsi_target *starget)
972} 961}
973 962
974static void 963static void
975qla2x00_get_rport_loss_tmo(struct fc_rport *rport) 964qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
976{ 965{
977 struct Scsi_Host *host = rport_to_shost(rport); 966 if (timeout)
978 scsi_qla_host_t *ha = shost_priv(host); 967 rport->dev_loss_tmo = timeout;
979 968 else
980 rport->dev_loss_tmo = ha->port_down_retry_count + 5; 969 rport->dev_loss_tmo = 1;
981} 970}
982 971
983static void 972static void
984qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) 973qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
985{ 974{
986 struct Scsi_Host *host = rport_to_shost(rport); 975 struct Scsi_Host *host = rport_to_shost(rport);
987 scsi_qla_host_t *ha = shost_priv(host); 976 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
977
978 qla2x00_abort_fcport_cmds(fcport);
979
980 /*
981 * Transport has effectively 'deleted' the rport, clear
982 * all local references.
983 */
984 spin_lock_irq(host->host_lock);
985 fcport->rport = NULL;
986 *((fc_port_t **)rport->dd_data) = NULL;
987 spin_unlock_irq(host->host_lock);
988}
988 989
989 if (timeout) 990static void
990 ha->port_down_retry_count = timeout; 991qla2x00_terminate_rport_io(struct fc_rport *rport)
991 else 992{
992 ha->port_down_retry_count = 1; 993 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
993 994
994 rport->dev_loss_tmo = ha->port_down_retry_count + 5; 995 qla2x00_abort_fcport_cmds(fcport);
996 scsi_target_unblock(&rport->dev);
995} 997}
996 998
997static int 999static int
@@ -1045,6 +1047,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1045 pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt; 1047 pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
1046 pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt; 1048 pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
1047 if (IS_FWI2_CAPABLE(ha)) { 1049 if (IS_FWI2_CAPABLE(ha)) {
1050 pfc_host_stat->lip_count = stats->lip_cnt;
1048 pfc_host_stat->tx_frames = stats->tx_frames; 1051 pfc_host_stat->tx_frames = stats->tx_frames;
1049 pfc_host_stat->rx_frames = stats->rx_frames; 1052 pfc_host_stat->rx_frames = stats->rx_frames;
1050 pfc_host_stat->dumped_frames = stats->dumped_frames; 1053 pfc_host_stat->dumped_frames = stats->dumped_frames;
@@ -1173,17 +1176,16 @@ vport_create_failed_2:
1173static int 1176static int
1174qla24xx_vport_delete(struct fc_vport *fc_vport) 1177qla24xx_vport_delete(struct fc_vport *fc_vport)
1175{ 1178{
1176 scsi_qla_host_t *ha = shost_priv(fc_vport->shost);
1177 scsi_qla_host_t *vha = fc_vport->dd_data; 1179 scsi_qla_host_t *vha = fc_vport->dd_data;
1180 scsi_qla_host_t *pha = to_qla_parent(vha);
1181
1182 while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
1183 test_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags))
1184 msleep(1000);
1178 1185
1179 qla24xx_disable_vp(vha); 1186 qla24xx_disable_vp(vha);
1180 qla24xx_deallocate_vp_id(vha); 1187 qla24xx_deallocate_vp_id(vha);
1181 1188
1182 mutex_lock(&ha->vport_lock);
1183 ha->cur_vport_count--;
1184 clear_bit(vha->vp_idx, ha->vp_idx_map);
1185 mutex_unlock(&ha->vport_lock);
1186
1187 kfree(vha->node_name); 1189 kfree(vha->node_name);
1188 kfree(vha->port_name); 1190 kfree(vha->port_name);
1189 1191
@@ -1248,11 +1250,12 @@ struct fc_function_template qla2xxx_transport_functions = {
1248 .get_starget_port_id = qla2x00_get_starget_port_id, 1250 .get_starget_port_id = qla2x00_get_starget_port_id,
1249 .show_starget_port_id = 1, 1251 .show_starget_port_id = 1,
1250 1252
1251 .get_rport_dev_loss_tmo = qla2x00_get_rport_loss_tmo,
1252 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo, 1253 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
1253 .show_rport_dev_loss_tmo = 1, 1254 .show_rport_dev_loss_tmo = 1,
1254 1255
1255 .issue_fc_host_lip = qla2x00_issue_lip, 1256 .issue_fc_host_lip = qla2x00_issue_lip,
1257 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
1258 .terminate_rport_io = qla2x00_terminate_rport_io,
1256 .get_fc_host_stats = qla2x00_get_fc_host_stats, 1259 .get_fc_host_stats = qla2x00_get_fc_host_stats,
1257 1260
1258 .vport_create = qla24xx_vport_create, 1261 .vport_create = qla24xx_vport_create,
@@ -1291,11 +1294,12 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
1291 .get_starget_port_id = qla2x00_get_starget_port_id, 1294 .get_starget_port_id = qla2x00_get_starget_port_id,
1292 .show_starget_port_id = 1, 1295 .show_starget_port_id = 1,
1293 1296
1294 .get_rport_dev_loss_tmo = qla2x00_get_rport_loss_tmo,
1295 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo, 1297 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
1296 .show_rport_dev_loss_tmo = 1, 1298 .show_rport_dev_loss_tmo = 1,
1297 1299
1298 .issue_fc_host_lip = qla2x00_issue_lip, 1300 .issue_fc_host_lip = qla2x00_issue_lip,
1301 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
1302 .terminate_rport_io = qla2x00_terminate_rport_io,
1299 .get_fc_host_stats = qla2x00_get_fc_host_stats, 1303 .get_fc_host_stats = qla2x00_get_fc_host_stats,
1300}; 1304};
1301 1305
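Several of the qla_attr.c hunks above (the fw_dump, nvram, optrom and vpd sysfs reads) collapse open-coded offset and length clamping into memory_read_from_buffer(); in the nvram and vpd readers only the capable(CAP_SYS_ADMIN) check stays behind as an explicit early return. The helper's semantics are roughly the sketch below: clamp the request to what the cached buffer holds, copy, advance the file offset and return the byte count. This is a simplified model for illustration, not the kernel's exact implementation.

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

/* Simplified model of memory_read_from_buffer() semantics. */
static ssize_t read_from_cache(void *to, size_t count, loff_t *ppos,
			       const void *from, size_t available)
{
	loff_t pos = *ppos;

	if (pos < 0)
		return -EINVAL;
	if (pos >= available || !count)
		return 0;
	if (count > available - pos)
		count = available - pos;

	memcpy(to, (const char *)from + pos, count);
	*ppos = pos + count;

	return count;
}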
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index cbef785765cf..510ba64bc286 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -216,7 +216,7 @@ qla24xx_soft_reset(scsi_qla_host_t *ha)
216 216
217static int 217static int
218qla2xxx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint16_t *ram, 218qla2xxx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint16_t *ram,
219 uint16_t ram_words, void **nxt) 219 uint32_t ram_words, void **nxt)
220{ 220{
221 int rval; 221 int rval;
222 uint32_t cnt, stat, timer, words, idx; 222 uint32_t cnt, stat, timer, words, idx;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 8dd600013bd1..6da31ba94404 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -864,7 +864,8 @@ struct link_statistics {
864 uint32_t prim_seq_err_cnt; 864 uint32_t prim_seq_err_cnt;
865 uint32_t inval_xmit_word_cnt; 865 uint32_t inval_xmit_word_cnt;
866 uint32_t inval_crc_cnt; 866 uint32_t inval_crc_cnt;
867 uint32_t unused1[0x1b]; 867 uint32_t lip_cnt;
868 uint32_t unused1[0x1a];
868 uint32_t tx_frames; 869 uint32_t tx_frames;
869 uint32_t rx_frames; 870 uint32_t rx_frames;
870 uint32_t dumped_frames; 871 uint32_t dumped_frames;
@@ -1544,7 +1545,6 @@ typedef struct fc_port {
1544 int login_retry; 1545 int login_retry;
1545 atomic_t port_down_timer; 1546 atomic_t port_down_timer;
1546 1547
1547 spinlock_t rport_lock;
1548 struct fc_rport *rport, *drport; 1548 struct fc_rport *rport, *drport;
1549 u32 supported_classes; 1549 u32 supported_classes;
1550 1550
@@ -2155,6 +2155,10 @@ struct qla_chip_state_84xx {
2155 uint32_t gold_fw_version; 2155 uint32_t gold_fw_version;
2156}; 2156};
2157 2157
2158struct qla_statistics {
2159 uint32_t total_isp_aborts;
2160};
2161
2158/* 2162/*
2159 * Linux Host Adapter structure 2163 * Linux Host Adapter structure
2160 */ 2164 */
@@ -2166,7 +2170,6 @@ typedef struct scsi_qla_host {
2166 struct pci_dev *pdev; 2170 struct pci_dev *pdev;
2167 2171
2168 unsigned long host_no; 2172 unsigned long host_no;
2169 unsigned long instance;
2170 2173
2171 volatile struct { 2174 volatile struct {
2172 uint32_t init_done :1; 2175 uint32_t init_done :1;
@@ -2515,7 +2518,7 @@ typedef struct scsi_qla_host {
2515 2518
2516 uint8_t model_number[16+1]; 2519 uint8_t model_number[16+1];
2517#define BINZERO "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" 2520#define BINZERO "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
2518 char *model_desc; 2521 char model_desc[80];
2519 uint8_t adapter_id[16+1]; 2522 uint8_t adapter_id[16+1];
2520 2523
2521 uint8_t *node_name; 2524 uint8_t *node_name;
@@ -2596,6 +2599,7 @@ typedef struct scsi_qla_host {
2596 int cur_vport_count; 2599 int cur_vport_count;
2597 2600
2598 struct qla_chip_state_84xx *cs84xx; 2601 struct qla_chip_state_84xx *cs84xx;
2602 struct qla_statistics qla_stats;
2599} scsi_qla_host_t; 2603} scsi_qla_host_t;
2600 2604
2601 2605
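
One of the qla_def.h changes above is layout-sensitive: struct link_statistics gains a lip_cnt word by shrinking the adjacent reserved array from 0x1b to 0x1a entries, so the firmware-defined offsets of tx_frames and the counters after it do not move. A quick userspace check of that invariant, using simplified stand-in structs rather than the driver's full definition:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct stats_old {                      /* stand-in for the previous layout */
        uint32_t inval_crc_cnt;
        uint32_t unused1[0x1b];
        uint32_t tx_frames;
};

struct stats_new {                      /* lip_cnt carved out of the pad */
        uint32_t inval_crc_cnt;
        uint32_t lip_cnt;
        uint32_t unused1[0x1a];         /* one fewer reserved word */
        uint32_t tx_frames;
};

int main(void)
{
        /* The size and every later offset must be unchanged. */
        assert(sizeof(struct stats_old) == sizeof(struct stats_new));
        assert(offsetof(struct stats_old, tx_frames) ==
               offsetof(struct stats_new, tx_frames));
        printf("layout preserved: %zu bytes\n", sizeof(struct stats_new));
        return 0;
}
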
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 9b4bebee6879..0b156735e9a6 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -62,7 +62,7 @@ extern int ql2xfdmienable;
62extern int ql2xallocfwdump; 62extern int ql2xallocfwdump;
63extern int ql2xextended_error_logging; 63extern int ql2xextended_error_logging;
64extern int ql2xqfullrampup; 64extern int ql2xqfullrampup;
65extern int num_hosts; 65extern int ql2xiidmaenable;
66 66
67extern int qla2x00_loop_reset(scsi_qla_host_t *); 67extern int qla2x00_loop_reset(scsi_qla_host_t *);
68extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 68extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -71,6 +71,8 @@ extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum
71extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t, 71extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t,
72 uint16_t, uint16_t); 72 uint16_t, uint16_t);
73 73
74extern void qla2x00_abort_fcport_cmds(fc_port_t *);
75
74/* 76/*
75 * Global Functions in qla_mid.c source file. 77 * Global Functions in qla_mid.c source file.
76 */ 78 */
@@ -312,6 +314,7 @@ extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t,
312 uint16_t, uint16_t); 314 uint16_t, uint16_t);
313 315
314extern void qla2xxx_get_flash_info(scsi_qla_host_t *); 316extern void qla2xxx_get_flash_info(scsi_qla_host_t *);
317extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
315 318
316/* 319/*
317 * Global Function Prototypes in qla_dbg.c source file. 320 * Global Function Prototypes in qla_dbg.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 4cb80b476c85..c2a4bfbcb05b 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1661,6 +1661,12 @@ qla2x00_fdmi_register(scsi_qla_host_t *ha)
1661{ 1661{
1662 int rval; 1662 int rval;
1663 1663
1664 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
1665 DEBUG2(printk("scsi(%ld): FDMI unsupported on "
1666 "ISP2100/ISP2200.\n", ha->host_no));
1667 return QLA_SUCCESS;
1668 }
1669
1664 rval = qla2x00_mgmt_svr_login(ha); 1670 rval = qla2x00_mgmt_svr_login(ha);
1665 if (rval) 1671 if (rval)
1666 return rval; 1672 return rval;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index bbbc5a632a1d..601a6b29750c 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -334,6 +334,8 @@ static int
334qla2x00_isp_firmware(scsi_qla_host_t *ha) 334qla2x00_isp_firmware(scsi_qla_host_t *ha)
335{ 335{
336 int rval; 336 int rval;
337 uint16_t loop_id, topo, sw_cap;
338 uint8_t domain, area, al_pa;
337 339
338 /* Assume loading risc code */ 340 /* Assume loading risc code */
339 rval = QLA_FUNCTION_FAILED; 341 rval = QLA_FUNCTION_FAILED;
@@ -345,6 +347,11 @@ qla2x00_isp_firmware(scsi_qla_host_t *ha)
345 347
346 /* Verify checksum of loaded RISC code. */ 348 /* Verify checksum of loaded RISC code. */
347 rval = qla2x00_verify_checksum(ha, ha->fw_srisc_address); 349 rval = qla2x00_verify_checksum(ha, ha->fw_srisc_address);
350 if (rval == QLA_SUCCESS) {
351 /* And, verify we are not in ROM code. */
352 rval = qla2x00_get_adapter_id(ha, &loop_id, &al_pa,
353 &area, &domain, &topo, &sw_cap);
354 }
348 } 355 }
349 356
350 if (rval) { 357 if (rval) {
@@ -722,7 +729,7 @@ qla24xx_chip_diag(scsi_qla_host_t *ha)
722 /* Perform RISC reset. */ 729 /* Perform RISC reset. */
723 qla24xx_reset_risc(ha); 730 qla24xx_reset_risc(ha);
724 731
725 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024; 732 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * ha->request_q_length;
726 733
727 rval = qla2x00_mbx_reg_test(ha); 734 rval = qla2x00_mbx_reg_test(ha);
728 if (rval) { 735 if (rval) {
@@ -768,42 +775,16 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
768 mem_size = (ha->fw_memory_size - 0x100000 + 1) * 775 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
769 sizeof(uint32_t); 776 sizeof(uint32_t);
770 777
771 /* Allocate memory for Extended Trace Buffer. */
772 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
773 GFP_KERNEL);
774 if (!tc) {
775 qla_printk(KERN_WARNING, ha, "Unable to allocate "
776 "(%d KB) for EFT.\n", EFT_SIZE / 1024);
777 goto cont_alloc;
778 }
779
780 memset(tc, 0, EFT_SIZE);
781 rval = qla2x00_enable_eft_trace(ha, tc_dma, EFT_NUM_BUFFERS);
782 if (rval) {
783 qla_printk(KERN_WARNING, ha, "Unable to initialize "
784 "EFT (%d).\n", rval);
785 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
786 tc_dma);
787 goto cont_alloc;
788 }
789
790 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n",
791 EFT_SIZE / 1024);
792
793 eft_size = EFT_SIZE;
794 ha->eft_dma = tc_dma;
795 ha->eft = tc;
796
797 /* Allocate memory for Fibre Channel Event Buffer. */ 778 /* Allocate memory for Fibre Channel Event Buffer. */
798 if (!IS_QLA25XX(ha)) 779 if (!IS_QLA25XX(ha))
799 goto cont_alloc; 780 goto try_eft;
800 781
801 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 782 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
802 GFP_KERNEL); 783 GFP_KERNEL);
803 if (!tc) { 784 if (!tc) {
804 qla_printk(KERN_WARNING, ha, "Unable to allocate " 785 qla_printk(KERN_WARNING, ha, "Unable to allocate "
805 "(%d KB) for FCE.\n", FCE_SIZE / 1024); 786 "(%d KB) for FCE.\n", FCE_SIZE / 1024);
806 goto cont_alloc; 787 goto try_eft;
807 } 788 }
808 789
809 memset(tc, 0, FCE_SIZE); 790 memset(tc, 0, FCE_SIZE);
@@ -815,7 +796,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
815 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, 796 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
816 tc_dma); 797 tc_dma);
817 ha->flags.fce_enabled = 0; 798 ha->flags.fce_enabled = 0;
818 goto cont_alloc; 799 goto try_eft;
819 } 800 }
820 801
821 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n", 802 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n",
@@ -825,6 +806,32 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
825 ha->flags.fce_enabled = 1; 806 ha->flags.fce_enabled = 1;
826 ha->fce_dma = tc_dma; 807 ha->fce_dma = tc_dma;
827 ha->fce = tc; 808 ha->fce = tc;
809try_eft:
810 /* Allocate memory for Extended Trace Buffer. */
811 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
812 GFP_KERNEL);
813 if (!tc) {
814 qla_printk(KERN_WARNING, ha, "Unable to allocate "
815 "(%d KB) for EFT.\n", EFT_SIZE / 1024);
816 goto cont_alloc;
817 }
818
819 memset(tc, 0, EFT_SIZE);
820 rval = qla2x00_enable_eft_trace(ha, tc_dma, EFT_NUM_BUFFERS);
821 if (rval) {
822 qla_printk(KERN_WARNING, ha, "Unable to initialize "
823 "EFT (%d).\n", rval);
824 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
825 tc_dma);
826 goto cont_alloc;
827 }
828
829 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n",
830 EFT_SIZE / 1024);
831
832 eft_size = EFT_SIZE;
833 ha->eft_dma = tc_dma;
834 ha->eft = tc;
828 } 835 }
829cont_alloc: 836cont_alloc:
830 req_q_size = ha->request_q_length * sizeof(request_t); 837 req_q_size = ha->request_q_length * sizeof(request_t);
@@ -1501,18 +1508,25 @@ qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *de
1501 index = (ha->pdev->subsystem_device & 0xff); 1508 index = (ha->pdev->subsystem_device & 0xff);
1502 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 1509 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
1503 index < QLA_MODEL_NAMES) 1510 index < QLA_MODEL_NAMES)
1504 ha->model_desc = qla2x00_model_name[index * 2 + 1]; 1511 strncpy(ha->model_desc,
1512 qla2x00_model_name[index * 2 + 1],
1513 sizeof(ha->model_desc) - 1);
1505 } else { 1514 } else {
1506 index = (ha->pdev->subsystem_device & 0xff); 1515 index = (ha->pdev->subsystem_device & 0xff);
1507 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 1516 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
1508 index < QLA_MODEL_NAMES) { 1517 index < QLA_MODEL_NAMES) {
1509 strcpy(ha->model_number, 1518 strcpy(ha->model_number,
1510 qla2x00_model_name[index * 2]); 1519 qla2x00_model_name[index * 2]);
1511 ha->model_desc = qla2x00_model_name[index * 2 + 1]; 1520 strncpy(ha->model_desc,
1521 qla2x00_model_name[index * 2 + 1],
1522 sizeof(ha->model_desc) - 1);
1512 } else { 1523 } else {
1513 strcpy(ha->model_number, def); 1524 strcpy(ha->model_number, def);
1514 } 1525 }
1515 } 1526 }
1527 if (IS_FWI2_CAPABLE(ha))
1528 qla2xxx_get_vpd_field(ha, "\x82", ha->model_desc,
1529 sizeof(ha->model_desc));
1516} 1530}
1517 1531
1518/* On sparc systems, obtain port and node WWN from firmware 1532/* On sparc systems, obtain port and node WWN from firmware
@@ -1864,12 +1878,11 @@ qla2x00_rport_del(void *data)
1864{ 1878{
1865 fc_port_t *fcport = data; 1879 fc_port_t *fcport = data;
1866 struct fc_rport *rport; 1880 struct fc_rport *rport;
1867 unsigned long flags;
1868 1881
1869 spin_lock_irqsave(&fcport->rport_lock, flags); 1882 spin_lock_irq(fcport->ha->host->host_lock);
1870 rport = fcport->drport; 1883 rport = fcport->drport;
1871 fcport->drport = NULL; 1884 fcport->drport = NULL;
1872 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1885 spin_unlock_irq(fcport->ha->host->host_lock);
1873 if (rport) 1886 if (rport)
1874 fc_remote_port_delete(rport); 1887 fc_remote_port_delete(rport);
1875} 1888}
@@ -1898,7 +1911,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
1898 atomic_set(&fcport->state, FCS_UNCONFIGURED); 1911 atomic_set(&fcport->state, FCS_UNCONFIGURED);
1899 fcport->flags = FCF_RLC_SUPPORT; 1912 fcport->flags = FCF_RLC_SUPPORT;
1900 fcport->supported_classes = FC_COS_UNSPECIFIED; 1913 fcport->supported_classes = FC_COS_UNSPECIFIED;
1901 spin_lock_init(&fcport->rport_lock);
1902 1914
1903 return fcport; 1915 return fcport;
1904} 1916}
@@ -2007,8 +2019,10 @@ qla2x00_configure_loop(scsi_qla_host_t *ha)
2007 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2019 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
2008 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) 2020 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2009 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 2021 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
2010 if (test_bit(RSCN_UPDATE, &save_flags)) 2022 if (test_bit(RSCN_UPDATE, &save_flags)) {
2023 ha->flags.rscn_queue_overflow = 1;
2011 set_bit(RSCN_UPDATE, &ha->dpc_flags); 2024 set_bit(RSCN_UPDATE, &ha->dpc_flags);
2025 }
2012 } 2026 }
2013 2027
2014 return (rval); 2028 return (rval);
@@ -2243,28 +2257,24 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
2243{ 2257{
2244 struct fc_rport_identifiers rport_ids; 2258 struct fc_rport_identifiers rport_ids;
2245 struct fc_rport *rport; 2259 struct fc_rport *rport;
2246 unsigned long flags;
2247 2260
2248 if (fcport->drport) 2261 if (fcport->drport)
2249 qla2x00_rport_del(fcport); 2262 qla2x00_rport_del(fcport);
2250 if (fcport->rport)
2251 return;
2252 2263
2253 rport_ids.node_name = wwn_to_u64(fcport->node_name); 2264 rport_ids.node_name = wwn_to_u64(fcport->node_name);
2254 rport_ids.port_name = wwn_to_u64(fcport->port_name); 2265 rport_ids.port_name = wwn_to_u64(fcport->port_name);
2255 rport_ids.port_id = fcport->d_id.b.domain << 16 | 2266 rport_ids.port_id = fcport->d_id.b.domain << 16 |
2256 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa; 2267 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2257 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 2268 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2258 rport = fc_remote_port_add(ha->host, 0, &rport_ids); 2269 fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
2259 if (!rport) { 2270 if (!rport) {
2260 qla_printk(KERN_WARNING, ha, 2271 qla_printk(KERN_WARNING, ha,
2261 "Unable to allocate fc remote port!\n"); 2272 "Unable to allocate fc remote port!\n");
2262 return; 2273 return;
2263 } 2274 }
2264 spin_lock_irqsave(&fcport->rport_lock, flags); 2275 spin_lock_irq(fcport->ha->host->host_lock);
2265 fcport->rport = rport;
2266 *((fc_port_t **)rport->dd_data) = fcport; 2276 *((fc_port_t **)rport->dd_data) = fcport;
2267 spin_unlock_irqrestore(&fcport->rport_lock, flags); 2277 spin_unlock_irq(fcport->ha->host->host_lock);
2268 2278
2269 rport->supported_classes = fcport->supported_classes; 2279 rport->supported_classes = fcport->supported_classes;
2270 2280
@@ -2565,7 +2575,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2565 } else if (qla2x00_gnn_id(ha, swl) != QLA_SUCCESS) { 2575 } else if (qla2x00_gnn_id(ha, swl) != QLA_SUCCESS) {
2566 kfree(swl); 2576 kfree(swl);
2567 swl = NULL; 2577 swl = NULL;
2568 } else if (qla2x00_gfpn_id(ha, swl) == QLA_SUCCESS) { 2578 } else if (ql2xiidmaenable &&
2579 qla2x00_gfpn_id(ha, swl) == QLA_SUCCESS) {
2569 qla2x00_gpsc(ha, swl); 2580 qla2x00_gpsc(ha, swl);
2570 } 2581 }
2571 } 2582 }
@@ -3220,7 +3231,8 @@ qla2x00_update_fcports(scsi_qla_host_t *ha)
3220 3231
3221 /* Go with deferred removal of rport references. */ 3232 /* Go with deferred removal of rport references. */
3222 list_for_each_entry(fcport, &ha->fcports, list) 3233 list_for_each_entry(fcport, &ha->fcports, list)
3223 if (fcport->drport) 3234 if (fcport->drport &&
3235 atomic_read(&fcport->state) != FCS_UNCONFIGURED)
3224 qla2x00_rport_del(fcport); 3236 qla2x00_rport_del(fcport);
3225} 3237}
3226 3238
@@ -3243,6 +3255,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3243 if (ha->flags.online) { 3255 if (ha->flags.online) {
3244 ha->flags.online = 0; 3256 ha->flags.online = 0;
3245 clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 3257 clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
3258 ha->qla_stats.total_isp_aborts++;
3246 3259
3247 qla_printk(KERN_INFO, ha, 3260 qla_printk(KERN_INFO, ha,
3248 "Performing ISP error recovery - ha= %p.\n", ha); 3261 "Performing ISP error recovery - ha= %p.\n", ha);
@@ -3283,17 +3296,6 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3283 ha->isp_abort_cnt = 0; 3296 ha->isp_abort_cnt = 0;
3284 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 3297 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
3285 3298
3286 if (ha->eft) {
3287 memset(ha->eft, 0, EFT_SIZE);
3288 rval = qla2x00_enable_eft_trace(ha,
3289 ha->eft_dma, EFT_NUM_BUFFERS);
3290 if (rval) {
3291 qla_printk(KERN_WARNING, ha,
3292 "Unable to reinitialize EFT "
3293 "(%d).\n", rval);
3294 }
3295 }
3296
3297 if (ha->fce) { 3299 if (ha->fce) {
3298 ha->flags.fce_enabled = 1; 3300 ha->flags.fce_enabled = 1;
3299 memset(ha->fce, 0, 3301 memset(ha->fce, 0,
@@ -3308,6 +3310,17 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3308 ha->flags.fce_enabled = 0; 3310 ha->flags.fce_enabled = 0;
3309 } 3311 }
3310 } 3312 }
3313
3314 if (ha->eft) {
3315 memset(ha->eft, 0, EFT_SIZE);
3316 rval = qla2x00_enable_eft_trace(ha,
3317 ha->eft_dma, EFT_NUM_BUFFERS);
3318 if (rval) {
3319 qla_printk(KERN_WARNING, ha,
3320 "Unable to reinitialize EFT "
3321 "(%d).\n", rval);
3322 }
3323 }
3311 } else { /* failed the ISP abort */ 3324 } else { /* failed the ISP abort */
3312 ha->flags.online = 1; 3325 ha->flags.online = 1;
3313 if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) { 3326 if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) {
@@ -4026,8 +4039,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha)
4026 ret = qla2x00_stop_firmware(ha); 4039 ret = qla2x00_stop_firmware(ha);
4027 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && 4040 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
4028 retries ; retries--) { 4041 retries ; retries--) {
4029 qla2x00_reset_chip(ha); 4042 ha->isp_ops->reset_chip(ha);
4030 if (qla2x00_chip_diag(ha) != QLA_SUCCESS) 4043 if (ha->isp_ops->chip_diag(ha) != QLA_SUCCESS)
4031 continue; 4044 continue;
4032 if (qla2x00_setup_chip(ha) != QLA_SUCCESS) 4045 if (qla2x00_setup_chip(ha) != QLA_SUCCESS)
4033 continue; 4046 continue;
@@ -4049,7 +4062,7 @@ qla24xx_configure_vhba(scsi_qla_host_t *ha)
4049 rval = qla2x00_fw_ready(ha->parent); 4062 rval = qla2x00_fw_ready(ha->parent);
4050 if (rval == QLA_SUCCESS) { 4063 if (rval == QLA_SUCCESS) {
4051 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 4064 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
4052 qla2x00_marker(ha->parent, 0, 0, MK_SYNC_ALL); 4065 qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
4053 } 4066 }
4054 4067
4055 ha->flags.management_server_logged_in = 0; 4068 ha->flags.management_server_logged_in = 0;
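
Several themes run through the qla_init.c hunks above: the trace-buffer setup is reordered so the FCE is attempted first and the EFT second, with each allocation treated as best effort (a failure simply falls through via the try_eft/cont_alloc labels), model_desc becomes a fixed buffer filled by strncpy, and rport teardown now serializes on the SCSI host lock. The goto-based best-effort allocation shape is sketched below in plain userspace C; the buffer sizes and names are illustrative only.

#include <stdio.h>
#include <stdlib.h>

struct fw_dump_ctx {
        void *fce;
        void *eft;
        size_t fce_size;
        size_t eft_size;
};

static int setup_trace_buffers(struct fw_dump_ctx *ctx, int has_fce)
{
        ctx->fce = ctx->eft = NULL;
        ctx->fce_size = ctx->eft_size = 0;

        if (!has_fce)
                goto try_eft;           /* FCE exists only on some chips */

        ctx->fce = malloc(64 * 1024);
        if (!ctx->fce) {
                fprintf(stderr, "FCE allocation failed, continuing\n");
                goto try_eft;
        }
        ctx->fce_size = 64 * 1024;

try_eft:
        ctx->eft = malloc(128 * 1024);
        if (!ctx->eft) {
                fprintf(stderr, "EFT allocation failed, continuing\n");
                goto done;
        }
        ctx->eft_size = 128 * 1024;

done:
        return 0;                       /* trace buffers are best effort */
}

int main(void)
{
        struct fw_dump_ctx ctx = { 0 };

        setup_trace_buffers(&ctx, 1);
        printf("fce=%zu eft=%zu\n", ctx.fce_size, ctx.eft_size);
        free(ctx.fce);
        free(ctx.eft);
        return 0;
}
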
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 5489d5024673..d57669aa4615 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -454,10 +454,11 @@ qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
454{ 454{
455 int ret; 455 int ret;
456 unsigned long flags = 0; 456 unsigned long flags = 0;
457 scsi_qla_host_t *pha = to_qla_parent(ha);
457 458
458 spin_lock_irqsave(&ha->hardware_lock, flags); 459 spin_lock_irqsave(&pha->hardware_lock, flags);
459 ret = __qla2x00_marker(ha, loop_id, lun, type); 460 ret = __qla2x00_marker(ha, loop_id, lun, type);
460 spin_unlock_irqrestore(&ha->hardware_lock, flags); 461 spin_unlock_irqrestore(&pha->hardware_lock, flags);
461 462
462 return (ret); 463 return (ret);
463} 464}
@@ -672,7 +673,7 @@ qla24xx_start_scsi(srb_t *sp)
672{ 673{
673 int ret, nseg; 674 int ret, nseg;
674 unsigned long flags; 675 unsigned long flags;
675 scsi_qla_host_t *ha; 676 scsi_qla_host_t *ha, *pha;
676 struct scsi_cmnd *cmd; 677 struct scsi_cmnd *cmd;
677 uint32_t *clr_ptr; 678 uint32_t *clr_ptr;
678 uint32_t index; 679 uint32_t index;
@@ -686,6 +687,7 @@ qla24xx_start_scsi(srb_t *sp)
686 /* Setup device pointers. */ 687 /* Setup device pointers. */
687 ret = 0; 688 ret = 0;
688 ha = sp->ha; 689 ha = sp->ha;
690 pha = to_qla_parent(ha);
689 reg = &ha->iobase->isp24; 691 reg = &ha->iobase->isp24;
690 cmd = sp->cmd; 692 cmd = sp->cmd;
691 /* So we know we haven't pci_map'ed anything yet */ 693 /* So we know we haven't pci_map'ed anything yet */
@@ -700,7 +702,7 @@ qla24xx_start_scsi(srb_t *sp)
700 } 702 }
701 703
702 /* Acquire ring specific lock */ 704 /* Acquire ring specific lock */
703 spin_lock_irqsave(&ha->hardware_lock, flags); 705 spin_lock_irqsave(&pha->hardware_lock, flags);
704 706
705 /* Check for room in outstanding command list. */ 707 /* Check for room in outstanding command list. */
706 handle = ha->current_outstanding_cmd; 708 handle = ha->current_outstanding_cmd;
@@ -795,14 +797,14 @@ qla24xx_start_scsi(srb_t *sp)
795 ha->response_ring_ptr->signature != RESPONSE_PROCESSED) 797 ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
796 qla24xx_process_response_queue(ha); 798 qla24xx_process_response_queue(ha);
797 799
798 spin_unlock_irqrestore(&ha->hardware_lock, flags); 800 spin_unlock_irqrestore(&pha->hardware_lock, flags);
799 return QLA_SUCCESS; 801 return QLA_SUCCESS;
800 802
801queuing_error: 803queuing_error:
802 if (tot_dsds) 804 if (tot_dsds)
803 scsi_dma_unmap(cmd); 805 scsi_dma_unmap(cmd);
804 806
805 spin_unlock_irqrestore(&ha->hardware_lock, flags); 807 spin_unlock_irqrestore(&pha->hardware_lock, flags);
806 808
807 return QLA_FUNCTION_FAILED; 809 return QLA_FUNCTION_FAILED;
808} 810}
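
The qla_iocb.c hunks above resolve the host through to_qla_parent() before taking hardware_lock: virtual (NPIV) ports share the physical HBA's request ring, so ring manipulation must serialize on the parent's lock rather than the vport's own. A userspace approximation of that parent-lock pattern, with illustrative types (the driver itself uses spin_lock_irqsave on the parent's hardware_lock):

#include <pthread.h>
#include <stdio.h>

struct host {
        struct host *parent;            /* NULL for the physical host */
        pthread_mutex_t hardware_lock;
        int ring_index;
};

static struct host physical = { NULL, PTHREAD_MUTEX_INITIALIZER, 0 };
static struct host vport    = { &physical, PTHREAD_MUTEX_INITIALIZER, 0 };

static struct host *to_parent(struct host *h)
{
        return h->parent ? h->parent : h;
}

static void queue_command(struct host *h)
{
        struct host *ph = to_parent(h);

        pthread_mutex_lock(&ph->hardware_lock); /* the shared ring's lock */
        ph->ring_index++;                       /* touch shared ring state */
        pthread_mutex_unlock(&ph->hardware_lock);
}

int main(void)
{
        queue_command(&physical);
        queue_command(&vport);          /* serializes on the parent's lock */
        printf("physical ring index = %d\n", physical.ring_index);
        return 0;
}
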
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ec63b79f900a..874d802edb7d 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -542,10 +542,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
542 break; 542 break;
543 543
544 case MBA_PORT_UPDATE: /* Port database update */ 544 case MBA_PORT_UPDATE: /* Port database update */
545 /* Only handle SCNs for our Vport index. */
546 if (ha->parent && ha->vp_idx != (mb[3] & 0xff))
547 break;
548
549 /* 545 /*
 550 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 546 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
551 * event etc. earlier indicating loop is down) then process 547 * event etc. earlier indicating loop is down) then process
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 250d2f604397..bc90d6b8d0a0 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -918,6 +918,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
918 rval = qla2x00_mailbox_command(ha, mcp); 918 rval = qla2x00_mailbox_command(ha, mcp);
919 if (mcp->mb[0] == MBS_COMMAND_ERROR) 919 if (mcp->mb[0] == MBS_COMMAND_ERROR)
920 rval = QLA_COMMAND_ERROR; 920 rval = QLA_COMMAND_ERROR;
921 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
922 rval = QLA_INVALID_COMMAND;
921 923
922 /* Return data. */ 924 /* Return data. */
923 *id = mcp->mb[1]; 925 *id = mcp->mb[1];
@@ -2161,17 +2163,18 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
2161 struct abort_entry_24xx *abt; 2163 struct abort_entry_24xx *abt;
2162 dma_addr_t abt_dma; 2164 dma_addr_t abt_dma;
2163 uint32_t handle; 2165 uint32_t handle;
2166 scsi_qla_host_t *pha = to_qla_parent(ha);
2164 2167
2165 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2168 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2166 2169
2167 fcport = sp->fcport; 2170 fcport = sp->fcport;
2168 2171
2169 spin_lock_irqsave(&ha->hardware_lock, flags); 2172 spin_lock_irqsave(&pha->hardware_lock, flags);
2170 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 2173 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
2171 if (ha->outstanding_cmds[handle] == sp) 2174 if (pha->outstanding_cmds[handle] == sp)
2172 break; 2175 break;
2173 } 2176 }
2174 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2177 spin_unlock_irqrestore(&pha->hardware_lock, flags);
2175 if (handle == MAX_OUTSTANDING_COMMANDS) { 2178 if (handle == MAX_OUTSTANDING_COMMANDS) {
2176 /* Command not found. */ 2179 /* Command not found. */
2177 return QLA_FUNCTION_FAILED; 2180 return QLA_FUNCTION_FAILED;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 62a3ad6e8ecb..50baf6a1d67c 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -43,6 +43,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
43 43
44 set_bit(vp_id, ha->vp_idx_map); 44 set_bit(vp_id, ha->vp_idx_map);
45 ha->num_vhosts++; 45 ha->num_vhosts++;
46 ha->cur_vport_count++;
46 vha->vp_idx = vp_id; 47 vha->vp_idx = vp_id;
47 list_add_tail(&vha->vp_list, &ha->vp_list); 48 list_add_tail(&vha->vp_list, &ha->vp_list);
48 mutex_unlock(&ha->vport_lock); 49 mutex_unlock(&ha->vport_lock);
@@ -58,6 +59,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
58 mutex_lock(&ha->vport_lock); 59 mutex_lock(&ha->vport_lock);
59 vp_id = vha->vp_idx; 60 vp_id = vha->vp_idx;
60 ha->num_vhosts--; 61 ha->num_vhosts--;
62 ha->cur_vport_count--;
61 clear_bit(vp_id, ha->vp_idx_map); 63 clear_bit(vp_id, ha->vp_idx_map);
62 list_del(&vha->vp_list); 64 list_del(&vha->vp_list);
63 mutex_unlock(&ha->vport_lock); 65 mutex_unlock(&ha->vport_lock);
@@ -103,8 +105,8 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
103 "loop_id=0x%04x :%x\n", 105 "loop_id=0x%04x :%x\n",
104 vha->host_no, fcport->loop_id, fcport->vp_idx)); 106 vha->host_no, fcport->loop_id, fcport->vp_idx));
105 107
106 atomic_set(&fcport->state, FCS_DEVICE_DEAD);
107 qla2x00_mark_device_lost(vha, fcport, 0, 0); 108 qla2x00_mark_device_lost(vha, fcport, 0, 0);
109 atomic_set(&fcport->state, FCS_UNCONFIGURED);
108 } 110 }
109} 111}
110 112
@@ -276,7 +278,8 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
276 clear_bit(RESET_ACTIVE, &vha->dpc_flags); 278 clear_bit(RESET_ACTIVE, &vha->dpc_flags);
277 } 279 }
278 280
279 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 281 if (atomic_read(&vha->vp_state) == VP_ACTIVE &&
282 test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
280 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) { 283 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
281 qla2x00_loop_resync(vha); 284 qla2x00_loop_resync(vha);
282 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags); 285 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
@@ -390,7 +393,6 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
390 vha->parent = ha; 393 vha->parent = ha;
391 vha->fc_vport = fc_vport; 394 vha->fc_vport = fc_vport;
392 vha->device_flags = 0; 395 vha->device_flags = 0;
393 vha->instance = num_hosts;
394 vha->vp_idx = qla24xx_allocate_vp_id(vha); 396 vha->vp_idx = qla24xx_allocate_vp_id(vha);
395 if (vha->vp_idx > ha->max_npiv_vports) { 397 if (vha->vp_idx > ha->max_npiv_vports) {
396 DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n", 398 DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
@@ -428,7 +430,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
428 host->max_cmd_len = MAX_CMDSZ; 430 host->max_cmd_len = MAX_CMDSZ;
429 host->max_channel = MAX_BUSES - 1; 431 host->max_channel = MAX_BUSES - 1;
430 host->max_lun = MAX_LUNS; 432 host->max_lun = MAX_LUNS;
431 host->unique_id = vha->instance; 433 host->unique_id = host->host_no;
432 host->max_id = MAX_TARGETS_2200; 434 host->max_id = MAX_TARGETS_2200;
433 host->transportt = qla2xxx_transport_vport_template; 435 host->transportt = qla2xxx_transport_vport_template;
434 436
@@ -436,12 +438,6 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
436 vha->host_no, vha)); 438 vha->host_no, vha));
437 439
438 vha->flags.init_done = 1; 440 vha->flags.init_done = 1;
439 num_hosts++;
440
441 mutex_lock(&ha->vport_lock);
442 set_bit(vha->vp_idx, ha->vp_idx_map);
443 ha->cur_vport_count++;
444 mutex_unlock(&ha->vport_lock);
445 441
446 return vha; 442 return vha;
447 443
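
In qla_mid.c the cur_vport_count bookkeeping moves into qla24xx_allocate_vp_id()/qla24xx_deallocate_vp_id(), which already hold vport_lock while claiming or releasing a bit in vp_idx_map, so the counter can no longer drift from the bitmap. A simplified userspace sketch of that ID-bitmap-plus-counter pattern (not the driver's types; the real map is larger and uses the kernel bitmap helpers):

#include <pthread.h>
#include <stdio.h>

#define MAX_VPORTS 32

static pthread_mutex_t vport_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long vp_idx_map;        /* bit i set => id i in use */
static int cur_vport_count;

static int allocate_vp_id(void)
{
        int id;

        pthread_mutex_lock(&vport_lock);
        for (id = 1; id < MAX_VPORTS; id++)     /* id 0 is the physical port */
                if (!(vp_idx_map & (1UL << id)))
                        break;
        if (id < MAX_VPORTS) {
                vp_idx_map |= 1UL << id;
                cur_vport_count++;              /* same critical section */
        } else {
                id = -1;
        }
        pthread_mutex_unlock(&vport_lock);
        return id;
}

static void deallocate_vp_id(int id)
{
        pthread_mutex_lock(&vport_lock);
        vp_idx_map &= ~(1UL << id);
        cur_vport_count--;
        pthread_mutex_unlock(&vport_lock);
}

int main(void)
{
        int a = allocate_vp_id(), b = allocate_vp_id();

        printf("ids %d %d, count %d\n", a, b, cur_vport_count);
        deallocate_vp_id(a);
        deallocate_vp_id(b);
        return 0;
}
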
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 48eaa3bb5433..7c8af7ed2a5d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -27,7 +27,6 @@ char qla2x00_version_str[40];
27 */ 27 */
28static struct kmem_cache *srb_cachep; 28static struct kmem_cache *srb_cachep;
29 29
30int num_hosts;
31int ql2xlogintimeout = 20; 30int ql2xlogintimeout = 20;
32module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR); 31module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
33MODULE_PARM_DESC(ql2xlogintimeout, 32MODULE_PARM_DESC(ql2xlogintimeout,
@@ -87,6 +86,13 @@ MODULE_PARM_DESC(ql2xqfullrampup,
87 "depth for a device after a queue-full condition has been " 86 "depth for a device after a queue-full condition has been "
88 "detected. Default is 120 seconds."); 87 "detected. Default is 120 seconds.");
89 88
89int ql2xiidmaenable=1;
90module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
91MODULE_PARM_DESC(ql2xiidmaenable,
92 "Enables iIDMA settings "
93 "Default is 1 - perform iIDMA. 0 - no iIDMA.");
94
95
90/* 96/*
91 * SCSI host template entry points 97 * SCSI host template entry points
92 */ 98 */
@@ -388,7 +394,7 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
388 } 394 }
389 395
390 /* Close window on fcport/rport state-transitioning. */ 396 /* Close window on fcport/rport state-transitioning. */
391 if (!*(fc_port_t **)rport->dd_data) { 397 if (fcport->drport) {
392 cmd->result = DID_IMM_RETRY << 16; 398 cmd->result = DID_IMM_RETRY << 16;
393 goto qc_fail_command; 399 goto qc_fail_command;
394 } 400 }
@@ -443,7 +449,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
443 int rval; 449 int rval;
444 scsi_qla_host_t *pha = to_qla_parent(ha); 450 scsi_qla_host_t *pha = to_qla_parent(ha);
445 451
446 if (unlikely(pci_channel_offline(ha->pdev))) { 452 if (unlikely(pci_channel_offline(pha->pdev))) {
447 cmd->result = DID_REQUEUE << 16; 453 cmd->result = DID_REQUEUE << 16;
448 goto qc24_fail_command; 454 goto qc24_fail_command;
449 } 455 }
@@ -455,7 +461,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
455 } 461 }
456 462
457 /* Close window on fcport/rport state-transitioning. */ 463 /* Close window on fcport/rport state-transitioning. */
458 if (!*(fc_port_t **)rport->dd_data) { 464 if (fcport->drport) {
459 cmd->result = DID_IMM_RETRY << 16; 465 cmd->result = DID_IMM_RETRY << 16;
460 goto qc24_fail_command; 466 goto qc24_fail_command;
461 } 467 }
@@ -617,6 +623,40 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
617 return (return_status); 623 return (return_status);
618} 624}
619 625
626void
627qla2x00_abort_fcport_cmds(fc_port_t *fcport)
628{
629 int cnt;
630 unsigned long flags;
631 srb_t *sp;
632 scsi_qla_host_t *ha = fcport->ha;
633 scsi_qla_host_t *pha = to_qla_parent(ha);
634
635 spin_lock_irqsave(&pha->hardware_lock, flags);
636 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
637 sp = pha->outstanding_cmds[cnt];
638 if (!sp)
639 continue;
640 if (sp->fcport != fcport)
641 continue;
642
643 spin_unlock_irqrestore(&pha->hardware_lock, flags);
644 if (ha->isp_ops->abort_command(ha, sp)) {
645 DEBUG2(qla_printk(KERN_WARNING, ha,
646 "Abort failed -- %lx\n", sp->cmd->serial_number));
647 } else {
648 if (qla2x00_eh_wait_on_command(ha, sp->cmd) !=
649 QLA_SUCCESS)
650 DEBUG2(qla_printk(KERN_WARNING, ha,
651 "Abort failed while waiting -- %lx\n",
652 sp->cmd->serial_number));
653
654 }
655 spin_lock_irqsave(&pha->hardware_lock, flags);
656 }
657 spin_unlock_irqrestore(&pha->hardware_lock, flags);
658}
659
620static void 660static void
621qla2x00_block_error_handler(struct scsi_cmnd *cmnd) 661qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
622{ 662{
@@ -1073,7 +1113,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1073 else 1113 else
1074 scsi_deactivate_tcq(sdev, ha->max_q_depth); 1114 scsi_deactivate_tcq(sdev, ha->max_q_depth);
1075 1115
1076 rport->dev_loss_tmo = ha->port_down_retry_count + 5; 1116 rport->dev_loss_tmo = ha->port_down_retry_count;
1077 1117
1078 return 0; 1118 return 0;
1079} 1119}
@@ -1629,9 +1669,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1629 } 1669 }
1630 host->can_queue = ha->request_q_length + 128; 1670 host->can_queue = ha->request_q_length + 128;
1631 1671
1632 /* load the F/W, read paramaters, and init the H/W */
1633 ha->instance = num_hosts;
1634
1635 mutex_init(&ha->vport_lock); 1672 mutex_init(&ha->vport_lock);
1636 init_completion(&ha->mbx_cmd_comp); 1673 init_completion(&ha->mbx_cmd_comp);
1637 complete(&ha->mbx_cmd_comp); 1674 complete(&ha->mbx_cmd_comp);
@@ -1679,7 +1716,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1679 1716
1680 host->this_id = 255; 1717 host->this_id = 255;
1681 host->cmd_per_lun = 3; 1718 host->cmd_per_lun = 3;
1682 host->unique_id = ha->instance; 1719 host->unique_id = host->host_no;
1683 host->max_cmd_len = MAX_CMDSZ; 1720 host->max_cmd_len = MAX_CMDSZ;
1684 host->max_channel = MAX_BUSES - 1; 1721 host->max_channel = MAX_BUSES - 1;
1685 host->max_lun = MAX_LUNS; 1722 host->max_lun = MAX_LUNS;
@@ -1700,8 +1737,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1700 ha->flags.init_done = 1; 1737 ha->flags.init_done = 1;
1701 ha->flags.online = 1; 1738 ha->flags.online = 1;
1702 1739
1703 num_hosts++;
1704
1705 ret = scsi_add_host(host, &pdev->dev); 1740 ret = scsi_add_host(host, &pdev->dev);
1706 if (ret) 1741 if (ret)
1707 goto probe_failed; 1742 goto probe_failed;
@@ -1813,27 +1848,21 @@ static inline void
1813qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport, 1848qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
1814 int defer) 1849 int defer)
1815{ 1850{
1816 unsigned long flags;
1817 struct fc_rport *rport; 1851 struct fc_rport *rport;
1852 scsi_qla_host_t *pha = to_qla_parent(ha);
1818 1853
1819 if (!fcport->rport) 1854 if (!fcport->rport)
1820 return; 1855 return;
1821 1856
1822 rport = fcport->rport; 1857 rport = fcport->rport;
1823 if (defer) { 1858 if (defer) {
1824 spin_lock_irqsave(&fcport->rport_lock, flags); 1859 spin_lock_irq(ha->host->host_lock);
1825 fcport->drport = rport; 1860 fcport->drport = rport;
1826 fcport->rport = NULL; 1861 spin_unlock_irq(ha->host->host_lock);
1827 *(fc_port_t **)rport->dd_data = NULL; 1862 set_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags);
1828 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1863 qla2xxx_wake_dpc(pha);
1829 set_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags); 1864 } else
1830 } else {
1831 spin_lock_irqsave(&fcport->rport_lock, flags);
1832 fcport->rport = NULL;
1833 *(fc_port_t **)rport->dd_data = NULL;
1834 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1835 fc_remote_port_delete(rport); 1865 fc_remote_port_delete(rport);
1836 }
1837} 1866}
1838 1867
1839/* 1868/*
@@ -1903,7 +1932,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
1903 scsi_qla_host_t *pha = to_qla_parent(ha); 1932 scsi_qla_host_t *pha = to_qla_parent(ha);
1904 1933
1905 list_for_each_entry(fcport, &pha->fcports, list) { 1934 list_for_each_entry(fcport, &pha->fcports, list) {
1906 if (ha->vp_idx != 0 && ha->vp_idx != fcport->vp_idx) 1935 if (ha->vp_idx != fcport->vp_idx)
1907 continue; 1936 continue;
1908 /* 1937 /*
1909 * No point in marking the device as lost, if the device is 1938 * No point in marking the device as lost, if the device is
@@ -1911,17 +1940,10 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
1911 */ 1940 */
1912 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) 1941 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
1913 continue; 1942 continue;
1914 if (atomic_read(&fcport->state) == FCS_ONLINE) { 1943 if (atomic_read(&fcport->state) == FCS_ONLINE)
1915 if (defer) 1944 qla2x00_schedule_rport_del(ha, fcport, defer);
1916 qla2x00_schedule_rport_del(ha, fcport, defer);
1917 else if (ha->vp_idx == fcport->vp_idx)
1918 qla2x00_schedule_rport_del(ha, fcport, defer);
1919 }
1920 atomic_set(&fcport->state, FCS_DEVICE_LOST); 1945 atomic_set(&fcport->state, FCS_DEVICE_LOST);
1921 } 1946 }
1922
1923 if (defer)
1924 qla2xxx_wake_dpc(ha);
1925} 1947}
1926 1948
1927/* 1949/*
@@ -2156,7 +2178,7 @@ qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
2156static int 2178static int
2157qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked) 2179qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked)
2158{ 2180{
2159 unsigned long flags; 2181 unsigned long uninitialized_var(flags);
2160 scsi_qla_host_t *pha = to_qla_parent(ha); 2182 scsi_qla_host_t *pha = to_qla_parent(ha);
2161 2183
2162 if (!locked) 2184 if (!locked)
@@ -2313,8 +2335,10 @@ qla2x00_do_dpc(void *data)
2313 ha->host_no)); 2335 ha->host_no));
2314 } 2336 }
2315 2337
2316 if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags)) 2338 if (test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags)) {
2317 qla2x00_update_fcports(ha); 2339 qla2x00_update_fcports(ha);
2340 clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags);
2341 }
2318 2342
2319 if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) && 2343 if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) &&
2320 (!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) { 2344 (!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) {
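
The new qla2x00_abort_fcport_cmds() in qla_os.c scans the parent HBA's outstanding_cmds table under hardware_lock but releases the lock around the blocking abort and wait calls, re-taking it before continuing the scan. A single-threaded userspace approximation of that lock-drop loop follows; the names are illustrative, and a real implementation must also tolerate the table changing while the lock is dropped.

#include <pthread.h>
#include <stdio.h>

#define MAX_OUTSTANDING 8

struct cmd {
        int port;
};

static pthread_mutex_t hardware_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cmd *outstanding[MAX_OUTSTANDING];

static void abort_command(struct cmd *c)        /* may block; lock not held */
{
        printf("aborting command for port %d\n", c->port);
}

static void abort_port_cmds(int port)
{
        int i;

        pthread_mutex_lock(&hardware_lock);
        for (i = 0; i < MAX_OUTSTANDING; i++) {
                struct cmd *c = outstanding[i];

                if (!c || c->port != port)
                        continue;

                pthread_mutex_unlock(&hardware_lock);   /* drop for blocking call */
                abort_command(c);
                pthread_mutex_lock(&hardware_lock);     /* re-take and continue */
        }
        pthread_mutex_unlock(&hardware_lock);
}

int main(void)
{
        struct cmd c0 = { 1 }, c1 = { 2 };

        outstanding[0] = &c0;
        outstanding[3] = &c1;
        abort_port_cmds(1);
        return 0;
}
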
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 1728ab3ccb20..1bca74474935 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -869,11 +869,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
869 uint32_t i; 869 uint32_t i;
870 uint32_t *dwptr; 870 uint32_t *dwptr;
871 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 871 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
872 unsigned long flags;
873 872
874 ret = QLA_SUCCESS; 873 ret = QLA_SUCCESS;
875 874
876 spin_lock_irqsave(&ha->hardware_lock, flags);
877 /* Enable flash write. */ 875 /* Enable flash write. */
878 WRT_REG_DWORD(&reg->ctrl_status, 876 WRT_REG_DWORD(&reg->ctrl_status,
879 RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE); 877 RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
@@ -907,7 +905,6 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
907 WRT_REG_DWORD(&reg->ctrl_status, 905 WRT_REG_DWORD(&reg->ctrl_status,
908 RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE); 906 RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
909 RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */ 907 RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
910 spin_unlock_irqrestore(&ha->hardware_lock, flags);
911 908
912 return ret; 909 return ret;
913} 910}
@@ -2306,6 +2303,51 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2306} 2303}
2307 2304
2308static int 2305static int
2306qla2xxx_is_vpd_valid(uint8_t *pos, uint8_t *end)
2307{
2308 if (pos >= end || *pos != 0x82)
2309 return 0;
2310
2311 pos += 3 + pos[1];
2312 if (pos >= end || *pos != 0x90)
2313 return 0;
2314
2315 pos += 3 + pos[1];
2316 if (pos >= end || *pos != 0x78)
2317 return 0;
2318
2319 return 1;
2320}
2321
2322int
2323qla2xxx_get_vpd_field(scsi_qla_host_t *ha, char *key, char *str, size_t size)
2324{
2325 uint8_t *pos = ha->vpd;
2326 uint8_t *end = pos + ha->vpd_size;
2327 int len = 0;
2328
2329 if (!IS_FWI2_CAPABLE(ha) || !qla2xxx_is_vpd_valid(pos, end))
2330 return 0;
2331
2332 while (pos < end && *pos != 0x78) {
2333 len = (*pos == 0x82) ? pos[1] : pos[2];
2334
2335 if (!strncmp(pos, key, strlen(key)))
2336 break;
2337
2338 if (*pos != 0x90 && *pos != 0x91)
2339 pos += len;
2340
2341 pos += 3;
2342 }
2343
2344 if (pos < end - len && *pos != 0x78)
2345 return snprintf(str, size, "%.*s", len, pos + 3);
2346
2347 return 0;
2348}
2349
2350static int
2309qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata) 2351qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
2310{ 2352{
2311 uint32_t d[2], faddr; 2353 uint32_t d[2], faddr;
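
qla2xxx_get_vpd_field(), added above, walks the adapter's VPD image by large-resource tag: 0x82 is the identifier string, 0x90/0x91 are the read/write VPD sections, and 0x78 ends the list. The simplified userspace reader below extracts just the identifier string from a hand-built VPD blob; it is a sketch of the tag walk, not the driver routine.

#include <stdio.h>
#include <stdint.h>

static int vpd_get_ident(const uint8_t *vpd, size_t size,
                         char *out, size_t outlen)
{
        const uint8_t *pos = vpd, *end = vpd + size;

        while (pos + 3 <= end && *pos != 0x78) {        /* 0x78 = end tag */
                size_t len = pos[1] | (pos[2] << 8);    /* 16-bit length */

                if (*pos == 0x82) {                     /* identifier string */
                        if (pos + 3 + len > end)
                                return 0;
                        snprintf(out, outlen, "%.*s", (int)len,
                                 (const char *)(pos + 3));
                        return 1;
                }
                pos += 3 + len;                         /* next large resource */
        }
        return 0;
}

int main(void)
{
        /* Hand-built VPD: 0x82 tag, length 4, "HBA0", then end tag. */
        uint8_t vpd[] = { 0x82, 0x04, 0x00, 'H', 'B', 'A', '0', 0x78 };
        char ident[32];

        if (vpd_get_ident(vpd, sizeof(vpd), ident, sizeof(ident)))
                printf("identifier: %s\n", ident);
        return 0;
}
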
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index d058c8862b35..676c390db354 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.02.01-k4" 10#define QLA2XXX_VERSION "8.02.01-k6"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 2 13#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 5822dd595826..88bebb13bc52 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -46,6 +46,8 @@ MODULE_PARM_DESC(ql4xextended_error_logging,
46 46
47int ql4_mod_unload = 0; 47int ql4_mod_unload = 0;
48 48
49#define QL4_DEF_QDEPTH 32
50
49/* 51/*
50 * SCSI host template entry points 52 * SCSI host template entry points
51 */ 53 */
@@ -1387,7 +1389,7 @@ static int qla4xxx_slave_alloc(struct scsi_device *sdev)
1387 1389
1388 sdev->hostdata = ddb; 1390 sdev->hostdata = ddb;
1389 sdev->tagged_supported = 1; 1391 sdev->tagged_supported = 1;
1390 scsi_activate_tcq(sdev, sdev->host->can_queue); 1392 scsi_activate_tcq(sdev, QL4_DEF_QDEPTH);
1391 return 0; 1393 return 0;
1392} 1394}
1393 1395
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 36c92f961e15..ee6be596503d 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -197,11 +197,43 @@ static void
197scsi_pool_free_command(struct scsi_host_cmd_pool *pool, 197scsi_pool_free_command(struct scsi_host_cmd_pool *pool,
198 struct scsi_cmnd *cmd) 198 struct scsi_cmnd *cmd)
199{ 199{
200 if (cmd->prot_sdb)
201 kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
202
200 kmem_cache_free(pool->sense_slab, cmd->sense_buffer); 203 kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
201 kmem_cache_free(pool->cmd_slab, cmd); 204 kmem_cache_free(pool->cmd_slab, cmd);
202} 205}
203 206
204/** 207/**
208 * scsi_host_alloc_command - internal function to allocate command
209 * @shost: SCSI host whose pool to allocate from
210 * @gfp_mask: mask for the allocation
211 *
212 * Returns a fully allocated command with sense buffer and protection
213 * data buffer (where applicable) or NULL on failure
214 */
215static struct scsi_cmnd *
216scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
217{
218 struct scsi_cmnd *cmd;
219
220 cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
221 if (!cmd)
222 return NULL;
223
224 if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
225 cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);
226
227 if (!cmd->prot_sdb) {
228 scsi_pool_free_command(shost->cmd_pool, cmd);
229 return NULL;
230 }
231 }
232
233 return cmd;
234}
235
236/**
205 * __scsi_get_command - Allocate a struct scsi_cmnd 237 * __scsi_get_command - Allocate a struct scsi_cmnd
206 * @shost: host to transmit command 238 * @shost: host to transmit command
207 * @gfp_mask: allocation mask 239 * @gfp_mask: allocation mask
@@ -214,7 +246,7 @@ struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
214 struct scsi_cmnd *cmd; 246 struct scsi_cmnd *cmd;
215 unsigned char *buf; 247 unsigned char *buf;
216 248
217 cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask); 249 cmd = scsi_host_alloc_command(shost, gfp_mask);
218 250
219 if (unlikely(!cmd)) { 251 if (unlikely(!cmd)) {
220 unsigned long flags; 252 unsigned long flags;
@@ -457,7 +489,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
457 /* 489 /*
458 * Get one backup command for this host. 490 * Get one backup command for this host.
459 */ 491 */
460 cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask); 492 cmd = scsi_host_alloc_command(shost, gfp_mask);
461 if (!cmd) { 493 if (!cmd) {
462 scsi_put_host_cmd_pool(gfp_mask); 494 scsi_put_host_cmd_pool(gfp_mask);
463 shost->cmd_pool = NULL; 495 shost->cmd_pool = NULL;
@@ -902,11 +934,20 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
902 934
903 spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 935 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
904 936
905 /* Check to see if the queue is managed by the block layer. 937 /*
906 * If it is, and we fail to adjust the depth, exit. */ 938 * Check to see if the queue is managed by the block layer.
907 if (blk_queue_tagged(sdev->request_queue) && 939 * If it is, and we fail to adjust the depth, exit.
908 blk_queue_resize_tags(sdev->request_queue, tags) != 0) 940 *
 909 goto out; 941 * Do not resize the tag map if it is a host wide shared bqt,
 942 * because the size should be the host's can_queue. If there
 943 * is more IO than the LLD's can_queue (so there are not enough
944 * tags) request_fn's host queue ready check will handle it.
945 */
946 if (!sdev->host->bqt) {
947 if (blk_queue_tagged(sdev->request_queue) &&
948 blk_queue_resize_tags(sdev->request_queue, tags) != 0)
949 goto out;
950 }
910 951
911 sdev->queue_depth = tags; 952 sdev->queue_depth = tags;
912 switch (tagged) { 953 switch (tagged) {
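
scsi_host_alloc_command() above pairs the command allocation with an optional protection data buffer, allocated only when the host advertises DIX support, and unwinds the command if the second allocation fails; scsi_pool_free_command() frees the pair. The shape of that conditional paired allocation, with illustrative userspace types in place of the command pool and sdb cache:

#include <stdio.h>
#include <stdlib.h>

struct cmd {
        void *prot_buf;                 /* only on protection-capable hosts */
};

static struct cmd *alloc_command(int host_has_dix)
{
        struct cmd *c = calloc(1, sizeof(*c));

        if (!c)
                return NULL;

        if (host_has_dix) {
                c->prot_buf = calloc(1, 512);
                if (!c->prot_buf) {
                        free(c);        /* unwind the first allocation */
                        return NULL;
                }
        }
        return c;
}

static void free_command(struct cmd *c)
{
        if (!c)
                return;
        free(c->prot_buf);              /* free(NULL) is a no-op */
        free(c);
}

int main(void)
{
        struct cmd *c = alloc_command(1);

        printf("prot_buf %s\n", c && c->prot_buf ? "allocated" : "absent");
        free_command(c);
        return 0;
}
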
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 01d11a01ffbf..27c633f55794 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1753,7 +1753,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
1753 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC); 1753 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
1754 if (!open_devip) { 1754 if (!open_devip) {
1755 printk(KERN_ERR "%s: out of memory at line %d\n", 1755 printk(KERN_ERR "%s: out of memory at line %d\n",
1756 __FUNCTION__, __LINE__); 1756 __func__, __LINE__);
1757 return NULL; 1757 return NULL;
1758 } 1758 }
1759 } 1759 }
@@ -2656,7 +2656,7 @@ static int sdebug_add_adapter(void)
2656 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL); 2656 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
2657 if (NULL == sdbg_host) { 2657 if (NULL == sdbg_host) {
2658 printk(KERN_ERR "%s: out of memory at line %d\n", 2658 printk(KERN_ERR "%s: out of memory at line %d\n",
2659 __FUNCTION__, __LINE__); 2659 __func__, __LINE__);
2660 return -ENOMEM; 2660 return -ENOMEM;
2661 } 2661 }
2662 2662
@@ -2667,7 +2667,7 @@ static int sdebug_add_adapter(void)
2667 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL); 2667 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
2668 if (!sdbg_devinfo) { 2668 if (!sdbg_devinfo) {
2669 printk(KERN_ERR "%s: out of memory at line %d\n", 2669 printk(KERN_ERR "%s: out of memory at line %d\n",
2670 __FUNCTION__, __LINE__); 2670 __func__, __LINE__);
2671 error = -ENOMEM; 2671 error = -ENOMEM;
2672 goto clean; 2672 goto clean;
2673 } 2673 }
@@ -2987,7 +2987,7 @@ static int sdebug_driver_probe(struct device * dev)
2987 2987
2988 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host)); 2988 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
2989 if (NULL == hpnt) { 2989 if (NULL == hpnt) {
2990 printk(KERN_ERR "%s: scsi_register failed\n", __FUNCTION__); 2990 printk(KERN_ERR "%s: scsi_register failed\n", __func__);
2991 error = -ENODEV; 2991 error = -ENODEV;
2992 return error; 2992 return error;
2993 } 2993 }
@@ -3002,7 +3002,7 @@ static int sdebug_driver_probe(struct device * dev)
3002 3002
3003 error = scsi_add_host(hpnt, &sdbg_host->dev); 3003 error = scsi_add_host(hpnt, &sdbg_host->dev);
3004 if (error) { 3004 if (error) {
3005 printk(KERN_ERR "%s: scsi_add_host failed\n", __FUNCTION__); 3005 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
3006 error = -ENODEV; 3006 error = -ENODEV;
3007 scsi_host_put(hpnt); 3007 scsi_host_put(hpnt);
3008 } else 3008 } else
@@ -3021,7 +3021,7 @@ static int sdebug_driver_remove(struct device * dev)
3021 3021
3022 if (!sdbg_host) { 3022 if (!sdbg_host) {
3023 printk(KERN_ERR "%s: Unable to locate host info\n", 3023 printk(KERN_ERR "%s: Unable to locate host info\n",
3024 __FUNCTION__); 3024 __func__);
3025 return -ENODEV; 3025 return -ENODEV;
3026 } 3026 }
3027 3027
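
This file and the following ones replace the GCC-specific __FUNCTION__ with the standard C99 predefined identifier __func__, which any conforming compiler provides:

#include <stdio.h>

static void report(void)
{
        /* __func__ is predefined by C99; no GCC extension required */
        printf("%s: called\n", __func__);
}

int main(void)
{
        report();
        return 0;
}
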
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index a235802f2981..4969e4ec75ea 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -272,7 +272,7 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
272 } 272 }
273 if (from_length > to_length) 273 if (from_length > to_length)
274 printk(KERN_WARNING "%s: %s string '%s' is too long\n", 274 printk(KERN_WARNING "%s: %s string '%s' is too long\n",
275 __FUNCTION__, name, from); 275 __func__, name, from);
276} 276}
277 277
278/** 278/**
@@ -298,7 +298,7 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
298 298
299 devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL); 299 devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL);
300 if (!devinfo) { 300 if (!devinfo) {
301 printk(KERN_ERR "%s: no memory\n", __FUNCTION__); 301 printk(KERN_ERR "%s: no memory\n", __func__);
302 return -ENOMEM; 302 return -ENOMEM;
303 } 303 }
304 304
@@ -363,7 +363,7 @@ static int scsi_dev_info_list_add_str(char *dev_list)
363 strflags = strsep(&next, next_check); 363 strflags = strsep(&next, next_check);
364 if (!model || !strflags) { 364 if (!model || !strflags) {
365 printk(KERN_ERR "%s: bad dev info string '%s' '%s'" 365 printk(KERN_ERR "%s: bad dev info string '%s' '%s'"
366 " '%s'\n", __FUNCTION__, vendor, model, 366 " '%s'\n", __func__, vendor, model,
367 strflags); 367 strflags);
368 res = -EINVAL; 368 res = -EINVAL;
369 } else 369 } else
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 006a95916f72..880051c89bde 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -139,7 +139,7 @@ void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
139 scmd->eh_timeout.function = (void (*)(unsigned long)) complete; 139 scmd->eh_timeout.function = (void (*)(unsigned long)) complete;
140 140
141 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:" 141 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:"
142 " %d, (%p)\n", __FUNCTION__, 142 " %d, (%p)\n", __func__,
143 scmd, timeout, complete)); 143 scmd, timeout, complete));
144 144
145 add_timer(&scmd->eh_timeout); 145 add_timer(&scmd->eh_timeout);
@@ -163,7 +163,7 @@ int scsi_delete_timer(struct scsi_cmnd *scmd)
163 rtn = del_timer(&scmd->eh_timeout); 163 rtn = del_timer(&scmd->eh_timeout);
164 164
165 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p," 165 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p,"
166 " rtn: %d\n", __FUNCTION__, 166 " rtn: %d\n", __func__,
167 scmd, rtn)); 167 scmd, rtn));
168 168
169 scmd->eh_timeout.data = (unsigned long)NULL; 169 scmd->eh_timeout.data = (unsigned long)NULL;
@@ -233,7 +233,7 @@ int scsi_block_when_processing_errors(struct scsi_device *sdev)
233 233
234 online = scsi_device_online(sdev); 234 online = scsi_device_online(sdev);
235 235
236 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __FUNCTION__, 236 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __func__,
237 online)); 237 online));
238 238
239 return online; 239 return online;
@@ -271,7 +271,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
271 SCSI_LOG_ERROR_RECOVERY(3, 271 SCSI_LOG_ERROR_RECOVERY(3,
272 sdev_printk(KERN_INFO, sdev, 272 sdev_printk(KERN_INFO, sdev,
273 "%s: cmds failed: %d, cancel: %d\n", 273 "%s: cmds failed: %d, cancel: %d\n",
274 __FUNCTION__, cmd_failed, 274 __func__, cmd_failed,
275 cmd_cancel)); 275 cmd_cancel));
276 cmd_cancel = 0; 276 cmd_cancel = 0;
277 cmd_failed = 0; 277 cmd_failed = 0;
@@ -344,6 +344,9 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
344 return /* soft_error */ SUCCESS; 344 return /* soft_error */ SUCCESS;
345 345
346 case ABORTED_COMMAND: 346 case ABORTED_COMMAND:
347 if (sshdr.asc == 0x10) /* DIF */
348 return SUCCESS;
349
347 return NEEDS_RETRY; 350 return NEEDS_RETRY;
348 case NOT_READY: 351 case NOT_READY:
349 case UNIT_ATTENTION: 352 case UNIT_ATTENTION:
@@ -470,7 +473,7 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
470 473
471 SCSI_LOG_ERROR_RECOVERY(3, 474 SCSI_LOG_ERROR_RECOVERY(3,
472 printk("%s scmd: %p result: %x\n", 475 printk("%s scmd: %p result: %x\n",
473 __FUNCTION__, scmd, scmd->result)); 476 __func__, scmd, scmd->result));
474 477
475 eh_action = scmd->device->host->eh_action; 478 eh_action = scmd->device->host->eh_action;
476 if (eh_action) 479 if (eh_action)
@@ -487,7 +490,7 @@ static int scsi_try_host_reset(struct scsi_cmnd *scmd)
487 int rtn; 490 int rtn;
488 491
489 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n", 492 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n",
490 __FUNCTION__)); 493 __func__));
491 494
492 if (!scmd->device->host->hostt->eh_host_reset_handler) 495 if (!scmd->device->host->hostt->eh_host_reset_handler)
493 return FAILED; 496 return FAILED;
@@ -516,7 +519,7 @@ static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
516 int rtn; 519 int rtn;
517 520
518 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n", 521 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n",
519 __FUNCTION__)); 522 __func__));
520 523
521 if (!scmd->device->host->hostt->eh_bus_reset_handler) 524 if (!scmd->device->host->hostt->eh_bus_reset_handler)
522 return FAILED; 525 return FAILED;
@@ -664,7 +667,10 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
664 ses->sdb = scmd->sdb; 667 ses->sdb = scmd->sdb;
665 ses->next_rq = scmd->request->next_rq; 668 ses->next_rq = scmd->request->next_rq;
666 ses->result = scmd->result; 669 ses->result = scmd->result;
670 ses->underflow = scmd->underflow;
671 ses->prot_op = scmd->prot_op;
667 672
673 scmd->prot_op = SCSI_PROT_NORMAL;
668 scmd->cmnd = ses->eh_cmnd; 674 scmd->cmnd = ses->eh_cmnd;
669 memset(scmd->cmnd, 0, BLK_MAX_CDB); 675 memset(scmd->cmnd, 0, BLK_MAX_CDB);
670 memset(&scmd->sdb, 0, sizeof(scmd->sdb)); 676 memset(&scmd->sdb, 0, sizeof(scmd->sdb));
@@ -722,6 +728,8 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
722 scmd->sdb = ses->sdb; 728 scmd->sdb = ses->sdb;
723 scmd->request->next_rq = ses->next_rq; 729 scmd->request->next_rq = ses->next_rq;
724 scmd->result = ses->result; 730 scmd->result = ses->result;
731 scmd->underflow = ses->underflow;
732 scmd->prot_op = ses->prot_op;
725} 733}
726EXPORT_SYMBOL(scsi_eh_restore_cmnd); 734EXPORT_SYMBOL(scsi_eh_restore_cmnd);
727 735
@@ -766,7 +774,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
766 774
767 SCSI_LOG_ERROR_RECOVERY(3, 775 SCSI_LOG_ERROR_RECOVERY(3,
768 printk("%s: scmd: %p, timeleft: %ld\n", 776 printk("%s: scmd: %p, timeleft: %ld\n",
769 __FUNCTION__, scmd, timeleft)); 777 __func__, scmd, timeleft));
770 778
771 /* 779 /*
772 * If there is time left scsi_eh_done got called, and we will 780 * If there is time left scsi_eh_done got called, and we will
@@ -778,7 +786,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
778 rtn = scsi_eh_completed_normally(scmd); 786 rtn = scsi_eh_completed_normally(scmd);
779 SCSI_LOG_ERROR_RECOVERY(3, 787 SCSI_LOG_ERROR_RECOVERY(3,
780 printk("%s: scsi_eh_completed_normally %x\n", 788 printk("%s: scsi_eh_completed_normally %x\n",
781 __FUNCTION__, rtn)); 789 __func__, rtn));
782 790
783 switch (rtn) { 791 switch (rtn) {
784 case SUCCESS: 792 case SUCCESS:
@@ -913,7 +921,7 @@ retry_tur:
913 rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0); 921 rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0);
914 922
915 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", 923 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
916 __FUNCTION__, scmd, rtn)); 924 __func__, scmd, rtn));
917 925
918 switch (rtn) { 926 switch (rtn) {
919 case NEEDS_RETRY: 927 case NEEDS_RETRY:
@@ -1296,7 +1304,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
1296 if (!scsi_device_online(scmd->device)) { 1304 if (!scsi_device_online(scmd->device)) {
1297 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: device offline - report" 1305 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: device offline - report"
1298 " as SUCCESS\n", 1306 " as SUCCESS\n",
1299 __FUNCTION__)); 1307 __func__));
1300 return SUCCESS; 1308 return SUCCESS;
1301 } 1309 }
1302 1310
@@ -1511,7 +1519,7 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
1511 * ioctls to queued block devices. 1519 * ioctls to queued block devices.
1512 */ 1520 */
1513 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n", 1521 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
1514 __FUNCTION__)); 1522 __func__));
1515 1523
1516 spin_lock_irqsave(shost->host_lock, flags); 1524 spin_lock_irqsave(shost->host_lock, flags);
1517 if (scsi_host_set_state(shost, SHOST_RUNNING)) 1525 if (scsi_host_set_state(shost, SHOST_RUNNING))
@@ -1835,7 +1843,7 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
1835 */ 1843 */
1836 SCSI_LOG_ERROR_RECOVERY(3, 1844 SCSI_LOG_ERROR_RECOVERY(3,
1837 printk("%s: waking up host to restart after TMF\n", 1845 printk("%s: waking up host to restart after TMF\n",
1838 __FUNCTION__)); 1846 __func__));
1839 1847
1840 wake_up(&shost->host_wait); 1848 wake_up(&shost->host_wait);
1841 1849
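
The scsi_error.c hunks also teach the error handler about protection (DIF) failures: ABORTED COMMAND with additional sense code 0x10 is no longer retried, and scsi_eh_prep_cmnd()/scsi_eh_restore_cmnd() now save and restore underflow and prot_op around the error-handler probe command. A standalone sketch of the sense-data check (fixed-format sense only, illustrative names, not the midlayer's dispositions):

#include <stdio.h>
#include <stdint.h>

#define ABORTED_COMMAND 0x0b            /* SCSI sense key */

enum disposition { NEEDS_RETRY, HARD_ERROR };

static enum disposition check_sense(const uint8_t *sense, size_t len)
{
        uint8_t key, asc;

        if (len < 14 || (sense[0] & 0x7f) != 0x70)      /* fixed format only */
                return NEEDS_RETRY;

        key = sense[2] & 0x0f;
        asc = sense[12];

        if (key == ABORTED_COMMAND && asc == 0x10)      /* DIF/DIX guard error */
                return HARD_ERROR;                      /* retrying cannot help */

        return NEEDS_RETRY;
}

int main(void)
{
        uint8_t dif_sense[18] = {
                0x70, 0, ABORTED_COMMAND, 0, 0, 0, 0, 10,
                0, 0, 0, 0, 0x10, 0x01
        };

        printf("disposition: %s\n",
               check_sense(dif_sense, sizeof(dif_sense)) == HARD_ERROR ?
               "hard error (protection)" : "retry");
        return 0;
}
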
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 88d1b5f44e59..ff5d56b3ee4d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -65,7 +65,7 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
65}; 65};
66#undef SP 66#undef SP
67 67
68static struct kmem_cache *scsi_sdb_cache; 68struct kmem_cache *scsi_sdb_cache;
69 69
70static void scsi_run_queue(struct request_queue *q); 70static void scsi_run_queue(struct request_queue *q);
71 71
@@ -787,6 +787,9 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
787 kmem_cache_free(scsi_sdb_cache, bidi_sdb); 787 kmem_cache_free(scsi_sdb_cache, bidi_sdb);
788 cmd->request->next_rq->special = NULL; 788 cmd->request->next_rq->special = NULL;
789 } 789 }
790
791 if (scsi_prot_sg_count(cmd))
792 scsi_free_sgtable(cmd->prot_sdb);
790} 793}
791EXPORT_SYMBOL(scsi_release_buffers); 794EXPORT_SYMBOL(scsi_release_buffers);
792 795
@@ -947,9 +950,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
947 * 6-byte command. 950 * 6-byte command.
948 */ 951 */
949 scsi_requeue_command(q, cmd); 952 scsi_requeue_command(q, cmd);
950 return; 953 } else if (sshdr.asc == 0x10) /* DIX */
951 } else { 954 scsi_end_request(cmd, -EIO, this_count, 0);
955 else
952 scsi_end_request(cmd, -EIO, this_count, 1); 956 scsi_end_request(cmd, -EIO, this_count, 1);
957 return;
958 case ABORTED_COMMAND:
959 if (sshdr.asc == 0x10) { /* DIF */
960 scsi_end_request(cmd, -EIO, this_count, 0);
953 return; 961 return;
954 } 962 }
955 break; 963 break;
@@ -1072,6 +1080,26 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
1072 goto err_exit; 1080 goto err_exit;
1073 } 1081 }
1074 1082
1083 if (blk_integrity_rq(cmd->request)) {
1084 struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
1085 int ivecs, count;
1086
1087 BUG_ON(prot_sdb == NULL);
1088 ivecs = blk_rq_count_integrity_sg(cmd->request);
1089
1090 if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
1091 error = BLKPREP_DEFER;
1092 goto err_exit;
1093 }
1094
1095 count = blk_rq_map_integrity_sg(cmd->request,
1096 prot_sdb->table.sgl);
1097 BUG_ON(unlikely(count > ivecs));
1098
1099 cmd->prot_sdb = prot_sdb;
1100 cmd->prot_sdb->table.nents = count;
1101 }
1102
1075 return BLKPREP_OK ; 1103 return BLKPREP_OK ;
1076 1104
1077err_exit: 1105err_exit:
@@ -1367,7 +1395,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
1367 1395
1368 if (unlikely(cmd == NULL)) { 1396 if (unlikely(cmd == NULL)) {
1369 printk(KERN_CRIT "impossible request in %s.\n", 1397 printk(KERN_CRIT "impossible request in %s.\n",
1370 __FUNCTION__); 1398 __func__);
1371 BUG(); 1399 BUG();
1372 } 1400 }
1373 1401
@@ -1491,12 +1519,27 @@ static void scsi_request_fn(struct request_queue *q)
1491 printk(KERN_CRIT "impossible request in %s.\n" 1519 printk(KERN_CRIT "impossible request in %s.\n"
1492 "please mail a stack trace to " 1520 "please mail a stack trace to "
1493 "linux-scsi@vger.kernel.org\n", 1521 "linux-scsi@vger.kernel.org\n",
1494 __FUNCTION__); 1522 __func__);
1495 blk_dump_rq_flags(req, "foo"); 1523 blk_dump_rq_flags(req, "foo");
1496 BUG(); 1524 BUG();
1497 } 1525 }
1498 spin_lock(shost->host_lock); 1526 spin_lock(shost->host_lock);
1499 1527
1528 /*
1529 * We hit this when the driver is using a host wide
1530 * tag map. For device level tag maps the queue_depth check
1531 * in the device ready fn would prevent us from trying
1532 * to allocate a tag. Since the map is a shared host resource
1533 * we add the dev to the starved list so it eventually gets
1534 * a run when a tag is freed.
1535 */
1536 if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
1537 if (list_empty(&sdev->starved_entry))
1538 list_add_tail(&sdev->starved_entry,
1539 &shost->starved_list);
1540 goto not_ready;
1541 }
1542
1500 if (!scsi_host_queue_ready(q, shost, sdev)) 1543 if (!scsi_host_queue_ready(q, shost, sdev))
1501 goto not_ready; 1544 goto not_ready;
1502 if (scsi_target(sdev)->single_lun) { 1545 if (scsi_target(sdev)->single_lun) {
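The comment in the hunk explains the intent: with a host-wide tag map a device can fail to get a tag even though its own queue_depth check passed, so it is parked on the host's starved list and re-run when a tag is freed. A toy, self-contained model of that bookkeeping (plain singly linked list, ordering simplified, all names invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct toy_dev {
	const char     *name;
	bool            on_starved_list;
	struct toy_dev *next;		/* link on the host's starved list */
};

struct toy_host {
	int             free_tags;	/* shared, host-wide tag pool */
	struct toy_dev *starved;	/* devices waiting for a tag */
};

/* Called when a request is about to be issued. */
static bool toy_get_tag(struct toy_host *h, struct toy_dev *d)
{
	if (h->free_tags > 0) {
		h->free_tags--;
		return true;
	}
	if (!d->on_starved_list) {	/* park the device exactly once */
		d->next = h->starved;
		h->starved = d;
		d->on_starved_list = true;
	}
	return false;			/* caller requeues and waits */
}

/* Called when a command completes and its tag is returned. */
static void toy_put_tag(struct toy_host *h)
{
	h->free_tags++;
	if (h->starved) {		/* kick the first starved device */
		struct toy_dev *d = h->starved;
		h->starved = d->next;
		d->on_starved_list = false;
		printf("re-running queue of %s\n", d->name);
	}
}

int main(void)
{
	struct toy_host h = { .free_tags = 1 };
	struct toy_dev a = { .name = "sda" }, b = { .name = "sdb" };

	toy_get_tag(&h, &a);	/* takes the only tag */
	toy_get_tag(&h, &b);	/* starved, parked on the list */
	toy_put_tag(&h);	/* tag freed, sdb gets re-run */
	return 0;
}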
@@ -2486,7 +2529,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2486 if (unlikely(i == sg_count)) { 2529 if (unlikely(i == sg_count)) {
2487 printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, " 2530 printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2488 "elements %d\n", 2531 "elements %d\n",
2489 __FUNCTION__, sg_len, *offset, sg_count); 2532 __func__, sg_len, *offset, sg_count);
2490 WARN_ON(1); 2533 WARN_ON(1);
2491 return NULL; 2534 return NULL;
2492 } 2535 }
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index 370c78cc1cb5..ae7ed9a22662 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -55,7 +55,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
55 if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) || 55 if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) ||
56 (skb->len < nlh->nlmsg_len)) { 56 (skb->len < nlh->nlmsg_len)) {
57 printk(KERN_WARNING "%s: discarding partial skb\n", 57 printk(KERN_WARNING "%s: discarding partial skb\n",
58 __FUNCTION__); 58 __func__);
59 return; 59 return;
60 } 60 }
61 61
@@ -82,7 +82,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
82 82
83 if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) { 83 if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) {
84 printk(KERN_WARNING "%s: discarding partial message\n", 84 printk(KERN_WARNING "%s: discarding partial message\n",
85 __FUNCTION__); 85 __func__);
86 return; 86 return;
87 } 87 }
88 88
@@ -139,7 +139,7 @@ scsi_netlink_init(void)
139 error = netlink_register_notifier(&scsi_netlink_notifier); 139 error = netlink_register_notifier(&scsi_netlink_notifier);
140 if (error) { 140 if (error) {
141 printk(KERN_ERR "%s: register of event handler failed - %d\n", 141 printk(KERN_ERR "%s: register of event handler failed - %d\n",
142 __FUNCTION__, error); 142 __func__, error);
143 return; 143 return;
144 } 144 }
145 145
@@ -148,7 +148,7 @@ scsi_netlink_init(void)
148 THIS_MODULE); 148 THIS_MODULE);
149 if (!scsi_nl_sock) { 149 if (!scsi_nl_sock) {
150 printk(KERN_ERR "%s: register of recieve handler failed\n", 150 printk(KERN_ERR "%s: register of recieve handler failed\n",
151 __FUNCTION__); 151 __func__);
152 netlink_unregister_notifier(&scsi_netlink_notifier); 152 netlink_unregister_notifier(&scsi_netlink_notifier);
153 } 153 }
154 154
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index b33e72516ef8..79f0f7511204 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -77,6 +77,7 @@ extern void scsi_exit_queue(void);
77struct request_queue; 77struct request_queue;
78struct request; 78struct request;
79extern int scsi_prep_fn(struct request_queue *, struct request *); 79extern int scsi_prep_fn(struct request_queue *, struct request *);
80extern struct kmem_cache *scsi_sdb_cache;
80 81
81/* scsi_proc.c */ 82/* scsi_proc.c */
82#ifdef CONFIG_SCSI_PROC_FS 83#ifdef CONFIG_SCSI_PROC_FS
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index e4a0d2f9b357..c6a904a45bf9 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -114,7 +114,7 @@ void scsi_proc_hostdir_add(struct scsi_host_template *sht)
114 sht->proc_dir = proc_mkdir(sht->proc_name, proc_scsi); 114 sht->proc_dir = proc_mkdir(sht->proc_name, proc_scsi);
115 if (!sht->proc_dir) 115 if (!sht->proc_dir)
116 printk(KERN_ERR "%s: proc_mkdir failed for %s\n", 116 printk(KERN_ERR "%s: proc_mkdir failed for %s\n",
117 __FUNCTION__, sht->proc_name); 117 __func__, sht->proc_name);
118 else 118 else
119 sht->proc_dir->owner = sht->module; 119 sht->proc_dir->owner = sht->module;
120 } 120 }
@@ -157,7 +157,7 @@ void scsi_proc_host_add(struct Scsi_Host *shost)
157 sht->proc_dir, proc_scsi_read, shost); 157 sht->proc_dir, proc_scsi_read, shost);
158 if (!p) { 158 if (!p) {
159 printk(KERN_ERR "%s: Failed to register host %d in" 159 printk(KERN_ERR "%s: Failed to register host %d in"
160 "%s\n", __FUNCTION__, shost->host_no, 160 "%s\n", __func__, shost->host_no,
161 sht->proc_name); 161 sht->proc_name);
162 return; 162 return;
163 } 163 }
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 196fe3af0d5e..84b4879cff11 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -318,7 +318,7 @@ out_device_destroy:
318 put_device(&sdev->sdev_gendev); 318 put_device(&sdev->sdev_gendev);
319out: 319out:
320 if (display_failure_msg) 320 if (display_failure_msg)
321 printk(ALLOC_FAILURE_MSG, __FUNCTION__); 321 printk(ALLOC_FAILURE_MSG, __func__);
322 return NULL; 322 return NULL;
323} 323}
324 324
@@ -404,7 +404,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
404 404
405 starget = kzalloc(size, GFP_KERNEL); 405 starget = kzalloc(size, GFP_KERNEL);
406 if (!starget) { 406 if (!starget) {
407 printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__); 407 printk(KERN_ERR "%s: allocation failure\n", __func__);
408 return NULL; 408 return NULL;
409 } 409 }
410 dev = &starget->dev; 410 dev = &starget->dev;
@@ -1337,7 +1337,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
1337 lun_data = kmalloc(length, GFP_ATOMIC | 1337 lun_data = kmalloc(length, GFP_ATOMIC |
1338 (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0)); 1338 (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
1339 if (!lun_data) { 1339 if (!lun_data) {
1340 printk(ALLOC_FAILURE_MSG, __FUNCTION__); 1340 printk(ALLOC_FAILURE_MSG, __func__);
1341 goto out; 1341 goto out;
1342 } 1342 }
1343 1343
@@ -1649,7 +1649,7 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1649{ 1649{
1650 SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost, 1650 SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
1651 "%s: <%u:%u:%u>\n", 1651 "%s: <%u:%u:%u>\n",
1652 __FUNCTION__, channel, id, lun)); 1652 __func__, channel, id, lun));
1653 1653
1654 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) || 1654 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
1655 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) || 1655 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
@@ -1703,7 +1703,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
1703 return NULL; 1703 return NULL;
1704 1704
1705 if (shost->async_scan) { 1705 if (shost->async_scan) {
1706 printk("%s called twice for host %d", __FUNCTION__, 1706 printk("%s called twice for host %d", __func__,
1707 shost->host_no); 1707 shost->host_no);
1708 dump_stack(); 1708 dump_stack();
1709 return NULL; 1709 return NULL;
@@ -1757,9 +1757,10 @@ static void scsi_finish_async_scan(struct async_scan_data *data)
1757 mutex_lock(&shost->scan_mutex); 1757 mutex_lock(&shost->scan_mutex);
1758 1758
1759 if (!shost->async_scan) { 1759 if (!shost->async_scan) {
1760 printk("%s called twice for host %d", __FUNCTION__, 1760 printk("%s called twice for host %d", __func__,
1761 shost->host_no); 1761 shost->host_no);
1762 dump_stack(); 1762 dump_stack();
1763 mutex_unlock(&shost->scan_mutex);
1763 return; 1764 return;
1764 } 1765 }
1765 1766
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index b6e561059779..ab3c71869be5 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -249,6 +249,8 @@ shost_rd_attr(cmd_per_lun, "%hd\n");
249shost_rd_attr(can_queue, "%hd\n"); 249shost_rd_attr(can_queue, "%hd\n");
250shost_rd_attr(sg_tablesize, "%hu\n"); 250shost_rd_attr(sg_tablesize, "%hu\n");
251shost_rd_attr(unchecked_isa_dma, "%d\n"); 251shost_rd_attr(unchecked_isa_dma, "%d\n");
252shost_rd_attr(prot_capabilities, "%u\n");
253shost_rd_attr(prot_guard_type, "%hd\n");
252shost_rd_attr2(proc_name, hostt->proc_name, "%s\n"); 254shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
253 255
254static struct attribute *scsi_sysfs_shost_attrs[] = { 256static struct attribute *scsi_sysfs_shost_attrs[] = {
@@ -263,6 +265,8 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
263 &dev_attr_hstate.attr, 265 &dev_attr_hstate.attr,
264 &dev_attr_supported_mode.attr, 266 &dev_attr_supported_mode.attr,
265 &dev_attr_active_mode.attr, 267 &dev_attr_active_mode.attr,
268 &dev_attr_prot_capabilities.attr,
269 &dev_attr_prot_guard_type.attr,
266 NULL 270 NULL
267}; 271};
268 272
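The two new shost attributes expose the host's advertised integrity support to userspace. Assuming the usual Scsi_Host sysfs location under /sys/class/scsi_host/hostN/ and a low-level driver that fills in the capability mask, a small reader might look like the sketch below; the exact path and host number are assumptions, not taken from this patch:

#include <stdio.h>

int main(void)
{
	/* host0 is only an example; pick the host of interest. */
	const char *files[] = {
		"/sys/class/scsi_host/host0/prot_capabilities",
		"/sys/class/scsi_host/host0/prot_guard_type",
	};
	char buf[32];

	for (int i = 0; i < 2; i++) {
		FILE *f = fopen(files[i], "r");
		if (!f) {
			perror(files[i]);
			continue;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("%s: %s", files[i], buf);
		fclose(f);
	}
	return 0;
}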
diff --git a/drivers/scsi/scsi_tgt_priv.h b/drivers/scsi/scsi_tgt_priv.h
index cb92888948f9..fe4c62177f78 100644
--- a/drivers/scsi/scsi_tgt_priv.h
+++ b/drivers/scsi/scsi_tgt_priv.h
@@ -6,7 +6,7 @@ struct task_struct;
6/* tmp - will replace with SCSI logging stuff */ 6/* tmp - will replace with SCSI logging stuff */
7#define eprintk(fmt, args...) \ 7#define eprintk(fmt, args...) \
8do { \ 8do { \
9 printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \ 9 printk("%s(%d) " fmt, __func__, __LINE__, ##args); \
10} while (0) 10} while (0)
11 11
12#define dprintk(fmt, args...) 12#define dprintk(fmt, args...)
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index a272b9a2c869..56823fd1fb84 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -571,7 +571,7 @@ send_fail:
571 name = get_fc_host_event_code_name(event_code); 571 name = get_fc_host_event_code_name(event_code);
572 printk(KERN_WARNING 572 printk(KERN_WARNING
573 "%s: Dropped Event : host %d %s data 0x%08x - err %d\n", 573 "%s: Dropped Event : host %d %s data 0x%08x - err %d\n",
574 __FUNCTION__, shost->host_no, 574 __func__, shost->host_no,
575 (name) ? name : "<unknown>", event_data, err); 575 (name) ? name : "<unknown>", event_data, err);
576 return; 576 return;
577} 577}
@@ -644,7 +644,7 @@ send_vendor_fail_skb:
644send_vendor_fail: 644send_vendor_fail:
645 printk(KERN_WARNING 645 printk(KERN_WARNING
646 "%s: Dropped Event : host %d vendor_unique - err %d\n", 646 "%s: Dropped Event : host %d vendor_unique - err %d\n",
647 __FUNCTION__, shost->host_no, err); 647 __func__, shost->host_no, err);
648 return; 648 return;
649} 649}
650EXPORT_SYMBOL(fc_host_post_vendor_event); 650EXPORT_SYMBOL(fc_host_post_vendor_event);
@@ -2464,7 +2464,7 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
2464 size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size); 2464 size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size);
2465 rport = kzalloc(size, GFP_KERNEL); 2465 rport = kzalloc(size, GFP_KERNEL);
2466 if (unlikely(!rport)) { 2466 if (unlikely(!rport)) {
2467 printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__); 2467 printk(KERN_ERR "%s: allocation failure\n", __func__);
2468 return NULL; 2468 return NULL;
2469 } 2469 }
2470 2470
@@ -3137,7 +3137,7 @@ fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev,
3137 size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size); 3137 size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size);
3138 vport = kzalloc(size, GFP_KERNEL); 3138 vport = kzalloc(size, GFP_KERNEL);
3139 if (unlikely(!vport)) { 3139 if (unlikely(!vport)) {
3140 printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__); 3140 printk(KERN_ERR "%s: allocation failure\n", __func__);
3141 return -ENOMEM; 3141 return -ENOMEM;
3142 } 3142 }
3143 3143
@@ -3201,7 +3201,7 @@ fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev,
3201 printk(KERN_ERR 3201 printk(KERN_ERR
3202 "%s: Cannot create vport symlinks for " 3202 "%s: Cannot create vport symlinks for "
3203 "%s, err=%d\n", 3203 "%s, err=%d\n",
3204 __FUNCTION__, dev->bus_id, error); 3204 __func__, dev->bus_id, error);
3205 } 3205 }
3206 spin_lock_irqsave(shost->host_lock, flags); 3206 spin_lock_irqsave(shost->host_lock, flags);
3207 vport->flags &= ~FC_VPORT_CREATING; 3207 vport->flags &= ~FC_VPORT_CREATING;
@@ -3314,7 +3314,7 @@ fc_vport_sched_delete(struct work_struct *work)
3314 if (stat) 3314 if (stat)
3315 dev_printk(KERN_ERR, vport->dev.parent, 3315 dev_printk(KERN_ERR, vport->dev.parent,
3316 "%s: %s could not be deleted created via " 3316 "%s: %s could not be deleted created via "
3317 "shost%d channel %d - error %d\n", __FUNCTION__, 3317 "shost%d channel %d - error %d\n", __func__,
3318 vport->dev.bus_id, vport->shost->host_no, 3318 vport->dev.bus_id, vport->shost->host_no,
3319 vport->channel, stat); 3319 vport->channel, stat);
3320} 3320}
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index f4461d35ffb9..366609386be1 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -779,7 +779,7 @@ static void sas_port_create_link(struct sas_port *port,
779 return; 779 return;
780err: 780err:
781 printk(KERN_ERR "%s: Cannot create port links, err=%d\n", 781 printk(KERN_ERR "%s: Cannot create port links, err=%d\n",
782 __FUNCTION__, res); 782 __func__, res);
783} 783}
784 784
785static void sas_port_delete_link(struct sas_port *port, 785static void sas_port_delete_link(struct sas_port *port,
@@ -1029,7 +1029,7 @@ void sas_port_mark_backlink(struct sas_port *port)
1029 return; 1029 return;
1030err: 1030err:
1031 printk(KERN_ERR "%s: Cannot create port backlink, err=%d\n", 1031 printk(KERN_ERR "%s: Cannot create port backlink, err=%d\n",
1032 __FUNCTION__, res); 1032 __func__, res);
1033 1033
1034} 1034}
1035EXPORT_SYMBOL(sas_port_mark_backlink); 1035EXPORT_SYMBOL(sas_port_mark_backlink);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 0c63947d8a9d..e5e7d7856454 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -99,8 +99,7 @@ static void scsi_disk_release(struct device *cdev);
99static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *); 99static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
100static void sd_print_result(struct scsi_disk *, int); 100static void sd_print_result(struct scsi_disk *, int);
101 101
102static DEFINE_IDR(sd_index_idr); 102static DEFINE_IDA(sd_index_ida);
103static DEFINE_SPINLOCK(sd_index_lock);
104 103
105/* This semaphore is used to mediate the 0->1 reference get in the 104/* This semaphore is used to mediate the 0->1 reference get in the
106 * face of object destruction (i.e. we can't allow a get on an 105 * face of object destruction (i.e. we can't allow a get on an
@@ -234,6 +233,24 @@ sd_show_allow_restart(struct device *dev, struct device_attribute *attr,
234 return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart); 233 return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart);
235} 234}
236 235
236static ssize_t
237sd_show_protection_type(struct device *dev, struct device_attribute *attr,
238 char *buf)
239{
240 struct scsi_disk *sdkp = to_scsi_disk(dev);
241
242 return snprintf(buf, 20, "%u\n", sdkp->protection_type);
243}
244
245static ssize_t
246sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
247 char *buf)
248{
249 struct scsi_disk *sdkp = to_scsi_disk(dev);
250
251 return snprintf(buf, 20, "%u\n", sdkp->ATO);
252}
253
237static struct device_attribute sd_disk_attrs[] = { 254static struct device_attribute sd_disk_attrs[] = {
238 __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type, 255 __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
239 sd_store_cache_type), 256 sd_store_cache_type),
@@ -242,6 +259,8 @@ static struct device_attribute sd_disk_attrs[] = {
242 sd_store_allow_restart), 259 sd_store_allow_restart),
243 __ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop, 260 __ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop,
244 sd_store_manage_start_stop), 261 sd_store_manage_start_stop),
262 __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
263 __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
245 __ATTR_NULL, 264 __ATTR_NULL,
246}; 265};
247 266
@@ -354,7 +373,9 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
354 struct scsi_cmnd *SCpnt; 373 struct scsi_cmnd *SCpnt;
355 struct scsi_device *sdp = q->queuedata; 374 struct scsi_device *sdp = q->queuedata;
356 struct gendisk *disk = rq->rq_disk; 375 struct gendisk *disk = rq->rq_disk;
376 struct scsi_disk *sdkp;
357 sector_t block = rq->sector; 377 sector_t block = rq->sector;
378 sector_t threshold;
358 unsigned int this_count = rq->nr_sectors; 379 unsigned int this_count = rq->nr_sectors;
359 unsigned int timeout = sdp->timeout; 380 unsigned int timeout = sdp->timeout;
360 int ret; 381 int ret;
@@ -370,6 +391,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
370 if (ret != BLKPREP_OK) 391 if (ret != BLKPREP_OK)
371 goto out; 392 goto out;
372 SCpnt = rq->special; 393 SCpnt = rq->special;
394 sdkp = scsi_disk(disk);
373 395
374 /* from here on until we're complete, any goto out 396 /* from here on until we're complete, any goto out
375 * is used for a killable error condition */ 397 * is used for a killable error condition */
@@ -401,13 +423,21 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
401 } 423 }
402 424
403 /* 425 /*
404 * Some devices (some sdcards for one) don't like it if the 426 * Some SD card readers can't handle multi-sector accesses which touch
405 * last sector gets read in a larger then 1 sector read. 427 * the last one or two hardware sectors. Split accesses as needed.
406 */ 428 */
407 if (unlikely(sdp->last_sector_bug && 429 threshold = get_capacity(disk) - SD_LAST_BUGGY_SECTORS *
408 rq->nr_sectors > sdp->sector_size / 512 && 430 (sdp->sector_size / 512);
409 block + this_count == get_capacity(disk))) 431
410 this_count -= sdp->sector_size / 512; 432 if (unlikely(sdp->last_sector_bug && block + this_count > threshold)) {
433 if (block < threshold) {
434 /* Access up to the threshold but not beyond */
435 this_count = threshold - block;
436 } else {
437 /* Access only a single hardware sector */
438 this_count = sdp->sector_size / 512;
439 }
440 }
411 441
412 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n", 442 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n",
413 (unsigned long long)block)); 443 (unsigned long long)block));
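The rewritten last_sector_bug handling keeps any access away from the final SD_LAST_BUGGY_SECTORS hardware sectors: a request crossing the threshold is trimmed down to it, and a request starting beyond it is limited to a single hardware sector. The arithmetic in isolation, with all counts in 512-byte units as sd uses them (the last_sector_bug quirk flag itself is omitted here):

#include <stdio.h>

typedef unsigned long long sector_t;

#define SD_LAST_BUGGY_SECTORS 8

/* Returns the (possibly reduced) transfer length in 512-byte sectors. */
static unsigned int clamp_last_sectors(sector_t capacity, unsigned int sector_size,
				       sector_t block, unsigned int this_count)
{
	sector_t threshold = capacity - SD_LAST_BUGGY_SECTORS * (sector_size / 512);

	if (block + this_count > threshold) {
		if (block < threshold)
			this_count = threshold - block;	/* stop at the threshold */
		else
			this_count = sector_size / 512;	/* one hardware sector */
	}
	return this_count;
}

int main(void)
{
	/* 1 GiB disk with 4 KiB hardware sectors: capacity = 2097152 x 512 B. */
	printf("%u\n", clamp_last_sectors(2097152, 4096, 2097024, 128)); /* -> 64 */
	printf("%u\n", clamp_last_sectors(2097152, 4096, 2097100, 16));  /* -> 8  */
	return 0;
}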
@@ -459,6 +489,11 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
459 } 489 }
460 SCpnt->cmnd[0] = WRITE_6; 490 SCpnt->cmnd[0] = WRITE_6;
461 SCpnt->sc_data_direction = DMA_TO_DEVICE; 491 SCpnt->sc_data_direction = DMA_TO_DEVICE;
492
493 if (blk_integrity_rq(rq) &&
494 sd_dif_prepare(rq, block, sdp->sector_size) == -EIO)
495 goto out;
496
462 } else if (rq_data_dir(rq) == READ) { 497 } else if (rq_data_dir(rq) == READ) {
463 SCpnt->cmnd[0] = READ_6; 498 SCpnt->cmnd[0] = READ_6;
464 SCpnt->sc_data_direction = DMA_FROM_DEVICE; 499 SCpnt->sc_data_direction = DMA_FROM_DEVICE;
@@ -473,8 +508,12 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
473 "writing" : "reading", this_count, 508 "writing" : "reading", this_count,
474 rq->nr_sectors)); 509 rq->nr_sectors));
475 510
476 SCpnt->cmnd[1] = 0; 511 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
477 512 if (scsi_host_dif_capable(sdp->host, sdkp->protection_type))
513 SCpnt->cmnd[1] = 1 << 5;
514 else
515 SCpnt->cmnd[1] = 0;
516
478 if (block > 0xffffffff) { 517 if (block > 0xffffffff) {
479 SCpnt->cmnd[0] += READ_16 - READ_6; 518 SCpnt->cmnd[0] += READ_16 - READ_6;
480 SCpnt->cmnd[1] |= blk_fua_rq(rq) ? 0x8 : 0; 519 SCpnt->cmnd[1] |= blk_fua_rq(rq) ? 0x8 : 0;
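Byte 1 of READ/WRITE(10/12/16) carries the RDPROTECT/WRPROTECT field in its top three bits and FUA in bit 3; the hunk sets the protect field to 001b whenever the disk is formatted with DIF and the HBA can handle it, then ORs in FUA as before. A sketch of the byte construction, following the values visible in the code (other protect encodings exist but are not used here):

#include <stdio.h>

/* Build CDB byte 1 for a READ/WRITE(10/12/16). */
static unsigned char cdb_byte1(int dif_formatted, int fua)
{
	unsigned char b = 0;

	if (dif_formatted)
		b |= 1 << 5;	/* RDPROTECT/WRPROTECT = 001b: transfer and check PI */
	if (fua)
		b |= 0x08;	/* FUA: force unit access */
	return b;
}

int main(void)
{
	printf("0x%02x 0x%02x\n", cdb_byte1(1, 0), cdb_byte1(1, 1)); /* 0x20 0x28 */
	return 0;
}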
@@ -492,6 +531,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
492 SCpnt->cmnd[13] = (unsigned char) this_count & 0xff; 531 SCpnt->cmnd[13] = (unsigned char) this_count & 0xff;
493 SCpnt->cmnd[14] = SCpnt->cmnd[15] = 0; 532 SCpnt->cmnd[14] = SCpnt->cmnd[15] = 0;
494 } else if ((this_count > 0xff) || (block > 0x1fffff) || 533 } else if ((this_count > 0xff) || (block > 0x1fffff) ||
534 scsi_device_protection(SCpnt->device) ||
495 SCpnt->device->use_10_for_rw) { 535 SCpnt->device->use_10_for_rw) {
496 if (this_count > 0xffff) 536 if (this_count > 0xffff)
497 this_count = 0xffff; 537 this_count = 0xffff;
@@ -526,6 +566,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
526 } 566 }
527 SCpnt->sdb.length = this_count * sdp->sector_size; 567 SCpnt->sdb.length = this_count * sdp->sector_size;
528 568
569 /* If DIF or DIX is enabled, tell HBA how to handle request */
570 if (sdkp->protection_type || scsi_prot_sg_count(SCpnt))
571 sd_dif_op(SCpnt, sdkp->protection_type, scsi_prot_sg_count(SCpnt));
572
529 /* 573 /*
530 * We shouldn't disconnect in the middle of a sector, so with a dumb 574 * We shouldn't disconnect in the middle of a sector, so with a dumb
531 * host adapter, it's safe to assume that we can at least transfer 575 * host adapter, it's safe to assume that we can at least transfer
@@ -920,6 +964,48 @@ static struct block_device_operations sd_fops = {
920 .revalidate_disk = sd_revalidate_disk, 964 .revalidate_disk = sd_revalidate_disk,
921}; 965};
922 966
967static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
968{
969 u64 start_lba = scmd->request->sector;
970 u64 end_lba = scmd->request->sector + (scsi_bufflen(scmd) / 512);
971 u64 bad_lba;
972 int info_valid;
973
974 if (!blk_fs_request(scmd->request))
975 return 0;
976
977 info_valid = scsi_get_sense_info_fld(scmd->sense_buffer,
978 SCSI_SENSE_BUFFERSIZE,
979 &bad_lba);
980 if (!info_valid)
981 return 0;
982
983 if (scsi_bufflen(scmd) <= scmd->device->sector_size)
984 return 0;
985
986 if (scmd->device->sector_size < 512) {
987 /* only legitimate sector_size here is 256 */
988 start_lba <<= 1;
989 end_lba <<= 1;
990 } else {
991 /* be careful ... don't want any overflows */
992 u64 factor = scmd->device->sector_size / 512;
993 do_div(start_lba, factor);
994 do_div(end_lba, factor);
995 }
996
997 /* The bad lba was reported incorrectly, we have no idea where
998 * the error is.
999 */
1000 if (bad_lba < start_lba || bad_lba >= end_lba)
1001 return 0;
1002
1003 /* This computation should always be done in terms of
1004 * the resolution of the device's medium.
1005 */
1006 return (bad_lba - start_lba) * scmd->device->sector_size;
1007}
1008
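sd_completed_bytes() turns the sense INFORMATION field (the first bad LBA, in device-sector units) into the number of bytes that did transfer before the error. The same arithmetic as a standalone program, using a 4096-byte-sector disk as the worked case; request start and length are in 512-byte units exactly as the block layer hands them to sd (the 256-byte-sector special case is left out):

#include <stdio.h>
#include <stdint.h>

/* start512/len512 are in 512-byte units; bad_lba is in device sectors. */
static uint64_t good_bytes(uint64_t start512, uint64_t len512,
			   unsigned int sector_size, uint64_t bad_lba)
{
	uint64_t factor = sector_size / 512;
	uint64_t start_lba = start512 / factor;		/* convert to device sectors */
	uint64_t end_lba = (start512 + len512) / factor;

	if (bad_lba < start_lba || bad_lba >= end_lba)
		return 0;	/* reported LBA is outside the request: trust nothing */

	return (bad_lba - start_lba) * sector_size;
}

int main(void)
{
	/* 32 KiB read at 512-byte sector 64 of a 4 KiB-sector disk: device
	 * sectors 8..15; sector 10 is bad, so 2 sectors = 8192 bytes were OK. */
	printf("%llu\n", (unsigned long long)good_bytes(64, 64, 4096, 10));
	return 0;
}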
923/** 1009/**
924 * sd_done - bottom half handler: called when the lower level 1010 * sd_done - bottom half handler: called when the lower level
925 * driver has completed (successfully or otherwise) a scsi command. 1011 * driver has completed (successfully or otherwise) a scsi command.
@@ -930,15 +1016,10 @@ static struct block_device_operations sd_fops = {
930static int sd_done(struct scsi_cmnd *SCpnt) 1016static int sd_done(struct scsi_cmnd *SCpnt)
931{ 1017{
932 int result = SCpnt->result; 1018 int result = SCpnt->result;
933 unsigned int xfer_size = scsi_bufflen(SCpnt); 1019 unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
934 unsigned int good_bytes = result ? 0 : xfer_size;
935 u64 start_lba = SCpnt->request->sector;
936 u64 end_lba = SCpnt->request->sector + (xfer_size / 512);
937 u64 bad_lba;
938 struct scsi_sense_hdr sshdr; 1020 struct scsi_sense_hdr sshdr;
939 int sense_valid = 0; 1021 int sense_valid = 0;
940 int sense_deferred = 0; 1022 int sense_deferred = 0;
941 int info_valid;
942 1023
943 if (result) { 1024 if (result) {
944 sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr); 1025 sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
@@ -963,36 +1044,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
963 switch (sshdr.sense_key) { 1044 switch (sshdr.sense_key) {
964 case HARDWARE_ERROR: 1045 case HARDWARE_ERROR:
965 case MEDIUM_ERROR: 1046 case MEDIUM_ERROR:
966 if (!blk_fs_request(SCpnt->request)) 1047 good_bytes = sd_completed_bytes(SCpnt);
967 goto out;
968 info_valid = scsi_get_sense_info_fld(SCpnt->sense_buffer,
969 SCSI_SENSE_BUFFERSIZE,
970 &bad_lba);
971 if (!info_valid)
972 goto out;
973 if (xfer_size <= SCpnt->device->sector_size)
974 goto out;
975 if (SCpnt->device->sector_size < 512) {
976 /* only legitimate sector_size here is 256 */
977 start_lba <<= 1;
978 end_lba <<= 1;
979 } else {
980 /* be careful ... don't want any overflows */
981 u64 factor = SCpnt->device->sector_size / 512;
982 do_div(start_lba, factor);
983 do_div(end_lba, factor);
984 }
985
986 if (bad_lba < start_lba || bad_lba >= end_lba)
987 /* the bad lba was reported incorrectly, we have
988 * no idea where the error is
989 */
990 goto out;
991
992 /* This computation should always be done in terms of
993 * the resolution of the device's medium.
994 */
995 good_bytes = (bad_lba - start_lba)*SCpnt->device->sector_size;
996 break; 1048 break;
997 case RECOVERED_ERROR: 1049 case RECOVERED_ERROR:
998 case NO_SENSE: 1050 case NO_SENSE:
@@ -1002,10 +1054,23 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1002 scsi_print_sense("sd", SCpnt); 1054 scsi_print_sense("sd", SCpnt);
1003 SCpnt->result = 0; 1055 SCpnt->result = 0;
1004 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 1056 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1005 good_bytes = xfer_size; 1057 good_bytes = scsi_bufflen(SCpnt);
1058 break;
1059 case ABORTED_COMMAND:
1060 if (sshdr.asc == 0x10) { /* DIF: Disk detected corruption */
1061 scsi_print_result(SCpnt);
1062 scsi_print_sense("sd", SCpnt);
1063 good_bytes = sd_completed_bytes(SCpnt);
1064 }
1006 break; 1065 break;
1007 case ILLEGAL_REQUEST: 1066 case ILLEGAL_REQUEST:
1008 if (SCpnt->device->use_10_for_rw && 1067 if (sshdr.asc == 0x10) { /* DIX: HBA detected corruption */
1068 scsi_print_result(SCpnt);
1069 scsi_print_sense("sd", SCpnt);
1070 good_bytes = sd_completed_bytes(SCpnt);
1071 }
1072 if (!scsi_device_protection(SCpnt->device) &&
1073 SCpnt->device->use_10_for_rw &&
1009 (SCpnt->cmnd[0] == READ_10 || 1074 (SCpnt->cmnd[0] == READ_10 ||
1010 SCpnt->cmnd[0] == WRITE_10)) 1075 SCpnt->cmnd[0] == WRITE_10))
1011 SCpnt->device->use_10_for_rw = 0; 1076 SCpnt->device->use_10_for_rw = 0;
@@ -1018,6 +1083,9 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1018 break; 1083 break;
1019 } 1084 }
1020 out: 1085 out:
1086 if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
1087 sd_dif_complete(SCpnt, good_bytes);
1088
1021 return good_bytes; 1089 return good_bytes;
1022} 1090}
1023 1091
@@ -1165,6 +1233,49 @@ sd_spinup_disk(struct scsi_disk *sdkp)
1165 } 1233 }
1166} 1234}
1167 1235
1236
1237/*
1238 * Determine whether disk supports Data Integrity Field.
1239 */
1240void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
1241{
1242 struct scsi_device *sdp = sdkp->device;
1243 u8 type;
1244
1245 if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)
1246 type = 0;
1247 else
1248 type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
1249
1250 switch (type) {
1251 case SD_DIF_TYPE0_PROTECTION:
1252 sdkp->protection_type = 0;
1253 break;
1254
1255 case SD_DIF_TYPE1_PROTECTION:
1256 case SD_DIF_TYPE3_PROTECTION:
1257 sdkp->protection_type = type;
1258 break;
1259
1260 case SD_DIF_TYPE2_PROTECTION:
1261 sd_printk(KERN_ERR, sdkp, "formatted with DIF Type 2 " \
1262 "protection which is currently unsupported. " \
1263 "Disabling disk!\n");
1264 goto disable;
1265
1266 default:
1267 sd_printk(KERN_ERR, sdkp, "formatted with unknown " \
1268 "protection type %d. Disabling disk!\n", type);
1269 goto disable;
1270 }
1271
1272 return;
1273
1274disable:
1275 sdkp->protection_type = 0;
1276 sdkp->capacity = 0;
1277}
1278
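sd_read_protection_type() inspects byte 12 of the READ CAPACITY(16) response: bit 0 is PROT_EN and bits 3..1 are P_TYPE, where P_TYPE 0 with PROT_EN set means Type 1 (hence the +1). Type 2 is recognised but refused because, as the comment in sd_dif.c notes, it requires 32-byte CDBs. The decode on its own, as a small stand-alone helper:

#include <stdio.h>

/* Returns 0 (unprotected) or the DIF type 1..3; -1 for reserved values. */
static int dif_type(unsigned char byte12)
{
	if ((byte12 & 1) == 0)		/* PROT_EN clear: no protection */
		return 0;

	int p_type = (byte12 >> 1) & 7;	/* P_TYPE 0 encodes Type 1, etc. */

	return (p_type <= 2) ? p_type + 1 : -1;
}

int main(void)
{
	printf("%d %d %d\n", dif_type(0x00), dif_type(0x01), dif_type(0x05));
	/* -> 0 (off), 1 (Type 1), 3 (Type 3) */
	return 0;
}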
1168/* 1279/*
1169 * read disk capacity 1280 * read disk capacity
1170 */ 1281 */
@@ -1174,7 +1285,8 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
1174 unsigned char cmd[16]; 1285 unsigned char cmd[16];
1175 int the_result, retries; 1286 int the_result, retries;
1176 int sector_size = 0; 1287 int sector_size = 0;
1177 int longrc = 0; 1288 /* Force READ CAPACITY(16) when PROTECT=1 */
1289 int longrc = scsi_device_protection(sdkp->device) ? 1 : 0;
1178 struct scsi_sense_hdr sshdr; 1290 struct scsi_sense_hdr sshdr;
1179 int sense_valid = 0; 1291 int sense_valid = 0;
1180 struct scsi_device *sdp = sdkp->device; 1292 struct scsi_device *sdp = sdkp->device;
@@ -1186,8 +1298,8 @@ repeat:
1186 memset((void *) cmd, 0, 16); 1298 memset((void *) cmd, 0, 16);
1187 cmd[0] = SERVICE_ACTION_IN; 1299 cmd[0] = SERVICE_ACTION_IN;
1188 cmd[1] = SAI_READ_CAPACITY_16; 1300 cmd[1] = SAI_READ_CAPACITY_16;
1189 cmd[13] = 12; 1301 cmd[13] = 13;
1190 memset((void *) buffer, 0, 12); 1302 memset((void *) buffer, 0, 13);
1191 } else { 1303 } else {
1192 cmd[0] = READ_CAPACITY; 1304 cmd[0] = READ_CAPACITY;
1193 memset((void *) &cmd[1], 0, 9); 1305 memset((void *) &cmd[1], 0, 9);
@@ -1195,7 +1307,7 @@ repeat:
1195 } 1307 }
1196 1308
1197 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE, 1309 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
1198 buffer, longrc ? 12 : 8, &sshdr, 1310 buffer, longrc ? 13 : 8, &sshdr,
1199 SD_TIMEOUT, SD_MAX_RETRIES); 1311 SD_TIMEOUT, SD_MAX_RETRIES);
1200 1312
1201 if (media_not_present(sdkp, &sshdr)) 1313 if (media_not_present(sdkp, &sshdr))
@@ -1270,6 +1382,8 @@ repeat:
1270 1382
1271 sector_size = (buffer[8] << 24) | 1383 sector_size = (buffer[8] << 24) |
1272 (buffer[9] << 16) | (buffer[10] << 8) | buffer[11]; 1384 (buffer[9] << 16) | (buffer[10] << 8) | buffer[11];
1385
1386 sd_read_protection_type(sdkp, buffer);
1273 } 1387 }
1274 1388
1275 /* Some devices return the total number of sectors, not the 1389 /* Some devices return the total number of sectors, not the
@@ -1531,6 +1645,52 @@ defaults:
1531 sdkp->DPOFUA = 0; 1645 sdkp->DPOFUA = 0;
1532} 1646}
1533 1647
1648/*
1649 * The ATO bit indicates whether the DIF application tag is available
1650 * for use by the operating system.
1651 */
1652void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
1653{
1654 int res, offset;
1655 struct scsi_device *sdp = sdkp->device;
1656 struct scsi_mode_data data;
1657 struct scsi_sense_hdr sshdr;
1658
1659 if (sdp->type != TYPE_DISK)
1660 return;
1661
1662 if (sdkp->protection_type == 0)
1663 return;
1664
1665 res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
1666 SD_MAX_RETRIES, &data, &sshdr);
1667
1668 if (!scsi_status_is_good(res) || !data.header_length ||
1669 data.length < 6) {
1670 sd_printk(KERN_WARNING, sdkp,
1671 "getting Control mode page failed, assume no ATO\n");
1672
1673 if (scsi_sense_valid(&sshdr))
1674 sd_print_sense_hdr(sdkp, &sshdr);
1675
1676 return;
1677 }
1678
1679 offset = data.header_length + data.block_descriptor_length;
1680
1681 if ((buffer[offset] & 0x3f) != 0x0a) {
1682 sd_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
1683 return;
1684 }
1685
1686 if ((buffer[offset + 5] & 0x80) == 0)
1687 return;
1688
1689 sdkp->ATO = 1;
1690
1691 return;
1692}
1693
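sd_read_app_tag_own() issues MODE SENSE for the Control mode page (0x0a) and tests the ATO bit, bit 7 of byte 5 of the page, which tells the initiator whether it may use the DIF application tag. Given a raw mode-sense reply, the relevant parsing is just the snippet below; the sample buffer is fabricated (4-byte MODE SENSE(6) header, no block descriptors) purely to exercise the offsets:

#include <stdio.h>

/* hdr_len/blkdesc_len come from the mode sense header; buf is the whole reply. */
static int control_page_ato(const unsigned char *buf, int hdr_len, int blkdesc_len)
{
	int off = hdr_len + blkdesc_len;	/* start of the first mode page */

	if ((buf[off] & 0x3f) != 0x0a)		/* not the Control mode page */
		return -1;

	return (buf[off + 5] & 0x80) ? 1 : 0;	/* ATO bit */
}

int main(void)
{
	unsigned char buf[4 + 12] = { 0 };

	buf[4] = 0x0a;		/* page code */
	buf[5] = 0x0a;		/* page length */
	buf[4 + 5] = 0x80;	/* ATO set */
	printf("%d\n", control_page_ato(buf, 4, 0));	/* -> 1 */
	return 0;
}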
1534/** 1694/**
1535 * sd_revalidate_disk - called the first time a new disk is seen, 1695 * sd_revalidate_disk - called the first time a new disk is seen,
1536 * performs disk spin up, read_capacity, etc. 1696 * performs disk spin up, read_capacity, etc.
@@ -1567,6 +1727,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
1567 sdkp->write_prot = 0; 1727 sdkp->write_prot = 0;
1568 sdkp->WCE = 0; 1728 sdkp->WCE = 0;
1569 sdkp->RCD = 0; 1729 sdkp->RCD = 0;
1730 sdkp->ATO = 0;
1570 1731
1571 sd_spinup_disk(sdkp); 1732 sd_spinup_disk(sdkp);
1572 1733
@@ -1578,6 +1739,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
1578 sd_read_capacity(sdkp, buffer); 1739 sd_read_capacity(sdkp, buffer);
1579 sd_read_write_protect_flag(sdkp, buffer); 1740 sd_read_write_protect_flag(sdkp, buffer);
1580 sd_read_cache_type(sdkp, buffer); 1741 sd_read_cache_type(sdkp, buffer);
1742 sd_read_app_tag_own(sdkp, buffer);
1581 } 1743 }
1582 1744
1583 /* 1745 /*
@@ -1643,18 +1805,20 @@ static int sd_probe(struct device *dev)
1643 if (!gd) 1805 if (!gd)
1644 goto out_free; 1806 goto out_free;
1645 1807
1646 if (!idr_pre_get(&sd_index_idr, GFP_KERNEL)) 1808 do {
1647 goto out_put; 1809 if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
1810 goto out_put;
1648 1811
1649 spin_lock(&sd_index_lock); 1812 error = ida_get_new(&sd_index_ida, &index);
1650 error = idr_get_new(&sd_index_idr, NULL, &index); 1813 } while (error == -EAGAIN);
1651 spin_unlock(&sd_index_lock);
1652 1814
1653 if (index >= SD_MAX_DISKS)
1654 error = -EBUSY;
1655 if (error) 1815 if (error)
1656 goto out_put; 1816 goto out_put;
1657 1817
1818 error = -EBUSY;
1819 if (index >= SD_MAX_DISKS)
1820 goto out_free_index;
1821
1658 sdkp->device = sdp; 1822 sdkp->device = sdp;
1659 sdkp->driver = &sd_template; 1823 sdkp->driver = &sd_template;
1660 sdkp->disk = gd; 1824 sdkp->disk = gd;
@@ -1675,7 +1839,7 @@ static int sd_probe(struct device *dev)
1675 strncpy(sdkp->dev.bus_id, sdp->sdev_gendev.bus_id, BUS_ID_SIZE); 1839 strncpy(sdkp->dev.bus_id, sdp->sdev_gendev.bus_id, BUS_ID_SIZE);
1676 1840
1677 if (device_add(&sdkp->dev)) 1841 if (device_add(&sdkp->dev))
1678 goto out_put; 1842 goto out_free_index;
1679 1843
1680 get_device(&sdp->sdev_gendev); 1844 get_device(&sdp->sdev_gendev);
1681 1845
@@ -1711,12 +1875,15 @@ static int sd_probe(struct device *dev)
1711 1875
1712 dev_set_drvdata(dev, sdkp); 1876 dev_set_drvdata(dev, sdkp);
1713 add_disk(gd); 1877 add_disk(gd);
1878 sd_dif_config_host(sdkp);
1714 1879
1715 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 1880 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
1716 sdp->removable ? "removable " : ""); 1881 sdp->removable ? "removable " : "");
1717 1882
1718 return 0; 1883 return 0;
1719 1884
1885 out_free_index:
1886 ida_remove(&sd_index_ida, index);
1720 out_put: 1887 out_put:
1721 put_disk(gd); 1888 put_disk(gd);
1722 out_free: 1889 out_free:
@@ -1766,9 +1933,7 @@ static void scsi_disk_release(struct device *dev)
1766 struct scsi_disk *sdkp = to_scsi_disk(dev); 1933 struct scsi_disk *sdkp = to_scsi_disk(dev);
1767 struct gendisk *disk = sdkp->disk; 1934 struct gendisk *disk = sdkp->disk;
1768 1935
1769 spin_lock(&sd_index_lock); 1936 ida_remove(&sd_index_ida, sdkp->index);
1770 idr_remove(&sd_index_idr, sdkp->index);
1771 spin_unlock(&sd_index_lock);
1772 1937
1773 disk->private_data = NULL; 1938 disk->private_data = NULL;
1774 put_disk(disk); 1939 put_disk(disk);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 03a3d45cfa42..95b9f06534d5 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -31,6 +31,12 @@
31 */ 31 */
32#define SD_BUF_SIZE 512 32#define SD_BUF_SIZE 512
33 33
34/*
35 * Number of sectors at the end of the device to avoid multi-sector
36 * accesses to in the case of last_sector_bug
37 */
38#define SD_LAST_BUGGY_SECTORS 8
39
34struct scsi_disk { 40struct scsi_disk {
35 struct scsi_driver *driver; /* always &sd_template */ 41 struct scsi_driver *driver; /* always &sd_template */
36 struct scsi_device *device; 42 struct scsi_device *device;
@@ -41,7 +47,9 @@ struct scsi_disk {
41 u32 index; 47 u32 index;
42 u8 media_present; 48 u8 media_present;
43 u8 write_prot; 49 u8 write_prot;
50 u8 protection_type;/* Data Integrity Field */
44 unsigned previous_state : 1; 51 unsigned previous_state : 1;
52 unsigned ATO : 1; /* state of disk ATO bit */
45 unsigned WCE : 1; /* state of disk WCE bit */ 53 unsigned WCE : 1; /* state of disk WCE bit */
46 unsigned RCD : 1; /* state of disk RCD bit, unused */ 54 unsigned RCD : 1; /* state of disk RCD bit, unused */
47 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ 55 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
@@ -59,4 +67,50 @@ static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
59 (sdsk)->disk->disk_name, ##a) : \ 67 (sdsk)->disk->disk_name, ##a) : \
60 sdev_printk(prefix, (sdsk)->device, fmt, ##a) 68 sdev_printk(prefix, (sdsk)->device, fmt, ##a)
61 69
70/*
71 * A DIF-capable target device can be formatted with different
72 * protection schemes. Currently 0 through 3 are defined:
73 *
74 * Type 0 is regular (unprotected) I/O
75 *
76 * Type 1 defines the contents of the guard and reference tags
77 *
78 * Type 2 defines the contents of the guard and reference tags and
79 * uses 32-byte commands to seed the latter
80 *
81 * Type 3 defines the contents of the guard tag only
82 */
83
84enum sd_dif_target_protection_types {
85 SD_DIF_TYPE0_PROTECTION = 0x0,
86 SD_DIF_TYPE1_PROTECTION = 0x1,
87 SD_DIF_TYPE2_PROTECTION = 0x2,
88 SD_DIF_TYPE3_PROTECTION = 0x3,
89};
90
91/*
92 * Data Integrity Field tuple.
93 */
94struct sd_dif_tuple {
95 __be16 guard_tag; /* Checksum */
96 __be16 app_tag; /* Opaque storage */
97 __be32 ref_tag; /* Target LBA or indirect LBA */
98};
99
100#if defined(CONFIG_BLK_DEV_INTEGRITY)
101
102extern void sd_dif_op(struct scsi_cmnd *, unsigned int, unsigned int);
103extern void sd_dif_config_host(struct scsi_disk *);
104extern int sd_dif_prepare(struct request *rq, sector_t, unsigned int);
105extern void sd_dif_complete(struct scsi_cmnd *, unsigned int);
106
107#else /* CONFIG_BLK_DEV_INTEGRITY */
108
109#define sd_dif_op(a, b, c) do { } while (0)
110#define sd_dif_config_host(a) do { } while (0)
111#define sd_dif_prepare(a, b, c) (0)
112#define sd_dif_complete(a, b) (0)
113
114#endif /* CONFIG_BLK_DEV_INTEGRITY */
115
62#endif /* _SCSI_DISK_H */ 116#endif /* _SCSI_DISK_H */
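Each DIF-protected 512-byte sector carries the 8-byte tuple declared above immediately after its data, so a drive formatted with protection exchanges 520-byte sectors on the wire while the host still sees 512 bytes of payload. The tags are big-endian on the wire; a userspace stand-in with fixed-width types (the kernel header uses __be16/__be32):

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for struct sd_dif_tuple. */
struct dif_tuple {
	uint16_t guard_tag;	/* CRC (or IP checksum on the DIX side) of the data */
	uint16_t app_tag;	/* opaque; 0xffff marks an unwritten/escape sector */
	uint32_t ref_tag;	/* low 32 bits of the target LBA for Type 1/2 */
};

int main(void)
{
	printf("tuple: %zu bytes, wire sector: %d bytes\n",
	       sizeof(struct dif_tuple), 512 + 8);	/* 8, 520 */
	return 0;
}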
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
new file mode 100644
index 000000000000..4d17f3d35aac
--- /dev/null
+++ b/drivers/scsi/sd_dif.c
@@ -0,0 +1,538 @@
1/*
2 * sd_dif.c - SCSI Data Integrity Field
3 *
4 * Copyright (C) 2007, 2008 Oracle Corporation
5 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; see the file COPYING. If not, write to
18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
19 * USA.
20 *
21 */
22
23#include <linux/blkdev.h>
24#include <linux/crc-t10dif.h>
25
26#include <scsi/scsi.h>
27#include <scsi/scsi_cmnd.h>
28#include <scsi/scsi_dbg.h>
29#include <scsi/scsi_device.h>
30#include <scsi/scsi_driver.h>
31#include <scsi/scsi_eh.h>
32#include <scsi/scsi_host.h>
33#include <scsi/scsi_ioctl.h>
34#include <scsi/scsicam.h>
35
36#include <net/checksum.h>
37
38#include "sd.h"
39
40typedef __u16 (csum_fn) (void *, unsigned int);
41
42static __u16 sd_dif_crc_fn(void *data, unsigned int len)
43{
44 return cpu_to_be16(crc_t10dif(data, len));
45}
46
47static __u16 sd_dif_ip_fn(void *data, unsigned int len)
48{
49 return ip_compute_csum(data, len);
50}
51
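sd_dif_crc_fn() above wraps the kernel's shared crc_t10dif() helper. The T10 guard CRC is a 16-bit CRC with polynomial 0x8BB7, zero initial value and no bit reflection; a plain bitwise version is handy for checking tuples by hand, though it is only a sketch and not how the kernel computes it:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Bitwise CRC16 with the T10-DIF polynomial 0x8BB7 (init 0, no reflection). */
static uint16_t crc_t10dif_sketch(const uint8_t *buf, size_t len)
{
	uint16_t crc = 0;

	for (size_t i = 0; i < len; i++) {
		crc ^= (uint16_t)buf[i] << 8;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x8BB7 : crc << 1;
	}
	return crc;
}

int main(void)
{
	uint8_t sector[512];

	memset(sector, 0xa5, sizeof(sector));	/* dummy sector contents */
	printf("guard tag: 0x%04x\n", crc_t10dif_sketch(sector, sizeof(sector)));
	return 0;
}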
52/*
53 * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
54 * 16 bit app tag, 32 bit reference tag.
55 */
56static void sd_dif_type1_generate(struct blk_integrity_exchg *bix, csum_fn *fn)
57{
58 void *buf = bix->data_buf;
59 struct sd_dif_tuple *sdt = bix->prot_buf;
60 sector_t sector = bix->sector;
61 unsigned int i;
62
63 for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
64 sdt->guard_tag = fn(buf, bix->sector_size);
65 sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
66 sdt->app_tag = 0;
67
68 buf += bix->sector_size;
69 sector++;
70 }
71}
72
73static void sd_dif_type1_generate_crc(struct blk_integrity_exchg *bix)
74{
75 sd_dif_type1_generate(bix, sd_dif_crc_fn);
76}
77
78static void sd_dif_type1_generate_ip(struct blk_integrity_exchg *bix)
79{
80 sd_dif_type1_generate(bix, sd_dif_ip_fn);
81}
82
83static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
84{
85 void *buf = bix->data_buf;
86 struct sd_dif_tuple *sdt = bix->prot_buf;
87 sector_t sector = bix->sector;
88 unsigned int i;
89 __u16 csum;
90
91 for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
92 /* Unwritten sectors */
93 if (sdt->app_tag == 0xffff)
94 return 0;
95
96 /* Bad ref tag received from disk */
97 if (sdt->ref_tag == 0xffffffff) {
98 printk(KERN_ERR
99 "%s: bad phys ref tag on sector %lu\n",
100 bix->disk_name, (unsigned long)sector);
101 return -EIO;
102 }
103
104 if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
105 printk(KERN_ERR
106 "%s: ref tag error on sector %lu (rcvd %u)\n",
107 bix->disk_name, (unsigned long)sector,
108 be32_to_cpu(sdt->ref_tag));
109 return -EIO;
110 }
111
112 csum = fn(buf, bix->sector_size);
113
114 if (sdt->guard_tag != csum) {
115 printk(KERN_ERR "%s: guard tag error on sector %lu " \
116 "(rcvd %04x, data %04x)\n", bix->disk_name,
117 (unsigned long)sector,
118 be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
119 return -EIO;
120 }
121
122 buf += bix->sector_size;
123 sector++;
124 }
125
126 return 0;
127}
128
129static int sd_dif_type1_verify_crc(struct blk_integrity_exchg *bix)
130{
131 return sd_dif_type1_verify(bix, sd_dif_crc_fn);
132}
133
134static int sd_dif_type1_verify_ip(struct blk_integrity_exchg *bix)
135{
136 return sd_dif_type1_verify(bix, sd_dif_ip_fn);
137}
138
139/*
140 * Functions for interleaving and deinterleaving application tags
141 */
142static void sd_dif_type1_set_tag(void *prot, void *tag_buf, unsigned int sectors)
143{
144 struct sd_dif_tuple *sdt = prot;
145 char *tag = tag_buf;
146 unsigned int i, j;
147
148 for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
149 sdt->app_tag = tag[j] << 8 | tag[j+1];
150 BUG_ON(sdt->app_tag == 0xffff);
151 }
152}
153
154static void sd_dif_type1_get_tag(void *prot, void *tag_buf, unsigned int sectors)
155{
156 struct sd_dif_tuple *sdt = prot;
157 char *tag = tag_buf;
158 unsigned int i, j;
159
160 for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
161 tag[j] = (sdt->app_tag & 0xff00) >> 8;
162 tag[j+1] = sdt->app_tag & 0xff;
163 }
164}
165
166static struct blk_integrity dif_type1_integrity_crc = {
167 .name = "T10-DIF-TYPE1-CRC",
168 .generate_fn = sd_dif_type1_generate_crc,
169 .verify_fn = sd_dif_type1_verify_crc,
170 .get_tag_fn = sd_dif_type1_get_tag,
171 .set_tag_fn = sd_dif_type1_set_tag,
172 .tuple_size = sizeof(struct sd_dif_tuple),
173 .tag_size = 0,
174};
175
176static struct blk_integrity dif_type1_integrity_ip = {
177 .name = "T10-DIF-TYPE1-IP",
178 .generate_fn = sd_dif_type1_generate_ip,
179 .verify_fn = sd_dif_type1_verify_ip,
180 .get_tag_fn = sd_dif_type1_get_tag,
181 .set_tag_fn = sd_dif_type1_set_tag,
182 .tuple_size = sizeof(struct sd_dif_tuple),
183 .tag_size = 0,
184};
185
186
187/*
188 * Type 3 protection has a 16-bit guard tag and 16 + 32 bits of opaque
189 * tag space.
190 */
191static void sd_dif_type3_generate(struct blk_integrity_exchg *bix, csum_fn *fn)
192{
193 void *buf = bix->data_buf;
194 struct sd_dif_tuple *sdt = bix->prot_buf;
195 unsigned int i;
196
197 for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
198 sdt->guard_tag = fn(buf, bix->sector_size);
199 sdt->ref_tag = 0;
200 sdt->app_tag = 0;
201
202 buf += bix->sector_size;
203 }
204}
205
206static void sd_dif_type3_generate_crc(struct blk_integrity_exchg *bix)
207{
208 sd_dif_type3_generate(bix, sd_dif_crc_fn);
209}
210
211static void sd_dif_type3_generate_ip(struct blk_integrity_exchg *bix)
212{
213 sd_dif_type3_generate(bix, sd_dif_ip_fn);
214}
215
216static int sd_dif_type3_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
217{
218 void *buf = bix->data_buf;
219 struct sd_dif_tuple *sdt = bix->prot_buf;
220 sector_t sector = bix->sector;
221 unsigned int i;
222 __u16 csum;
223
224 for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
225 /* Unwritten sectors */
226 if (sdt->app_tag == 0xffff && sdt->ref_tag == 0xffffffff)
227 return 0;
228
229 csum = fn(buf, bix->sector_size);
230
231 if (sdt->guard_tag != csum) {
232 printk(KERN_ERR "%s: guard tag error on sector %lu " \
233 "(rcvd %04x, data %04x)\n", bix->disk_name,
234 (unsigned long)sector,
235 be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
236 return -EIO;
237 }
238
239 buf += bix->sector_size;
240 sector++;
241 }
242
243 return 0;
244}
245
246static int sd_dif_type3_verify_crc(struct blk_integrity_exchg *bix)
247{
248 return sd_dif_type3_verify(bix, sd_dif_crc_fn);
249}
250
251static int sd_dif_type3_verify_ip(struct blk_integrity_exchg *bix)
252{
253 return sd_dif_type3_verify(bix, sd_dif_ip_fn);
254}
255
256static void sd_dif_type3_set_tag(void *prot, void *tag_buf, unsigned int sectors)
257{
258 struct sd_dif_tuple *sdt = prot;
259 char *tag = tag_buf;
260 unsigned int i, j;
261
262 for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) {
263 sdt->app_tag = tag[j] << 8 | tag[j+1];
264 sdt->ref_tag = tag[j+2] << 24 | tag[j+3] << 16 |
265 tag[j+4] << 8 | tag[j+5];
266 }
267}
268
269static void sd_dif_type3_get_tag(void *prot, void *tag_buf, unsigned int sectors)
270{
271 struct sd_dif_tuple *sdt = prot;
272 char *tag = tag_buf;
273 unsigned int i, j;
274
275 for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
276 tag[j] = (sdt->app_tag & 0xff00) >> 8;
277 tag[j+1] = sdt->app_tag & 0xff;
278 tag[j+2] = (sdt->ref_tag & 0xff000000) >> 24;
279 tag[j+3] = (sdt->ref_tag & 0xff0000) >> 16;
280 tag[j+4] = (sdt->ref_tag & 0xff00) >> 8;
281 tag[j+5] = sdt->ref_tag & 0xff;
282 BUG_ON(sdt->app_tag == 0xffff || sdt->ref_tag == 0xffffffff);
283 }
284}
285
286static struct blk_integrity dif_type3_integrity_crc = {
287 .name = "T10-DIF-TYPE3-CRC",
288 .generate_fn = sd_dif_type3_generate_crc,
289 .verify_fn = sd_dif_type3_verify_crc,
290 .get_tag_fn = sd_dif_type3_get_tag,
291 .set_tag_fn = sd_dif_type3_set_tag,
292 .tuple_size = sizeof(struct sd_dif_tuple),
293 .tag_size = 0,
294};
295
296static struct blk_integrity dif_type3_integrity_ip = {
297 .name = "T10-DIF-TYPE3-IP",
298 .generate_fn = sd_dif_type3_generate_ip,
299 .verify_fn = sd_dif_type3_verify_ip,
300 .get_tag_fn = sd_dif_type3_get_tag,
301 .set_tag_fn = sd_dif_type3_set_tag,
302 .tuple_size = sizeof(struct sd_dif_tuple),
303 .tag_size = 0,
304};
305
306/*
307 * Configure exchange of protection information between OS and HBA.
308 */
309void sd_dif_config_host(struct scsi_disk *sdkp)
310{
311 struct scsi_device *sdp = sdkp->device;
312 struct gendisk *disk = sdkp->disk;
313 u8 type = sdkp->protection_type;
314
315 /* If this HBA doesn't support DIX, resort to normal I/O or DIF */
316 if (scsi_host_dix_capable(sdp->host, type) == 0) {
317
318 if (type == SD_DIF_TYPE0_PROTECTION)
319 return;
320
321 if (scsi_host_dif_capable(sdp->host, type) == 0) {
322 sd_printk(KERN_INFO, sdkp, "Type %d protection " \
323 "unsupported by HBA. Disabling DIF.\n", type);
324 sdkp->protection_type = 0;
325 return;
326 }
327
328 sd_printk(KERN_INFO, sdkp, "Enabling DIF Type %d protection\n",
329 type);
330
331 return;
332 }
333
334 /* Enable DMA of protection information */
335 if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP)
336 if (type == SD_DIF_TYPE3_PROTECTION)
337 blk_integrity_register(disk, &dif_type3_integrity_ip);
338 else
339 blk_integrity_register(disk, &dif_type1_integrity_ip);
340 else
341 if (type == SD_DIF_TYPE3_PROTECTION)
342 blk_integrity_register(disk, &dif_type3_integrity_crc);
343 else
344 blk_integrity_register(disk, &dif_type1_integrity_crc);
345
346 sd_printk(KERN_INFO, sdkp,
347 "Enabling %s integrity protection\n", disk->integrity->name);
348
349 /* Signal to block layer that we support sector tagging */
350 if (type && sdkp->ATO) {
351 if (type == SD_DIF_TYPE3_PROTECTION)
352 disk->integrity->tag_size = sizeof(u16) + sizeof(u32);
353 else
354 disk->integrity->tag_size = sizeof(u16);
355
356 sd_printk(KERN_INFO, sdkp, "DIF application tag size %u\n",
357 disk->integrity->tag_size);
358 }
359}
360
361/*
362 * DIF DMA operation magic decoder ring.
363 */
364void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix)
365{
366 int csum_convert, prot_op;
367
368 prot_op = 0;
369
370 /* Convert checksum? */
371 if (scsi_host_get_guard(scmd->device->host) != SHOST_DIX_GUARD_CRC)
372 csum_convert = 1;
373 else
374 csum_convert = 0;
375
376 switch (scmd->cmnd[0]) {
377 case READ_10:
378 case READ_12:
379 case READ_16:
380 if (dif && dix)
381 if (csum_convert)
382 prot_op = SCSI_PROT_READ_CONVERT;
383 else
384 prot_op = SCSI_PROT_READ_PASS;
385 else if (dif && !dix)
386 prot_op = SCSI_PROT_READ_STRIP;
387 else if (!dif && dix)
388 prot_op = SCSI_PROT_READ_INSERT;
389
390 break;
391
392 case WRITE_10:
393 case WRITE_12:
394 case WRITE_16:
395 if (dif && dix)
396 if (csum_convert)
397 prot_op = SCSI_PROT_WRITE_CONVERT;
398 else
399 prot_op = SCSI_PROT_WRITE_PASS;
400 else if (dif && !dix)
401 prot_op = SCSI_PROT_WRITE_INSERT;
402 else if (!dif && dix)
403 prot_op = SCSI_PROT_WRITE_STRIP;
404
405 break;
406 }
407
408 scsi_set_prot_op(scmd, prot_op);
409 scsi_set_prot_type(scmd, dif);
410}
411
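The "decoder ring" above boils down to a small truth table: dif says whether the target expects protection bytes, dix says whether the block layer supplied them, and the host guard type decides whether a CRC/IP-checksum conversion is needed when both ends participate. Reduced to strings for readability (the names mirror the SCSI_PROT_* operations used in the function):

#include <stdio.h>

/* dif: drive formatted with protection; dix: OS passed integrity metadata. */
static const char *prot_op(int dif, int dix, int is_write, int convert)
{
	if (dif && dix)
		return convert ? "CONVERT" : "PASS";	/* both sides checked */
	if (dif && !dix)
		return is_write ? "INSERT" : "STRIP";	/* HBA generates/strips PI */
	if (!dif && dix)
		return is_write ? "STRIP" : "INSERT";	/* PI only between OS and HBA */
	return "NORMAL";				/* no protection at all */
}

int main(void)
{
	printf("write, DIF only: %s\n", prot_op(1, 0, 1, 0));	/* INSERT  */
	printf("read,  DIX only: %s\n", prot_op(0, 1, 0, 0));	/* INSERT  */
	printf("write, DIF+DIX:  %s\n", prot_op(1, 1, 1, 1));	/* CONVERT */
	return 0;
}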
412/*
413 * The virtual start sector is the one that was originally submitted
414 * by the block layer. Due to partitioning, MD/DM cloning, etc. the
415 * actual physical start sector is likely to be different. Remap
416 * protection information to match the physical LBA.
417 *
418 * From a protocol perspective there's a slight difference between
419 * Type 1 and 2. The latter uses 32-byte CDBs exclusively, and the
420 * reference tag is seeded in the CDB. This gives us the potential to
421 * avoid virt->phys remapping during write. However, at read time we
422 * don't know whether the virt sector is the same as when we wrote it
423 * (we could be reading from real disk as opposed to MD/DM device. So
424 * we always remap Type 2 making it identical to Type 1.
425 *
426 * Type 3 does not have a reference tag so no remapping is required.
427 */
428int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_sz)
429{
430 const int tuple_sz = sizeof(struct sd_dif_tuple);
431 struct bio *bio;
432 struct scsi_disk *sdkp;
433 struct sd_dif_tuple *sdt;
434 unsigned int i, j;
435 u32 phys, virt;
436
437 /* Already remapped? */
438 if (rq->cmd_flags & REQ_INTEGRITY)
439 return 0;
440
441 sdkp = rq->bio->bi_bdev->bd_disk->private_data;
442
443 if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION)
444 return 0;
445
446 rq->cmd_flags |= REQ_INTEGRITY;
447 phys = hw_sector & 0xffffffff;
448
449 __rq_for_each_bio(bio, rq) {
450 struct bio_vec *iv;
451
452 virt = bio->bi_integrity->bip_sector & 0xffffffff;
453
454 bip_for_each_vec(iv, bio->bi_integrity, i) {
455 sdt = kmap_atomic(iv->bv_page, KM_USER0)
456 + iv->bv_offset;
457
458 for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
459
460 if (be32_to_cpu(sdt->ref_tag) != virt)
461 goto error;
462
463 sdt->ref_tag = cpu_to_be32(phys);
464 virt++;
465 phys++;
466 }
467
468 kunmap_atomic(sdt, KM_USER0);
469 }
470 }
471
472 return 0;
473
474error:
475 kunmap_atomic(sdt, KM_USER0);
476 sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u\n",
477 __func__, virt, phys, be32_to_cpu(sdt->ref_tag));
478
479 return -EIO;
480}
481
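For Type 1 (and remapped Type 2) the reference tag must equal the low 32 bits of the physical LBA actually sent to the drive, while the block layer seeded it with the virtual sector it submitted. The remap walks every tuple, verifies the expected virtual value and overwrites it with the physical one. In miniature, over a flat host-endian array (the real code walks bio integrity vectors under kmap_atomic and works on big-endian tags):

#include <stdio.h>
#include <stdint.h>

struct tuple { uint16_t guard; uint16_t app; uint32_t ref; };

/* Remap ref tags from the submitted (virtual) LBA to the physical LBA. */
static int remap_ref_tags(struct tuple *t, unsigned int n,
			  uint32_t virt, uint32_t phys)
{
	for (unsigned int i = 0; i < n; i++, virt++, phys++) {
		if (t[i].ref != virt)
			return -1;	/* metadata no longer matches the request */
		t[i].ref = phys;
	}
	return 0;
}

int main(void)
{
	struct tuple t[4] = { { 0, 0, 100 }, { 0, 0, 101 },
			      { 0, 0, 102 }, { 0, 0, 103 } };

	/* A partition offset moved virtual sector 100 to physical sector 2148. */
	printf("%d, first ref now %u\n",
	       remap_ref_tags(t, 4, 100, 2148), (unsigned)t[0].ref); /* 0, 2148 */
	return 0;
}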
482/*
483 * Remap physical sector values in the reference tag to the virtual
484 * values expected by the block layer.
485 */
486void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
487{
488 const int tuple_sz = sizeof(struct sd_dif_tuple);
489 struct scsi_disk *sdkp;
490 struct bio *bio;
491 struct sd_dif_tuple *sdt;
492 unsigned int i, j, sectors, sector_sz;
493 u32 phys, virt;
494
495 sdkp = scsi_disk(scmd->request->rq_disk);
496
497 if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION || good_bytes == 0)
498 return;
499
500 sector_sz = scmd->device->sector_size;
501 sectors = good_bytes / sector_sz;
502
503 phys = scmd->request->sector & 0xffffffff;
504 if (sector_sz == 4096)
505 phys >>= 3;
506
507 __rq_for_each_bio(bio, scmd->request) {
508 struct bio_vec *iv;
509
510 virt = bio->bi_integrity->bip_sector & 0xffffffff;
511
512 bip_for_each_vec(iv, bio->bi_integrity, i) {
513 sdt = kmap_atomic(iv->bv_page, KM_USER0)
514 + iv->bv_offset;
515
516 for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
517
518 if (sectors == 0) {
519 kunmap_atomic(sdt, KM_USER0);
520 return;
521 }
522
523 if (be32_to_cpu(sdt->ref_tag) != phys &&
524 sdt->app_tag != 0xffff)
525 sdt->ref_tag = 0xffffffff; /* Bad ref */
526 else
527 sdt->ref_tag = cpu_to_be32(virt);
528
529 virt++;
530 phys++;
531 sectors--;
532 }
533
534 kunmap_atomic(sdt, KM_USER0);
535 }
536 }
537}
538
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 4684cc716aa4..c2bb53e3d941 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -17,7 +17,7 @@
17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support 17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
18 */ 18 */
19 19
20static const char *verstr = "20080224"; 20static const char *verstr = "20080504";
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23 23
@@ -631,7 +631,7 @@ static int cross_eof(struct scsi_tape * STp, int forward)
631/* Flush the write buffer (never need to write if variable blocksize). */ 631/* Flush the write buffer (never need to write if variable blocksize). */
632static int st_flush_write_buffer(struct scsi_tape * STp) 632static int st_flush_write_buffer(struct scsi_tape * STp)
633{ 633{
634 int offset, transfer, blks; 634 int transfer, blks;
635 int result; 635 int result;
636 unsigned char cmd[MAX_COMMAND_SIZE]; 636 unsigned char cmd[MAX_COMMAND_SIZE];
637 struct st_request *SRpnt; 637 struct st_request *SRpnt;
@@ -644,14 +644,10 @@ static int st_flush_write_buffer(struct scsi_tape * STp)
644 result = 0; 644 result = 0;
645 if (STp->dirty == 1) { 645 if (STp->dirty == 1) {
646 646
647 offset = (STp->buffer)->buffer_bytes; 647 transfer = STp->buffer->buffer_bytes;
648 transfer = ((offset + STp->block_size - 1) /
649 STp->block_size) * STp->block_size;
650 DEBC(printk(ST_DEB_MSG "%s: Flushing %d bytes.\n", 648 DEBC(printk(ST_DEB_MSG "%s: Flushing %d bytes.\n",
651 tape_name(STp), transfer)); 649 tape_name(STp), transfer));
652 650
653 memset((STp->buffer)->b_data + offset, 0, transfer - offset);
654
655 memset(cmd, 0, MAX_COMMAND_SIZE); 651 memset(cmd, 0, MAX_COMMAND_SIZE);
656 cmd[0] = WRITE_6; 652 cmd[0] = WRITE_6;
657 cmd[1] = 1; 653 cmd[1] = 1;
@@ -1670,6 +1666,7 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
1670 if (undone <= do_count) { 1666 if (undone <= do_count) {
1671 /* Only data from this write is not written */ 1667 /* Only data from this write is not written */
1672 count += undone; 1668 count += undone;
1669 b_point -= undone;
1673 do_count -= undone; 1670 do_count -= undone;
1674 if (STp->block_size) 1671 if (STp->block_size)
1675 blks = (transfer - undone) / STp->block_size; 1672 blks = (transfer - undone) / STp->block_size;
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index f308a0308829..3790906a77d1 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -467,7 +467,7 @@ stex_slave_alloc(struct scsi_device *sdev)
 	/* Cheat: usually extracted from Inquiry data */
 	sdev->tagged_supported = 1;
 
-	scsi_activate_tcq(sdev, sdev->host->can_queue);
+	scsi_activate_tcq(sdev, ST_CMD_PER_LUN);
 
 	return 0;
 }
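
The stex change caps each device's tag depth at ST_CMD_PER_LUN instead of handing every LUN the entire host can_queue. A toy calculation of why a bounded per-LUN depth matters when several LUNs share one host queue follows; the constants are illustrative, not the driver's actual values.

/*
 * Toy model: if every LUN may queue up to the full host depth, a busy
 * LUN can occupy all of the HBA's command slots.
 */
#include <stdio.h>

#define HOST_CAN_QUEUE	128	/* commands the HBA can hold in total */
#define NUM_LUNS	8
#define CMD_PER_LUN	16	/* bounded per-device tag depth */

int main(void)
{
	int greedy_old = HOST_CAN_QUEUE;	/* old: depth == host->can_queue */
	int greedy_new = CMD_PER_LUN;		/* new: bounded per-LUN depth */

	printf("host slots left for the other %d LUNs: old=%d new=%d\n",
	       NUM_LUNS - 1,
	       HOST_CAN_QUEUE - greedy_old,
	       HOST_CAN_QUEUE - greedy_new);
	return 0;
}
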
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 22a6aae78699..98df1651404f 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -5741,6 +5741,8 @@ void sym_hcb_free(struct sym_hcb *np)
 
 	for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) {
 		tp = &np->target[target];
+		if (tp->luntbl)
+			sym_mfree_dma(tp->luntbl, 256, "LUNTBL");
 #if SYM_CONF_MAX_LUN > 1
 		kfree(tp->lunmp);
 #endif
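
The sym_hipd.c hunk releases each target's DMA-allocated LUN table in sym_hcb_free(), guarded by a NULL check for targets that never allocated one, closing a leak in the teardown path. Below is a user-space sketch of the same mirror-image cleanup pattern; the types and the plain free() are illustrative assumptions (free(NULL) is a no-op, so the explicit guard used with the DMA allocator disappears here).

/*
 * Every per-target table allocated during setup is released in the
 * matching free path, and targets that never allocated one are simply
 * skipped.
 */
#include <stdlib.h>

#define MAX_TARGET 16

struct target {
	unsigned int *luntbl;	/* LUN lookup table, allocated on demand */
	void *lunmp;		/* per-LUN pointers (only if >1 LUN supported) */
};

struct hcb {
	struct target target[MAX_TARGET];
};

static void hcb_free(struct hcb *np)
{
	for (int t = 0; t < MAX_TARGET; t++) {
		struct target *tp = &np->target[t];

		free(tp->luntbl);	/* free(NULL) is a no-op */
		tp->luntbl = NULL;
		free(tp->lunmp);
		tp->lunmp = NULL;
	}
}

int main(void)
{
	struct hcb *np = calloc(1, sizeof(*np));

	if (!np)
		return 1;
	np->target[0].luntbl = calloc(64, sizeof(unsigned int));
	hcb_free(np);		/* table is released with its owner, no leak */
	free(np);
	return 0;
}
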
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 5b04ddfed26c..1723d71cbf3f 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -452,7 +452,7 @@ static int dc390_pci_map (struct dc390_srb* pSRB)
 		/* TODO: error handling */
 		if (pSRB->SGcount != 1)
 			error = 1;
-		DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __FUNCTION__, pcmd->sense_buffer, cmdp->saved_dma_handle));
+		DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __func__, pcmd->sense_buffer, cmdp->saved_dma_handle));
 	/* Map SG list */
 	} else if (scsi_sg_count(pcmd)) {
 		int nseg;
@@ -466,7 +466,7 @@ static int dc390_pci_map (struct dc390_srb* pSRB)
 		if (nseg < 0)
 			error = 1;
 		DEBUG1(printk("%s(): Mapped SG %p with %d (%d) elements\n",\
-			      __FUNCTION__, scsi_sglist(pcmd), nseg, scsi_sg_count(pcmd)));
+			      __func__, scsi_sglist(pcmd), nseg, scsi_sg_count(pcmd)));
 	/* Map single segment */
 	} else
 		pSRB->SGcount = 0;
@@ -483,11 +483,11 @@ static void dc390_pci_unmap (struct dc390_srb* pSRB)
 
 	if (pSRB->SRBFlag) {
 		pci_unmap_sg(pdev, &pSRB->Segmentx, 1, DMA_FROM_DEVICE);
-		DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __FUNCTION__, cmdp->saved_dma_handle));
+		DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __func__, cmdp->saved_dma_handle));
 	} else {
 		scsi_dma_unmap(pcmd);
 		DEBUG1(printk("%s(): Unmapped SG at %p with %d elements\n",
-			      __FUNCTION__, scsi_sglist(pcmd), scsi_sg_count(pcmd)));
+			      __func__, scsi_sglist(pcmd), scsi_sg_count(pcmd)));
 	}
 }
 
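
The tmscsim hunks, like the wd7000 and zalon ones that follow, replace the GCC-specific __FUNCTION__ spelling with C99's predefined __func__ identifier; the format strings and arguments are otherwise untouched. A minimal stand-alone example of the substitution:

/*
 * __func__ is a predefined identifier available in every function
 * body since C99, whereas __FUNCTION__ is a GCC extension.
 */
#include <stdio.h>

static void map_buffer(void)
{
	/* Expands to the enclosing function's name, "map_buffer". */
	printf("%s(): mapped buffer\n", __func__);
}

int main(void)
{
	map_buffer();
	return 0;
}
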
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index c975c01b3a02..d4c13561f4a6 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -148,7 +148,7 @@
  *
  * 2002/10/04 - Alan Cox <alan@redhat.com>
  *
- * Use dev_id for interrupts, kill __FUNCTION__ pasting
+ * Use dev_id for interrupts, kill __func__ pasting
  * Add a lock for the scb pool, clean up all other cli/sti usage stuff
  * Use the adapter lock for the other places we had the cli's
  *
@@ -640,12 +640,12 @@ static int __init wd7000_setup(char *str)
 	(void) get_options(str, ARRAY_SIZE(ints), ints);
 
 	if (wd7000_card_num >= NUM_CONFIGS) {
-		printk(KERN_ERR "%s: Too many \"wd7000=\" configurations in " "command line!\n", __FUNCTION__);
+		printk(KERN_ERR "%s: Too many \"wd7000=\" configurations in " "command line!\n", __func__);
 		return 0;
 	}
 
 	if ((ints[0] < 3) || (ints[0] > 5)) {
-		printk(KERN_ERR "%s: Error in command line! " "Usage: wd7000=<IRQ>,<DMA>,IO>[,<BUS_ON>" "[,<BUS_OFF>]]\n", __FUNCTION__);
+		printk(KERN_ERR "%s: Error in command line! " "Usage: wd7000=<IRQ>,<DMA>,IO>[,<BUS_ON>" "[,<BUS_OFF>]]\n", __func__);
 	} else {
 		for (i = 0; i < NUM_IRQS; i++)
 			if (ints[1] == wd7000_irq[i])
@@ -1642,7 +1642,7 @@ static int wd7000_biosparam(struct scsi_device *sdev,
 			ip[2] = info[2];
 
 			if (info[0] == 255)
-				printk(KERN_INFO "%s: current partition table is " "using extended translation.\n", __FUNCTION__);
+				printk(KERN_INFO "%s: current partition table is " "using extended translation.\n", __func__);
 		}
 	}
 
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 4b5f908d35c3..3c4a300494a4 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -68,11 +68,11 @@ lasi_scsi_clock(void * hpa, int defaultclock)
 	if (status == PDC_RET_OK) {
 		clock = (int) pdc_result[16];
 	} else {
-		printk(KERN_WARNING "%s: pdc_iodc_read returned %d\n", __FUNCTION__, status);
+		printk(KERN_WARNING "%s: pdc_iodc_read returned %d\n", __func__, status);
 		clock = defaultclock;
 	}
 
-	printk(KERN_DEBUG "%s: SCSI clock %d\n", __FUNCTION__, clock);
+	printk(KERN_DEBUG "%s: SCSI clock %d\n", __func__, clock);
 	return clock;
 }
 #endif
@@ -108,13 +108,13 @@ zalon_probe(struct parisc_device *dev)
 	 */
 	dev->irq = gsc_alloc_irq(&gsc_irq);
 
-	printk(KERN_INFO "%s: Zalon version %d, IRQ %d\n", __FUNCTION__,
+	printk(KERN_INFO "%s: Zalon version %d, IRQ %d\n", __func__,
 		zalon_vers, dev->irq);
 
 	__raw_writel(gsc_irq.txn_addr | gsc_irq.txn_data, zalon + IO_MODULE_EIM);
 
 	if (zalon_vers == 0)
-		printk(KERN_WARNING "%s: Zalon 1.1 or earlier\n", __FUNCTION__);
+		printk(KERN_WARNING "%s: Zalon 1.1 or earlier\n", __func__);
 
 	memset(&device, 0, sizeof(struct ncr_device));
 