Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/Kconfig | 1
-rw-r--r--  drivers/scsi/NCR5380.c | 137
-rw-r--r--  drivers/scsi/NCR5380.h | 32
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_pci.c | 18
-rw-r--r--  drivers/scsi/arm/acornscsi.c | 53
-rw-r--r--  drivers/scsi/arm/cumana_1.c | 3
-rw-r--r--  drivers/scsi/arm/oak.c | 3
-rw-r--r--  drivers/scsi/atari_NCR5380.c | 193
-rw-r--r--  drivers/scsi/atari_scsi.c | 24
-rw-r--r--  drivers/scsi/atari_scsi.h | 119
-rw-r--r--  drivers/scsi/be2iscsi/be.h | 11
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.h | 31
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.c | 12
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 84
-rw-r--r--  drivers/scsi/be2iscsi/be_main.h | 7
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c | 68
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.h | 2
-rw-r--r--  drivers/scsi/bfa/bfad.c | 2
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 16
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c | 64
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c | 4
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 2
-rw-r--r--  drivers/scsi/dtc.c | 2
-rw-r--r--  drivers/scsi/esas2r/esas2r_main.c | 2
-rw-r--r--  drivers/scsi/fnic/fnic.h | 5
-rw-r--r--  drivers/scsi/fnic/fnic_debugfs.c | 238
-rw-r--r--  drivers/scsi/fnic/fnic_fcs.c | 61
-rw-r--r--  drivers/scsi/fnic/fnic_main.c | 23
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c | 13
-rw-r--r--  drivers/scsi/fnic/fnic_trace.c | 326
-rw-r--r--  drivers/scsi/fnic/fnic_trace.h | 38
-rw-r--r--  drivers/scsi/g_NCR5380.c | 4
-rw-r--r--  drivers/scsi/g_NCR5380.h | 7
-rw-r--r--  drivers/scsi/hpsa.c | 297
-rw-r--r--  drivers/scsi/hpsa.h | 43
-rw-r--r--  drivers/scsi/hpsa_cmd.h | 49
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 13
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 2
-rw-r--r--  drivers/scsi/libiscsi.c | 22
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 23
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 258
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 60
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 318
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 6
-rw-r--r--  drivers/scsi/mac_scsi.c | 10
-rw-r--r--  drivers/scsi/mac_scsi.h | 10
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 15
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.c | 8
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.h | 2
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_ctl.c | 2
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 24
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 24
-rw-r--r--  drivers/scsi/mvsas/mv_94xx.c | 10
-rw-r--r--  drivers/scsi/mvsas/mv_94xx.h | 58
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 9
-rw-r--r--  drivers/scsi/osd/osd_initiator.c | 4
-rw-r--r--  drivers/scsi/osst.c | 2
-rw-r--r--  drivers/scsi/pas16.h | 3
-rw-r--r--  drivers/scsi/pm8001/pm8001_ctl.c | 5
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 13
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 49
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.h | 9
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 127
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 60
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 14
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 14
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 53
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 15
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 148
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 23
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 96
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.c | 64
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.h | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 87
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx2.c | 510
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx2.h | 50
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 105
-rw-r--r--  drivers/scsi/qla2xxx/qla_settings.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 9
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 867
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 98
-rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.c | 91
-rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.h | 17
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 47
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.h | 5
-rw-r--r--  drivers/scsi/qla4xxx/ql4_83xx.c | 106
-rw-r--r--  drivers/scsi/qla4xxx/ql4_83xx.h | 44
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 1
-rw-r--r--  drivers/scsi/qla4xxx/ql4_fw.h | 4
-rw-r--r--  drivers/scsi/qla4xxx/ql4_glbl.h | 3
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 32
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c | 12
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 8
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 458
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.h | 3
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 41
-rw-r--r--  drivers/scsi/qla4xxx/ql4_version.h | 2
-rw-r--r--  drivers/scsi/scsi_debug.c | 8
-rw-r--r--  drivers/scsi/scsi_error.c | 30
-rw-r--r--  drivers/scsi/scsi_lib.c | 227
-rw-r--r--  drivers/scsi/scsi_sysctl.c | 6
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 1
-rw-r--r--  drivers/scsi/sd.c | 53
-rw-r--r--  drivers/scsi/sg.c | 3
-rw-r--r--  drivers/scsi/sr.c | 19
-rw-r--r--  drivers/scsi/st.c | 2
-rw-r--r--  drivers/scsi/sun3_NCR5380.c | 195
-rw-r--r--  drivers/scsi/sun3_scsi.c | 241
-rw-r--r--  drivers/scsi/sun3_scsi.h | 199
-rw-r--r--  drivers/scsi/sun3_scsi_vme.c | 588
-rw-r--r--  drivers/scsi/t128.c | 4
-rw-r--r--  drivers/scsi/t128.h | 7
-rw-r--r--  drivers/scsi/ufs/ufs.h | 36
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 722
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 22
-rw-r--r--  drivers/scsi/ufs/ufshci.h | 32
-rw-r--r--  drivers/scsi/virtio_scsi.c | 199
142 files changed, 5631 insertions(+), 3208 deletions(-)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 02832d64d918..baca5897039f 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1773,6 +1773,7 @@ config SCSI_BFA_FC
 config SCSI_VIRTIO
 	tristate "virtio-scsi support"
 	depends on VIRTIO
+	select BLK_DEV_INTEGRITY
 	help
 	  This is the virtual HBA driver for virtio. If the kernel will
 	  be used in a virtual machine, say Y or M.
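The one-line Kconfig change above is what lets virtio-scsi handle T10 protection information: `select BLK_DEV_INTEGRITY` force-enables the block-layer integrity framework rather than merely depending on it, so the framework is always built whenever SCSI_VIRTIO is. As a hedged illustration only (not the literal virtio_scsi.c change from this series), a SCSI low-level driver that advertises DIF/DIX does so roughly like this, which is what pulls the integrity code into play:

/* Illustrative sketch: how an LLD typically advertises DIF/DIX to the
 * SCSI midlayer. The helpers are the real scsi_host.h ones, but this
 * particular protection mask is an assumption, not quoted from the patch. */
#include <scsi/scsi_host.h>

static void example_enable_dif(struct Scsi_Host *shost)
{
	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
				  SHOST_DIX_TYPE1_PROTECTION);
	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
}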
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index bcd223868227..93d13fc9a293 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -27,8 +27,6 @@
  */
 
 /*
- * $Log: NCR5380.c,v $
-
  * Revision 1.10 1998/9/2	Alan Cox
  * (alan@lxorguk.ukuu.org.uk)
  * Fixed up the timer lockups reported so far. Things still suck. Looking
@@ -89,13 +87,6 @@
 #include <scsi/scsi_dbg.h>
 #include <scsi/scsi_transport_spi.h>
 
-#ifndef NDEBUG
-#define NDEBUG 0
-#endif
-#ifndef NDEBUG_ABORT
-#define NDEBUG_ABORT 0
-#endif
-
 #if (NDEBUG & NDEBUG_LISTS)
 #define LIST(x,y) {printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); }
 #define REMOVE(w,x,y,z) {printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); }
@@ -1005,7 +996,7 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)
 		LIST(cmd, tmp);
 		tmp->host_scribble = (unsigned char *) cmd;
 	}
-	dprintk(NDEBUG_QUEUES, ("scsi%d : command added to %s of queue\n", instance->host_no, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"));
+	dprintk(NDEBUG_QUEUES, "scsi%d : command added to %s of queue\n", instance->host_no, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");
 
 	/* Run the coroutine if it isn't already running. */
 	/* Kick off command processing */
@@ -1040,7 +1031,7 @@ static void NCR5380_main(struct work_struct *work)
 		/* Lock held here */
 		done = 1;
 		if (!hostdata->connected && !hostdata->selecting) {
-			dprintk(NDEBUG_MAIN, ("scsi%d : not connected\n", instance->host_no));
+			dprintk(NDEBUG_MAIN, "scsi%d : not connected\n", instance->host_no);
 			/*
 			 * Search through the issue_queue for a command destined
 			 * for a target that's not busy.
@@ -1048,7 +1039,7 @@ static void NCR5380_main(struct work_struct *work)
 			for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble)
 			{
 				if (prev != tmp)
-					dprintk(NDEBUG_LISTS, ("MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->target, hostdata->busy[tmp->target], tmp->lun));
+					dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun);
 				/* When we find one, remove it from the issue queue. */
 				if (!(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun))) {
 					if (prev) {
@@ -1066,7 +1057,7 @@ static void NCR5380_main(struct work_struct *work)
 					 * On failure, we must add the command back to the
 					 * issue queue so we can keep trying.
 					 */
-					dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, ("scsi%d : main() : command for target %d lun %d removed from issue_queue\n", instance->host_no, tmp->target, tmp->lun));
+					dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main() : command for target %d lun %d removed from issue_queue\n", instance->host_no, tmp->device->id, tmp->device->lun);
 
 					/*
 					 * A successful selection is defined as one that
@@ -1095,7 +1086,7 @@ static void NCR5380_main(struct work_struct *work)
 						tmp->host_scribble = (unsigned char *) hostdata->issue_queue;
 						hostdata->issue_queue = tmp;
 						done = 0;
-						dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, ("scsi%d : main(): select() failed, returned to issue_queue\n", instance->host_no));
+						dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main(): select() failed, returned to issue_queue\n", instance->host_no);
 					}
 					/* lock held here still */
 				}	/* if target/lun is not busy */
@@ -1125,9 +1116,9 @@ static void NCR5380_main(struct work_struct *work)
 #endif
 		    && (!hostdata->time_expires || time_before_eq(hostdata->time_expires, jiffies))
 		    ) {
-			dprintk(NDEBUG_MAIN, ("scsi%d : main() : performing information transfer\n", instance->host_no));
+			dprintk(NDEBUG_MAIN, "scsi%d : main() : performing information transfer\n", instance->host_no);
 			NCR5380_information_transfer(instance);
-			dprintk(NDEBUG_MAIN, ("scsi%d : main() : done set false\n", instance->host_no));
+			dprintk(NDEBUG_MAIN, "scsi%d : main() : done set false\n", instance->host_no);
 			done = 0;
 		} else
 			break;
@@ -1159,8 +1150,8 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id)
 	unsigned char basr;
 	unsigned long flags;
 
-	dprintk(NDEBUG_INTR, ("scsi : NCR5380 irq %d triggered\n",
-	    instance->irq));
+	dprintk(NDEBUG_INTR, "scsi : NCR5380 irq %d triggered\n",
+		instance->irq);
 
 	do {
 		done = 1;
@@ -1173,14 +1164,14 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id)
 			NCR5380_dprint(NDEBUG_INTR, instance);
 			if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
 				done = 0;
-				dprintk(NDEBUG_INTR, ("scsi%d : SEL interrupt\n", instance->host_no));
+				dprintk(NDEBUG_INTR, "scsi%d : SEL interrupt\n", instance->host_no);
 				NCR5380_reselect(instance);
 				(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
 			} else if (basr & BASR_PARITY_ERROR) {
-				dprintk(NDEBUG_INTR, ("scsi%d : PARITY interrupt\n", instance->host_no));
+				dprintk(NDEBUG_INTR, "scsi%d : PARITY interrupt\n", instance->host_no);
 				(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
 			} else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
-				dprintk(NDEBUG_INTR, ("scsi%d : RESET interrupt\n", instance->host_no));
+				dprintk(NDEBUG_INTR, "scsi%d : RESET interrupt\n", instance->host_no);
 				(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
 			} else {
 #if defined(REAL_DMA)
@@ -1210,7 +1201,7 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id)
 					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 				}
 #else
-				dprintk(NDEBUG_INTR, ("scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG)));
+				dprintk(NDEBUG_INTR, "scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));
 				(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
 #endif
 			}
@@ -1304,7 +1295,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
 	hostdata->restart_select = 0;
 
 	NCR5380_dprint(NDEBUG_ARBITRATION, instance);
-	dprintk(NDEBUG_ARBITRATION, ("scsi%d : starting arbitration, id = %d\n", instance->host_no, instance->this_id));
+	dprintk(NDEBUG_ARBITRATION, "scsi%d : starting arbitration, id = %d\n", instance->host_no, instance->this_id);
 
 	/*
 	 * Set the phase bits to 0, otherwise the NCR5380 won't drive the
@@ -1333,7 +1324,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
 		goto failed;
 	}
 
-	dprintk(NDEBUG_ARBITRATION, ("scsi%d : arbitration complete\n", instance->host_no));
+	dprintk(NDEBUG_ARBITRATION, "scsi%d : arbitration complete\n", instance->host_no);
 
 	/*
 	 * The arbitration delay is 2.2us, but this is a minimum and there is
@@ -1347,7 +1338,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
 	/* Check for lost arbitration */
 	if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) {
 		NCR5380_write(MODE_REG, MR_BASE);
-		dprintk(NDEBUG_ARBITRATION, ("scsi%d : lost arbitration, deasserting MR_ARBITRATE\n", instance->host_no));
+		dprintk(NDEBUG_ARBITRATION, "scsi%d : lost arbitration, deasserting MR_ARBITRATE\n", instance->host_no);
 		goto failed;
 	}
 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL);
@@ -1360,7 +1351,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
 	    (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) {
 		NCR5380_write(MODE_REG, MR_BASE);
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-		dprintk(NDEBUG_ARBITRATION, ("scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n", instance->host_no));
+		dprintk(NDEBUG_ARBITRATION, "scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n", instance->host_no);
 		goto failed;
 	}
 	/*
@@ -1370,7 +1361,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
 
 	udelay(2);
 
-	dprintk(NDEBUG_ARBITRATION, ("scsi%d : won arbitration\n", instance->host_no));
+	dprintk(NDEBUG_ARBITRATION, "scsi%d : won arbitration\n", instance->host_no);
 
 	/*
 	 * Now that we have won arbitration, start Selection process, asserting
@@ -1422,7 +1413,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
 
 	udelay(1);
 
-	dprintk(NDEBUG_SELECTION, ("scsi%d : selecting target %d\n", instance->host_no, scmd_id(cmd)));
+	dprintk(NDEBUG_SELECTION, "scsi%d : selecting target %d\n", instance->host_no, scmd_id(cmd));
 
 	/*
 	 * The SCSI specification calls for a 250 ms timeout for the actual
@@ -1487,7 +1478,7 @@ part2:
 		collect_stats(hostdata, cmd);
 		cmd->scsi_done(cmd);
 		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-		dprintk(NDEBUG_SELECTION, ("scsi%d : target did not respond within 250ms\n", instance->host_no));
+		dprintk(NDEBUG_SELECTION, "scsi%d : target did not respond within 250ms\n", instance->host_no);
 		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
 		return 0;
 	}
@@ -1520,7 +1511,7 @@ part2:
 		goto failed;
 	}
 
-	dprintk(NDEBUG_SELECTION, ("scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id));
+	dprintk(NDEBUG_SELECTION, "scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id);
 	tmp[0] = IDENTIFY(((instance->irq == SCSI_IRQ_NONE) ? 0 : 1), cmd->device->lun);
 
 	len = 1;
@@ -1530,7 +1521,7 @@ part2:
 	data = tmp;
 	phase = PHASE_MSGOUT;
 	NCR5380_transfer_pio(instance, &phase, &len, &data);
-	dprintk(NDEBUG_SELECTION, ("scsi%d : nexus established.\n", instance->host_no));
+	dprintk(NDEBUG_SELECTION, "scsi%d : nexus established.\n", instance->host_no);
 	/* XXX need to handle errors here */
 	hostdata->connected = cmd;
 	hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
@@ -1583,9 +1574,9 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase
 	NCR5380_setup(instance);
 
 	if (!(p & SR_IO))
-		dprintk(NDEBUG_PIO, ("scsi%d : pio write %d bytes\n", instance->host_no, c));
+		dprintk(NDEBUG_PIO, "scsi%d : pio write %d bytes\n", instance->host_no, c);
 	else
-		dprintk(NDEBUG_PIO, ("scsi%d : pio read %d bytes\n", instance->host_no, c));
+		dprintk(NDEBUG_PIO, "scsi%d : pio read %d bytes\n", instance->host_no, c);
 
 	/*
 	 * The NCR5380 chip will only drive the SCSI bus when the
@@ -1620,11 +1611,11 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase
 			break;
 		}
 
-		dprintk(NDEBUG_HANDSHAKE, ("scsi%d : REQ detected\n", instance->host_no));
+		dprintk(NDEBUG_HANDSHAKE, "scsi%d : REQ detected\n", instance->host_no);
 
 		/* Check for phase mismatch */
 		if ((tmp & PHASE_MASK) != p) {
-			dprintk(NDEBUG_HANDSHAKE, ("scsi%d : phase mismatch\n", instance->host_no));
+			dprintk(NDEBUG_HANDSHAKE, "scsi%d : phase mismatch\n", instance->host_no);
 			NCR5380_dprint_phase(NDEBUG_HANDSHAKE, instance);
 			break;
 		}
@@ -1660,7 +1651,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase
 
 		/* FIXME - if this fails bus reset ?? */
 		NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 5*HZ);
-		dprintk(NDEBUG_HANDSHAKE, ("scsi%d : req false, handshake complete\n", instance->host_no));
+		dprintk(NDEBUG_HANDSHAKE, "scsi%d : req false, handshake complete\n", instance->host_no);
 
 /*
  * We have several special cases to consider during REQ/ACK handshaking :
@@ -1681,7 +1672,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase
 		}
 	} while (--c);
 
-	dprintk(NDEBUG_PIO, ("scsi%d : residual %d\n", instance->host_no, c));
+	dprintk(NDEBUG_PIO, "scsi%d : residual %d\n", instance->host_no, c);
 
 	*count = c;
 	*data = d;
@@ -1828,7 +1819,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
 		c -= 2;
 	}
 #endif
-	dprintk(NDEBUG_DMA, ("scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n", instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d));
+	dprintk(NDEBUG_DMA, "scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n", instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d);
 	hostdata->dma_len = (p & SR_IO) ? NCR5380_dma_read_setup(instance, d, c) : NCR5380_dma_write_setup(instance, d, c);
 #endif
 
@@ -1857,7 +1848,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
 	NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE);
 #endif /* def REAL_DMA */
 
-	dprintk(NDEBUG_DMA, ("scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG)));
+	dprintk(NDEBUG_DMA, "scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG));
 
 	/*
 	 * On the PAS16 at least I/O recovery delays are not needed here.
@@ -1934,7 +1925,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
 		}
 	}
 
-	dprintk(NDEBUG_DMA, ("scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n", instance->host_no, tmp, NCR5380_read(STATUS_REG)));
+	dprintk(NDEBUG_DMA, "scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n", instance->host_no, tmp, NCR5380_read(STATUS_REG));
 
 	NCR5380_write(MODE_REG, MR_BASE);
 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
@@ -1948,7 +1939,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
 #ifdef READ_OVERRUNS
 	if (*phase == p && (p & SR_IO) && residue == 0) {
 		if (overrun) {
-			dprintk(NDEBUG_DMA, ("Got an input overrun, using saved byte\n"));
+			dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n");
 			**data = saved_data;
 			*data += 1;
 			*count -= 1;
@@ -1957,13 +1948,13 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
 			printk("No overrun??\n");
 			cnt = toPIO = 2;
 		}
-		dprintk(NDEBUG_DMA, ("Doing %d-byte PIO to 0x%X\n", cnt, *data));
+		dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%X\n", cnt, *data);
 		NCR5380_transfer_pio(instance, phase, &cnt, data);
 		*count -= toPIO - cnt;
 	}
 #endif
 
-	dprintk(NDEBUG_DMA, ("Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count)));
+	dprintk(NDEBUG_DMA, "Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count));
 	return 0;
 
 #elif defined(REAL_DMA)
@@ -2013,7 +2004,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
 		foo = NCR5380_pwrite(instance, d, c);
 #else
 		int timeout;
-		dprintk(NDEBUG_C400_PWRITE, ("About to pwrite %d bytes\n", c));
+		dprintk(NDEBUG_C400_PWRITE, "About to pwrite %d bytes\n", c);
 		if (!(foo = NCR5380_pwrite(instance, d, c))) {
 			/*
 			 * Wait for the last byte to be sent. If REQ is being asserted for
@@ -2024,19 +2015,19 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
 				while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH));
 
 				if (!timeout)
-					dprintk(NDEBUG_LAST_BYTE_SENT, ("scsi%d : timed out on last byte\n", instance->host_no));
+					dprintk(NDEBUG_LAST_BYTE_SENT, "scsi%d : timed out on last byte\n", instance->host_no);
 
 				if (hostdata->flags & FLAG_CHECK_LAST_BYTE_SENT) {
 					hostdata->flags &= ~FLAG_CHECK_LAST_BYTE_SENT;
 					if (NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT) {
 						hostdata->flags |= FLAG_HAS_LAST_BYTE_SENT;
-						dprintk(NDEBUG_LAST_WRITE_SENT, ("scsi%d : last bit sent works\n", instance->host_no));
+						dprintk(NDEBUG_LAST_BYTE_SENT, "scsi%d : last byte sent works\n", instance->host_no);
 					}
 				}
 			} else {
-				dprintk(NDEBUG_C400_PWRITE, ("Waiting for LASTBYTE\n"));
+				dprintk(NDEBUG_C400_PWRITE, "Waiting for LASTBYTE\n");
 				while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT));
-				dprintk(NDEBUG_C400_PWRITE, ("Got LASTBYTE\n"));
+				dprintk(NDEBUG_C400_PWRITE, "Got LASTBYTE\n");
 			}
 		}
 #endif
@@ -2045,9 +2036,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 
 	if ((!(p & SR_IO)) && (hostdata->flags & FLAG_NCR53C400)) {
-		dprintk(NDEBUG_C400_PWRITE, ("53C400w: Checking for IRQ\n"));
+		dprintk(NDEBUG_C400_PWRITE, "53C400w: Checking for IRQ\n");
 		if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_IRQ) {
-			dprintk(NDEBUG_C400_PWRITE, ("53C400w: got it, reading reset interrupt reg\n"));
+			dprintk(NDEBUG_C400_PWRITE, "53C400w: got it, reading reset interrupt reg\n");
 			NCR5380_read(RESET_PARITY_INTERRUPT_REG);
 		} else {
 			printk("53C400w: IRQ NOT THERE!\n");
@@ -2139,7 +2130,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
 					--cmd->SCp.buffers_residual;
 					cmd->SCp.this_residual = cmd->SCp.buffer->length;
 					cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
-					dprintk(NDEBUG_INFORMATION, ("scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual));
+					dprintk(NDEBUG_INFORMATION, "scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual);
 				}
 				/*
 				 * The preferred transfer method is going to be
@@ -2219,7 +2210,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
 				case LINKED_FLG_CMD_COMPLETE:
 					/* Accept message by clearing ACK */
 					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-					dprintk(NDEBUG_LINKED, ("scsi%d : target %d lun %d linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun));
+					dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %d linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun);
 					/*
 					 * Sanity check : A linked command should only terminate with
 					 * one of these messages if there are more linked commands
@@ -2235,7 +2226,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
 					/* The next command is still part of this process */
 					cmd->next_link->tag = cmd->tag;
 					cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
-					dprintk(NDEBUG_LINKED, ("scsi%d : target %d lun %d linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun));
+					dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %d linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun);
 					collect_stats(hostdata, cmd);
 					cmd->scsi_done(cmd);
 					cmd = hostdata->connected;
@@ -2247,7 +2238,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
 					sink = 1;
 					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 					hostdata->connected = NULL;
-					dprintk(NDEBUG_QUEUES, ("scsi%d : command for target %d, lun %d completed\n", instance->host_no, cmd->device->id, cmd->device->lun));
+					dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d, lun %d completed\n", instance->host_no, cmd->device->id, cmd->device->lun);
 					hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
 
 					/*
@@ -2281,13 +2272,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
 					if ((cmd->cmnd[0] != REQUEST_SENSE) && (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
 						scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
 
-						dprintk(NDEBUG_AUTOSENSE, ("scsi%d : performing request sense\n", instance->host_no));
+						dprintk(NDEBUG_AUTOSENSE, "scsi%d : performing request sense\n", instance->host_no);
 
 						LIST(cmd, hostdata->issue_queue);
 						cmd->host_scribble = (unsigned char *)
 						    hostdata->issue_queue;
 						hostdata->issue_queue = (Scsi_Cmnd *) cmd;
-						dprintk(NDEBUG_QUEUES, ("scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no));
+						dprintk(NDEBUG_QUEUES, "scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no);
 					} else
 #endif /* def AUTOSENSE */
 					{
@@ -2327,7 +2318,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
 					    hostdata->disconnected_queue;
 					hostdata->connected = NULL;
 					hostdata->disconnected_queue = cmd;
-					dprintk(NDEBUG_QUEUES, ("scsi%d : command for target %d lun %d was moved from connected to" " the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun));
+					dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d lun %d was moved from connected to" " the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun);
 					/*
 					 * Restore phase bits to 0 so an interrupted selection,
 					 * arbitration can resume.
@@ -2373,14 +2364,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
 					extended_msg[0] = EXTENDED_MESSAGE;
 					/* Accept first byte by clearing ACK */
 					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-					dprintk(NDEBUG_EXTENDED, ("scsi%d : receiving extended message\n", instance->host_no));
+					dprintk(NDEBUG_EXTENDED, "scsi%d : receiving extended message\n", instance->host_no);
 
 					len = 2;
 					data = extended_msg + 1;
 					phase = PHASE_MSGIN;
 					NCR5380_transfer_pio(instance, &phase, &len, &data);
 
-					dprintk(NDEBUG_EXTENDED, ("scsi%d : length=%d, code=0x%02x\n", instance->host_no, (int) extended_msg[1], (int) extended_msg[2]));
+					dprintk(NDEBUG_EXTENDED, "scsi%d : length=%d, code=0x%02x\n", instance->host_no, (int) extended_msg[1], (int) extended_msg[2]);
 
 					if (!len && extended_msg[1] <= (sizeof(extended_msg) - 1)) {
 						/* Accept third byte by clearing ACK */
@@ -2390,7 +2381,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
 						phase = PHASE_MSGIN;
 
 						NCR5380_transfer_pio(instance, &phase, &len, &data);
-						dprintk(NDEBUG_EXTENDED, ("scsi%d : message received, residual %d\n", instance->host_no, len));
+						dprintk(NDEBUG_EXTENDED, "scsi%d : message received, residual %d\n", instance->host_no, len);
 
 						switch (extended_msg[2]) {
 						case EXTENDED_SDTR:
@@ -2456,7 +2447,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
 				NCR5380_transfer_pio(instance, &phase, &len, &data);
 				if (!cmd->device->disconnect && should_disconnect(cmd->cmnd[0])) {
 					NCR5380_set_timer(hostdata, USLEEP_SLEEP);
-					dprintk(NDEBUG_USLEEP, ("scsi%d : issued command, sleeping until %ul\n", instance->host_no, hostdata->time_expires));
+					dprintk(NDEBUG_USLEEP, "scsi%d : issued command, sleeping until %lu\n", instance->host_no, hostdata->time_expires);
 					return;
 				}
 				break;
@@ -2468,7 +2459,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
 				break;
 			default:
 				printk("scsi%d : unknown phase\n", instance->host_no);
-				NCR5380_dprint(NDEBUG_ALL, instance);
+				NCR5380_dprint(NDEBUG_ANY, instance);
 			}	/* switch(phase) */
 		}		/* if (tmp * SR_REQ) */
 		else {
@@ -2476,7 +2467,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
 			 */
 			if (!cmd->device->disconnect && time_after_eq(jiffies, poll_time)) {
 				NCR5380_set_timer(hostdata, USLEEP_SLEEP);
-				dprintk(NDEBUG_USLEEP, ("scsi%d : poll timed out, sleeping until %ul\n", instance->host_no, hostdata->time_expires));
+				dprintk(NDEBUG_USLEEP, "scsi%d : poll timed out, sleeping until %lu\n", instance->host_no, hostdata->time_expires);
 				return;
 			}
 		}
@@ -2517,7 +2508,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) {
 	hostdata->restart_select = 1;
 
 	target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
-	dprintk(NDEBUG_SELECTION, ("scsi%d : reselect\n", instance->host_no));
+	dprintk(NDEBUG_SELECTION, "scsi%d : reselect\n", instance->host_no);
 
 	/*
 	 * At this point, we have detected that our SCSI ID is on the bus,
@@ -2597,7 +2588,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) {
 		do_abort(instance);
 	} else {
 		hostdata->connected = tmp;
-		dprintk(NDEBUG_RESELECTION, ("scsi%d : nexus established, target = %d, lun = %d, tag = %d\n", instance->host_no, tmp->target, tmp->lun, tmp->tag));
+		dprintk(NDEBUG_RESELECTION, "scsi%d : nexus established, target = %d, lun = %d, tag = %d\n", instance->host_no, tmp->device->id, tmp->device->lun, tmp->tag);
 	}
 }
 
@@ -2682,8 +2673,8 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
 
 	NCR5380_setup(instance);
 
-	dprintk(NDEBUG_ABORT, ("scsi%d : abort called\n", instance->host_no));
-	dprintk(NDEBUG_ABORT, ("        basr 0x%X, sr 0x%X\n", NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG)));
+	dprintk(NDEBUG_ABORT, "scsi%d : abort called\n", instance->host_no);
+	dprintk(NDEBUG_ABORT, "        basr 0x%X, sr 0x%X\n", NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG));
 
 #if 0
 /*
@@ -2693,7 +2684,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
 */
 
 	if (hostdata->connected == cmd) {
-		dprintk(NDEBUG_ABORT, ("scsi%d : aborting connected command\n", instance->host_no));
+		dprintk(NDEBUG_ABORT, "scsi%d : aborting connected command\n", instance->host_no);
 		hostdata->aborted = 1;
 /*
 * We should perform BSY checking, and make sure we haven't slipped
@@ -2721,14 +2712,14 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
 * from the issue queue.
 */
 
-	dprintk(NDEBUG_ABORT, ("scsi%d : abort going into loop.\n", instance->host_no));
+	dprintk(NDEBUG_ABORT, "scsi%d : abort going into loop.\n", instance->host_no);
 	for (prev = (Scsi_Cmnd **) & (hostdata->issue_queue), tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp; prev = (Scsi_Cmnd **) & (tmp->host_scribble), tmp = (Scsi_Cmnd *) tmp->host_scribble)
 		if (cmd == tmp) {
 			REMOVE(5, *prev, tmp, tmp->host_scribble);
 			(*prev) = (Scsi_Cmnd *) tmp->host_scribble;
 			tmp->host_scribble = NULL;
 			tmp->result = DID_ABORT << 16;
-			dprintk(NDEBUG_ABORT, ("scsi%d : abort removed command from issue queue.\n", instance->host_no));
+			dprintk(NDEBUG_ABORT, "scsi%d : abort removed command from issue queue.\n", instance->host_no);
 			tmp->scsi_done(tmp);
 			return SUCCESS;
 		}
@@ -2750,7 +2741,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
 */
 
 	if (hostdata->connected) {
-		dprintk(NDEBUG_ABORT, ("scsi%d : abort failed, command connected.\n", instance->host_no));
+		dprintk(NDEBUG_ABORT, "scsi%d : abort failed, command connected.\n", instance->host_no);
 		return FAILED;
 	}
 /*
@@ -2780,11 +2771,11 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
 
 	for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble)
 		if (cmd == tmp) {
-			dprintk(NDEBUG_ABORT, ("scsi%d : aborting disconnected command.\n", instance->host_no));
+			dprintk(NDEBUG_ABORT, "scsi%d : aborting disconnected command.\n", instance->host_no);
 
 			if (NCR5380_select(instance, cmd, (int) cmd->tag))
 				return FAILED;
-			dprintk(NDEBUG_ABORT, ("scsi%d : nexus reestablished.\n", instance->host_no));
+			dprintk(NDEBUG_ABORT, "scsi%d : nexus reestablished.\n", instance->host_no);
 
 			do_abort(instance);
 
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 14964d0a0e9d..c79ddfa6f53c 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -21,10 +21,6 @@
  * 1+ (800) 334-5454
  */
 
-/*
- * $Log: NCR5380.h,v $
- */
-
 #ifndef NCR5380_H
 #define NCR5380_H
 
@@ -60,6 +56,9 @@
 #define NDEBUG_C400_PREAD	0x100000
 #define NDEBUG_C400_PWRITE	0x200000
 #define NDEBUG_LISTS		0x400000
+#define NDEBUG_ABORT		0x800000
+#define NDEBUG_TAGS		0x1000000
+#define NDEBUG_MERGING		0x2000000
 
 #define NDEBUG_ANY		0xFFFFFFFFUL
 
@@ -292,9 +291,24 @@ struct NCR5380_hostdata {
 
 #ifdef __KERNEL__
 
-#define dprintk(a,b)			do {} while(0)
-#define NCR5380_dprint(a,b)		do {} while(0)
-#define NCR5380_dprint_phase(a,b)	do {} while(0)
+#ifndef NDEBUG
+#define NDEBUG (0)
+#endif
+
+#define dprintk(flg, fmt, ...) \
+	do { if ((NDEBUG) & (flg)) pr_debug(fmt, ## __VA_ARGS__); } while (0)
+
+#if NDEBUG
+#define NCR5380_dprint(flg, arg) \
+	do { if ((NDEBUG) & (flg)) NCR5380_print(arg); } while (0)
+#define NCR5380_dprint_phase(flg, arg) \
+	do { if ((NDEBUG) & (flg)) NCR5380_print_phase(arg); } while (0)
+static void NCR5380_print_phase(struct Scsi_Host *instance);
+static void NCR5380_print(struct Scsi_Host *instance);
+#else
+#define NCR5380_dprint(flg, arg)       do {} while (0)
+#define NCR5380_dprint_phase(flg, arg) do {} while (0)
+#endif
 
 #if defined(AUTOPROBE_IRQ)
 static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible);
@@ -307,10 +321,6 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id);
 #endif
 static void NCR5380_main(struct work_struct *work);
 static void __maybe_unused NCR5380_print_options(struct Scsi_Host *instance);
-#ifdef NDEBUG
-static void NCR5380_print_phase(struct Scsi_Host *instance);
-static void NCR5380_print(struct Scsi_Host *instance);
-#endif
 static int NCR5380_abort(Scsi_Cmnd * cmd);
 static int NCR5380_bus_reset(Scsi_Cmnd * cmd);
 static int NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
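The NCR5380.h hunks above replace the old no-op dprintk((...)) double-parenthesis convention with a variadic macro that compiles the call away whenever the requested NDEBUG flag bit is clear; that is why every call site in NCR5380.c loses its inner parentheses. A minimal user-space sketch of the same pattern (flag values assumed for illustration, not the header's actual assignments):

#include <stdio.h>

#define NDEBUG_QUEUES	0x8		/* assumed value, for the sketch only */
#define NDEBUG		(NDEBUG_QUEUES)	/* enable just one debug class */

#define dprintk(flg, fmt, ...) \
	do { if ((NDEBUG) & (flg)) printf(fmt, ##__VA_ARGS__); } while (0)

int main(void)
{
	/* Emitted: NDEBUG & NDEBUG_QUEUES is a non-zero constant. */
	dprintk(NDEBUG_QUEUES, "scsi%d : command added to %s of queue\n", 0, "tail");
	/* With NDEBUG defined as 0, the condition is constant-false and the
	 * compiler discards the call, arguments and all, while still
	 * type-checking the format string at every call site. */
	return 0;
}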
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index 14b5f8d0e7f4..cc9bd26f5d1a 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -827,7 +827,7 @@ ahd_pci_intr(struct ahd_softc *ahd)
 		for (bit = 0; bit < 8; bit++) {
 
 			if ((pci_status[i] & (0x1 << bit)) != 0) {
-				static const char *s;
+				const char *s;
 
 				s = pci_status_strings[bit];
 				if (i == 7/*TARG*/ && bit == 3)
@@ -887,23 +887,15 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)
 
 		for (bit = 0; bit < 8; bit++) {
 
-			if ((split_status[i] & (0x1 << bit)) != 0) {
-				static const char *s;
-
-				s = split_status_strings[bit];
-				printk(s, ahd_name(ahd),
+			if ((split_status[i] & (0x1 << bit)) != 0)
+				printk(split_status_strings[bit], ahd_name(ahd),
 				       split_status_source[i]);
-			}
 
 			if (i > 1)
 				continue;
 
-			if ((sg_split_status[i] & (0x1 << bit)) != 0) {
-				static const char *s;
-
-				s = split_status_strings[bit];
-				printk(s, ahd_name(ahd), "SG");
-			}
+			if ((sg_split_status[i] & (0x1 << bit)) != 0)
+				printk(split_status_strings[bit], ahd_name(ahd), "SG");
 		}
 	}
 	/*
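Both aic79xx hunks delete the same anti-pattern: a block-scope static pointer that is unconditionally reassigned before each use. The static qualifier buys nothing here; it moves the variable from the stack into static storage and, in principle, makes it shared between CPUs running the interrupt handler concurrently. A compact before/after sketch of the idea (standalone illustration, not the driver code):

#include <stdio.h>

static const char *const status_strings[] = { "ok", "split error" };

/* Before: "static" pointlessly persists s across calls as shared state. */
void report_old(int bit)
{
	static const char *s;

	s = status_strings[bit & 1];
	printf("%s\n", s);
}

/* After: no intermediate variable at all, as in the patched code. */
void report_new(int bit)
{
	printf("%s\n", status_strings[bit & 1]);
}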
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 059ff477a398..2e797a367608 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -62,13 +62,6 @@
  */
 #undef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
 /*
- * SCSI-II Linked command support.
- *
- * The higher level code doesn't support linked commands yet, and so the option
- * is undef'd here.
- */
-#undef CONFIG_SCSI_ACORNSCSI_LINK
-/*
  * SCSI-II Synchronous transfer support.
  *
  * Tried and tested...
@@ -160,10 +153,6 @@
 #error "Yippee! ABORT TAG is now defined! Remove this error!"
 #endif
 
-#ifdef CONFIG_SCSI_ACORNSCSI_LINK
-#error SCSI2 LINKed commands not supported (yet)!
-#endif
-
 #ifdef USE_DMAC
 /*
  * DMAC setup parameters
@@ -1668,42 +1657,6 @@ void acornscsi_message(AS_Host *host)
 	    }
 	    break;
 
-#ifdef CONFIG_SCSI_ACORNSCSI_LINK
-    case LINKED_CMD_COMPLETE:
-    case LINKED_FLG_CMD_COMPLETE:
-	/*
-	 * We don't support linked commands yet
-	 */
-	if (0) {
-#if (DEBUG & DEBUG_LINK)
-	    printk("scsi%d.%c: lun %d tag %d linked command complete\n",
-		    host->host->host_no, acornscsi_target(host), host->SCpnt->tag);
-#endif
-	    /*
-	     * A linked command should only terminate with one of these messages
-	     * if there are more linked commands available.
-	     */
-	    if (!host->SCpnt->next_link) {
-		printk(KERN_WARNING "scsi%d.%c: lun %d tag %d linked command complete, but no next_link\n",
-			instance->host_no, acornscsi_target(host), host->SCpnt->tag);
-		acornscsi_sbic_issuecmd(host, CMND_ASSERTATN);
-		msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
-	    } else {
-		struct scsi_cmnd *SCpnt = host->SCpnt;
-
-		acornscsi_dma_cleanup(host);
-
-		host->SCpnt = host->SCpnt->next_link;
-		host->SCpnt->tag = SCpnt->tag;
-		SCpnt->result = DID_OK | host->scsi.SCp.Message << 8 | host->Scsi.SCp.Status;
-		SCpnt->done(SCpnt);
-
-		/* initialise host->SCpnt->SCp */
-	    }
-	    break;
-	}
-#endif
-
    default: /* reject message */
 	printk(KERN_ERR "scsi%d.%c: unrecognised message %02X, rejecting\n",
 	       host->host->host_no, acornscsi_target(host),
@@ -2825,9 +2778,6 @@ char *acornscsi_info(struct Scsi_Host *host)
 #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
 	    " TAG"
 #endif
-#ifdef CONFIG_SCSI_ACORNSCSI_LINK
-	    " LINK"
-#endif
 #if (DEBUG & DEBUG_NO_WRITE)
 	    " NOWRITE (" __stringify(NO_WRITE) ")"
 #endif
@@ -2851,9 +2801,6 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
 #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
 	    " TAG"
 #endif
-#ifdef CONFIG_SCSI_ACORNSCSI_LINK
-	    " LINK"
-#endif
 #if (DEBUG & DEBUG_NO_WRITE)
 	    " NOWRITE (" __stringify(NO_WRITE) ")"
 #endif
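The large acornscsi.c deletion removes code that could never run: the LINKED_*_CMD_COMPLETE cases were guarded by an #ifdef whose symbol is #undef'd (and would trip an #error if ever defined), and the body was additionally wrapped in if (0). The misspelled host->Scsi.SCp.Status (capital S) in the removed block is evidence the #ifdef branch never compiled, since the preprocessor dropped it before the compiler could object. Code inside a plain if (0), by contrast, must still parse and type-check; a trivial standalone sketch of that distinction:

#include <stdio.h>

int main(void)
{
	if (0) {
		/* Never executed, but still compiled and type-checked, so a
		 * misspelled identifier here would break the build. */
		printf("unreachable\n");
	}
	return 0;
}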
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index f8e060900052..8ef810a4476e 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -36,9 +36,6 @@
 		void __iomem *base;	\
 		void __iomem *dma
 
-#define BOARD_NORMAL 0
-#define BOARD_NCR53C400 1
-
 #include "../NCR5380.h"
 
 void cumanascsi_setup(char *str, int *ints)
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index 4266eef8aca1..188e734c7ff0 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -37,9 +37,6 @@
 #define NCR5380_implementation_fields \
 	void __iomem *base
 
-#define BOARD_NORMAL 0
-#define BOARD_NCR53C400 1
-
 #include "../NCR5380.h"
 
 #undef START_DMA_INITIATOR_RECEIVE_REG
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index 0f3cdbc80ba6..1814aa20b724 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -370,7 +370,7 @@ static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged)
 		return 0;
 	if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >=
 	    TagAlloc[cmd->device->id][cmd->device->lun].queue_size) {
-		TAG_PRINTK("scsi%d: target %d lun %d: no free tags\n",
+		dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n",
 			   H_NO(cmd), cmd->device->id, cmd->device->lun);
 		return 1;
 	}
@@ -394,7 +394,7 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged)
 	    !setup_use_tagged_queuing || !cmd->device->tagged_supported) {
 		cmd->tag = TAG_NONE;
 		hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
-		TAG_PRINTK("scsi%d: target %d lun %d now allocated by untagged "
+		dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged "
 			   "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun);
 	} else {
 		TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
@@ -402,7 +402,7 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged)
 		cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS);
 		set_bit(cmd->tag, ta->allocated);
 		ta->nr_allocated++;
-		TAG_PRINTK("scsi%d: using tag %d for target %d lun %d "
+		dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d "
 			   "(now %d tags in use)\n",
 			   H_NO(cmd), cmd->tag, cmd->device->id,
 			   cmd->device->lun, ta->nr_allocated);
@@ -420,7 +420,7 @@ static void cmd_free_tag(Scsi_Cmnd *cmd)
 
 	if (cmd->tag == TAG_NONE) {
 		hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
-		TAG_PRINTK("scsi%d: target %d lun %d untagged cmd finished\n",
+		dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n",
 			   H_NO(cmd), cmd->device->id, cmd->device->lun);
 	} else if (cmd->tag >= MAX_TAGS) {
 		printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n",
@@ -429,7 +429,7 @@ static void cmd_free_tag(Scsi_Cmnd *cmd)
 		TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
 		clear_bit(cmd->tag, ta->allocated);
 		ta->nr_allocated--;
-		TAG_PRINTK("scsi%d: freed tag %d for target %d lun %d\n",
+		dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n",
 			   H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun);
 	}
 }
@@ -478,7 +478,7 @@ static void merge_contiguous_buffers(Scsi_Cmnd *cmd)
 	for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1;
 	     cmd->SCp.buffers_residual &&
 	     virt_to_phys(sg_virt(&cmd->SCp.buffer[1])) == endaddr;) {
-		MER_PRINTK("VTOP(%p) == %08lx -> merging\n",
+		dprintk(NDEBUG_MERGING, "VTOP(%p) == %08lx -> merging\n",
 			   page_address(sg_page(&cmd->SCp.buffer[1])), endaddr);
 #if (NDEBUG & NDEBUG_MERGING)
 		++cnt;
@@ -490,7 +490,7 @@ static void merge_contiguous_buffers(Scsi_Cmnd *cmd)
 	}
 #if (NDEBUG & NDEBUG_MERGING)
 	if (oldlen != cmd->SCp.this_residual)
-		MER_PRINTK("merged %d buffers from %p, new length %08x\n",
+		dprintk(NDEBUG_MERGING, "merged %d buffers from %p, new length %08x\n",
 			   cnt, cmd->SCp.ptr, cmd->SCp.this_residual);
 #endif
 }
@@ -626,16 +626,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
 	}
 }
 
-#else /* !NDEBUG */
-
-/* dummies... */
-static inline void NCR5380_print(struct Scsi_Host *instance)
-{
-};
-static inline void NCR5380_print_phase(struct Scsi_Host *instance)
-{
-};
-
 #endif
 
 /*
@@ -676,7 +666,7 @@ static inline void NCR5380_all_init(void)
 {
 	static int done = 0;
 	if (!done) {
-		INI_PRINTK("scsi : NCR5380_all_init()\n");
+		dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n");
 		done = 1;
 	}
 }
@@ -739,8 +729,8 @@ static void NCR5380_print_status(struct Scsi_Host *instance)
 	Scsi_Cmnd *ptr;
 	unsigned long flags;
 
-	NCR_PRINT(NDEBUG_ANY);
-	NCR_PRINT_PHASE(NDEBUG_ANY);
+	NCR5380_dprint(NDEBUG_ANY, instance);
+	NCR5380_dprint_phase(NDEBUG_ANY, instance);
 
 	hostdata = (struct NCR5380_hostdata *)instance->hostdata;
 
@@ -984,7 +974,7 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
 	}
 	local_irq_restore(flags);
 
-	QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd),
+	dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd),
 		  (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");
 
 	/* If queue_command() is called from an interrupt (real one or bottom
@@ -1054,7 +1044,7 @@ static void NCR5380_main(struct work_struct *work)
1054 done = 1; 1044 done = 1;
1055 1045
1056 if (!hostdata->connected) { 1046 if (!hostdata->connected) {
1057 MAIN_PRINTK("scsi%d: not connected\n", HOSTNO); 1047 dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO);
1058 /* 1048 /*
1059 * Search through the issue_queue for a command destined 1049 * Search through the issue_queue for a command destined
1060 * for a target that's not busy. 1050 * for a target that's not busy.
@@ -1107,7 +1097,7 @@ static void NCR5380_main(struct work_struct *work)
1107 * On failure, we must add the command back to the 1097 * On failure, we must add the command back to the
1108 * issue queue so we can keep trying. 1098 * issue queue so we can keep trying.
1109 */ 1099 */
1110 MAIN_PRINTK("scsi%d: main(): command for target %d " 1100 dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d "
1111 "lun %d removed from issue_queue\n", 1101 "lun %d removed from issue_queue\n",
1112 HOSTNO, tmp->device->id, tmp->device->lun); 1102 HOSTNO, tmp->device->id, tmp->device->lun);
1113 /* 1103 /*
@@ -1140,7 +1130,7 @@ static void NCR5380_main(struct work_struct *work)
1140#endif 1130#endif
1141 falcon_dont_release--; 1131 falcon_dont_release--;
1142 local_irq_restore(flags); 1132 local_irq_restore(flags);
1143 MAIN_PRINTK("scsi%d: main(): select() failed, " 1133 dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, "
1144 "returned to issue_queue\n", HOSTNO); 1134 "returned to issue_queue\n", HOSTNO);
1145 if (hostdata->connected) 1135 if (hostdata->connected)
1146 break; 1136 break;
@@ -1155,10 +1145,10 @@ static void NCR5380_main(struct work_struct *work)
1155#endif 1145#endif
1156 ) { 1146 ) {
1157 local_irq_restore(flags); 1147 local_irq_restore(flags);
1158 MAIN_PRINTK("scsi%d: main: performing information transfer\n", 1148 dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n",
1159 HOSTNO); 1149 HOSTNO);
1160 NCR5380_information_transfer(instance); 1150 NCR5380_information_transfer(instance);
1161 MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO); 1151 dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO);
1162 done = 0; 1152 done = 0;
1163 } 1153 }
1164 } while (!done); 1154 } while (!done);
@@ -1204,12 +1194,12 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
1204 (BASR_PHASE_MATCH|BASR_ACK)) { 1194 (BASR_PHASE_MATCH|BASR_ACK)) {
1205 saved_data = NCR5380_read(INPUT_DATA_REG); 1195 saved_data = NCR5380_read(INPUT_DATA_REG);
1206 overrun = 1; 1196 overrun = 1;
1207 DMA_PRINTK("scsi%d: read overrun handled\n", HOSTNO); 1197 dprintk(NDEBUG_DMA, "scsi%d: read overrun handled\n", HOSTNO);
1208 } 1198 }
1209 } 1199 }
1210 } 1200 }
1211 1201
1212 DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n", 1202 dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
1213 HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), 1203 HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
1214 NCR5380_read(STATUS_REG)); 1204 NCR5380_read(STATUS_REG));
1215 1205
@@ -1229,13 +1219,13 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
1229 if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) { 1219 if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) {
1230 cnt = toPIO = atari_read_overruns; 1220 cnt = toPIO = atari_read_overruns;
1231 if (overrun) { 1221 if (overrun) {
1232 DMA_PRINTK("Got an input overrun, using saved byte\n"); 1222 dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n");
1233 *(*data)++ = saved_data; 1223 *(*data)++ = saved_data;
1234 (*count)--; 1224 (*count)--;
1235 cnt--; 1225 cnt--;
1236 toPIO--; 1226 toPIO--;
1237 } 1227 }
1238 DMA_PRINTK("Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data); 1228 dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data);
1239 NCR5380_transfer_pio(instance, &p, &cnt, data); 1229 NCR5380_transfer_pio(instance, &p, &cnt, data);
1240 *count -= toPIO - cnt; 1230 *count -= toPIO - cnt;
1241 } 1231 }
@@ -1261,25 +1251,25 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
1261 int done = 1, handled = 0; 1251 int done = 1, handled = 0;
1262 unsigned char basr; 1252 unsigned char basr;
1263 1253
1264 INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO); 1254 dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO);
1265 1255
1266 /* Look for pending interrupts */ 1256 /* Look for pending interrupts */
1267 basr = NCR5380_read(BUS_AND_STATUS_REG); 1257 basr = NCR5380_read(BUS_AND_STATUS_REG);
1268 INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr); 1258 dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr);
1269 /* dispatch to appropriate routine if found and done=0 */ 1259 /* dispatch to appropriate routine if found and done=0 */
1270 if (basr & BASR_IRQ) { 1260 if (basr & BASR_IRQ) {
1271 NCR_PRINT(NDEBUG_INTR); 1261 NCR5380_dprint(NDEBUG_INTR, instance);
1272 if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) { 1262 if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {
1273 done = 0; 1263 done = 0;
1274 ENABLE_IRQ(); 1264 ENABLE_IRQ();
1275 INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO); 1265 dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO);
1276 NCR5380_reselect(instance); 1266 NCR5380_reselect(instance);
1277 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1267 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1278 } else if (basr & BASR_PARITY_ERROR) { 1268 } else if (basr & BASR_PARITY_ERROR) {
1279 INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO); 1269 dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO);
1280 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1270 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1281 } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { 1271 } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
1282 INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO); 1272 dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO);
1283 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1273 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1284 } else { 1274 } else {
1285 /* 1275 /*
@@ -1298,7 +1288,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
1298 ((basr & BASR_END_DMA_TRANSFER) || 1288 ((basr & BASR_END_DMA_TRANSFER) ||
1299 !(basr & BASR_PHASE_MATCH))) { 1289 !(basr & BASR_PHASE_MATCH))) {
1300 1290
1301 INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); 1291 dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
1302 NCR5380_dma_complete( instance ); 1292 NCR5380_dma_complete( instance );
1303 done = 0; 1293 done = 0;
1304 ENABLE_IRQ(); 1294 ENABLE_IRQ();
@@ -1323,7 +1313,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
1323 } 1313 }
1324 1314
1325 if (!done) { 1315 if (!done) {
1326 INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO); 1316 dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO);
1327 /* Put a call to NCR5380_main() on the queue... */ 1317 /* Put a call to NCR5380_main() on the queue... */
1328 queue_main(); 1318 queue_main();
1329 } 1319 }
@@ -1396,8 +1386,8 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
1396 unsigned long flags; 1386 unsigned long flags;
1397 1387
1398 hostdata->restart_select = 0; 1388 hostdata->restart_select = 0;
1399 NCR_PRINT(NDEBUG_ARBITRATION); 1389 NCR5380_dprint(NDEBUG_ARBITRATION, instance);
1400 ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO, 1390 dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO,
1401 instance->this_id); 1391 instance->this_id);
1402 1392
1403 /* 1393 /*
@@ -1442,7 +1432,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
1442 ; 1432 ;
1443#endif 1433#endif
1444 1434
1445 ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO); 1435 dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO);
1446 1436
1447 if (hostdata->connected) { 1437 if (hostdata->connected) {
1448 NCR5380_write(MODE_REG, MR_BASE); 1438 NCR5380_write(MODE_REG, MR_BASE);
@@ -1463,7 +1453,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
1463 (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || 1453 (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
1464 hostdata->connected) { 1454 hostdata->connected) {
1465 NCR5380_write(MODE_REG, MR_BASE); 1455 NCR5380_write(MODE_REG, MR_BASE);
1466 ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", 1456 dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
1467 HOSTNO); 1457 HOSTNO);
1468 return -1; 1458 return -1;
1469 } 1459 }
@@ -1478,7 +1468,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
1478 hostdata->connected) { 1468 hostdata->connected) {
1479 NCR5380_write(MODE_REG, MR_BASE); 1469 NCR5380_write(MODE_REG, MR_BASE);
1480 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 1470 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1481 ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", 1471 dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
1482 HOSTNO); 1472 HOSTNO);
1483 return -1; 1473 return -1;
1484 } 1474 }
@@ -1501,7 +1491,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
1501 return -1; 1491 return -1;
1502 } 1492 }
1503 1493
1504 ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO); 1494 dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO);
1505 1495
1506 /* 1496 /*
1507 * Now that we have won arbitration, start Selection process, asserting 1497 * Now that we have won arbitration, start Selection process, asserting
@@ -1561,7 +1551,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
1561 1551
1562 udelay(1); 1552 udelay(1);
1563 1553
1564 SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); 1554 dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);
1565 1555
1566 /* 1556 /*
1567 * The SCSI specification calls for a 250 ms timeout for the actual 1557 * The SCSI specification calls for a 250 ms timeout for the actual
@@ -1617,7 +1607,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
1617 printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO); 1607 printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO);
1618 if (hostdata->restart_select) 1608 if (hostdata->restart_select)
1619 printk(KERN_NOTICE "\trestart select\n"); 1609 printk(KERN_NOTICE "\trestart select\n");
1620 NCR_PRINT(NDEBUG_ANY); 1610 NCR5380_dprint(NDEBUG_ANY, instance);
1621 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 1611 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
1622 return -1; 1612 return -1;
1623 } 1613 }
@@ -1630,7 +1620,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
1630#endif 1620#endif
1631 cmd->scsi_done(cmd); 1621 cmd->scsi_done(cmd);
1632 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 1622 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
1633 SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO); 1623 dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO);
1634 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 1624 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
1635 return 0; 1625 return 0;
1636 } 1626 }
@@ -1656,7 +1646,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
1656 while (!(NCR5380_read(STATUS_REG) & SR_REQ)) 1646 while (!(NCR5380_read(STATUS_REG) & SR_REQ))
1657 ; 1647 ;
1658 1648
1659 SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n", 1649 dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
1660 HOSTNO, cmd->device->id); 1650 HOSTNO, cmd->device->id);
1661 tmp[0] = IDENTIFY(1, cmd->device->lun); 1651 tmp[0] = IDENTIFY(1, cmd->device->lun);
1662 1652
@@ -1676,7 +1666,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
1676 data = tmp; 1666 data = tmp;
1677 phase = PHASE_MSGOUT; 1667 phase = PHASE_MSGOUT;
1678 NCR5380_transfer_pio(instance, &phase, &len, &data); 1668 NCR5380_transfer_pio(instance, &phase, &len, &data);
1679 SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO); 1669 dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO);
1680 /* XXX need to handle errors here */ 1670 /* XXX need to handle errors here */
1681 hostdata->connected = cmd; 1671 hostdata->connected = cmd;
1682#ifndef SUPPORT_TAGS 1672#ifndef SUPPORT_TAGS
@@ -1737,12 +1727,12 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
1737 while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ)) 1727 while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ))
1738 ; 1728 ;
1739 1729
1740 HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO); 1730 dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO);
1741 1731
1742 /* Check for phase mismatch */ 1732 /* Check for phase mismatch */
1743 if ((tmp & PHASE_MASK) != p) { 1733 if ((tmp & PHASE_MASK) != p) {
1744 PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO); 1734 dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO);
1745 NCR_PRINT_PHASE(NDEBUG_PIO); 1735 NCR5380_dprint_phase(NDEBUG_PIO, instance);
1746 break; 1736 break;
1747 } 1737 }
1748 1738
@@ -1764,25 +1754,25 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
1764 if (!(p & SR_IO)) { 1754 if (!(p & SR_IO)) {
1765 if (!((p & SR_MSG) && c > 1)) { 1755 if (!((p & SR_MSG) && c > 1)) {
1766 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); 1756 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
1767 NCR_PRINT(NDEBUG_PIO); 1757 NCR5380_dprint(NDEBUG_PIO, instance);
1768 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | 1758 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
1769 ICR_ASSERT_DATA | ICR_ASSERT_ACK); 1759 ICR_ASSERT_DATA | ICR_ASSERT_ACK);
1770 } else { 1760 } else {
1771 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | 1761 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
1772 ICR_ASSERT_DATA | ICR_ASSERT_ATN); 1762 ICR_ASSERT_DATA | ICR_ASSERT_ATN);
1773 NCR_PRINT(NDEBUG_PIO); 1763 NCR5380_dprint(NDEBUG_PIO, instance);
1774 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | 1764 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
1775 ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); 1765 ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
1776 } 1766 }
1777 } else { 1767 } else {
1778 NCR_PRINT(NDEBUG_PIO); 1768 NCR5380_dprint(NDEBUG_PIO, instance);
1779 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); 1769 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
1780 } 1770 }
1781 1771
1782 while (NCR5380_read(STATUS_REG) & SR_REQ) 1772 while (NCR5380_read(STATUS_REG) & SR_REQ)
1783 ; 1773 ;
1784 1774
1785 HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO); 1775 dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO);
1786 1776
1787 /* 1777 /*
1788 * We have several special cases to consider during REQ/ACK handshaking : 1778 * We have several special cases to consider during REQ/ACK handshaking :
@@ -1803,7 +1793,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
1803 } 1793 }
1804 } while (--c); 1794 } while (--c);
1805 1795
1806 PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c); 1796 dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c);
1807 1797
1808 *count = c; 1798 *count = c;
1809 *data = d; 1799 *data = d;
@@ -1917,7 +1907,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
1917 if (atari_read_overruns && (p & SR_IO)) 1907 if (atari_read_overruns && (p & SR_IO))
1918 c -= atari_read_overruns; 1908 c -= atari_read_overruns;
1919 1909
1920 DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n", 1910 dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
1921 HOSTNO, (p & SR_IO) ? "reading" : "writing", 1911 HOSTNO, (p & SR_IO) ? "reading" : "writing",
1922 c, (p & SR_IO) ? "to" : "from", d); 1912 c, (p & SR_IO) ? "to" : "from", d);
1923 1913
@@ -1997,7 +1987,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
1997 phase = (tmp & PHASE_MASK); 1987 phase = (tmp & PHASE_MASK);
1998 if (phase != old_phase) { 1988 if (phase != old_phase) {
1999 old_phase = phase; 1989 old_phase = phase;
2000 NCR_PRINT_PHASE(NDEBUG_INFORMATION); 1990 NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
2001 } 1991 }
2002 1992
2003 if (sink && (phase != PHASE_MSGOUT)) { 1993 if (sink && (phase != PHASE_MSGOUT)) {
@@ -2039,7 +2029,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2039 * they are at contiguous physical addresses. 2029 * they are at contiguous physical addresses.
2040 */ 2030 */
2041 merge_contiguous_buffers(cmd); 2031 merge_contiguous_buffers(cmd);
2042 INF_PRINTK("scsi%d: %d bytes and %d buffers left\n", 2032 dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n",
2043 HOSTNO, cmd->SCp.this_residual, 2033 HOSTNO, cmd->SCp.this_residual,
2044 cmd->SCp.buffers_residual); 2034 cmd->SCp.buffers_residual);
2045 } 2035 }
@@ -2123,7 +2113,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2123 /* Accept message by clearing ACK */ 2113 /* Accept message by clearing ACK */
2124 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2114 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2125 2115
2126 LNK_PRINTK("scsi%d: target %d lun %d linked command " 2116 dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked command "
2127 "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); 2117 "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun);
2128 2118
2129 /* Enable reselect interrupts */ 2119 /* Enable reselect interrupts */
@@ -2148,7 +2138,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2148 * and don't free it! */ 2138 * and don't free it! */
2149 cmd->next_link->tag = cmd->tag; 2139 cmd->next_link->tag = cmd->tag;
2150 cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); 2140 cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
2151 LNK_PRINTK("scsi%d: target %d lun %d linked request " 2141 dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked request "
2152 "done, calling scsi_done().\n", 2142 "done, calling scsi_done().\n",
2153 HOSTNO, cmd->device->id, cmd->device->lun); 2143 HOSTNO, cmd->device->id, cmd->device->lun);
2154#ifdef NCR5380_STATS 2144#ifdef NCR5380_STATS
@@ -2165,7 +2155,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2165 /* ++guenther: possible race with Falcon locking */ 2155 /* ++guenther: possible race with Falcon locking */
2166 falcon_dont_release++; 2156 falcon_dont_release++;
2167 hostdata->connected = NULL; 2157 hostdata->connected = NULL;
2168 QU_PRINTK("scsi%d: command for target %d, lun %d " 2158 dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %d "
2169 "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); 2159 "completed\n", HOSTNO, cmd->device->id, cmd->device->lun);
2170#ifdef SUPPORT_TAGS 2160#ifdef SUPPORT_TAGS
2171 cmd_free_tag(cmd); 2161 cmd_free_tag(cmd);
@@ -2179,7 +2169,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2179 /* ++Andreas: the mid level code knows about 2169 /* ++Andreas: the mid level code knows about
2180 QUEUE_FULL now. */ 2170 QUEUE_FULL now. */
2181 TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; 2171 TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
2182 TAG_PRINTK("scsi%d: target %d lun %d returned " 2172 dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d returned "
2183 "QUEUE_FULL after %d commands\n", 2173 "QUEUE_FULL after %d commands\n",
2184 HOSTNO, cmd->device->id, cmd->device->lun, 2174 HOSTNO, cmd->device->id, cmd->device->lun,
2185 ta->nr_allocated); 2175 ta->nr_allocated);
@@ -2224,14 +2214,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2224 (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) { 2214 (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
2225 scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); 2215 scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
2226 2216
2227 ASEN_PRINTK("scsi%d: performing request sense\n", HOSTNO); 2217 dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", HOSTNO);
2228 2218
2229 local_irq_save(flags); 2219 local_irq_save(flags);
2230 LIST(cmd,hostdata->issue_queue); 2220 LIST(cmd,hostdata->issue_queue);
2231 SET_NEXT(cmd, hostdata->issue_queue); 2221 SET_NEXT(cmd, hostdata->issue_queue);
2232 hostdata->issue_queue = (Scsi_Cmnd *) cmd; 2222 hostdata->issue_queue = (Scsi_Cmnd *) cmd;
2233 local_irq_restore(flags); 2223 local_irq_restore(flags);
2234 QU_PRINTK("scsi%d: REQUEST SENSE added to head of " 2224 dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of "
2235 "issue queue\n", H_NO(cmd)); 2225 "issue queue\n", H_NO(cmd));
2236 } else 2226 } else
2237#endif /* def AUTOSENSE */ 2227#endif /* def AUTOSENSE */
@@ -2277,7 +2267,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2277 cmd->device->tagged_supported = 0; 2267 cmd->device->tagged_supported = 0;
2278 hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); 2268 hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
2279 cmd->tag = TAG_NONE; 2269 cmd->tag = TAG_NONE;
2280 TAG_PRINTK("scsi%d: target %d lun %d rejected " 2270 dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d rejected "
2281 "QUEUE_TAG message; tagged queuing " 2271 "QUEUE_TAG message; tagged queuing "
2282 "disabled\n", 2272 "disabled\n",
2283 HOSTNO, cmd->device->id, cmd->device->lun); 2273 HOSTNO, cmd->device->id, cmd->device->lun);
@@ -2294,7 +2284,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2294 hostdata->connected = NULL; 2284 hostdata->connected = NULL;
2295 hostdata->disconnected_queue = cmd; 2285 hostdata->disconnected_queue = cmd;
2296 local_irq_restore(flags); 2286 local_irq_restore(flags);
2297 QU_PRINTK("scsi%d: command for target %d lun %d was " 2287 dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %d was "
2298 "moved from connected to the " 2288 "moved from connected to the "
2299 "disconnected_queue\n", HOSTNO, 2289 "disconnected_queue\n", HOSTNO,
2300 cmd->device->id, cmd->device->lun); 2290 cmd->device->id, cmd->device->lun);
@@ -2344,13 +2334,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2344 /* Accept first byte by clearing ACK */ 2334 /* Accept first byte by clearing ACK */
2345 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2335 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2346 2336
2347 EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO); 2337 dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO);
2348 2338
2349 len = 2; 2339 len = 2;
2350 data = extended_msg + 1; 2340 data = extended_msg + 1;
2351 phase = PHASE_MSGIN; 2341 phase = PHASE_MSGIN;
2352 NCR5380_transfer_pio(instance, &phase, &len, &data); 2342 NCR5380_transfer_pio(instance, &phase, &len, &data);
2353 EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO, 2343 dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO,
2354 (int)extended_msg[1], (int)extended_msg[2]); 2344 (int)extended_msg[1], (int)extended_msg[2]);
2355 2345
2356 if (!len && extended_msg[1] <= 2346 if (!len && extended_msg[1] <=
@@ -2362,7 +2352,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2362 phase = PHASE_MSGIN; 2352 phase = PHASE_MSGIN;
2363 2353
2364 NCR5380_transfer_pio(instance, &phase, &len, &data); 2354 NCR5380_transfer_pio(instance, &phase, &len, &data);
2365 EXT_PRINTK("scsi%d: message received, residual %d\n", 2355 dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n",
2366 HOSTNO, len); 2356 HOSTNO, len);
2367 2357
2368 switch (extended_msg[2]) { 2358 switch (extended_msg[2]) {
@@ -2451,7 +2441,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
2451 break; 2441 break;
2452 default: 2442 default:
2453 printk("scsi%d: unknown phase\n", HOSTNO); 2443 printk("scsi%d: unknown phase\n", HOSTNO);
2454 NCR_PRINT(NDEBUG_ANY); 2444 NCR5380_dprint(NDEBUG_ANY, instance);
2455 } /* switch(phase) */ 2445 } /* switch(phase) */
2456 } /* if (tmp & SR_REQ) */ 2446 } /* if (tmp & SR_REQ) */
2457 } /* while (1) */ 2447 } /* while (1) */
@@ -2493,7 +2483,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
2493 2483
2494 target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); 2484 target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
2495 2485
2496 RSL_PRINTK("scsi%d: reselect\n", HOSTNO); 2486 dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO);
2497 2487
2498 /* 2488 /*
2499 * At this point, we have detected that our SCSI ID is on the bus, 2489 * At this point, we have detected that our SCSI ID is on the bus,
@@ -2544,7 +2534,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
2544 if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && 2534 if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
2545 msg[1] == SIMPLE_QUEUE_TAG) 2535 msg[1] == SIMPLE_QUEUE_TAG)
2546 tag = msg[2]; 2536 tag = msg[2];
2547 TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at " 2537 dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at "
2548 "reselection\n", HOSTNO, target_mask, lun, tag); 2538 "reselection\n", HOSTNO, target_mask, lun, tag);
2549 } 2539 }
2550#endif 2540#endif
@@ -2598,7 +2588,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
2598 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2588 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2599 2589
2600 hostdata->connected = tmp; 2590 hostdata->connected = tmp;
2601 RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n", 2591 dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
2602 HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); 2592 HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag);
2603 falcon_dont_release--; 2593 falcon_dont_release--;
2604} 2594}
@@ -2640,7 +2630,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2640 printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_abort\n", 2630 printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_abort\n",
2641 HOSTNO); 2631 HOSTNO);
2642 2632
2643 ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, 2633 dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
2644 NCR5380_read(BUS_AND_STATUS_REG), 2634 NCR5380_read(BUS_AND_STATUS_REG),
2645 NCR5380_read(STATUS_REG)); 2635 NCR5380_read(STATUS_REG));
2646 2636
@@ -2653,7 +2643,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2653 2643
2654 if (hostdata->connected == cmd) { 2644 if (hostdata->connected == cmd) {
2655 2645
2656 ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO); 2646 dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO);
2657 /* 2647 /*
2658 * We should perform BSY checking, and make sure we haven't slipped 2648 * We should perform BSY checking, and make sure we haven't slipped
2659 * into BUS FREE. 2649 * into BUS FREE.
@@ -2683,11 +2673,11 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2683 local_irq_restore(flags); 2673 local_irq_restore(flags);
2684 cmd->scsi_done(cmd); 2674 cmd->scsi_done(cmd);
2685 falcon_release_lock_if_possible(hostdata); 2675 falcon_release_lock_if_possible(hostdata);
2686 return SCSI_ABORT_SUCCESS; 2676 return SUCCESS;
2687 } else { 2677 } else {
2688/* local_irq_restore(flags); */ 2678/* local_irq_restore(flags); */
2689 printk("scsi%d: abort of connected command failed!\n", HOSTNO); 2679 printk("scsi%d: abort of connected command failed!\n", HOSTNO);
2690 return SCSI_ABORT_ERROR; 2680 return FAILED;
2691 } 2681 }
2692 } 2682 }
2693#endif 2683#endif
@@ -2705,13 +2695,13 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2705 SET_NEXT(tmp, NULL); 2695 SET_NEXT(tmp, NULL);
2706 tmp->result = DID_ABORT << 16; 2696 tmp->result = DID_ABORT << 16;
2707 local_irq_restore(flags); 2697 local_irq_restore(flags);
2708 ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n", 2698 dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n",
2709 HOSTNO); 2699 HOSTNO);
2710 /* Tagged queuing note: no tag to free here, hasn't been assigned 2700 /* Tagged queuing note: no tag to free here, hasn't been assigned
2711 * yet... */ 2701 * yet... */
2712 tmp->scsi_done(tmp); 2702 tmp->scsi_done(tmp);
2713 falcon_release_lock_if_possible(hostdata); 2703 falcon_release_lock_if_possible(hostdata);
2714 return SCSI_ABORT_SUCCESS; 2704 return SUCCESS;
2715 } 2705 }
2716 } 2706 }
2717 2707
@@ -2728,8 +2718,8 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2728 2718
2729 if (hostdata->connected) { 2719 if (hostdata->connected) {
2730 local_irq_restore(flags); 2720 local_irq_restore(flags);
2731 ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO); 2721 dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO);
2732 return SCSI_ABORT_SNOOZE; 2722 return FAILED;
2733 } 2723 }
2734 2724
2735 /* 2725 /*
@@ -2761,12 +2751,12 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2761 tmp = NEXT(tmp)) { 2751 tmp = NEXT(tmp)) {
2762 if (cmd == tmp) { 2752 if (cmd == tmp) {
2763 local_irq_restore(flags); 2753 local_irq_restore(flags);
2764 ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO); 2754 dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO);
2765 2755
2766 if (NCR5380_select(instance, cmd, (int)cmd->tag)) 2756 if (NCR5380_select(instance, cmd, (int)cmd->tag))
2767 return SCSI_ABORT_BUSY; 2757 return FAILED;
2768 2758
2769 ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO); 2759 dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO);
2770 2760
2771 do_abort(instance); 2761 do_abort(instance);
2772 2762
@@ -2791,7 +2781,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2791 local_irq_restore(flags); 2781 local_irq_restore(flags);
2792 tmp->scsi_done(tmp); 2782 tmp->scsi_done(tmp);
2793 falcon_release_lock_if_possible(hostdata); 2783 falcon_release_lock_if_possible(hostdata);
2794 return SCSI_ABORT_SUCCESS; 2784 return SUCCESS;
2795 } 2785 }
2796 } 2786 }
2797 } 2787 }
@@ -2816,7 +2806,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2816 */ 2806 */
2817 falcon_release_lock_if_possible(hostdata); 2807 falcon_release_lock_if_possible(hostdata);
2818 2808
2819 return SCSI_ABORT_NOT_RUNNING; 2809 return FAILED;
2820} 2810}
2821 2811
2822 2812
@@ -2825,7 +2815,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
2825 * 2815 *
2826 * Purpose : reset the SCSI bus. 2816 * Purpose : reset the SCSI bus.
2827 * 2817 *
2828 * Returns : SCSI_RESET_WAKEUP 2818 * Returns : SUCCESS or FAILED
2829 * 2819 *
2830 */ 2820 */
2831 2821
@@ -2834,7 +2824,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
2834 SETUP_HOSTDATA(cmd->device->host); 2824 SETUP_HOSTDATA(cmd->device->host);
2835 int i; 2825 int i;
2836 unsigned long flags; 2826 unsigned long flags;
2837#if 1 2827#if defined(RESET_RUN_DONE)
2838 Scsi_Cmnd *connected, *disconnected_queue; 2828 Scsi_Cmnd *connected, *disconnected_queue;
2839#endif 2829#endif
2840 2830
@@ -2859,7 +2849,14 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
2859 * through anymore ... */ 2849 * through anymore ... */
2860 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); 2850 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
2861 2851
2862#if 1 /* XXX Should now be done by midlevel code, but it's broken XXX */ 2852 /* MSch 20140115 - looking at the generic NCR5380 driver, all of this
2853 * should go.
2854 * Catch-22: if we don't clear all queues, the SCSI driver lock will
2855 * not be reset by atari_scsi_reset()!
2856 */
2857
2858#if defined(RESET_RUN_DONE)
2859 /* XXX Should now be done by midlevel code, but it's broken XXX */
2863 /* XXX see below XXX */ 2860 /* XXX see below XXX */
2864 2861
2865 /* MSch: old-style reset: actually abort all command processing here */ 2862 /* MSch: old-style reset: actually abort all command processing here */
@@ -2890,7 +2887,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
2890 */ 2887 */
2891 2888
2892 if ((cmd = connected)) { 2889 if ((cmd = connected)) {
2893 ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); 2890 dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
2894 cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); 2891 cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
2895 cmd->scsi_done(cmd); 2892 cmd->scsi_done(cmd);
2896 } 2893 }
@@ -2902,7 +2899,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
2902 cmd->scsi_done(cmd); 2899 cmd->scsi_done(cmd);
2903 } 2900 }
2904 if (i > 0) 2901 if (i > 0)
2905 ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i); 2902 dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i);
2906 2903
2907 /* The Falcon lock should be released after a reset... 2904 /* The Falcon lock should be released after a reset...
2908 */ 2905 */
@@ -2915,7 +2912,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
2915 * the midlevel code that the reset was SUCCESSFUL, and there is no 2912 * the midlevel code that the reset was SUCCESSFUL, and there is no
2916 * need to 'wake up' the commands by a request_sense 2913 * need to 'wake up' the commands by a request_sense
2917 */ 2914 */
2918 return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET; 2915 return SUCCESS;
2919#else /* 1 */ 2916#else /* 1 */
2920 2917
2921 /* MSch: new-style reset handling: let the mid-level do what it can */ 2918 /* MSch: new-style reset handling: let the mid-level do what it can */
@@ -2942,11 +2939,11 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
2942 */ 2939 */
2943 2940
2944 if (hostdata->issue_queue) 2941 if (hostdata->issue_queue)
2945 ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); 2942 dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
2946 if (hostdata->connected) 2943 if (hostdata->connected)
2947 ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); 2944 dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
2948 if (hostdata->disconnected_queue) 2945 if (hostdata->disconnected_queue)
2949 ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); 2946 dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));
2950 2947
2951 local_irq_save(flags); 2948 local_irq_save(flags);
2952 hostdata->issue_queue = NULL; 2949 hostdata->issue_queue = NULL;
@@ -2963,6 +2960,6 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
2963 local_irq_restore(flags); 2960 local_irq_restore(flags);
2964 2961
2965 /* we did not do a complete reset of all commands, so a wakeup is required */ 2962
2966 return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET; 2963 return SUCCESS;
2967#endif /* 1 */ 2964#endif /* 1 */
2968} 2965}
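The bulk of the atari_NCR5380.c churn above is mechanical: the per-subsystem wrappers (TAG_PRINTK, DMA_PRINTK, ARB_PRINTK and friends) collapse into a single dprintk() that takes the NDEBUG_* flag explicitly, and NCR_PRINT/NCR_PRINT_PHASE become NCR5380_dprint()/NCR5380_dprint_phase(), which take the host instance as an argument. A minimal sketch of the shared helpers, modeled on the dprint() macro this series removes from atari_scsi.h further below (the real definitions land in NCR5380.h, which this section does not show):

    /* Sketch only -- modeled on the removed dprint(); the actual
     * definitions belong to drivers/scsi/NCR5380.h in this series. */
    #if NDEBUG
    #define dprintk(flg, fmt, args...) \
    	do { if ((NDEBUG) & (flg)) printk(KERN_DEBUG fmt, ## args); } while (0)
    #define NCR5380_dprint(flg, arg) \
    	do { if ((NDEBUG) & (flg)) NCR5380_print(arg); } while (0)
    #define NCR5380_dprint_phase(flg, arg) \
    	do { if ((NDEBUG) & (flg)) NCR5380_print_phase(arg); } while (0)
    #else
    #define dprintk(flg, fmt, args...)     do {} while (0)
    #define NCR5380_dprint(flg, arg)       do {} while (0)
    #define NCR5380_dprint_phase(flg, arg) do {} while (0)
    #endif

Making the flag and instance explicit is also why the !NDEBUG dummy stubs for NCR5380_print()/NCR5380_print_phase() (removed at old lines 629-638 above) are no longer needed: with NDEBUG disabled the macros compile to nothing instead of calling an empty inline.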
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index a8d721ff19eb..b522134528d6 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -67,12 +67,6 @@
67 67
68#include <linux/module.h> 68#include <linux/module.h>
69 69
70#define NDEBUG (0)
71
72#define NDEBUG_ABORT 0x00100000
73#define NDEBUG_TAGS 0x00200000
74#define NDEBUG_MERGING 0x00400000
75
76#define AUTOSENSE 70#define AUTOSENSE
77/* For the Atari version, use only polled IO or REAL_DMA */ 71/* For the Atari version, use only polled IO or REAL_DMA */
78#define REAL_DMA 72#define REAL_DMA
@@ -314,7 +308,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)
314 308
315 dma_stat = tt_scsi_dma.dma_ctrl; 309 dma_stat = tt_scsi_dma.dma_ctrl;
316 310
317 INT_PRINTK("scsi%d: NCR5380 interrupt, DMA status = %02x\n", 311 dprintk(NDEBUG_INTR, "scsi%d: NCR5380 interrupt, DMA status = %02x\n",
318 atari_scsi_host->host_no, dma_stat & 0xff); 312 atari_scsi_host->host_no, dma_stat & 0xff);
319 313
320 /* Look if it was the DMA that has interrupted: First possibility 314 /* Look if it was the DMA that has interrupted: First possibility
@@ -340,7 +334,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)
340 if ((dma_stat & 0x02) && !(dma_stat & 0x40)) { 334 if ((dma_stat & 0x02) && !(dma_stat & 0x40)) {
341 atari_dma_residual = HOSTDATA_DMALEN - (SCSI_DMA_READ_P(dma_addr) - atari_dma_startaddr); 335 atari_dma_residual = HOSTDATA_DMALEN - (SCSI_DMA_READ_P(dma_addr) - atari_dma_startaddr);
342 336
343 DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n", 337 dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n",
344 atari_dma_residual); 338 atari_dma_residual);
345 339
346 if ((signed int)atari_dma_residual < 0) 340 if ((signed int)atari_dma_residual < 0)
@@ -371,7 +365,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)
371 * other command. These shouldn't disconnect anyway. 365 * other command. These shouldn't disconnect anyway.
372 */ 366 */
373 if (atari_dma_residual & 0x1ff) { 367 if (atari_dma_residual & 0x1ff) {
374 DMA_PRINTK("SCSI DMA: DMA bug corrected, " 368 dprintk(NDEBUG_DMA, "SCSI DMA: DMA bug corrected, "
375 "difference %ld bytes\n", 369 "difference %ld bytes\n",
376 512 - (atari_dma_residual & 0x1ff)); 370 512 - (atari_dma_residual & 0x1ff));
377 atari_dma_residual = (atari_dma_residual + 511) & ~0x1ff; 371 atari_dma_residual = (atari_dma_residual + 511) & ~0x1ff;
@@ -438,7 +432,7 @@ static irqreturn_t scsi_falcon_intr(int irq, void *dummy)
438 "ST-DMA fifo\n", transferred & 15); 432 "ST-DMA fifo\n", transferred & 15);
439 433
440 atari_dma_residual = HOSTDATA_DMALEN - transferred; 434 atari_dma_residual = HOSTDATA_DMALEN - transferred;
441 DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n", 435 dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n",
442 atari_dma_residual); 436 atari_dma_residual);
443 } else 437 } else
444 atari_dma_residual = 0; 438 atari_dma_residual = 0;
@@ -474,11 +468,11 @@ static void atari_scsi_fetch_restbytes(void)
474 /* there are 'nr' bytes left for the last long address 468 /* there are 'nr' bytes left for the last long address
475 before the DMA pointer */ 469 before the DMA pointer */
476 phys_dst ^= nr; 470 phys_dst ^= nr;
477 DMA_PRINTK("SCSI DMA: there are %d rest bytes for phys addr 0x%08lx", 471 dprintk(NDEBUG_DMA, "SCSI DMA: there are %d rest bytes for phys addr 0x%08lx",
478 nr, phys_dst); 472 nr, phys_dst);
479 /* The content of the DMA pointer is a physical address! */ 473 /* The content of the DMA pointer is a physical address! */
480 dst = phys_to_virt(phys_dst); 474 dst = phys_to_virt(phys_dst);
481 DMA_PRINTK(" = virt addr %p\n", dst); 475 dprintk(NDEBUG_DMA, " = virt addr %p\n", dst);
482 for (src = (char *)&tt_scsi_dma.dma_restdata; nr != 0; --nr) 476 for (src = (char *)&tt_scsi_dma.dma_restdata; nr != 0; --nr)
483 *dst++ = *src++; 477 *dst++ = *src++;
484 } 478 }
@@ -827,7 +821,7 @@ static int atari_scsi_bus_reset(Scsi_Cmnd *cmd)
827 } else { 821 } else {
828 atari_turnon_irq(IRQ_MFP_FSCSI); 822 atari_turnon_irq(IRQ_MFP_FSCSI);
829 } 823 }
830 if ((rv & SCSI_RESET_ACTION) == SCSI_RESET_SUCCESS) 824 if (rv == SUCCESS)
831 falcon_release_lock_if_possible(hostdata); 825 falcon_release_lock_if_possible(hostdata);
832 826
833 return rv; 827 return rv;
@@ -883,7 +877,7 @@ static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
883{ 877{
884 unsigned long addr = virt_to_phys(data); 878 unsigned long addr = virt_to_phys(data);
885 879
886 DMA_PRINTK("scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, " 880 dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, "
887 "dir = %d\n", instance->host_no, data, addr, count, dir); 881 "dir = %d\n", instance->host_no, data, addr, count, dir);
888 882
889 if (!IS_A_TT() && !STRAM_ADDR(addr)) { 883 if (!IS_A_TT() && !STRAM_ADDR(addr)) {
@@ -1063,7 +1057,7 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
1063 possible_len = limit; 1057 possible_len = limit;
1064 1058
1065 if (possible_len != wanted_len) 1059 if (possible_len != wanted_len)
1066 DMA_PRINTK("Sorry, must cut DMA transfer size to %ld bytes " 1060 dprintk(NDEBUG_DMA, "Sorry, must cut DMA transfer size to %ld bytes "
1067 "instead of %ld\n", possible_len, wanted_len); 1061 "instead of %ld\n", possible_len, wanted_len);
1068 1062
1069 return possible_len; 1063 return possible_len;
diff --git a/drivers/scsi/atari_scsi.h b/drivers/scsi/atari_scsi.h
index 11c624bb122d..3299d91d7336 100644
--- a/drivers/scsi/atari_scsi.h
+++ b/drivers/scsi/atari_scsi.h
@@ -54,125 +54,6 @@
54#define NCR5380_dma_xfer_len(i,cmd,phase) \ 54#define NCR5380_dma_xfer_len(i,cmd,phase) \
55 atari_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 0 : 1) 55 atari_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 0 : 1)
56 56
57/* former generic SCSI error handling stuff */
58
59#define SCSI_ABORT_SNOOZE 0
60#define SCSI_ABORT_SUCCESS 1
61#define SCSI_ABORT_PENDING 2
62#define SCSI_ABORT_BUSY 3
63#define SCSI_ABORT_NOT_RUNNING 4
64#define SCSI_ABORT_ERROR 5
65
66#define SCSI_RESET_SNOOZE 0
67#define SCSI_RESET_PUNT 1
68#define SCSI_RESET_SUCCESS 2
69#define SCSI_RESET_PENDING 3
70#define SCSI_RESET_WAKEUP 4
71#define SCSI_RESET_NOT_RUNNING 5
72#define SCSI_RESET_ERROR 6
73
74#define SCSI_RESET_SYNCHRONOUS 0x01
75#define SCSI_RESET_ASYNCHRONOUS 0x02
76#define SCSI_RESET_SUGGEST_BUS_RESET 0x04
77#define SCSI_RESET_SUGGEST_HOST_RESET 0x08
78
79#define SCSI_RESET_BUS_RESET 0x100
80#define SCSI_RESET_HOST_RESET 0x200
81#define SCSI_RESET_ACTION 0xff
82
83/* Debugging printk definitions:
84 *
85 * ARB -> arbitration
86 * ASEN -> auto-sense
87 * DMA -> DMA
88 * HSH -> PIO handshake
89 * INF -> information transfer
90 * INI -> initialization
91 * INT -> interrupt
92 * LNK -> linked commands
93 * MAIN -> NCR5380_main() control flow
94 * NDAT -> no data-out phase
95 * NWR -> no write commands
96 * PIO -> PIO transfers
97 * PDMA -> pseudo DMA (unused on Atari)
98 * QU -> queues
99 * RSL -> reselections
100 * SEL -> selections
101 * USL -> usleep code (unused on Atari)
102 * LBS -> last byte sent (unused on Atari)
103 * RSS -> restarting of selections
104 * EXT -> extended messages
105 * ABRT -> aborting and resetting
106 * TAG -> queue tag handling
107 * MER -> merging of consec. buffers
108 *
109 */
110
111#define dprint(flg, format...) \
112({ \
113 if (NDEBUG & (flg)) \
114 printk(KERN_DEBUG format); \
115})
116
117#define ARB_PRINTK(format, args...) \
118 dprint(NDEBUG_ARBITRATION, format , ## args)
119#define ASEN_PRINTK(format, args...) \
120 dprint(NDEBUG_AUTOSENSE, format , ## args)
121#define DMA_PRINTK(format, args...) \
122 dprint(NDEBUG_DMA, format , ## args)
123#define HSH_PRINTK(format, args...) \
124 dprint(NDEBUG_HANDSHAKE, format , ## args)
125#define INF_PRINTK(format, args...) \
126 dprint(NDEBUG_INFORMATION, format , ## args)
127#define INI_PRINTK(format, args...) \
128 dprint(NDEBUG_INIT, format , ## args)
129#define INT_PRINTK(format, args...) \
130 dprint(NDEBUG_INTR, format , ## args)
131#define LNK_PRINTK(format, args...) \
132 dprint(NDEBUG_LINKED, format , ## args)
133#define MAIN_PRINTK(format, args...) \
134 dprint(NDEBUG_MAIN, format , ## args)
135#define NDAT_PRINTK(format, args...) \
136 dprint(NDEBUG_NO_DATAOUT, format , ## args)
137#define NWR_PRINTK(format, args...) \
138 dprint(NDEBUG_NO_WRITE, format , ## args)
139#define PIO_PRINTK(format, args...) \
140 dprint(NDEBUG_PIO, format , ## args)
141#define PDMA_PRINTK(format, args...) \
142 dprint(NDEBUG_PSEUDO_DMA, format , ## args)
143#define QU_PRINTK(format, args...) \
144 dprint(NDEBUG_QUEUES, format , ## args)
145#define RSL_PRINTK(format, args...) \
146 dprint(NDEBUG_RESELECTION, format , ## args)
147#define SEL_PRINTK(format, args...) \
148 dprint(NDEBUG_SELECTION, format , ## args)
149#define USL_PRINTK(format, args...) \
150 dprint(NDEBUG_USLEEP, format , ## args)
151#define LBS_PRINTK(format, args...) \
152 dprint(NDEBUG_LAST_BYTE_SENT, format , ## args)
153#define RSS_PRINTK(format, args...) \
154 dprint(NDEBUG_RESTART_SELECT, format , ## args)
155#define EXT_PRINTK(format, args...) \
156 dprint(NDEBUG_EXTENDED, format , ## args)
157#define ABRT_PRINTK(format, args...) \
158 dprint(NDEBUG_ABORT, format , ## args)
159#define TAG_PRINTK(format, args...) \
160 dprint(NDEBUG_TAGS, format , ## args)
161#define MER_PRINTK(format, args...) \
162 dprint(NDEBUG_MERGING, format , ## args)
163
164/* conditional macros for NCR5380_print_{,phase,status} */
165
166#define NCR_PRINT(mask) \
167 ((NDEBUG & (mask)) ? NCR5380_print(instance) : (void)0)
168
169#define NCR_PRINT_PHASE(mask) \
170 ((NDEBUG & (mask)) ? NCR5380_print_phase(instance) : (void)0)
171
172#define NCR_PRINT_STATUS(mask) \
173 ((NDEBUG & (mask)) ? NCR5380_print_status(instance) : (void)0)
174
175
176#endif /* ndef ASM */ 57#endif /* ndef ASM */
177#endif /* ATARI_SCSI_H */ 58#endif /* ATARI_SCSI_H */
178 59
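Alongside the debug macros, the header also drops its private copies of the pre-2.6 error-handling result codes (SCSI_ABORT_* and SCSI_RESET_*). The abort and bus-reset paths converted above now return the midlayer's own completion codes, so the composite results and masking disappear. A hedged before/after sketch, assuming the standard codes from <scsi/scsi.h>:

    /* Assumed values from <scsi/scsi.h>; verify against the target tree. */
    #define SUCCESS 0x2002
    #define FAILED  0x2003

    /* before */
    return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET;
    if ((rv & SCSI_RESET_ACTION) == SCSI_RESET_SUCCESS)
    	falcon_release_lock_if_possible(hostdata);

    /* after */
    return SUCCESS;
    if (rv == SUCCESS)
    	falcon_release_lock_if_possible(hostdata);

The old codes carried more shading (SNOOZE, PENDING, the BUS_RESET flag); everything that is not an unambiguous success now maps to FAILED, leaving escalation to the midlayer.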
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 1bfb0bd01198..860f527d8f26 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -83,9 +83,20 @@ static inline void queue_tail_inc(struct be_queue_info *q)
83 83
84/*ISCSI */ 84/*ISCSI */
85 85
86struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
87 bool enable;
88 u32 min_eqd; /* in usecs */
89 u32 max_eqd; /* in usecs */
90 u32 prev_eqd; /* in usecs */
91 u32 et_eqd; /* configured val when aic is off */
92 ulong jiffs;
93 u64 eq_prev; /* Used to calculate eqe */
94};
95
86struct be_eq_obj { 96struct be_eq_obj {
87 bool todo_mcc_cq; 97 bool todo_mcc_cq;
88 bool todo_cq; 98 bool todo_cq;
99 u32 cq_count;
89 struct be_queue_info q; 100 struct be_queue_info q;
90 struct beiscsi_hba *phba; 101 struct beiscsi_hba *phba;
91 struct be_queue_info *cq; 102 struct be_queue_info *cq;
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 7cf7f99ee442..cc7405c0eca0 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -71,6 +71,7 @@ struct be_mcc_wrb {
71#define BEISCSI_FW_MBX_TIMEOUT 100 71#define BEISCSI_FW_MBX_TIMEOUT 100
72 72
73/* MBOX Command VER */ 73/* MBOX Command VER */
74#define MBX_CMD_VER1 0x01
74#define MBX_CMD_VER2 0x02 75#define MBX_CMD_VER2 0x02
75 76
76struct be_mcc_compl { 77struct be_mcc_compl {
@@ -271,6 +272,12 @@ struct be_cmd_resp_eq_create {
271 u16 rsvd0; /* sword */ 272 u16 rsvd0; /* sword */
272} __packed; 273} __packed;
273 274
275struct be_set_eqd {
276 u32 eq_id;
277 u32 phase;
278 u32 delay_multiplier;
279} __packed;
280
274struct mgmt_chap_format { 281struct mgmt_chap_format {
275 u32 flags; 282 u32 flags;
276 u8 intr_chap_name[256]; 283 u8 intr_chap_name[256];
@@ -622,7 +629,7 @@ struct be_cmd_req_modify_eq_delay {
622 u32 eq_id; 629 u32 eq_id;
623 u32 phase; 630 u32 phase;
624 u32 delay_multiplier; 631 u32 delay_multiplier;
625 } delay[8]; 632 } delay[MAX_CPUS];
626} __packed; 633} __packed;
627 634
628/******************** Get MAC ADDR *******************/ 635/******************** Get MAC ADDR *******************/
@@ -708,6 +715,8 @@ unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba);
708 715
709void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag); 716void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
710 717
718int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *,
719 int num);
711int beiscsi_mccq_compl(struct beiscsi_hba *phba, 720int beiscsi_mccq_compl(struct beiscsi_hba *phba,
712 uint32_t tag, struct be_mcc_wrb **wrb, 721 uint32_t tag, struct be_mcc_wrb **wrb,
713 struct be_dma_mem *mbx_cmd_mem); 722 struct be_dma_mem *mbx_cmd_mem);
@@ -1005,6 +1014,26 @@ struct tcp_connect_and_offload_in {
1005 u8 rsvd0[3]; 1014 u8 rsvd0[3];
1006} __packed; 1015} __packed;
1007 1016
1017struct tcp_connect_and_offload_in_v1 {
1018 struct be_cmd_req_hdr hdr;
1019 struct ip_addr_format ip_address;
1020 u16 tcp_port;
1021 u16 cid;
1022 u16 cq_id;
1023 u16 defq_id;
1024 struct phys_addr dataout_template_pa;
1025 u16 hdr_ring_id;
1026 u16 data_ring_id;
1027 u8 do_offload;
1028 u8 ifd_state;
1029 u8 rsvd0[2];
1030 u16 tcp_window_size;
1031 u8 tcp_window_scale_count;
1032 u8 rsvd1;
1033 u32 tcp_mss:24;
1034 u8 rsvd2;
1035} __packed;
1036
1008struct tcp_connect_and_offload_out { 1037struct tcp_connect_and_offload_out {
1009 struct be_cmd_resp_hdr hdr; 1038 struct be_cmd_resp_hdr hdr;
1010 u32 connection_handle; 1039 u32 connection_handle;
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index a3df43324c98..fd284ff36ecf 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1106,7 +1106,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
1106 struct beiscsi_hba *phba = beiscsi_ep->phba; 1106 struct beiscsi_hba *phba = beiscsi_ep->phba;
1107 struct tcp_connect_and_offload_out *ptcpcnct_out; 1107 struct tcp_connect_and_offload_out *ptcpcnct_out;
1108 struct be_dma_mem nonemb_cmd; 1108 struct be_dma_mem nonemb_cmd;
1109 unsigned int tag; 1109 unsigned int tag, req_memsize;
1110 int ret = -ENOMEM; 1110 int ret = -ENOMEM;
1111 1111
1112 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 1112 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
@@ -1127,8 +1127,14 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
1127 (beiscsi_ep->ep_cid)] = ep; 1127 (beiscsi_ep->ep_cid)] = ep;
1128 1128
1129 beiscsi_ep->cid_vld = 0; 1129 beiscsi_ep->cid_vld = 0;
1130
1131 if (is_chip_be2_be3r(phba))
1132 req_memsize = sizeof(struct tcp_connect_and_offload_in);
1133 else
1134 req_memsize = sizeof(struct tcp_connect_and_offload_in_v1);
1135
1130 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 1136 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
1131 sizeof(struct tcp_connect_and_offload_in), 1137 req_memsize,
1132 &nonemb_cmd.dma); 1138 &nonemb_cmd.dma);
1133 if (nonemb_cmd.va == NULL) { 1139 if (nonemb_cmd.va == NULL) {
1134 1140
@@ -1139,7 +1145,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
1139 beiscsi_free_ep(beiscsi_ep); 1145 beiscsi_free_ep(beiscsi_ep);
1140 return -ENOMEM; 1146 return -ENOMEM;
1141 } 1147 }
1142 nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in); 1148 nonemb_cmd.size = req_memsize;
1143 memset(nonemb_cmd.va, 0, nonemb_cmd.size); 1149 memset(nonemb_cmd.va, 0, nonemb_cmd.size);
1144 tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd); 1150 tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
1145 if (tag <= 0) { 1151 if (tag <= 0) {
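beiscsi_open_conn() now sizes the non-embedded mailbox buffer by command version: BE2/BE3-R adapters keep the original tcp_connect_and_offload_in layout, while newer chips use the v1 layout added in be_cmds.h, which appends the TCP window size, window scale count and MSS fields. Condensed, using only names visible in the hunks above:

    unsigned int req_memsize = is_chip_be2_be3r(phba) ?
    		sizeof(struct tcp_connect_and_offload_in) :
    		sizeof(struct tcp_connect_and_offload_in_v1);

    nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, req_memsize,
    				     &nonemb_cmd.dma);
    nonemb_cmd.size = req_memsize;
    memset(nonemb_cmd.va, 0, nonemb_cmd.size);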
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 0d822297aa80..56467df3d6de 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -599,15 +599,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
599 pci_set_drvdata(pcidev, phba); 599 pci_set_drvdata(pcidev, phba);
600 phba->interface_handle = 0xFFFFFFFF; 600 phba->interface_handle = 0xFFFFFFFF;
601 601
602 if (iscsi_host_add(shost, &phba->pcidev->dev))
603 goto free_devices;
604
605 return phba; 602 return phba;
606
607free_devices:
608 pci_dev_put(phba->pcidev);
609 iscsi_host_free(phba->shost);
610 return NULL;
611} 603}
612 604
613static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba) 605static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
@@ -2279,6 +2271,7 @@ static int be_iopoll(struct blk_iopoll *iop, int budget)
2279 2271
2280 pbe_eq = container_of(iop, struct be_eq_obj, iopoll); 2272 pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
2281 ret = beiscsi_process_cq(pbe_eq); 2273 ret = beiscsi_process_cq(pbe_eq);
2274 pbe_eq->cq_count += ret;
2282 if (ret < budget) { 2275 if (ret < budget) {
2283 phba = pbe_eq->phba; 2276 phba = pbe_eq->phba;
2284 blk_iopoll_complete(iop); 2277 blk_iopoll_complete(iop);
@@ -3692,7 +3685,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
3692 struct hwi_controller *phwi_ctrlr; 3685 struct hwi_controller *phwi_ctrlr;
3693 struct hwi_context_memory *phwi_context; 3686 struct hwi_context_memory *phwi_context;
3694 struct hwi_async_pdu_context *pasync_ctx; 3687 struct hwi_async_pdu_context *pasync_ctx;
3695 int i, eq_num, ulp_num; 3688 int i, eq_for_mcc, ulp_num;
3696 3689
3697 phwi_ctrlr = phba->phwi_ctrlr; 3690 phwi_ctrlr = phba->phwi_ctrlr;
3698 phwi_context = phwi_ctrlr->phwi_ctxt; 3691 phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -3729,16 +3722,17 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
3729 if (q->created) 3722 if (q->created)
3730 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 3723 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3731 } 3724 }
3725
3726 be_mcc_queues_destroy(phba);
3732 if (phba->msix_enabled) 3727 if (phba->msix_enabled)
3733 eq_num = 1; 3728 eq_for_mcc = 1;
3734 else 3729 else
3735 eq_num = 0; 3730 eq_for_mcc = 0;
3736 for (i = 0; i < (phba->num_cpus + eq_num); i++) { 3731 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3737 q = &phwi_context->be_eq[i].q; 3732 q = &phwi_context->be_eq[i].q;
3738 if (q->created) 3733 if (q->created)
3739 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); 3734 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
3740 } 3735 }
3741 be_mcc_queues_destroy(phba);
3742 be_cmd_fw_uninit(ctrl); 3736 be_cmd_fw_uninit(ctrl);
3743} 3737}
3744 3738
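The reordering in hwi_cleanup() is about teardown order: an event queue must outlive every completion queue attached to it, and the MCC CQ hangs off one of the EQs destroyed in the loop. Moving be_mcc_queues_destroy() ahead of the EQ loop yields

    /* teardown order after this change:
     * WRB/default PDU queues -> CQs -> MCC queues -> EQs -> be_cmd_fw_uninit()
     */

instead of tearing down the MCC CQ after its parent EQ is already gone. The eq_num -> eq_for_mcc rename just documents why one extra EQ exists when MSI-X is enabled.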
@@ -3833,9 +3827,9 @@ static int hwi_init_port(struct beiscsi_hba *phba)
3833 3827
3834 phwi_ctrlr = phba->phwi_ctrlr; 3828 phwi_ctrlr = phba->phwi_ctrlr;
3835 phwi_context = phwi_ctrlr->phwi_ctxt; 3829 phwi_context = phwi_ctrlr->phwi_ctxt;
3836 phwi_context->max_eqd = 0; 3830 phwi_context->max_eqd = 128;
3837 phwi_context->min_eqd = 0; 3831 phwi_context->min_eqd = 0;
3838 phwi_context->cur_eqd = 64; 3832 phwi_context->cur_eqd = 0;
3839 be_cmd_fw_initialize(&phba->ctrl); 3833 be_cmd_fw_initialize(&phba->ctrl);
3840 3834
3841 status = beiscsi_create_eqs(phba, phwi_context); 3835 status = beiscsi_create_eqs(phba, phwi_context);
@@ -4204,6 +4198,8 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
4204 kfree(phba->ep_array); 4198 kfree(phba->ep_array);
4205 phba->ep_array = NULL; 4199 phba->ep_array = NULL;
4206 ret = -ENOMEM; 4200 ret = -ENOMEM;
4201
4202 goto free_memory;
4207 } 4203 }
4208 4204
4209 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 4205 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
@@ -5290,6 +5286,57 @@ static void beiscsi_msix_enable(struct beiscsi_hba *phba)
5290 return; 5286 return;
5291} 5287}
5292 5288
5289static void be_eqd_update(struct beiscsi_hba *phba)
5290{
5291 struct be_set_eqd set_eqd[MAX_CPUS];
5292 struct be_aic_obj *aic;
5293 struct be_eq_obj *pbe_eq;
5294 struct hwi_controller *phwi_ctrlr;
5295 struct hwi_context_memory *phwi_context;
5296 int eqd, i, num = 0;
5297 ulong now;
5298 u32 pps, delta;
5299 unsigned int tag;
5300
5301 phwi_ctrlr = phba->phwi_ctrlr;
5302 phwi_context = phwi_ctrlr->phwi_ctxt;
5303
5304 for (i = 0; i <= phba->num_cpus; i++) {
5305 aic = &phba->aic_obj[i];
5306 pbe_eq = &phwi_context->be_eq[i];
5307 now = jiffies;
5308 if (!aic->jiffs || time_before(now, aic->jiffs) ||
5309 pbe_eq->cq_count < aic->eq_prev) {
5310 aic->jiffs = now;
5311 aic->eq_prev = pbe_eq->cq_count;
5312 continue;
5313 }
5314 delta = jiffies_to_msecs(now - aic->jiffs);
5315 pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta);
5316 eqd = (pps / 1500) << 2;
5317
5318 if (eqd < 8)
5319 eqd = 0;
5320 eqd = min_t(u32, eqd, phwi_context->max_eqd);
5321 eqd = max_t(u32, eqd, phwi_context->min_eqd);
5322
5323 aic->jiffs = now;
5324 aic->eq_prev = pbe_eq->cq_count;
5325
5326 if (eqd != aic->prev_eqd) {
5327 set_eqd[num].delay_multiplier = (eqd * 65)/100;
5328 set_eqd[num].eq_id = pbe_eq->q.id;
5329 aic->prev_eqd = eqd;
5330 num++;
5331 }
5332 }
5333 if (num) {
5334 tag = be_cmd_modify_eq_delay(phba, set_eqd, num);
5335 if (tag)
5336 beiscsi_mccq_compl(phba, tag, NULL, NULL);
5337 }
5338}
5339
5293/* 5340/*
5294 * beiscsi_hw_health_check()- Check adapter health 5341 * beiscsi_hw_health_check()- Check adapter health
5295 * @work: work item to check HW health 5342 * @work: work item to check HW health
@@ -5303,6 +5350,8 @@ beiscsi_hw_health_check(struct work_struct *work)
5303 container_of(work, struct beiscsi_hba, 5350 container_of(work, struct beiscsi_hba,
5304 beiscsi_hw_check_task.work); 5351 beiscsi_hw_check_task.work);
5305 5352
5353 be_eqd_update(phba);
5354
5306 beiscsi_ue_detect(phba); 5355 beiscsi_ue_detect(phba);
5307 5356
5308 schedule_delayed_work(&phba->beiscsi_hw_check_task, 5357 schedule_delayed_work(&phba->beiscsi_hw_check_task,
@@ -5579,7 +5628,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
5579 phba->ctrl.mcc_numtag[i + 1] = 0; 5628 phba->ctrl.mcc_numtag[i + 1] = 0;
5580 phba->ctrl.mcc_tag_available++; 5629 phba->ctrl.mcc_tag_available++;
5581 memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0, 5630 memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
5582 sizeof(struct beiscsi_mcc_tag_state)); 5631 sizeof(struct be_dma_mem));
5583 } 5632 }
5584 5633
5585 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; 5634 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
@@ -5621,6 +5670,9 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
5621 } 5670 }
5622 hwi_enable_intr(phba); 5671 hwi_enable_intr(phba);
5623 5672
5673 if (iscsi_host_add(phba->shost, &phba->pcidev->dev))
5674 goto free_blkenbld;
5675
5624 if (beiscsi_setup_boot_info(phba)) 5676 if (beiscsi_setup_boot_info(phba))
5625 /* 5677 /*
5626 * log error but continue, because we may not be using 5678 * log error but continue, because we may not be using
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 9380b55bdeaf..9ceab426eec9 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -36,7 +36,7 @@
36#include <scsi/scsi_transport_iscsi.h> 36#include <scsi/scsi_transport_iscsi.h>
37 37
38#define DRV_NAME "be2iscsi" 38#define DRV_NAME "be2iscsi"
39#define BUILD_STR "10.2.125.0" 39#define BUILD_STR "10.2.273.0"
40#define BE_NAME "Emulex OneConnect" \ 40#define BE_NAME "Emulex OneConnect" \
41 "Open-iSCSI Driver version" BUILD_STR 41 "Open-iSCSI Driver version" BUILD_STR
42#define DRV_DESC BE_NAME " " "Driver" 42#define DRV_DESC BE_NAME " " "Driver"
@@ -71,8 +71,8 @@
71 71
72#define BEISCSI_SGLIST_ELEMENTS 30 72#define BEISCSI_SGLIST_ELEMENTS 30
73 73
74#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */ 74#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
75#define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */ 75#define BEISCSI_MAX_SECTORS 1024 /* scsi_host->max_sectors */
76#define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */ 76#define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */
77 77
78#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */ 78#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */
@@ -427,6 +427,7 @@ struct beiscsi_hba {
427 struct mgmt_session_info boot_sess; 427 struct mgmt_session_info boot_sess;
428 struct invalidate_command_table inv_tbl[128]; 428 struct invalidate_command_table inv_tbl[128];
429 429
430 struct be_aic_obj aic_obj[MAX_CPUS];
430 unsigned int attr_log_enable; 431 unsigned int attr_log_enable;
431 int (*iotask_fn)(struct iscsi_task *, 432 int (*iotask_fn)(struct iscsi_task *,
432 struct scatterlist *sg, 433 struct scatterlist *sg,
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 088bdf752cfa..07934b0b9ee1 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -155,6 +155,43 @@ void beiscsi_ue_detect(struct beiscsi_hba *phba)
155 } 155 }
156} 156}
157 157
158int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
159 struct be_set_eqd *set_eqd, int num)
160{
161 struct be_ctrl_info *ctrl = &phba->ctrl;
162 struct be_mcc_wrb *wrb;
163 struct be_cmd_req_modify_eq_delay *req;
164 unsigned int tag = 0;
165 int i;
166
167 spin_lock(&ctrl->mbox_lock);
168 tag = alloc_mcc_tag(phba);
169 if (!tag) {
170 spin_unlock(&ctrl->mbox_lock);
171 return tag;
172 }
173
174 wrb = wrb_from_mccq(phba);
175 req = embedded_payload(wrb);
176
177 wrb->tag0 |= tag;
178 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
179 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
180 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
181
182 req->num_eq = cpu_to_le32(num);
183 for (i = 0; i < num; i++) {
184 req->delay[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
185 req->delay[i].phase = 0;
186 req->delay[i].delay_multiplier =
187 cpu_to_le32(set_eqd[i].delay_multiplier);
188 }
189
190 be_mcc_notify(phba);
191 spin_unlock(&ctrl->mbox_lock);
192 return tag;
193}
194
158/** 195/**
159 * mgmt_reopen_session()- Reopen a session based on reopen_type 196 * mgmt_reopen_session()- Reopen a session based on reopen_type
160 * @phba: Device priv structure instance 197 * @phba: Device priv structure instance
@@ -447,8 +484,8 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
447 struct be_dma_mem *nonemb_cmd) 484 struct be_dma_mem *nonemb_cmd)
448{ 485{
449 struct be_cmd_resp_hdr *resp; 486 struct be_cmd_resp_hdr *resp;
450 struct be_mcc_wrb *wrb = wrb_from_mccq(phba); 487 struct be_mcc_wrb *wrb;
451 struct be_sge *mcc_sge = nonembedded_sgl(wrb); 488 struct be_sge *mcc_sge;
452 unsigned int tag = 0; 489 unsigned int tag = 0;
453 struct iscsi_bsg_request *bsg_req = job->request; 490 struct iscsi_bsg_request *bsg_req = job->request;
454 struct be_bsg_vendor_cmd *req = nonemb_cmd->va; 491 struct be_bsg_vendor_cmd *req = nonemb_cmd->va;
@@ -465,7 +502,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
465 req->sector = sector; 502 req->sector = sector;
466 req->offset = offset; 503 req->offset = offset;
467 spin_lock(&ctrl->mbox_lock); 504 spin_lock(&ctrl->mbox_lock);
468 memset(wrb, 0, sizeof(*wrb));
469 505
470 switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) { 506 switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
471 case BEISCSI_WRITE_FLASH: 507 case BEISCSI_WRITE_FLASH:
@@ -495,6 +531,8 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
495 return tag; 531 return tag;
496 } 532 }
497 533
534 wrb = wrb_from_mccq(phba);
535 mcc_sge = nonembedded_sgl(wrb);
498 be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 536 be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false,
499 job->request_payload.sg_cnt); 537 job->request_payload.sg_cnt);
500 mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); 538 mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
@@ -525,7 +563,6 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num)
525 int status = 0; 563 int status = 0;
526 564
527 spin_lock(&ctrl->mbox_lock); 565 spin_lock(&ctrl->mbox_lock);
528 memset(wrb, 0, sizeof(*wrb));
529 566
530 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 567 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
531 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, 568 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
@@ -675,7 +712,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
675 struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr; 712 struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr;
676 struct be_ctrl_info *ctrl = &phba->ctrl; 713 struct be_ctrl_info *ctrl = &phba->ctrl;
677 struct be_mcc_wrb *wrb; 714 struct be_mcc_wrb *wrb;
678 struct tcp_connect_and_offload_in *req; 715 struct tcp_connect_and_offload_in_v1 *req;
679 unsigned short def_hdr_id; 716 unsigned short def_hdr_id;
680 unsigned short def_data_id; 717 unsigned short def_data_id;
681 struct phys_addr template_address = { 0, 0 }; 718 struct phys_addr template_address = { 0, 0 };
@@ -702,17 +739,16 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
702 return tag; 739 return tag;
703 } 740 }
704 wrb = wrb_from_mccq(phba); 741 wrb = wrb_from_mccq(phba);
705 memset(wrb, 0, sizeof(*wrb));
706 sge = nonembedded_sgl(wrb); 742 sge = nonembedded_sgl(wrb);
707 743
708 req = nonemb_cmd->va; 744 req = nonemb_cmd->va;
709 memset(req, 0, sizeof(*req)); 745 memset(req, 0, sizeof(*req));
710 wrb->tag0 |= tag; 746 wrb->tag0 |= tag;
711 747
712 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); 748 be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1);
713 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, 749 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
714 OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD, 750 OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD,
715 sizeof(*req)); 751 nonemb_cmd->size);
716 if (dst_addr->sa_family == PF_INET) { 752 if (dst_addr->sa_family == PF_INET) {
717 __be32 s_addr = daddr_in->sin_addr.s_addr; 753 __be32 s_addr = daddr_in->sin_addr.s_addr;
718 req->ip_address.ip_type = BE2_IPV4; 754 req->ip_address.ip_type = BE2_IPV4;
@@ -758,6 +794,13 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
758 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); 794 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
759 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); 795 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
760 sge->len = cpu_to_le32(nonemb_cmd->size); 796 sge->len = cpu_to_le32(nonemb_cmd->size);
797
798 if (!is_chip_be2_be3r(phba)) {
799 req->hdr.version = MBX_CMD_VER1;
800 req->tcp_window_size = 0;
801 req->tcp_window_scale_count = 2;
802 }
803
761 be_mcc_notify(phba); 804 be_mcc_notify(phba);
762 spin_unlock(&ctrl->mbox_lock); 805 spin_unlock(&ctrl->mbox_lock);
763 return tag; 806 return tag;
@@ -804,7 +847,7 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
804 int resp_buf_len) 847 int resp_buf_len)
805{ 848{
806 struct be_ctrl_info *ctrl = &phba->ctrl; 849 struct be_ctrl_info *ctrl = &phba->ctrl;
807 struct be_mcc_wrb *wrb = wrb_from_mccq(phba); 850 struct be_mcc_wrb *wrb;
808 struct be_sge *sge; 851 struct be_sge *sge;
809 unsigned int tag; 852 unsigned int tag;
810 int rc = 0; 853 int rc = 0;
@@ -816,7 +859,8 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
816 rc = -ENOMEM; 859 rc = -ENOMEM;
817 goto free_cmd; 860 goto free_cmd;
818 } 861 }
819 memset(wrb, 0, sizeof(*wrb)); 862
863 wrb = wrb_from_mccq(phba);
820 wrb->tag0 |= tag; 864 wrb->tag0 |= tag;
821 sge = nonembedded_sgl(wrb); 865 sge = nonembedded_sgl(wrb);
822 866
@@ -964,10 +1008,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
964 BE2_IPV6 : BE2_IPV4 ; 1008 BE2_IPV6 : BE2_IPV4 ;
965 1009
966 rc = mgmt_get_if_info(phba, ip_type, &if_info); 1010 rc = mgmt_get_if_info(phba, ip_type, &if_info);
967 if (rc) { 1011 if (rc)
968 kfree(if_info);
969 return rc; 1012 return rc;
970 }
971 1013
972 if (boot_proto == ISCSI_BOOTPROTO_DHCP) { 1014 if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
973 if (if_info->dhcp_state) { 1015 if (if_info->dhcp_state) {
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 01b8c97284c0..24a8fc577477 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -335,5 +335,7 @@ void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
335void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params, 335void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
336 struct wrb_handle *pwrb_handle); 336 struct wrb_handle *pwrb_handle);
337void beiscsi_ue_detect(struct beiscsi_hba *phba); 337void beiscsi_ue_detect(struct beiscsi_hba *phba);
338int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
339 struct be_set_eqd *, int num);
338 340
339#endif 341#endif
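For reference, the arithmetic be_eqd_update() applies before calling be_cmd_modify_eq_delay(), with a worked example (values illustrative):

    /* pps = completions per second over the sampling window
     * eqd = (pps / 1500) << 2; values below 8 are forced to 0,
     *       then clamped to [min_eqd, max_eqd] = [0, 128]
     * delay_multiplier = eqd * 65 / 100
     *
     * 60000 completions in 1000 ms: pps = 60000,
     *   eqd = (60000 / 1500) << 2 = 160 -> clamped to 128, multiplier 83
     * 2000 completions in 1000 ms: pps = 2000,
     *   eqd = (2000 / 1500) << 2 = 4 -> below 8, so 0 (no coalescing)
     */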
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index cc0fbcdc5192..7593b7c1d336 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -507,7 +507,7 @@ bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
507 struct bfad_vport_s *vport; 507 struct bfad_vport_s *vport;
508 int rc; 508 int rc;
509 509
510 vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL); 510 vport = kzalloc(sizeof(struct bfad_vport_s), GFP_ATOMIC);
511 if (!vport) { 511 if (!vport) {
512 bfa_trc(bfad, 0); 512 bfa_trc(bfad, 0);
513 return; 513 return;
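The GFP_KERNEL-to-GFP_ATOMIC switch above is the whole fix: bfa_fcb_pbc_vport_create() is presumably reached with a spinlock held, where a sleeping allocation is illegal. The trade-off, sketched:

    /* May sleep waiting for memory: process context only, no locks held. */
    vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);

    /* Never sleeps: safe under spinlocks or in IRQ context, but fails
     * more readily, so the !vport error path above stays mandatory. */
    vport = kzalloc(sizeof(struct bfad_vport_s), GFP_ATOMIC);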
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f54843023466..785d0d71781e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -516,23 +516,17 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
516 skb_pull(skb, sizeof(struct fcoe_hdr)); 516 skb_pull(skb, sizeof(struct fcoe_hdr));
517 fr_len = skb->len - sizeof(struct fcoe_crc_eof); 517 fr_len = skb->len - sizeof(struct fcoe_crc_eof);
518 518
519 stats = per_cpu_ptr(lport->stats, get_cpu());
520 stats->RxFrames++;
521 stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
522
523 fp = (struct fc_frame *)skb; 519 fp = (struct fc_frame *)skb;
524 fc_frame_init(fp); 520 fc_frame_init(fp);
525 fr_dev(fp) = lport; 521 fr_dev(fp) = lport;
526 fr_sof(fp) = hp->fcoe_sof; 522 fr_sof(fp) = hp->fcoe_sof;
527 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { 523 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
528 put_cpu();
529 kfree_skb(skb); 524 kfree_skb(skb);
530 return; 525 return;
531 } 526 }
532 fr_eof(fp) = crc_eof.fcoe_eof; 527 fr_eof(fp) = crc_eof.fcoe_eof;
533 fr_crc(fp) = crc_eof.fcoe_crc32; 528 fr_crc(fp) = crc_eof.fcoe_crc32;
534 if (pskb_trim(skb, fr_len)) { 529 if (pskb_trim(skb, fr_len)) {
535 put_cpu();
536 kfree_skb(skb); 530 kfree_skb(skb);
537 return; 531 return;
538 } 532 }
@@ -544,7 +538,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
544 port = lport_priv(vn_port); 538 port = lport_priv(vn_port);
545 if (!ether_addr_equal(port->data_src_addr, dest_mac)) { 539 if (!ether_addr_equal(port->data_src_addr, dest_mac)) {
546 BNX2FC_HBA_DBG(lport, "fpma mismatch\n"); 540 BNX2FC_HBA_DBG(lport, "fpma mismatch\n");
547 put_cpu();
548 kfree_skb(skb); 541 kfree_skb(skb);
549 return; 542 return;
550 } 543 }
@@ -552,7 +545,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
552 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && 545 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
553 fh->fh_type == FC_TYPE_FCP) { 546 fh->fh_type == FC_TYPE_FCP) {
554 /* Drop FCP data. We don't do this in L2 path */ 547 /* Drop FCP data. We don't do this in L2 path */
555 put_cpu();
556 kfree_skb(skb); 548 kfree_skb(skb);
557 return; 549 return;
558 } 550 }
@@ -562,7 +554,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
562 case ELS_LOGO: 554 case ELS_LOGO:
563 if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { 555 if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
564 /* drop non-FIP LOGO */ 556 /* drop non-FIP LOGO */
565 put_cpu();
566 kfree_skb(skb); 557 kfree_skb(skb);
567 return; 558 return;
568 } 559 }
@@ -572,22 +563,23 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
572 563
573 if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) { 564 if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
574 /* Drop incoming ABTS */ 565 /* Drop incoming ABTS */
575 put_cpu();
576 kfree_skb(skb); 566 kfree_skb(skb);
577 return; 567 return;
578 } 568 }
579 569
570 stats = per_cpu_ptr(lport->stats, smp_processor_id());
571 stats->RxFrames++;
572 stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
573
580 if (le32_to_cpu(fr_crc(fp)) != 574 if (le32_to_cpu(fr_crc(fp)) !=
581 ~crc32(~0, skb->data, fr_len)) { 575 ~crc32(~0, skb->data, fr_len)) {
582 if (stats->InvalidCRCCount < 5) 576 if (stats->InvalidCRCCount < 5)
583 printk(KERN_WARNING PFX "dropping frame with " 577 printk(KERN_WARNING PFX "dropping frame with "
584 "CRC error\n"); 578 "CRC error\n");
585 stats->InvalidCRCCount++; 579 stats->InvalidCRCCount++;
586 put_cpu();
587 kfree_skb(skb); 580 kfree_skb(skb);
588 return; 581 return;
589 } 582 }
590 put_cpu();
591 fc_exch_recv(lport, fp); 583 fc_exch_recv(lport, fp);
592} 584}
593 585
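Two things change in bnx2fc_recv_frame() above: the stats update moves past all the early-drop checks, and the get_cpu()/put_cpu() pairing disappears. A sketch of the two per-CPU idioms (ours, not from the patch):

    /* Old: get_cpu() disables preemption, so every early return needed
     * a matching put_cpu() -- the six deletions above. */
    stats = per_cpu_ptr(lport->stats, get_cpu());
    stats->RxFrames++;
    put_cpu();

    /* New: sample the CPU id once and leave preemption enabled. Good
     * enough for approximate counters, with no unwind obligation. */
    stats = per_cpu_ptr(lport->stats, smp_processor_id());
    stats->RxFrames++;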
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 46a37657307f..512aed3ae4f1 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1966,26 +1966,29 @@ static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
1966{ 1966{
1967 int i; 1967 int i;
1968 int segment_count; 1968 int segment_count;
1969 int hash_table_size;
1970 u32 *pbl; 1969 u32 *pbl;
1971 1970
1972 segment_count = hba->hash_tbl_segment_count; 1971 if (hba->hash_tbl_segments) {
1973 hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
1974 sizeof(struct fcoe_hash_table_entry);
1975 1972
1976 pbl = hba->hash_tbl_pbl; 1973 pbl = hba->hash_tbl_pbl;
1977 for (i = 0; i < segment_count; ++i) { 1974 if (pbl) {
1978 dma_addr_t dma_address; 1975 segment_count = hba->hash_tbl_segment_count;
1976 for (i = 0; i < segment_count; ++i) {
1977 dma_addr_t dma_address;
1979 1978
1980 dma_address = le32_to_cpu(*pbl); 1979 dma_address = le32_to_cpu(*pbl);
1981 ++pbl; 1980 ++pbl;
1982 dma_address += ((u64)le32_to_cpu(*pbl)) << 32; 1981 dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
1983 ++pbl; 1982 ++pbl;
1984 dma_free_coherent(&hba->pcidev->dev, 1983 dma_free_coherent(&hba->pcidev->dev,
1985 BNX2FC_HASH_TBL_CHUNK_SIZE, 1984 BNX2FC_HASH_TBL_CHUNK_SIZE,
1986 hba->hash_tbl_segments[i], 1985 hba->hash_tbl_segments[i],
1987 dma_address); 1986 dma_address);
1987 }
1988 }
1988 1989
1990 kfree(hba->hash_tbl_segments);
1991 hba->hash_tbl_segments = NULL;
1989 } 1992 }
1990 1993
1991 if (hba->hash_tbl_pbl) { 1994 if (hba->hash_tbl_pbl) {
@@ -2023,7 +2026,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
2023 dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL); 2026 dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
2024 if (!dma_segment_array) { 2027 if (!dma_segment_array) {
2025 printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n"); 2028 printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
2026 return -ENOMEM; 2029 goto cleanup_ht;
2027 } 2030 }
2028 2031
2029 for (i = 0; i < segment_count; ++i) { 2032 for (i = 0; i < segment_count; ++i) {
@@ -2034,15 +2037,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
2034 GFP_KERNEL); 2037 GFP_KERNEL);
2035 if (!hba->hash_tbl_segments[i]) { 2038 if (!hba->hash_tbl_segments[i]) {
2036 printk(KERN_ERR PFX "hash segment alloc failed\n"); 2039 printk(KERN_ERR PFX "hash segment alloc failed\n");
2037 while (--i >= 0) { 2040 goto cleanup_dma;
2038 dma_free_coherent(&hba->pcidev->dev,
2039 BNX2FC_HASH_TBL_CHUNK_SIZE,
2040 hba->hash_tbl_segments[i],
2041 dma_segment_array[i]);
2042 hba->hash_tbl_segments[i] = NULL;
2043 }
2044 kfree(dma_segment_array);
2045 return -ENOMEM;
2046 } 2041 }
2047 memset(hba->hash_tbl_segments[i], 0, 2042 memset(hba->hash_tbl_segments[i], 0,
2048 BNX2FC_HASH_TBL_CHUNK_SIZE); 2043 BNX2FC_HASH_TBL_CHUNK_SIZE);
@@ -2054,8 +2049,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
2054 GFP_KERNEL); 2049 GFP_KERNEL);
2055 if (!hba->hash_tbl_pbl) { 2050 if (!hba->hash_tbl_pbl) {
2056 printk(KERN_ERR PFX "hash table pbl alloc failed\n"); 2051 printk(KERN_ERR PFX "hash table pbl alloc failed\n");
2057 kfree(dma_segment_array); 2052 goto cleanup_dma;
2058 return -ENOMEM;
2059 } 2053 }
2060 memset(hba->hash_tbl_pbl, 0, PAGE_SIZE); 2054 memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
2061 2055
@@ -2080,6 +2074,22 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
2080 } 2074 }
2081 kfree(dma_segment_array); 2075 kfree(dma_segment_array);
2082 return 0; 2076 return 0;
2077
2078cleanup_dma:
2079 for (i = 0; i < segment_count; ++i) {
2080 if (hba->hash_tbl_segments[i])
2081 dma_free_coherent(&hba->pcidev->dev,
2082 BNX2FC_HASH_TBL_CHUNK_SIZE,
2083 hba->hash_tbl_segments[i],
2084 dma_segment_array[i]);
2085 }
2086
2087 kfree(dma_segment_array);
2088
2089cleanup_ht:
2090 kfree(hba->hash_tbl_segments);
2091 hba->hash_tbl_segments = NULL;
2092 return -ENOMEM;
2083} 2093}
2084 2094
2085/** 2095/**
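The allocation rework above is the kernel's standard goto-unwind: cleanup labels are ordered so a failure at stage N jumps to the label that frees stages N-1 downward and falls through the rest. Schematically (a sketch, not driver code):

    a = alloc_stage_a();
    if (!a)
            return -ENOMEM;
    b = alloc_stage_b();
    if (!b)
            goto undo_a;
    c = alloc_stage_c();
    if (!c)
            goto undo_b;
    return 0;

    undo_b:
            free_stage_b(b);
    undo_a:
            free_stage_a(a);
            return -ENOMEM;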
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 32a5e0a2a669..7bc47fc7c686 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -282,6 +282,8 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
282 arr_sz, GFP_KERNEL); 282 arr_sz, GFP_KERNEL);
283 if (!cmgr->free_list_lock) { 283 if (!cmgr->free_list_lock) {
284 printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); 284 printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
285 kfree(cmgr->free_list);
286 cmgr->free_list = NULL;
285 goto mem_err; 287 goto mem_err;
286 } 288 }
287 289
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 5248c888552b..7bcf67eec921 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -120,6 +120,7 @@ static struct request *get_alua_req(struct scsi_device *sdev,
120 "%s: blk_get_request failed\n", __func__); 120 "%s: blk_get_request failed\n", __func__);
121 return NULL; 121 return NULL;
122 } 122 }
123 blk_rq_set_block_pc(rq);
123 124
124 if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) { 125 if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
125 blk_put_request(rq); 126 blk_put_request(rq);
@@ -128,7 +129,6 @@ static struct request *get_alua_req(struct scsi_device *sdev,
128 return NULL; 129 return NULL;
129 } 130 }
130 131
131 rq->cmd_type = REQ_TYPE_BLOCK_PC;
132 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | 132 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
133 REQ_FAILFAST_DRIVER; 133 REQ_FAILFAST_DRIVER;
134 rq->retries = ALUA_FAILOVER_RETRIES; 134 rq->retries = ALUA_FAILOVER_RETRIES;
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index e1c8be06de9d..6f07f7fe3aa1 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -280,6 +280,7 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
280 return NULL; 280 return NULL;
281 } 281 }
282 282
283 blk_rq_set_block_pc(rq);
283 rq->cmd_len = COMMAND_SIZE(cmd); 284 rq->cmd_len = COMMAND_SIZE(cmd);
284 rq->cmd[0] = cmd; 285 rq->cmd[0] = cmd;
285 286
@@ -304,7 +305,6 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
304 break; 305 break;
305 } 306 }
306 307
307 rq->cmd_type = REQ_TYPE_BLOCK_PC;
308 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | 308 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
309 REQ_FAILFAST_DRIVER; 309 REQ_FAILFAST_DRIVER;
310 rq->timeout = CLARIION_TIMEOUT; 310 rq->timeout = CLARIION_TIMEOUT;
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 084062bb8ee9..e9d9fea9e272 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -120,7 +120,7 @@ retry:
120 if (!req) 120 if (!req)
121 return SCSI_DH_RES_TEMP_UNAVAIL; 121 return SCSI_DH_RES_TEMP_UNAVAIL;
122 122
123 req->cmd_type = REQ_TYPE_BLOCK_PC; 123 blk_rq_set_block_pc(req);
124 req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | 124 req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
125 REQ_FAILFAST_DRIVER; 125 REQ_FAILFAST_DRIVER;
126 req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY); 126 req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
@@ -250,7 +250,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
250 if (!req) 250 if (!req)
251 return SCSI_DH_RES_TEMP_UNAVAIL; 251 return SCSI_DH_RES_TEMP_UNAVAIL;
252 252
253 req->cmd_type = REQ_TYPE_BLOCK_PC; 253 blk_rq_set_block_pc(req);
254 req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | 254 req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
255 REQ_FAILFAST_DRIVER; 255 REQ_FAILFAST_DRIVER;
256 req->cmd_len = COMMAND_SIZE(START_STOP); 256 req->cmd_len = COMMAND_SIZE(START_STOP);
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 4b9cf93f3fb6..826069db9848 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -279,6 +279,7 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
279 "get_rdac_req: blk_get_request failed.\n"); 279 "get_rdac_req: blk_get_request failed.\n");
280 return NULL; 280 return NULL;
281 } 281 }
282 blk_rq_set_block_pc(rq);
282 283
283 if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) { 284 if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
284 blk_put_request(rq); 285 blk_put_request(rq);
@@ -287,7 +288,6 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
287 return NULL; 288 return NULL;
288 } 289 }
289 290
290 rq->cmd_type = REQ_TYPE_BLOCK_PC;
291 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | 291 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
292 REQ_FAILFAST_DRIVER; 292 REQ_FAILFAST_DRIVER;
293 rq->retries = RDAC_RETRIES; 293 rq->retries = RDAC_RETRIES;
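All four device handlers above get the same mechanical conversion: the open-coded rq->cmd_type = REQ_TYPE_BLOCK_PC assignment becomes blk_rq_set_block_pc(rq), called right after allocation and before any mapping. The converted shape (a sketch against the 3.16-era block API):

    rq = blk_get_request(q, rw, GFP_NOIO);
    if (!rq)
            return NULL;
    blk_rq_set_block_pc(rq);  /* was: rq->cmd_type = REQ_TYPE_BLOCK_PC */

    if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
            blk_put_request(rq);
            return NULL;
    }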
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c
index eb29fe7eaf49..0a667fe05006 100644
--- a/drivers/scsi/dtc.c
+++ b/drivers/scsi/dtc.c
@@ -3,8 +3,6 @@
3#define PSEUDO_DMA 3#define PSEUDO_DMA
4#define DONT_USE_INTR 4#define DONT_USE_INTR
5#define UNSAFE /* Leave interrupts enabled during pseudo-dma I/O */ 5#define UNSAFE /* Leave interrupts enabled during pseudo-dma I/O */
6#define xNDEBUG (NDEBUG_INTR+NDEBUG_RESELECTION+\
7 NDEBUG_SELECTION+NDEBUG_ARBITRATION)
8#define DMA_WORKS_RIGHT 6#define DMA_WORKS_RIGHT
9 7
10 8
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index f37f3e3dd5d5..6504a195c874 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -390,7 +390,7 @@ static int esas2r_probe(struct pci_dev *pcid,
390 esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev), 390 esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
391 "pci_enable_device() OK"); 391 "pci_enable_device() OK");
392 esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev), 392 esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
393 "after pci_device_enable() enable_cnt: %d", 393 "after pci_enable_device() enable_cnt: %d",
394 pcid->enable_cnt.counter); 394 pcid->enable_cnt.counter);
395 395
396 host = scsi_host_alloc(&driver_template, host_alloc_size); 396 host = scsi_host_alloc(&driver_template, host_alloc_size);
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 528d43b7b569..1d3521e13d77 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,14 +39,15 @@
39 39
40#define DRV_NAME "fnic" 40#define DRV_NAME "fnic"
41#define DRV_DESCRIPTION "Cisco FCoE HBA Driver" 41#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
42#define DRV_VERSION "1.5.0.45" 42#define DRV_VERSION "1.6.0.10"
43#define PFX DRV_NAME ": " 43#define PFX DRV_NAME ": "
44#define DFX DRV_NAME "%d: " 44#define DFX DRV_NAME "%d: "
45 45
46#define DESC_CLEAN_LOW_WATERMARK 8 46#define DESC_CLEAN_LOW_WATERMARK 8
47#define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */ 47#define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */
48#define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */ 48#define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */
49#define FNIC_MAX_IO_REQ 2048 /* scsi_cmnd tag map entries */ 49#define FNIC_MAX_IO_REQ 1024 /* scsi_cmnd tag map entries */
50#define FNIC_DFLT_IO_REQ 256 /* Default scsi_cmnd tag map entries */
50#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */ 51#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
51#define FNIC_DFLT_QUEUE_DEPTH 32 52#define FNIC_DFLT_QUEUE_DEPTH 32
52#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */ 53#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index b6073f875761..2c613bdea78f 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -25,6 +25,21 @@ static struct dentry *fnic_trace_debugfs_file;
25static struct dentry *fnic_trace_enable; 25static struct dentry *fnic_trace_enable;
26static struct dentry *fnic_stats_debugfs_root; 26static struct dentry *fnic_stats_debugfs_root;
27 27
28static struct dentry *fnic_fc_trace_debugfs_file;
29static struct dentry *fnic_fc_rdata_trace_debugfs_file;
30static struct dentry *fnic_fc_trace_enable;
31static struct dentry *fnic_fc_trace_clear;
32
33struct fc_trace_flag_type {
34 u8 fc_row_file;
35 u8 fc_normal_file;
36 u8 fnic_trace;
37 u8 fc_trace;
38 u8 fc_clear;
39};
40
41static struct fc_trace_flag_type *fc_trc_flag;
42
28/* 43/*
29 * fnic_debugfs_init - Initialize debugfs for fnic debug logging 44 * fnic_debugfs_init - Initialize debugfs for fnic debug logging
30 * 45 *
@@ -56,6 +71,18 @@ int fnic_debugfs_init(void)
56 return rc; 71 return rc;
57 } 72 }
58 73
74 /* Allocate memory for the structure */
75 fc_trc_flag = (struct fc_trace_flag_type *)
76 vmalloc(sizeof(struct fc_trace_flag_type));
77
78 if (fc_trc_flag) {
79 fc_trc_flag->fc_row_file = 0;
80 fc_trc_flag->fc_normal_file = 1;
81 fc_trc_flag->fnic_trace = 2;
82 fc_trc_flag->fc_trace = 3;
83 fc_trc_flag->fc_clear = 4;
84 }
85
59 rc = 0; 86 rc = 0;
60 return rc; 87 return rc;
61} 88}
@@ -74,15 +101,19 @@ void fnic_debugfs_terminate(void)
74 101
75 debugfs_remove(fnic_trace_debugfs_root); 102 debugfs_remove(fnic_trace_debugfs_root);
76 fnic_trace_debugfs_root = NULL; 103 fnic_trace_debugfs_root = NULL;
104
105 if (fc_trc_flag)
106 vfree(fc_trc_flag);
77} 107}
78 108
79/* 109/*
80 * fnic_trace_ctrl_open - Open the trace_enable file 110 * fnic_trace_ctrl_open - Open the trace_enable file for fnic_trace
111 * or the fc_trace_enable file for fc_trace
81 * @inode: The inode pointer. 112 * @inode: The inode pointer.
82 * @file: The file pointer to attach the trace enable/disable flag. 113 * @file: The file pointer to attach the trace enable/disable flag.
83 * 114 *
84 * Description: 115 * Description:
85 * This routine opens a debugfs file trace_enable. 116 * This routine opens a debugfs file trace_enable or fc_trace_enable.
86 * 117 *
87 * Returns: 118 * Returns:
88 * This function returns zero if successful. 119 * This function returns zero if successful.
@@ -94,15 +125,19 @@ static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp)
94} 125}
95 126
96/* 127/*
97 * fnic_trace_ctrl_read - Read a trace_enable debugfs file 128 * fnic_trace_ctrl_read -
129 * Read trace_enable, fc_trace_enable
130 * or fc_trace_clear debugfs file
98 * @filp: The file pointer to read from. 131 * @filp: The file pointer to read from.
99 * @ubuf: The buffer to copy the data to. 132 * @ubuf: The buffer to copy the data to.
100 * @cnt: The number of bytes to read. 133 * @cnt: The number of bytes to read.
101 * @ppos: The position in the file to start reading from. 134 * @ppos: The position in the file to start reading from.
102 * 135 *
103 * Description: 136 * Description:
104 * This routine reads value of variable fnic_tracing_enabled 137 * This routine reads value of variable fnic_tracing_enabled or
105 * and stores into local @buf. It will start reading file at @ppos and 138 * fnic_fc_tracing_enabled or fnic_fc_trace_cleared
139 * and stores into local @buf.
140 * It will start reading file at @ppos and
106 * copy up to @cnt of data to @ubuf from @buf. 141 * copy up to @cnt of data to @ubuf from @buf.
107 * 142 *
108 * Returns: 143 * Returns:
@@ -114,13 +149,25 @@ static ssize_t fnic_trace_ctrl_read(struct file *filp,
114{ 149{
115 char buf[64]; 150 char buf[64];
116 int len; 151 int len;
117 len = sprintf(buf, "%u\n", fnic_tracing_enabled); 152 u8 *trace_type;
153 len = 0;
154 trace_type = (u8 *)filp->private_data;
155 if (*trace_type == fc_trc_flag->fnic_trace)
156 len = sprintf(buf, "%u\n", fnic_tracing_enabled);
157 else if (*trace_type == fc_trc_flag->fc_trace)
158 len = sprintf(buf, "%u\n", fnic_fc_tracing_enabled);
159 else if (*trace_type == fc_trc_flag->fc_clear)
160 len = sprintf(buf, "%u\n", fnic_fc_trace_cleared);
161 else
162 pr_err("fnic: Cannot read from any debugfs file\n");
118 163
119 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); 164 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
120} 165}
121 166
122/* 167/*
123 * fnic_trace_ctrl_write - Write to trace_enable debugfs file 168 * fnic_trace_ctrl_write -
169 * Write to trace_enable, fc_trace_enable or
170 * fc_trace_clear debugfs file
124 * @filp: The file pointer to write from. 171 * @filp: The file pointer to write from.
125 * @ubuf: The buffer to copy the data from. 172 * @ubuf: The buffer to copy the data from.
126 * @cnt: The number of bytes to write. 173 * @cnt: The number of bytes to write.
@@ -128,7 +175,8 @@ static ssize_t fnic_trace_ctrl_read(struct file *filp,
128 * 175 *
129 * Description: 176 * Description:
130 * This routine writes data from user buffer @ubuf to buffer @buf and 177 * This routine writes data from user buffer @ubuf to buffer @buf and
131 * sets fnic_tracing_enabled value as per user input. 178 * sets fnic_tracing_enabled, fnic_fc_tracing_enabled or fnic_fc_trace_cleared
179 * value as per user input.
132 * 180 *
133 * Returns: 181 * Returns:
134 * This function returns the amount of data that was written. 182 * This function returns the amount of data that was written.
@@ -140,6 +188,8 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp,
140 char buf[64]; 188 char buf[64];
141 unsigned long val; 189 unsigned long val;
142 int ret; 190 int ret;
191 u8 *trace_type;
192 trace_type = (u8 *)filp->private_data;
143 193
144 if (cnt >= sizeof(buf)) 194 if (cnt >= sizeof(buf))
145 return -EINVAL; 195 return -EINVAL;
@@ -153,12 +203,27 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp,
153 if (ret < 0) 203 if (ret < 0)
154 return ret; 204 return ret;
155 205
156 fnic_tracing_enabled = val; 206 if (*trace_type == fc_trc_flag->fnic_trace)
207 fnic_tracing_enabled = val;
208 else if (*trace_type == fc_trc_flag->fc_trace)
209 fnic_fc_tracing_enabled = val;
210 else if (*trace_type == fc_trc_flag->fc_clear)
211 fnic_fc_trace_cleared = val;
212 else
213 pr_err("fnic: cannot write to any debugfs file\n");
214
157 (*ppos)++; 215 (*ppos)++;
158 216
159 return cnt; 217 return cnt;
160} 218}
161 219
220static const struct file_operations fnic_trace_ctrl_fops = {
221 .owner = THIS_MODULE,
222 .open = fnic_trace_ctrl_open,
223 .read = fnic_trace_ctrl_read,
224 .write = fnic_trace_ctrl_write,
225};
226
162/* 227/*
163 * fnic_trace_debugfs_open - Open the fnic trace log 228 * fnic_trace_debugfs_open - Open the fnic trace log
164 * @inode: The inode pointer 229 * @inode: The inode pointer
@@ -178,19 +243,36 @@ static int fnic_trace_debugfs_open(struct inode *inode,
178 struct file *file) 243 struct file *file)
179{ 244{
180 fnic_dbgfs_t *fnic_dbg_prt; 245 fnic_dbgfs_t *fnic_dbg_prt;
246 u8 *rdata_ptr;
247 rdata_ptr = (u8 *)inode->i_private;
181 fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL); 248 fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL);
182 if (!fnic_dbg_prt) 249 if (!fnic_dbg_prt)
183 return -ENOMEM; 250 return -ENOMEM;
184 251
185 fnic_dbg_prt->buffer = vmalloc((3*(trace_max_pages * PAGE_SIZE))); 252 if (*rdata_ptr == fc_trc_flag->fnic_trace) {
186 if (!fnic_dbg_prt->buffer) { 253 fnic_dbg_prt->buffer = vmalloc(3 *
187 kfree(fnic_dbg_prt); 254 (trace_max_pages * PAGE_SIZE));
188 return -ENOMEM; 255 if (!fnic_dbg_prt->buffer) {
256 kfree(fnic_dbg_prt);
257 return -ENOMEM;
258 }
259 memset((void *)fnic_dbg_prt->buffer, 0,
260 3 * (trace_max_pages * PAGE_SIZE));
261 fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
262 } else {
263 fnic_dbg_prt->buffer =
264 vmalloc(3 * (fnic_fc_trace_max_pages * PAGE_SIZE));
265 if (!fnic_dbg_prt->buffer) {
266 kfree(fnic_dbg_prt);
267 return -ENOMEM;
268 }
269 memset((void *)fnic_dbg_prt->buffer, 0,
270 3 * (fnic_fc_trace_max_pages * PAGE_SIZE));
271 fnic_dbg_prt->buffer_len =
272 fnic_fc_trace_get_data(fnic_dbg_prt, *rdata_ptr);
189 } 273 }
190 memset((void *)fnic_dbg_prt->buffer, 0,
191 (3*(trace_max_pages * PAGE_SIZE)));
192 fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
193 file->private_data = fnic_dbg_prt; 274 file->private_data = fnic_dbg_prt;
275
194 return 0; 276 return 0;
195} 277}
196 278
@@ -272,13 +354,6 @@ static int fnic_trace_debugfs_release(struct inode *inode,
272 return 0; 354 return 0;
273} 355}
274 356
275static const struct file_operations fnic_trace_ctrl_fops = {
276 .owner = THIS_MODULE,
277 .open = fnic_trace_ctrl_open,
278 .read = fnic_trace_ctrl_read,
279 .write = fnic_trace_ctrl_write,
280};
281
282static const struct file_operations fnic_trace_debugfs_fops = { 357static const struct file_operations fnic_trace_debugfs_fops = {
283 .owner = THIS_MODULE, 358 .owner = THIS_MODULE,
284 .open = fnic_trace_debugfs_open, 359 .open = fnic_trace_debugfs_open,
@@ -306,9 +381,10 @@ int fnic_trace_debugfs_init(void)
306 return rc; 381 return rc;
307 } 382 }
308 fnic_trace_enable = debugfs_create_file("tracing_enable", 383 fnic_trace_enable = debugfs_create_file("tracing_enable",
309 S_IFREG|S_IRUGO|S_IWUSR, 384 S_IFREG|S_IRUGO|S_IWUSR,
310 fnic_trace_debugfs_root, 385 fnic_trace_debugfs_root,
311 NULL, &fnic_trace_ctrl_fops); 386 &(fc_trc_flag->fnic_trace),
387 &fnic_trace_ctrl_fops);
312 388
313 if (!fnic_trace_enable) { 389 if (!fnic_trace_enable) {
314 printk(KERN_DEBUG 390 printk(KERN_DEBUG
@@ -317,10 +393,10 @@ int fnic_trace_debugfs_init(void)
317 } 393 }
318 394
319 fnic_trace_debugfs_file = debugfs_create_file("trace", 395 fnic_trace_debugfs_file = debugfs_create_file("trace",
320 S_IFREG|S_IRUGO|S_IWUSR, 396 S_IFREG|S_IRUGO|S_IWUSR,
321 fnic_trace_debugfs_root, 397 fnic_trace_debugfs_root,
322 NULL, 398 &(fc_trc_flag->fnic_trace),
323 &fnic_trace_debugfs_fops); 399 &fnic_trace_debugfs_fops);
324 400
325 if (!fnic_trace_debugfs_file) { 401 if (!fnic_trace_debugfs_file) {
326 printk(KERN_DEBUG 402 printk(KERN_DEBUG
@@ -340,14 +416,104 @@ int fnic_trace_debugfs_init(void)
340 */ 416 */
341void fnic_trace_debugfs_terminate(void) 417void fnic_trace_debugfs_terminate(void)
342{ 418{
343 if (fnic_trace_debugfs_file) { 419 debugfs_remove(fnic_trace_debugfs_file);
344 debugfs_remove(fnic_trace_debugfs_file); 420 fnic_trace_debugfs_file = NULL;
345 fnic_trace_debugfs_file = NULL; 421
422 debugfs_remove(fnic_trace_enable);
423 fnic_trace_enable = NULL;
424}
425
426/*
427 * fnic_fc_trace_debugfs_init -
428 * Initialize debugfs for fnic control frame trace logging
429 *
430 * Description:
431 * When Debugfs is configured this routine sets up the fnic_fc debugfs
432 * file system. If not already created, this routine will create the
433 * fc_trace file to log fnic fc trace buffer output into debugfs and
434 * it will also create file fc_trace_enable to control enable/disable of
435 * trace logging into trace buffer.
436 */
437
438int fnic_fc_trace_debugfs_init(void)
439{
440 int rc = -1;
441
442 if (!fnic_trace_debugfs_root) {
443 pr_err("fnic: Debugfs root directory doesn't exist\n");
444 return rc;
445 }
446
447 fnic_fc_trace_enable = debugfs_create_file("fc_trace_enable",
448 S_IFREG|S_IRUGO|S_IWUSR,
449 fnic_trace_debugfs_root,
450 &(fc_trc_flag->fc_trace),
451 &fnic_trace_ctrl_fops);
452
453 if (!fnic_fc_trace_enable) {
454 pr_err("fnic: Failed to create fc_trace_enable file\n");
455 return rc;
456 }
457
458 fnic_fc_trace_clear = debugfs_create_file("fc_trace_clear",
459 S_IFREG|S_IRUGO|S_IWUSR,
460 fnic_trace_debugfs_root,
461 &(fc_trc_flag->fc_clear),
462 &fnic_trace_ctrl_fops);
463
464 if (!fnic_fc_trace_clear) {
465 pr_err("fnic: Failed to create fc_trace_clear file\n");
466 return rc;
467 }
468
469 fnic_fc_rdata_trace_debugfs_file =
470 debugfs_create_file("fc_trace_rdata",
471 S_IFREG|S_IRUGO|S_IWUSR,
472 fnic_trace_debugfs_root,
473 &(fc_trc_flag->fc_normal_file),
474 &fnic_trace_debugfs_fops);
475
476 if (!fnic_fc_rdata_trace_debugfs_file) {
477 pr_err("fnic: Failed to create fc_trace_rdata file\n");
478 return rc;
346 } 479 }
347 if (fnic_trace_enable) { 480
348 debugfs_remove(fnic_trace_enable); 481 fnic_fc_trace_debugfs_file =
349 fnic_trace_enable = NULL; 482 debugfs_create_file("fc_trace",
483 S_IFREG|S_IRUGO|S_IWUSR,
484 fnic_trace_debugfs_root,
485 &(fc_trc_flag->fc_row_file),
486 &fnic_trace_debugfs_fops);
487
488 if (!fnic_fc_trace_debugfs_file) {
489 pr_err("fnic: Failed to create fc_trace file\n");
490 return rc;
350 } 491 }
492 rc = 0;
493 return rc;
494}
495
496/*
497 * fnic_fc_trace_debugfs_terminate - Tear down debugfs infrastructure
498 *
499 * Description:
500 * When Debugfs is configured this routine removes debugfs file system
501 * elements that are specific to fnic_fc trace logging.
502 */
503
504void fnic_fc_trace_debugfs_terminate(void)
505{
506 debugfs_remove(fnic_fc_trace_debugfs_file);
507 fnic_fc_trace_debugfs_file = NULL;
508
509 debugfs_remove(fnic_fc_rdata_trace_debugfs_file);
510 fnic_fc_rdata_trace_debugfs_file = NULL;
511
512 debugfs_remove(fnic_fc_trace_enable);
513 fnic_fc_trace_enable = NULL;
514
515 debugfs_remove(fnic_fc_trace_clear);
516 fnic_fc_trace_clear = NULL;
351} 517}
352 518
353/* 519/*
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 1671325aec7f..1b948f633fc5 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -66,19 +66,35 @@ void fnic_handle_link(struct work_struct *work)
66 fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); 66 fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
67 67
68 if (old_link_status == fnic->link_status) { 68 if (old_link_status == fnic->link_status) {
69 if (!fnic->link_status) 69 if (!fnic->link_status) {
70 /* DOWN -> DOWN */ 70 /* DOWN -> DOWN */
71 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 71 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
72 else { 72 fnic_fc_trace_set_data(fnic->lport->host->host_no,
73 FNIC_FC_LE, "Link Status: DOWN->DOWN",
74 strlen("Link Status: DOWN->DOWN"));
75 } else {
73 if (old_link_down_cnt != fnic->link_down_cnt) { 76 if (old_link_down_cnt != fnic->link_down_cnt) {
74 /* UP -> DOWN -> UP */ 77 /* UP -> DOWN -> UP */
75 fnic->lport->host_stats.link_failure_count++; 78 fnic->lport->host_stats.link_failure_count++;
76 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 79 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
80 fnic_fc_trace_set_data(
81 fnic->lport->host->host_no,
82 FNIC_FC_LE,
83 "Link Status:UP_DOWN_UP",
84 strlen("Link_Status:UP_DOWN_UP")
85 );
77 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, 86 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
78 "link down\n"); 87 "link down\n");
79 fcoe_ctlr_link_down(&fnic->ctlr); 88 fcoe_ctlr_link_down(&fnic->ctlr);
80 if (fnic->config.flags & VFCF_FIP_CAPABLE) { 89 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
81 /* start FCoE VLAN discovery */ 90 /* start FCoE VLAN discovery */
91 fnic_fc_trace_set_data(
92 fnic->lport->host->host_no,
93 FNIC_FC_LE,
94 "Link Status: UP_DOWN_UP_VLAN",
95 strlen(
96 "Link Status: UP_DOWN_UP_VLAN")
97 );
82 fnic_fcoe_send_vlan_req(fnic); 98 fnic_fcoe_send_vlan_req(fnic);
83 return; 99 return;
84 } 100 }
@@ -88,22 +104,36 @@ void fnic_handle_link(struct work_struct *work)
88 } else 104 } else
89 /* UP -> UP */ 105 /* UP -> UP */
90 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 106 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
107 fnic_fc_trace_set_data(
108 fnic->lport->host->host_no, FNIC_FC_LE,
109 "Link Status: UP_UP",
110 strlen("Link Status: UP_UP"));
91 } 111 }
92 } else if (fnic->link_status) { 112 } else if (fnic->link_status) {
93 /* DOWN -> UP */ 113 /* DOWN -> UP */
94 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 114 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
95 if (fnic->config.flags & VFCF_FIP_CAPABLE) { 115 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
96 /* start FCoE VLAN discovery */ 116 /* start FCoE VLAN discovery */
117 fnic_fc_trace_set_data(
118 fnic->lport->host->host_no,
119 FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
120 strlen("Link Status: DOWN_UP_VLAN"));
97 fnic_fcoe_send_vlan_req(fnic); 121 fnic_fcoe_send_vlan_req(fnic);
98 return; 122 return;
99 } 123 }
100 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); 124 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
125 fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
126 "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
101 fcoe_ctlr_link_up(&fnic->ctlr); 127 fcoe_ctlr_link_up(&fnic->ctlr);
102 } else { 128 } else {
103 /* UP -> DOWN */ 129 /* UP -> DOWN */
104 fnic->lport->host_stats.link_failure_count++; 130 fnic->lport->host_stats.link_failure_count++;
105 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 131 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
106 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); 132 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
133 fnic_fc_trace_set_data(
134 fnic->lport->host->host_no, FNIC_FC_LE,
135 "Link Status: UP_DOWN",
136 strlen("Link Status: UP_DOWN"));
107 fcoe_ctlr_link_down(&fnic->ctlr); 137 fcoe_ctlr_link_down(&fnic->ctlr);
108 } 138 }
109 139
@@ -267,11 +297,6 @@ static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
267 297
268 if (desc->fip_dtype == FIP_DT_FLOGI) { 298 if (desc->fip_dtype == FIP_DT_FLOGI) {
269 299
270 shost_printk(KERN_DEBUG, lport->host,
271 " FIP TYPE FLOGI: fab name:%llx "
272 "vfid:%d map:%x\n",
273 fip->sel_fcf->fabric_name, fip->sel_fcf->vfid,
274 fip->sel_fcf->fc_map);
275 if (dlen < sizeof(*els) + sizeof(*fh) + 1) 300 if (dlen < sizeof(*els) + sizeof(*fh) + 1)
276 return 0; 301 return 0;
277 302
@@ -616,6 +641,10 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
616 "using UCSM\n"); 641 "using UCSM\n");
617 goto drop; 642 goto drop;
618 } 643 }
644 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
645 FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
646 printk(KERN_ERR "fnic ctlr frame trace error!!!");
647 }
619 skb_queue_tail(&fnic->fip_frame_queue, skb); 648 skb_queue_tail(&fnic->fip_frame_queue, skb);
620 queue_work(fnic_fip_queue, &fnic->fip_frame_work); 649 queue_work(fnic_fip_queue, &fnic->fip_frame_work);
621 return 1; /* let caller know packet was used */ 650 return 1; /* let caller know packet was used */
@@ -844,6 +873,10 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
844 } 873 }
845 fr_dev(fp) = fnic->lport; 874 fr_dev(fp) = fnic->lport;
846 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 875 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
876 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
877 (char *)skb->data, skb->len)) != 0) {
878 printk(KERN_ERR "fnic ctlr frame trace error\n");
879 }
847 880
848 skb_queue_tail(&fnic->frame_queue, skb); 881 skb_queue_tail(&fnic->frame_queue, skb);
849 queue_work(fnic_event_queue, &fnic->frame_work); 882 queue_work(fnic_event_queue, &fnic->frame_work);
@@ -951,6 +984,15 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
951 vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); 984 vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
952 vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto; 985 vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
953 vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); 986 vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
987 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
988 FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
989 printk(KERN_ERR "fnic ctlr frame trace error\n");
990 }
991 } else {
992 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
993 FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
994 printk(KERN_ERR "fnic ctlr frame trace error\n");
995 }
954 } 996 }
955 997
956 pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); 998 pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
@@ -1023,6 +1065,11 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
1023 1065
1024 pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE); 1066 pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
1025 1067
1068 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
1069 (char *)eth_hdr, tot_len)) != 0) {
1070 printk(KERN_ERR "fnic ctlr frame trace error\n");
1071 }
1072
1026 spin_lock_irqsave(&fnic->wq_lock[0], flags); 1073 spin_lock_irqsave(&fnic->wq_lock[0], flags);
1027 1074
1028 if (!vnic_wq_desc_avail(wq)) { 1075 if (!vnic_wq_desc_avail(wq)) {
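Every trace hook added above repeats the same message/strlen pair, which is exactly how the UP_DOWN_UP string mismatch fixed earlier crept in. A hypothetical wrapper (not part of the patch) that would remove the duplication for link events:

    static inline void fnic_fc_trace_link(struct fnic *fnic, const char *msg)
    {
            fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
                                   (char *)msg, strlen(msg));
    }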
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 33e4ec2bfe73..8c56fdc3a456 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -74,6 +74,11 @@ module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
74MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages " 74MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
75 "for fnic trace buffer"); 75 "for fnic trace buffer");
76 76
77unsigned int fnic_fc_trace_max_pages = 64;
78module_param(fnic_fc_trace_max_pages, uint, S_IRUGO|S_IWUSR);
79MODULE_PARM_DESC(fnic_fc_trace_max_pages,
80 "Total allocated memory pages for fc trace buffer");
81
77static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH; 82static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH;
78module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR); 83module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR);
79MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN"); 84MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN");
@@ -111,7 +116,7 @@ static struct scsi_host_template fnic_host_template = {
111 .change_queue_type = fc_change_queue_type, 116 .change_queue_type = fc_change_queue_type,
112 .this_id = -1, 117 .this_id = -1,
113 .cmd_per_lun = 3, 118 .cmd_per_lun = 3,
114 .can_queue = FNIC_MAX_IO_REQ, 119 .can_queue = FNIC_DFLT_IO_REQ,
115 .use_clustering = ENABLE_CLUSTERING, 120 .use_clustering = ENABLE_CLUSTERING,
116 .sg_tablesize = FNIC_MAX_SG_DESC_CNT, 121 .sg_tablesize = FNIC_MAX_SG_DESC_CNT,
117 .max_sectors = 0xffff, 122 .max_sectors = 0xffff,
@@ -773,6 +778,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
773 shost_printk(KERN_INFO, fnic->lport->host, 778 shost_printk(KERN_INFO, fnic->lport->host,
774 "firmware uses non-FIP mode\n"); 779 "firmware uses non-FIP mode\n");
775 fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP); 780 fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP);
781 fnic->ctlr.state = FIP_ST_NON_FIP;
776 } 782 }
777 fnic->state = FNIC_IN_FC_MODE; 783 fnic->state = FNIC_IN_FC_MODE;
778 784
@@ -1033,11 +1039,20 @@ static int __init fnic_init_module(void)
1033 /* Allocate memory for trace buffer */ 1039 /* Allocate memory for trace buffer */
1034 err = fnic_trace_buf_init(); 1040 err = fnic_trace_buf_init();
1035 if (err < 0) { 1041 if (err < 0) {
1036 printk(KERN_ERR PFX "Trace buffer initialization Failed " 1042 printk(KERN_ERR PFX
1037 "Fnic Tracing utility is disabled\n"); 1043 "Trace buffer initialization Failed. "
1044 "Fnic Tracing utility is disabled\n");
1038 fnic_trace_free(); 1045 fnic_trace_free();
1039 } 1046 }
1040 1047
1048 /* Allocate memory for fc trace buffer */
1049 err = fnic_fc_trace_init();
1050 if (err < 0) {
1051 printk(KERN_ERR PFX "FC trace buffer initialization Failed. "
1052 "FC frame tracing utility is disabled\n");
1053 fnic_fc_trace_free();
1054 }
1055
1041 /* Create a cache for allocation of default size sgls */ 1056 /* Create a cache for allocation of default size sgls */
1042 len = sizeof(struct fnic_dflt_sgl_list); 1057 len = sizeof(struct fnic_dflt_sgl_list);
1043 fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create 1058 fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
@@ -1118,6 +1133,7 @@ err_create_fnic_sgl_slab_max:
1118 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); 1133 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
1119err_create_fnic_sgl_slab_dflt: 1134err_create_fnic_sgl_slab_dflt:
1120 fnic_trace_free(); 1135 fnic_trace_free();
1136 fnic_fc_trace_free();
1121 fnic_debugfs_terminate(); 1137 fnic_debugfs_terminate();
1122 return err; 1138 return err;
1123} 1139}
@@ -1135,6 +1151,7 @@ static void __exit fnic_cleanup_module(void)
1135 kmem_cache_destroy(fnic_io_req_cache); 1151 kmem_cache_destroy(fnic_io_req_cache);
1136 fc_release_transport(fnic_fc_transport); 1152 fc_release_transport(fnic_fc_transport);
1137 fnic_trace_free(); 1153 fnic_trace_free();
1154 fnic_fc_trace_free();
1138 fnic_debugfs_terminate(); 1155 fnic_debugfs_terminate();
1139} 1156}
1140 1157
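Since fnic_fc_trace_max_pages above is declared with S_IRUGO|S_IWUSR, the control-frame trace budget can be set at load time (for example, modprobe fnic fnic_fc_trace_max_pages=128) or adjusted afterwards through /sys/module/fnic/parameters/fnic_fc_trace_max_pages, mirroring the existing fnic_trace_max_pages knob.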
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 0521436d05d6..ea28b5ca4c73 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1312,8 +1312,9 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
1312 1312
1313cleanup_scsi_cmd: 1313cleanup_scsi_cmd:
1314 sc->result = DID_TRANSPORT_DISRUPTED << 16; 1314 sc->result = DID_TRANSPORT_DISRUPTED << 16;
1315 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:" 1315 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1316 " DID_TRANSPORT_DISRUPTED\n"); 1316 "%s: sc duration = %lu DID_TRANSPORT_DISRUPTED\n",
1317 __func__, (jiffies - start_time));
1317 1318
1318 if (atomic64_read(&fnic->io_cmpl_skip)) 1319 if (atomic64_read(&fnic->io_cmpl_skip))
1319 atomic64_dec(&fnic->io_cmpl_skip); 1320 atomic64_dec(&fnic->io_cmpl_skip);
@@ -1733,6 +1734,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
1733 struct fnic_stats *fnic_stats; 1734 struct fnic_stats *fnic_stats;
1734 struct abort_stats *abts_stats; 1735 struct abort_stats *abts_stats;
1735 struct terminate_stats *term_stats; 1736 struct terminate_stats *term_stats;
1737 enum fnic_ioreq_state old_ioreq_state;
1736 int tag; 1738 int tag;
1737 DECLARE_COMPLETION_ONSTACK(tm_done); 1739 DECLARE_COMPLETION_ONSTACK(tm_done);
1738 1740
@@ -1793,6 +1795,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
1793 * the completion wont be done till mid-layer, since abort 1795 * the completion wont be done till mid-layer, since abort
1794 * has already started. 1796 * has already started.
1795 */ 1797 */
1798 old_ioreq_state = CMD_STATE(sc);
1796 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; 1799 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1797 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; 1800 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1798 1801
@@ -1816,6 +1819,8 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
1816 if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req, 1819 if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
1817 fc_lun.scsi_lun, io_req)) { 1820 fc_lun.scsi_lun, io_req)) {
1818 spin_lock_irqsave(io_lock, flags); 1821 spin_lock_irqsave(io_lock, flags);
1822 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1823 CMD_STATE(sc) = old_ioreq_state;
1819 io_req = (struct fnic_io_req *)CMD_SP(sc); 1824 io_req = (struct fnic_io_req *)CMD_SP(sc);
1820 if (io_req) 1825 if (io_req)
1821 io_req->abts_done = NULL; 1826 io_req->abts_done = NULL;
@@ -1859,12 +1864,8 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
1859 if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { 1864 if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
1860 spin_unlock_irqrestore(io_lock, flags); 1865 spin_unlock_irqrestore(io_lock, flags);
1861 if (task_req == FCPIO_ITMF_ABT_TASK) { 1866 if (task_req == FCPIO_ITMF_ABT_TASK) {
1862 FNIC_SCSI_DBG(KERN_INFO,
1863 fnic->lport->host, "Abort Driver Timeout\n");
1864 atomic64_inc(&abts_stats->abort_drv_timeouts); 1867 atomic64_inc(&abts_stats->abort_drv_timeouts);
1865 } else { 1868 } else {
1866 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
1867 "Terminate Driver Timeout\n");
1868 atomic64_inc(&term_stats->terminate_drv_timeouts); 1869 atomic64_inc(&term_stats->terminate_drv_timeouts);
1869 } 1870 }
1870 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT; 1871 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
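The new old_ioreq_state logic snapshots the request state before setting FNIC_IOREQ_ABTS_PENDING, and on a queueing failure restores it only if the state is still ABTS_PENDING, so a completion that raced in and advanced the state is not clobbered. A compressed pthread sketch of the idiom, with hypothetical names and the hardware call stubbed:

#include <pthread.h>

enum ioreq_state { IOREQ_CMD_PENDING, IOREQ_ABTS_PENDING, IOREQ_CMD_COMPLETE };

struct io {
	pthread_mutex_t lock;
	enum ioreq_state state;
};

/* Hypothetical stand-in for fnic_queue_abort_io_req(). */
static int send_abort_to_hw(struct io *io) { (void)io; return -1; }

static int queue_abort(struct io *io)
{
	enum ioreq_state old_state;

	pthread_mutex_lock(&io->lock);
	old_state = io->state;		/* snapshot before the transition */
	io->state = IOREQ_ABTS_PENDING;
	pthread_mutex_unlock(&io->lock);

	if (send_abort_to_hw(io) != 0) {
		pthread_mutex_lock(&io->lock);
		/* Restore only if no one moved the state on meanwhile. */
		if (io->state == IOREQ_ABTS_PENDING)
			io->state = old_state;
		pthread_mutex_unlock(&io->lock);
		return -1;
	}
	return 0;
}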
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index e002e7187dc0..c77285926827 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -20,6 +20,7 @@
20#include <linux/errno.h> 20#include <linux/errno.h>
21#include <linux/spinlock.h> 21#include <linux/spinlock.h>
22#include <linux/kallsyms.h> 22#include <linux/kallsyms.h>
23#include <linux/time.h>
23#include "fnic_io.h" 24#include "fnic_io.h"
24#include "fnic.h" 25#include "fnic.h"
25 26
@@ -32,6 +33,16 @@ static DEFINE_SPINLOCK(fnic_trace_lock);
32static fnic_trace_dbg_t fnic_trace_entries; 33static fnic_trace_dbg_t fnic_trace_entries;
33int fnic_tracing_enabled = 1; 34int fnic_tracing_enabled = 1;
34 35
36/* static char *fnic_fc_ctlr_trace_buf_p; */
37
38static int fc_trace_max_entries;
39static unsigned long fnic_fc_ctlr_trace_buf_p;
40static fnic_trace_dbg_t fc_trace_entries;
41int fnic_fc_tracing_enabled = 1;
42int fnic_fc_trace_cleared = 1;
43static DEFINE_SPINLOCK(fnic_fc_trace_lock);
44
45
35/* 46/*
36 * fnic_trace_get_buf - Give buffer pointer to user to fill up trace information 47 * fnic_trace_get_buf - Give buffer pointer to user to fill up trace information
37 * 48 *
@@ -428,10 +439,10 @@ int fnic_trace_buf_init(void)
428 } 439 }
429 err = fnic_trace_debugfs_init(); 440 err = fnic_trace_debugfs_init();
430 if (err < 0) { 441 if (err < 0) {
431 printk(KERN_ERR PFX "Failed to initialize debugfs for tracing\n"); 442 pr_err("fnic: Failed to initialize debugfs for tracing\n");
432 goto err_fnic_trace_debugfs_init; 443 goto err_fnic_trace_debugfs_init;
433 } 444 }
434 printk(KERN_INFO PFX "Successfully Initialized Trace Buffer\n"); 445 pr_info("fnic: Successfully Initialized Trace Buffer\n");
435 return err; 446 return err;
436err_fnic_trace_debugfs_init: 447err_fnic_trace_debugfs_init:
437 fnic_trace_free(); 448 fnic_trace_free();
@@ -456,3 +467,314 @@ void fnic_trace_free(void)
456 } 467 }
457 printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n"); 468 printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n");
458} 469}
470
471/*
472 * fnic_fc_trace_init -
473 * Initialize trace buffer to log fnic control frames
474 * Description:
475 * Initialize the trace buffer data structure by allocating the
476 * required memory for trace data as well as for the indexes.
477 * The frame size is 256 bytes, and memory is allocated for
478 * 1024 entries of 256 bytes each.
479 * page_offset (the index) is set to the address of each trace
480 * entry, advancing by the frame size from the previous
481 * page_offset entry.
482 */
483
484int fnic_fc_trace_init(void)
485{
486 unsigned long fc_trace_buf_head;
487 int err = 0;
488 int i;
489
490 fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/
491 FC_TRC_SIZE_BYTES;
492 fnic_fc_ctlr_trace_buf_p = (unsigned long)vmalloc(
493 fnic_fc_trace_max_pages * PAGE_SIZE);
494 if (!fnic_fc_ctlr_trace_buf_p) {
495 pr_err("fnic: Failed to allocate memory for "
496 "FC Control Trace Buf\n");
497 err = -ENOMEM;
498 goto err_fnic_fc_ctlr_trace_buf_init;
499 }
500
501 memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
502 fnic_fc_trace_max_pages * PAGE_SIZE);
503
504 /* Allocate memory for page offset */
505 fc_trace_entries.page_offset = vmalloc(fc_trace_max_entries *
506 sizeof(unsigned long));
507 if (!fc_trace_entries.page_offset) {
508 pr_err("fnic:Failed to allocate memory for page_offset\n");
509 if (fnic_fc_ctlr_trace_buf_p) {
510 pr_err("fnic: Freeing FC Control Trace Buf\n");
511 vfree((void *)fnic_fc_ctlr_trace_buf_p);
512 fnic_fc_ctlr_trace_buf_p = 0;
513 }
514 err = -ENOMEM;
515 goto err_fnic_fc_ctlr_trace_buf_init;
516 }
517 memset((void *)fc_trace_entries.page_offset, 0,
518 (fc_trace_max_entries * sizeof(unsigned long)));
519
520 fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
521 fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p;
522
523 /*
524 * Set up fc_trace_entries.page_offset field with memory location
525 * for every trace entry
526 */
527 for (i = 0; i < fc_trace_max_entries; i++) {
528 fc_trace_entries.page_offset[i] = fc_trace_buf_head;
529 fc_trace_buf_head += FC_TRC_SIZE_BYTES;
530 }
531 err = fnic_fc_trace_debugfs_init();
532 if (err < 0) {
533 pr_err("fnic: Failed to initialize FC_CTLR tracing.\n");
534 goto err_fnic_fc_ctlr_trace_debugfs_init;
535 }
536 pr_info("fnic: Successfully Initialized FC_CTLR Trace Buffer\n");
537 return err;
538
539err_fnic_fc_ctlr_trace_debugfs_init:
540 fnic_fc_trace_free();
541err_fnic_fc_ctlr_trace_buf_init:
542 return err;
543}
544
545/*
546 * fnic_fc_trace_free - Free memory of the fnic FC_CTLR trace data structures.
547 */
548void fnic_fc_trace_free(void)
549{
550 fnic_fc_tracing_enabled = 0;
551 fnic_fc_trace_debugfs_terminate();
552 if (fc_trace_entries.page_offset) {
553 vfree((void *)fc_trace_entries.page_offset);
554 fc_trace_entries.page_offset = NULL;
555 }
556 if (fnic_fc_ctlr_trace_buf_p) {
557 vfree((void *)fnic_fc_ctlr_trace_buf_p);
558 fnic_fc_ctlr_trace_buf_p = 0;
559 }
560 pr_info("fnic:Successfully FC_CTLR Freed Trace Buffer\n");
561}
562
563/*
564 * fnic_fc_trace_set_data:
565 * Maintain rd & wr idx accordingly and set data
566 * Passed parameters:
567 * host_no: host number associated with fnic
568 * frame_type: send frame, receive frame or link event
569 * fc_frame: pointer to fc_frame
570 * frame_len: Length of the fc_frame
571 * Description:
572 * This routine will get the next available wr_idx and
573 * copy all passed trace data to the buffer pointed to by wr_idx,
574 * then increment wr_idx. It also makes sure that we don't
575 * overwrite the entry which we are reading and that we
576 * wrap around if we reach the maximum number of entries.
577 * Returned Value:
578 * It will return 0 for success or -1 for failure
579 */
580int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
581 char *frame, u32 fc_trc_frame_len)
582{
583 unsigned long flags;
584 struct fc_trace_hdr *fc_buf;
585 unsigned long eth_fcoe_hdr_len;
586 char *fc_trace;
587
588 if (fnic_fc_tracing_enabled == 0)
589 return 0;
590
591 spin_lock_irqsave(&fnic_fc_trace_lock, flags);
592
593 if (fnic_fc_trace_cleared == 1) {
594 fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
595 pr_info("fnic: Reseting the read idx\n");
596 memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
597 fnic_fc_trace_max_pages * PAGE_SIZE);
598 fnic_fc_trace_cleared = 0;
599 }
600
601 fc_buf = (struct fc_trace_hdr *)
602 fc_trace_entries.page_offset[fc_trace_entries.wr_idx];
603
604 fc_trace_entries.wr_idx++;
605
606 if (fc_trace_entries.wr_idx >= fc_trace_max_entries)
607 fc_trace_entries.wr_idx = 0;
608
609 if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
610 fc_trace_entries.rd_idx++;
611 if (fc_trace_entries.rd_idx >= fc_trace_max_entries)
612 fc_trace_entries.rd_idx = 0;
613 }
614
615 fc_buf->time_stamp = CURRENT_TIME;
616 fc_buf->host_no = host_no;
617 fc_buf->frame_type = frame_type;
618
619 fc_trace = (char *)FC_TRACE_ADDRESS(fc_buf);
620
621 /* During the receive path, we do not have the eth hdr or the fcoe hdr
622 * at the trace entry point, so we stuff 0xff just to keep it generic.
623 */
624 if (frame_type == FNIC_FC_RECV) {
625 eth_fcoe_hdr_len = sizeof(struct ethhdr) +
626 sizeof(struct fcoe_hdr);
627 fc_trc_frame_len = fc_trc_frame_len + eth_fcoe_hdr_len;
628 memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len);
629 /* Copy the rest of data frame */
630 memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame,
631 min_t(u8, fc_trc_frame_len,
632 (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)));
633 } else {
634 memcpy((char *)fc_trace, (void *)frame,
635 min_t(u8, fc_trc_frame_len,
636 (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)));
637 }
638
639 /* Store the actual received length */
640 fc_buf->frame_len = fc_trc_frame_len;
641
642 spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
643 return 0;
644}
645
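fnic_fc_trace_set_data() maintains an overwrite-oldest ring: the writer fills the slot at wr_idx, wraps at fc_trace_max_entries, and if it lands on rd_idx pushes the reader forward, so rd_idx == wr_idx keeps meaning "empty". A standalone sketch of that index discipline, with hypothetical entry count and size:

#include <string.h>

#define NENTRIES   8			/* fc_trace_max_entries stand-in */
#define ENTRY_SIZE 256			/* FC_TRC_SIZE_BYTES */

static char ring[NENTRIES][ENTRY_SIZE];
static int rd_idx, wr_idx;		/* rd_idx == wr_idx means empty */

static void trace_write(const void *data, size_t len)
{
	if (len > ENTRY_SIZE)
		len = ENTRY_SIZE;
	memcpy(ring[wr_idx], data, len);	/* claim slot at wr_idx */

	if (++wr_idx >= NENTRIES)		/* advance and wrap */
		wr_idx = 0;
	if (wr_idx == rd_idx) {			/* caught the reader: */
		rd_idx++;			/* drop the oldest entry */
		if (rd_idx >= NENTRIES)
			rd_idx = 0;
	}
}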
646/*
647 * fnic_fc_trace_get_data: Copy trace buffer to a memory file
648 * Passed parameter:
649 * @fnic_dbgfs_t: pointer to debugfs trace buffer
650 * rdata_flag: 1 => Unformatted file
651 * 0 => Formatted file
652 * Description:
653 * This routine will copy the trace data to a memory file with
654 * proper formatting, and also copy it to another memory
655 * file without formatting for further processing.
656 * Return Value:
657 * Number of bytes that were dumped into fnic_dbgfs_t
658 */
659
660int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag)
661{
662 int rd_idx, wr_idx;
663 unsigned long flags;
664 int len = 0, j;
665 struct fc_trace_hdr *tdata;
666 char *fc_trace;
667
668 spin_lock_irqsave(&fnic_fc_trace_lock, flags);
669 if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
670 spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
671 pr_info("fnic: Buffer is empty\n");
672 return 0;
673 }
674 rd_idx = fc_trace_entries.rd_idx;
675 wr_idx = fc_trace_entries.wr_idx;
676 if (rdata_flag == 0) {
677 len += snprintf(fnic_dbgfs_prt->buffer + len,
678 (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
679 "Time Stamp (UTC)\t\t"
680 "Host No: F Type: len: FCoE_FRAME:\n");
681 }
682
683 while (rd_idx != wr_idx) {
684 tdata = (struct fc_trace_hdr *)
685 fc_trace_entries.page_offset[rd_idx];
686 if (!tdata) {
687 pr_info("fnic: Rd data is NULL\n");
688 spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
689 return 0;
690 }
691 if (rdata_flag == 0) {
692 copy_and_format_trace_data(tdata,
693 fnic_dbgfs_prt, &len, rdata_flag);
694 } else {
695 fc_trace = (char *)tdata;
696 for (j = 0; j < FC_TRC_SIZE_BYTES; j++) {
697 len += snprintf(fnic_dbgfs_prt->buffer + len,
698 (fnic_fc_trace_max_pages * PAGE_SIZE * 3)
699 - len, "%02x", fc_trace[j] & 0xff);
700 } /* for loop */
701 len += snprintf(fnic_dbgfs_prt->buffer + len,
702 (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
703 "\n");
704 }
705 rd_idx++;
706 if (rd_idx > (fc_trace_max_entries - 1))
707 rd_idx = 0;
708 }
709
710 spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
711 return len;
712}
713
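The raw-dump branch above accumulates output with len += snprintf(buf + len, max - len, ...). A hedged sketch of that idiom with an explicit bound check, since snprintf returns the would-be length on truncation and the running total could otherwise overshoot the buffer:

#include <stdio.h>

/* Append a hex dump of n bytes to out (capacity cap), returning the new
 * length.  The len < cap guard keeps cap - len positive even if an
 * earlier call truncated. */
static int hexdump_append(char *out, int cap, int len,
			  const unsigned char *p, int n)
{
	for (int j = 0; j < n && len < cap; j++)
		len += snprintf(out + len, cap - len, "%02x", p[j]);
	if (len < cap)
		len += snprintf(out + len, cap - len, "\n");
	return len;
}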
714/*
715 * copy_and_format_trace_data: Copy formatted data to char * buffer
716 * Passed Parameter:
717 * @fc_trace_hdr_t: pointer to trace data
718 * @fnic_dbgfs_t: pointer to debugfs trace buffer
719 * @orig_len: pointer to len
720 * rdata_flag: 0 => Formated file, 1 => Unformated file
721 * Description:
722 * This routine will format and copy the passed trace data
723 * for formated file or unformated file accordingly.
724 */
725
726void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
727 fnic_dbgfs_t *fnic_dbgfs_prt, int *orig_len,
728 u8 rdata_flag)
729{
730 struct tm tm;
731 int j, i = 1, len;
732 char *fc_trace, *fmt;
733 int ethhdr_len = sizeof(struct ethhdr) - 1;
734 int fcoehdr_len = sizeof(struct fcoe_hdr);
735 int fchdr_len = sizeof(struct fc_frame_header);
736 int max_size = fnic_fc_trace_max_pages * PAGE_SIZE * 3;
737
738 tdata->frame_type = tdata->frame_type & 0x7F;
739
740 len = *orig_len;
741
742 time_to_tm(tdata->time_stamp.tv_sec, 0, &tm);
743
744 fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t";
745 len += snprintf(fnic_dbgfs_prt->buffer + len,
746 (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
747 fmt,
748 tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900,
749 tm.tm_hour, tm.tm_min, tm.tm_sec,
750 tdata->time_stamp.tv_nsec, tdata->host_no,
751 tdata->frame_type, tdata->frame_len);
752
753 fc_trace = (char *)FC_TRACE_ADDRESS(tdata);
754
755 for (j = 0; j < min_t(u8, tdata->frame_len,
756 (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)); j++) {
757 if (tdata->frame_type == FNIC_FC_LE) {
758 len += snprintf(fnic_dbgfs_prt->buffer + len,
759 max_size - len, "%c", fc_trace[j]);
760 } else {
761 len += snprintf(fnic_dbgfs_prt->buffer + len,
762 max_size - len, "%02x", fc_trace[j] & 0xff);
763 len += snprintf(fnic_dbgfs_prt->buffer + len,
764 max_size - len, " ");
765 if (j == ethhdr_len ||
766 j == ethhdr_len + fcoehdr_len ||
767 j == ethhdr_len + fcoehdr_len + fchdr_len ||
768 (i > 3 && j%fchdr_len == 0)) {
769 len += snprintf(fnic_dbgfs_prt->buffer
770 + len, (fnic_fc_trace_max_pages
771 * PAGE_SIZE * 3) - len,
772 "\n\t\t\t\t\t\t\t\t");
773 i++;
774 }
775 } /* end of else*/
776 } /* End of for loop*/
777 len += snprintf(fnic_dbgfs_prt->buffer + len,
778 max_size - len, "\n");
779 *orig_len = len;
780}
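copy_and_format_trace_data() breaks the stored timespec into a struct tm with time_to_tm() and prints it field by field. A rough userspace analogue using gmtime_r, with the format string matching the one above:

#include <stdio.h>
#include <time.h>

static void format_stamp(const struct timespec *ts, char *out, size_t cap)
{
	struct tm tm;

	gmtime_r(&ts->tv_sec, &tm);	/* UTC, like time_to_tm(sec, 0, &tm) */
	snprintf(out, cap, "%02d:%02d:%04d %02d:%02d:%02d.%09ld ns",
		 tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900,
		 tm.tm_hour, tm.tm_min, tm.tm_sec, ts->tv_nsec);
}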
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
index d412f2ee3c4f..a8aa0578fcb0 100644
--- a/drivers/scsi/fnic/fnic_trace.h
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -19,6 +19,17 @@
19#define __FNIC_TRACE_H__ 19#define __FNIC_TRACE_H__
20 20
21#define FNIC_ENTRY_SIZE_BYTES 64 21#define FNIC_ENTRY_SIZE_BYTES 64
22#define FC_TRC_SIZE_BYTES 256
23#define FC_TRC_HEADER_SIZE sizeof(struct fc_trace_hdr)
24
25/*
26 * First bit of FNIC_FC_RECV and FNIC_FC_SEND is used to represent the type
27 * of frame: 1 => Eth frame, 0 => FC frame
28 */
29
30#define FNIC_FC_RECV 0x52 /* Character R */
31#define FNIC_FC_SEND 0x54 /* Character T */
32#define FNIC_FC_LE 0x4C /* Character L */
22 33
23extern ssize_t simple_read_from_buffer(void __user *to, 34extern ssize_t simple_read_from_buffer(void __user *to,
24 size_t count, 35 size_t count,
@@ -30,6 +41,10 @@ extern unsigned int fnic_trace_max_pages;
30extern int fnic_tracing_enabled; 41extern int fnic_tracing_enabled;
31extern unsigned int trace_max_pages; 42extern unsigned int trace_max_pages;
32 43
44extern unsigned int fnic_fc_trace_max_pages;
45extern int fnic_fc_tracing_enabled;
46extern int fnic_fc_trace_cleared;
47
33typedef struct fnic_trace_dbg { 48typedef struct fnic_trace_dbg {
34 int wr_idx; 49 int wr_idx;
35 int rd_idx; 50 int rd_idx;
@@ -56,6 +71,16 @@ struct fnic_trace_data {
56 71
57typedef struct fnic_trace_data fnic_trace_data_t; 72typedef struct fnic_trace_data fnic_trace_data_t;
58 73
74struct fc_trace_hdr {
75 struct timespec time_stamp;
76 u32 host_no;
77 u8 frame_type;
78 u8 frame_len;
79} __attribute__((__packed__));
80
81#define FC_TRACE_ADDRESS(a) \
82 ((unsigned long)(a) + sizeof(struct fc_trace_hdr))
83
59#define FNIC_TRACE_ENTRY_SIZE \ 84#define FNIC_TRACE_ENTRY_SIZE \
60 (FNIC_ENTRY_SIZE_BYTES - sizeof(fnic_trace_data_t)) 85 (FNIC_ENTRY_SIZE_BYTES - sizeof(fnic_trace_data_t))
61 86
@@ -88,4 +113,17 @@ int fnic_debugfs_init(void);
88void fnic_debugfs_terminate(void); 113void fnic_debugfs_terminate(void);
89int fnic_trace_debugfs_init(void); 114int fnic_trace_debugfs_init(void);
90void fnic_trace_debugfs_terminate(void); 115void fnic_trace_debugfs_terminate(void);
116
117/* fnic FC CTLR Trace related functions */
118int fnic_fc_trace_init(void);
119void fnic_fc_trace_free(void);
120int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
121 char *frame, u32 fc_frame_len);
122int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag);
123void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
124 fnic_dbgfs_t *fnic_dbgfs_prt,
125 int *len, u8 rdata_flag);
126int fnic_fc_trace_debugfs_init(void);
127void fnic_fc_trace_debugfs_terminate(void);
128
91#endif 129#endif
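The new fc_trace_hdr is packed and each 256-byte slot stores the frame bytes immediately after it, which is all FC_TRACE_ADDRESS() computes. A self-contained sketch of that header-then-payload layout, with simplified field types:

#include <stdio.h>
#include <stdint.h>

struct hdr {
	uint64_t sec;			/* timespec stand-in */
	uint32_t host_no;
	uint8_t  frame_type;
	uint8_t  frame_len;
} __attribute__((__packed__));

#define PAYLOAD(a) ((char *)(a) + sizeof(struct hdr))

int main(void)
{
	char slot[256] = { 0 };
	struct hdr *h = (struct hdr *)slot;

	h->frame_len = 4;
	PAYLOAD(h)[0] = 0x22;		/* first frame byte, right after the header */
	printf("header %zu bytes, payload at offset %zu\n",
	       sizeof(*h), (size_t)(PAYLOAD(h) - slot));
	return 0;
}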
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 7176365e916b..a1bc8ca958e1 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -78,10 +78,6 @@
78 * 78 *
79 */ 79 */
80 80
81/*
82 * $Log: generic_NCR5380.c,v $
83 */
84
85/* settings for DTC3181E card with only Mustek scanner attached */ 81/* settings for DTC3181E card with only Mustek scanner attached */
86#define USLEEP 82#define USLEEP
87#define USLEEP_POLL 1 83#define USLEEP_POLL 1
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index 1bcdb7beb77b..703adf78e0b2 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -25,10 +25,6 @@
25 * 1+ (800) 334-5454 25 * 1+ (800) 334-5454
26 */ 26 */
27 27
28/*
29 * $Log: generic_NCR5380.h,v $
30 */
31
32#ifndef GENERIC_NCR5380_H 28#ifndef GENERIC_NCR5380_H
33#define GENERIC_NCR5380_H 29#define GENERIC_NCR5380_H
34 30
@@ -58,8 +54,6 @@ static const char* generic_NCR5380_info(struct Scsi_Host *);
58#define CAN_QUEUE 16 54#define CAN_QUEUE 16
59#endif 55#endif
60 56
61#ifndef HOSTS_C
62
63#define __STRVAL(x) #x 57#define __STRVAL(x) #x
64#define STRVAL(x) __STRVAL(x) 58#define STRVAL(x) __STRVAL(x)
65 59
@@ -131,7 +125,6 @@ static const char* generic_NCR5380_info(struct Scsi_Host *);
131#define BOARD_NCR53C400A 2 125#define BOARD_NCR53C400A 2
132#define BOARD_DTC3181E 3 126#define BOARD_DTC3181E 3
133 127
134#endif /* else def HOSTS_C */
135#endif /* ndef ASM */ 128#endif /* ndef ASM */
136#endif /* GENERIC_NCR5380_H */ 129#endif /* GENERIC_NCR5380_H */
137 130
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 9a6e4a2cd072..31184b35370f 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -48,6 +48,7 @@
48#include <linux/bitmap.h> 48#include <linux/bitmap.h>
49#include <linux/atomic.h> 49#include <linux/atomic.h>
50#include <linux/jiffies.h> 50#include <linux/jiffies.h>
51#include <linux/percpu.h>
51#include <asm/div64.h> 52#include <asm/div64.h>
52#include "hpsa_cmd.h" 53#include "hpsa_cmd.h"
53#include "hpsa.h" 54#include "hpsa.h"
@@ -115,9 +116,15 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
115 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3}, 116 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
116 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4}, 117 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
117 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5}, 118 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
119 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
118 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7}, 120 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
119 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8}, 121 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
120 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9}, 122 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
123 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
124 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
125 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
126 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
127 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
121 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076}, 128 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
122 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087}, 129 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
123 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D}, 130 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
@@ -165,9 +172,15 @@ static struct board_type products[] = {
165 {0x21C3103C, "Smart Array", &SA5_access}, 172 {0x21C3103C, "Smart Array", &SA5_access},
166 {0x21C4103C, "Smart Array", &SA5_access}, 173 {0x21C4103C, "Smart Array", &SA5_access},
167 {0x21C5103C, "Smart Array", &SA5_access}, 174 {0x21C5103C, "Smart Array", &SA5_access},
175 {0x21C6103C, "Smart Array", &SA5_access},
168 {0x21C7103C, "Smart Array", &SA5_access}, 176 {0x21C7103C, "Smart Array", &SA5_access},
169 {0x21C8103C, "Smart Array", &SA5_access}, 177 {0x21C8103C, "Smart Array", &SA5_access},
170 {0x21C9103C, "Smart Array", &SA5_access}, 178 {0x21C9103C, "Smart Array", &SA5_access},
179 {0x21CA103C, "Smart Array", &SA5_access},
180 {0x21CB103C, "Smart Array", &SA5_access},
181 {0x21CC103C, "Smart Array", &SA5_access},
182 {0x21CD103C, "Smart Array", &SA5_access},
183 {0x21CE103C, "Smart Array", &SA5_access},
171 {0x00761590, "HP Storage P1224 Array Controller", &SA5_access}, 184 {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
172 {0x00871590, "HP Storage P1224e Array Controller", &SA5_access}, 185 {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
173 {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access}, 186 {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
@@ -181,7 +194,8 @@ static int number_of_controllers;
181static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); 194static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
182static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); 195static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
183static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg); 196static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
184static void start_io(struct ctlr_info *h); 197static void lock_and_start_io(struct ctlr_info *h);
198static void start_io(struct ctlr_info *h, unsigned long *flags);
185 199
186#ifdef CONFIG_COMPAT 200#ifdef CONFIG_COMPAT
187static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg); 201static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
@@ -683,7 +697,7 @@ static inline void addQ(struct list_head *list, struct CommandList *c)
683static inline u32 next_command(struct ctlr_info *h, u8 q) 697static inline u32 next_command(struct ctlr_info *h, u8 q)
684{ 698{
685 u32 a; 699 u32 a;
686 struct reply_pool *rq = &h->reply_queue[q]; 700 struct reply_queue_buffer *rq = &h->reply_queue[q];
687 unsigned long flags; 701 unsigned long flags;
688 702
689 if (h->transMethod & CFGTBL_Trans_io_accel1) 703 if (h->transMethod & CFGTBL_Trans_io_accel1)
@@ -832,8 +846,8 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
832 spin_lock_irqsave(&h->lock, flags); 846 spin_lock_irqsave(&h->lock, flags);
833 addQ(&h->reqQ, c); 847 addQ(&h->reqQ, c);
834 h->Qdepth++; 848 h->Qdepth++;
849 start_io(h, &flags);
835 spin_unlock_irqrestore(&h->lock, flags); 850 spin_unlock_irqrestore(&h->lock, flags);
836 start_io(h);
837} 851}
838 852
839static inline void removeQ(struct CommandList *c) 853static inline void removeQ(struct CommandList *c)
@@ -1542,9 +1556,13 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
1542 dev_warn(&h->pdev->dev, 1556 dev_warn(&h->pdev->dev,
1543 "%s: task complete with check condition.\n", 1557 "%s: task complete with check condition.\n",
1544 "HP SSD Smart Path"); 1558 "HP SSD Smart Path");
1559 cmd->result |= SAM_STAT_CHECK_CONDITION;
1545 if (c2->error_data.data_present != 1560 if (c2->error_data.data_present !=
1546 IOACCEL2_SENSE_DATA_PRESENT) 1561 IOACCEL2_SENSE_DATA_PRESENT) {
1562 memset(cmd->sense_buffer, 0,
1563 SCSI_SENSE_BUFFERSIZE);
1547 break; 1564 break;
1565 }
1548 /* copy the sense data */ 1566 /* copy the sense data */
1549 data_len = c2->error_data.sense_data_len; 1567 data_len = c2->error_data.sense_data_len;
1550 if (data_len > SCSI_SENSE_BUFFERSIZE) 1568 if (data_len > SCSI_SENSE_BUFFERSIZE)
@@ -1554,7 +1572,6 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
1554 sizeof(c2->error_data.sense_data_buff); 1572 sizeof(c2->error_data.sense_data_buff);
1555 memcpy(cmd->sense_buffer, 1573 memcpy(cmd->sense_buffer,
1556 c2->error_data.sense_data_buff, data_len); 1574 c2->error_data.sense_data_buff, data_len);
1557 cmd->result |= SAM_STAT_CHECK_CONDITION;
1558 retry = 1; 1575 retry = 1;
1559 break; 1576 break;
1560 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY: 1577 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
@@ -1639,16 +1656,6 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
1639 if (is_logical_dev_addr_mode(dev->scsi3addr) && 1656 if (is_logical_dev_addr_mode(dev->scsi3addr) &&
1640 c2->error_data.serv_response == 1657 c2->error_data.serv_response ==
1641 IOACCEL2_SERV_RESPONSE_FAILURE) { 1658 IOACCEL2_SERV_RESPONSE_FAILURE) {
1642 if (c2->error_data.status ==
1643 IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
1644 dev_warn(&h->pdev->dev,
1645 "%s: Path is unavailable, retrying on standard path.\n",
1646 "HP SSD Smart Path");
1647 else
1648 dev_warn(&h->pdev->dev,
1649 "%s: Error 0x%02x, retrying on standard path.\n",
1650 "HP SSD Smart Path", c2->error_data.status);
1651
1652 dev->offload_enabled = 0; 1659 dev->offload_enabled = 0;
1653 h->drv_req_rescan = 1; /* schedule controller for a rescan */ 1660 h->drv_req_rescan = 1; /* schedule controller for a rescan */
1654 cmd->result = DID_SOFT_ERROR << 16; 1661 cmd->result = DID_SOFT_ERROR << 16;
@@ -1979,20 +1986,26 @@ static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1979 wait_for_completion(&wait); 1986 wait_for_completion(&wait);
1980} 1987}
1981 1988
1989static u32 lockup_detected(struct ctlr_info *h)
1990{
1991 int cpu;
1992 u32 rc, *lockup_detected;
1993
1994 cpu = get_cpu();
1995 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
1996 rc = *lockup_detected;
1997 put_cpu();
1998 return rc;
1999}
2000
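lockup_detected() reads only the current CPU's copy of the flag under get_cpu()/put_cpu(), avoiding the h->lock round trip the old code needed. A loose userspace approximation, using sched_getcpu() and a fixed-size array in place of real per-cpu data (the 256-CPU cap is hypothetical; the kernel version also disables preemption, which this does not):

#define _GNU_SOURCE
#include <sched.h>
#include <stdint.h>

static uint32_t lockup_word[256];	/* one copy per CPU */

static uint32_t lockup_detected_sketch(void)
{
	int cpu = sched_getcpu();

	return lockup_word[cpu >= 0 && cpu < 256 ? cpu : 0];
}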
1982static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h, 2001static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
1983 struct CommandList *c) 2002 struct CommandList *c)
1984{ 2003{
1985 unsigned long flags;
1986
1987 /* If controller lockup detected, fake a hardware error. */ 2004 /* If controller lockup detected, fake a hardware error. */
1988 spin_lock_irqsave(&h->lock, flags); 2005 if (unlikely(lockup_detected(h)))
1989 if (unlikely(h->lockup_detected)) {
1990 spin_unlock_irqrestore(&h->lock, flags);
1991 c->err_info->CommandStatus = CMD_HARDWARE_ERR; 2006 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
1992 } else { 2007 else
1993 spin_unlock_irqrestore(&h->lock, flags);
1994 hpsa_scsi_do_simple_cmd_core(h, c); 2008 hpsa_scsi_do_simple_cmd_core(h, c);
1995 }
1996} 2009}
1997 2010
1998#define MAX_DRIVER_CMD_RETRIES 25 2011#define MAX_DRIVER_CMD_RETRIES 25
@@ -2417,7 +2430,7 @@ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
2417 buflen = 16; 2430 buflen = 16;
2418 buf = kzalloc(64, GFP_KERNEL); 2431 buf = kzalloc(64, GFP_KERNEL);
2419 if (!buf) 2432 if (!buf)
2420 return -1; 2433 return -ENOMEM;
2421 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64); 2434 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
2422 if (rc == 0) 2435 if (rc == 0)
2423 memcpy(device_id, &buf[8], buflen); 2436 memcpy(device_id, &buf[8], buflen);
@@ -2503,27 +2516,21 @@ static int hpsa_get_volume_status(struct ctlr_info *h,
2503 return HPSA_VPD_LV_STATUS_UNSUPPORTED; 2516 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2504 2517
2505 /* Does controller have VPD for logical volume status? */ 2518 /* Does controller have VPD for logical volume status? */
2506 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) { 2519 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
2507 dev_warn(&h->pdev->dev, "Logical volume status VPD page is unsupported.\n");
2508 goto exit_failed; 2520 goto exit_failed;
2509 }
2510 2521
2511 /* Get the size of the VPD return buffer */ 2522 /* Get the size of the VPD return buffer */
2512 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, 2523 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2513 buf, HPSA_VPD_HEADER_SZ); 2524 buf, HPSA_VPD_HEADER_SZ);
2514 if (rc != 0) { 2525 if (rc != 0)
2515 dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
2516 goto exit_failed; 2526 goto exit_failed;
2517 }
2518 size = buf[3]; 2527 size = buf[3];
2519 2528
2520 /* Now get the whole VPD buffer */ 2529 /* Now get the whole VPD buffer */
2521 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, 2530 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2522 buf, size + HPSA_VPD_HEADER_SZ); 2531 buf, size + HPSA_VPD_HEADER_SZ);
2523 if (rc != 0) { 2532 if (rc != 0)
2524 dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
2525 goto exit_failed; 2533 goto exit_failed;
2526 }
2527 status = buf[4]; /* status byte */ 2534 status = buf[4]; /* status byte */
2528 2535
2529 kfree(buf); 2536 kfree(buf);
@@ -2536,11 +2543,11 @@ exit_failed:
2536/* Determine offline status of a volume. 2543/* Determine offline status of a volume.
2537 * Return either: 2544 * Return either:
2538 * 0 (not offline) 2545 * 0 (not offline)
2539 * -1 (offline for unknown reasons) 2546 * 0xff (offline for unknown reasons)
2540 * # (integer code indicating one of several NOT READY states 2547 * # (integer code indicating one of several NOT READY states
2541 * describing why a volume is to be kept offline) 2548 * describing why a volume is to be kept offline)
2542 */ 2549 */
2543static unsigned char hpsa_volume_offline(struct ctlr_info *h, 2550static int hpsa_volume_offline(struct ctlr_info *h,
2544 unsigned char scsi3addr[]) 2551 unsigned char scsi3addr[])
2545{ 2552{
2546 struct CommandList *c; 2553 struct CommandList *c;
@@ -2639,11 +2646,15 @@ static int hpsa_update_device_info(struct ctlr_info *h,
2639 2646
2640 if (this_device->devtype == TYPE_DISK && 2647 if (this_device->devtype == TYPE_DISK &&
2641 is_logical_dev_addr_mode(scsi3addr)) { 2648 is_logical_dev_addr_mode(scsi3addr)) {
2649 int volume_offline;
2650
2642 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); 2651 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
2643 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) 2652 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
2644 hpsa_get_ioaccel_status(h, scsi3addr, this_device); 2653 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
2645 this_device->volume_offline = 2654 volume_offline = hpsa_volume_offline(h, scsi3addr);
2646 hpsa_volume_offline(h, scsi3addr); 2655 if (volume_offline < 0 || volume_offline > 0xff)
2656 volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
2657 this_device->volume_offline = volume_offline & 0xff;
2647 } else { 2658 } else {
2648 this_device->raid_level = RAID_UNKNOWN; 2659 this_device->raid_level = RAID_UNKNOWN;
2649 this_device->offload_config = 0; 2660 this_device->offload_config = 0;
@@ -2836,6 +2847,8 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
2836 2847
2837 /* Get the list of physical devices */ 2848 /* Get the list of physical devices */
2838 physicals = kzalloc(reportsize, GFP_KERNEL); 2849 physicals = kzalloc(reportsize, GFP_KERNEL);
2850 if (physicals == NULL)
2851 return 0;
2839 if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals, 2852 if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
2840 reportsize, extended)) { 2853 reportsize, extended)) {
2841 dev_err(&h->pdev->dev, 2854 dev_err(&h->pdev->dev,
@@ -2847,26 +2860,20 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
2847 nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) / 2860 nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
2848 responsesize; 2861 responsesize;
2849 2862
2850
2851 /* find ioaccel2 handle in list of physicals: */ 2863 /* find ioaccel2 handle in list of physicals: */
2852 for (i = 0; i < nphysicals; i++) { 2864 for (i = 0; i < nphysicals; i++) {
2865 struct ext_report_lun_entry *entry = &physicals->LUN[i];
2866
2853 /* handle is in bytes 28-31 of each lun */ 2867 /* handle is in bytes 28-31 of each lun */
2854 if (memcmp(&((struct ReportExtendedLUNdata *) 2868 if (entry->ioaccel_handle != find)
2855 physicals)->LUN[i][20], &find, 4) != 0) {
2856 continue; /* didn't match */ 2869 continue; /* didn't match */
2857 }
2858 found = 1; 2870 found = 1;
2859 memcpy(scsi3addr, &((struct ReportExtendedLUNdata *) 2871 memcpy(scsi3addr, entry->lunid, 8);
2860 physicals)->LUN[i][0], 8);
2861 if (h->raid_offload_debug > 0) 2872 if (h->raid_offload_debug > 0)
2862 dev_info(&h->pdev->dev, 2873 dev_info(&h->pdev->dev,
2863 "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 2874 "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n",
2864 __func__, find, 2875 __func__, find,
2865 ((struct ReportExtendedLUNdata *) 2876 entry->ioaccel_handle, scsi3addr);
2866 physicals)->LUN[i][20],
2867 scsi3addr[0], scsi3addr[1], scsi3addr[2],
2868 scsi3addr[3], scsi3addr[4], scsi3addr[5],
2869 scsi3addr[6], scsi3addr[7]);
2870 break; /* found it */ 2877 break; /* found it */
2871 } 2878 }
2872 2879
@@ -2951,7 +2958,8 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
2951 return RAID_CTLR_LUNID; 2958 return RAID_CTLR_LUNID;
2952 2959
2953 if (i < logicals_start) 2960 if (i < logicals_start)
2954 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0]; 2961 return &physdev_list->LUN[i -
2962 (raid_ctlr_position == 0)].lunid[0];
2955 2963
2956 if (i < last_device) 2964 if (i < last_device)
2957 return &logdev_list->LUN[i - nphysicals - 2965 return &logdev_list->LUN[i - nphysicals -
@@ -2963,19 +2971,24 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
2963static int hpsa_hba_mode_enabled(struct ctlr_info *h) 2971static int hpsa_hba_mode_enabled(struct ctlr_info *h)
2964{ 2972{
2965 int rc; 2973 int rc;
2974 int hba_mode_enabled;
2966 struct bmic_controller_parameters *ctlr_params; 2975 struct bmic_controller_parameters *ctlr_params;
2967 ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters), 2976 ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
2968 GFP_KERNEL); 2977 GFP_KERNEL);
2969 2978
2970 if (!ctlr_params) 2979 if (!ctlr_params)
2971 return 0; 2980 return -ENOMEM;
2972 rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params, 2981 rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
2973 sizeof(struct bmic_controller_parameters)); 2982 sizeof(struct bmic_controller_parameters));
2974 if (rc != 0) { 2983 if (rc) {
2975 kfree(ctlr_params); 2984 kfree(ctlr_params);
2976 return 0; 2985 return rc;
2977 } 2986 }
2978 return ctlr_params->nvram_flags & (1 << 3) ? 1 : 0; 2987
2988 hba_mode_enabled =
2989 ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
2990 kfree(ctlr_params);
2991 return hba_mode_enabled;
2979} 2992}
2980 2993
2981static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) 2994static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
@@ -3001,7 +3014,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3001 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24; 3014 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;
3002 int i, n_ext_target_devs, ndevs_to_allocate; 3015 int i, n_ext_target_devs, ndevs_to_allocate;
3003 int raid_ctlr_position; 3016 int raid_ctlr_position;
3004 u8 rescan_hba_mode; 3017 int rescan_hba_mode;
3005 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS); 3018 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
3006 3019
3007 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); 3020 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
@@ -3016,6 +3029,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3016 memset(lunzerobits, 0, sizeof(lunzerobits)); 3029 memset(lunzerobits, 0, sizeof(lunzerobits));
3017 3030
3018 rescan_hba_mode = hpsa_hba_mode_enabled(h); 3031 rescan_hba_mode = hpsa_hba_mode_enabled(h);
3032 if (rescan_hba_mode < 0)
3033 goto out;
3019 3034
3020 if (!h->hba_mode_enabled && rescan_hba_mode) 3035 if (!h->hba_mode_enabled && rescan_hba_mode)
3021 dev_warn(&h->pdev->dev, "HBA mode enabled\n"); 3036 dev_warn(&h->pdev->dev, "HBA mode enabled\n");
@@ -3053,7 +3068,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3053 ndev_allocated++; 3068 ndev_allocated++;
3054 } 3069 }
3055 3070
3056 if (unlikely(is_scsi_rev_5(h))) 3071 if (is_scsi_rev_5(h))
3057 raid_ctlr_position = 0; 3072 raid_ctlr_position = 0;
3058 else 3073 else
3059 raid_ctlr_position = nphysicals + nlogicals; 3074 raid_ctlr_position = nphysicals + nlogicals;
@@ -3950,7 +3965,6 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
3950 struct hpsa_scsi_dev_t *dev; 3965 struct hpsa_scsi_dev_t *dev;
3951 unsigned char scsi3addr[8]; 3966 unsigned char scsi3addr[8];
3952 struct CommandList *c; 3967 struct CommandList *c;
3953 unsigned long flags;
3954 int rc = 0; 3968 int rc = 0;
3955 3969
3956 /* Get the ptr to our adapter structure out of cmd->host. */ 3970 /* Get the ptr to our adapter structure out of cmd->host. */
@@ -3963,14 +3977,11 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
3963 } 3977 }
3964 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); 3978 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
3965 3979
3966 spin_lock_irqsave(&h->lock, flags); 3980 if (unlikely(lockup_detected(h))) {
3967 if (unlikely(h->lockup_detected)) {
3968 spin_unlock_irqrestore(&h->lock, flags);
3969 cmd->result = DID_ERROR << 16; 3981 cmd->result = DID_ERROR << 16;
3970 done(cmd); 3982 done(cmd);
3971 return 0; 3983 return 0;
3972 } 3984 }
3973 spin_unlock_irqrestore(&h->lock, flags);
3974 c = cmd_alloc(h); 3985 c = cmd_alloc(h);
3975 if (c == NULL) { /* trouble... */ 3986 if (c == NULL) { /* trouble... */
3976 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 3987 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
@@ -4082,16 +4093,13 @@ static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
4082 * we can prevent new rescan threads from piling up on a 4093 * we can prevent new rescan threads from piling up on a
4083 * locked up controller. 4094 * locked up controller.
4084 */ 4095 */
4085 spin_lock_irqsave(&h->lock, flags); 4096 if (unlikely(lockup_detected(h))) {
4086 if (unlikely(h->lockup_detected)) {
4087 spin_unlock_irqrestore(&h->lock, flags);
4088 spin_lock_irqsave(&h->scan_lock, flags); 4097 spin_lock_irqsave(&h->scan_lock, flags);
4089 h->scan_finished = 1; 4098 h->scan_finished = 1;
4090 wake_up_all(&h->scan_wait_queue); 4099 wake_up_all(&h->scan_wait_queue);
4091 spin_unlock_irqrestore(&h->scan_lock, flags); 4100 spin_unlock_irqrestore(&h->scan_lock, flags);
4092 return 1; 4101 return 1;
4093 } 4102 }
4094 spin_unlock_irqrestore(&h->lock, flags);
4095 return 0; 4103 return 0;
4096} 4104}
4097 4105
@@ -4942,7 +4950,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4942 buff = kmalloc(iocommand.buf_size, GFP_KERNEL); 4950 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
4943 if (buff == NULL) 4951 if (buff == NULL)
4944 return -EFAULT; 4952 return -EFAULT;
4945 if (iocommand.Request.Type.Direction == XFER_WRITE) { 4953 if (iocommand.Request.Type.Direction & XFER_WRITE) {
4946 /* Copy the data into the buffer we created */ 4954 /* Copy the data into the buffer we created */
4947 if (copy_from_user(buff, iocommand.buf, 4955 if (copy_from_user(buff, iocommand.buf,
4948 iocommand.buf_size)) { 4956 iocommand.buf_size)) {
@@ -5005,7 +5013,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5005 rc = -EFAULT; 5013 rc = -EFAULT;
5006 goto out; 5014 goto out;
5007 } 5015 }
5008 if (iocommand.Request.Type.Direction == XFER_READ && 5016 if ((iocommand.Request.Type.Direction & XFER_READ) &&
5009 iocommand.buf_size > 0) { 5017 iocommand.buf_size > 0) {
5010 /* Copy the data out of the buffer we created */ 5018 /* Copy the data out of the buffer we created */
5011 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { 5019 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
@@ -5082,7 +5090,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5082 status = -ENOMEM; 5090 status = -ENOMEM;
5083 goto cleanup1; 5091 goto cleanup1;
5084 } 5092 }
5085 if (ioc->Request.Type.Direction == XFER_WRITE) { 5093 if (ioc->Request.Type.Direction & XFER_WRITE) {
5086 if (copy_from_user(buff[sg_used], data_ptr, sz)) { 5094 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
5087 status = -ENOMEM; 5095 status = -ENOMEM;
5088 goto cleanup1; 5096 goto cleanup1;
@@ -5134,7 +5142,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5134 status = -EFAULT; 5142 status = -EFAULT;
5135 goto cleanup0; 5143 goto cleanup0;
5136 } 5144 }
5137 if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) { 5145 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
5138 /* Copy the data out of the buffer we created */ 5146 /* Copy the data out of the buffer we created */
5139 BYTE __user *ptr = ioc->buf; 5147 BYTE __user *ptr = ioc->buf;
5140 for (i = 0; i < sg_used; i++) { 5148 for (i = 0; i < sg_used; i++) {
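The Direction fixes above matter because the field is a bit mask: a bidirectional request sets both XFER_READ and XFER_WRITE, so an equality test silently misses it. A check assuming the usual CISS encodings (XFER_WRITE 0x1, XFER_READ 0x2):

#include <assert.h>

enum { XFER_NONE = 0x0, XFER_WRITE = 0x1, XFER_READ = 0x2,
       XFER_RDWR = XFER_WRITE | XFER_READ };

int main(void)
{
	int dir = XFER_RDWR;		/* bidirectional request */

	assert(!(dir == XFER_WRITE));	/* old test: silently skipped */
	assert(dir & XFER_WRITE);	/* new test: copy user data in */
	assert(dir & XFER_READ);	/* and copy result data out */
	return 0;
}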
@@ -5438,13 +5446,12 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
5438 5446
5439/* Takes cmds off the submission queue and sends them to the hardware, 5447/* Takes cmds off the submission queue and sends them to the hardware,
5440 * then puts them on the queue of cmds waiting for completion. 5448 * then puts them on the queue of cmds waiting for completion.
5449 * Assumes h->lock is held
5441 */ 5450 */
5442static void start_io(struct ctlr_info *h) 5451static void start_io(struct ctlr_info *h, unsigned long *flags)
5443{ 5452{
5444 struct CommandList *c; 5453 struct CommandList *c;
5445 unsigned long flags;
5446 5454
5447 spin_lock_irqsave(&h->lock, flags);
5448 while (!list_empty(&h->reqQ)) { 5455 while (!list_empty(&h->reqQ)) {
5449 c = list_entry(h->reqQ.next, struct CommandList, list); 5456 c = list_entry(h->reqQ.next, struct CommandList, list);
5450 /* can't do anything if fifo is full */ 5457 /* can't do anything if fifo is full */
@@ -5467,14 +5474,20 @@ static void start_io(struct ctlr_info *h)
5467 * condition. 5474 * condition.
5468 */ 5475 */
5469 h->commands_outstanding++; 5476 h->commands_outstanding++;
5470 if (h->commands_outstanding > h->max_outstanding)
5471 h->max_outstanding = h->commands_outstanding;
5472 5477
5473 /* Tell the controller execute command */ 5478 /* Tell the controller execute command */
5474 spin_unlock_irqrestore(&h->lock, flags); 5479 spin_unlock_irqrestore(&h->lock, *flags);
5475 h->access.submit_command(h, c); 5480 h->access.submit_command(h, c);
5476 spin_lock_irqsave(&h->lock, flags); 5481 spin_lock_irqsave(&h->lock, *flags);
5477 } 5482 }
5483}
5484
5485static void lock_and_start_io(struct ctlr_info *h)
5486{
5487 unsigned long flags;
5488
5489 spin_lock_irqsave(&h->lock, flags);
5490 start_io(h, &flags);
5478 spin_unlock_irqrestore(&h->lock, flags); 5491 spin_unlock_irqrestore(&h->lock, flags);
5479} 5492}
5480 5493
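start_io() now runs with h->lock already held and drops/retakes it around each submit_command(), with the caller's flags passed down; lock_and_start_io() wraps it for callers that arrive unlocked. A pthread sketch of that split, with the queue order simplified to a LIFO pop and the hardware call stubbed:

#include <pthread.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;

static void submit_to_hw(int cmd) { (void)cmd; }	/* MMIO stub */

/* Runs with q_lock held, dropping it around each hardware submit --
 * the contract start_io(h, &flags) now documents. */
static void start_io_locked(int *pending, int *depth)
{
	while (*depth > 0) {
		int cmd = pending[--*depth];

		pthread_mutex_unlock(&q_lock);
		submit_to_hw(cmd);		/* never hold the lock here */
		pthread_mutex_lock(&q_lock);
	}
}

/* Wrapper for callers that arrive without the lock. */
static void lock_and_start_io_sketch(int *pending, int *depth)
{
	pthread_mutex_lock(&q_lock);
	start_io_locked(pending, depth);
	pthread_mutex_unlock(&q_lock);
}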
@@ -5542,7 +5555,7 @@ static inline void finish_cmd(struct CommandList *c)
5542 else if (c->cmd_type == CMD_IOCTL_PEND) 5555 else if (c->cmd_type == CMD_IOCTL_PEND)
5543 complete(c->waiting); 5556 complete(c->waiting);
5544 if (unlikely(io_may_be_stalled)) 5557 if (unlikely(io_may_be_stalled))
5545 start_io(h); 5558 lock_and_start_io(h);
5546} 5559}
5547 5560
5548static inline u32 hpsa_tag_contains_index(u32 tag) 5561static inline u32 hpsa_tag_contains_index(u32 tag)
@@ -5819,12 +5832,12 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
5819 dev_info(&pdev->dev, "using doorbell to reset controller\n"); 5832 dev_info(&pdev->dev, "using doorbell to reset controller\n");
5820 writel(use_doorbell, vaddr + SA5_DOORBELL); 5833 writel(use_doorbell, vaddr + SA5_DOORBELL);
5821 5834
5822 /* PMC hardware guys tell us we need a 5 second delay after 5835 /* PMC hardware guys tell us we need a 10 second delay after
5823 * doorbell reset and before any attempt to talk to the board 5836 * doorbell reset and before any attempt to talk to the board
5824 * at all to ensure that this actually works and doesn't fall 5837 * at all to ensure that this actually works and doesn't fall
5825 * over in some weird corner cases. 5838 * over in some weird corner cases.
5826 */ 5839 */
5827 msleep(5000); 5840 msleep(10000);
5828 } else { /* Try to do it the PCI power state way */ 5841 } else { /* Try to do it the PCI power state way */
5829 5842
5830 /* Quoting from the Open CISS Specification: "The Power 5843 /* Quoting from the Open CISS Specification: "The Power
@@ -6145,6 +6158,8 @@ static void hpsa_interrupt_mode(struct ctlr_info *h)
6145 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { 6158 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
6146 dev_info(&h->pdev->dev, "MSIX\n"); 6159 dev_info(&h->pdev->dev, "MSIX\n");
6147 h->msix_vector = MAX_REPLY_QUEUES; 6160 h->msix_vector = MAX_REPLY_QUEUES;
6161 if (h->msix_vector > num_online_cpus())
6162 h->msix_vector = num_online_cpus();
6148 err = pci_enable_msix(h->pdev, hpsa_msix_entries, 6163 err = pci_enable_msix(h->pdev, hpsa_msix_entries,
6149 h->msix_vector); 6164 h->msix_vector);
6150 if (err > 0) { 6165 if (err > 0) {
@@ -6594,6 +6609,17 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
6594 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); 6609 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
6595} 6610}
6596 6611
6612static void hpsa_irq_affinity_hints(struct ctlr_info *h)
6613{
6614 int i, cpu, rc;
6615
6616 cpu = cpumask_first(cpu_online_mask);
6617 for (i = 0; i < h->msix_vector; i++) {
6618 rc = irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
6619 cpu = cpumask_next(cpu, cpu_online_mask);
6620 }
6621}
6622
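hpsa_irq_affinity_hints() spreads the MSI-X vectors across online CPUs with cpumask_first()/cpumask_next(), and the cap added earlier in hpsa_interrupt_mode() keeps the vector count at or below the CPU count so the walk never runs off the mask. A trivial sketch of the resulting assignment (the vector count is a stand-in for MAX_REPLY_QUEUES):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	long nvec = 16;			/* MAX_REPLY_QUEUES stand-in */

	if (nvec > ncpus)		/* the cap added in the hunk above */
		nvec = ncpus;
	for (long i = 0; i < nvec; i++)	/* one vector per online CPU */
		printf("vector %ld -> cpu %ld\n", i, i);
	return 0;
}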
6597static int hpsa_request_irq(struct ctlr_info *h, 6623static int hpsa_request_irq(struct ctlr_info *h,
6598 irqreturn_t (*msixhandler)(int, void *), 6624 irqreturn_t (*msixhandler)(int, void *),
6599 irqreturn_t (*intxhandler)(int, void *)) 6625 irqreturn_t (*intxhandler)(int, void *))
@@ -6613,6 +6639,7 @@ static int hpsa_request_irq(struct ctlr_info *h,
6613 rc = request_irq(h->intr[i], msixhandler, 6639 rc = request_irq(h->intr[i], msixhandler,
6614 0, h->devname, 6640 0, h->devname,
6615 &h->q[i]); 6641 &h->q[i]);
6642 hpsa_irq_affinity_hints(h);
6616 } else { 6643 } else {
6617 /* Use single reply pool */ 6644 /* Use single reply pool */
6618 if (h->msix_vector > 0 || h->msi_vector) { 6645 if (h->msix_vector > 0 || h->msi_vector) {
@@ -6664,12 +6691,15 @@ static void free_irqs(struct ctlr_info *h)
6664 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { 6691 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
6665 /* Single reply queue, only one irq to free */ 6692 /* Single reply queue, only one irq to free */
6666 i = h->intr_mode; 6693 i = h->intr_mode;
6694 irq_set_affinity_hint(h->intr[i], NULL);
6667 free_irq(h->intr[i], &h->q[i]); 6695 free_irq(h->intr[i], &h->q[i]);
6668 return; 6696 return;
6669 } 6697 }
6670 6698
6671 for (i = 0; i < h->msix_vector; i++) 6699 for (i = 0; i < h->msix_vector; i++) {
6700 irq_set_affinity_hint(h->intr[i], NULL);
6672 free_irq(h->intr[i], &h->q[i]); 6701 free_irq(h->intr[i], &h->q[i]);
6702 }
6673} 6703}
6674 6704
6675static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) 6705static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
@@ -6686,6 +6716,20 @@ static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
6686#endif /* CONFIG_PCI_MSI */ 6716#endif /* CONFIG_PCI_MSI */
6687} 6717}
6688 6718
6719static void hpsa_free_reply_queues(struct ctlr_info *h)
6720{
6721 int i;
6722
6723 for (i = 0; i < h->nreply_queues; i++) {
6724 if (!h->reply_queue[i].head)
6725 continue;
6726 pci_free_consistent(h->pdev, h->reply_queue_size,
6727 h->reply_queue[i].head, h->reply_queue[i].busaddr);
6728 h->reply_queue[i].head = NULL;
6729 h->reply_queue[i].busaddr = 0;
6730 }
6731}
6732
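hpsa_free_reply_queues() is written to be idempotent: slots that never got allocated are skipped and freed pointers are NULLed, so the same routine serves partial-allocation error paths and full teardown. The shape of that loop, with free() standing in for pci_free_consistent():

#include <stdlib.h>

#define NQUEUES 8			/* h->nreply_queues stand-in */

static void *rq_head[NQUEUES];

static void free_reply_queues_sketch(void)
{
	for (int i = 0; i < NQUEUES; i++) {
		if (!rq_head[i])	/* never allocated: skip */
			continue;
		free(rq_head[i]);
		rq_head[i] = NULL;	/* safe to call this again */
	}
}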
6689static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) 6733static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
6690{ 6734{
6691 hpsa_free_irqs_and_disable_msix(h); 6735 hpsa_free_irqs_and_disable_msix(h);
@@ -6693,8 +6737,7 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
6693 hpsa_free_cmd_pool(h); 6737 hpsa_free_cmd_pool(h);
6694 kfree(h->ioaccel1_blockFetchTable); 6738 kfree(h->ioaccel1_blockFetchTable);
6695 kfree(h->blockFetchTable); 6739 kfree(h->blockFetchTable);
6696 pci_free_consistent(h->pdev, h->reply_pool_size, 6740 hpsa_free_reply_queues(h);
6697 h->reply_pool, h->reply_pool_dhandle);
6698 if (h->vaddr) 6741 if (h->vaddr)
6699 iounmap(h->vaddr); 6742 iounmap(h->vaddr);
6700 if (h->transtable) 6743 if (h->transtable)
@@ -6719,16 +6762,38 @@ static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
6719 } 6762 }
6720} 6763}
6721 6764
6765static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
6766{
6767 int i, cpu;
6768
6769 cpu = cpumask_first(cpu_online_mask);
6770 for (i = 0; i < num_online_cpus(); i++) {
6771 u32 *lockup_detected;
6772 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
6773 *lockup_detected = value;
6774 cpu = cpumask_next(cpu, cpu_online_mask);
6775 }
6776 wmb(); /* be sure the per-cpu variables are out to memory */
6777}
6778
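set_lockup_detected_for_all_cpus() walks cpu_online_mask writing every CPU's copy of the flag, then issues wmb() so the stores are visible before anything acts on the lockup. A userspace sketch, with a full fence standing in for wmb() and the same hypothetical per-CPU array as the read-side sketch:

#include <stdint.h>
#include <unistd.h>

static uint32_t lockup_word[256];	/* one copy per CPU */

static void set_lockup_all_cpus_sketch(uint32_t value)
{
	long n = sysconf(_SC_NPROCESSORS_ONLN);

	for (long i = 0; i < n && i < 256; i++)
		lockup_word[i] = value;	/* write every CPU's copy */
	__sync_synchronize();		/* fence, analogous to wmb() */
}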
6722static void controller_lockup_detected(struct ctlr_info *h) 6779static void controller_lockup_detected(struct ctlr_info *h)
6723{ 6780{
6724 unsigned long flags; 6781 unsigned long flags;
6782 u32 lockup_detected;
6725 6783
6726 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6784 h->access.set_intr_mask(h, HPSA_INTR_OFF);
6727 spin_lock_irqsave(&h->lock, flags); 6785 spin_lock_irqsave(&h->lock, flags);
6728 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 6786 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
6787 if (!lockup_detected) {
6788 /* no heartbeat, but controller gave us a zero. */
6789 dev_warn(&h->pdev->dev,
6790 "lockup detected but scratchpad register is zero\n");
6791 lockup_detected = 0xffffffff;
6792 }
6793 set_lockup_detected_for_all_cpus(h, lockup_detected);
6729 spin_unlock_irqrestore(&h->lock, flags); 6794 spin_unlock_irqrestore(&h->lock, flags);
6730 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", 6795 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
6731 h->lockup_detected); 6796 lockup_detected);
6732 pci_disable_device(h->pdev); 6797 pci_disable_device(h->pdev);
6733 spin_lock_irqsave(&h->lock, flags); 6798 spin_lock_irqsave(&h->lock, flags);
6734 fail_all_cmds_on_list(h, &h->cmpQ); 6799 fail_all_cmds_on_list(h, &h->cmpQ);
@@ -6863,7 +6928,7 @@ static void hpsa_monitor_ctlr_worker(struct work_struct *work)
6863 struct ctlr_info *h = container_of(to_delayed_work(work), 6928 struct ctlr_info *h = container_of(to_delayed_work(work),
6864 struct ctlr_info, monitor_ctlr_work); 6929 struct ctlr_info, monitor_ctlr_work);
6865 detect_controller_lockup(h); 6930 detect_controller_lockup(h);
6866 if (h->lockup_detected) 6931 if (lockup_detected(h))
6867 return; 6932 return;
6868 6933
6869 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { 6934 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
@@ -6913,7 +6978,6 @@ reinit_after_soft_reset:
6913 * the 5 lower bits of the address are used by the hardware and by 6978 * the 5 lower bits of the address are used by the hardware and by
6914 * the driver. See comments in hpsa.h for more info. 6979 * the driver. See comments in hpsa.h for more info.
6915 */ 6980 */
6916#define COMMANDLIST_ALIGNMENT 128
6917 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); 6981 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
6918 h = kzalloc(sizeof(*h), GFP_KERNEL); 6982 h = kzalloc(sizeof(*h), GFP_KERNEL);
6919 if (!h) 6983 if (!h)
@@ -6928,6 +6992,13 @@ reinit_after_soft_reset:
6928 spin_lock_init(&h->offline_device_lock); 6992 spin_lock_init(&h->offline_device_lock);
6929 spin_lock_init(&h->scan_lock); 6993 spin_lock_init(&h->scan_lock);
6930 spin_lock_init(&h->passthru_count_lock); 6994 spin_lock_init(&h->passthru_count_lock);
6995
6996 /* Allocate and clear per-cpu variable lockup_detected */
6997 h->lockup_detected = alloc_percpu(u32);
6998 if (!h->lockup_detected)
6999 goto clean1;
7000 set_lockup_detected_for_all_cpus(h, 0);
7001
6931 rc = hpsa_pci_init(h); 7002 rc = hpsa_pci_init(h);
6932 if (rc != 0) 7003 if (rc != 0)
6933 goto clean1; 7004 goto clean1;
@@ -7051,6 +7122,8 @@ clean4:
7051 free_irqs(h); 7122 free_irqs(h);
7052clean2: 7123clean2:
7053clean1: 7124clean1:
7125 if (h->lockup_detected)
7126 free_percpu(h->lockup_detected);
7054 kfree(h); 7127 kfree(h);
7055 return rc; 7128 return rc;
7056} 7129}
@@ -7059,16 +7132,10 @@ static void hpsa_flush_cache(struct ctlr_info *h)
7059{ 7132{
7060 char *flush_buf; 7133 char *flush_buf;
7061 struct CommandList *c; 7134 struct CommandList *c;
7062 unsigned long flags;
7063 7135
7064 /* Don't bother trying to flush the cache if locked up */ 7136 /* Don't bother trying to flush the cache if locked up */
7065 spin_lock_irqsave(&h->lock, flags); 7137 if (unlikely(lockup_detected(h)))
7066 if (unlikely(h->lockup_detected)) {
7067 spin_unlock_irqrestore(&h->lock, flags);
7068 return; 7138 return;
7069 }
7070 spin_unlock_irqrestore(&h->lock, flags);
7071
7072 flush_buf = kzalloc(4, GFP_KERNEL); 7139 flush_buf = kzalloc(4, GFP_KERNEL);
7073 if (!flush_buf) 7140 if (!flush_buf)
7074 return; 7141 return;
@@ -7144,8 +7211,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
7144 pci_free_consistent(h->pdev, 7211 pci_free_consistent(h->pdev,
7145 h->nr_cmds * sizeof(struct ErrorInfo), 7212 h->nr_cmds * sizeof(struct ErrorInfo),
7146 h->errinfo_pool, h->errinfo_pool_dhandle); 7213 h->errinfo_pool, h->errinfo_pool_dhandle);
7147 pci_free_consistent(h->pdev, h->reply_pool_size, 7214 hpsa_free_reply_queues(h);
7148 h->reply_pool, h->reply_pool_dhandle);
7149 kfree(h->cmd_pool_bits); 7215 kfree(h->cmd_pool_bits);
7150 kfree(h->blockFetchTable); 7216 kfree(h->blockFetchTable);
7151 kfree(h->ioaccel1_blockFetchTable); 7217 kfree(h->ioaccel1_blockFetchTable);
@@ -7153,6 +7219,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
7153 kfree(h->hba_inquiry_data); 7219 kfree(h->hba_inquiry_data);
7154 pci_disable_device(pdev); 7220 pci_disable_device(pdev);
7155 pci_release_regions(pdev); 7221 pci_release_regions(pdev);
7222 free_percpu(h->lockup_detected);
7156 kfree(h); 7223 kfree(h);
7157} 7224}
7158 7225
@@ -7257,8 +7324,16 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
7257 * 10 = 6 s/g entry or 24k 7324 * 10 = 6 s/g entry or 24k
7258 */ 7325 */
7259 7326
7327 /* If the controller supports either ioaccel method then
7328 * we can also use the RAID stack submit path that does not
7329 * perform the superfluous readl() after each command submission.
7330 */
7331 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
7332 access = SA5_performant_access_no_read;
7333
7260 /* Controller spec: zero out this buffer. */ 7334 /* Controller spec: zero out this buffer. */
7261 memset(h->reply_pool, 0, h->reply_pool_size); 7335 for (i = 0; i < h->nreply_queues; i++)
7336 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
7262 7337
7263 bft[7] = SG_ENTRIES_IN_CMD + 4; 7338 bft[7] = SG_ENTRIES_IN_CMD + 4;
7264 calc_bucket_map(bft, ARRAY_SIZE(bft), 7339 calc_bucket_map(bft, ARRAY_SIZE(bft),
@@ -7274,8 +7349,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
7274 7349
7275 for (i = 0; i < h->nreply_queues; i++) { 7350 for (i = 0; i < h->nreply_queues; i++) {
7276 writel(0, &h->transtable->RepQAddr[i].upper); 7351 writel(0, &h->transtable->RepQAddr[i].upper);
7277 writel(h->reply_pool_dhandle + 7352 writel(h->reply_queue[i].busaddr,
7278 (h->max_commands * sizeof(u64) * i),
7279 &h->transtable->RepQAddr[i].lower); 7353 &h->transtable->RepQAddr[i].lower);
7280 } 7354 }
7281 7355
@@ -7323,8 +7397,10 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
7323 h->ioaccel1_blockFetchTable); 7397 h->ioaccel1_blockFetchTable);
7324 7398
7325 /* initialize all reply queue entries to unused */ 7399 /* initialize all reply queue entries to unused */
7326 memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED, 7400 for (i = 0; i < h->nreply_queues; i++)
7327 h->reply_pool_size); 7401 memset(h->reply_queue[i].head,
7402 (u8) IOACCEL_MODE1_REPLY_UNUSED,
7403 h->reply_queue_size);
7328 7404
7329 /* set all the constant fields in the accelerator command 7405 /* set all the constant fields in the accelerator command
7330 * frames once at init time to save CPU cycles later. 7406 * frames once at init time to save CPU cycles later.
@@ -7386,7 +7462,6 @@ static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
7386 * because the 7 lower bits of the address are used by the 7462 * because the 7 lower bits of the address are used by the
7387 * hardware. 7463 * hardware.
7388 */ 7464 */
7389#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
7390 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) % 7465 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
7391 IOACCEL1_COMMANDLIST_ALIGNMENT); 7466 IOACCEL1_COMMANDLIST_ALIGNMENT);
7392 h->ioaccel_cmd_pool = 7467 h->ioaccel_cmd_pool =
@@ -7424,7 +7499,6 @@ static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
7424 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES) 7499 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
7425 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES; 7500 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
7426 7501
7427#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
7428 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) % 7502 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
7429 IOACCEL2_COMMANDLIST_ALIGNMENT); 7503 IOACCEL2_COMMANDLIST_ALIGNMENT);
7430 h->ioaccel2_cmd_pool = 7504 h->ioaccel2_cmd_pool =
@@ -7482,16 +7556,17 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
7482 } 7556 }
7483 } 7557 }
7484 7558
7485 /* TODO, check that this next line h->nreply_queues is correct */
7486 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1; 7559 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
7487 hpsa_get_max_perf_mode_cmds(h); 7560 hpsa_get_max_perf_mode_cmds(h);
7488 /* Performant mode ring buffer and supporting data structures */ 7561 /* Performant mode ring buffer and supporting data structures */
7489 h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues; 7562 h->reply_queue_size = h->max_commands * sizeof(u64);
7490 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
7491 &(h->reply_pool_dhandle));
7492 7563
7493 for (i = 0; i < h->nreply_queues; i++) { 7564 for (i = 0; i < h->nreply_queues; i++) {
7494 h->reply_queue[i].head = &h->reply_pool[h->max_commands * i]; 7565 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
7566 h->reply_queue_size,
7567 &(h->reply_queue[i].busaddr));
7568 if (!h->reply_queue[i].head)
7569 goto clean_up;
7495 h->reply_queue[i].size = h->max_commands; 7570 h->reply_queue[i].size = h->max_commands;
7496 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */ 7571 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
7497 h->reply_queue[i].current_entry = 0; 7572 h->reply_queue[i].current_entry = 0;
@@ -7500,18 +7575,14 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
7500 /* Need a block fetch table for performant mode */ 7575 /* Need a block fetch table for performant mode */
7501 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) * 7576 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
7502 sizeof(u32)), GFP_KERNEL); 7577 sizeof(u32)), GFP_KERNEL);
7503 7578 if (!h->blockFetchTable)
7504 if ((h->reply_pool == NULL)
7505 || (h->blockFetchTable == NULL))
7506 goto clean_up; 7579 goto clean_up;
7507 7580
7508 hpsa_enter_performant_mode(h, trans_support); 7581 hpsa_enter_performant_mode(h, trans_support);
7509 return; 7582 return;
7510 7583
7511clean_up: 7584clean_up:
7512 if (h->reply_pool) 7585 hpsa_free_reply_queues(h);
7513 pci_free_consistent(h->pdev, h->reply_pool_size,
7514 h->reply_pool, h->reply_pool_dhandle);
7515 kfree(h->blockFetchTable); 7586 kfree(h->blockFetchTable);
7516} 7587}
7517 7588
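[editor's note] The clean_up path above now funnels through hpsa_free_reply_queues(), whose body falls outside this excerpt. A minimal sketch of what such a helper has to do, assuming the per-queue head/busaddr fields added to struct reply_queue_buffer below and the new reply_queue_size field:

static void hpsa_free_reply_queues(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nreply_queues; i++) {
		if (!h->reply_queue[i].head)
			continue;
		/* undo the per-queue pci_alloc_consistent() done at setup */
		pci_free_consistent(h->pdev, h->reply_queue_size,
			h->reply_queue[i].head, h->reply_queue[i].busaddr);
		h->reply_queue[i].head = NULL;
		h->reply_queue[i].busaddr = 0;
	}
}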
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 44235a27e1b6..24472cec7de3 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -57,11 +57,12 @@ struct hpsa_scsi_dev_t {
57 57
58}; 58};
59 59
60struct reply_pool { 60struct reply_queue_buffer {
61 u64 *head; 61 u64 *head;
62 size_t size; 62 size_t size;
63 u8 wraparound; 63 u8 wraparound;
64 u32 current_entry; 64 u32 current_entry;
65 dma_addr_t busaddr;
65}; 66};
66 67
67#pragma pack(1) 68#pragma pack(1)
@@ -90,6 +91,7 @@ struct bmic_controller_parameters {
90 u8 automatic_drive_slamming; 91 u8 automatic_drive_slamming;
91 u8 reserved1; 92 u8 reserved1;
92 u8 nvram_flags; 93 u8 nvram_flags;
94#define HBA_MODE_ENABLED_FLAG (1 << 3)
93 u8 cache_nvram_flags; 95 u8 cache_nvram_flags;
94 u8 drive_config_flags; 96 u8 drive_config_flags;
95 u16 reserved2; 97 u16 reserved2;
@@ -115,11 +117,8 @@ struct ctlr_info {
115 int nr_cmds; /* Number of commands allowed on this controller */ 117 int nr_cmds; /* Number of commands allowed on this controller */
116 struct CfgTable __iomem *cfgtable; 118 struct CfgTable __iomem *cfgtable;
117 int interrupts_enabled; 119 int interrupts_enabled;
118 int major;
119 int max_commands; 120 int max_commands;
120 int commands_outstanding; 121 int commands_outstanding;
121 int max_outstanding; /* Debug */
 122 int usage_count; /* number of opens on all minor devices */
123# define PERF_MODE_INT 0 122# define PERF_MODE_INT 0
124# define DOORBELL_INT 1 123# define DOORBELL_INT 1
125# define SIMPLE_MODE_INT 2 124# define SIMPLE_MODE_INT 2
@@ -176,11 +175,9 @@ struct ctlr_info {
176 /* 175 /*
177 * Performant mode completion buffers 176 * Performant mode completion buffers
178 */ 177 */
179 u64 *reply_pool; 178 size_t reply_queue_size;
180 size_t reply_pool_size; 179 struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
181 struct reply_pool reply_queue[MAX_REPLY_QUEUES];
182 u8 nreply_queues; 180 u8 nreply_queues;
183 dma_addr_t reply_pool_dhandle;
184 u32 *blockFetchTable; 181 u32 *blockFetchTable;
185 u32 *ioaccel1_blockFetchTable; 182 u32 *ioaccel1_blockFetchTable;
186 u32 *ioaccel2_blockFetchTable; 183 u32 *ioaccel2_blockFetchTable;
@@ -195,7 +192,7 @@ struct ctlr_info {
195 u64 last_heartbeat_timestamp; 192 u64 last_heartbeat_timestamp;
196 u32 heartbeat_sample_interval; 193 u32 heartbeat_sample_interval;
197 atomic_t firmware_flash_in_progress; 194 atomic_t firmware_flash_in_progress;
198 u32 lockup_detected; 195 u32 *lockup_detected;
199 struct delayed_work monitor_ctlr_work; 196 struct delayed_work monitor_ctlr_work;
200 int remove_in_progress; 197 int remove_in_progress;
201 u32 fifo_recently_full; 198 u32 fifo_recently_full;
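[editor's note] lockup_detected changes here from a plain u32 into a per-cpu pointer (allocated with alloc_percpu() and released by the free_percpu() call added to hpsa_remove_one above). A sketch of how such a flag would be read, with a helper name that is illustrative rather than quoted from the patch:

static u32 lockup_detected(struct ctlr_info *h)
{
	int cpu;
	u32 rc, *lockup_detected;

	cpu = get_cpu();
	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
	rc = *lockup_detected;
	put_cpu();
	return rc;
}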
@@ -232,11 +229,9 @@ struct ctlr_info {
232#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE (1 << 31) 229#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE (1 << 31)
233 230
234#define RESCAN_REQUIRED_EVENT_BITS \ 231#define RESCAN_REQUIRED_EVENT_BITS \
235 (CTLR_STATE_CHANGE_EVENT | \ 232 (CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
236 CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
237 CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \ 233 CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
238 CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \ 234 CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
239 CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL | \
240 CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \ 235 CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
241 CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE) 236 CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
242 spinlock_t offline_device_lock; 237 spinlock_t offline_device_lock;
@@ -345,22 +340,23 @@ struct offline_device_entry {
345static void SA5_submit_command(struct ctlr_info *h, 340static void SA5_submit_command(struct ctlr_info *h,
346 struct CommandList *c) 341 struct CommandList *c)
347{ 342{
348 dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
349 c->Header.Tag.lower);
350 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); 343 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
351 (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 344 (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
352} 345}
353 346
347static void SA5_submit_command_no_read(struct ctlr_info *h,
348 struct CommandList *c)
349{
350 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
351}
352
354static void SA5_submit_command_ioaccel2(struct ctlr_info *h, 353static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
355 struct CommandList *c) 354 struct CommandList *c)
356{ 355{
357 dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
358 c->Header.Tag.lower);
359 if (c->cmd_type == CMD_IOACCEL2) 356 if (c->cmd_type == CMD_IOACCEL2)
360 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); 357 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
361 else 358 else
362 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); 359 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
363 (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
364} 360}
365 361
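[editor's note] Both submit variants are reached through the per-controller access_method table rather than called directly; a short sketch of the dispatch, with the selection line quoted from the hpsa_enter_performant_mode hunk earlier:

/* selection (from the hunk above): ioaccel-capable boards take the
 * no-read path, everything else keeps the readl() flush */
if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
	access = SA5_performant_access_no_read;

/* dispatch: every submission goes through the function pointer */
h->access.submit_command(h, c);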
366/* 362/*
@@ -398,7 +394,7 @@ static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
398 394
399static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q) 395static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
400{ 396{
401 struct reply_pool *rq = &h->reply_queue[q]; 397 struct reply_queue_buffer *rq = &h->reply_queue[q];
402 unsigned long flags, register_value = FIFO_EMPTY; 398 unsigned long flags, register_value = FIFO_EMPTY;
403 399
404 /* msi auto clears the interrupt pending bit. */ 400 /* msi auto clears the interrupt pending bit. */
@@ -477,7 +473,6 @@ static bool SA5_intr_pending(struct ctlr_info *h)
477{ 473{
478 unsigned long register_value = 474 unsigned long register_value =
479 readl(h->vaddr + SA5_INTR_STATUS); 475 readl(h->vaddr + SA5_INTR_STATUS);
480 dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value);
481 return register_value & SA5_INTR_PENDING; 476 return register_value & SA5_INTR_PENDING;
482} 477}
483 478
@@ -514,7 +509,7 @@ static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
514static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q) 509static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
515{ 510{
516 u64 register_value; 511 u64 register_value;
517 struct reply_pool *rq = &h->reply_queue[q]; 512 struct reply_queue_buffer *rq = &h->reply_queue[q];
518 unsigned long flags; 513 unsigned long flags;
519 514
520 BUG_ON(q >= h->nreply_queues); 515 BUG_ON(q >= h->nreply_queues);
@@ -572,6 +567,14 @@ static struct access_method SA5_performant_access = {
572 SA5_performant_completed, 567 SA5_performant_completed,
573}; 568};
574 569
570static struct access_method SA5_performant_access_no_read = {
571 SA5_submit_command_no_read,
572 SA5_performant_intr_mask,
573 SA5_fifo_full,
574 SA5_performant_intr_pending,
575 SA5_performant_completed,
576};
577
575struct board_type { 578struct board_type {
576 u32 board_id; 579 u32 board_id;
577 char *product_name; 580 char *product_name;
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index b5cc7052339f..b5125dc31439 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -151,7 +151,7 @@
151#define HPSA_VPD_HEADER_SZ 4 151#define HPSA_VPD_HEADER_SZ 4
152 152
153/* Logical volume states */ 153/* Logical volume states */
154#define HPSA_VPD_LV_STATUS_UNSUPPORTED -1 154#define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff
155#define HPSA_LV_OK 0x0 155#define HPSA_LV_OK 0x0
156#define HPSA_LV_UNDERGOING_ERASE 0x0F 156#define HPSA_LV_UNDERGOING_ERASE 0x0F
157#define HPSA_LV_UNDERGOING_RPI 0x12 157#define HPSA_LV_UNDERGOING_RPI 0x12
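[editor's note] The switch from -1 to 0xff matters because the LV status lands in a u8; after integer promotion a u8 can never equal -1, so the old sentinel could never match. A standalone illustration (not driver code):

#include <stdio.h>

#define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff

int main(void)
{
	unsigned char status = 0xff;	/* byte as read from the VPD page */

	/* status promotes to int 255, which never equals -1 */
	printf("status == -1:   %d\n", status == -1);
	printf("status == 0xff: %d\n",
	       status == HPSA_VPD_LV_STATUS_UNSUPPORTED);
	return 0;
}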
@@ -238,11 +238,21 @@ struct ReportLUNdata {
238 u8 LUN[HPSA_MAX_LUN][8]; 238 u8 LUN[HPSA_MAX_LUN][8];
239}; 239};
240 240
241struct ext_report_lun_entry {
242 u8 lunid[8];
243 u8 wwid[8];
244 u8 device_type;
245 u8 device_flags;
246 u8 lun_count; /* multi-lun device, how many luns */
247 u8 redundant_paths;
248 u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */
249};
250
241struct ReportExtendedLUNdata { 251struct ReportExtendedLUNdata {
242 u8 LUNListLength[4]; 252 u8 LUNListLength[4];
243 u8 extended_response_flag; 253 u8 extended_response_flag;
244 u8 reserved[3]; 254 u8 reserved[3];
245 u8 LUN[HPSA_MAX_LUN][24]; 255 struct ext_report_lun_entry LUN[HPSA_MAX_LUN];
246}; 256};
247 257
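[editor's note] Replacing the raw u8 LUN[HPSA_MAX_LUN][24] rows with struct ext_report_lun_entry lets callers name the fields instead of computing byte offsets. An illustrative accessor (not part of the patch), honoring the note that ioaccel1 consumes only the low 16 bits of the handle:

static u16 ext_entry_ioaccel1_handle(const struct ext_report_lun_entry *e)
{
	/* ioaccel mode 1 only uses the lower 16 bits */
	return (u16)(e->ioaccel_handle & 0xffff);
}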
248struct SenseSubsystem_info { 258struct SenseSubsystem_info {
@@ -375,6 +385,7 @@ struct ctlr_info; /* defined in hpsa.h */
375 * or a bus address. 385 * or a bus address.
376 */ 386 */
377 387
388#define COMMANDLIST_ALIGNMENT 128
378struct CommandList { 389struct CommandList {
379 struct CommandListHeader Header; 390 struct CommandListHeader Header;
380 struct RequestBlock Request; 391 struct RequestBlock Request;
@@ -389,21 +400,7 @@ struct CommandList {
389 struct list_head list; 400 struct list_head list;
390 struct completion *waiting; 401 struct completion *waiting;
391 void *scsi_cmd; 402 void *scsi_cmd;
392 403} __aligned(COMMANDLIST_ALIGNMENT);
393/* on 64 bit architectures, to get this to be 32-byte-aligned
394 * it so happens we need PAD_64 bytes of padding, on 32 bit systems,
395 * we need PAD_32 bytes of padding (see below). This does that.
396 * If it happens that 64 bit and 32 bit systems need different
 397 * padding, PAD_32 and PAD_64 can be set independently, and
398 * the code below will do the right thing.
399 */
400#define IS_32_BIT ((8 - sizeof(long))/4)
401#define IS_64_BIT (!IS_32_BIT)
402#define PAD_32 (40)
403#define PAD_64 (12)
404#define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
405 u8 pad[COMMANDLIST_PAD];
406};
407 404
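[editor's note] Dropping the hand-computed PAD_32/PAD_64 members in favor of __aligned() works because a type's size is always rounded up to a multiple of its alignment, on 32-bit and 64-bit builds alike. A standalone illustration of that guarantee:

#define COMMANDLIST_ALIGNMENT 128

struct example {
	long a;
	char b;
} __attribute__((aligned(COMMANDLIST_ALIGNMENT)));

/* holds on any conforming compiler, regardless of word size */
_Static_assert(sizeof(struct example) % COMMANDLIST_ALIGNMENT == 0,
	       "aligned structs are padded out to their alignment");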
408/* Max S/G elements in I/O accelerator command */ 405/* Max S/G elements in I/O accelerator command */
409#define IOACCEL1_MAXSGENTRIES 24 406#define IOACCEL1_MAXSGENTRIES 24
@@ -413,6 +410,7 @@ struct CommandList {
413 * Structure for I/O accelerator (mode 1) commands. 410 * Structure for I/O accelerator (mode 1) commands.
414 * Note that this structure must be 128-byte aligned in size. 411 * Note that this structure must be 128-byte aligned in size.
415 */ 412 */
413#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
416struct io_accel1_cmd { 414struct io_accel1_cmd {
417 u16 dev_handle; /* 0x00 - 0x01 */ 415 u16 dev_handle; /* 0x00 - 0x01 */
418 u8 reserved1; /* 0x02 */ 416 u8 reserved1; /* 0x02 */
@@ -440,12 +438,7 @@ struct io_accel1_cmd {
440 struct vals32 host_addr; /* 0x70 - 0x77 */ 438 struct vals32 host_addr; /* 0x70 - 0x77 */
441 u8 CISS_LUN[8]; /* 0x78 - 0x7F */ 439 u8 CISS_LUN[8]; /* 0x78 - 0x7F */
442 struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES]; 440 struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
443#define IOACCEL1_PAD_64 0 441} __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);
444#define IOACCEL1_PAD_32 0
445#define IOACCEL1_PAD (IS_32_BIT * IOACCEL1_PAD_32 + \
446 IS_64_BIT * IOACCEL1_PAD_64)
447 u8 pad[IOACCEL1_PAD];
448};
449 442
450#define IOACCEL1_FUNCTION_SCSIIO 0x00 443#define IOACCEL1_FUNCTION_SCSIIO 0x00
451#define IOACCEL1_SGLOFFSET 32 444#define IOACCEL1_SGLOFFSET 32
@@ -510,14 +503,11 @@ struct io_accel2_scsi_response {
510 u8 sense_data_buff[32]; /* sense/response data buffer */ 503 u8 sense_data_buff[32]; /* sense/response data buffer */
511}; 504};
512 505
513#define IOACCEL2_64_PAD 76
514#define IOACCEL2_32_PAD 76
515#define IOACCEL2_PAD (IS_32_BIT * IOACCEL2_32_PAD + \
516 IS_64_BIT * IOACCEL2_64_PAD)
517/* 506/*
518 * Structure for I/O accelerator (mode 2 or m2) commands. 507 * Structure for I/O accelerator (mode 2 or m2) commands.
519 * Note that this structure must be 128-byte aligned in size. 508 * Note that this structure must be 128-byte aligned in size.
520 */ 509 */
510#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
521struct io_accel2_cmd { 511struct io_accel2_cmd {
522 u8 IU_type; /* IU Type */ 512 u8 IU_type; /* IU Type */
523 u8 direction; /* direction, memtype, and encryption */ 513 u8 direction; /* direction, memtype, and encryption */
@@ -544,8 +534,7 @@ struct io_accel2_cmd {
544 u32 tweak_upper; /* Encryption tweak, upper 4 bytes */ 534 u32 tweak_upper; /* Encryption tweak, upper 4 bytes */
545 struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES]; 535 struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
546 struct io_accel2_scsi_response error_data; 536 struct io_accel2_scsi_response error_data;
547 u8 pad[IOACCEL2_PAD]; 537} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
548};
549 538
550/* 539/*
551 * defines for Mode 2 command struct 540 * defines for Mode 2 command struct
@@ -636,7 +625,7 @@ struct TransTable_struct {
636 u32 RepQCount; 625 u32 RepQCount;
637 u32 RepQCtrAddrLow32; 626 u32 RepQCtrAddrLow32;
638 u32 RepQCtrAddrHigh32; 627 u32 RepQCtrAddrHigh32;
639#define MAX_REPLY_QUEUES 8 628#define MAX_REPLY_QUEUES 64
640 struct vals32 RepQAddr[MAX_REPLY_QUEUES]; 629 struct vals32 RepQAddr[MAX_REPLY_QUEUES];
641}; 630};
642 631
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 2ebfb2bb0f42..7b23f21f22f1 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -185,6 +185,11 @@ static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
185 if (crq->valid & 0x80) { 185 if (crq->valid & 0x80) {
186 if (++queue->cur == queue->size) 186 if (++queue->cur == queue->size)
187 queue->cur = 0; 187 queue->cur = 0;
188
189 /* Ensure the read of the valid bit occurs before reading any
190 * other bits of the CRQ entry
191 */
192 rmb();
188 } else 193 } else
189 crq = NULL; 194 crq = NULL;
190 spin_unlock_irqrestore(&queue->lock, flags); 195 spin_unlock_irqrestore(&queue->lock, flags);
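[editor's note] The barrier pairs a producer that publishes the valid bit last with a consumer that must not let payload reads be speculated ahead of the valid-bit test. A sketch of the consumer pattern this hunk enforces (handle_crq_entry is an illustrative name, not from the driver):

crq = &queue->msgs[queue->cur];
if (crq->valid & 0x80) {	/* 1: see the entry become valid */
	rmb();			/* 2: order payload reads after the test */
	handle_crq_entry(crq);	/* 3: payload is now safe to read */
}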
@@ -203,6 +208,11 @@ static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
203{ 208{
204 struct vio_dev *vdev = to_vio_dev(hostdata->dev); 209 struct vio_dev *vdev = to_vio_dev(hostdata->dev);
205 210
211 /*
212 * Ensure the command buffer is flushed to memory before handing it
213 * over to the VIOS to prevent it from fetching any stale data.
214 */
215 mb();
206 return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2); 216 return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
207} 217}
208 218
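[editor's note] This is the producer half of the pairing above: the SRP IU sits in a DMA buffer that the CRQ merely points at, so it must be globally visible before the hcall invites the VIOS to fetch it. Sketched from the surrounding driver flow, with illustrative variable names:

memcpy(evt_struct->xfer_iu, &evt_struct->iu, sizeof(evt_struct->iu));
/* the mb() inside ibmvscsi_send_crq() orders the IU writes above
 * before the H_SEND_CRQ hcall below */
rc = ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1]);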
@@ -797,7 +807,8 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
797 evt->hostdata->dev); 807 evt->hostdata->dev);
798 if (evt->cmnd_done) 808 if (evt->cmnd_done)
799 evt->cmnd_done(evt->cmnd); 809 evt->cmnd_done(evt->cmnd);
800 } else if (evt->done) 810 } else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT &&
811 evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
801 evt->done(evt); 812 evt->done(evt);
802 free_event_struct(&evt->hostdata->pool, evt); 813 free_event_struct(&evt->hostdata->pool, evt);
803 spin_lock_irqsave(hostdata->host->host_lock, flags); 814 spin_lock_irqsave(hostdata->host->host_lock, flags);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 11854845393b..a669f2d11c31 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -244,7 +244,7 @@ iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn)
244 sk->sk_data_ready = tcp_sw_conn->old_data_ready; 244 sk->sk_data_ready = tcp_sw_conn->old_data_ready;
245 sk->sk_state_change = tcp_sw_conn->old_state_change; 245 sk->sk_state_change = tcp_sw_conn->old_state_change;
246 sk->sk_write_space = tcp_sw_conn->old_write_space; 246 sk->sk_write_space = tcp_sw_conn->old_write_space;
247 sk->sk_no_check = 0; 247 sk->sk_no_check_tx = 0;
248 write_unlock_bh(&sk->sk_callback_lock); 248 write_unlock_bh(&sk->sk_callback_lock);
249} 249}
250 250
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 26dc005bb0f0..3d1bc67bac9d 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -338,7 +338,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
338 struct iscsi_session *session = conn->session; 338 struct iscsi_session *session = conn->session;
339 struct scsi_cmnd *sc = task->sc; 339 struct scsi_cmnd *sc = task->sc;
340 struct iscsi_scsi_req *hdr; 340 struct iscsi_scsi_req *hdr;
341 unsigned hdrlength, cmd_len; 341 unsigned hdrlength, cmd_len, transfer_length;
342 itt_t itt; 342 itt_t itt;
343 int rc; 343 int rc;
344 344
@@ -391,11 +391,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
391 if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) 391 if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
392 task->protected = true; 392 task->protected = true;
393 393
394 transfer_length = scsi_transfer_length(sc);
395 hdr->data_length = cpu_to_be32(transfer_length);
394 if (sc->sc_data_direction == DMA_TO_DEVICE) { 396 if (sc->sc_data_direction == DMA_TO_DEVICE) {
395 unsigned out_len = scsi_out(sc)->length;
396 struct iscsi_r2t_info *r2t = &task->unsol_r2t; 397 struct iscsi_r2t_info *r2t = &task->unsol_r2t;
397 398
398 hdr->data_length = cpu_to_be32(out_len);
399 hdr->flags |= ISCSI_FLAG_CMD_WRITE; 399 hdr->flags |= ISCSI_FLAG_CMD_WRITE;
400 /* 400 /*
401 * Write counters: 401 * Write counters:
@@ -414,18 +414,19 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
414 memset(r2t, 0, sizeof(*r2t)); 414 memset(r2t, 0, sizeof(*r2t));
415 415
416 if (session->imm_data_en) { 416 if (session->imm_data_en) {
417 if (out_len >= session->first_burst) 417 if (transfer_length >= session->first_burst)
418 task->imm_count = min(session->first_burst, 418 task->imm_count = min(session->first_burst,
419 conn->max_xmit_dlength); 419 conn->max_xmit_dlength);
420 else 420 else
421 task->imm_count = min(out_len, 421 task->imm_count = min(transfer_length,
422 conn->max_xmit_dlength); 422 conn->max_xmit_dlength);
423 hton24(hdr->dlength, task->imm_count); 423 hton24(hdr->dlength, task->imm_count);
424 } else 424 } else
425 zero_data(hdr->dlength); 425 zero_data(hdr->dlength);
426 426
427 if (!session->initial_r2t_en) { 427 if (!session->initial_r2t_en) {
428 r2t->data_length = min(session->first_burst, out_len) - 428 r2t->data_length = min(session->first_burst,
429 transfer_length) -
429 task->imm_count; 430 task->imm_count;
430 r2t->data_offset = task->imm_count; 431 r2t->data_offset = task->imm_count;
431 r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG); 432 r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
@@ -438,7 +439,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
438 } else { 439 } else {
439 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 440 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
440 zero_data(hdr->dlength); 441 zero_data(hdr->dlength);
441 hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
442 442
443 if (sc->sc_data_direction == DMA_FROM_DEVICE) 443 if (sc->sc_data_direction == DMA_FROM_DEVICE)
444 hdr->flags |= ISCSI_FLAG_CMD_READ; 444 hdr->flags |= ISCSI_FLAG_CMD_READ;
@@ -466,7 +466,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
466 scsi_bidi_cmnd(sc) ? "bidirectional" : 466 scsi_bidi_cmnd(sc) ? "bidirectional" :
467 sc->sc_data_direction == DMA_TO_DEVICE ? 467 sc->sc_data_direction == DMA_TO_DEVICE ?
468 "write" : "read", conn->id, sc, sc->cmnd[0], 468 "write" : "read", conn->id, sc, sc->cmnd[0],
469 task->itt, scsi_bufflen(sc), 469 task->itt, transfer_length,
470 scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0, 470 scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
471 session->cmdsn, 471 session->cmdsn,
472 session->max_cmdsn - session->exp_cmdsn + 1); 472 session->max_cmdsn - session->exp_cmdsn + 1);
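[editor's note] scsi_transfer_length() is what makes these hunks matter for protected commands: with T10 DIF each 512-byte sector carries 8 extra protection bytes on the wire, so the expected data transfer length in the iSCSI header must exceed scsi_bufflen(). Illustrative arithmetic only, assuming 512-byte sectors:

unsigned int data_len = scsi_bufflen(sc);	/* e.g. 4096 bytes */
unsigned int sectors  = data_len >> 9;		/* 8 sectors */
unsigned int wire_len = data_len + sectors * 8;	/* 4160 with DIF */
/* scsi_transfer_length(sc) yields wire_len when protection is active
 * and plain data_len otherwise */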
@@ -1442,9 +1442,9 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
1442 conn->task = NULL; 1442 conn->task = NULL;
1443 } 1443 }
1444 /* regular RX path uses back_lock */ 1444 /* regular RX path uses back_lock */
1445 spin_lock_bh(&conn->session->back_lock); 1445 spin_lock(&conn->session->back_lock);
1446 __iscsi_put_task(task); 1446 __iscsi_put_task(task);
1447 spin_unlock_bh(&conn->session->back_lock); 1447 spin_unlock(&conn->session->back_lock);
1448 return rc; 1448 return rc;
1449} 1449}
1450 1450
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 94a3cafe7197..434e9037908e 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -640,6 +640,7 @@ struct lpfc_hba {
640#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ 640#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
641#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */ 641#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
642#define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */ 642#define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */
643#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */
643 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ 644 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
644 struct lpfc_dmabuf slim2p; 645 struct lpfc_dmabuf slim2p;
645 646
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 8d5b6ceec9c9..1d7a5c34ee8c 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -919,10 +919,15 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
919 phba->cfg_sriov_nr_virtfn = 0; 919 phba->cfg_sriov_nr_virtfn = 0;
920 } 920 }
921 921
922 if (opcode == LPFC_FW_DUMP)
923 phba->hba_flag |= HBA_FW_DUMP_OP;
924
922 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); 925 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
923 926
924 if (status != 0) 927 if (status != 0) {
928 phba->hba_flag &= ~HBA_FW_DUMP_OP;
925 return status; 929 return status;
930 }
926 931
927 /* wait for the device to be quiesced before firmware reset */ 932 /* wait for the device to be quiesced before firmware reset */
928 msleep(100); 933 msleep(100);
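[editor's note] The consumer of HBA_FW_DUMP_OP is outside this excerpt; presumably the SLI-4 board reset path tests and clears the flag so the PCI function reset that would wipe the dump image is skipped. A hedged sketch only:

/* somewhere in the SLI-4 board reset path (sketch, not quoted) */
if (phba->hba_flag & HBA_FW_DUMP_OP) {
	phba->hba_flag &= ~HBA_FW_DUMP_OP;
	return rc;	/* leave the function un-reset, preserving the dump */
}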
@@ -2364,7 +2369,7 @@ lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
2364 uint8_t wwpn[WWN_SZ]; 2369 uint8_t wwpn[WWN_SZ];
2365 int rc; 2370 int rc;
2366 2371
2367 if (!phba->cfg_EnableXLane) 2372 if (!phba->cfg_fof)
2368 return -EPERM; 2373 return -EPERM;
2369 2374
2370 /* count may include a LF at end of string */ 2375 /* count may include a LF at end of string */
@@ -2432,7 +2437,7 @@ lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
2432 uint8_t wwpn[WWN_SZ]; 2437 uint8_t wwpn[WWN_SZ];
2433 int rc; 2438 int rc;
2434 2439
2435 if (!phba->cfg_EnableXLane) 2440 if (!phba->cfg_fof)
2436 return -EPERM; 2441 return -EPERM;
2437 2442
2438 /* count may include a LF at end of string */ 2443 /* count may include a LF at end of string */
@@ -2499,7 +2504,7 @@ lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
2499 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 2504 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2500 int val = 0; 2505 int val = 0;
2501 2506
2502 if (!phba->cfg_EnableXLane) 2507 if (!phba->cfg_fof)
2503 return -EPERM; 2508 return -EPERM;
2504 2509
2505 if (!isdigit(buf[0])) 2510 if (!isdigit(buf[0]))
@@ -2565,7 +2570,7 @@ lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
2565 2570
2566 int rc = 0; 2571 int rc = 0;
2567 2572
2568 if (!phba->cfg_EnableXLane) 2573 if (!phba->cfg_fof)
2569 return -EPERM; 2574 return -EPERM;
2570 2575
2571 if (oas_state) { 2576 if (oas_state) {
@@ -2670,7 +2675,7 @@ lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
2670 uint64_t oas_lun; 2675 uint64_t oas_lun;
2671 int len = 0; 2676 int len = 0;
2672 2677
2673 if (!phba->cfg_EnableXLane) 2678 if (!phba->cfg_fof)
2674 return -EPERM; 2679 return -EPERM;
2675 2680
2676 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) 2681 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
@@ -2716,7 +2721,7 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
2716 uint64_t scsi_lun; 2721 uint64_t scsi_lun;
2717 ssize_t rc; 2722 ssize_t rc;
2718 2723
2719 if (!phba->cfg_EnableXLane) 2724 if (!phba->cfg_fof)
2720 return -EPERM; 2725 return -EPERM;
2721 2726
2722 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) 2727 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
@@ -4655,7 +4660,7 @@ LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
4655# 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits) 4660# 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits)
4656# Value range is [0x0,0x7f]. Default value is 0 4661# Value range is [0x0,0x7f]. Default value is 0
4657*/ 4662*/
4658LPFC_ATTR_R(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature."); 4663LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
4659 4664
4660/* 4665/*
4661# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF) 4666# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index ca2f4ea7cdef..5b5c825d9576 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2009-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index a94d4c9dfaa5..928ef609f363 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2010-2012 Emulex. All rights reserved. * 4 * Copyright (C) 2010-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index adda0bf7a244..db5604f01a1a 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -289,6 +289,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
289void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); 289void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
290void lpfc_sli_bemem_bcopy(void *, void *, uint32_t); 290void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
291void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); 291void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
292void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba);
292void lpfc_sli_hba_iocb_abort(struct lpfc_hba *); 293void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
293void lpfc_sli_flush_fcp_rings(struct lpfc_hba *); 294void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
294int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, 295int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
@@ -310,6 +311,9 @@ int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
310int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd); 311int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd);
311int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t, 312int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
312 uint64_t, lpfc_ctx_cmd); 313 uint64_t, lpfc_ctx_cmd);
314int
315lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *,
316 uint16_t, uint64_t, lpfc_ctx_cmd);
313 317
314void lpfc_mbox_timeout(unsigned long); 318void lpfc_mbox_timeout(unsigned long);
315void lpfc_mbox_timeout_handler(struct lpfc_hba *); 319void lpfc_mbox_timeout_handler(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 828c08e9389e..b0aedce3f54b 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2007-2012 Emulex. All rights reserved. * 4 * Copyright (C) 2007-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -2314,7 +2314,7 @@ proc_cq:
2314 goto too_big; 2314 goto too_big;
2315 } 2315 }
2316 2316
2317 if (phba->cfg_EnableXLane) { 2317 if (phba->cfg_fof) {
2318 2318
2319 /* OAS CQ */ 2319 /* OAS CQ */
2320 qp = phba->sli4_hba.oas_cq; 2320 qp = phba->sli4_hba.oas_cq;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 624fe0b3cc0b..7a5d81a65be8 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 294c072e9083..2a17e31265b8 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -5634,6 +5634,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5634 ndlp->active_rrqs_xri_bitmap = 5634 ndlp->active_rrqs_xri_bitmap =
5635 mempool_alloc(vport->phba->active_rrq_pool, 5635 mempool_alloc(vport->phba->active_rrq_pool,
5636 GFP_KERNEL); 5636 GFP_KERNEL);
5637 if (ndlp->active_rrqs_xri_bitmap)
5638 memset(ndlp->active_rrqs_xri_bitmap, 0,
5639 ndlp->phba->cfg_rrq_xri_bitmap_sz);
5637 } 5640 }
5638 5641
5639 5642
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 3d9438ce59ab..236259252379 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index fd79f7de7666..f432ec180cf8 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2009-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 635eeb3d6987..06f9a5b79e66 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -820,57 +820,153 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
820} 820}
821 821
822/** 822/**
823 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset 823 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
824 * rspiocb which got deferred
825 *
824 * @phba: pointer to lpfc HBA data structure. 826 * @phba: pointer to lpfc HBA data structure.
825 * 827 *
826 * This routine will do uninitialization after the HBA is reset when bring 828 * This routine will cleanup completed slow path events after HBA is reset
827 * down the SLI Layer. 829 * when bringing down the SLI Layer.
830 *
828 * 831 *
829 * Return codes 832 * Return codes
830 * 0 - success. 833 * void.
831 * Any other value - error.
832 **/ 834 **/
833static int 835static void
834lpfc_hba_down_post_s3(struct lpfc_hba *phba) 836lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
837{
838 struct lpfc_iocbq *rspiocbq;
839 struct hbq_dmabuf *dmabuf;
840 struct lpfc_cq_event *cq_event;
841
842 spin_lock_irq(&phba->hbalock);
843 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
844 spin_unlock_irq(&phba->hbalock);
845
846 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
847 /* Get the response iocb from the head of work queue */
848 spin_lock_irq(&phba->hbalock);
849 list_remove_head(&phba->sli4_hba.sp_queue_event,
850 cq_event, struct lpfc_cq_event, list);
851 spin_unlock_irq(&phba->hbalock);
852
853 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
854 case CQE_CODE_COMPL_WQE:
855 rspiocbq = container_of(cq_event, struct lpfc_iocbq,
856 cq_event);
857 lpfc_sli_release_iocbq(phba, rspiocbq);
858 break;
859 case CQE_CODE_RECEIVE:
860 case CQE_CODE_RECEIVE_V1:
861 dmabuf = container_of(cq_event, struct hbq_dmabuf,
862 cq_event);
863 lpfc_in_buf_free(phba, &dmabuf->dbuf);
864 }
865 }
866}
867
868/**
869 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
870 * @phba: pointer to lpfc HBA data structure.
871 *
872 * This routine will cleanup posted ELS buffers after the HBA is reset
873 * when bringing down the SLI Layer.
874 *
875 *
876 * Return codes
877 * void.
878 **/
879static void
880lpfc_hba_free_post_buf(struct lpfc_hba *phba)
835{ 881{
836 struct lpfc_sli *psli = &phba->sli; 882 struct lpfc_sli *psli = &phba->sli;
837 struct lpfc_sli_ring *pring; 883 struct lpfc_sli_ring *pring;
838 struct lpfc_dmabuf *mp, *next_mp; 884 struct lpfc_dmabuf *mp, *next_mp;
839 LIST_HEAD(completions); 885 LIST_HEAD(buflist);
840 int i; 886 int count;
841 887
842 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 888 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
843 lpfc_sli_hbqbuf_free_all(phba); 889 lpfc_sli_hbqbuf_free_all(phba);
844 else { 890 else {
845 /* Cleanup preposted buffers on the ELS ring */ 891 /* Cleanup preposted buffers on the ELS ring */
846 pring = &psli->ring[LPFC_ELS_RING]; 892 pring = &psli->ring[LPFC_ELS_RING];
847 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 893 spin_lock_irq(&phba->hbalock);
894 list_splice_init(&pring->postbufq, &buflist);
895 spin_unlock_irq(&phba->hbalock);
896
897 count = 0;
898 list_for_each_entry_safe(mp, next_mp, &buflist, list) {
848 list_del(&mp->list); 899 list_del(&mp->list);
849 pring->postbufq_cnt--; 900 count++;
850 lpfc_mbuf_free(phba, mp->virt, mp->phys); 901 lpfc_mbuf_free(phba, mp->virt, mp->phys);
851 kfree(mp); 902 kfree(mp);
852 } 903 }
904
905 spin_lock_irq(&phba->hbalock);
906 pring->postbufq_cnt -= count;
907 spin_unlock_irq(&phba->hbalock);
853 } 908 }
909}
910
911/**
912 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
913 * @phba: pointer to lpfc HBA data structure.
914 *
915 * This routine will cleanup the txcmplq after the HBA is reset when bringing
916 * down the SLI Layer.
917 *
918 * Return codes
919 * void
920 **/
921static void
922lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
923{
924 struct lpfc_sli *psli = &phba->sli;
925 struct lpfc_sli_ring *pring;
926 LIST_HEAD(completions);
927 int i;
854 928
855 spin_lock_irq(&phba->hbalock);
856 for (i = 0; i < psli->num_rings; i++) { 929 for (i = 0; i < psli->num_rings; i++) {
857 pring = &psli->ring[i]; 930 pring = &psli->ring[i];
858 931 if (phba->sli_rev >= LPFC_SLI_REV4)
932 spin_lock_irq(&pring->ring_lock);
933 else
934 spin_lock_irq(&phba->hbalock);
859 /* At this point in time the HBA is either reset or DOA. Either 935 /* At this point in time the HBA is either reset or DOA. Either
860 * way, nothing should be on txcmplq as it will NEVER complete. 936 * way, nothing should be on txcmplq as it will NEVER complete.
861 */ 937 */
862 list_splice_init(&pring->txcmplq, &completions); 938 list_splice_init(&pring->txcmplq, &completions);
863 spin_unlock_irq(&phba->hbalock); 939 pring->txcmplq_cnt = 0;
940
941 if (phba->sli_rev >= LPFC_SLI_REV4)
942 spin_unlock_irq(&pring->ring_lock);
943 else
944 spin_unlock_irq(&phba->hbalock);
864 945
865 /* Cancel all the IOCBs from the completions list */ 946 /* Cancel all the IOCBs from the completions list */
866 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 947 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
867 IOERR_SLI_ABORTED); 948 IOERR_SLI_ABORTED);
868
869 lpfc_sli_abort_iocb_ring(phba, pring); 949 lpfc_sli_abort_iocb_ring(phba, pring);
870 spin_lock_irq(&phba->hbalock);
871 } 950 }
872 spin_unlock_irq(&phba->hbalock); 951}
873 952
953/**
954 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
955 int i;
956 * @phba: pointer to lpfc HBA data structure.
957 *
958 * This routine will do uninitialization after the HBA is reset when bring
959 * down the SLI Layer.
960 *
961 * Return codes
962 * 0 - success.
963 * Any other value - error.
964 **/
965static int
966lpfc_hba_down_post_s3(struct lpfc_hba *phba)
967{
968 lpfc_hba_free_post_buf(phba);
969 lpfc_hba_clean_txcmplq(phba);
874 return 0; 970 return 0;
875} 971}
876 972
@@ -890,13 +986,12 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
890{ 986{
891 struct lpfc_scsi_buf *psb, *psb_next; 987 struct lpfc_scsi_buf *psb, *psb_next;
892 LIST_HEAD(aborts); 988 LIST_HEAD(aborts);
893 int ret;
894 unsigned long iflag = 0; 989 unsigned long iflag = 0;
895 struct lpfc_sglq *sglq_entry = NULL; 990 struct lpfc_sglq *sglq_entry = NULL;
896 991
897 ret = lpfc_hba_down_post_s3(phba); 992 lpfc_hba_free_post_buf(phba);
898 if (ret) 993 lpfc_hba_clean_txcmplq(phba);
899 return ret; 994
900 /* At this point in time the HBA is either reset or DOA. Either 995 /* At this point in time the HBA is either reset or DOA. Either
901 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be 996 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
902 * on the lpfc_sgl_list so that it can either be freed if the 997 * on the lpfc_sgl_list so that it can either be freed if the
@@ -932,6 +1027,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
932 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); 1027 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
933 list_splice(&aborts, &phba->lpfc_scsi_buf_list_put); 1028 list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
934 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); 1029 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
1030
1031 lpfc_sli4_free_sp_events(phba);
935 return 0; 1032 return 0;
936} 1033}
937 1034
@@ -1250,7 +1347,6 @@ static void
1250lpfc_handle_deferred_eratt(struct lpfc_hba *phba) 1347lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1251{ 1348{
1252 uint32_t old_host_status = phba->work_hs; 1349 uint32_t old_host_status = phba->work_hs;
1253 struct lpfc_sli_ring *pring;
1254 struct lpfc_sli *psli = &phba->sli; 1350 struct lpfc_sli *psli = &phba->sli;
1255 1351
1256 /* If the pci channel is offline, ignore possible errors, 1352 /* If the pci channel is offline, ignore possible errors,
@@ -1279,8 +1375,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1279 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the 1375 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
1280 * SCSI layer retry it after re-establishing link. 1376 * SCSI layer retry it after re-establishing link.
1281 */ 1377 */
1282 pring = &psli->ring[psli->fcp_ring]; 1378 lpfc_sli_abort_fcp_rings(phba);
1283 lpfc_sli_abort_iocb_ring(phba, pring);
1284 1379
1285 /* 1380 /*
1286 * There was a firmware error. Take the hba offline and then 1381 * There was a firmware error. Take the hba offline and then
@@ -1348,7 +1443,6 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1348{ 1443{
1349 struct lpfc_vport *vport = phba->pport; 1444 struct lpfc_vport *vport = phba->pport;
1350 struct lpfc_sli *psli = &phba->sli; 1445 struct lpfc_sli *psli = &phba->sli;
1351 struct lpfc_sli_ring *pring;
1352 uint32_t event_data; 1446 uint32_t event_data;
1353 unsigned long temperature; 1447 unsigned long temperature;
1354 struct temp_event temp_event_data; 1448 struct temp_event temp_event_data;
@@ -1400,8 +1494,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1400 * Error iocb (I/O) on txcmplq and let the SCSI layer 1494 * Error iocb (I/O) on txcmplq and let the SCSI layer
1401 * retry it after re-establishing link. 1495 * retry it after re-establishing link.
1402 */ 1496 */
1403 pring = &psli->ring[psli->fcp_ring]; 1497 lpfc_sli_abort_fcp_rings(phba);
1404 lpfc_sli_abort_iocb_ring(phba, pring);
1405 1498
1406 /* 1499 /*
1407 * There was a firmware error. Take the hba offline and then 1500 * There was a firmware error. Take the hba offline and then
@@ -1940,78 +2033,81 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1940 2033
1941 switch (dev_id) { 2034 switch (dev_id) {
1942 case PCI_DEVICE_ID_FIREFLY: 2035 case PCI_DEVICE_ID_FIREFLY:
1943 m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"}; 2036 m = (typeof(m)){"LP6000", "PCI",
2037 "Obsolete, Unsupported Fibre Channel Adapter"};
1944 break; 2038 break;
1945 case PCI_DEVICE_ID_SUPERFLY: 2039 case PCI_DEVICE_ID_SUPERFLY:
1946 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2040 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1947 m = (typeof(m)){"LP7000", "PCI", 2041 m = (typeof(m)){"LP7000", "PCI", ""};
1948 "Fibre Channel Adapter"};
1949 else 2042 else
1950 m = (typeof(m)){"LP7000E", "PCI", 2043 m = (typeof(m)){"LP7000E", "PCI", ""};
1951 "Fibre Channel Adapter"}; 2044 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
1952 break; 2045 break;
1953 case PCI_DEVICE_ID_DRAGONFLY: 2046 case PCI_DEVICE_ID_DRAGONFLY:
1954 m = (typeof(m)){"LP8000", "PCI", 2047 m = (typeof(m)){"LP8000", "PCI",
1955 "Fibre Channel Adapter"}; 2048 "Obsolete, Unsupported Fibre Channel Adapter"};
1956 break; 2049 break;
1957 case PCI_DEVICE_ID_CENTAUR: 2050 case PCI_DEVICE_ID_CENTAUR:
1958 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2051 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1959 m = (typeof(m)){"LP9002", "PCI", 2052 m = (typeof(m)){"LP9002", "PCI", ""};
1960 "Fibre Channel Adapter"};
1961 else 2053 else
1962 m = (typeof(m)){"LP9000", "PCI", 2054 m = (typeof(m)){"LP9000", "PCI", ""};
1963 "Fibre Channel Adapter"}; 2055 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
1964 break; 2056 break;
1965 case PCI_DEVICE_ID_RFLY: 2057 case PCI_DEVICE_ID_RFLY:
1966 m = (typeof(m)){"LP952", "PCI", 2058 m = (typeof(m)){"LP952", "PCI",
1967 "Fibre Channel Adapter"}; 2059 "Obsolete, Unsupported Fibre Channel Adapter"};
1968 break; 2060 break;
1969 case PCI_DEVICE_ID_PEGASUS: 2061 case PCI_DEVICE_ID_PEGASUS:
1970 m = (typeof(m)){"LP9802", "PCI-X", 2062 m = (typeof(m)){"LP9802", "PCI-X",
1971 "Fibre Channel Adapter"}; 2063 "Obsolete, Unsupported Fibre Channel Adapter"};
1972 break; 2064 break;
1973 case PCI_DEVICE_ID_THOR: 2065 case PCI_DEVICE_ID_THOR:
1974 m = (typeof(m)){"LP10000", "PCI-X", 2066 m = (typeof(m)){"LP10000", "PCI-X",
1975 "Fibre Channel Adapter"}; 2067 "Obsolete, Unsupported Fibre Channel Adapter"};
1976 break; 2068 break;
1977 case PCI_DEVICE_ID_VIPER: 2069 case PCI_DEVICE_ID_VIPER:
1978 m = (typeof(m)){"LPX1000", "PCI-X", 2070 m = (typeof(m)){"LPX1000", "PCI-X",
1979 "Fibre Channel Adapter"}; 2071 "Obsolete, Unsupported Fibre Channel Adapter"};
1980 break; 2072 break;
1981 case PCI_DEVICE_ID_PFLY: 2073 case PCI_DEVICE_ID_PFLY:
1982 m = (typeof(m)){"LP982", "PCI-X", 2074 m = (typeof(m)){"LP982", "PCI-X",
1983 "Fibre Channel Adapter"}; 2075 "Obsolete, Unsupported Fibre Channel Adapter"};
1984 break; 2076 break;
1985 case PCI_DEVICE_ID_TFLY: 2077 case PCI_DEVICE_ID_TFLY:
1986 m = (typeof(m)){"LP1050", "PCI-X", 2078 m = (typeof(m)){"LP1050", "PCI-X",
1987 "Fibre Channel Adapter"}; 2079 "Obsolete, Unsupported Fibre Channel Adapter"};
1988 break; 2080 break;
1989 case PCI_DEVICE_ID_HELIOS: 2081 case PCI_DEVICE_ID_HELIOS:
1990 m = (typeof(m)){"LP11000", "PCI-X2", 2082 m = (typeof(m)){"LP11000", "PCI-X2",
1991 "Fibre Channel Adapter"}; 2083 "Obsolete, Unsupported Fibre Channel Adapter"};
1992 break; 2084 break;
1993 case PCI_DEVICE_ID_HELIOS_SCSP: 2085 case PCI_DEVICE_ID_HELIOS_SCSP:
1994 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2086 m = (typeof(m)){"LP11000-SP", "PCI-X2",
1995 "Fibre Channel Adapter"}; 2087 "Obsolete, Unsupported Fibre Channel Adapter"};
1996 break; 2088 break;
1997 case PCI_DEVICE_ID_HELIOS_DCSP: 2089 case PCI_DEVICE_ID_HELIOS_DCSP:
1998 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2090 m = (typeof(m)){"LP11002-SP", "PCI-X2",
1999 "Fibre Channel Adapter"}; 2091 "Obsolete, Unsupported Fibre Channel Adapter"};
2000 break; 2092 break;
2001 case PCI_DEVICE_ID_NEPTUNE: 2093 case PCI_DEVICE_ID_NEPTUNE:
2002 m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"}; 2094 m = (typeof(m)){"LPe1000", "PCIe",
2095 "Obsolete, Unsupported Fibre Channel Adapter"};
2003 break; 2096 break;
2004 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2097 case PCI_DEVICE_ID_NEPTUNE_SCSP:
2005 m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"}; 2098 m = (typeof(m)){"LPe1000-SP", "PCIe",
2099 "Obsolete, Unsupported Fibre Channel Adapter"};
2006 break; 2100 break;
2007 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2101 case PCI_DEVICE_ID_NEPTUNE_DCSP:
2008 m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"}; 2102 m = (typeof(m)){"LPe1002-SP", "PCIe",
2103 "Obsolete, Unsupported Fibre Channel Adapter"};
2009 break; 2104 break;
2010 case PCI_DEVICE_ID_BMID: 2105 case PCI_DEVICE_ID_BMID:
2011 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2106 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2012 break; 2107 break;
2013 case PCI_DEVICE_ID_BSMB: 2108 case PCI_DEVICE_ID_BSMB:
2014 m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"}; 2109 m = (typeof(m)){"LP111", "PCI-X2",
2110 "Obsolete, Unsupported Fibre Channel Adapter"};
2015 break; 2111 break;
2016 case PCI_DEVICE_ID_ZEPHYR: 2112 case PCI_DEVICE_ID_ZEPHYR:
2017 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2113 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
@@ -2030,16 +2126,20 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2030 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2126 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2031 break; 2127 break;
2032 case PCI_DEVICE_ID_LP101: 2128 case PCI_DEVICE_ID_LP101:
2033 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; 2129 m = (typeof(m)){"LP101", "PCI-X",
2130 "Obsolete, Unsupported Fibre Channel Adapter"};
2034 break; 2131 break;
2035 case PCI_DEVICE_ID_LP10000S: 2132 case PCI_DEVICE_ID_LP10000S:
2036 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; 2133 m = (typeof(m)){"LP10000-S", "PCI",
2134 "Obsolete, Unsupported Fibre Channel Adapter"};
2037 break; 2135 break;
2038 case PCI_DEVICE_ID_LP11000S: 2136 case PCI_DEVICE_ID_LP11000S:
2039 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; 2137 m = (typeof(m)){"LP11000-S", "PCI-X2",
2138 "Obsolete, Unsupported Fibre Channel Adapter"};
2040 break; 2139 break;
2041 case PCI_DEVICE_ID_LPE11000S: 2140 case PCI_DEVICE_ID_LPE11000S:
2042 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; 2141 m = (typeof(m)){"LPe11000-S", "PCIe",
2142 "Obsolete, Unsupported Fibre Channel Adapter"};
2043 break; 2143 break;
2044 case PCI_DEVICE_ID_SAT: 2144 case PCI_DEVICE_ID_SAT:
2045 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2145 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
@@ -2060,20 +2160,21 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2060 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2160 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2061 break; 2161 break;
2062 case PCI_DEVICE_ID_HORNET: 2162 case PCI_DEVICE_ID_HORNET:
2063 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; 2163 m = (typeof(m)){"LP21000", "PCIe",
2164 "Obsolete, Unsupported FCoE Adapter"};
2064 GE = 1; 2165 GE = 1;
2065 break; 2166 break;
2066 case PCI_DEVICE_ID_PROTEUS_VF: 2167 case PCI_DEVICE_ID_PROTEUS_VF:
2067 m = (typeof(m)){"LPev12000", "PCIe IOV", 2168 m = (typeof(m)){"LPev12000", "PCIe IOV",
2068 "Fibre Channel Adapter"}; 2169 "Obsolete, Unsupported Fibre Channel Adapter"};
2069 break; 2170 break;
2070 case PCI_DEVICE_ID_PROTEUS_PF: 2171 case PCI_DEVICE_ID_PROTEUS_PF:
2071 m = (typeof(m)){"LPev12000", "PCIe IOV", 2172 m = (typeof(m)){"LPev12000", "PCIe IOV",
2072 "Fibre Channel Adapter"}; 2173 "Obsolete, Unsupported Fibre Channel Adapter"};
2073 break; 2174 break;
2074 case PCI_DEVICE_ID_PROTEUS_S: 2175 case PCI_DEVICE_ID_PROTEUS_S:
2075 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2176 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2076 "Fibre Channel Adapter"}; 2177 "Obsolete, Unsupported Fibre Channel Adapter"};
2077 break; 2178 break;
2078 case PCI_DEVICE_ID_TIGERSHARK: 2179 case PCI_DEVICE_ID_TIGERSHARK:
2079 oneConnect = 1; 2180 oneConnect = 1;
@@ -2089,17 +2190,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2089 break; 2190 break;
2090 case PCI_DEVICE_ID_BALIUS: 2191 case PCI_DEVICE_ID_BALIUS:
2091 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2192 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2092 "Fibre Channel Adapter"}; 2193 "Obsolete, Unsupported Fibre Channel Adapter"};
2093 break; 2194 break;
2094 case PCI_DEVICE_ID_LANCER_FC: 2195 case PCI_DEVICE_ID_LANCER_FC:
2095 case PCI_DEVICE_ID_LANCER_FC_VF:
2096 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; 2196 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2097 break; 2197 break;
2198 case PCI_DEVICE_ID_LANCER_FC_VF:
2199 m = (typeof(m)){"LPe16000", "PCIe",
2200 "Obsolete, Unsupported Fibre Channel Adapter"};
2201 break;
2098 case PCI_DEVICE_ID_LANCER_FCOE: 2202 case PCI_DEVICE_ID_LANCER_FCOE:
2099 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2100 oneConnect = 1; 2203 oneConnect = 1;
2101 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2204 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2102 break; 2205 break;
2206 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2207 oneConnect = 1;
2208 m = (typeof(m)){"OCe15100", "PCIe",
2209 "Obsolete, Unsupported FCoE"};
2210 break;
2103 case PCI_DEVICE_ID_SKYHAWK: 2211 case PCI_DEVICE_ID_SKYHAWK:
2104 case PCI_DEVICE_ID_SKYHAWK_VF: 2212 case PCI_DEVICE_ID_SKYHAWK_VF:
2105 oneConnect = 1; 2213 oneConnect = 1;
@@ -4614,7 +4722,10 @@ lpfc_reset_hba(struct lpfc_hba *phba)
4614 phba->link_state = LPFC_HBA_ERROR; 4722 phba->link_state = LPFC_HBA_ERROR;
4615 return; 4723 return;
4616 } 4724 }
4617 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 4725 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
4726 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
4727 else
4728 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
4618 lpfc_offline(phba); 4729 lpfc_offline(phba);
4619 lpfc_sli_brdrestart(phba); 4730 lpfc_sli_brdrestart(phba);
4620 lpfc_online(phba); 4731 lpfc_online(phba);
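Editor's note: the hunk above makes lpfc_reset_hba() pick the mailbox-wait mode from the SLI state. When the SLI layer is still active the offline prep can afford to wait for outstanding mailbox commands; on a dead port it must not block. A minimal userspace model of that decision (the flag value and the stub body are illustrative assumptions; only the branch shape comes from the diff):

#include <stdio.h>

#define LPFC_SLI_ACTIVE  0x200   /* illustrative value, not the real bit */

enum lpfc_mbx_wait { LPFC_MBX_WAIT, LPFC_MBX_NO_WAIT };

struct lpfc_sli { unsigned int sli_flag; };
struct lpfc_hba { struct lpfc_sli sli; };

/* stub: in the driver this quiesces the port, flushing mailboxes if asked */
static void lpfc_offline_prep(struct lpfc_hba *phba, enum lpfc_mbx_wait mode)
{
	printf("offline prep, %s for mailbox commands\n",
	       mode == LPFC_MBX_WAIT ? "waiting" : "not waiting");
}

static void reset_hba_model(struct lpfc_hba *phba)
{
	/* mirrors the patched branch in lpfc_reset_hba() */
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	else
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
}

int main(void)
{
	struct lpfc_hba hba = { .sli = { .sli_flag = LPFC_SLI_ACTIVE } };
	reset_hba_model(&hba);          /* SLI active: wait */
	hba.sli.sli_flag = 0;
	reset_hba_model(&hba);          /* SLI dead: don't block */
	return 0;
}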
@@ -9663,9 +9774,6 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
9663static void 9774static void
9664lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 9775lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
9665{ 9776{
9666 struct lpfc_sli *psli = &phba->sli;
9667 struct lpfc_sli_ring *pring;
9668
9669 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9777 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9670 "2723 PCI channel I/O abort preparing for recovery\n"); 9778 "2723 PCI channel I/O abort preparing for recovery\n");
9671 9779
@@ -9673,8 +9781,7 @@ lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
9673 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 9781 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
9674 * and let the SCSI mid-layer retry them to recover. 9782
9675 */ 9783 */
9676 pring = &psli->ring[psli->fcp_ring]; 9784 lpfc_sli_abort_fcp_rings(phba);
9677 lpfc_sli_abort_iocb_ring(phba, pring);
9678} 9785}
9679 9786
9680/** 9787/**
@@ -10417,17 +10524,13 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
10417static void 10524static void
10418lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 10525lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
10419{ 10526{
10420 struct lpfc_sli *psli = &phba->sli;
10421 struct lpfc_sli_ring *pring;
10422
10423 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10527 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10424 "2828 PCI channel I/O abort preparing for recovery\n"); 10528 "2828 PCI channel I/O abort preparing for recovery\n");
10425 /* 10529 /*
10426 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 10530 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
10427 * and let the SCSI mid-layer retry them to recover. 10531
10428 */ 10532 */
10429 pring = &psli->ring[psli->fcp_ring]; 10533 lpfc_sli_abort_fcp_rings(phba);
10430 lpfc_sli_abort_iocb_ring(phba, pring);
10431} 10534}
10432 10535
10433/** 10536/**
@@ -10898,7 +11001,7 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba)
10898 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 11001 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
10899 phba->cfg_fof = 1; 11002 phba->cfg_fof = 1;
10900 } else { 11003 } else {
10901 phba->cfg_EnableXLane = 0; 11004 phba->cfg_fof = 0;
10902 if (phba->device_data_mem_pool) 11005 if (phba->device_data_mem_pool)
10903 mempool_destroy(phba->device_data_mem_pool); 11006 mempool_destroy(phba->device_data_mem_pool);
10904 phba->device_data_mem_pool = NULL; 11007 phba->device_data_mem_pool = NULL;
@@ -10928,7 +11031,7 @@ lpfc_fof_queue_setup(struct lpfc_hba *phba)
10928 if (rc) 11031 if (rc)
10929 return -ENOMEM; 11032 return -ENOMEM;
10930 11033
10931 if (phba->cfg_EnableXLane) { 11034 if (phba->cfg_fof) {
10932 11035
10933 rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq, 11036 rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
10934 phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP); 11037 phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
@@ -10947,8 +11050,7 @@ lpfc_fof_queue_setup(struct lpfc_hba *phba)
10947 return 0; 11050 return 0;
10948 11051
10949out_oas_wq: 11052out_oas_wq:
10950 if (phba->cfg_EnableXLane) 11053 lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
10951 lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
10952out_oas_cq: 11054out_oas_cq:
10953 lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq); 11055 lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
10954 return rc; 11056 return rc;
@@ -10982,7 +11084,7 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
10982 11084
10983 phba->sli4_hba.fof_eq = qdesc; 11085 phba->sli4_hba.fof_eq = qdesc;
10984 11086
10985 if (phba->cfg_EnableXLane) { 11087 if (phba->cfg_fof) {
10986 11088
10987 /* Create OAS CQ */ 11089 /* Create OAS CQ */
10988 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 11090 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
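Editor's note: both fof-queue hunks above rely on the same goto-unwind idiom, where each failure label tears down only the objects created before the failing step. That is why the patch can drop the cfg_EnableXLane guard in front of lpfc_cq_destroy(): the out_oas_wq label is only reachable once the OAS CQ exists. A compilable sketch of that shape with stubbed create/destroy calls (all stub names and return values are assumptions):

#include <stdio.h>

/* stubs standing in for lpfc_eq_create()/lpfc_cq_create()/lpfc_wq_create() */
static int create_eq(void) { return 0; }
static int create_cq(void) { return 0; }
static int create_wq(void) { return -1; }   /* force the error path */
static void destroy_cq(void) { puts("destroy cq"); }
static void destroy_eq(void) { puts("destroy eq"); }

/* goto-unwind shape of lpfc_fof_queue_setup(): each label undoes only
 * what was successfully created before the jump to it */
static int fof_queue_setup_model(int cfg_fof)
{
	int rc = create_eq();
	if (rc)
		return rc;
	if (cfg_fof) {
		rc = create_cq();
		if (rc)
			goto out_oas_cq;
		rc = create_wq();
		if (rc)
			goto out_oas_wq;
	}
	return 0;

out_oas_wq:
	destroy_cq();                   /* only reachable with cfg_fof set */
out_oas_cq:
	destroy_eq();
	return rc;
}

int main(void)
{
	printf("setup rc=%d\n", fof_queue_setup_model(1));
	return 0;
}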
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index ed419aad2b1f..3fa65338d3f5 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2012 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 462453ee0bda..2df11daad85b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -73,7 +73,7 @@ lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
73{ 73{
74 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata; 74 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
75 75
76 if (vport->phba->cfg_EnableXLane) 76 if (vport->phba->cfg_fof)
77 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data; 77 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
78 else 78 else
79 return (struct lpfc_rport_data *)sdev->hostdata; 79 return (struct lpfc_rport_data *)sdev->hostdata;
@@ -3462,7 +3462,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3462 * If the OAS driver feature is enabled and the lun is enabled for 3462 * If the OAS driver feature is enabled and the lun is enabled for
3463 * OAS, set the oas iocb related flags. 3463 * OAS, set the oas iocb related flags.
3464 */ 3464 */
3465 if ((phba->cfg_EnableXLane) && ((struct lpfc_device_data *) 3465 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3466 scsi_cmnd->device->hostdata)->oas_enabled) 3466 scsi_cmnd->device->hostdata)->oas_enabled)
3467 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS; 3467 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS;
3468 return 0; 3468 return 0;
@@ -4314,6 +4314,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
4314 fcp_cmnd->fcpCntl1 = SIMPLE_Q; 4314 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4315 4315
4316 sli4 = (phba->sli_rev == LPFC_SLI_REV4); 4316 sli4 = (phba->sli_rev == LPFC_SLI_REV4);
4317 piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
4317 4318
4318 /* 4319 /*
4319 * There are three possibilities here - use scatter-gather segment, use 4320 * There are three possibilities here - use scatter-gather segment, use
@@ -4782,7 +4783,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4782 struct lpfc_scsi_buf *lpfc_cmd; 4783 struct lpfc_scsi_buf *lpfc_cmd;
4783 IOCB_t *cmd, *icmd; 4784 IOCB_t *cmd, *icmd;
4784 int ret = SUCCESS, status = 0; 4785 int ret = SUCCESS, status = 0;
4785 unsigned long flags; 4786 struct lpfc_sli_ring *pring_s4;
4787 int ring_number, ret_val;
4788 unsigned long flags, iflags;
4786 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 4789 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
4787 4790
4788 status = fc_block_scsi_eh(cmnd); 4791 status = fc_block_scsi_eh(cmnd);
@@ -4833,6 +4836,14 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4833 4836
4834 BUG_ON(iocb->context1 != lpfc_cmd); 4837 BUG_ON(iocb->context1 != lpfc_cmd);
4835 4838
4839 /* abort issued in recovery is still in progress */
4840 if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
4841 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4842 "3389 SCSI Layer I/O Abort Request is pending\n");
4843 spin_unlock_irqrestore(&phba->hbalock, flags);
4844 goto wait_for_cmpl;
4845 }
4846
4836 abtsiocb = __lpfc_sli_get_iocbq(phba); 4847 abtsiocb = __lpfc_sli_get_iocbq(phba);
4837 if (abtsiocb == NULL) { 4848 if (abtsiocb == NULL) {
4838 ret = FAILED; 4849 ret = FAILED;
@@ -4871,11 +4882,23 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4871 4882
4872 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 4883 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4873 abtsiocb->vport = vport; 4884 abtsiocb->vport = vport;
4885 if (phba->sli_rev == LPFC_SLI_REV4) {
4886 ring_number = MAX_SLI3_CONFIGURED_RINGS + iocb->fcp_wqidx;
4887 pring_s4 = &phba->sli.ring[ring_number];
4888 /* Note: both hbalock and ring_lock must be set here */
4889 spin_lock_irqsave(&pring_s4->ring_lock, iflags);
4890 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
4891 abtsiocb, 0);
4892 spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
4893 } else {
4894 ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4895 abtsiocb, 0);
4896 }
4874 /* no longer need the lock after this point */ 4897 /* no longer need the lock after this point */
4875 spin_unlock_irqrestore(&phba->hbalock, flags); 4898 spin_unlock_irqrestore(&phba->hbalock, flags);
4876 4899
4877 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) == 4900
4878 IOCB_ERROR) { 4901 if (ret_val == IOCB_ERROR) {
4879 lpfc_sli_release_iocbq(phba, abtsiocb); 4902 lpfc_sli_release_iocbq(phba, abtsiocb);
4880 ret = FAILED; 4903 ret = FAILED;
4881 goto out; 4904 goto out;
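Editor's note: the SLI-4 branch added above routes the ABTS to the same work queue the original command was issued on (fcp_wqidx) and takes that ring's ring_lock on top of the adapter hbalock the handler already holds. A userspace model of the routing and per-ring locking, using pthread mutexes in place of kernel spinlocks (the ring count and constant value are illustrative):

#include <pthread.h>
#include <stdio.h>

#define MAX_SLI3_CONFIGURED_RINGS 4   /* illustrative, mirrors the diff's idea */

struct ring { int ringno; pthread_mutex_t ring_lock; };

/* stub for __lpfc_sli_issue_iocb(): caller must hold the right lock(s) */
static int issue_iocb_locked(struct ring *r)
{
	printf("abort issued on ring %d\n", r->ringno);
	return 0;
}

/* The patched abort handler sends the ABTS down the queue that carried
 * the original command, holding that ring's private lock while doing so. */
static int issue_abort_model(struct ring *rings, int sli_rev4, int fcp_wqidx)
{
	int ret;

	if (sli_rev4) {
		struct ring *r = &rings[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
		pthread_mutex_lock(&r->ring_lock);
		ret = issue_iocb_locked(r);
		pthread_mutex_unlock(&r->ring_lock);
	} else {
		ret = issue_iocb_locked(&rings[0]);   /* the single FCP ring */
	}
	return ret;
}

int main(void)
{
	struct ring rings[8];
	for (int i = 0; i < 8; i++) {
		rings[i].ringno = i;
		pthread_mutex_init(&rings[i].ring_lock, NULL);
	}
	return issue_abort_model(rings, 1, 2);
}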
@@ -4885,12 +4908,16 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4885 lpfc_sli_handle_fast_ring_event(phba, 4908 lpfc_sli_handle_fast_ring_event(phba,
4886 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); 4909 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4887 4910
4911wait_for_cmpl:
4888 lpfc_cmd->waitq = &waitq; 4912 lpfc_cmd->waitq = &waitq;
4889 /* Wait for abort to complete */ 4913 /* Wait for abort to complete */
4890 wait_event_timeout(waitq, 4914 wait_event_timeout(waitq,
4891 (lpfc_cmd->pCmd != cmnd), 4915 (lpfc_cmd->pCmd != cmnd),
4892 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); 4916 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
4917
4918 spin_lock_irqsave(shost->host_lock, flags);
4893 lpfc_cmd->waitq = NULL; 4919 lpfc_cmd->waitq = NULL;
4920 spin_unlock_irqrestore(shost->host_lock, flags);
4894 4921
4895 if (lpfc_cmd->pCmd == cmnd) { 4922 if (lpfc_cmd->pCmd == cmnd) {
4896 ret = FAILED; 4923 ret = FAILED;
@@ -5172,8 +5199,9 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
5172 5199
5173 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); 5200 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5174 if (cnt) 5201 if (cnt)
5175 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 5202 lpfc_sli_abort_taskmgmt(vport,
5176 tgt_id, lun_id, context); 5203 &phba->sli.ring[phba->sli.fcp_ring],
5204 tgt_id, lun_id, context);
5177 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; 5205 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5178 while (time_after(later, jiffies) && cnt) { 5206 while (time_after(later, jiffies) && cnt) {
5179 schedule_timeout_uninterruptible(msecs_to_jiffies(20)); 5207 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
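Editor's note: lpfc_reset_flush_io_context() above fires the task-management aborts and then polls the outstanding-iocb count every 20 ms until it drains or twice the devloss timeout expires. A portable sketch of that wait loop (the draining stub stands in for lpfc_sli_sum_iocb(); jiffies arithmetic is replaced by CLOCK_MONOTONIC):

#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* stub: pretend the outstanding I/O count drains over successive polls */
static int pending = 5;
static int sum_outstanding(void) { return pending > 0 ? pending-- : 0; }

/* poll every 20 ms, as the driver does, until the count hits zero or
 * the deadline passes; a nonzero return means the flush failed */
static int flush_wait_model(int timeout_ms)
{
	struct timespec start, now;
	clock_gettime(CLOCK_MONOTONIC, &start);
	int cnt = sum_outstanding();

	while (cnt) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		long elapsed = (now.tv_sec - start.tv_sec) * 1000 +
			       (now.tv_nsec - start.tv_nsec) / 1000000;
		if (elapsed >= timeout_ms)
			break;
		usleep(20 * 1000);              /* 20 ms */
		cnt = sum_outstanding();
	}
	return cnt;
}

int main(void)
{
	printf("left over: %d\n", flush_wait_model(1000));
	return 0;
}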
@@ -5491,7 +5519,7 @@ lpfc_slave_alloc(struct scsi_device *sdev)
5491 if (!rport || fc_remote_port_chkready(rport)) 5519 if (!rport || fc_remote_port_chkready(rport))
5492 return -ENXIO; 5520 return -ENXIO;
5493 5521
5494 if (phba->cfg_EnableXLane) { 5522 if (phba->cfg_fof) {
5495 5523
5496 /* 5524 /*
5497 * Check to see if the device data structure for the lun 5525 * Check to see if the device data structure for the lun
@@ -5616,7 +5644,7 @@ lpfc_slave_destroy(struct scsi_device *sdev)
5616 struct lpfc_device_data *device_data = sdev->hostdata; 5644 struct lpfc_device_data *device_data = sdev->hostdata;
5617 5645
5618 atomic_dec(&phba->sdev_cnt); 5646 atomic_dec(&phba->sdev_cnt);
5619 if ((phba->cfg_EnableXLane) && (device_data)) { 5647 if ((phba->cfg_fof) && (device_data)) {
5620 spin_lock_irqsave(&phba->devicelock, flags); 5648 spin_lock_irqsave(&phba->devicelock, flags);
5621 device_data->available = false; 5649 device_data->available = false;
5622 if (!device_data->oas_enabled) 5650 if (!device_data->oas_enabled)
@@ -5655,7 +5683,7 @@ lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5655 int memory_flags; 5683 int memory_flags;
5656 5684
5657 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 5685 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5658 !(phba->cfg_EnableXLane)) 5686 !(phba->cfg_fof))
5659 return NULL; 5687 return NULL;
5660 5688
5661 /* Attempt to create the device data to contain lun info */ 5689 /* Attempt to create the device data to contain lun info */
@@ -5693,7 +5721,7 @@ lpfc_delete_device_data(struct lpfc_hba *phba,
5693{ 5721{
5694 5722
5695 if (unlikely(!phba) || !lun_info || 5723 if (unlikely(!phba) || !lun_info ||
5696 !(phba->cfg_EnableXLane)) 5724 !(phba->cfg_fof))
5697 return; 5725 return;
5698 5726
5699 if (!list_empty(&lun_info->listentry)) 5727 if (!list_empty(&lun_info->listentry))
@@ -5727,7 +5755,7 @@ __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
5727 struct lpfc_device_data *lun_info; 5755 struct lpfc_device_data *lun_info;
5728 5756
5729 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn || 5757 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
5730 !phba->cfg_EnableXLane) 5758 !phba->cfg_fof)
5731 return NULL; 5759 return NULL;
5732 5760
5733 /* Check to see if the lun is already enabled for OAS. */ 5761 /* Check to see if the lun is already enabled for OAS. */
@@ -5789,7 +5817,7 @@ lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5789 !starting_lun || !found_vport_wwpn || 5817 !starting_lun || !found_vport_wwpn ||
5790 !found_target_wwpn || !found_lun || !found_lun_status || 5818 !found_target_wwpn || !found_lun || !found_lun_status ||
5791 (*starting_lun == NO_MORE_OAS_LUN) || 5819 (*starting_lun == NO_MORE_OAS_LUN) ||
5792 !phba->cfg_EnableXLane) 5820 !phba->cfg_fof)
5793 return false; 5821 return false;
5794 5822
5795 lun = *starting_lun; 5823 lun = *starting_lun;
@@ -5873,7 +5901,7 @@ lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5873 unsigned long flags; 5901 unsigned long flags;
5874 5902
5875 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 5903 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5876 !phba->cfg_EnableXLane) 5904 !phba->cfg_fof)
5877 return false; 5905 return false;
5878 5906
5879 spin_lock_irqsave(&phba->devicelock, flags); 5907 spin_lock_irqsave(&phba->devicelock, flags);
@@ -5930,7 +5958,7 @@ lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5930 unsigned long flags; 5958 unsigned long flags;
5931 5959
5932 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 5960 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5933 !phba->cfg_EnableXLane) 5961 !phba->cfg_fof)
5934 return false; 5962 return false;
5935 5963
5936 spin_lock_irqsave(&phba->devicelock, flags); 5964 spin_lock_irqsave(&phba->devicelock, flags);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 0120bfccf50b..0389ac1e7b83 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 6bb51f8e3c1b..32ada0505576 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -265,6 +265,16 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
265 return NULL; 265 return NULL;
266 266
267 q->hba_index = idx; 267 q->hba_index = idx;
268
269 /*
270 * insert barrier for instruction interlock: data from the hardware
271 * must have the valid bit checked before it can be copied and acted
272 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
273 * instructions allowing action on content before valid bit checked,
274 * add barrier here as well. May not be needed as "content" is a
275 * single 32-bit entity here (vs multi word structure for cq's).
276 */
277 mb();
268 return eqe; 278 return eqe;
269} 279}
270 280
@@ -370,6 +380,17 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
370 380
371 cqe = q->qe[q->hba_index].cqe; 381 cqe = q->qe[q->hba_index].cqe;
372 q->hba_index = idx; 382 q->hba_index = idx;
383
384 /*
385 * insert barrier for instruction interlock: data from the hardware
386 * must have the valid bit checked before it can be copied and acted
387 * upon. Speculative instructions were allowing a bcopy at the start
388 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
389 * after our return, to copy data before the valid bit check above
390 * was done. As such, some of the copied data was stale. The barrier
391 * ensures the check is before any data is copied.
392 */
393 mb();
373 return cqe; 394 return cqe;
374} 395}
375 396
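Editor's note: the two mb() insertions above exist because the CPU was observed speculatively loading completion-entry payload words before the load that checks the entry's valid bit, so stale data could be copied. In portable C11 the same one-way ordering can be modeled with an acquire load of the valid flag paired with a release store on the producer side; this is an analogy for illustration, not the kernel's mb() itself:

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

/* Model of a DMA'd completion entry: payload words plus a valid bit.
 * The producer writes the payload first, then sets valid. */
struct cqe {
	unsigned int payload[3];
	atomic_uint valid;
};

/* The consumer must order the valid-bit load before any payload load,
 * otherwise the CPU may copy stale payload words. That is the failure
 * the barrier in lpfc_sli4_cq_get() guards against; an acquire load
 * gives the equivalent one-way fence in portable C11. */
static int read_cqe(struct cqe *q, unsigned int out[3])
{
	if (!atomic_load_explicit(&q->valid, memory_order_acquire))
		return 0;                       /* nothing posted yet */
	memcpy(out, q->payload, sizeof(q->payload));
	return 1;
}

int main(void)
{
	struct cqe q = { .payload = {1, 2, 3} };
	atomic_store_explicit(&q.valid, 1, memory_order_release);
	unsigned int out[3];
	if (read_cqe(&q, out))
		printf("cqe: %u %u %u\n", out[0], out[1], out[2]);
	return 0;
}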
@@ -3511,14 +3532,27 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3511 /* Error everything on txq and txcmplq 3532 /* Error everything on txq and txcmplq
3512 * First do the txq. 3533 * First do the txq.
3513 */ 3534 */
3514 spin_lock_irq(&phba->hbalock); 3535 if (phba->sli_rev >= LPFC_SLI_REV4) {
3515 list_splice_init(&pring->txq, &completions); 3536 spin_lock_irq(&pring->ring_lock);
3537 list_splice_init(&pring->txq, &completions);
3538 pring->txq_cnt = 0;
3539 spin_unlock_irq(&pring->ring_lock);
3516 3540
3517 /* Next issue ABTS for everything on the txcmplq */ 3541 spin_lock_irq(&phba->hbalock);
3518 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3542 /* Next issue ABTS for everything on the txcmplq */
3519 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3543 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3544 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3545 spin_unlock_irq(&phba->hbalock);
3546 } else {
3547 spin_lock_irq(&phba->hbalock);
3548 list_splice_init(&pring->txq, &completions);
3549 pring->txq_cnt = 0;
3520 3550
3521 spin_unlock_irq(&phba->hbalock); 3551 /* Next issue ABTS for everything on the txcmplq */
3552 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3553 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3554 spin_unlock_irq(&phba->hbalock);
3555 }
3522 3556
3523 /* Cancel all the IOCBs from the completions list */ 3557 /* Cancel all the IOCBs from the completions list */
3524 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 3558 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
@@ -3526,6 +3560,36 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3526} 3560}
3527 3561
3528/** 3562/**
3563 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3564 * @phba: Pointer to HBA context object.
3566 *
3567 * This function aborts all iocbs in FCP rings and frees all the iocb
3568 * objects in txq. This function issues an abort iocb for all the iocb commands
3569 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3570 * the return of this function. The caller is not required to hold any locks.
3571 **/
3572void
3573lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3574{
3575 struct lpfc_sli *psli = &phba->sli;
3576 struct lpfc_sli_ring *pring;
3577 uint32_t i;
3578
3579 /* Look on all the FCP Rings for the iotag */
3580 if (phba->sli_rev >= LPFC_SLI_REV4) {
3581 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3582 pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
3583 lpfc_sli_abort_iocb_ring(phba, pring);
3584 }
3585 } else {
3586 pring = &psli->ring[psli->fcp_ring];
3587 lpfc_sli_abort_iocb_ring(phba, pring);
3588 }
3589}
3590
3591
3592/**
3529 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring 3593 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
3530 * @phba: Pointer to HBA context object. 3594 * @phba: Pointer to HBA context object.
3531 * 3595 *
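Editor's note: the new lpfc_sli_abort_fcp_rings() above encodes the ring layout directly. On SLI-4 each FCP I/O channel owns a ring parked after the MAX_SLI3_CONFIGURED_RINGS slots, so the abort walks all channels; SLI-3 still has its single FCP ring. The indexing, reduced to a compilable sketch (constant values are illustrative):

#include <stdio.h>

#define MAX_SLI3_CONFIGURED_RINGS 4     /* illustrative constant */
#define LPFC_SLI_REV4 4

/* stub for lpfc_sli_abort_iocb_ring() */
static void abort_ring(int ringno) { printf("abort ring %d\n", ringno); }

/* SLI-4: one FCP ring per I/O channel, placed after the SLI-3 slots;
 * SLI-3: a single FCP ring */
static void abort_fcp_rings_model(int sli_rev, int fcp_io_channels,
				  int sli3_fcp_ring)
{
	if (sli_rev >= LPFC_SLI_REV4) {
		for (int i = 0; i < fcp_io_channels; i++)
			abort_ring(MAX_SLI3_CONFIGURED_RINGS + i);
	} else {
		abort_ring(sli3_fcp_ring);
	}
}

int main(void)
{
	abort_fcp_rings_model(LPFC_SLI_REV4, 4, 0);
	return 0;
}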
@@ -3542,28 +3606,55 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3542 LIST_HEAD(txcmplq); 3606 LIST_HEAD(txcmplq);
3543 struct lpfc_sli *psli = &phba->sli; 3607 struct lpfc_sli *psli = &phba->sli;
3544 struct lpfc_sli_ring *pring; 3608 struct lpfc_sli_ring *pring;
3545 3609 uint32_t i;
3546 /* Currently, only one fcp ring */
3547 pring = &psli->ring[psli->fcp_ring];
3548 3610
3549 spin_lock_irq(&phba->hbalock); 3611 spin_lock_irq(&phba->hbalock);
3550 /* Retrieve everything on txq */
3551 list_splice_init(&pring->txq, &txq);
3552
3553 /* Retrieve everything on the txcmplq */
3554 list_splice_init(&pring->txcmplq, &txcmplq);
3555
3556 /* Indicate the I/O queues are flushed */ 3612 /* Indicate the I/O queues are flushed */
3557 phba->hba_flag |= HBA_FCP_IOQ_FLUSH; 3613 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
3558 spin_unlock_irq(&phba->hbalock); 3614 spin_unlock_irq(&phba->hbalock);
3559 3615
3560 /* Flush the txq */ 3616 /* Look on all the FCP Rings for the iotag */
3561 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 3617 if (phba->sli_rev >= LPFC_SLI_REV4) {
3562 IOERR_SLI_DOWN); 3618 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3619 pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
3620
3621 spin_lock_irq(&pring->ring_lock);
3622 /* Retrieve everything on txq */
3623 list_splice_init(&pring->txq, &txq);
3624 /* Retrieve everything on the txcmplq */
3625 list_splice_init(&pring->txcmplq, &txcmplq);
3626 pring->txq_cnt = 0;
3627 pring->txcmplq_cnt = 0;
3628 spin_unlock_irq(&pring->ring_lock);
3629
3630 /* Flush the txq */
3631 lpfc_sli_cancel_iocbs(phba, &txq,
3632 IOSTAT_LOCAL_REJECT,
3633 IOERR_SLI_DOWN);
3634 /* Flush the txcmpq */
3635 lpfc_sli_cancel_iocbs(phba, &txcmplq,
3636 IOSTAT_LOCAL_REJECT,
3637 IOERR_SLI_DOWN);
3638 }
3639 } else {
3640 pring = &psli->ring[psli->fcp_ring];
3563 3641
3564 /* Flush the txcmpq */ 3642 spin_lock_irq(&phba->hbalock);
3565 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 3643 /* Retrieve everything on txq */
3566 IOERR_SLI_DOWN); 3644 list_splice_init(&pring->txq, &txq);
3645 /* Retrieve everything on the txcmplq */
3646 list_splice_init(&pring->txcmplq, &txcmplq);
3647 pring->txq_cnt = 0;
3648 pring->txcmplq_cnt = 0;
3649 spin_unlock_irq(&phba->hbalock);
3650
3651 /* Flush the txq */
3652 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3653 IOERR_SLI_DOWN);
3654 /* Flush the txcmpq */
3655 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3656 IOERR_SLI_DOWN);
3657 }
3567} 3658}
3568 3659
3569/** 3660/**
@@ -3966,12 +4057,13 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3966{ 4057{
3967 struct lpfc_sli *psli = &phba->sli; 4058 struct lpfc_sli *psli = &phba->sli;
3968 uint16_t cfg_value; 4059 uint16_t cfg_value;
3969 int rc; 4060 int rc = 0;
3970 4061
3971 /* Reset HBA */ 4062 /* Reset HBA */
3972 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4063 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3973 "0295 Reset HBA Data: x%x x%x\n", 4064 "0295 Reset HBA Data: x%x x%x x%x\n",
3974 phba->pport->port_state, psli->sli_flag); 4065 phba->pport->port_state, psli->sli_flag,
4066 phba->hba_flag);
3975 4067
3976 /* perform board reset */ 4068 /* perform board reset */
3977 phba->fc_eventTag = 0; 4069 phba->fc_eventTag = 0;
@@ -3984,6 +4076,12 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3984 phba->fcf.fcf_flag = 0; 4076 phba->fcf.fcf_flag = 0;
3985 spin_unlock_irq(&phba->hbalock); 4077 spin_unlock_irq(&phba->hbalock);
3986 4078
4079 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4080 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4081 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4082 return rc;
4083 }
4084
3987 /* Now physically reset the device */ 4085 /* Now physically reset the device */
3988 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4086 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3989 "0389 Performing PCI function reset!\n"); 4087 "0389 Performing PCI function reset!\n");
@@ -4981,7 +5079,7 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4981 } while (++fcp_eqidx < phba->cfg_fcp_io_channel); 5079 } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
4982 } 5080 }
4983 5081
4984 if (phba->cfg_EnableXLane) 5082 if (phba->cfg_fof)
4985 lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM); 5083 lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
4986 5084
4987 if (phba->sli4_hba.hba_eq) { 5085 if (phba->sli4_hba.hba_eq) {
@@ -6701,7 +6799,6 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
6701 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 6799 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
6702 MAILBOX_t *mb = &pmbox->u.mb; 6800 MAILBOX_t *mb = &pmbox->u.mb;
6703 struct lpfc_sli *psli = &phba->sli; 6801 struct lpfc_sli *psli = &phba->sli;
6704 struct lpfc_sli_ring *pring;
6705 6802
6706 /* If the mailbox completed, process the completion and return */ 6803 /* If the mailbox completed, process the completion and return */
6707 if (lpfc_sli4_process_missed_mbox_completions(phba)) 6804 if (lpfc_sli4_process_missed_mbox_completions(phba))
@@ -6743,8 +6840,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
6743 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 6840 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
6744 spin_unlock_irq(&phba->hbalock); 6841 spin_unlock_irq(&phba->hbalock);
6745 6842
6746 pring = &psli->ring[psli->fcp_ring]; 6843 lpfc_sli_abort_fcp_rings(phba);
6747 lpfc_sli_abort_iocb_ring(phba, pring);
6748 6844
6749 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6845 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6750 "0345 Resetting board due to mailbox timeout\n"); 6846 "0345 Resetting board due to mailbox timeout\n");
@@ -8112,6 +8208,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8112 abort_tag = (uint32_t) iocbq->iotag; 8208 abort_tag = (uint32_t) iocbq->iotag;
8113 xritag = iocbq->sli4_xritag; 8209 xritag = iocbq->sli4_xritag;
8114 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ 8210 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
8211 wqe->generic.wqe_com.word10 = 0;
8115 /* words0-2 bpl convert bde */ 8212 /* words0-2 bpl convert bde */
8116 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 8213 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8117 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 8214 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
@@ -8618,8 +8715,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8618 8715
8619 if ((piocb->iocb_flag & LPFC_IO_FCP) || 8716 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8620 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 8717 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
8621 if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag & 8718 if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) {
8622 LPFC_IO_OAS))) {
8623 wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx]; 8719 wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
8624 } else { 8720 } else {
8625 wq = phba->sli4_hba.oas_wq; 8721 wq = phba->sli4_hba.oas_wq;
@@ -8714,7 +8810,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8714 8810
8715 if (phba->sli_rev == LPFC_SLI_REV4) { 8811 if (phba->sli_rev == LPFC_SLI_REV4) {
8716 if (piocb->iocb_flag & LPFC_IO_FCP) { 8812 if (piocb->iocb_flag & LPFC_IO_FCP) {
8717 if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag & 8813 if (!phba->cfg_fof || (!(piocb->iocb_flag &
8718 LPFC_IO_OAS))) { 8814 LPFC_IO_OAS))) {
8719 if (unlikely(!phba->sli4_hba.fcp_wq)) 8815 if (unlikely(!phba->sli4_hba.fcp_wq))
8720 return IOCB_ERROR; 8816 return IOCB_ERROR;
@@ -9149,6 +9245,7 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
9149 pring->sli.sli3.next_cmdidx = 0; 9245 pring->sli.sli3.next_cmdidx = 0;
9150 pring->sli.sli3.local_getidx = 0; 9246 pring->sli.sli3.local_getidx = 0;
9151 pring->sli.sli3.cmdidx = 0; 9247 pring->sli.sli3.cmdidx = 0;
9248 pring->flag = 0;
9152 INIT_LIST_HEAD(&pring->txq); 9249 INIT_LIST_HEAD(&pring->txq);
9153 INIT_LIST_HEAD(&pring->txcmplq); 9250 INIT_LIST_HEAD(&pring->txcmplq);
9154 INIT_LIST_HEAD(&pring->iocb_continueq); 9251 INIT_LIST_HEAD(&pring->iocb_continueq);
@@ -9784,43 +9881,6 @@ abort_iotag_exit:
9784} 9881}
9785 9882
9786/** 9883/**
9787 * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
9788 * @phba: Pointer to HBA context object.
9789 * @pring: Pointer to driver SLI ring object.
9790 *
9791 * This function aborts all iocbs in the given ring and frees all the iocb
9792 * objects in txq. This function issues abort iocbs unconditionally for all
9793 * the iocb commands in txcmplq. The iocbs in the txcmplq are not guaranteed
9794 * to complete before the return of this function. The caller is not required
9795 * to hold any locks.
9796 **/
9797static void
9798lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
9799{
9800 LIST_HEAD(completions);
9801 struct lpfc_iocbq *iocb, *next_iocb;
9802
9803 if (pring->ringno == LPFC_ELS_RING)
9804 lpfc_fabric_abort_hba(phba);
9805
9806 spin_lock_irq(&phba->hbalock);
9807
9808 /* Take off all the iocbs on txq for cancelling */
9809 list_splice_init(&pring->txq, &completions);
9810 pring->txq_cnt = 0;
9811
9812 /* Next issue ABTS for everything on the txcmplq */
9813 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
9814 lpfc_sli_abort_iotag_issue(phba, pring, iocb);
9815
9816 spin_unlock_irq(&phba->hbalock);
9817
9818 /* Cancel all the IOCBs from the completions list */
9819 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9820 IOERR_SLI_ABORTED);
9821}
9822
9823/**
9824 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 9884 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
9825 * @phba: pointer to lpfc HBA data structure. 9885 * @phba: pointer to lpfc HBA data structure.
9826 * 9886 *
@@ -9835,7 +9895,7 @@ lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
9835 9895
9836 for (i = 0; i < psli->num_rings; i++) { 9896 for (i = 0; i < psli->num_rings; i++) {
9837 pring = &psli->ring[i]; 9897 pring = &psli->ring[i];
9838 lpfc_sli_iocb_ring_abort(phba, pring); 9898 lpfc_sli_abort_iocb_ring(phba, pring);
9839 } 9899 }
9840} 9900}
9841 9901
@@ -10060,6 +10120,124 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
10060} 10120}
10061 10121
10062/** 10122/**
10123 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
10124 * @vport: Pointer to virtual port.
10125 * @pring: Pointer to driver SLI ring object.
10126 * @tgt_id: SCSI ID of the target.
10127 * @lun_id: LUN ID of the scsi device.
10128 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
10129 *
10130 * This function sends an abort command for every SCSI command
10131 * associated with the given virtual port pending on the ring
10132 * filtered by the lpfc_sli_validate_fcp_iocb function.
10133 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
10134 * FCP iocbs associated with lun specified by tgt_id and lun_id
10135 * parameters
10136 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
10137 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
10138 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
10139 * FCP iocbs associated with virtual port.
10140 * This function returns the number of iocbs it aborted.
10141 * This function is called with no locks held right after a taskmgmt
10142 * command is sent.
10143 **/
10144int
10145lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
10146 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
10147{
10148 struct lpfc_hba *phba = vport->phba;
10149 struct lpfc_iocbq *abtsiocbq;
10150 struct lpfc_iocbq *iocbq;
10151 IOCB_t *icmd;
10152 int sum, i, ret_val;
10153 unsigned long iflags;
10154 struct lpfc_sli_ring *pring_s4;
10155 uint32_t ring_number;
10156
10157 spin_lock_irq(&phba->hbalock);
10158
10159 /* all I/Os are in process of being flushed */
10160 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
10161 spin_unlock_irq(&phba->hbalock);
10162 return 0;
10163 }
10164 sum = 0;
10165
10166 for (i = 1; i <= phba->sli.last_iotag; i++) {
10167 iocbq = phba->sli.iocbq_lookup[i];
10168
10169 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
10170 cmd) != 0)
10171 continue;
10172
10173 /*
10174 * If the iocbq is already being aborted, don't take a second
10175 * action; skip it (it is not counted in the returned total).
10176 */
10177 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
10178 continue;
10179
10180 /* issue ABTS for this IOCB based on iotag */
10181 abtsiocbq = __lpfc_sli_get_iocbq(phba);
10182 if (abtsiocbq == NULL)
10183 continue;
10184
10185 icmd = &iocbq->iocb;
10186 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
10187 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
10188 if (phba->sli_rev == LPFC_SLI_REV4)
10189 abtsiocbq->iocb.un.acxri.abortIoTag =
10190 iocbq->sli4_xritag;
10191 else
10192 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
10193 abtsiocbq->iocb.ulpLe = 1;
10194 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
10195 abtsiocbq->vport = vport;
10196
10197 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
10198 abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx;
10199 if (iocbq->iocb_flag & LPFC_IO_FCP)
10200 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
10201
10202 if (lpfc_is_link_up(phba))
10203 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
10204 else
10205 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
10206
10207 /* Setup callback routine and issue the command. */
10208 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
10209
10210 /*
10211 * Indicate the IO is being aborted by the driver and set
10212 * the caller's flag into the aborted IO.
10213 */
10214 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
10215
10216 if (phba->sli_rev == LPFC_SLI_REV4) {
10217 ring_number = MAX_SLI3_CONFIGURED_RINGS +
10218 iocbq->fcp_wqidx;
10219 pring_s4 = &phba->sli.ring[ring_number];
10220 /* Note: both hbalock and ring_lock must be set here */
10221 spin_lock_irqsave(&pring_s4->ring_lock, iflags);
10222 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
10223 abtsiocbq, 0);
10224 spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
10225 } else {
10226 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
10227 abtsiocbq, 0);
10228 }
10229
10230
10231 if (ret_val == IOCB_ERROR)
10232 __lpfc_sli_release_iocbq(phba, abtsiocbq);
10233 else
10234 sum++;
10235 }
10236 spin_unlock_irq(&phba->hbalock);
10237 return sum;
10238}
10239
10240/**
10063 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 10241 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
10064 * @phba: Pointer to HBA context object. 10242 * @phba: Pointer to HBA context object.
10065 * @cmdiocbq: Pointer to command iocb. 10243 * @cmdiocbq: Pointer to command iocb.
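Editor's note: the core of lpfc_sli_abort_taskmgmt() above is a scan of the iotag lookup table that refuses to abort the same command twice. Entries already flagged LPFC_DRIVER_ABORTED are skipped; the rest are flagged, aborted, and tallied. A reduced, runnable model of that loop (validate() stands in for lpfc_sli_validate_fcp_iocb(); locking and actual iocb issuing are elided):

#include <stdio.h>

#define LPFC_DRIVER_ABORTED 0x1     /* illustrative flag bit */

struct iocbq { unsigned int flags; int matches; };

/* stub for lpfc_sli_validate_fcp_iocb(): 0 means the entry matches the
 * requested host/target/LUN context */
static int validate(struct iocbq *q) { return q->matches ? 0 : 1; }

/* scan the iotag table, skip entries with an abort already in flight,
 * mark and abort the rest, and return how many aborts were issued */
static int abort_taskmgmt_model(struct iocbq *tbl, int last_iotag)
{
	int sum = 0;

	for (int i = 1; i <= last_iotag; i++) {
		struct iocbq *q = &tbl[i];
		if (validate(q) != 0)
			continue;
		if (q->flags & LPFC_DRIVER_ABORTED)
			continue;       /* abort already in flight */
		q->flags |= LPFC_DRIVER_ABORTED;
		sum++;                  /* counts successfully issued aborts */
	}
	return sum;
}

int main(void)
{
	struct iocbq tbl[4] = {
		{0, 0}, {0, 1}, {LPFC_DRIVER_ABORTED, 1}, {0, 1},
	};
	printf("aborted %d\n", abort_taskmgmt_model(tbl, 3));  /* prints 2 */
	return 0;
}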
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 6f04080f4ea8..edb48832c39b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 9b8cda866176..7f50aa04d66a 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2009-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e32cbec70324..41675c1193e7 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2013 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.45" 21#define LPFC_DRIVER_VERSION "10.2.8001.0."
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23 23
24/* Used for SLI 2/3 */ 24/* Used for SLI 2/3 */
@@ -30,4 +30,4 @@
30 30
31#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ 31#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
32 LPFC_DRIVER_VERSION 32 LPFC_DRIVER_VERSION
33#define LPFC_COPYRIGHT "Copyright(c) 2004-2013 Emulex. All rights reserved." 33#define LPFC_COPYRIGHT "Copyright(c) 2004-2014 Emulex. All rights reserved."
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index f5cdc68cd5b6..6a039eb1cbce 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -25,10 +25,6 @@
25 * 1+ (800) 334-5454 25 * 1+ (800) 334-5454
26 */ 26 */
27 27
28/*
29 * $Log: mac_NCR5380.c,v $
30 */
31
32#include <linux/types.h> 28#include <linux/types.h>
33#include <linux/stddef.h> 29#include <linux/stddef.h>
34#include <linux/ctype.h> 30#include <linux/ctype.h>
@@ -58,12 +54,6 @@
58 54
59#include "NCR5380.h" 55#include "NCR5380.h"
60 56
61#if 0
62#define NDEBUG (NDEBUG_INTR | NDEBUG_PSEUDO_DMA | NDEBUG_ARBITRATION | NDEBUG_SELECTION | NDEBUG_RESELECTION)
63#else
64#define NDEBUG (NDEBUG_ABORT)
65#endif
66
67#define RESET_BOOT 57#define RESET_BOOT
68#define DRIVER_SETUP 58#define DRIVER_SETUP
69 59
diff --git a/drivers/scsi/mac_scsi.h b/drivers/scsi/mac_scsi.h
index 7dc62fce1c4c..06969b06e54b 100644
--- a/drivers/scsi/mac_scsi.h
+++ b/drivers/scsi/mac_scsi.h
@@ -22,10 +22,6 @@
22 * 1+ (800) 334-5454 22 * 1+ (800) 334-5454
23 */ 23 */
24 24
25/*
26 * $Log: cumana_NCR5380.h,v $
27 */
28
29#ifndef MAC_NCR5380_H 25#ifndef MAC_NCR5380_H
30#define MAC_NCR5380_H 26#define MAC_NCR5380_H
31 27
@@ -51,8 +47,6 @@
51 47
52#include <scsi/scsicam.h> 48#include <scsi/scsicam.h>
53 49
54#ifndef HOSTS_C
55
56#define NCR5380_implementation_fields \ 50#define NCR5380_implementation_fields \
57 int port, ctrl 51 int port, ctrl
58 52
@@ -75,10 +69,6 @@
75#define NCR5380_show_info macscsi_show_info 69#define NCR5380_show_info macscsi_show_info
76#define NCR5380_write_info macscsi_write_info 70#define NCR5380_write_info macscsi_write_info
77 71
78#define BOARD_NORMAL 0
79#define BOARD_NCR53C400 1
80
81#endif /* ndef HOSTS_C */
82#endif /* ndef ASM */ 72#endif /* ndef ASM */
83#endif /* MAC_NCR5380_H */ 73#endif /* MAC_NCR5380_H */
84 74
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index d84d02c2aad9..112799b131a9 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3061,7 +3061,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3061 u32 cur_state; 3061 u32 cur_state;
3062 u32 abs_state, curr_abs_state; 3062 u32 abs_state, curr_abs_state;
3063 3063
3064 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; 3064 abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
3065 fw_state = abs_state & MFI_STATE_MASK;
3065 3066
3066 if (fw_state != MFI_STATE_READY) 3067 if (fw_state != MFI_STATE_READY)
3067 printk(KERN_INFO "megasas: Waiting for FW to come to ready" 3068 printk(KERN_INFO "megasas: Waiting for FW to come to ready"
@@ -3069,9 +3070,6 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3069 3070
3070 while (fw_state != MFI_STATE_READY) { 3071 while (fw_state != MFI_STATE_READY) {
3071 3072
3072 abs_state =
3073 instance->instancet->read_fw_status_reg(instance->reg_set);
3074
3075 switch (fw_state) { 3073 switch (fw_state) {
3076 3074
3077 case MFI_STATE_FAULT: 3075 case MFI_STATE_FAULT:
@@ -3223,10 +3221,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3223 * The cur_state should not last for more than max_wait secs 3221 * The cur_state should not last for more than max_wait secs
3224 */ 3222 */
3225 for (i = 0; i < (max_wait * 1000); i++) { 3223 for (i = 0; i < (max_wait * 1000); i++) {
3226 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & 3224 curr_abs_state = instance->instancet->
3227 MFI_STATE_MASK ; 3225 read_fw_status_reg(instance->reg_set);
3228 curr_abs_state =
3229 instance->instancet->read_fw_status_reg(instance->reg_set);
3230 3226
3231 if (abs_state == curr_abs_state) { 3227 if (abs_state == curr_abs_state) {
3232 msleep(1); 3228 msleep(1);
@@ -3242,6 +3238,9 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3242 "in %d secs\n", fw_state, max_wait); 3238 "in %d secs\n", fw_state, max_wait);
3243 return -ENODEV; 3239 return -ENODEV;
3244 } 3240 }
3241
3242 abs_state = curr_abs_state;
3243 fw_state = curr_abs_state & MFI_STATE_MASK;
3245 } 3244 }
3246 printk(KERN_INFO "megasas: FW now in Ready state\n"); 3245 printk(KERN_INFO "megasas: FW now in Ready state\n");
3247 3246
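Editor's note: the megasas change above makes each poll iteration read the firmware status register once, keep the full value (abs_state) for change detection, and derive fw_state from it at the bottom of the loop, instead of issuing two reads that could disagree. A compilable model of the reworked loop (the register encoding and stub are invented; the driver's fault handling and max_wait timeout are elided):

#include <stdio.h>

#define MFI_STATE_MASK  0xF0000000u
#define MFI_STATE_READY 0xB0000000u  /* illustrative encoding */

/* stub register: walks the firmware toward READY on the third read */
static int calls;
static unsigned int read_fw_status_reg(void)
{
	return ++calls < 3 ? 0x40000123u : (MFI_STATE_READY | 0x123u);
}

static int wait_for_ready_model(void)
{
	unsigned int abs_state = read_fw_status_reg();
	unsigned int fw_state = abs_state & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		unsigned int curr = read_fw_status_reg();
		if (curr == abs_state)
			continue;       /* driver sleeps 1 ms here */
		/* one read per poll: cache it, derive fw_state from it */
		abs_state = curr;
		fw_state = curr & MFI_STATE_MASK;
	}
	puts("FW now in Ready state");
	return 0;
}

int main(void) { return wait_for_ready_model(); }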
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index bde63f7452bd..8b88118e20e6 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -1739,14 +1739,14 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1739 list_for_each_entry_safe(chain_req, next, 1739 list_for_each_entry_safe(chain_req, next,
1740 &ioc->scsi_lookup[i].chain_list, tracker_list) { 1740 &ioc->scsi_lookup[i].chain_list, tracker_list) {
1741 list_del_init(&chain_req->tracker_list); 1741 list_del_init(&chain_req->tracker_list);
1742 list_add_tail(&chain_req->tracker_list, 1742 list_add(&chain_req->tracker_list,
1743 &ioc->free_chain_list); 1743 &ioc->free_chain_list);
1744 } 1744 }
1745 } 1745 }
1746 ioc->scsi_lookup[i].cb_idx = 0xFF; 1746 ioc->scsi_lookup[i].cb_idx = 0xFF;
1747 ioc->scsi_lookup[i].scmd = NULL; 1747 ioc->scsi_lookup[i].scmd = NULL;
1748 ioc->scsi_lookup[i].direct_io = 0; 1748 ioc->scsi_lookup[i].direct_io = 0;
1749 list_add_tail(&ioc->scsi_lookup[i].tracker_list, 1749 list_add(&ioc->scsi_lookup[i].tracker_list,
1750 &ioc->free_list); 1750 &ioc->free_list);
1751 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 1751 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1752 1752
@@ -1764,13 +1764,13 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1764 /* hi-priority */ 1764 /* hi-priority */
1765 i = smid - ioc->hi_priority_smid; 1765 i = smid - ioc->hi_priority_smid;
1766 ioc->hpr_lookup[i].cb_idx = 0xFF; 1766 ioc->hpr_lookup[i].cb_idx = 0xFF;
1767 list_add_tail(&ioc->hpr_lookup[i].tracker_list, 1767 list_add(&ioc->hpr_lookup[i].tracker_list,
1768 &ioc->hpr_free_list); 1768 &ioc->hpr_free_list);
1769 } else if (smid <= ioc->hba_queue_depth) { 1769 } else if (smid <= ioc->hba_queue_depth) {
1770 /* internal queue */ 1770 /* internal queue */
1771 i = smid - ioc->internal_smid; 1771 i = smid - ioc->internal_smid;
1772 ioc->internal_lookup[i].cb_idx = 0xFF; 1772 ioc->internal_lookup[i].cb_idx = 0xFF;
1773 list_add_tail(&ioc->internal_lookup[i].tracker_list, 1773 list_add(&ioc->internal_lookup[i].tracker_list,
1774 &ioc->internal_free_list); 1774 &ioc->internal_free_list);
1775 } 1775 }
1776 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 1776 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
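Editor's note: the mpt2sas hunk above swaps list_add_tail() for list_add() on the free lists, turning them from FIFO into LIFO: the most recently freed tracker is handed out again first, presumably while its request frame is still warm in the cache (the diff itself does not state the motivation). The head-insert/head-remove pattern in plain C:

#include <stdio.h>

struct node { int id; struct node *next; };

/* push to the head, as list_add() does */
static void free_lifo(struct node **list, struct node *n)
{
	n->next = *list;
	*list = n;
}

/* allocation always takes from the head */
static struct node *alloc_head(struct node **list)
{
	struct node *n = *list;
	if (n)
		*list = n->next;
	return n;
}

int main(void)
{
	struct node a = {1, NULL}, b = {2, NULL};
	struct node *free_list = NULL;

	free_lifo(&free_list, &a);
	free_lifo(&free_list, &b);
	/* b was freed last, so b is reused first */
	printf("reused smid %d\n", alloc_head(&free_list)->id);
	return 0;
}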
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 1f2ac3a28621..fd3b998c75b1 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -1065,7 +1065,7 @@ void mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
1065 u32 reply); 1065 u32 reply);
1066int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, 1066int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle,
1067 uint channel, uint id, uint lun, u8 type, u16 smid_task, 1067 uint channel, uint id, uint lun, u8 type, u16 smid_task,
1068 ulong timeout, unsigned long serial_number, enum mutex_type m_type); 1068 ulong timeout, enum mutex_type m_type);
1069void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); 1069void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
1070void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); 1070void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
1071void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address); 1071void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index b7f887c9b0bf..62df8f9d4271 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -987,7 +987,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command karg,
987 mpt2sas_scsih_issue_tm(ioc, 987 mpt2sas_scsih_issue_tm(ioc,
988 le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, 988 le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
989 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10, 989 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10,
990 0, TM_MUTEX_ON); 990 TM_MUTEX_ON);
991 ioc->tm_cmds.status = MPT2_CMD_NOT_USED; 991 ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
992 } else 992 } else
993 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, 993 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 6fd7d40b2c4d..5055f925d2cd 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2368,7 +2368,6 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2368 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) 2368 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2369 * @smid_task: smid assigned to the task 2369 * @smid_task: smid assigned to the task
2370 * @timeout: timeout in seconds 2370 * @timeout: timeout in seconds
2371 * @serial_number: the serial_number from scmd
2372 * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF 2371 * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
2373 * Context: user 2372 * Context: user
2374 * 2373 *
@@ -2381,7 +2380,7 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2381int 2380int
2382mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel, 2381mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
2383 uint id, uint lun, u8 type, u16 smid_task, ulong timeout, 2382 uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
2384 unsigned long serial_number, enum mutex_type m_type) 2383 enum mutex_type m_type)
2385{ 2384{
2386 Mpi2SCSITaskManagementRequest_t *mpi_request; 2385 Mpi2SCSITaskManagementRequest_t *mpi_request;
2387 Mpi2SCSITaskManagementReply_t *mpi_reply; 2386 Mpi2SCSITaskManagementReply_t *mpi_reply;
@@ -2634,8 +2633,7 @@ _scsih_abort(struct scsi_cmnd *scmd)
2634 handle = sas_device_priv_data->sas_target->handle; 2633 handle = sas_device_priv_data->sas_target->handle;
2635 r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, 2634 r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
2636 scmd->device->id, scmd->device->lun, 2635 scmd->device->id, scmd->device->lun,
2637 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, 2636 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON);
2638 scmd->serial_number, TM_MUTEX_ON);
2639 2637
2640 out: 2638 out:
2641 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", 2639 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
@@ -2696,8 +2694,7 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
2696 2694
2697 r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, 2695 r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
2698 scmd->device->id, scmd->device->lun, 2696 scmd->device->id, scmd->device->lun,
2699 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0, 2697 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON);
2700 TM_MUTEX_ON);
2701 2698
2702 out: 2699 out:
2703 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", 2700 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
@@ -2757,7 +2754,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
2757 2754
2758 r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, 2755 r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
2759 scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 2756 scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
2760 30, 0, TM_MUTEX_ON); 2757 30, TM_MUTEX_ON);
2761 2758
2762 out: 2759 out:
2763 starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", 2760 starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
@@ -3953,9 +3950,9 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
3953 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full 3950 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
3954 */ 3951 */
3955static int 3952static int
3956_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) 3953_scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
3957{ 3954{
3958 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 3955 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
3959 struct MPT2SAS_DEVICE *sas_device_priv_data; 3956 struct MPT2SAS_DEVICE *sas_device_priv_data;
3960 struct MPT2SAS_TARGET *sas_target_priv_data; 3957 struct MPT2SAS_TARGET *sas_target_priv_data;
3961 struct _raid_device *raid_device; 3958 struct _raid_device *raid_device;
@@ -3963,7 +3960,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
3963 u32 mpi_control; 3960 u32 mpi_control;
3964 u16 smid; 3961 u16 smid;
3965 3962
3966 scmd->scsi_done = done;
3967 sas_device_priv_data = scmd->device->hostdata; 3963 sas_device_priv_data = scmd->device->hostdata;
3968 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 3964 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
3969 scmd->result = DID_NO_CONNECT << 16; 3965 scmd->result = DID_NO_CONNECT << 16;
@@ -4039,7 +4035,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
4039 MPT_TARGET_FLAGS_RAID_COMPONENT) 4035 MPT_TARGET_FLAGS_RAID_COMPONENT)
4040 mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; 4036 mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
4041 else 4037 else
4042 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 4038 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
4043 mpi_request->DevHandle = 4039 mpi_request->DevHandle =
4044 cpu_to_le16(sas_device_priv_data->sas_target->handle); 4040 cpu_to_le16(sas_device_priv_data->sas_target->handle);
4045 mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); 4041 mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
@@ -4083,8 +4079,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
4083 return SCSI_MLQUEUE_HOST_BUSY; 4079 return SCSI_MLQUEUE_HOST_BUSY;
4084} 4080}
4085 4081
4086static DEF_SCSI_QCMD(_scsih_qcmd)
4087
4088/** 4082/**
4089 * _scsih_normalize_sense - normalize descriptor and fixed format sense data 4083 * _scsih_normalize_sense - normalize descriptor and fixed format sense data
4090 * @sense_buffer: sense data returned by target 4084 * @sense_buffer: sense data returned by target
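Editor's note: the _scsih_qcmd_lck()/DEF_SCSI_QCMD() removal above converts the driver to the lock-less queuecommand convention: the midlayer now calls the driver without the host lock held, passes the Scsi_Host explicitly, and owns the completion callback, so the scmd->scsi_done = done bookkeeping disappears. A minimal stand-in showing the new entry-point shape (the types here are reduced stand-ins for the midlayer's, not the real definitions):

#include <stdio.h>

struct Scsi_Host { void *hostdata; };
struct scsi_cmnd { int result; void (*scsi_done)(struct scsi_cmnd *); };

/* new shape, as in this hunk: called without the host lock, host pointer
 * passed explicitly, no done-callback stashing by the driver */
static int scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
	void *ioc = shost->hostdata;    /* shost_priv() equivalent */
	(void)ioc;
	(void)scmd;
	/* build and fire the request; no spin_lock_irqsave(host_lock) */
	return 0;
}

int main(void)
{
	struct Scsi_Host host = { .hostdata = &host };
	struct scsi_cmnd cmd = { 0 };
	return scsih_qcmd(&host, &cmd);
}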
@@ -5880,7 +5874,7 @@ broadcast_aen_retry:
5880 5874
5881 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 5875 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
5882 r = mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun, 5876 r = mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
5883 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0, 5877 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30,
5884 TM_MUTEX_OFF); 5878 TM_MUTEX_OFF);
5885 if (r == FAILED) { 5879 if (r == FAILED) {
5886 sdev_printk(KERN_WARNING, sdev, 5880 sdev_printk(KERN_WARNING, sdev,
@@ -5922,7 +5916,7 @@ broadcast_aen_retry:
5922 5916
5923 r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, 5917 r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
5924 sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, 5918 sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
5925 scmd->serial_number, TM_MUTEX_OFF); 5919 TM_MUTEX_OFF);
5926 if (r == FAILED) { 5920 if (r == FAILED) {
5927 sdev_printk(KERN_WARNING, sdev, 5921 sdev_printk(KERN_WARNING, sdev,
5928 "mpt2sas_scsih_issue_tm: ABORT_TASK: FAILED : " 5922 "mpt2sas_scsih_issue_tm: ABORT_TASK: FAILED : "
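The two mpt2sas qcmd hunks are one logical change: `_scsih_qcmd_lck()` plus its `DEF_SCSI_QCMD()` wrapper become a single host-lock-less `_scsih_qcmd()`. For readers who have not seen the macro, a rough sketch of what `DEF_SCSI_QCMD(_scsih_qcmd)` expanded to in kernels of this era (condensed from include/scsi/scsi_cmnd.h; treat the details as approximate):

```c
/* Approximate expansion of DEF_SCSI_QCMD(_scsih_qcmd) before this patch:
 * every queued command was serialized on the Scsi_Host lock. */
static int _scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	unsigned long irq_flags;
	int rc;

	spin_lock_irqsave(shost->host_lock, irq_flags);
	rc = _scsih_qcmd_lck(cmd, cmd->scsi_done); /* old locked entry point */
	spin_unlock_irqrestore(shost->host_lock, irq_flags);
	return rc;
}
```

After the patch the midlayer calls `_scsih_qcmd()` directly without the host lock, which is also why the `done` argument disappears: completion always goes through `scmd->scsi_done`, set by the midlayer rather than the driver.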
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 0ebf5d913c80..9b90a6fef706 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -993,7 +993,7 @@ void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
 
 int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
 	uint channel, uint id, uint lun, u8 type, u16 smid_task,
-	ulong timeout, unsigned long serial_number, enum mutex_type m_type);
+	ulong timeout, enum mutex_type m_type);
 void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
 void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
 void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 9b89de14a0a3..ba9cbe598a91 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -980,7 +980,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 		mpt3sas_scsih_issue_tm(ioc,
 		    le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
 		    0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30,
-		    0, TM_MUTEX_ON);
+		    TM_MUTEX_ON);
 	} else
 		mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
 		    FORCE_BIG_HAMMER);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index a961fe11b527..18e713db1d32 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2029,7 +2029,6 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
  * @smid_task: smid assigned to the task
  * @timeout: timeout in seconds
- * @serial_number: the serial_number from scmd
  * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
  * Context: user
  *
@@ -2042,7 +2041,7 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 int
 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
 	uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
-	unsigned long serial_number, enum mutex_type m_type)
+	enum mutex_type m_type)
 {
 	Mpi2SCSITaskManagementRequest_t *mpi_request;
 	Mpi2SCSITaskManagementReply_t *mpi_reply;
@@ -2293,8 +2292,7 @@ _scsih_abort(struct scsi_cmnd *scmd)
 	handle = sas_device_priv_data->sas_target->handle;
 	r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
 	    scmd->device->id, scmd->device->lun,
-	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
-	    scmd->serial_number, TM_MUTEX_ON);
+	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON);
 
  out:
 	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
@@ -2353,8 +2351,7 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
 
 	r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
 	    scmd->device->id, scmd->device->lun,
-	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0,
-	    TM_MUTEX_ON);
+	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON);
 
  out:
 	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
@@ -2414,7 +2411,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
 
 	r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
 	    scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
-	    30, 0, TM_MUTEX_ON);
+	    30, TM_MUTEX_ON);
 
  out:
 	starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
@@ -3518,7 +3515,7 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
 
 
 /**
- * _scsih_qcmd_lck - main scsi request entry point
+ * _scsih_qcmd - main scsi request entry point
  * @scmd: pointer to scsi command object
  * @done: function pointer to be invoked on completion
  *
@@ -3529,9 +3526,9 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
  *	SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
  */
 static int
-_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+_scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 {
-	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
 	struct MPT3SAS_DEVICE *sas_device_priv_data;
 	struct MPT3SAS_TARGET *sas_target_priv_data;
 	Mpi2SCSIIORequest_t *mpi_request;
@@ -3544,7 +3541,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
 	scsi_print_command(scmd);
 #endif
 
-	scmd->scsi_done = done;
 	sas_device_priv_data = scmd->device->hostdata;
 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
 		scmd->result = DID_NO_CONNECT << 16;
@@ -3659,8 +3655,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
  out:
 	return SCSI_MLQUEUE_HOST_BUSY;
 }
-static DEF_SCSI_QCMD(_scsih_qcmd)
-
 
 /**
  * _scsih_normalize_sense - normalize descriptor and fixed format sense data
@@ -5425,7 +5419,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
 
 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 	r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
-	    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0,
+	    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30,
 	    TM_MUTEX_OFF);
 	if (r == FAILED) {
 		sdev_printk(KERN_WARNING, sdev,
@@ -5467,7 +5461,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
 
 	r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
 	    sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
-	    scmd->serial_number, TM_MUTEX_OFF);
+	    TM_MUTEX_OFF);
 	if (r == FAILED) {
 		sdev_printk(KERN_WARNING, sdev,
 		    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
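The mpt3sas half mirrors the mpt2sas change and additionally shows the full reach of dropping an apparently unused parameter: the kerneldoc line, the prototype in mpt3sas_base.h, the definition, and every caller all change in the same patch. Side by side, directly from the hunks above:

```c
/* Before: a serial number was threaded through the TM path but not consumed. */
int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	uint channel, uint id, uint lun, u8 type, u16 smid_task,
	ulong timeout, unsigned long serial_number, enum mutex_type m_type);

/* After: one parameter fewer; all call sites updated in lockstep. */
int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	uint channel, uint id, uint lun, u8 type, u16 smid_task,
	ulong timeout, enum mutex_type m_type);
```

Updating declaration, kerneldoc, and callers together keeps every intermediate tree bisectable.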
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 1e4479f3331a..9270d15ff1a4 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -564,7 +564,7 @@ static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
 	u32 tmp;
 
 	tmp = mr32(MVS_GBL_CTL);
-	tmp |= (IRQ_SAS_A | IRQ_SAS_B);
+	tmp |= (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B);
 	mw32(MVS_GBL_INT_STAT, tmp);
 	writel(tmp, regs + 0x0C);
 	writel(tmp, regs + 0x10);
@@ -580,7 +580,7 @@ static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
 
 	tmp = mr32(MVS_GBL_CTL);
 
-	tmp &= ~(IRQ_SAS_A | IRQ_SAS_B);
+	tmp &= ~(MVS_IRQ_SAS_A | MVS_IRQ_SAS_B);
 	mw32(MVS_GBL_INT_STAT, tmp);
 	writel(tmp, regs + 0x0C);
 	writel(tmp, regs + 0x10);
@@ -596,7 +596,7 @@ static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
 	if (!(mvi->flags & MVF_FLAG_SOC)) {
 		stat = mr32(MVS_GBL_INT_STAT);
 
-		if (!(stat & (IRQ_SAS_A | IRQ_SAS_B)))
+		if (!(stat & (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B)))
 			return 0;
 	}
 	return stat;
@@ -606,8 +606,8 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
 {
 	void __iomem *regs = mvi->regs;
 
-	if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
-	    ((stat & IRQ_SAS_B) && mvi->id == 1)) {
+	if (((stat & MVS_IRQ_SAS_A) && mvi->id == 0) ||
+	    ((stat & MVS_IRQ_SAS_B) && mvi->id == 1)) {
 		mw32_f(MVS_INT_STAT, CINT_DONE);
 
 		spin_lock(&mvi->lock);
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
index 487aa6f97412..14e197497b46 100644
--- a/drivers/scsi/mvsas/mv_94xx.h
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -150,35 +150,35 @@ enum chip_register_bits {
 
 enum pci_interrupt_cause {
 	/*  MAIN_IRQ_CAUSE (R10200) Bits*/
-	IRQ_COM_IN_I2O_IOP0            = (1 << 0),
-	IRQ_COM_IN_I2O_IOP1            = (1 << 1),
-	IRQ_COM_IN_I2O_IOP2            = (1 << 2),
-	IRQ_COM_IN_I2O_IOP3            = (1 << 3),
-	IRQ_COM_OUT_I2O_HOS0           = (1 << 4),
-	IRQ_COM_OUT_I2O_HOS1           = (1 << 5),
-	IRQ_COM_OUT_I2O_HOS2           = (1 << 6),
-	IRQ_COM_OUT_I2O_HOS3           = (1 << 7),
-	IRQ_PCIF_TO_CPU_DRBL0          = (1 << 8),
-	IRQ_PCIF_TO_CPU_DRBL1          = (1 << 9),
-	IRQ_PCIF_TO_CPU_DRBL2          = (1 << 10),
-	IRQ_PCIF_TO_CPU_DRBL3          = (1 << 11),
-	IRQ_PCIF_DRBL0                 = (1 << 12),
-	IRQ_PCIF_DRBL1                 = (1 << 13),
-	IRQ_PCIF_DRBL2                 = (1 << 14),
-	IRQ_PCIF_DRBL3                 = (1 << 15),
-	IRQ_XOR_A                      = (1 << 16),
-	IRQ_XOR_B                      = (1 << 17),
-	IRQ_SAS_A                      = (1 << 18),
-	IRQ_SAS_B                      = (1 << 19),
-	IRQ_CPU_CNTRL                  = (1 << 20),
-	IRQ_GPIO                       = (1 << 21),
-	IRQ_UART                       = (1 << 22),
-	IRQ_SPI                        = (1 << 23),
-	IRQ_I2C                        = (1 << 24),
-	IRQ_SGPIO                      = (1 << 25),
-	IRQ_COM_ERR                    = (1 << 29),
-	IRQ_I2O_ERR                    = (1 << 30),
-	IRQ_PCIE_ERR                   = (1 << 31),
+	MVS_IRQ_COM_IN_I2O_IOP0        = (1 << 0),
+	MVS_IRQ_COM_IN_I2O_IOP1        = (1 << 1),
+	MVS_IRQ_COM_IN_I2O_IOP2        = (1 << 2),
+	MVS_IRQ_COM_IN_I2O_IOP3        = (1 << 3),
+	MVS_IRQ_COM_OUT_I2O_HOS0       = (1 << 4),
+	MVS_IRQ_COM_OUT_I2O_HOS1       = (1 << 5),
+	MVS_IRQ_COM_OUT_I2O_HOS2       = (1 << 6),
+	MVS_IRQ_COM_OUT_I2O_HOS3       = (1 << 7),
+	MVS_IRQ_PCIF_TO_CPU_DRBL0      = (1 << 8),
+	MVS_IRQ_PCIF_TO_CPU_DRBL1      = (1 << 9),
+	MVS_IRQ_PCIF_TO_CPU_DRBL2      = (1 << 10),
+	MVS_IRQ_PCIF_TO_CPU_DRBL3      = (1 << 11),
+	MVS_IRQ_PCIF_DRBL0             = (1 << 12),
+	MVS_IRQ_PCIF_DRBL1             = (1 << 13),
+	MVS_IRQ_PCIF_DRBL2             = (1 << 14),
+	MVS_IRQ_PCIF_DRBL3             = (1 << 15),
+	MVS_IRQ_XOR_A                  = (1 << 16),
+	MVS_IRQ_XOR_B                  = (1 << 17),
+	MVS_IRQ_SAS_A                  = (1 << 18),
+	MVS_IRQ_SAS_B                  = (1 << 19),
+	MVS_IRQ_CPU_CNTRL              = (1 << 20),
+	MVS_IRQ_GPIO                   = (1 << 21),
+	MVS_IRQ_UART                   = (1 << 22),
+	MVS_IRQ_SPI                    = (1 << 23),
+	MVS_IRQ_I2C                    = (1 << 24),
+	MVS_IRQ_SGPIO                  = (1 << 25),
+	MVS_IRQ_COM_ERR                = (1 << 29),
+	MVS_IRQ_I2O_ERR                = (1 << 30),
+	MVS_IRQ_PCIE_ERR               = (1 << 31),
 };
 
 union reg_phy_cfg {
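Renaming the whole `pci_interrupt_cause` enum to carry an `MVS_` prefix is mechanical, but worth a note: bare names like `IRQ_SAS_A` or `IRQ_GPIO` live in the global namespace and are one unlucky `#include` away from colliding with arch or subsystem IRQ symbols. A minimal illustration of the pattern (not driver code):

```c
/* Driver-private bit names carry the driver prefix; callers mask as before. */
enum example_irq_cause {
	MVS_IRQ_SAS_A = (1 << 18),
	MVS_IRQ_SAS_B = (1 << 19),
};

static inline int example_irq_is_sas(unsigned int stat)
{
	return (stat & (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B)) != 0;
}
```

Since the constants are only used inside mvsas, the rename changes no behavior.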
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 5ff978be249d..eacee48a955c 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -728,6 +728,15 @@ static struct pci_device_id mvs_pci_table[] = {
 		.class_mask	= 0,
 		.driver_data	= chip_9485,
 	},
+	{
+		.vendor		= PCI_VENDOR_ID_MARVELL_EXT,
+		.device		= 0x9485,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= 0x9485,
+		.class		= 0,
+		.class_mask	= 0,
+		.driver_data	= chip_9485,
+	},
 	{ PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */
 	{ PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
 	{ PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
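The new table entry matches device 0x9485 only when the PCI subsystem device ID is also 0x9485, so the board-specific variant binds to the same `chip_9485` data without widening the match for every subdevice. Assuming the `PCI_DEVICE_SUB()` helper from `<linux/pci.h>` (present in kernels of this vintage), the same entry can be written more compactly; a sketch:

```c
/* Sketch: equivalent subdevice-qualified match using PCI_DEVICE_SUB(). */
static const struct pci_device_id example_pci_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9485,
			 PCI_ANY_ID, 0x9485),
	  .driver_data = chip_9485 },
	{ }	/* zero terminator required by MODULE_DEVICE_TABLE */
};
```

The long-hand initializer in the patch is equivalent; it simply spells out every field.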
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index bac04c2335aa..5f4cbf0c4759 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1570,6 +1570,7 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
 		if (unlikely(!req))
 			return ERR_PTR(-ENOMEM);
 
+		blk_rq_set_block_pc(req);
 		return req;
 	}
 }
@@ -1590,7 +1591,6 @@ static int _init_blk_request(struct osd_request *or,
 	}
 
 	or->request = req;
-	req->cmd_type = REQ_TYPE_BLOCK_PC;
 	req->cmd_flags |= REQ_QUIET;
 
 	req->timeout = or->timeout;
@@ -1608,7 +1608,7 @@ static int _init_blk_request(struct osd_request *or,
 			ret = PTR_ERR(req);
 			goto out;
 		}
-		req->cmd_type = REQ_TYPE_BLOCK_PC;
+		blk_rq_set_block_pc(req);
 		or->in.req = or->request->next_rq = req;
 	}
 	} else if (has_in)
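Both osd hunks replace the open-coded `req->cmd_type = REQ_TYPE_BLOCK_PC;` with the `blk_rq_set_block_pc()` helper the block layer grew in this same development cycle. The point of the helper is that a BLOCK_PC request needs more than the type flag; from the 3.16-era block/blk-core.c it looks roughly like this, so treat the field list as approximate:

```c
/* Approximate body of the 3.16-era helper (see block/blk-core.c). */
void blk_rq_set_block_pc(struct request *rq)
{
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->__data_len = 0;
	rq->__sector = (sector_t)-1;
	rq->bio = rq->biotail = NULL;
	memset(rq->__cmd, 0, sizeof(rq->__cmd));
}
```

Note the move in `_make_request()`: initialization now happens right where the request is allocated, before any payload is attached, rather than later in `_init_blk_request()`.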
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 21883a2d6324..0727ea7cc387 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -365,7 +365,7 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
 	if (!req)
 		return DRIVER_ERROR << 24;
 
-	req->cmd_type = REQ_TYPE_BLOCK_PC;
+	blk_rq_set_block_pc(req);
 	req->cmd_flags |= REQ_QUIET;
 
 	SRpnt->bio = NULL;
diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h
index 3721342835e9..aa528f53c533 100644
--- a/drivers/scsi/pas16.h
+++ b/drivers/scsi/pas16.h
@@ -129,8 +129,6 @@ static int pas16_bus_reset(Scsi_Cmnd *);
 #define CAN_QUEUE 32
 #endif
 
-#ifndef HOSTS_C
-
 #define NCR5380_implementation_fields \
     volatile unsigned short io_port
 
@@ -171,6 +169,5 @@ static int pas16_bus_reset(Scsi_Cmnd *);
 
 #define PAS16_IRQS 0xd4a8
 
-#endif /* else def HOSTS_C */
 #endif /* ndef ASM */
 #endif /* PAS16_H */
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 28b4e8139153..a368d77b8d41 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -395,6 +395,8 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
 	payload.offset = 0;
 	payload.length = 4096;
 	payload.func_specific = kzalloc(4096, GFP_KERNEL);
+	if (!payload.func_specific)
+		return -ENOMEM;
 	PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
 	wait_for_completion(&completion);
 	virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
@@ -402,6 +404,7 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
 	    bios_index++)
 		str += sprintf(str, "%c",
 		    *((u8 *)((u8 *)virt_addr+bios_index)));
+	kfree(payload.func_specific);
 	return str - buf;
 }
 static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL);
@@ -729,7 +732,7 @@ static ssize_t pm8001_show_update_fw(struct device *cdev,
 			flash_error_table[i].reason);
 }
 
-static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUGO,
+static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP,
 	pm8001_show_update_fw, pm8001_store_update_fw);
 struct device_attribute *pm8001_host_attrs[] = {
 	&dev_attr_interface_rev,
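Two unrelated fixes share this file. First, the 4 KiB `kzalloc()` is finally checked, and the buffer is freed once the BIOS string has been copied out, closing both a NULL-dereference path and a per-read leak. The shape of the fix, condensed with hypothetical helper names:

```c
/* Condensed shape of the fixed sysfs show handler (helper name hypothetical). */
static ssize_t example_show(struct device *cdev,
			    struct device_attribute *attr, char *buf)
{
	void *scratch = kzalloc(4096, GFP_KERNEL);
	ssize_t len;

	if (!scratch)
		return -ENOMEM;			/* fail cleanly, don't oops */
	len = example_read_nvmd(scratch, buf);	/* hypothetical */
	kfree(scratch);				/* no longer leaked per read */
	return len;
}
```

Second, `S_IRUGO|S_IWUGO` made update_fw world-writable; `S_IRUGO|S_IWUSR|S_IWGRP` keeps the attribute writable for owner and group only, matching the tree-wide cleanup of world-writable sysfs files.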
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index c4f31b21feb8..e90c89f1d480 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -677,7 +677,7 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
  * pm8001_get_phy_settings_info : Read phy setting values.
  * @pm8001_ha : our hba.
  */
-void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
+static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
 {
 
 #ifdef PM8001_READ_VPD
@@ -691,11 +691,15 @@ void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
 	payload.offset = 0;
 	payload.length = 4096;
 	payload.func_specific = kzalloc(4096, GFP_KERNEL);
+	if (!payload.func_specific)
+		return -ENOMEM;
 	/* Read phy setting values from flash */
 	PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
 	wait_for_completion(&completion);
 	pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific);
+	kfree(payload.func_specific);
 #endif
+	return 0;
 }
 
 #ifdef PM8001_USE_MSIX
@@ -879,8 +883,11 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
 	pm8001_init_sas_add(pm8001_ha);
 	/* phy setting support for motherboard controller */
 	if (pdev->subsystem_vendor != PCI_VENDOR_ID_ADAPTEC2 &&
-	    pdev->subsystem_vendor != 0)
-		pm8001_get_phy_settings_info(pm8001_ha);
+	    pdev->subsystem_vendor != 0) {
+		rc = pm8001_get_phy_settings_info(pm8001_ha);
+		if (rc)
+			goto err_out_shost;
+	}
 	pm8001_post_sas_ha_init(shost, chip);
 	rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
 	if (rc)
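`pm8001_get_phy_settings_info()` gets the same allocation check, which forces the void-to-int conversion and the probe-side unwinding. The resulting pattern, condensed from the hunks above:

```c
/* Condensed: helper reports -ENOMEM, probe unwinds via its error label. */
static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
{
#ifdef PM8001_READ_VPD
	void *buf = kzalloc(4096, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... issue the NVMD read, wait for completion, consume buf ... */
	kfree(buf);
#endif
	return 0;	/* no-VPD builds: nothing to do, not an error */
}
```

In probe, the call site grows braces and a `goto err_out_shost`, reusing the existing teardown ladder instead of inventing a new one.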
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 07befcf365b8..16fe5196e6d9 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -664,7 +664,7 @@ do_read:
 	}
 
 	rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
-	    addr, offset, SFP_BLOCK_SIZE, 0);
+	    addr, offset, SFP_BLOCK_SIZE, BIT_1);
 	if (rval != QLA_SUCCESS) {
 		ql_log(ql_log_warn, vha, 0x706d,
 		    "Unable to read SFP data (%x/%x/%x).\n", rval,
@@ -1495,7 +1495,7 @@ qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
 
 	if (!ha->fw_dumped)
 		size = 0;
-	else if (IS_QLA82XX(ha))
+	else if (IS_P3P_TYPE(ha))
 		size = ha->md_template_size + ha->md_dump_size;
 	else
 		size = ha->fw_dump_len;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 71ff340f6de4..524f9eb7fcd1 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -2054,9 +2054,49 @@ qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
 		bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
 		break;
 	default:
-		ql_log(ql_log_warn, vha, 0x708c,
+		ql_dbg(ql_dbg_user, vha, 0x708c,
 		    "Unknown serdes cmd %x.\n", sr.cmd);
-		rval = -EDOM;
+		rval = -EINVAL;
+		break;
+	}
+
+	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+	    rval ? EXT_STATUS_MAILBOX : 0;
+
+	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+	bsg_job->reply->result = DID_OK << 16;
+	bsg_job->job_done(bsg_job);
+	return 0;
+}
+
+static int
+qla8044_serdes_op(struct fc_bsg_job *bsg_job)
+{
+	struct Scsi_Host *host = bsg_job->shost;
+	scsi_qla_host_t *vha = shost_priv(host);
+	int rval = 0;
+	struct qla_serdes_reg_ex sr;
+
+	memset(&sr, 0, sizeof(sr));
+
+	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
+
+	switch (sr.cmd) {
+	case INT_SC_SERDES_WRITE_REG:
+		rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
+		bsg_job->reply->reply_payload_rcv_len = 0;
+		break;
+	case INT_SC_SERDES_READ_REG:
+		rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
+		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
+		bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
+		break;
+	default:
+		ql_dbg(ql_dbg_user, vha, 0x70cf,
+		    "Unknown serdes cmd %x.\n", sr.cmd);
+		rval = -EINVAL;
 		break;
 	}
 
@@ -2121,6 +2161,9 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
 	case QL_VND_SERDES_OP:
 		return qla26xx_serdes_op(bsg_job);
 
+	case QL_VND_SERDES_OP_EX:
+		return qla8044_serdes_op(bsg_job);
+
 	default:
 		return -ENOSYS;
 	}
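`qla8044_serdes_op()` follows the standard qla2xxx BSG vendor-command shape: pull the request struct out of the scatter/gather list, dispatch on `sr.cmd`, push the struct back for reads, then complete the job. Worth noting as a design choice: the job itself always completes with `DID_OK`; the vendor status travels out-of-band in `vendor_rsp[0]`. A condensed skeleton of that shape:

```c
/* Condensed skeleton of a qla2xxx BSG vendor-command handler. */
static int example_serdes_op(struct fc_bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
	struct qla_serdes_reg_ex sr;
	int rval = 0;

	memset(&sr, 0, sizeof(sr));
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	if (sr.cmd == INT_SC_SERDES_READ_REG) {
		rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
	} else {
		rval = -EINVAL;	/* unknown subcommand: EINVAL, not EDOM */
	}

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;
	bsg_job->reply->result = DID_OK << 16;	/* transport-level success */
	bsg_job->job_done(bsg_job);
	return 0;
}
```

The switch from `ql_log(ql_log_warn, ...)` to `ql_dbg(ql_dbg_user, ...)` matters too: an unknown subcommand is user-triggerable, so it should not be able to spam the kernel log at warn level.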
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index e5c2126221e9..d38f9efa56fa 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -24,6 +24,7 @@
 #define QL_VND_READ_I2C		0x11
 #define QL_VND_FX00_MGMT_CMD	0x12
 #define QL_VND_SERDES_OP	0x13
+#define QL_VND_SERDES_OP_EX	0x14
 
 /* BSG Vendor specific subcode returns */
 #define EXT_STATUS_OK			0
@@ -225,4 +226,10 @@ struct qla_serdes_reg {
 	uint16_t val;
 } __packed;
 
+struct qla_serdes_reg_ex {
+	uint16_t cmd;
+	uint32_t addr;
+	uint32_t val;
+} __packed;
+
 #endif
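`struct qla_serdes_reg_ex` is user/kernel ABI, so it is declared `__packed`: without it, most ABIs would insert two bytes of padding after the 16-bit `cmd` to align the 32-bit fields, shifting `addr` and `val` as user space sees them. A small stand-alone C program makes the difference visible:

```c
#include <stdint.h>
#include <stdio.h>

struct serdes_reg_ex_packed {
	uint16_t cmd;
	uint32_t addr;
	uint32_t val;
} __attribute__((packed));

struct serdes_reg_ex_padded {	/* same fields, default alignment */
	uint16_t cmd;
	uint32_t addr;
	uint32_t val;
};

int main(void)
{
	/* Typically prints "10 12": two padding bytes would silently
	 * move addr/val relative to what the kernel expects. */
	printf("%zu %zu\n", sizeof(struct serdes_reg_ex_packed),
	       sizeof(struct serdes_reg_ex_padded));
	return 0;
}
```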
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 97255f7c3975..c72ee97bf3f7 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -15,7 +15,7 @@
  * |                              |                    | 0x0144,0x0146 |
  * |                              |                    | 0x015b-0x0160 |
  * |                              |                    | 0x016e-0x0170 |
- * | Mailbox commands             |       0x1187       | 0x1018-0x1019 |
+ * | Mailbox commands             |       0x118d       | 0x1018-0x1019 |
  * |                              |                    | 0x10ca        |
  * |                              |                    | 0x1115-0x1116 |
  * |                              |                    | 0x111a-0x111b |
@@ -45,12 +45,16 @@
  * |                              |                    | 0x70ad-0x70ae |
  * |                              |                    | 0x70d7-0x70db |
  * |                              |                    | 0x70de-0x70df |
- * | Task Management              |       0x803d       | 0x8025-0x8026 |
- * |                              |                    | 0x800b,0x8039 |
+ * | Task Management              |       0x803d       | 0x8000,0x800b |
+ * |                              |                    | 0x8019        |
+ * |                              |                    | 0x8025,0x8026 |
+ * |                              |                    | 0x8031,0x8032 |
+ * |                              |                    | 0x8039,0x803c |
  * | AER/EEH                      |       0x9011       |               |
  * | Virtual Port                 |       0xa007       |               |
- * | ISP82XX Specific             |       0xb14c       | 0xb002,0xb024 |
+ * | ISP82XX Specific             |       0xb157       | 0xb002,0xb024 |
  * |                              |                    | 0xb09e,0xb0ae |
+ * |                              |                    | 0xb0c3,0xb0c6 |
  * |                              |                    | 0xb0e0-0xb0ef |
  * |                              |                    | 0xb085,0xb0dc |
  * |                              |                    | 0xb107,0xb108 |
@@ -60,12 +64,12 @@
  * |                              |                    | 0xb13c-0xb140 |
  * |                              |                    | 0xb149        |
  * | MultiQ                       |       0xc00c       |               |
- * | Misc                         |       0xd2ff       | 0xd017-0xd019 |
+ * | Misc                         |       0xd212       | 0xd017-0xd019 |
  * |                              |                    | 0xd020        |
- * |                              |                    | 0xd02e-0xd0ff |
+ * |                              |                    | 0xd030-0xd0ff |
  * |                              |                    | 0xd101-0xd1fe |
- * |                              |                    | 0xd212-0xd2fe |
- * | Target Mode                  |       0xe070       | 0xe021        |
+ * |                              |                    | 0xd213-0xd2fe |
+ * | Target Mode                  |       0xe078       |               |
  * | Target Mode Management       |       0xf072       | 0xf002-0xf003 |
  * |                              |                    | 0xf046-0xf049 |
  * | Target Mode Task Management  |       0x1000b      |               |
@@ -277,9 +281,15 @@ qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
 	if (rval != QLA_SUCCESS)
 		return rval;
 
+	set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);
+
 	/* External Memory. */
-	return qla24xx_dump_ram(ha, 0x100000, *nxt,
+	rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
 	    ha->fw_memory_size - 0x100000 + 1, nxt);
+	if (rval == QLA_SUCCESS)
+		set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);
+
+	return rval;
 }
 
 static uint32_t *
@@ -296,23 +306,15 @@ qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
 	return buf;
 }
 
-int
-qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
+void
+qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
 {
-	int rval = QLA_SUCCESS;
-	uint32_t cnt;
-
 	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
-	for (cnt = 30000;
-	    ((RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) == 0) &&
-	    rval == QLA_SUCCESS; cnt--) {
-		if (cnt)
-			udelay(100);
-		else
-			rval = QLA_FUNCTION_TIMEOUT;
-	}
 
-	return rval;
+	/* 100 usec delay is sufficient enough for hardware to pause RISC */
+	udelay(100);
+	if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED)
+		set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
 }
 
 int
@@ -320,10 +322,14 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
 {
 	int rval = QLA_SUCCESS;
 	uint32_t cnt;
-	uint16_t mb0, wd;
+	uint16_t wd;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
-	/* Reset RISC. */
+	/*
+	 * Reset RISC. The delay is dependent on system architecture.
+	 * Driver can proceed with the reset sequence after waiting
+	 * for a timeout period.
+	 */
 	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
 	for (cnt = 0; cnt < 30000; cnt++) {
 		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
@@ -331,19 +337,14 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
 
 		udelay(10);
 	}
+	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
+		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
 
 	WRT_REG_DWORD(&reg->ctrl_status,
 	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
 	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
 
 	udelay(100);
-	/* Wait for firmware to complete NVRAM accesses. */
-	mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
-	for (cnt = 10000 ; cnt && mb0; cnt--) {
-		udelay(5);
-		mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
-		barrier();
-	}
 
 	/* Wait for soft-reset to complete. */
 	for (cnt = 0; cnt < 30000; cnt++) {
@@ -353,16 +354,21 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
 
 		udelay(10);
 	}
+	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
+		set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);
+
 	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
 	RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
 
-	for (cnt = 30000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+	for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
 	    rval == QLA_SUCCESS; cnt--) {
 		if (cnt)
-			udelay(100);
+			udelay(10);
 		else
 			rval = QLA_FUNCTION_TIMEOUT;
 	}
+	if (rval == QLA_SUCCESS)
+		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
 
 	return rval;
 }
@@ -659,12 +665,13 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
 
 	if (rval != QLA_SUCCESS) {
 		ql_log(ql_log_warn, vha, 0xd000,
-		    "Failed to dump firmware (%x).\n", rval);
+		    "Failed to dump firmware (%x), dump status flags (0x%lx).\n",
+		    rval, ha->fw_dump_cap_flags);
 		ha->fw_dumped = 0;
 	} else {
 		ql_log(ql_log_info, vha, 0xd001,
-		    "Firmware dump saved to temp buffer (%ld/%p).\n",
-		    vha->host_no, ha->fw_dump);
+		    "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
+		    vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
 		ha->fw_dumped = 1;
 		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
 	}
@@ -1053,6 +1060,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
 	risc_address = ext_mem_cnt = 0;
 	flags = 0;
+	ha->fw_dump_cap_flags = 0;
 
 	if (!hardware_locked)
 		spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1075,10 +1083,11 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
 	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
 
-	/* Pause RISC. */
-	rval = qla24xx_pause_risc(reg);
-	if (rval != QLA_SUCCESS)
-		goto qla24xx_fw_dump_failed_0;
+	/*
+	 * Pause RISC. No need to track timeout, as resetting the chip
+	 * is the right approach incase of pause timeout
+	 */
+	qla24xx_pause_risc(reg, ha);
 
 	/* Host interface registers. */
 	dmp_reg = &reg->flash_addr;
@@ -1302,6 +1311,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
 	risc_address = ext_mem_cnt = 0;
 	flags = 0;
+	ha->fw_dump_cap_flags = 0;
 
 	if (!hardware_locked)
 		spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1325,10 +1335,11 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
 	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
 
-	/* Pause RISC. */
-	rval = qla24xx_pause_risc(reg);
-	if (rval != QLA_SUCCESS)
-		goto qla25xx_fw_dump_failed_0;
+	/*
+	 * Pause RISC. No need to track timeout, as resetting the chip
+	 * is the right approach incase of pause timeout
+	 */
+	qla24xx_pause_risc(reg, ha);
 
 	/* Host/Risc registers. */
 	iter_reg = fw->host_risc_reg;
@@ -1619,6 +1630,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
 	risc_address = ext_mem_cnt = 0;
 	flags = 0;
+	ha->fw_dump_cap_flags = 0;
 
 	if (!hardware_locked)
 		spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1641,10 +1653,11 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
 	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
 
-	/* Pause RISC. */
-	rval = qla24xx_pause_risc(reg);
-	if (rval != QLA_SUCCESS)
-		goto qla81xx_fw_dump_failed_0;
+	/*
+	 * Pause RISC. No need to track timeout, as resetting the chip
+	 * is the right approach incase of pause timeout
+	 */
+	qla24xx_pause_risc(reg, ha);
 
 	/* Host/Risc registers. */
 	iter_reg = fw->host_risc_reg;
@@ -1938,6 +1951,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
 	risc_address = ext_mem_cnt = 0;
 	flags = 0;
+	ha->fw_dump_cap_flags = 0;
 
 	if (!hardware_locked)
 		spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1959,10 +1973,11 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
 	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
 
-	/* Pause RISC. */
-	rval = qla24xx_pause_risc(reg);
-	if (rval != QLA_SUCCESS)
-		goto qla83xx_fw_dump_failed_0;
+	/*
+	 * Pause RISC. No need to track timeout, as resetting the chip
+	 * is the right approach incase of pause timeout
+	 */
+	qla24xx_pause_risc(reg, ha);
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
 	dmp_reg = &reg->iobase_window;
@@ -2385,9 +2400,11 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 			nxt += sizeof(fw->code_ram);
 			nxt += (ha->fw_memory_size - 0x100000 + 1);
 			goto copy_queue;
-		} else
+		} else {
+			set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
 			ql_log(ql_log_warn, vha, 0xd010,
 			    "bigger hammer success?\n");
+		}
 	}
 
 	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
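The thread running through all of the qla_dbg.c hunks: instead of aborting the dump when the RISC pause times out, each stage now records its completion in `ha->fw_dump_cap_flags`, and the final success/failure message prints the bitmap, so a truncated dump says exactly how far the hardware got. Minimal usage sketch of the bitmap (bit names from qla_def.h):

```c
/* Sketch: progress bits accumulate; the log line prints the raw mask. */
unsigned long cap_flags = 0;

set_bit(RISC_PAUSE_CMPL, &cap_flags);	/* stage 1 done */
set_bit(DMA_SHUTDOWN_CMPL, &cap_flags);	/* stage 2 done */

if (!test_bit(RISC_RDY_AFT_RESET, &cap_flags))
	pr_warn("dump stalled before RISC came back, flags 0x%lx\n",
		cap_flags);
```

The `qla24xx_pause_risc()` rework is the enabler: pausing becomes best-effort (set HCCRX_SET_RISC_PAUSE, wait 100 us, note whether HSRX_RISC_PAUSED latched), because the subsequent soft reset recovers the chip whether or not the pause landed.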
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index cc961040f8b1..e1fc4e66966a 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -353,5 +353,6 @@ extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
 	uint32_t, void **);
 extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *,
 	uint32_t, void **);
-extern int qla24xx_pause_risc(struct device_reg_24xx __iomem *);
+extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *,
+	struct qla_hw_data *);
 extern int qla24xx_soft_reset(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 6a106136716c..de5d0ae19d83 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -965,6 +965,13 @@ struct mbx_cmd_32 {
  */
 #define MBC_WRITE_MPI_REGISTER		0x01	/* Write MPI Register. */
 
+/*
+ * ISP8044 mailbox commands
+ */
+#define MBC_SET_GET_ETH_SERDES_REG	0x150
+#define HCS_WRITE_SERDES		0x3
+#define HCS_READ_SERDES			0x4
+
 /* Firmware return data sizes */
 #define FCAL_MAP_SIZE	128
 
@@ -1622,25 +1629,35 @@ typedef struct {
 #define PO_MODE_DIF_PASS	2
 #define PO_MODE_DIF_REPLACE	3
 #define PO_MODE_DIF_TCP_CKSUM	6
-#define PO_ENABLE_DIF_BUNDLING	BIT_8
 #define PO_ENABLE_INCR_GUARD_SEED	BIT_3
-#define PO_DISABLE_INCR_REF_TAG	BIT_5
 #define PO_DISABLE_GUARD_CHECK	BIT_4
+#define PO_DISABLE_INCR_REF_TAG	BIT_5
+#define PO_DIS_HEADER_MODE	BIT_7
+#define PO_ENABLE_DIF_BUNDLING	BIT_8
+#define PO_DIS_FRAME_MODE	BIT_9
+#define PO_DIS_VALD_APP_ESC	BIT_10 /* Dis validation for escape tag/ffffh */
+#define PO_DIS_VALD_APP_REF_ESC BIT_11
+
+#define PO_DIS_APP_TAG_REPL	BIT_12 /* disable REG Tag replacement */
+#define PO_DIS_REF_TAG_REPL	BIT_13
+#define PO_DIS_APP_TAG_VALD	BIT_14 /* disable REF Tag validation */
+#define PO_DIS_REF_TAG_VALD	BIT_15
+
 /*
  * ISP queue - 64-Bit addressing, continuation crc entry structure definition.
  */
 struct crc_context {
 	uint32_t handle;		/* System handle. */
-	uint32_t ref_tag;
-	uint16_t app_tag;
+	__le32 ref_tag;
+	__le16 app_tag;
 	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
 	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
-	uint16_t guard_seed;		/* Initial Guard Seed */
-	uint16_t prot_opts;		/* Requested Data Protection Mode */
-	uint16_t blk_size;		/* Data size in bytes */
+	__le16 guard_seed;		/* Initial Guard Seed */
+	__le16 prot_opts;		/* Requested Data Protection Mode */
+	__le16 blk_size;		/* Data size in bytes */
 	uint16_t runt_blk_guard;	/* Guard value for runt block (tape
 					 * only) */
-	uint32_t byte_count;		/* Total byte count/ total data
+	__le32 byte_count;		/* Total byte count/ total data
 					 * transfer count */
 	union {
 		struct {
@@ -1654,10 +1671,10 @@ struct crc_context {
 			uint32_t	reserved_6;
 		} nobundling;
 		struct {
-			uint32_t	dif_byte_count;	/* Total DIF byte
+			__le32	dif_byte_count;	/* Total DIF byte
 							 * count */
 			uint16_t	reserved_1;
-			uint16_t	dseg_count;	/* Data segment count */
+			__le16	dseg_count;	/* Data segment count */
 			uint32_t	reserved_2;
 			uint32_t	data_address[2];
 			uint32_t	data_length;
@@ -1748,6 +1765,8 @@ typedef struct {
 #define CS_PORT_CONFIG_CHG	0x2A	/* Port Configuration Changed */
 #define CS_PORT_BUSY		0x2B	/* Port Busy */
 #define CS_COMPLETE_CHKCOND	0x30	/* Error? */
+#define CS_IOCB_ERROR		0x31	/* Generic error for IOCB request
+					   failure */
 #define CS_BAD_PAYLOAD		0x80	/* Driver defined */
 #define CS_UNKNOWN		0x81	/* Driver defined */
 #define CS_RETRY		0x82	/* Driver defined */
@@ -2676,6 +2695,7 @@ struct rsp_que {
 	uint32_t __iomem *rsp_q_out;
 	uint16_t  ring_index;
 	uint16_t  out_ptr;
+	uint16_t  *in_ptr;		/* queue shadow in index */
 	uint16_t  length;
 	uint16_t  options;
 	uint16_t  rid;
@@ -2702,6 +2722,7 @@ struct req_que {
 	uint32_t __iomem *req_q_out;
 	uint16_t  ring_index;
 	uint16_t  in_ptr;
+	uint16_t  *out_ptr;		/* queue shadow out index */
 	uint16_t  cnt;
 	uint16_t  length;
 	uint16_t  options;
@@ -2907,6 +2928,8 @@ struct qla_hw_data {
 #define PCI_DEVICE_ID_QLOGIC_ISP8031	0x8031
 #define PCI_DEVICE_ID_QLOGIC_ISP2031	0x2031
 #define PCI_DEVICE_ID_QLOGIC_ISP2071	0x2071
+#define PCI_DEVICE_ID_QLOGIC_ISP2271	0x2271
+
 	uint32_t	device_type;
 #define DT_ISP2100	BIT_0
 #define DT_ISP2200	BIT_1
@@ -2928,7 +2951,8 @@ struct qla_hw_data {
 #define DT_ISPFX00	BIT_17
 #define DT_ISP8044	BIT_18
 #define DT_ISP2071	BIT_19
-#define DT_ISP_LAST	(DT_ISP2071 << 1)
+#define DT_ISP2271	BIT_20
+#define DT_ISP_LAST	(DT_ISP2271 << 1)
 
 #define DT_T10_PI	BIT_25
 #define DT_IIDMA	BIT_26
@@ -2959,6 +2983,7 @@ struct qla_hw_data {
 #define IS_QLA8031(ha)	(DT_MASK(ha) & DT_ISP8031)
 #define IS_QLAFX00(ha)	(DT_MASK(ha) & DT_ISPFX00)
 #define IS_QLA2071(ha)	(DT_MASK(ha) & DT_ISP2071)
+#define IS_QLA2271(ha)	(DT_MASK(ha) & DT_ISP2271)
 
 #define IS_QLA23XX(ha)	(IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
 			IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -2967,7 +2992,7 @@ struct qla_hw_data {
 #define IS_QLA25XX(ha)	(IS_QLA2532(ha))
 #define IS_QLA83XX(ha)	(IS_QLA2031(ha) || IS_QLA8031(ha))
 #define IS_QLA84XX(ha)	(IS_QLA8432(ha))
-#define IS_QLA27XX(ha)	(IS_QLA2071(ha))
+#define IS_QLA27XX(ha)	(IS_QLA2071(ha) || IS_QLA2271(ha))
 #define IS_QLA24XX_TYPE(ha)	(IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
 			IS_QLA84XX(ha))
 #define IS_CNA_CAPABLE(ha)	(IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
@@ -3006,6 +3031,7 @@ struct qla_hw_data {
 	(((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
 #define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha))
 #define IS_TGT_MODE_CAPABLE(ha)	(ha->tgt.atio_q_length)
+#define IS_SHADOW_REG_CAPABLE(ha)  (IS_QLA27XX(ha))
 
 	/* HBA serial number */
 	uint8_t		serial0;
@@ -3136,7 +3162,15 @@ struct qla_hw_data {
 	struct qla2xxx_fw_dump *fw_dump;
 	uint32_t	fw_dump_len;
 	int		fw_dumped;
+	unsigned long	fw_dump_cap_flags;
+#define RISC_PAUSE_CMPL		0
+#define DMA_SHUTDOWN_CMPL	1
+#define ISP_RESET_CMPL		2
+#define RISC_RDY_AFT_RESET	3
+#define RISC_SRAM_DUMP_CMPL	4
+#define RISC_EXT_MEM_DUMP_CMPL	5
 	int		fw_dump_reading;
+	int		prev_minidump_failed;
 	dma_addr_t	eft_dma;
 	void		*eft;
 /* Current size of mctp dump is 0x086064 bytes */
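Two ideas in the qla_def.h hunks deserve a callout. The `__le32`/`__le16` annotations on `struct crc_context` document that the structure is consumed by the little-endian ISP over DMA, and let sparse (a `make C=2` build with endian checking enabled) flag any access that forgets a byte-swap. The expected fill pattern then looks roughly like:

```c
/* Sketch: every store into a __le* field goes through cpu_to_le*(). */
crc_ctx->ref_tag    = cpu_to_le32(lba & 0xffffffff);
crc_ctx->app_tag    = cpu_to_le16(0);
crc_ctx->guard_seed = cpu_to_le16(0);
crc_ctx->blk_size   = cpu_to_le16(blk_size);
crc_ctx->byte_count = cpu_to_le32(data_bytes);
```

The other addition is the plumbing for shadow registers: `IS_SHADOW_REG_CAPABLE()` (ISP27xx only), the shadow `in_ptr`/`out_ptr` members in the response and request queues, and the `fw_dump_cap_flags` bit names used by qla_dbg.c above.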
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 32ab80957688..2ca39b8e7166 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 3a7353eaccbd..eb8f57249f1d 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -371,7 +371,10 @@ struct init_cb_24xx {
 	 * BIT 14 = Data Rate bit 1
 	 * BIT 15 = Data Rate bit 2
 	 * BIT 16 = Enable 75 ohm Termination Select
-	 * BIT 17-31 = Reserved
+	 * BIT 17-28 = Reserved
+	 * BIT 29 = Enable response queue 0 in index shadowing
+	 * BIT 30 = Enable request queue 0 out index shadowing
+	 * BIT 31 = Reserved
 	 */
 	uint32_t firmware_options_3;
 	uint16_t qos;
@@ -1134,13 +1137,6 @@ struct device_reg_24xx {
 #define MIN_MULTI_ID_FABRIC	64	/* Must be power-of-2. */
 #define MAX_MULTI_ID_FABRIC	256	/* ... */
 
-#define for_each_mapped_vp_idx(_ha, _idx)		\
-	for (_idx = find_next_bit((_ha)->vp_idx_map,	\
-		(_ha)->max_npiv_vports + 1, 1);		\
-		_idx <= (_ha)->max_npiv_vports;		\
-		_idx = find_next_bit((_ha)->vp_idx_map,	\
-		(_ha)->max_npiv_vports + 1, _idx + 1))	\
-
 struct mid_conf_entry_24xx {
 	uint16_t reserved_1;
 
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index e665e8109933..d48dea8fab1b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -220,6 +220,13 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
 
 extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
 extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
+extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
+	uint32_t *, uint16_t, struct qla_tgt_cmd *);
+extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
+	uint32_t *, uint16_t, struct qla_tgt_cmd *);
+extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
+	uint32_t *, uint16_t, struct qla_tgt_cmd *);
+
 
 /*
  * Global Function Prototypes in qla_mbx.c source file.
@@ -347,6 +354,11 @@ extern int
 qla2x00_read_serdes_word(scsi_qla_host_t *, uint16_t, uint16_t *);
 
 extern int
+qla8044_write_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t);
+extern int
+qla8044_read_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t *);
+
+extern int
 qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t);
 
 extern int
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index e377f9d2f92a..a0df3b1b3823 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 38aeb54cd9d8..e2184412617d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation 3 * Copyright (c) 2003-2014 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -1476,6 +1476,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1476 } 1476 }
1477 1477
1478 ha->fw_dumped = 0; 1478 ha->fw_dumped = 0;
1479 ha->fw_dump_cap_flags = 0;
1479 dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0; 1480 dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
1480 req_q_size = rsp_q_size = 0; 1481 req_q_size = rsp_q_size = 0;
1481 1482
@@ -2061,6 +2062,10 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
2061 icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma)); 2062 icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
2062 icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma)); 2063 icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
2063 2064
2065 if (IS_SHADOW_REG_CAPABLE(ha))
2066 icb->firmware_options_2 |=
2067 __constant_cpu_to_le32(BIT_30|BIT_29);
2068
2064 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 2069 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
2065 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS); 2070 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
2066 icb->rid = __constant_cpu_to_le16(rid); 2071 icb->rid = __constant_cpu_to_le16(rid);
@@ -2138,6 +2143,8 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
2138 req = ha->req_q_map[que]; 2143 req = ha->req_q_map[que];
2139 if (!req) 2144 if (!req)
2140 continue; 2145 continue;
2146 req->out_ptr = (void *)(req->ring + req->length);
2147 *req->out_ptr = 0;
2141 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) 2148 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
2142 req->outstanding_cmds[cnt] = NULL; 2149 req->outstanding_cmds[cnt] = NULL;
2143 2150
@@ -2153,6 +2160,8 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
2153 rsp = ha->rsp_q_map[que]; 2160 rsp = ha->rsp_q_map[que];
2154 if (!rsp) 2161 if (!rsp)
2155 continue; 2162 continue;
2163 rsp->in_ptr = (void *)(rsp->ring + rsp->length);
2164 *rsp->in_ptr = 0;
2156 /* Initialize response queue entries */ 2165 /* Initialize response queue entries */
2157 if (IS_QLAFX00(ha)) 2166 if (IS_QLAFX00(ha))
2158 qlafx00_init_response_q_entries(rsp); 2167 qlafx00_init_response_q_entries(rsp);
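[editor's note] The two init_rings() hunks park the new shadow in/out pointers one element past each ring. With shadow registers (requested via the BIT_30|BIT_29 firmware_options_2 bits in the config_rings hunk above, understood here to cover the request and response queues), the firmware DMA-writes its queue indices into that host-memory slot, so the driver polls coherent memory instead of doing an MMIO read per check. A hedged sketch of the layout this implies; the dma_alloc_coherent() call is inferred from the pointer arithmetic, not shown in the patch:

        /* One extra element past the ring proper holds the shadow index. */
        req->ring = dma_alloc_coherent(&ha->pdev->dev,
            (req->length + 1) * sizeof(request_t), &req->dma, GFP_KERNEL);
        req->out_ptr = (void *)(req->ring + req->length);
        *req->out_ptr = 0;      /* firmware consumer index starts at zero */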
@@ -3406,7 +3415,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3406 fcport->d_id.b.domain, 3415 fcport->d_id.b.domain,
3407 fcport->d_id.b.area, 3416 fcport->d_id.b.area,
3408 fcport->d_id.b.al_pa); 3417 fcport->d_id.b.al_pa);
3409 fcport->loop_id = FC_NO_LOOP_ID; 3418 qla2x00_clear_loop_id(fcport);
3410 } 3419 }
3411 } 3420 }
3412 } 3421 }
@@ -4727,7 +4736,6 @@ static int
4727qla2x00_restart_isp(scsi_qla_host_t *vha) 4736qla2x00_restart_isp(scsi_qla_host_t *vha)
4728{ 4737{
4729 int status = 0; 4738 int status = 0;
4730 uint32_t wait_time;
4731 struct qla_hw_data *ha = vha->hw; 4739 struct qla_hw_data *ha = vha->hw;
4732 struct req_que *req = ha->req_q_map[0]; 4740 struct req_que *req = ha->req_q_map[0];
4733 struct rsp_que *rsp = ha->rsp_q_map[0]; 4741 struct rsp_que *rsp = ha->rsp_q_map[0];
@@ -4744,14 +4752,12 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
4744 if (!status && !(status = qla2x00_init_rings(vha))) { 4752 if (!status && !(status = qla2x00_init_rings(vha))) {
4745 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 4753 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4746 ha->flags.chip_reset_done = 1; 4754 ha->flags.chip_reset_done = 1;
4755
4747 /* Initialize the queues in use */ 4756 /* Initialize the queues in use */
4748 qla25xx_init_queues(ha); 4757 qla25xx_init_queues(ha);
4749 4758
4750 status = qla2x00_fw_ready(vha); 4759 status = qla2x00_fw_ready(vha);
4751 if (!status) { 4760 if (!status) {
4752 ql_dbg(ql_dbg_taskm, vha, 0x8031,
4753 "Start configure loop status = %d.\n", status);
4754
4755 /* Issue a marker after FW becomes ready. */ 4761 /* Issue a marker after FW becomes ready. */
4756 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 4762 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
4757 4763
@@ -4766,24 +4772,12 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
4766 qlt_24xx_process_atio_queue(vha); 4772 qlt_24xx_process_atio_queue(vha);
4767 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4773 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4768 4774
4769 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 4775 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4770 wait_time = 256;
4771 do {
4772 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4773 qla2x00_configure_loop(vha);
4774 wait_time--;
4775 } while (!atomic_read(&vha->loop_down_timer) &&
4776 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
4777 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
4778 &vha->dpc_flags)));
4779 } 4776 }
4780 4777
4781 /* if no cable then assume it's good */ 4778 /* if no cable then assume it's good */
4782 if ((vha->device_flags & DFLG_NO_CABLE)) 4779 if ((vha->device_flags & DFLG_NO_CABLE))
4783 status = 0; 4780 status = 0;
4784
4785 ql_dbg(ql_dbg_taskm, vha, 0x8032,
4786 "Configure loop done, status = 0x%x.\n", status);
4787 } 4781 }
4788 return (status); 4782 return (status);
4789} 4783}
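[editor's note] qla2x00_restart_isp() previously spun in place for up to 256 iterations calling qla2x00_configure_loop() until the link settled; the rework simply raises LOOP_RESYNC_NEEDED and returns, deferring the reconfiguration to the DPC kernel thread that already owns that work (the qla82xx variant below gets the same treatment). A simplified sketch of the consuming side, based on the driver's existing DPC loop with locking and vport handling omitted:

        /* In qla2x00_do_dpc(): the flag set above is serviced here. */
        if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
            !test_and_set_bit(LOOP_RESYNC_ACTIVE, &base_vha->dpc_flags)) {
                qla2x00_loop_resync(base_vha);  /* re-runs configure_loop */
                clear_bit(LOOP_RESYNC_ACTIVE, &base_vha->dpc_flags);
        }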
@@ -6130,7 +6124,6 @@ int
6130qla82xx_restart_isp(scsi_qla_host_t *vha) 6124qla82xx_restart_isp(scsi_qla_host_t *vha)
6131{ 6125{
6132 int status, rval; 6126 int status, rval;
6133 uint32_t wait_time;
6134 struct qla_hw_data *ha = vha->hw; 6127 struct qla_hw_data *ha = vha->hw;
6135 struct req_que *req = ha->req_q_map[0]; 6128 struct req_que *req = ha->req_q_map[0];
6136 struct rsp_que *rsp = ha->rsp_q_map[0]; 6129 struct rsp_que *rsp = ha->rsp_q_map[0];
@@ -6144,31 +6137,15 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
6144 6137
6145 status = qla2x00_fw_ready(vha); 6138 status = qla2x00_fw_ready(vha);
6146 if (!status) { 6139 if (!status) {
6147 ql_log(ql_log_info, vha, 0x803c,
6148 "Start configure loop, status =%d.\n", status);
6149
6150 /* Issue a marker after FW becomes ready. */ 6140 /* Issue a marker after FW becomes ready. */
6151 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 6141 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
6152
6153 vha->flags.online = 1; 6142 vha->flags.online = 1;
6154 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 6143 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6155 wait_time = 256;
6156 do {
6157 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6158 qla2x00_configure_loop(vha);
6159 wait_time--;
6160 } while (!atomic_read(&vha->loop_down_timer) &&
6161 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) &&
6162 wait_time &&
6163 (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)));
6164 } 6144 }
6165 6145
6166 /* if no cable then assume it's good */ 6146 /* if no cable then assume it's good */
6167 if ((vha->device_flags & DFLG_NO_CABLE)) 6147 if ((vha->device_flags & DFLG_NO_CABLE))
6168 status = 0; 6148 status = 0;
6169
6170 ql_log(ql_log_info, vha, 0x8000,
6171 "Configure loop done, status = 0x%x.\n", status);
6172 } 6149 }
6173 6150
6174 if (!status) { 6151 if (!status) {
@@ -6182,8 +6159,6 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
6182 vha->marker_needed = 1; 6159 vha->marker_needed = 1;
6183 } 6160 }
6184 6161
6185 vha->flags.online = 1;
6186
6187 ha->isp_ops->enable_intrs(ha); 6162 ha->isp_ops->enable_intrs(ha);
6188 6163
6189 ha->isp_abort_cnt = 0; 6164 ha->isp_abort_cnt = 0;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index ce8b5fb0f347..b3b1d6fc2d6c 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -1,10 +1,11 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation 3 * Copyright (c) 2003-2014 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7 7
8#include "qla_target.h"
8/** 9/**
9 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and 10 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
10 * Continuation Type 1 IOCBs to allocate. 11 * Continuation Type 1 IOCBs to allocate.
@@ -128,12 +129,20 @@ qla2x00_clear_loop_id(fc_port_t *fcport) {
128} 129}
129 130
130static inline void 131static inline void
131qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp) 132qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp,
133 struct qla_tgt_cmd *tc)
132{ 134{
133 struct dsd_dma *dsd_ptr, *tdsd_ptr; 135 struct dsd_dma *dsd_ptr, *tdsd_ptr;
134 struct crc_context *ctx; 136 struct crc_context *ctx;
135 137
136 ctx = (struct crc_context *)GET_CMD_CTX_SP(sp); 138 if (sp)
139 ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
140 else if (tc)
141 ctx = (struct crc_context *)tc->ctx;
142 else {
143 BUG();
144 return;
145 }
137 146
138 /* clean up allocated prev pool */ 147 /* clean up allocated prev pool */
139 list_for_each_entry_safe(dsd_ptr, tdsd_ptr, 148 list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
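[editor's note] qla2x00_clean_dsd_pool() now serves both the initiator path (sp) and the target path (tc); exactly one must be non-NULL, and the BUG() documents that contract. The same select-the-context idiom recurs in every qla_iocb.c builder below. A hypothetical helper capturing the pattern; this function does not exist in the driver and is shown only for illustration:

        static inline struct crc_context *
        qla2x00_pick_crc_ctx(srb_t *sp, struct qla_tgt_cmd *tc) /* hypothetical */
        {
                if (sp)
                        return (struct crc_context *)GET_CMD_CTX_SP(sp);
                if (tc)
                        return (struct crc_context *)tc->ctx;
                BUG();          /* caller must supply one source of context */
                return NULL;
        }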
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index e607568bce49..760931529592 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation 3 * Copyright (c) 2003-2014 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -936,9 +936,9 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
936 return 1; 936 return 1;
937} 937}
938 938
939static int 939int
940qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, 940qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
941 uint32_t *dsd, uint16_t tot_dsds) 941 uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
942{ 942{
943 void *next_dsd; 943 void *next_dsd;
944 uint8_t avail_dsds = 0; 944 uint8_t avail_dsds = 0;
@@ -948,21 +948,35 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
948 uint32_t *cur_dsd = dsd; 948 uint32_t *cur_dsd = dsd;
949 uint16_t used_dsds = tot_dsds; 949 uint16_t used_dsds = tot_dsds;
950 950
951 uint32_t prot_int; 951 uint32_t prot_int; /* protection interval */
952 uint32_t partial; 952 uint32_t partial;
953 struct qla2_sgx sgx; 953 struct qla2_sgx sgx;
954 dma_addr_t sle_dma; 954 dma_addr_t sle_dma;
955 uint32_t sle_dma_len, tot_prot_dma_len = 0; 955 uint32_t sle_dma_len, tot_prot_dma_len = 0;
956 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 956 struct scsi_cmnd *cmd;
957 957 struct scsi_qla_host *vha;
958 prot_int = cmd->device->sector_size;
959 958
960 memset(&sgx, 0, sizeof(struct qla2_sgx)); 959 memset(&sgx, 0, sizeof(struct qla2_sgx));
961 sgx.tot_bytes = scsi_bufflen(cmd); 960 if (sp) {
962 sgx.cur_sg = scsi_sglist(cmd); 961 vha = sp->fcport->vha;
963 sgx.sp = sp; 962 cmd = GET_CMD_SP(sp);
964 963 prot_int = cmd->device->sector_size;
965 sg_prot = scsi_prot_sglist(cmd); 964
965 sgx.tot_bytes = scsi_bufflen(cmd);
966 sgx.cur_sg = scsi_sglist(cmd);
967 sgx.sp = sp;
968
969 sg_prot = scsi_prot_sglist(cmd);
970 } else if (tc) {
971 vha = tc->vha;
972 prot_int = tc->blk_sz;
973 sgx.tot_bytes = tc->bufflen;
974 sgx.cur_sg = tc->sg;
975 sg_prot = tc->prot_sg;
976 } else {
977 BUG();
978 return 1;
979 }
966 980
967 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { 981 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
968 982
@@ -995,10 +1009,18 @@ alloc_and_fill:
995 return 1; 1009 return 1;
996 } 1010 }
997 1011
998 list_add_tail(&dsd_ptr->list, 1012 if (sp) {
999 &((struct crc_context *)sp->u.scmd.ctx)->dsd_list); 1013 list_add_tail(&dsd_ptr->list,
1014 &((struct crc_context *)
1015 sp->u.scmd.ctx)->dsd_list);
1016
1017 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1018 } else {
1019 list_add_tail(&dsd_ptr->list,
1020 &(tc->ctx->dsd_list));
1021 tc->ctx_dsd_alloced = 1;
1022 }
1000 1023
1001 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1002 1024
1003 /* add new list to cmd iocb or last list */ 1025 /* add new list to cmd iocb or last list */
1004 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 1026 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
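[editor's note] Each time a builder runs out of room it carves a fresh DSD list (from the driver's dl_dma_pool; the allocation is not shown in this hunk) and chains to it; the struct dsd_dma bookkeeping node is what later lets qla2x00_clean_dsd_pool() walk and free everything. The only behavioral difference between the two branches is who owns the context: sp marks SRB_CRC_CTX_DSD_VALID, tc records ctx_dsd_alloced. Roughly the shape involved, per qla_def.h with field order approximate:

        struct dsd_dma {
                struct list_head list;          /* linkage on ctx->dsd_list */
                dma_addr_t dsd_list_dma;        /* bus address handed to the HBA */
                void *dsd_addr;                 /* CPU address from dma_pool_alloc() */
        };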
@@ -1033,21 +1055,35 @@ alloc_and_fill:
1033 return 0; 1055 return 0;
1034} 1056}
1035 1057
1036static int 1058int
1037qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, 1059qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1038 uint16_t tot_dsds) 1060 uint16_t tot_dsds, struct qla_tgt_cmd *tc)
1039{ 1061{
1040 void *next_dsd; 1062 void *next_dsd;
1041 uint8_t avail_dsds = 0; 1063 uint8_t avail_dsds = 0;
1042 uint32_t dsd_list_len; 1064 uint32_t dsd_list_len;
1043 struct dsd_dma *dsd_ptr; 1065 struct dsd_dma *dsd_ptr;
1044 struct scatterlist *sg; 1066 struct scatterlist *sg, *sgl;
1045 uint32_t *cur_dsd = dsd; 1067 uint32_t *cur_dsd = dsd;
1046 int i; 1068 int i;
1047 uint16_t used_dsds = tot_dsds; 1069 uint16_t used_dsds = tot_dsds;
1048 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1070 struct scsi_cmnd *cmd;
1071 struct scsi_qla_host *vha;
1072
1073 if (sp) {
1074 cmd = GET_CMD_SP(sp);
1075 sgl = scsi_sglist(cmd);
1076 vha = sp->fcport->vha;
1077 } else if (tc) {
1078 sgl = tc->sg;
1079 vha = tc->vha;
1080 } else {
1081 BUG();
1082 return 1;
1083 }
1049 1084
1050 scsi_for_each_sg(cmd, sg, tot_dsds, i) { 1085
1086 for_each_sg(sgl, sg, tot_dsds, i) {
1051 dma_addr_t sle_dma; 1087 dma_addr_t sle_dma;
1052 1088
1053 /* Allocate additional continuation packets? */ 1089 /* Allocate additional continuation packets? */
@@ -1076,10 +1112,17 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1076 return 1; 1112 return 1;
1077 } 1113 }
1078 1114
1079 list_add_tail(&dsd_ptr->list, 1115 if (sp) {
1080 &((struct crc_context *)sp->u.scmd.ctx)->dsd_list); 1116 list_add_tail(&dsd_ptr->list,
1117 &((struct crc_context *)
1118 sp->u.scmd.ctx)->dsd_list);
1081 1119
1082 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1120 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1121 } else {
1122 list_add_tail(&dsd_ptr->list,
1123 &(tc->ctx->dsd_list));
1124 tc->ctx_dsd_alloced = 1;
1125 }
1083 1126
1084 /* add new list to cmd iocb or last list */ 1127 /* add new list to cmd iocb or last list */
1085 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 1128 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
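[editor's note] scsi_for_each_sg() is only usable with a struct scsi_cmnd, which target-mode commands do not have, so the loop switches to the underlying for_each_sg() over whichever scatterlist was selected above (scsi_sglist(cmd) or tc->sg). For reference, the SCSI wrapper is approximately this, per scsi/scsi_cmnd.h:

        #define scsi_for_each_sg(cmd, sg, nseg, __i) \
                for_each_sg(scsi_sglist(cmd), sg, nseg, __i)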
@@ -1102,23 +1145,37 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1102 return 0; 1145 return 0;
1103} 1146}
1104 1147
1105static int 1148int
1106qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, 1149qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1107 uint32_t *dsd, 1150 uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
1108 uint16_t tot_dsds)
1109{ 1151{
1110 void *next_dsd; 1152 void *next_dsd;
1111 uint8_t avail_dsds = 0; 1153 uint8_t avail_dsds = 0;
1112 uint32_t dsd_list_len; 1154 uint32_t dsd_list_len;
1113 struct dsd_dma *dsd_ptr; 1155 struct dsd_dma *dsd_ptr;
1114 struct scatterlist *sg; 1156 struct scatterlist *sg, *sgl;
1115 int i; 1157 int i;
1116 struct scsi_cmnd *cmd; 1158 struct scsi_cmnd *cmd;
1117 uint32_t *cur_dsd = dsd; 1159 uint32_t *cur_dsd = dsd;
1118 uint16_t used_dsds = tot_dsds; 1160 uint16_t used_dsds = tot_dsds;
1161 struct scsi_qla_host *vha;
1162
1163 if (sp) {
1164 cmd = GET_CMD_SP(sp);
1165 sgl = scsi_prot_sglist(cmd);
1166 vha = sp->fcport->vha;
1167 } else if (tc) {
1168 vha = tc->vha;
1169 sgl = tc->prot_sg;
1170 } else {
1171 BUG();
1172 return 1;
1173 }
1119 1174
1120 cmd = GET_CMD_SP(sp); 1175 ql_dbg(ql_dbg_tgt, vha, 0xe021,
1121 scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) { 1176 "%s: enter\n", __func__);
1177
1178 for_each_sg(sgl, sg, tot_dsds, i) {
1122 dma_addr_t sle_dma; 1179 dma_addr_t sle_dma;
1123 1180
1124 /* Allocate additional continuation packets? */ 1181 /* Allocate additional continuation packets? */
@@ -1147,10 +1204,17 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1147 return 1; 1204 return 1;
1148 } 1205 }
1149 1206
1150 list_add_tail(&dsd_ptr->list, 1207 if (sp) {
1151 &((struct crc_context *)sp->u.scmd.ctx)->dsd_list); 1208 list_add_tail(&dsd_ptr->list,
1209 &((struct crc_context *)
1210 sp->u.scmd.ctx)->dsd_list);
1152 1211
1153 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1212 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1213 } else {
1214 list_add_tail(&dsd_ptr->list,
1215 &(tc->ctx->dsd_list));
1216 tc->ctx_dsd_alloced = 1;
1217 }
1154 1218
1155 /* add new list to cmd iocb or last list */ 1219 /* add new list to cmd iocb or last list */
1156 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 1220 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
@@ -1386,10 +1450,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1386 1450
1387 if (!bundling && tot_prot_dsds) { 1451 if (!bundling && tot_prot_dsds) {
1388 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, 1452 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1389 cur_dsd, tot_dsds)) 1453 cur_dsd, tot_dsds, NULL))
1390 goto crc_queuing_error; 1454 goto crc_queuing_error;
1391 } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, 1455 } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1392 (tot_dsds - tot_prot_dsds))) 1456 (tot_dsds - tot_prot_dsds), NULL))
1393 goto crc_queuing_error; 1457 goto crc_queuing_error;
1394 1458
1395 if (bundling && tot_prot_dsds) { 1459 if (bundling && tot_prot_dsds) {
@@ -1398,7 +1462,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1398 __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE); 1462 __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1399 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; 1463 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1400 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd, 1464 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1401 tot_prot_dsds)) 1465 tot_prot_dsds, NULL))
1402 goto crc_queuing_error; 1466 goto crc_queuing_error;
1403 } 1467 }
1404 return QLA_SUCCESS; 1468 return QLA_SUCCESS;
@@ -1478,8 +1542,8 @@ qla24xx_start_scsi(srb_t *sp)
1478 tot_dsds = nseg; 1542 tot_dsds = nseg;
1479 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 1543 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1480 if (req->cnt < (req_cnt + 2)) { 1544 if (req->cnt < (req_cnt + 2)) {
1481 cnt = RD_REG_DWORD_RELAXED(req->req_q_out); 1545 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1482 1546 RD_REG_DWORD_RELAXED(req->req_q_out);
1483 if (req->ring_index < cnt) 1547 if (req->ring_index < cnt)
1484 req->cnt = cnt - req->ring_index; 1548 req->cnt = cnt - req->ring_index;
1485 else 1549 else
@@ -1697,8 +1761,8 @@ qla24xx_dif_start_scsi(srb_t *sp)
1697 tot_prot_dsds = nseg; 1761 tot_prot_dsds = nseg;
1698 tot_dsds += nseg; 1762 tot_dsds += nseg;
1699 if (req->cnt < (req_cnt + 2)) { 1763 if (req->cnt < (req_cnt + 2)) {
1700 cnt = RD_REG_DWORD_RELAXED(req->req_q_out); 1764 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1701 1765 RD_REG_DWORD_RELAXED(req->req_q_out);
1702 if (req->ring_index < cnt) 1766 if (req->ring_index < cnt)
1703 req->cnt = cnt - req->ring_index; 1767 req->cnt = cnt - req->ring_index;
1704 else 1768 else
@@ -2825,8 +2889,8 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
2825 2889
2826 /* Check for room on request queue. */ 2890 /* Check for room on request queue. */
2827 if (req->cnt < req_cnt + 2) { 2891 if (req->cnt < req_cnt + 2) {
2828 cnt = RD_REG_DWORD_RELAXED(req->req_q_out); 2892 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2829 2893 RD_REG_DWORD_RELAXED(req->req_q_out);
2830 if (req->ring_index < cnt) 2894 if (req->ring_index < cnt)
2831 req->cnt = cnt - req->ring_index; 2895 req->cnt = cnt - req->ring_index;
2832 else 2896 else
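[editor's note] All three queueing paths (qla24xx_start_scsi, qla24xx_dif_start_scsi, qla2x00_start_bidir) now read the firmware's consumer index from the shadow slot when the chip supports it, falling back to the MMIO register otherwise. The surrounding free-slot arithmetic, sketched with the wrap-around branch filled in from the unchanged driver code:

        cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
            RD_REG_DWORD_RELAXED(req->req_q_out);
        if (req->ring_index < cnt)
                req->cnt = cnt - req->ring_index;       /* free up to consumer */
        else
                req->cnt = req->length -
                    (req->ring_index - cnt);            /* free across the wrap */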
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 95314ef2e505..a56825c73c31 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation 3 * Copyright (c) 2003-2014 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -2009,11 +2009,13 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
2009 ql_dbg(ql_dbg_io, vha, 0x3017, 2009 ql_dbg(ql_dbg_io, vha, 0x3017,
2010 "Invalid status handle (0x%x).\n", sts->handle); 2010 "Invalid status handle (0x%x).\n", sts->handle);
2011 2011
2012 if (IS_P3P_TYPE(ha)) 2012 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
2013 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2013 if (IS_P3P_TYPE(ha))
2014 else 2014 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2015 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2015 else
2016 qla2xxx_wake_dpc(vha); 2016 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2017 qla2xxx_wake_dpc(vha);
2018 }
2017 return; 2019 return;
2018 } 2020 }
2019 2021
@@ -2472,12 +2474,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2472 if (pkt->entry_status != 0) { 2474 if (pkt->entry_status != 0) {
2473 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); 2475 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2474 2476
2475 (void)qlt_24xx_process_response_error(vha, pkt); 2477 if (qlt_24xx_process_response_error(vha, pkt))
2478 goto process_err;
2476 2479
2477 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2480 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2478 wmb(); 2481 wmb();
2479 continue; 2482 continue;
2480 } 2483 }
2484process_err:
2481 2485
2482 switch (pkt->entry_type) { 2486 switch (pkt->entry_type) {
2483 case STATUS_TYPE: 2487 case STATUS_TYPE:
@@ -2494,10 +2498,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2494 qla24xx_logio_entry(vha, rsp->req, 2498 qla24xx_logio_entry(vha, rsp->req,
2495 (struct logio_entry_24xx *)pkt); 2499 (struct logio_entry_24xx *)pkt);
2496 break; 2500 break;
2497 case CT_IOCB_TYPE: 2501 case CT_IOCB_TYPE:
2498 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 2502 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2499 break; 2503 break;
2500 case ELS_IOCB_TYPE: 2504 case ELS_IOCB_TYPE:
2501 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 2505 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2502 break; 2506 break;
2503 case ABTS_RECV_24XX: 2507 case ABTS_RECV_24XX:
@@ -2506,6 +2510,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2506 case ABTS_RESP_24XX: 2510 case ABTS_RESP_24XX:
2507 case CTIO_TYPE7: 2511 case CTIO_TYPE7:
2508 case NOTIFY_ACK_TYPE: 2512 case NOTIFY_ACK_TYPE:
2513 case CTIO_CRC2:
2509 qlt_response_pkt_all_vps(vha, (response_t *)pkt); 2514 qlt_response_pkt_all_vps(vha, (response_t *)pkt);
2510 break; 2515 break;
2511 case MARKER_TYPE: 2516 case MARKER_TYPE:
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 2528709c4add..1c33a77db5c2 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation 3 * Copyright (c) 2003-2014 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -1319,7 +1319,7 @@ qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
1319 1319
1320 left = 0; 1320 left = 0;
1321 1321
1322 list = kzalloc(dma_size, GFP_KERNEL); 1322 list = kmemdup(pmap, dma_size, GFP_KERNEL);
1323 if (!list) { 1323 if (!list) {
1324 ql_log(ql_log_warn, vha, 0x1140, 1324 ql_log(ql_log_warn, vha, 0x1140,
1325 "%s(%ld): failed to allocate node names list " 1325 "%s(%ld): failed to allocate node names list "
@@ -1328,7 +1328,6 @@ qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
1328 goto out_free; 1328 goto out_free;
1329 } 1329 }
1330 1330
1331 memcpy(list, pmap, dma_size);
1332restart: 1331restart:
1333 dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma); 1332 dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
1334 } 1333 }
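[editor's note] kmemdup() collapses the kzalloc()+memcpy() pair into one call, and drops the needless zeroing since every byte is overwritten anyway. It is roughly equivalent to:

        void *kmemdup_sketch(const void *src, size_t len, gfp_t gfp)
        {
                void *p = kmalloc(len, gfp);    /* same allocator, no memset */

                if (p)
                        memcpy(p, src, len);
                return p;
        }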
@@ -2644,7 +2643,10 @@ qla24xx_abort_command(srb_t *sp)
2644 ql_dbg(ql_dbg_mbx, vha, 0x1090, 2643 ql_dbg(ql_dbg_mbx, vha, 0x1090,
2645 "Failed to complete IOCB -- completion status (%x).\n", 2644 "Failed to complete IOCB -- completion status (%x).\n",
2646 le16_to_cpu(abt->nport_handle)); 2645 le16_to_cpu(abt->nport_handle));
2647 rval = QLA_FUNCTION_FAILED; 2646 if (abt->nport_handle == CS_IOCB_ERROR)
2647 rval = QLA_FUNCTION_PARAMETER_ERROR;
2648 else
2649 rval = QLA_FUNCTION_FAILED;
2648 } else { 2650 } else {
2649 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091, 2651 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
2650 "Done %s.\n", __func__); 2652 "Done %s.\n", __func__);
@@ -2879,6 +2881,78 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
2879 return rval; 2881 return rval;
2880} 2882}
2881 2883
2884int
2885qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
2886{
2887 int rval;
2888 mbx_cmd_t mc;
2889 mbx_cmd_t *mcp = &mc;
2890
2891 if (!IS_QLA8044(vha->hw))
2892 return QLA_FUNCTION_FAILED;
2893
2894 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1186,
2895 "Entered %s.\n", __func__);
2896
2897 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
2898 mcp->mb[1] = HCS_WRITE_SERDES;
2899 mcp->mb[3] = LSW(addr);
2900 mcp->mb[4] = MSW(addr);
2901 mcp->mb[5] = LSW(data);
2902 mcp->mb[6] = MSW(data);
2903 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
2904 mcp->in_mb = MBX_0;
2905 mcp->tov = MBX_TOV_SECONDS;
2906 mcp->flags = 0;
2907 rval = qla2x00_mailbox_command(vha, mcp);
2908
2909 if (rval != QLA_SUCCESS) {
2910 ql_dbg(ql_dbg_mbx, vha, 0x1187,
2911 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2912 } else {
2913 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
2914 "Done %s.\n", __func__);
2915 }
2916
2917 return rval;
2918}
2919
2920int
2921qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
2922{
2923 int rval;
2924 mbx_cmd_t mc;
2925 mbx_cmd_t *mcp = &mc;
2926
2927 if (!IS_QLA8044(vha->hw))
2928 return QLA_FUNCTION_FAILED;
2929
2930 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
2931 "Entered %s.\n", __func__);
2932
2933 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
2934 mcp->mb[1] = HCS_READ_SERDES;
2935 mcp->mb[3] = LSW(addr);
2936 mcp->mb[4] = MSW(addr);
2937 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
2938 mcp->in_mb = MBX_2|MBX_1|MBX_0;
2939 mcp->tov = MBX_TOV_SECONDS;
2940 mcp->flags = 0;
2941 rval = qla2x00_mailbox_command(vha, mcp);
2942
2943 *data = mcp->mb[2] << 16 | mcp->mb[1];
2944
2945 if (rval != QLA_SUCCESS) {
2946 ql_dbg(ql_dbg_mbx, vha, 0x118a,
2947 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2948 } else {
2949 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
2950 "Done %s.\n", __func__);
2951 }
2952
2953 return rval;
2954}
2955
2882/** 2956/**
2883 * qla2x00_set_serdes_params() - 2957 * qla2x00_set_serdes_params() -
2884 * @ha: HA context 2958 * @ha: HA context
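[editor's note] Both new ISP8044 serdes accessors follow the driver's standard mailbox idiom: load the opcode and operands into mb[], declare which mailboxes are written (out_mb) and which are expected back (in_mb), then block in qla2x00_mailbox_command(). Reduced to its skeleton, using the names from the read path above:

        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;

        mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;  /* opcode in mailbox 0 */
        mcp->mb[1] = HCS_READ_SERDES;             /* sub-operation */
        mcp->out_mb = MBX_1|MBX_0;                /* mailboxes the driver loads */
        mcp->in_mb = MBX_2|MBX_1|MBX_0;           /* mailboxes read back */
        mcp->tov = MBX_TOV_SECONDS;               /* completion timeout */
        mcp->flags = 0;
        rval = qla2x00_mailbox_command(vha, mcp);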
@@ -3660,6 +3734,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3660 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, 3734 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
3661 "Entered %s.\n", __func__); 3735 "Entered %s.\n", __func__);
3662 3736
3737 if (IS_SHADOW_REG_CAPABLE(ha))
3738 req->options |= BIT_13;
3739
3663 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 3740 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3664 mcp->mb[1] = req->options; 3741 mcp->mb[1] = req->options;
3665 mcp->mb[2] = MSW(LSD(req->dma)); 3742 mcp->mb[2] = MSW(LSD(req->dma));
@@ -3679,7 +3756,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3679 /* que in ptr index */ 3756 /* que in ptr index */
3680 mcp->mb[8] = 0; 3757 mcp->mb[8] = 0;
3681 /* que out ptr index */ 3758 /* que out ptr index */
3682 mcp->mb[9] = 0; 3759 mcp->mb[9] = *req->out_ptr = 0;
3683 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| 3760 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
3684 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3761 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3685 mcp->in_mb = MBX_0; 3762 mcp->in_mb = MBX_0;
@@ -3688,7 +3765,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3688 3765
3689 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) 3766 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
3690 mcp->in_mb |= MBX_1; 3767 mcp->in_mb |= MBX_1;
3691 if (IS_QLA83XX(ha) || !IS_QLA27XX(ha)) { 3768 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
3692 mcp->out_mb |= MBX_15; 3769 mcp->out_mb |= MBX_15;
3693 /* debug q create issue in SR-IOV */ 3770 /* debug q create issue in SR-IOV */
3694 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 3771 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
@@ -3697,7 +3774,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3697 spin_lock_irqsave(&ha->hardware_lock, flags); 3774 spin_lock_irqsave(&ha->hardware_lock, flags);
3698 if (!(req->options & BIT_0)) { 3775 if (!(req->options & BIT_0)) {
3699 WRT_REG_DWORD(req->req_q_in, 0); 3776 WRT_REG_DWORD(req->req_q_in, 0);
3700 if (!IS_QLA83XX(ha) || !IS_QLA27XX(ha)) 3777 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
3701 WRT_REG_DWORD(req->req_q_out, 0); 3778 WRT_REG_DWORD(req->req_q_out, 0);
3702 } 3779 }
3703 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3780 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3726,6 +3803,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3726 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, 3803 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
3727 "Entered %s.\n", __func__); 3804 "Entered %s.\n", __func__);
3728 3805
3806 if (IS_SHADOW_REG_CAPABLE(ha))
3807 rsp->options |= BIT_13;
3808
3729 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 3809 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3730 mcp->mb[1] = rsp->options; 3810 mcp->mb[1] = rsp->options;
3731 mcp->mb[2] = MSW(LSD(rsp->dma)); 3811 mcp->mb[2] = MSW(LSD(rsp->dma));
@@ -3740,7 +3820,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3740 3820
3741 mcp->mb[4] = rsp->id; 3821 mcp->mb[4] = rsp->id;
3742 /* que in ptr index */ 3822 /* que in ptr index */
3743 mcp->mb[8] = 0; 3823 mcp->mb[8] = *rsp->in_ptr = 0;
3744 /* que out ptr index */ 3824 /* que out ptr index */
3745 mcp->mb[9] = 0; 3825 mcp->mb[9] = 0;
3746 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 3826 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
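[editor's note] Besides wiring BIT_13 (shadow-register support) and the shadow index into the MULTIQ init commands, these hunks quietly repair two inverted chip checks. Illustration of the before/after logic only:

        /* before: true for every chip except 27XX, so old 24XX/25XX parts
         * also took the MBX_15 / extra in_mb branch */
        if (IS_QLA83XX(ha) || !IS_QLA27XX(ha))
        /* after: only the chips that actually support it */
        if (IS_QLA83XX(ha) || IS_QLA27XX(ha))

        /* before: a tautology (no chip is both), so req_q_out was always
         * written via MMIO */
        if (!IS_QLA83XX(ha) || !IS_QLA27XX(ha))
        /* after: skip the MMIO write on 83XX/27XX */
        if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))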
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index f0a852257f99..89998244f48d 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation 3 * Copyright (c) 2003-2014 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 0aaf6a9c87d3..abeb3901498b 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation 3 * Copyright (c) 2003-2014 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -527,21 +527,63 @@ qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
527 struct qla_hw_data *ha = vha->hw; 527 struct qla_hw_data *ha = vha->hw;
528 int i, core; 528 int i, core;
529 uint32_t cnt; 529 uint32_t cnt;
530 uint32_t reg_val;
531
532 spin_lock_irqsave(&ha->hardware_lock, flags);
533
534 QLAFX00_SET_HBA_SOC_REG(ha, 0x80004, 0);
535 QLAFX00_SET_HBA_SOC_REG(ha, 0x82004, 0);
536
537 /* stop the XOR DMA engines */
538 QLAFX00_SET_HBA_SOC_REG(ha, 0x60920, 0x02);
539 QLAFX00_SET_HBA_SOC_REG(ha, 0x60924, 0x02);
540 QLAFX00_SET_HBA_SOC_REG(ha, 0xf0920, 0x02);
541 QLAFX00_SET_HBA_SOC_REG(ha, 0xf0924, 0x02);
542
543 /* stop the IDMA engines */
544 reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60840);
545 reg_val &= ~(1<<12);
546 QLAFX00_SET_HBA_SOC_REG(ha, 0x60840, reg_val);
547
548 reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60844);
549 reg_val &= ~(1<<12);
550 QLAFX00_SET_HBA_SOC_REG(ha, 0x60844, reg_val);
551
552 reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60848);
553 reg_val &= ~(1<<12);
554 QLAFX00_SET_HBA_SOC_REG(ha, 0x60848, reg_val);
555
556 reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x6084C);
557 reg_val &= ~(1<<12);
558 QLAFX00_SET_HBA_SOC_REG(ha, 0x6084C, reg_val);
559
560 for (i = 0; i < 100000; i++) {
561 if ((QLAFX00_GET_HBA_SOC_REG(ha, 0xd0000) & 0x10000000) == 0 &&
562 (QLAFX00_GET_HBA_SOC_REG(ha, 0x10600) & 0x1) == 0)
563 break;
564 udelay(100);
565 }
530 566
531 /* Set all 4 cores in reset */ 567 /* Set all 4 cores in reset */
532 for (i = 0; i < 4; i++) { 568 for (i = 0; i < 4; i++) {
533 QLAFX00_SET_HBA_SOC_REG(ha, 569 QLAFX00_SET_HBA_SOC_REG(ha,
534 (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01)); 570 (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01));
535 }
536
537 /* Set all 4 core Clock gating control */
538 for (i = 0; i < 4; i++) {
539 QLAFX00_SET_HBA_SOC_REG(ha, 571 QLAFX00_SET_HBA_SOC_REG(ha,
540 (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101)); 572 (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));
541 } 573 }
542 574
543 /* Reset all units in Fabric */ 575 /* Reset all units in Fabric */
544 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x11F0101)); 576 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x011f0101));
577
578 /* */
579 QLAFX00_SET_HBA_SOC_REG(ha, 0x10610, 1);
580 QLAFX00_SET_HBA_SOC_REG(ha, 0x10600, 0);
581
582 /* Set all 4 core Memory Power Down Registers */
583 for (i = 0; i < 5; i++) {
584 QLAFX00_SET_HBA_SOC_REG(ha,
585 (SOC_PWR_MANAGEMENT_PWR_DOWN_REG + 4*i), (0x0));
586 }
545 587
546 /* Reset all interrupt control registers */ 588 /* Reset all interrupt control registers */
547 for (i = 0; i < 115; i++) { 589 for (i = 0; i < 115; i++) {
@@ -564,20 +606,19 @@ qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
564 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2)); 606 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
565 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3)); 607 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));
566 608
567 spin_lock_irqsave(&ha->hardware_lock, flags);
568
569 /* Kick in Fabric units */ 609 /* Kick in Fabric units */
570 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0)); 610 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));
571 611
572 /* Kick in Core0 to start boot process */ 612 /* Kick in Core0 to start boot process */
573 QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00)); 613 QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));
574 614
615 spin_unlock_irqrestore(&ha->hardware_lock, flags);
616
575 /* Wait 10secs for soft-reset to complete. */ 617 /* Wait 10secs for soft-reset to complete. */
576 for (cnt = 10; cnt; cnt--) { 618 for (cnt = 10; cnt; cnt--) {
577 msleep(1000); 619 msleep(1000);
578 barrier(); 620 barrier();
579 } 621 }
580 spin_unlock_irqrestore(&ha->hardware_lock, flags);
581} 622}
582 623
583/** 624/**
@@ -597,7 +638,6 @@ qlafx00_soft_reset(scsi_qla_host_t *vha)
597 638
598 ha->isp_ops->disable_intrs(ha); 639 ha->isp_ops->disable_intrs(ha);
599 qlafx00_soc_cpu_reset(vha); 640 qlafx00_soc_cpu_reset(vha);
600 ha->isp_ops->enable_intrs(ha);
601} 641}
602 642
603/** 643/**
@@ -2675,7 +2715,7 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
2675 uint16_t lreq_q_out = 0; 2715 uint16_t lreq_q_out = 0;
2676 2716
2677 lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in); 2717 lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in);
2678 lreq_q_out = RD_REG_DWORD(rsp->rsp_q_out); 2718 lreq_q_out = rsp->ring_index;
2679 2719
2680 while (lreq_q_in != lreq_q_out) { 2720 while (lreq_q_in != lreq_q_out) {
2681 lptr = rsp->ring_ptr; 2721 lptr = rsp->ring_ptr;
@@ -3426,7 +3466,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
3426 sp->fcport->vha, 0x3047, 3466 sp->fcport->vha, 0x3047,
3427 (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00)); 3467 (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));
3428 3468
3429 memcpy((void *)pfxiocb, &fx_iocb, 3469 memcpy_toio((void __iomem *)pfxiocb, &fx_iocb,
3430 sizeof(struct fxdisc_entry_fx00)); 3470 sizeof(struct fxdisc_entry_fx00));
3431 wmb(); 3471 wmb();
3432} 3472}
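[editor's note] The qla_mr.c changes do three related things: the SOC reset now quiesces the XOR DMA and IDMA engines and polls them idle before pulling the cores into reset, with the register sequence held under hardware_lock and the 10-second settle wait moved outside it (and interrupts are left disabled after a soft reset until re-initialization); the response-queue consumer is tracked via the driver's own rsp->ring_index instead of re-reading the MMIO out pointer; and the IOCB copy to chip memory switches to memcpy_toio() because the destination is __iomem. A sketch of why that last change matters, illustrative only:

        /* pfxiocb points into BAR-mapped chip memory (__iomem).  Plain
         * memcpy() may issue cached or unaligned CPU stores that MMIO
         * space cannot accept; memcpy_toio() routes through the I/O
         * accessors. */
        memcpy_toio((void __iomem *)pfxiocb, &fx_iocb,
            sizeof(struct fxdisc_entry_fx00));
        wmb();  /* order the copy before any doorbell write that follows */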
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index e529dfaeb854..aeaa1b40b1fc 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation 3 * Copyright (c) 2003-2014 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -351,6 +351,7 @@ struct config_info_data {
351#define SOC_FABRIC_RST_CONTROL_REG 0x0020840 351#define SOC_FABRIC_RST_CONTROL_REG 0x0020840
352#define SOC_FABRIC_CONTROL_REG 0x0020200 352#define SOC_FABRIC_CONTROL_REG 0x0020200
353#define SOC_FABRIC_CONFIG_REG 0x0020204 353#define SOC_FABRIC_CONFIG_REG 0x0020204
354#define SOC_PWR_MANAGEMENT_PWR_DOWN_REG 0x001820C
354 355
355#define SOC_INTERRUPT_SOURCE_I_CONTROL_REG 0x0020B00 356#define SOC_INTERRUPT_SOURCE_I_CONTROL_REG 0x0020B00
356#define SOC_CORE_TIMER_REG 0x0021850 357#define SOC_CORE_TIMER_REG 0x0021850
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 5511e24b1f11..58f3c912d96e 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation 3 * Copyright (c) 2003-2014 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -848,6 +848,7 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
848{ 848{
849 int done = 0, timeout = 0; 849 int done = 0, timeout = 0;
850 uint32_t lock_owner = 0; 850 uint32_t lock_owner = 0;
851 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
851 852
852 while (!done) { 853 while (!done) {
853 /* acquire semaphore2 from PCI HW block */ 854 /* acquire semaphore2 from PCI HW block */
@@ -856,17 +857,21 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
856 break; 857 break;
857 if (timeout >= qla82xx_rom_lock_timeout) { 858 if (timeout >= qla82xx_rom_lock_timeout) {
858 lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); 859 lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
860 ql_log(ql_log_warn, vha, 0xb157,
861 "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d",
862 __func__, ha->portnum, lock_owner);
859 return -1; 863 return -1;
860 } 864 }
861 timeout++; 865 timeout++;
862 } 866 }
863 qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER); 867 qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ha->portnum);
864 return 0; 868 return 0;
865} 869}
866 870
867static void 871static void
868qla82xx_rom_unlock(struct qla_hw_data *ha) 872qla82xx_rom_unlock(struct qla_hw_data *ha)
869{ 873{
874 qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, 0xffffffff);
870 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 875 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
871} 876}
872 877
@@ -950,6 +955,7 @@ static int
950qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) 955qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
951{ 956{
952 int ret, loops = 0; 957 int ret, loops = 0;
958 uint32_t lock_owner = 0;
953 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 959 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
954 960
955 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { 961 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
@@ -958,8 +964,10 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
958 loops++; 964 loops++;
959 } 965 }
960 if (loops >= 50000) { 966 if (loops >= 50000) {
967 lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
961 ql_log(ql_log_fatal, vha, 0x00b9, 968 ql_log(ql_log_fatal, vha, 0x00b9,
962 "Failed to acquire SEM2 lock.\n"); 969 "Failed to acquire SEM2 lock, Lock Owner %u.\n",
970 lock_owner);
963 return -1; 971 return -1;
964 } 972 }
965 ret = qla82xx_do_rom_fast_read(ha, addr, valp); 973 ret = qla82xx_do_rom_fast_read(ha, addr, valp);
@@ -1057,6 +1065,7 @@ static int
1057ql82xx_rom_lock_d(struct qla_hw_data *ha) 1065ql82xx_rom_lock_d(struct qla_hw_data *ha)
1058{ 1066{
1059 int loops = 0; 1067 int loops = 0;
1068 uint32_t lock_owner = 0;
1060 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 1069 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1061 1070
1062 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { 1071 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
@@ -1065,8 +1074,9 @@ ql82xx_rom_lock_d(struct qla_hw_data *ha)
1065 loops++; 1074 loops++;
1066 } 1075 }
1067 if (loops >= 50000) { 1076 if (loops >= 50000) {
1077 lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
1068 ql_log(ql_log_warn, vha, 0xb010, 1078 ql_log(ql_log_warn, vha, 0xb010,
1069 "ROM lock failed.\n"); 1079 "ROM lock failed, Lock Owner %u.\n", lock_owner);
1070 return -1; 1080 return -1;
1071 } 1081 }
1072 return 0; 1082 return 0;
@@ -2811,12 +2821,14 @@ static void
2811qla82xx_rom_lock_recovery(struct qla_hw_data *ha) 2821qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
2812{ 2822{
2813 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2823 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2824 uint32_t lock_owner = 0;
2814 2825
2815 if (qla82xx_rom_lock(ha)) 2826 if (qla82xx_rom_lock(ha)) {
2827 lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
2816 /* Someone else is holding the lock. */ 2828 /* Someone else is holding the lock. */
2817 ql_log(ql_log_info, vha, 0xb022, 2829 ql_log(ql_log_info, vha, 0xb022,
2818 "Resetting rom_lock.\n"); 2830 "Resetting rom_lock, Lock Owner %u.\n", lock_owner);
2819 2831 }
2820 /* 2832 /*
2821 * Either we got the lock, or someone 2833 * Either we got the lock, or someone
2822 * else died while holding it. 2834 * else died while holding it.
@@ -2840,47 +2852,30 @@ static int
2840qla82xx_device_bootstrap(scsi_qla_host_t *vha) 2852qla82xx_device_bootstrap(scsi_qla_host_t *vha)
2841{ 2853{
2842 int rval = QLA_SUCCESS; 2854 int rval = QLA_SUCCESS;
2843 int i, timeout; 2855 int i;
2844 uint32_t old_count, count; 2856 uint32_t old_count, count;
2845 struct qla_hw_data *ha = vha->hw; 2857 struct qla_hw_data *ha = vha->hw;
2846 int need_reset = 0, peg_stuck = 1; 2858 int need_reset = 0;
2847 2859
2848 need_reset = qla82xx_need_reset(ha); 2860 need_reset = qla82xx_need_reset(ha);
2849 2861
2850 old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
2851
2852 for (i = 0; i < 10; i++) {
2853 timeout = msleep_interruptible(200);
2854 if (timeout) {
2855 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2856 QLA8XXX_DEV_FAILED);
2857 return QLA_FUNCTION_FAILED;
2858 }
2859
2860 count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
2861 if (count != old_count)
2862 peg_stuck = 0;
2863 }
2864
2865 if (need_reset) { 2862 if (need_reset) {
2866 /* We are trying to perform a recovery here. */ 2863 /* We are trying to perform a recovery here. */
2867 if (peg_stuck) 2864 if (ha->flags.isp82xx_fw_hung)
2868 qla82xx_rom_lock_recovery(ha); 2865 qla82xx_rom_lock_recovery(ha);
2869 goto dev_initialize;
2870 } else { 2866 } else {
2871 /* Start of day for this ha context. */ 2867 old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
2872 if (peg_stuck) { 2868 for (i = 0; i < 10; i++) {
2873 /* Either we are the first or recovery in progress. */ 2869 msleep(200);
2874 qla82xx_rom_lock_recovery(ha); 2870 count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
2875 goto dev_initialize; 2871 if (count != old_count) {
2876 } else 2872 rval = QLA_SUCCESS;
2877 /* Firmware already running. */ 2873 goto dev_ready;
2878 goto dev_ready; 2874 }
2875 }
2876 qla82xx_rom_lock_recovery(ha);
2879 } 2877 }
2880 2878
2881 return rval;
2882
2883dev_initialize:
2884 /* set to DEV_INITIALIZING */ 2879 /* set to DEV_INITIALIZING */
2885 ql_log(ql_log_info, vha, 0x009e, 2880 ql_log(ql_log_info, vha, 0x009e,
2886 "HW State: INITIALIZING.\n"); 2881 "HW State: INITIALIZING.\n");
@@ -3142,18 +3137,18 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
3142 3137
3143 if (ql2xmdenable) { 3138 if (ql2xmdenable) {
3144 if (!ha->fw_dumped) { 3139 if (!ha->fw_dumped) {
3145 if (fw_major_version != ha->fw_major_version || 3140 if ((fw_major_version != ha->fw_major_version ||
3146 fw_minor_version != ha->fw_minor_version || 3141 fw_minor_version != ha->fw_minor_version ||
3147 fw_subminor_version != ha->fw_subminor_version) { 3142 fw_subminor_version != ha->fw_subminor_version) ||
3143 (ha->prev_minidump_failed)) {
3148 ql_dbg(ql_dbg_p3p, vha, 0xb02d, 3144 ql_dbg(ql_dbg_p3p, vha, 0xb02d,
3149 "Firmware version differs " 3145 "Firmware version differs Previous version: %d:%d:%d - New version: %d:%d:%d, prev_minidump_failed: %d.\n",
3150 "Previous version: %d:%d:%d - "
3151 "New version: %d:%d:%d\n",
3152 fw_major_version, fw_minor_version, 3146 fw_major_version, fw_minor_version,
3153 fw_subminor_version, 3147 fw_subminor_version,
3154 ha->fw_major_version, 3148 ha->fw_major_version,
3155 ha->fw_minor_version, 3149 ha->fw_minor_version,
3156 ha->fw_subminor_version); 3150 ha->fw_subminor_version,
3151 ha->prev_minidump_failed);
3157 /* Release MiniDump resources */ 3152 /* Release MiniDump resources */
3158 qla82xx_md_free(vha); 3153 qla82xx_md_free(vha);
3159 /* Allocate MiniDump resources */ 3154 /* Allocate MiniDump resources */
@@ -3682,8 +3677,10 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3682 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { 3677 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
3683 sp = req->outstanding_cmds[cnt]; 3678 sp = req->outstanding_cmds[cnt];
3684 if (sp) { 3679 if (sp) {
3685 if (!sp->u.scmd.ctx || 3680 if ((!sp->u.scmd.ctx ||
3686 (sp->flags & SRB_FCP_CMND_DMA_VALID)) { 3681 (sp->flags &
3682 SRB_FCP_CMND_DMA_VALID)) &&
3683 !ha->flags.isp82xx_fw_hung) {
3687 spin_unlock_irqrestore( 3684 spin_unlock_irqrestore(
3688 &ha->hardware_lock, flags); 3685 &ha->hardware_lock, flags);
3689 if (ha->isp_ops->abort_command(sp)) { 3686 if (ha->isp_ops->abort_command(sp)) {
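[editor's note] The ROM-lock rework replaces the shared magic owner ID (ROM_LOCK_DRIVER, removed from qla_nx.h below) with the acquiring function's port number, so contention reports can name the actual holder, and the unlock path now invalidates the owner ID before releasing the hardware semaphore. Compressed to the essentials, error handling elided:

        /* acquire: poll the hardware semaphore, then brand the lock */
        while (!done) {
                done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
                if (done == 1)
                        break;
                if (timeout++ >= qla82xx_rom_lock_timeout)
                        return -1;      /* log QLA82XX_ROM_LOCK_ID holder */
        }
        qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ha->portnum);

        /* release: invalidate the owner, then drop the semaphore */
        qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, 0xffffffff);
        qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));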
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 1bb93dbbccbb..59c477883a73 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation 3 * Copyright (c) 2003-2014 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -333,9 +333,6 @@
333#define QLA82XX_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004) 333#define QLA82XX_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
334#define QLA82XX_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038) 334#define QLA82XX_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
335 335
336/* Lock IDs for ROM lock */
337#define ROM_LOCK_DRIVER 0x0d417340
338
339#define QLA82XX_PCI_CRB_WINDOWSIZE 0x00100000 /* all are 1MB windows */ 336#define QLA82XX_PCI_CRB_WINDOWSIZE 0x00100000 /* all are 1MB windows */
340#define QLA82XX_PCI_CRB_WINDOW(A) \ 337#define QLA82XX_PCI_CRB_WINDOW(A) \
341 (QLA82XX_PCI_CRBSPACE + (A)*QLA82XX_PCI_CRB_WINDOWSIZE) 338 (QLA82XX_PCI_CRBSPACE + (A)*QLA82XX_PCI_CRB_WINDOWSIZE)
@@ -1186,6 +1183,7 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
1186#define CRB_NIU_XG_PAUSE_CTL_P1 0x8 1183#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
1187 1184
1188#define qla82xx_get_temp_val(x) ((x) >> 16) 1185#define qla82xx_get_temp_val(x) ((x) >> 16)
1186#define qla82xx_get_temp_val1(x) ((x) & 0x0000FFFF)
1189#define qla82xx_get_temp_state(x) ((x) & 0xffff) 1187#define qla82xx_get_temp_state(x) ((x) & 0xffff)
1190#define qla82xx_encode_temp(val, state) (((val) << 16) | (state)) 1188#define qla82xx_encode_temp(val, state) (((val) << 16) | (state))
1191 1189
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 86cf10815db0..da9e3902f219 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -1,17 +1,20 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation 3 * Copyright (c) 2003-2014 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7 7
8#include <linux/vmalloc.h> 8#include <linux/vmalloc.h>
9#include <linux/delay.h>
9 10
10#include "qla_def.h" 11#include "qla_def.h"
11#include "qla_gbl.h" 12#include "qla_gbl.h"
12 13
13#include <linux/delay.h> 14#include <linux/delay.h>
14 15
16#define TIMEOUT_100_MS 100
17
15/* 8044 Flash Read/Write functions */ 18/* 8044 Flash Read/Write functions */
16uint32_t 19uint32_t
17qla8044_rd_reg(struct qla_hw_data *ha, ulong addr) 20qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
@@ -117,6 +120,95 @@ qla8044_read_write_crb_reg(struct scsi_qla_host *vha,
117 qla8044_wr_reg_indirect(vha, waddr, value); 120 qla8044_wr_reg_indirect(vha, waddr, value);
118} 121}
119 122
123static int
124qla8044_poll_wait_for_ready(struct scsi_qla_host *vha, uint32_t addr1,
125 uint32_t mask)
126{
127 unsigned long timeout;
128 uint32_t temp;
129
130 /* jiffies after 100ms */
131 timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
132 do {
133 qla8044_rd_reg_indirect(vha, addr1, &temp);
134 if ((temp & mask) != 0)
135 break;
136 if (time_after_eq(jiffies, timeout)) {
137 ql_log(ql_log_warn, vha, 0xb151,
138 "Error in processing rdmdio entry\n");
139 return -1;
140 }
141 } while (1);
142
143 return 0;
144}
145
146static uint32_t
147qla8044_ipmdio_rd_reg(struct scsi_qla_host *vha,
148 uint32_t addr1, uint32_t addr3, uint32_t mask, uint32_t addr)
149{
150 uint32_t temp;
151 int ret = 0;
152
153 ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
154 if (ret == -1)
155 return -1;
156
157 temp = (0x40000000 | addr);
158 qla8044_wr_reg_indirect(vha, addr1, temp);
159
160 ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
161 if (ret == -1)
162 return 0;
163
164 qla8044_rd_reg_indirect(vha, addr3, &ret);
165
166 return ret;
167}
168
169
170static int
171qla8044_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *vha,
172 uint32_t addr1, uint32_t addr2, uint32_t addr3, uint32_t mask)
173{
174 unsigned long timeout;
175 uint32_t temp;
176
177 /* jiffies after 100 msecs */
178 timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
179 do {
180 temp = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr2);
181 if ((temp & 0x1) != 1)
182 break;
183 if (time_after_eq(jiffies, timeout)) {
184 ql_log(ql_log_warn, vha, 0xb152,
185 "Error in processing mdiobus idle\n");
186 return -1;
187 }
188 } while (1);
189
190 return 0;
191}
192
193static int
194qla8044_ipmdio_wr_reg(struct scsi_qla_host *vha, uint32_t addr1,
195 uint32_t addr3, uint32_t mask, uint32_t addr, uint32_t value)
196{
197 int ret = 0;
198
199 ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
200 if (ret == -1)
201 return -1;
202
203 qla8044_wr_reg_indirect(vha, addr3, value);
204 qla8044_wr_reg_indirect(vha, addr1, addr);
205
206 ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
207 if (ret == -1)
208 return -1;
209
210 return 0;
211}
120/* 212/*
121 * qla8044_rmw_crb_reg - Read value from raddr, AND with test_mask, 213 * qla8044_rmw_crb_reg - Read value from raddr, AND with test_mask,
122 * Shift Left,Right/OR/XOR with values RMW header and write value to waddr. 214 * Shift Left,Right/OR/XOR with values RMW header and write value to waddr.
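[editor's note] The new MDIO helpers above all share one polling primitive: compute a jiffies deadline with msecs_to_jiffies() and test it with time_after_eq(), which stays correct even when the jiffies counter wraps (a plain `jiffies > timeout` comparison would not). The pattern in isolation; addr and mask are whatever the caller passes:

        unsigned long deadline = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
        uint32_t val;

        for (;;) {
                qla8044_rd_reg_indirect(vha, addr, &val);
                if (val & mask)                 /* ready bit came up */
                        break;
                if (time_after_eq(jiffies, deadline))
                        return -1;              /* 100 ms budget exhausted */
        }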
@@ -356,8 +448,8 @@ qla8044_flash_lock(scsi_qla_host_t *vha)
356 lock_owner = qla8044_rd_reg(ha, 448 lock_owner = qla8044_rd_reg(ha,
357 QLA8044_FLASH_LOCK_ID); 449 QLA8044_FLASH_LOCK_ID);
358 ql_log(ql_log_warn, vha, 0xb113, 450 ql_log(ql_log_warn, vha, 0xb113,
359 "%s: flash lock by %d failed, held by %d\n", 451 "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d",
360 __func__, ha->portnum, lock_owner); 452 __func__, ha->portnum, lock_owner);
361 ret_val = QLA_FUNCTION_FAILED; 453 ret_val = QLA_FUNCTION_FAILED;
362 break; 454 break;
363 } 455 }
@@ -1541,7 +1633,7 @@ static void
1541qla8044_need_reset_handler(struct scsi_qla_host *vha) 1633qla8044_need_reset_handler(struct scsi_qla_host *vha)
1542{ 1634{
1543 uint32_t dev_state = 0, drv_state, drv_active; 1635 uint32_t dev_state = 0, drv_state, drv_active;
1544 unsigned long reset_timeout, dev_init_timeout; 1636 unsigned long reset_timeout;
1545 struct qla_hw_data *ha = vha->hw; 1637 struct qla_hw_data *ha = vha->hw;
1546 1638
1547 ql_log(ql_log_fatal, vha, 0xb0c2, 1639 ql_log(ql_log_fatal, vha, 0xb0c2,
@@ -1555,84 +1647,78 @@ qla8044_need_reset_handler(struct scsi_qla_host *vha)
1555 qla8044_idc_lock(ha); 1647 qla8044_idc_lock(ha);
1556 } 1648 }
1557 1649
1650 dev_state = qla8044_rd_direct(vha,
1651 QLA8044_CRB_DEV_STATE_INDEX);
1558 drv_state = qla8044_rd_direct(vha, 1652 drv_state = qla8044_rd_direct(vha,
1559 QLA8044_CRB_DRV_STATE_INDEX); 1653 QLA8044_CRB_DRV_STATE_INDEX);
1560 drv_active = qla8044_rd_direct(vha, 1654 drv_active = qla8044_rd_direct(vha,
1561 QLA8044_CRB_DRV_ACTIVE_INDEX); 1655 QLA8044_CRB_DRV_ACTIVE_INDEX);
1562 1656
1563 ql_log(ql_log_info, vha, 0xb0c5, 1657 ql_log(ql_log_info, vha, 0xb0c5,
1564 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n", 1658 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x dev_state = 0x%x\n",
1565 __func__, vha->host_no, drv_state, drv_active); 1659 __func__, vha->host_no, drv_state, drv_active, dev_state);
1566 1660
1567 if (!ha->flags.nic_core_reset_owner) { 1661 qla8044_set_rst_ready(vha);
1568 ql_dbg(ql_dbg_p3p, vha, 0xb0c3,
1569 "%s(%ld): reset acknowledged\n",
1570 __func__, vha->host_no);
1571 qla8044_set_rst_ready(vha);
1572 1662
1573 /* Non-reset owners ACK Reset and wait for device INIT state 1663 /* wait for 10 seconds for reset ack from all functions */
1574 * as part of Reset Recovery by Reset Owner 1664 reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
1575 */
1576 dev_init_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
1577 1665
1578 do { 1666 do {
1579 if (time_after_eq(jiffies, dev_init_timeout)) { 1667 if (time_after_eq(jiffies, reset_timeout)) {
1580 ql_log(ql_log_info, vha, 0xb0c4, 1668 ql_log(ql_log_info, vha, 0xb0c4,
1581 "%s: Non Reset owner: Reset Ack Timeout!\n", 1669 "%s: Function %d: Reset Ack Timeout!, drv_state: 0x%08x, drv_active: 0x%08x\n",
1582 __func__); 1670 __func__, ha->portnum, drv_state, drv_active);
1583 break; 1671 break;
1584 } 1672 }
1585 1673
1586 qla8044_idc_unlock(ha); 1674 qla8044_idc_unlock(ha);
1587 msleep(1000); 1675 msleep(1000);
1588 qla8044_idc_lock(ha); 1676 qla8044_idc_lock(ha);
1589 1677
1590 dev_state = qla8044_rd_direct(vha, 1678 dev_state = qla8044_rd_direct(vha,
1591 QLA8044_CRB_DEV_STATE_INDEX); 1679 QLA8044_CRB_DEV_STATE_INDEX);
1592 } while (((drv_state & drv_active) != drv_active) && 1680 drv_state = qla8044_rd_direct(vha,
1593 (dev_state == QLA8XXX_DEV_NEED_RESET)); 1681 QLA8044_CRB_DRV_STATE_INDEX);
1682 drv_active = qla8044_rd_direct(vha,
1683 QLA8044_CRB_DRV_ACTIVE_INDEX);
1684 } while (((drv_state & drv_active) != drv_active) &&
1685 (dev_state == QLA8XXX_DEV_NEED_RESET));
1686
1687 /* Remove IDC participation of functions not acknowledging */
1688 if (drv_state != drv_active) {
1689 ql_log(ql_log_info, vha, 0xb0c7,
1690 "%s(%ld): Function %d turning off drv_active of non-acking function 0x%x\n",
1691 __func__, vha->host_no, ha->portnum,
1692 (drv_active ^ drv_state));
1693 drv_active = drv_active & drv_state;
1694 qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX,
1695 drv_active);
1594 } else { 1696 } else {
1595 qla8044_set_rst_ready(vha); 1697 /*
1596 1698 * Reset owner should execute reset recovery,
1597 /* wait for 10 seconds for reset ack from all functions */ 1699 * if all functions acknowledged
1598 reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); 1700 */
1599 1701 if ((ha->flags.nic_core_reset_owner) &&
1600 while ((drv_state & drv_active) != drv_active) { 1702 (dev_state == QLA8XXX_DEV_NEED_RESET)) {
1601 if (time_after_eq(jiffies, reset_timeout)) { 1703 ha->flags.nic_core_reset_owner = 0;
1602 ql_log(ql_log_info, vha, 0xb0c6, 1704 qla8044_device_bootstrap(vha);
1603 "%s: RESET TIMEOUT!" 1705 return;
1604 "drv_state: 0x%08x, drv_active: 0x%08x\n",
1605 QLA2XXX_DRIVER_NAME, drv_state, drv_active);
1606 break;
1607 }
1608
1609 qla8044_idc_unlock(ha);
1610 msleep(1000);
1611 qla8044_idc_lock(ha);
1612
1613 drv_state = qla8044_rd_direct(vha,
1614 QLA8044_CRB_DRV_STATE_INDEX);
1615 drv_active = qla8044_rd_direct(vha,
1616 QLA8044_CRB_DRV_ACTIVE_INDEX);
1617 }
1618
1619 if (drv_state != drv_active) {
1620 ql_log(ql_log_info, vha, 0xb0c7,
1621 "%s(%ld): Reset_owner turning off drv_active "
1622 "of non-acking function 0x%x\n", __func__,
1623 vha->host_no, (drv_active ^ drv_state));
1624 drv_active = drv_active & drv_state;
1625 qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX,
1626 drv_active);
1627 } 1706 }
1707 }
1628 1708
1629 /* 1709 /* Exit if non active function */
1630 * Clear RESET OWNER, will be set at next reset 1710 if (!(drv_active & (1 << ha->portnum))) {
1631 * by next RST_OWNER
1632 */
1633 ha->flags.nic_core_reset_owner = 0; 1711 ha->flags.nic_core_reset_owner = 0;
1712 return;
1713 }
1634 1714
1635 /* Start Reset Recovery */ 1715 /*
1716 * Execute Reset Recovery if Reset Owner or Function 7
1717 * is the only active function
1718 */
1719 if (ha->flags.nic_core_reset_owner ||
1720 ((drv_state & drv_active) == QLA8044_FUN7_ACTIVE_INDEX)) {
1721 ha->flags.nic_core_reset_owner = 0;
1636 qla8044_device_bootstrap(vha); 1722 qla8044_device_bootstrap(vha);
1637 } 1723 }
1638} 1724}
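
The handshake above relies on two shared registers: DRV_ACTIVE carries one presence bit per PCI function, and DRV_STATE carries the matching reset-ack bits. The owner waits until every active function has acked, then strips stragglers so one hung function cannot wedge recovery; the new QLA8044_FUN7_ACTIVE_INDEX (0x80) is simply function 7's bit in that mask. A standalone C sketch of the bit protocol - the register values and port numbers are illustrative, not read from hardware:

#include <stdint.h>
#include <stdio.h>

static uint32_t drv_active = 0x0b;      /* functions 0, 1 and 3 present */
static uint32_t drv_state;              /* reset-ack bits, initially 0 */

static void set_rst_ready(int portnum)
{
        drv_state |= 1u << portnum;     /* what qla8044_set_rst_ready() does */
}

int main(void)
{
        set_rst_ready(0);
        set_rst_ready(1);               /* function 3 never acks */

        if ((drv_state & drv_active) != drv_active) {
                /* owner drops the non-acking function from IDC */
                printf("turning off drv_active bits 0x%x\n",
                    drv_active ^ drv_state);
                drv_active &= drv_state;
        }
        printf("drv_active is now 0x%x\n", drv_active);
        return 0;
}
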
@@ -1655,6 +1741,19 @@ qla8044_set_drv_active(struct scsi_qla_host *vha)
1655 qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active); 1741 qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
1656} 1742}
1657 1743
1744static int
1745qla8044_check_drv_active(struct scsi_qla_host *vha)
1746{
1747 uint32_t drv_active;
1748 struct qla_hw_data *ha = vha->hw;
1749
1750 drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1751 if (drv_active & (1 << ha->portnum))
1752 return QLA_SUCCESS;
1753 else
1754 return QLA_TEST_FAILED;
1755}
1756
1658static void 1757static void
1659qla8044_clear_idc_dontreset(struct scsi_qla_host *vha) 1758qla8044_clear_idc_dontreset(struct scsi_qla_host *vha)
1660{ 1759{
@@ -1837,14 +1936,16 @@ qla8044_device_state_handler(struct scsi_qla_host *vha)
1837 1936
1838 while (1) { 1937 while (1) {
1839 if (time_after_eq(jiffies, dev_init_timeout)) { 1938 if (time_after_eq(jiffies, dev_init_timeout)) {
1840 ql_log(ql_log_warn, vha, 0xb0cf, 1939 if (qla8044_check_drv_active(vha) == QLA_SUCCESS) {
1841 "%s: Device Init Failed 0x%x = %s\n", 1940 ql_log(ql_log_warn, vha, 0xb0cf,
1842 QLA2XXX_DRIVER_NAME, dev_state, 1941 "%s: Device Init Failed 0x%x = %s\n",
1843 dev_state < MAX_STATES ? 1942 QLA2XXX_DRIVER_NAME, dev_state,
1844 qdev_state(dev_state) : "Unknown"); 1943 dev_state < MAX_STATES ?
1845 1944 qdev_state(dev_state) : "Unknown");
1846 qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, 1945 qla8044_wr_direct(vha,
1847 QLA8XXX_DEV_FAILED); 1946 QLA8044_CRB_DEV_STATE_INDEX,
1947 QLA8XXX_DEV_FAILED);
1948 }
1848 } 1949 }
1849 1950
1850 dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); 1951 dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
@@ -2017,6 +2118,13 @@ qla8044_watchdog(struct scsi_qla_host *vha)
2017 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) { 2118 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) {
2018 dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); 2119 dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
2019 2120
2121 if (qla8044_check_fw_alive(vha)) {
2122 ha->flags.isp82xx_fw_hung = 1;
2123 ql_log(ql_log_warn, vha, 0xb10a,
2124 "Firmware hung.\n");
2125 qla82xx_clear_pending_mbx(vha);
2126 }
2127
2020 if (qla8044_check_temp(vha)) { 2128 if (qla8044_check_temp(vha)) {
2021 set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); 2129 set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
2022 ha->flags.isp82xx_fw_hung = 1; 2130 ha->flags.isp82xx_fw_hung = 1;
@@ -2037,7 +2145,7 @@ qla8044_watchdog(struct scsi_qla_host *vha)
2037 qla2xxx_wake_dpc(vha); 2145 qla2xxx_wake_dpc(vha);
2038 } else { 2146 } else {
2039 /* Check firmware health */ 2147 /* Check firmware health */
2040 if (qla8044_check_fw_alive(vha)) { 2148 if (ha->flags.isp82xx_fw_hung) {
2041 halt_status = qla8044_rd_direct(vha, 2149 halt_status = qla8044_rd_direct(vha,
2042 QLA8044_PEG_HALT_STATUS1_INDEX); 2150 QLA8044_PEG_HALT_STATUS1_INDEX);
2043 if (halt_status & 2151 if (halt_status &
@@ -2073,12 +2181,8 @@ qla8044_watchdog(struct scsi_qla_host *vha)
2073 __func__); 2181 __func__);
2074 set_bit(ISP_ABORT_NEEDED, 2182 set_bit(ISP_ABORT_NEEDED,
2075 &vha->dpc_flags); 2183 &vha->dpc_flags);
2076 qla82xx_clear_pending_mbx(vha);
2077 } 2184 }
2078 } 2185 }
2079 ha->flags.isp82xx_fw_hung = 1;
2080 ql_log(ql_log_warn, vha, 0xb10a,
2081 "Firmware hung.\n");
2082 qla2xxx_wake_dpc(vha); 2186 qla2xxx_wake_dpc(vha);
2083 } 2187 }
2084 } 2188 }
@@ -2286,8 +2390,6 @@ qla8044_minidump_process_rdmem(struct scsi_qla_host *vha,
2286 } 2390 }
2287 2391
2288 if (j >= MAX_CTL_CHECK) { 2392 if (j >= MAX_CTL_CHECK) {
2289 printk_ratelimited(KERN_ERR
2290 "%s: failed to read through agent\n", __func__);
2291 write_unlock_irqrestore(&ha->hw_lock, flags); 2393 write_unlock_irqrestore(&ha->hw_lock, flags);
2292 return QLA_SUCCESS; 2394 return QLA_SUCCESS;
2293 } 2395 }
@@ -2882,6 +2984,231 @@ error_exit:
2882 return rval; 2984 return rval;
2883} 2985}
2884 2986
2987static uint32_t
2988qla8044_minidump_process_rddfe(struct scsi_qla_host *vha,
2989 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2990{
2991 int loop_cnt;
2992 uint32_t addr1, addr2, value, data, temp, wrVal;
2993 uint8_t stride, stride2;
2994 uint16_t count;
2995 uint32_t poll, mask, data_size, modify_mask;
2996 uint32_t wait_count = 0;
2997
2998 uint32_t *data_ptr = *d_ptr;
2999
3000 struct qla8044_minidump_entry_rddfe *rddfe;
3001 rddfe = (struct qla8044_minidump_entry_rddfe *) entry_hdr;
3002
3003 addr1 = rddfe->addr_1;
3004 value = rddfe->value;
3005 stride = rddfe->stride;
3006 stride2 = rddfe->stride2;
3007 count = rddfe->count;
3008
3009 poll = rddfe->poll;
3010 mask = rddfe->mask;
3011 modify_mask = rddfe->modify_mask;
3012 data_size = rddfe->data_size;
3013
3014 addr2 = addr1 + stride;
3015
3016 for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) {
3017 qla8044_wr_reg_indirect(vha, addr1, (0x40000000 | value));
3018
3019 wait_count = 0;
3020 while (wait_count < poll) {
3021 qla8044_rd_reg_indirect(vha, addr1, &temp);
3022 if ((temp & mask) != 0)
3023 break;
3024 wait_count++;
3025 }
3026
3027 if (wait_count == poll) {
3028 ql_log(ql_log_warn, vha, 0xb153,
3029 "%s: TIMEOUT\n", __func__);
3030 goto error;
3031 } else {
3032 qla8044_rd_reg_indirect(vha, addr2, &temp);
3033 temp = temp & modify_mask;
3034 temp = (temp | ((loop_cnt << 16) | loop_cnt));
3035 wrVal = ((temp << 16) | temp);
3036
3037 qla8044_wr_reg_indirect(vha, addr2, wrVal);
3038 qla8044_wr_reg_indirect(vha, addr1, value);
3039
3040 wait_count = 0;
3041 while (wait_count < poll) {
3042 qla8044_rd_reg_indirect(vha, addr1, &temp);
3043 if ((temp & mask) != 0)
3044 break;
3045 wait_count++;
3046 }
3047 if (wait_count == poll) {
3048 ql_log(ql_log_warn, vha, 0xb154,
3049 "%s: TIMEOUT\n", __func__);
3050 goto error;
3051 }
3052
3053 qla8044_wr_reg_indirect(vha, addr1,
3054 ((0x40000000 | value) + stride2));
3055 wait_count = 0;
3056 while (wait_count < poll) {
3057 qla8044_rd_reg_indirect(vha, addr1, &temp);
3058 if ((temp & mask) != 0)
3059 break;
3060 wait_count++;
3061 }
3062
3063 if (wait_count == poll) {
3064 ql_log(ql_log_warn, vha, 0xb155,
3065 "%s: TIMEOUT\n", __func__);
3066 goto error;
3067 }
3068
3069 qla8044_rd_reg_indirect(vha, addr2, &data);
3070
3071 *data_ptr++ = wrVal;
3072 *data_ptr++ = data;
3073 }
3074
3075 }
3076
3077 *d_ptr = data_ptr;
3078 return QLA_SUCCESS;
3079
3080error:
3081 return -1;
3082
3083}
3084
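
The rddfe handler above repeats one idiom three times: kick a register, then spin reading addr1 until (value & mask) goes non-zero or 'poll' iterations pass, with no delay between reads. A sketch of that loop factored into a helper - qla8044_rd_reg_indirect() is the driver's real accessor, but the helper itself is invented here:

static int qla8044_poll_mask(struct scsi_qla_host *vha, uint32_t addr,
        uint32_t mask, uint32_t poll)
{
        uint32_t temp, wait_count = 0;

        while (wait_count < poll) {
                qla8044_rd_reg_indirect(vha, addr, &temp);
                if ((temp & mask) != 0)
                        return 0;       /* bit(s) rose in time */
                wait_count++;
        }
        return -1;                      /* timed out; the 0xb153-0xb155 logs */
}
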
3085static uint32_t
3086qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha,
3087 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
3088{
3089 int ret = 0;
3090 uint32_t addr1, addr2, value1, value2, data, selVal;
3091 uint8_t stride1, stride2;
3092 uint32_t addr3, addr4, addr5, addr6, addr7;
3093 uint16_t count, loop_cnt;
3094 uint32_t poll, mask;
3095 uint32_t *data_ptr = *d_ptr;
3096
3097 struct qla8044_minidump_entry_rdmdio *rdmdio;
3098
3099 rdmdio = (struct qla8044_minidump_entry_rdmdio *) entry_hdr;
3100
3101 addr1 = rdmdio->addr_1;
3102 addr2 = rdmdio->addr_2;
3103 value1 = rdmdio->value_1;
3104 stride1 = rdmdio->stride_1;
3105 stride2 = rdmdio->stride_2;
3106 count = rdmdio->count;
3107
3108 poll = rdmdio->poll;
3109 mask = rdmdio->mask;
3110 value2 = rdmdio->value_2;
3111
3112 addr3 = addr1 + stride1;
3113
3114 for (loop_cnt = 0; loop_cnt < count; loop_cnt++) {
3115 ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2,
3116 addr3, mask);
3117 if (ret == -1)
3118 goto error;
3119
3120 addr4 = addr2 - stride1;
3121 ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr4,
3122 value2);
3123 if (ret == -1)
3124 goto error;
3125
3126 addr5 = addr2 - (2 * stride1);
3127 ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr5,
3128 value1);
3129 if (ret == -1)
3130 goto error;
3131
3132 addr6 = addr2 - (3 * stride1);
3133 ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask,
3134 addr6, 0x2);
3135 if (ret == -1)
3136 goto error;
3137
3138 ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2,
3139 addr3, mask);
3140 if (ret == -1)
3141 goto error;
3142
3143 addr7 = addr2 - (4 * stride1);
3144 data = qla8044_ipmdio_rd_reg(vha, addr1, addr3,
3145 mask, addr7);
3146 if (data == -1)
3147 goto error;
3148
3149 selVal = (value2 << 18) | (value1 << 2) | 2;
3150
3151 stride2 = rdmdio->stride_2;
3152 *data_ptr++ = selVal;
3153 *data_ptr++ = data;
3154
3155 value1 = value1 + stride2;
3156 *d_ptr = data_ptr;
3157 }
3158
3159 return 0;
3160
3161error:
3162 return -1;
3163}
3164
3165static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha,
3166 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
3167{
3168 uint32_t addr1, addr2, value1, value2, poll, mask, r_value;
3169 uint32_t wait_count = 0;
3170 struct qla8044_minidump_entry_pollwr *pollwr_hdr;
3171
3172 pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr;
3173 addr1 = pollwr_hdr->addr_1;
3174 addr2 = pollwr_hdr->addr_2;
3175 value1 = pollwr_hdr->value_1;
3176 value2 = pollwr_hdr->value_2;
3177
3178 poll = pollwr_hdr->poll;
3179 mask = pollwr_hdr->mask;
3180
3181 while (wait_count < poll) {
3182 qla8044_rd_reg_indirect(vha, addr1, &r_value);
3183
3184 if ((r_value & poll) != 0)
3185 break;
3186 wait_count++;
3187 }
3188
3189 if (wait_count == poll) {
3190 ql_log(ql_log_warn, vha, 0xb156, "%s: TIMEOUT\n", __func__);
3191 goto error;
3192 }
3193
3194 qla8044_wr_reg_indirect(vha, addr2, value2);
3195 qla8044_wr_reg_indirect(vha, addr1, value1);
3196
3197 wait_count = 0;
3198 while (wait_count < poll) {
3199 qla8044_rd_reg_indirect(vha, addr1, &r_value);
3200
3201 if ((r_value & poll) != 0)
3202 break;
3203 wait_count++;
3204 }
3205
3206 return QLA_SUCCESS;
3207
3208error:
3209 return -1;
3210}
3211
2885/* 3212/*
2886 * 3213 *
2887 * qla8044_collect_md_data - Retrieve firmware minidump data. 3214 * qla8044_collect_md_data - Retrieve firmware minidump data.
@@ -3089,6 +3416,24 @@ qla8044_collect_md_data(struct scsi_qla_host *vha)
3089 if (rval != QLA_SUCCESS) 3416 if (rval != QLA_SUCCESS)
3090 qla8044_mark_entry_skipped(vha, entry_hdr, i); 3417 qla8044_mark_entry_skipped(vha, entry_hdr, i);
3091 break; 3418 break;
3419 case QLA8044_RDDFE:
3420 rval = qla8044_minidump_process_rddfe(vha, entry_hdr,
3421 &data_ptr);
3422 if (rval != QLA_SUCCESS)
3423 qla8044_mark_entry_skipped(vha, entry_hdr, i);
3424 break;
3425 case QLA8044_RDMDIO:
3426 rval = qla8044_minidump_process_rdmdio(vha, entry_hdr,
3427 &data_ptr);
3428 if (rval != QLA_SUCCESS)
3429 qla8044_mark_entry_skipped(vha, entry_hdr, i);
3430 break;
3431 case QLA8044_POLLWR:
3432 rval = qla8044_minidump_process_pollwr(vha, entry_hdr,
3433 &data_ptr);
3434 if (rval != QLA_SUCCESS)
3435 qla8044_mark_entry_skipped(vha, entry_hdr, i);
3436 break;
3092 case QLA82XX_RDNOP: 3437 case QLA82XX_RDNOP:
3093 default: 3438 default:
3094 qla8044_mark_entry_skipped(vha, entry_hdr, i); 3439 qla8044_mark_entry_skipped(vha, entry_hdr, i);
@@ -3110,6 +3455,7 @@ skip_nxt_entry:
3110 "Dump data mismatch: Data collected: " 3455 "Dump data mismatch: Data collected: "
3111 "[0x%x], total_data_size:[0x%x]\n", 3456 "[0x%x], total_data_size:[0x%x]\n",
3112 data_collected, ha->md_dump_size); 3457 data_collected, ha->md_dump_size);
3458 rval = QLA_FUNCTION_FAILED;
3113 goto md_failed; 3459 goto md_failed;
3114 } 3460 }
3115 3461
@@ -3134,10 +3480,12 @@ qla8044_get_minidump(struct scsi_qla_host *vha)
3134 3480
3135 if (!qla8044_collect_md_data(vha)) { 3481 if (!qla8044_collect_md_data(vha)) {
3136 ha->fw_dumped = 1; 3482 ha->fw_dumped = 1;
3483 ha->prev_minidump_failed = 0;
3137 } else { 3484 } else {
3138 ql_log(ql_log_fatal, vha, 0xb0db, 3485 ql_log(ql_log_fatal, vha, 0xb0db,
3139 "%s: Unable to collect minidump\n", 3486 "%s: Unable to collect minidump\n",
3140 __func__); 3487 __func__);
3488 ha->prev_minidump_failed = 1;
3141 } 3489 }
3142} 3490}
3143 3491
diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h
index 2ab2eabab908..ada36057d7cd 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.h
+++ b/drivers/scsi/qla2xxx/qla_nx2.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation 3 * Copyright (c) 2003-2014 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -133,6 +133,7 @@
133#define QLA8044_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4)) 133#define QLA8044_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4))
134#define QLA8044_MAX_LINK_SPEED(f) (0x36F0+(((f) / 4) * 4)) 134#define QLA8044_MAX_LINK_SPEED(f) (0x36F0+(((f) / 4) * 4))
135#define QLA8044_LINK_SPEED_FACTOR 10 135#define QLA8044_LINK_SPEED_FACTOR 10
136#define QLA8044_FUN7_ACTIVE_INDEX 0x80
136 137
137/* FLASH API Defines */ 138/* FLASH API Defines */
138#define QLA8044_FLASH_MAX_WAIT_USEC 100 139#define QLA8044_FLASH_MAX_WAIT_USEC 100
@@ -431,6 +432,50 @@ struct qla8044_minidump_entry_pollrd {
431 uint32_t rsvd_1; 432 uint32_t rsvd_1;
432} __packed; 433} __packed;
433 434
435struct qla8044_minidump_entry_rddfe {
436 struct qla8044_minidump_entry_hdr h;
437 uint32_t addr_1;
438 uint32_t value;
439 uint8_t stride;
440 uint8_t stride2;
441 uint16_t count;
442 uint32_t poll;
443 uint32_t mask;
444 uint32_t modify_mask;
445 uint32_t data_size;
446 uint32_t rsvd;
447
448} __packed;
449
450struct qla8044_minidump_entry_rdmdio {
451 struct qla8044_minidump_entry_hdr h;
452
453 uint32_t addr_1;
454 uint32_t addr_2;
455 uint32_t value_1;
456 uint8_t stride_1;
457 uint8_t stride_2;
458 uint16_t count;
459 uint32_t poll;
460 uint32_t mask;
461 uint32_t value_2;
462 uint32_t data_size;
463
464} __packed;
465
466struct qla8044_minidump_entry_pollwr {
467 struct qla8044_minidump_entry_hdr h;
468 uint32_t addr_1;
469 uint32_t addr_2;
470 uint32_t value_1;
471 uint32_t value_2;
472 uint32_t poll;
473 uint32_t mask;
474 uint32_t data_size;
475 uint32_t rsvd;
476
477} __packed;
478
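
All three new entry layouts above begin with the common qla8044_minidump_entry_hdr, and the capture loop picks the overlay by entry_type. A simplified sketch of that walk; the entry_size stepping is assumed to match the other qla8044 entry handlers:

struct qla8044_minidump_entry_hdr *hdr = first_entry;   /* assumed start */
int i;

for (i = 0; i < num_entries; i++) {
        switch (hdr->entry_type) {
        case QLA8044_POLLWR: {
                struct qla8044_minidump_entry_pollwr *pollwr =
                        (struct qla8044_minidump_entry_pollwr *)hdr;
                /* pollwr->addr_1, pollwr->mask, ... drive the I/O */
                break;
        }
        default:
                /* unknown types are marked skipped, not fatal */
                break;
        }
        hdr = (struct qla8044_minidump_entry_hdr *)
                ((uint8_t *)hdr + hdr->entry_size);
}
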
434/* RDMUX2 Entry */ 479/* RDMUX2 Entry */
435struct qla8044_minidump_entry_rdmux2 { 480struct qla8044_minidump_entry_rdmux2 {
436 struct qla8044_minidump_entry_hdr h; 481 struct qla8044_minidump_entry_hdr h;
@@ -516,6 +561,9 @@ static const uint32_t qla8044_reg_tbl[] = {
516#define QLA8044_DBG_RSVD_ARRAY_LEN 8 561#define QLA8044_DBG_RSVD_ARRAY_LEN 8
517#define QLA8044_DBG_OCM_WNDREG_ARRAY_LEN 16 562#define QLA8044_DBG_OCM_WNDREG_ARRAY_LEN 16
518#define QLA8044_SS_PCI_INDEX 0 563#define QLA8044_SS_PCI_INDEX 0
564#define QLA8044_RDDFE 38
565#define QLA8044_RDMDIO 39
566#define QLA8044_POLLWR 40
519 567
520struct qla8044_minidump_template_hdr { 568struct qla8044_minidump_template_hdr {
521 uint32_t entry_type; 569 uint32_t entry_type;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index afc84814e9bb..d96bfb55e57b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation 3 * Copyright (c) 2003-2014 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -616,7 +616,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
616 616
617 if (sp->flags & SRB_CRC_CTX_DSD_VALID) { 617 if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
618 /* List assured to be having elements */ 618 /* List assured to be having elements */
619 qla2x00_clean_dsd_pool(ha, sp); 619 qla2x00_clean_dsd_pool(ha, sp, NULL);
620 sp->flags &= ~SRB_CRC_CTX_DSD_VALID; 620 sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
621 } 621 }
622 622
@@ -781,7 +781,7 @@ static int
781qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd) 781qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
782{ 782{
783#define ABORT_POLLING_PERIOD 1000 783#define ABORT_POLLING_PERIOD 1000
784#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD)) 784#define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD))
785 unsigned long wait_iter = ABORT_WAIT_ITER; 785 unsigned long wait_iter = ABORT_WAIT_ITER;
786 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 786 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
787 struct qla_hw_data *ha = vha->hw; 787 struct qla_hw_data *ha = vha->hw;
@@ -844,11 +844,8 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
844} 844}
845 845
846/* 846/*
847 * qla2x00_wait_for_reset_ready 847 * qla2x00_wait_for_hba_ready
848 * Wait till the HBA is online after going through 848 * Wait till the HBA is ready before doing driver unload
849 * <= MAX_RETRIES_OF_ISP_ABORT or
850 * finally HBA is disabled ie marked offline or flash
851 * operations are in progress.
852 * 849 *
853 * Input: 850 * Input:
854 * ha - pointer to host adapter structure 851 * ha - pointer to host adapter structure
@@ -857,35 +854,15 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
857 * Does context switching-Release SPIN_LOCK 854 * Does context switching-Release SPIN_LOCK
858 * (if any) before calling this routine. 855 * (if any) before calling this routine.
859 * 856 *
860 * Return:
861 * Success (Adapter is online/no flash ops) : 0
862 * Failed (Adapter is offline/disabled/flash ops in progress) : 1
863 */ 857 */
864static int 858static void
865qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha) 859qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
866{ 860{
867 int return_status;
868 unsigned long wait_online;
869 struct qla_hw_data *ha = vha->hw; 861 struct qla_hw_data *ha = vha->hw;
870 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
871 862
872 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ); 863 while ((!(vha->flags.online) || ha->dpc_active ||
873 while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) || 864 ha->flags.mbox_busy))
874 test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
875 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
876 ha->optrom_state != QLA_SWAITING ||
877 ha->dpc_active) && time_before(jiffies, wait_online))
878 msleep(1000); 865 msleep(1000);
879
880 if (base_vha->flags.online && ha->optrom_state == QLA_SWAITING)
881 return_status = QLA_SUCCESS;
882 else
883 return_status = QLA_FUNCTION_FAILED;
884
885 ql_dbg(ql_dbg_taskm, vha, 0x8019,
886 "%s return status=%d.\n", __func__, return_status);
887
888 return return_status;
889} 866}
890 867
891int 868int
@@ -945,7 +922,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
945 int ret; 922 int ret;
946 unsigned int id, lun; 923 unsigned int id, lun;
947 unsigned long flags; 924 unsigned long flags;
948 int wait = 0; 925 int rval, wait = 0;
949 struct qla_hw_data *ha = vha->hw; 926 struct qla_hw_data *ha = vha->hw;
950 927
951 if (!CMD_SP(cmd)) 928 if (!CMD_SP(cmd))
@@ -974,10 +951,20 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
974 sp_get(sp); 951 sp_get(sp);
975 952
976 spin_unlock_irqrestore(&ha->hardware_lock, flags); 953 spin_unlock_irqrestore(&ha->hardware_lock, flags);
977 if (ha->isp_ops->abort_command(sp)) { 954 rval = ha->isp_ops->abort_command(sp);
978 ret = FAILED; 955 if (rval) {
956 if (rval == QLA_FUNCTION_PARAMETER_ERROR) {
957 /*
958 * Decrement the ref_count since we can't find the
959 * command
960 */
961 atomic_dec(&sp->ref_count);
962 ret = SUCCESS;
963 } else
964 ret = FAILED;
965
979 ql_dbg(ql_dbg_taskm, vha, 0x8003, 966 ql_dbg(ql_dbg_taskm, vha, 0x8003,
980 "Abort command mbx failed cmd=%p.\n", cmd); 967 "Abort command mbx failed cmd=%p, rval=%x.\n", cmd, rval);
981 } else { 968 } else {
982 ql_dbg(ql_dbg_taskm, vha, 0x8004, 969 ql_dbg(ql_dbg_taskm, vha, 0x8004,
983 "Abort command mbx success cmd=%p.\n", cmd); 970 "Abort command mbx success cmd=%p.\n", cmd);
@@ -985,6 +972,12 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
985 } 972 }
986 973
987 spin_lock_irqsave(&ha->hardware_lock, flags); 974 spin_lock_irqsave(&ha->hardware_lock, flags);
975 /*
 976 * Clear the slot in the outstanding_cmds array if we can't find the
977 * command to reclaim the resources.
978 */
979 if (rval == QLA_FUNCTION_PARAMETER_ERROR)
980 vha->req->outstanding_cmds[sp->handle] = NULL;
988 sp->done(ha, sp, 0); 981 sp->done(ha, sp, 0);
989 spin_unlock_irqrestore(&ha->hardware_lock, flags); 982 spin_unlock_irqrestore(&ha->hardware_lock, flags);
990 983
@@ -1236,7 +1229,11 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1236 ql_log(ql_log_info, vha, 0x8018, 1229 ql_log(ql_log_info, vha, 0x8018,
1237 "ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun); 1230 "ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
1238 1231
1239 if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS) 1232 /*
1233 * No point in issuing another reset if one is active. Also do not
1234 * attempt a reset if we are updating flash.
1235 */
1236 if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
1240 goto eh_host_reset_lock; 1237 goto eh_host_reset_lock;
1241 1238
1242 if (vha != base_vha) { 1239 if (vha != base_vha) {
@@ -2270,6 +2267,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
2270 ha->device_type |= DT_IIDMA; 2267 ha->device_type |= DT_IIDMA;
2271 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 2268 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2272 break; 2269 break;
2270 case PCI_DEVICE_ID_QLOGIC_ISP2271:
2271 ha->device_type |= DT_ISP2271;
2272 ha->device_type |= DT_ZIO_SUPPORTED;
2273 ha->device_type |= DT_FWI2;
2274 ha->device_type |= DT_IIDMA;
2275 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2276 break;
2273 } 2277 }
2274 2278
2275 if (IS_QLA82XX(ha)) 2279 if (IS_QLA82XX(ha))
@@ -2346,7 +2350,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2346 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 || 2350 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
2347 pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 || 2351 pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
2348 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 || 2352 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
2349 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071) { 2353 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
2354 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271) {
2350 bars = pci_select_bars(pdev, IORESOURCE_MEM); 2355 bars = pci_select_bars(pdev, IORESOURCE_MEM);
2351 mem_only = 1; 2356 mem_only = 1;
2352 ql_dbg_pci(ql_dbg_init, pdev, 0x0007, 2357 ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2877,6 +2882,7 @@ skip_dpc:
2877 2882
2878 base_vha->flags.init_done = 1; 2883 base_vha->flags.init_done = 1;
2879 base_vha->flags.online = 1; 2884 base_vha->flags.online = 1;
2885 ha->prev_minidump_failed = 0;
2880 2886
2881 ql_dbg(ql_dbg_init, base_vha, 0x00f2, 2887 ql_dbg(ql_dbg_init, base_vha, 0x00f2,
2882 "Init done and hba is online.\n"); 2888 "Init done and hba is online.\n");
@@ -3136,6 +3142,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
3136 base_vha = pci_get_drvdata(pdev); 3142 base_vha = pci_get_drvdata(pdev);
3137 ha = base_vha->hw; 3143 ha = base_vha->hw;
3138 3144
3145 qla2x00_wait_for_hba_ready(base_vha);
3146
3139 set_bit(UNLOADING, &base_vha->dpc_flags); 3147 set_bit(UNLOADING, &base_vha->dpc_flags);
3140 3148
3141 if (IS_QLAFX00(ha)) 3149 if (IS_QLAFX00(ha))
@@ -3645,6 +3653,7 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha)
3645 ha->eft = NULL; 3653 ha->eft = NULL;
3646 ha->eft_dma = 0; 3654 ha->eft_dma = 0;
3647 ha->fw_dumped = 0; 3655 ha->fw_dumped = 0;
3656 ha->fw_dump_cap_flags = 0;
3648 ha->fw_dump_reading = 0; 3657 ha->fw_dump_reading = 0;
3649 ha->fw_dump = NULL; 3658 ha->fw_dump = NULL;
3650 ha->fw_dump_len = 0; 3659 ha->fw_dump_len = 0;
@@ -4913,12 +4922,13 @@ qla2x00_do_dpc(void *data)
4913 if (qlafx00_reset_initialize(base_vha)) { 4922 if (qlafx00_reset_initialize(base_vha)) {
4914 /* Failed. Abort isp later. */ 4923 /* Failed. Abort isp later. */
4915 if (!test_bit(UNLOADING, 4924 if (!test_bit(UNLOADING,
4916 &base_vha->dpc_flags)) 4925 &base_vha->dpc_flags)) {
4917 set_bit(ISP_UNRECOVERABLE, 4926 set_bit(ISP_UNRECOVERABLE,
4918 &base_vha->dpc_flags); 4927 &base_vha->dpc_flags);
4919 ql_dbg(ql_dbg_dpc, base_vha, 4928 ql_dbg(ql_dbg_dpc, base_vha,
4920 0x4021, 4929 0x4021,
4921 "Reset Recovery Failed\n"); 4930 "Reset Recovery Failed\n");
4931 }
4922 } 4932 }
4923 } 4933 }
4924 4934
@@ -5077,8 +5087,10 @@ intr_on_check:
5077 ha->isp_ops->enable_intrs(ha); 5087 ha->isp_ops->enable_intrs(ha);
5078 5088
5079 if (test_and_clear_bit(BEACON_BLINK_NEEDED, 5089 if (test_and_clear_bit(BEACON_BLINK_NEEDED,
5080 &base_vha->dpc_flags)) 5090 &base_vha->dpc_flags)) {
5081 ha->isp_ops->beacon_blink(base_vha); 5091 if (ha->beacon_blink_led == 1)
5092 ha->isp_ops->beacon_blink(base_vha);
5093 }
5082 5094
5083 if (!IS_QLAFX00(ha)) 5095 if (!IS_QLAFX00(ha))
5084 qla2x00_do_dpc_all_vps(base_vha); 5096 qla2x00_do_dpc_all_vps(base_vha);
@@ -5325,7 +5337,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
5325#define FW_ISP82XX 7 5337#define FW_ISP82XX 7
5326#define FW_ISP2031 8 5338#define FW_ISP2031 8
5327#define FW_ISP8031 9 5339#define FW_ISP8031 9
5328#define FW_ISP2071 10 5340#define FW_ISP27XX 10
5329 5341
5330#define FW_FILE_ISP21XX "ql2100_fw.bin" 5342#define FW_FILE_ISP21XX "ql2100_fw.bin"
5331#define FW_FILE_ISP22XX "ql2200_fw.bin" 5343#define FW_FILE_ISP22XX "ql2200_fw.bin"
@@ -5337,7 +5349,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
5337#define FW_FILE_ISP82XX "ql8200_fw.bin" 5349#define FW_FILE_ISP82XX "ql8200_fw.bin"
5338#define FW_FILE_ISP2031 "ql2600_fw.bin" 5350#define FW_FILE_ISP2031 "ql2600_fw.bin"
5339#define FW_FILE_ISP8031 "ql8300_fw.bin" 5351#define FW_FILE_ISP8031 "ql8300_fw.bin"
5340#define FW_FILE_ISP2071 "ql2700_fw.bin" 5352#define FW_FILE_ISP27XX "ql2700_fw.bin"
5341 5353
5342 5354
5343static DEFINE_MUTEX(qla_fw_lock); 5355static DEFINE_MUTEX(qla_fw_lock);
@@ -5353,7 +5365,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
5353 { .name = FW_FILE_ISP82XX, }, 5365 { .name = FW_FILE_ISP82XX, },
5354 { .name = FW_FILE_ISP2031, }, 5366 { .name = FW_FILE_ISP2031, },
5355 { .name = FW_FILE_ISP8031, }, 5367 { .name = FW_FILE_ISP8031, },
5356 { .name = FW_FILE_ISP2071, }, 5368 { .name = FW_FILE_ISP27XX, },
5357}; 5369};
5358 5370
5359struct fw_blob * 5371struct fw_blob *
@@ -5382,8 +5394,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
5382 blob = &qla_fw_blobs[FW_ISP2031]; 5394 blob = &qla_fw_blobs[FW_ISP2031];
5383 } else if (IS_QLA8031(ha)) { 5395 } else if (IS_QLA8031(ha)) {
5384 blob = &qla_fw_blobs[FW_ISP8031]; 5396 blob = &qla_fw_blobs[FW_ISP8031];
5385 } else if (IS_QLA2071(ha)) { 5397 } else if (IS_QLA27XX(ha)) {
5386 blob = &qla_fw_blobs[FW_ISP2071]; 5398 blob = &qla_fw_blobs[FW_ISP27XX];
5387 } else { 5399 } else {
5388 return NULL; 5400 return NULL;
5389 } 5401 }
@@ -5714,6 +5726,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
5714 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) }, 5726 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
5715 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) }, 5727 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
5716 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) }, 5728 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
5729 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
5717 { 0 }, 5730 { 0 },
5718}; 5731};
5719MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 5732MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h
index 46ef0ac48f44..2fb7ebfbbc38 100644
--- a/drivers/scsi/qla2xxx/qla_settings.h
+++ b/drivers/scsi/qla2xxx/qla_settings.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation 3 * Copyright (c) 2003-2014 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index f28123e8ed65..bca173e56f16 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation 3 * Copyright (c) 2003-2014 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -1727,11 +1727,8 @@ qla83xx_beacon_blink(struct scsi_qla_host *vha)
1727 if (IS_QLA2031(ha)) { 1727 if (IS_QLA2031(ha)) {
1728 led_select_value = qla83xx_select_led_port(ha); 1728 led_select_value = qla83xx_select_led_port(ha);
1729 1729
1730 qla83xx_wr_reg(vha, led_select_value, 0x40002000); 1730 qla83xx_wr_reg(vha, led_select_value, 0x40000230);
1731 qla83xx_wr_reg(vha, led_select_value + 4, 0x40002000); 1731 qla83xx_wr_reg(vha, led_select_value + 4, 0x40000230);
1732 msleep(1000);
1733 qla83xx_wr_reg(vha, led_select_value, 0x40004000);
1734 qla83xx_wr_reg(vha, led_select_value + 4, 0x40004000);
1735 } else if (IS_QLA8031(ha)) { 1732 } else if (IS_QLA8031(ha)) {
1736 led_select_value = qla83xx_select_led_port(ha); 1733 led_select_value = qla83xx_select_led_port(ha);
1737 1734
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 0cb73074c199..e632e14180cf 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -104,7 +104,6 @@ static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
104/* 104/*
105 * Global Variables 105 * Global Variables
106 */ 106 */
107static struct kmem_cache *qla_tgt_cmd_cachep;
108static struct kmem_cache *qla_tgt_mgmt_cmd_cachep; 107static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
109static mempool_t *qla_tgt_mgmt_cmd_mempool; 108static mempool_t *qla_tgt_mgmt_cmd_mempool;
110static struct workqueue_struct *qla_tgt_wq; 109static struct workqueue_struct *qla_tgt_wq;
@@ -182,6 +181,11 @@ struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
182void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, 181void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
183 struct atio_from_isp *atio) 182 struct atio_from_isp *atio)
184{ 183{
184 ql_dbg(ql_dbg_tgt, vha, 0xe072,
185 "%s: qla_target(%d): type %x ox_id %04x\n",
186 __func__, vha->vp_idx, atio->u.raw.entry_type,
187 be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
188
185 switch (atio->u.raw.entry_type) { 189 switch (atio->u.raw.entry_type) {
186 case ATIO_TYPE7: 190 case ATIO_TYPE7:
187 { 191 {
@@ -236,6 +240,10 @@ void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
236void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) 240void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
237{ 241{
238 switch (pkt->entry_type) { 242 switch (pkt->entry_type) {
243 case CTIO_CRC2:
244 ql_dbg(ql_dbg_tgt, vha, 0xe073,
245 "qla_target(%d):%s: CRC2 Response pkt\n",
246 vha->vp_idx, __func__);
239 case CTIO_TYPE7: 247 case CTIO_TYPE7:
240 { 248 {
241 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; 249 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
@@ -1120,7 +1128,7 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1120 ctio->u.status1.flags = 1128 ctio->u.status1.flags =
1121 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | 1129 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
1122 CTIO7_FLAGS_TERMINATE); 1130 CTIO7_FLAGS_TERMINATE);
1123 ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id; 1131 ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
1124 1132
1125 qla2x00_start_iocbs(vha, vha->req); 1133 qla2x00_start_iocbs(vha, vha->req);
1126 1134
@@ -1254,6 +1262,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
1254{ 1262{
1255 struct atio_from_isp *atio = &mcmd->orig_iocb.atio; 1263 struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
1256 struct ctio7_to_24xx *ctio; 1264 struct ctio7_to_24xx *ctio;
1265 uint16_t temp;
1257 1266
1258 ql_dbg(ql_dbg_tgt, ha, 0xe008, 1267 ql_dbg(ql_dbg_tgt, ha, 0xe008,
1259 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n", 1268 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
@@ -1284,7 +1293,8 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
1284 ctio->u.status1.flags = (atio->u.isp24.attr << 9) | 1293 ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
1285 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | 1294 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
1286 CTIO7_FLAGS_SEND_STATUS); 1295 CTIO7_FLAGS_SEND_STATUS);
1287 ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 1296 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
1297 ctio->u.status1.ox_id = cpu_to_le16(temp);
1288 ctio->u.status1.scsi_status = 1298 ctio->u.status1.scsi_status =
1289 __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID); 1299 __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
1290 ctio->u.status1.response_len = __constant_cpu_to_le16(8); 1300 ctio->u.status1.response_len = __constant_cpu_to_le16(8);
@@ -1350,13 +1360,42 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
1350 1360
1351 prm->cmd->sg_mapped = 1; 1361 prm->cmd->sg_mapped = 1;
1352 1362
1353 /* 1363 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
1354 * If greater than four sg entries then we need to allocate 1364 /*
1355 * the continuation entries 1365 * If greater than four sg entries then we need to allocate
1356 */ 1366 * the continuation entries
1357 if (prm->seg_cnt > prm->tgt->datasegs_per_cmd) 1367 */
1358 prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt - 1368 if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
1359 prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont); 1369 prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
1370 prm->tgt->datasegs_per_cmd,
1371 prm->tgt->datasegs_per_cont);
1372 } else {
1373 /* DIF */
1374 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
1375 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
1376 prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
1377 prm->tot_dsds = prm->seg_cnt;
1378 } else
1379 prm->tot_dsds = prm->seg_cnt;
1380
1381 if (cmd->prot_sg_cnt) {
1382 prm->prot_sg = cmd->prot_sg;
1383 prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
1384 cmd->prot_sg, cmd->prot_sg_cnt,
1385 cmd->dma_data_direction);
1386 if (unlikely(prm->prot_seg_cnt == 0))
1387 goto out_err;
1388
1389 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
1390 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
 1391 /* Dif Bundling not supported here */
1392 prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
1393 cmd->blk_sz);
1394 prm->tot_dsds += prm->prot_seg_cnt;
1395 } else
1396 prm->tot_dsds += prm->prot_seg_cnt;
1397 }
1398 }
1360 1399
1361 ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n", 1400 ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
1362 prm->seg_cnt, prm->req_cnt); 1401 prm->seg_cnt, prm->req_cnt);
@@ -1377,6 +1416,16 @@ static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
1377 BUG_ON(!cmd->sg_mapped); 1416 BUG_ON(!cmd->sg_mapped);
1378 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); 1417 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
1379 cmd->sg_mapped = 0; 1418 cmd->sg_mapped = 0;
1419
1420 if (cmd->prot_sg_cnt)
1421 pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
1422 cmd->dma_data_direction);
1423
1424 if (cmd->ctx_dsd_alloced)
1425 qla2x00_clean_dsd_pool(ha, NULL, cmd);
1426
1427 if (cmd->ctx)
1428 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
1380} 1429}
1381 1430
1382static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, 1431static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
@@ -1466,6 +1515,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
1466 struct ctio7_to_24xx *pkt; 1515 struct ctio7_to_24xx *pkt;
1467 struct qla_hw_data *ha = vha->hw; 1516 struct qla_hw_data *ha = vha->hw;
1468 struct atio_from_isp *atio = &prm->cmd->atio; 1517 struct atio_from_isp *atio = &prm->cmd->atio;
1518 uint16_t temp;
1469 1519
1470 pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr; 1520 pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
1471 prm->pkt = pkt; 1521 prm->pkt = pkt;
@@ -1494,13 +1544,13 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
1494 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 1544 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
1495 pkt->exchange_addr = atio->u.isp24.exchange_addr; 1545 pkt->exchange_addr = atio->u.isp24.exchange_addr;
1496 pkt->u.status0.flags |= (atio->u.isp24.attr << 9); 1546 pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
1497 pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 1547 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
1548 pkt->u.status0.ox_id = cpu_to_le16(temp);
1498 pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset); 1549 pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
1499 1550
1500 ql_dbg(ql_dbg_tgt, vha, 0xe00c, 1551 ql_dbg(ql_dbg_tgt, vha, 0xe00c,
1501 "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n", 1552 "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
1502 vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, 1553 vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, temp);
1503 le16_to_cpu(pkt->u.status0.ox_id));
1504 return 0; 1554 return 0;
1505} 1555}
1506 1556
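
A note on the recurring ox_id change in this file: the FC header field is big-endian on the wire while the IOCB field is little-endian for the firmware. swab16() yields the same bytes in memory, but it discards the __be16/__le16 annotations that sparse checks; the two-step conversion states the intent, with the temp local existing only to silence a compile warning, as a later comment says. In isolation:

        uint16_t temp;

        temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); /* wire -> CPU order */
        pkt->u.status0.ox_id = cpu_to_le16(temp);        /* CPU -> firmware order */
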
@@ -1665,8 +1715,9 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
1665 return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED; 1715 return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
1666 } 1716 }
1667 1717
1668 ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n", 1718 ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u ox_id %04x\n",
1669 vha->vp_idx, cmd->tag); 1719 vha->vp_idx, cmd->tag,
1720 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
1670 1721
1671 prm->cmd = cmd; 1722 prm->cmd = cmd;
1672 prm->tgt = tgt; 1723 prm->tgt = tgt;
@@ -1902,6 +1953,328 @@ skip_explict_conf:
1902 /* Sense with len > 24, is it possible ??? */ 1953 /* Sense with len > 24, is it possible ??? */
1903} 1954}
1904 1955
1956
1957
1958/* diff */
1959static inline int
1960qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
1961{
1962 /*
1963 * Uncomment when corresponding SCSI changes are done.
1964 *
1965 if (!sp->cmd->prot_chk)
1966 return 0;
1967 *
1968 */
1969 switch (se_cmd->prot_op) {
1970 case TARGET_PROT_DOUT_INSERT:
1971 case TARGET_PROT_DIN_STRIP:
1972 if (ql2xenablehba_err_chk >= 1)
1973 return 1;
1974 break;
1975 case TARGET_PROT_DOUT_PASS:
1976 case TARGET_PROT_DIN_PASS:
1977 if (ql2xenablehba_err_chk >= 2)
1978 return 1;
1979 break;
1980 case TARGET_PROT_DIN_INSERT:
1981 case TARGET_PROT_DOUT_STRIP:
1982 return 1;
1983 default:
1984 break;
1985 }
1986 return 0;
1987}
1988
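
Restating the gate above as a table - HBA protection checking versus the ql2xenablehba_err_chk module parameter:

        prot_op                       HBA err check enabled when
        ---------------------------   ---------------------------
        DOUT_INSERT, DIN_STRIP        ql2xenablehba_err_chk >= 1
        DOUT_PASS, DIN_PASS           ql2xenablehba_err_chk >= 2
        DIN_INSERT, DOUT_STRIP        always
        anything else                 never
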
1989/*
1990 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
1991 *
1992 */
1993static inline void
1994qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
1995{
1996 uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
1997
 1998 /* wait till Mode Sense/Select cmd, modepage Ah, subpage 2
 1999 * have been implemented by TCM, before AppTag is avail.
2000 * Look for modesense_handlers[]
2001 */
2002 ctx->app_tag = 0;
2003 ctx->app_tag_mask[0] = 0x0;
2004 ctx->app_tag_mask[1] = 0x0;
2005
2006 switch (se_cmd->prot_type) {
2007 case TARGET_DIF_TYPE0_PROT:
2008 /*
2009 * No check for ql2xenablehba_err_chk, as it would be an
2010 * I/O error if hba tag generation is not done.
2011 */
2012 ctx->ref_tag = cpu_to_le32(lba);
2013
2014 if (!qlt_hba_err_chk_enabled(se_cmd))
2015 break;
2016
2017 /* enable ALL bytes of the ref tag */
2018 ctx->ref_tag_mask[0] = 0xff;
2019 ctx->ref_tag_mask[1] = 0xff;
2020 ctx->ref_tag_mask[2] = 0xff;
2021 ctx->ref_tag_mask[3] = 0xff;
2022 break;
2023 /*
 2024 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
2025 * 16 bit app tag.
2026 */
2027 case TARGET_DIF_TYPE1_PROT:
2028 ctx->ref_tag = cpu_to_le32(lba);
2029
2030 if (!qlt_hba_err_chk_enabled(se_cmd))
2031 break;
2032
2033 /* enable ALL bytes of the ref tag */
2034 ctx->ref_tag_mask[0] = 0xff;
2035 ctx->ref_tag_mask[1] = 0xff;
2036 ctx->ref_tag_mask[2] = 0xff;
2037 ctx->ref_tag_mask[3] = 0xff;
2038 break;
2039 /*
2040 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
2041 * match LBA in CDB + N
2042 */
2043 case TARGET_DIF_TYPE2_PROT:
2044 ctx->ref_tag = cpu_to_le32(lba);
2045
2046 if (!qlt_hba_err_chk_enabled(se_cmd))
2047 break;
2048
2049 /* enable ALL bytes of the ref tag */
2050 ctx->ref_tag_mask[0] = 0xff;
2051 ctx->ref_tag_mask[1] = 0xff;
2052 ctx->ref_tag_mask[2] = 0xff;
2053 ctx->ref_tag_mask[3] = 0xff;
2054 break;
2055
2056 /* For Type 3 protection: 16 bit GUARD only */
2057 case TARGET_DIF_TYPE3_PROT:
2058 ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
2059 ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
2060 break;
2061 }
2062}
2063
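
For context on the masks being set above: T10 PI appends one 8-byte tuple to every logical block, and the ref/app tag masks in the crc_context select which tuple bytes the HBA compares - hence four ref-tag mask bytes and two app-tag mask bytes. The standard tuple layout, written out as a sketch rather than a driver struct:

struct t10_pi_tuple_sketch {
        __be16 guard_tag;       /* CRC-16 over the data block */
        __be16 app_tag;         /* application tag; unchecked here until
                                 * TCM exposes it, per the comment above */
        __be32 ref_tag;         /* low 32 bits of the LBA for types 1/2 */
};
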
2064
2065static inline int
2066qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
2067{
2068 uint32_t *cur_dsd;
2069 int sgc;
2070 uint32_t transfer_length = 0;
2071 uint32_t data_bytes;
2072 uint32_t dif_bytes;
2073 uint8_t bundling = 1;
2074 uint8_t *clr_ptr;
2075 struct crc_context *crc_ctx_pkt = NULL;
2076 struct qla_hw_data *ha;
2077 struct ctio_crc2_to_fw *pkt;
2078 dma_addr_t crc_ctx_dma;
2079 uint16_t fw_prot_opts = 0;
2080 struct qla_tgt_cmd *cmd = prm->cmd;
2081 struct se_cmd *se_cmd = &cmd->se_cmd;
2082 uint32_t h;
2083 struct atio_from_isp *atio = &prm->cmd->atio;
2084 uint16_t t16;
2085
2086 sgc = 0;
2087 ha = vha->hw;
2088
2089 pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
2090 prm->pkt = pkt;
2091 memset(pkt, 0, sizeof(*pkt));
2092
2093 ql_dbg(ql_dbg_tgt, vha, 0xe071,
2094 "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
2095 vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
2096 prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
2097
2098 if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
2099 (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
2100 bundling = 0;
2101
 2102 /* Compute dif len and adjust data len to include protection */
2103 data_bytes = cmd->bufflen;
2104 dif_bytes = (data_bytes / cmd->blk_sz) * 8;
2105
2106 switch (se_cmd->prot_op) {
2107 case TARGET_PROT_DIN_INSERT:
2108 case TARGET_PROT_DOUT_STRIP:
2109 transfer_length = data_bytes;
2110 data_bytes += dif_bytes;
2111 break;
2112
2113 case TARGET_PROT_DIN_STRIP:
2114 case TARGET_PROT_DOUT_INSERT:
2115 case TARGET_PROT_DIN_PASS:
2116 case TARGET_PROT_DOUT_PASS:
2117 transfer_length = data_bytes + dif_bytes;
2118 break;
2119
2120 default:
2121 BUG();
2122 break;
2123 }
2124
2125 if (!qlt_hba_err_chk_enabled(se_cmd))
2126 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
2127 /* HBA error checking enabled */
2128 else if (IS_PI_UNINIT_CAPABLE(ha)) {
2129 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
2130 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
2131 fw_prot_opts |= PO_DIS_VALD_APP_ESC;
2132 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
2133 fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
2134 }
2135
2136 switch (se_cmd->prot_op) {
2137 case TARGET_PROT_DIN_INSERT:
2138 case TARGET_PROT_DOUT_INSERT:
2139 fw_prot_opts |= PO_MODE_DIF_INSERT;
2140 break;
2141 case TARGET_PROT_DIN_STRIP:
2142 case TARGET_PROT_DOUT_STRIP:
2143 fw_prot_opts |= PO_MODE_DIF_REMOVE;
2144 break;
2145 case TARGET_PROT_DIN_PASS:
2146 case TARGET_PROT_DOUT_PASS:
2147 fw_prot_opts |= PO_MODE_DIF_PASS;
2148 /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
2149 break;
2150 default:/* Normal Request */
2151 fw_prot_opts |= PO_MODE_DIF_PASS;
2152 break;
2153 }
2154
2155
2156 /* ---- PKT ---- */
2157 /* Update entry type to indicate Command Type CRC_2 IOCB */
2158 pkt->entry_type = CTIO_CRC2;
2159 pkt->entry_count = 1;
2160 pkt->vp_index = vha->vp_idx;
2161
2162 h = qlt_make_handle(vha);
2163 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
2164 /*
2165 * CTIO type 7 from the firmware doesn't provide a way to
2166 * know the initiator's LOOP ID, hence we can't find
2167 * the session and, so, the command.
2168 */
2169 return -EAGAIN;
2170 } else
2171 ha->tgt.cmds[h-1] = prm->cmd;
2172
2173
2174 pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
2175 pkt->nport_handle = prm->cmd->loop_id;
2176 pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
2177 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
2178 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
2179 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
2180 pkt->exchange_addr = atio->u.isp24.exchange_addr;
2181
2182 /* silence compile warning */
2183 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2184 pkt->ox_id = cpu_to_le16(t16);
2185
2186 t16 = (atio->u.isp24.attr << 9);
2187 pkt->flags |= cpu_to_le16(t16);
2188 pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
2189
2190 /* Set transfer direction */
2191 if (cmd->dma_data_direction == DMA_TO_DEVICE)
2192 pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN);
2193 else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
2194 pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
2195
2196
2197 pkt->dseg_count = prm->tot_dsds;
2198 /* Fibre channel byte count */
2199 pkt->transfer_length = cpu_to_le32(transfer_length);
2200
2201
2202 /* ----- CRC context -------- */
2203
2204 /* Allocate CRC context from global pool */
2205 crc_ctx_pkt = cmd->ctx =
2206 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
2207
2208 if (!crc_ctx_pkt)
2209 goto crc_queuing_error;
2210
2211 /* Zero out CTX area. */
2212 clr_ptr = (uint8_t *)crc_ctx_pkt;
2213 memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
2214
2215 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
2216 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
2217
2218 /* Set handle */
2219 crc_ctx_pkt->handle = pkt->handle;
2220
2221 qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);
2222
2223 pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
2224 pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
2225 pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
2226
2227
2228 if (!bundling) {
2229 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
2230 } else {
2231 /*
2232 * Configure Bundling if we need to fetch interlaving
2233 * protection PCI accesses
2234 */
2235 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
2236 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
2237 crc_ctx_pkt->u.bundling.dseg_count =
2238 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
2239 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
2240 }
2241
2242 /* Finish the common fields of CRC pkt */
2243 crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
2244 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
2245 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
2246 crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
2247
2248
2249 /* Walks data segments */
2250 pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
2251
2252 if (!bundling && prm->prot_seg_cnt) {
2253 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
2254 prm->tot_dsds, cmd))
2255 goto crc_queuing_error;
2256 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
2257 (prm->tot_dsds - prm->prot_seg_cnt), cmd))
2258 goto crc_queuing_error;
2259
2260 if (bundling && prm->prot_seg_cnt) {
2261 /* Walks dif segments */
2262 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
2263
2264 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
2265 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
2266 prm->prot_seg_cnt, cmd))
2267 goto crc_queuing_error;
2268 }
2269 return QLA_SUCCESS;
2270
2271crc_queuing_error:
2272 /* Cleanup will be performed by the caller */
2273
2274 return QLA_FUNCTION_FAILED;
2275}
2276
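
The length arithmetic in qlt_build_ctio_crc2_pkt() is easy to misread, so a worked example with 512-byte blocks and a 64 KiB buffer:

        dif_bytes = (65536 / 512) * 8 = 1024

        DIN_INSERT / DOUT_STRIP (bundling off):
            transfer_length = 65536            FC byte count, data only
            data_bytes      = 65536 + 1024     crc_ctx byte_count, data + PI

        remaining prot ops (bundling on):
            transfer_length = 65536 + 1024     data and PI both cross the wire
            data_bytes      = 65536
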
2277
1905/* 2278/*
1906 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and * 2279 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
1907 * QLA_TGT_XMIT_STATUS for >= 24xx silicon 2280 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
@@ -1921,9 +2294,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
1921 qlt_check_srr_debug(cmd, &xmit_type); 2294 qlt_check_srr_debug(cmd, &xmit_type);
1922 2295
1923 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018, 2296 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
1924 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, " 2297 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
1925 "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ? 2298 (xmit_type & QLA_TGT_XMIT_STATUS) ?
1926 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction); 2299 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
2300 &cmd->se_cmd);
1927 2301
1928 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, 2302 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
1929 &full_req_cnt); 2303 &full_req_cnt);
@@ -1941,7 +2315,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
1941 if (unlikely(res)) 2315 if (unlikely(res))
1942 goto out_unmap_unlock; 2316 goto out_unmap_unlock;
1943 2317
1944 res = qlt_24xx_build_ctio_pkt(&prm, vha); 2318 if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
2319 res = qlt_build_ctio_crc2_pkt(&prm, vha);
2320 else
2321 res = qlt_24xx_build_ctio_pkt(&prm, vha);
1945 if (unlikely(res != 0)) 2322 if (unlikely(res != 0))
1946 goto out_unmap_unlock; 2323 goto out_unmap_unlock;
1947 2324
@@ -1953,7 +2330,8 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
1953 __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN | 2330 __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
1954 CTIO7_FLAGS_STATUS_MODE_0); 2331 CTIO7_FLAGS_STATUS_MODE_0);
1955 2332
1956 qlt_load_data_segments(&prm, vha); 2333 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
2334 qlt_load_data_segments(&prm, vha);
1957 2335
1958 if (prm.add_status_pkt == 0) { 2336 if (prm.add_status_pkt == 0) {
1959 if (xmit_type & QLA_TGT_XMIT_STATUS) { 2337 if (xmit_type & QLA_TGT_XMIT_STATUS) {
@@ -1983,8 +2361,14 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
1983 ql_dbg(ql_dbg_tgt, vha, 0xe019, 2361 ql_dbg(ql_dbg_tgt, vha, 0xe019,
1984 "Building additional status packet\n"); 2362 "Building additional status packet\n");
1985 2363
2364 /*
 2365 * T10Dif: ctio_crc2_to_fw overlay on top of
2366 * ctio7_to_24xx
2367 */
1986 memcpy(ctio, pkt, sizeof(*ctio)); 2368 memcpy(ctio, pkt, sizeof(*ctio));
2369 /* reset back to CTIO7 */
1987 ctio->entry_count = 1; 2370 ctio->entry_count = 1;
2371 ctio->entry_type = CTIO_TYPE7;
1988 ctio->dseg_count = 0; 2372 ctio->dseg_count = 0;
1989 ctio->u.status1.flags &= ~__constant_cpu_to_le16( 2373 ctio->u.status1.flags &= ~__constant_cpu_to_le16(
1990 CTIO7_FLAGS_DATA_IN); 2374 CTIO7_FLAGS_DATA_IN);
@@ -1993,6 +2377,11 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
1993 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; 2377 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
1994 pkt->u.status0.flags |= __constant_cpu_to_le16( 2378 pkt->u.status0.flags |= __constant_cpu_to_le16(
1995 CTIO7_FLAGS_DONT_RET_CTIO); 2379 CTIO7_FLAGS_DONT_RET_CTIO);
2380
2381 /* qlt_24xx_init_ctio_to_isp will correct
 2382 * all necessary fields that are part of CTIO7.
2383 * There should be no residual of CTIO-CRC2 data.
2384 */
1996 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, 2385 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
1997 &prm); 2386 &prm);
1998 pr_debug("Status CTIO7: %p\n", ctio); 2387 pr_debug("Status CTIO7: %p\n", ctio);
@@ -2041,8 +2430,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2041 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) 2430 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
2042 return -EIO; 2431 return -EIO;
2043 2432
2044 ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)", 2433 ql_dbg(ql_dbg_tgt, vha, 0xe01b,
2045 (int)vha->vp_idx); 2434 "%s: CTIO_start: vha(%d) se_cmd %p ox_id %04x\n",
2435 __func__, (int)vha->vp_idx, &cmd->se_cmd,
2436 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
2046 2437
2047 /* Calculate number of entries and segments required */ 2438 /* Calculate number of entries and segments required */
2048 if (qlt_pci_map_calc_cnt(&prm) != 0) 2439 if (qlt_pci_map_calc_cnt(&prm) != 0)
@@ -2054,14 +2445,19 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2054 res = qlt_check_reserve_free_req(vha, prm.req_cnt); 2445 res = qlt_check_reserve_free_req(vha, prm.req_cnt);
2055 if (res != 0) 2446 if (res != 0)
2056 goto out_unlock_free_unmap; 2447 goto out_unlock_free_unmap;
2448 if (cmd->se_cmd.prot_op)
2449 res = qlt_build_ctio_crc2_pkt(&prm, vha);
2450 else
2451 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2057 2452
2058 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2059 if (unlikely(res != 0)) 2453 if (unlikely(res != 0))
2060 goto out_unlock_free_unmap; 2454 goto out_unlock_free_unmap;
2061 pkt = (struct ctio7_to_24xx *)prm.pkt; 2455 pkt = (struct ctio7_to_24xx *)prm.pkt;
2062 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | 2456 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
2063 CTIO7_FLAGS_STATUS_MODE_0); 2457 CTIO7_FLAGS_STATUS_MODE_0);
2064 qlt_load_data_segments(&prm, vha); 2458
2459 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
2460 qlt_load_data_segments(&prm, vha);
2065 2461
2066 cmd->state = QLA_TGT_STATE_NEED_DATA; 2462 cmd->state = QLA_TGT_STATE_NEED_DATA;
2067 2463
@@ -2079,6 +2475,143 @@ out_unlock_free_unmap:
2079} 2475}
2080EXPORT_SYMBOL(qlt_rdy_to_xfer); 2476EXPORT_SYMBOL(qlt_rdy_to_xfer);
2081 2477
2478
2479/*
2480 * Checks the guard or meta-data for the type of error
2481 * detected by the HBA.
2482 */
2483static inline int
2484qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
2485 struct ctio_crc_from_fw *sts)
2486{
2487 uint8_t *ap = &sts->actual_dif[0];
2488 uint8_t *ep = &sts->expected_dif[0];
2489 uint32_t e_ref_tag, a_ref_tag;
2490 uint16_t e_app_tag, a_app_tag;
2491 uint16_t e_guard, a_guard;
2492 uint64_t lba = cmd->se_cmd.t_task_lba;
2493
2494 a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
2495 a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
2496 a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
2497
2498 e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
2499 e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
2500 e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
2501
2502 ql_dbg(ql_dbg_tgt, vha, 0xe075,
2503 "iocb(s) %p Returned STATUS.\n", sts);
2504
2505 ql_dbg(ql_dbg_tgt, vha, 0xf075,
2506 "dif check TGT cdb 0x%x lba 0x%llu: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
2507 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2508 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
2509
2510 /*
2511 * Ignore sector if:
2512 * For type 3: ref & app tag is all 'f's
2513 * For type 0,1,2: app tag is all 'f's
2514 */
2515 if ((a_app_tag == 0xffff) &&
2516 ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
2517 (a_ref_tag == 0xffffffff))) {
2518 uint32_t blocks_done;
2519
2520 /* 2TB boundary case covered automatically with this */
2521 blocks_done = e_ref_tag - (uint32_t)lba + 1;
2522 cmd->se_cmd.bad_sector = e_ref_tag;
2523 cmd->se_cmd.pi_err = 0;
2524 ql_dbg(ql_dbg_tgt, vha, 0xf074,
2525 "need to return scsi good\n");
2526
2527 /* Update protection tag */
2528 if (cmd->prot_sg_cnt) {
2529 uint32_t i, j = 0, k = 0, num_ent;
2530 struct scatterlist *sg, *sgl;
2531
2532
2533 sgl = cmd->prot_sg;
2534
2535 /* Patch the corresponding protection tags */
2536 for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
2537 num_ent = sg_dma_len(sg) / 8;
2538 if (k + num_ent < blocks_done) {
2539 k += num_ent;
2540 continue;
2541 }
2542 j = blocks_done - k - 1;
2543 k = blocks_done;
2544 break;
2545 }
2546
2547 if (k != blocks_done) {
2548 ql_log(ql_log_warn, vha, 0xf076,
2549 "unexpected tag values tag:lba=%u:%llu)\n",
2550 e_ref_tag, (unsigned long long)lba);
2551 goto out;
2552 }
2553
2554#if 0
2555 struct sd_dif_tuple *spt;
2556 /* TODO:
2557 * This section came from initiator. Is it valid here?
 2558 * should ulp be overridden with the actual val???
2559 */
2560 spt = page_address(sg_page(sg)) + sg->offset;
2561 spt += j;
2562
2563 spt->app_tag = 0xffff;
2564 if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
2565 spt->ref_tag = 0xffffffff;
2566#endif
2567 }
2568
2569 return 0;
2570 }
2571
2572 /* check guard */
2573 if (e_guard != a_guard) {
2574 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
2575 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
2576
2577 ql_log(ql_log_warn, vha, 0xe076,
2578 "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
2579 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2580 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
2581 a_guard, e_guard, cmd);
2582 goto out;
2583 }
2584
2585 /* check ref tag */
2586 if (e_ref_tag != a_ref_tag) {
2587 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
2588 cmd->se_cmd.bad_sector = e_ref_tag;
2589
2590 ql_log(ql_log_warn, vha, 0xe077,
2591 "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
2592 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2593 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
2594 a_guard, e_guard, cmd);
2595 goto out;
2596 }
2597
2598 /* check appl tag */
2599 if (e_app_tag != a_app_tag) {
2600 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
2601 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
2602
2603 ql_log(ql_log_warn, vha, 0xe078,
2604 "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
2605 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2606 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
2607 a_guard, e_guard, cmd);
2608 goto out;
2609 }
2610out:
2611 return 1;
2612}
2613
2614
 /* If hardware_lock held on entry, might drop it, then reacquire */
 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
 static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
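
For readers following the DIF logic above: the 8-byte protection tuple that qlt_handle_dif_error() unpacks is the standard T10 PI layout, big-endian on the wire, with a 2-byte guard (CRC of the data block), a 2-byte application tag, and a 4-byte reference tag. A minimal standalone sketch of the same unpack-and-compare flow in plain userspace C rather than driver code (the helper names and main() below are illustrative, not from the patch):

#include <stdint.h>
#include <stdio.h>

/* 8-byte T10 PI tuple as it appears on the wire (big-endian fields). */
struct dif_tuple {
	uint16_t guard;		/* CRC16 of the data block */
	uint16_t app_tag;	/* application tag */
	uint32_t ref_tag;	/* reference tag, usually low 32 bits of LBA */
};

static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

static void unpack_dif(const uint8_t raw[8], struct dif_tuple *t)
{
	t->guard   = get_be16(raw + 0);
	t->app_tag = get_be16(raw + 2);
	t->ref_tag = get_be32(raw + 4);
}

/* Mirrors the escape rule in the hunk above: an app tag of all 1s
 * (plus a ref tag of all 1s for DIF type 3) marks an unprotected
 * sector that must be ignored rather than failed. */
static int check_dif(const uint8_t actual[8], const uint8_t expected[8],
		     int type3)
{
	struct dif_tuple a, e;

	unpack_dif(actual, &a);
	unpack_dif(expected, &e);

	if (a.app_tag == 0xffff && (!type3 || a.ref_tag == 0xffffffff))
		return 0;	/* escape: sector not protected */
	if (e.guard != a.guard)
		return 1;	/* guard (CRC) miscompare */
	if (e.ref_tag != a.ref_tag)
		return 2;	/* reference tag miscompare */
	if (e.app_tag != a.app_tag)
		return 3;	/* application tag miscompare */
	return 0;
}

int main(void)
{
	uint8_t act[8] = { 0x12, 0x34, 0xff, 0xff, 0, 0, 0, 0x10 };
	uint8_t exp[8] = { 0x12, 0x34, 0x00, 0x00, 0, 0, 0, 0x10 };

	printf("result=%d\n", check_dif(act, exp, 0));	/* 0: escape */
	return 0;
}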
@@ -2089,6 +2622,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
 	struct qla_hw_data *ha = vha->hw;
 	request_t *pkt;
 	int ret = 0;
+	uint16_t temp;
 
 	ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
 
@@ -2125,7 +2659,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
 	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
 	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
 		CTIO7_FLAGS_TERMINATE);
-	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+	ctio24->u.status1.ox_id = cpu_to_le16(temp);
 
 	/* Most likely, it isn't needed */
 	ctio24->u.status1.residual = get_unaligned((uint32_t *)
@@ -2155,21 +2690,46 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
 	rc = __qlt_send_term_exchange(vha, cmd, atio);
 	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
 done:
-	if (rc == 1) {
+	/*
+	 * Terminate exchange will tell fw to release any active CTIO
+	 * that's in FW possession and clean up the exchange.
+	 *
+	 * "cmd->state == QLA_TGT_STATE_ABORTED" means CTIO is still
+	 * down at FW. Free the cmd later when CTIO comes back
+	 * w/aborted(0x2) status.
+	 *
+	 * "cmd->state != QLA_TGT_STATE_ABORTED" means CTIO is already
+	 * back w/some err. Free the cmd now.
+	 */
+	if ((rc == 1) && (cmd->state != QLA_TGT_STATE_ABORTED)) {
 		if (!ha_locked && !in_interrupt())
 			msleep(250); /* just in case */
 
+		if (cmd->sg_mapped)
+			qlt_unmap_sg(vha, cmd);
 		vha->hw->tgt.tgt_ops->free_cmd(cmd);
 	}
+	return;
 }
 
 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
 {
-	BUG_ON(cmd->sg_mapped);
+	struct qla_tgt_sess *sess = cmd->sess;
 
+	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
+	    "%s: se_cmd[%p] ox_id %04x\n",
+	    __func__, &cmd->se_cmd,
+	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
+
+	BUG_ON(cmd->sg_mapped);
 	if (unlikely(cmd->free_sg))
 		kfree(cmd->sg);
-	kmem_cache_free(qla_tgt_cmd_cachep, cmd);
+
+	if (!sess || !sess->se_sess) {
+		WARN_ON(1);
+		return;
+	}
+	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
 }
 EXPORT_SYMBOL(qlt_free_cmd);
 
@@ -2374,6 +2934,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
 		case CTIO_LIP_RESET:
 		case CTIO_TARGET_RESET:
 		case CTIO_ABORTED:
+			/* driver request abort via Terminate exchange */
 		case CTIO_TIMEOUT:
 		case CTIO_INVALID_RX_ID:
 			/* They are OK */
@@ -2404,18 +2965,58 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
 			else
 				return;
 
+		case CTIO_DIF_ERROR: {
+			struct ctio_crc_from_fw *crc =
+				(struct ctio_crc_from_fw *)ctio;
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
+			    "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
+			    vha->vp_idx, status, cmd->state, se_cmd,
+			    *((u64 *)&crc->actual_dif[0]),
+			    *((u64 *)&crc->expected_dif[0]));
+
+			if (qlt_handle_dif_error(vha, cmd, ctio)) {
+				if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+					/* scsi Write/xfer rdy complete */
+					goto skip_term;
+				} else {
+					/* scsi read/xmit response complete:
+					 * call handle_dif_err to send the scsi
+					 * status rather than terminate the
+					 * exchange.
+					 */
+					cmd->state = QLA_TGT_STATE_PROCESSED;
+					ha->tgt.tgt_ops->handle_dif_err(cmd);
+					return;
+				}
+			} else {
+				/* Need to generate a SCSI good completion,
+				 * because FW did not send scsi status.
+				 */
+				status = 0;
+				goto skip_term;
+			}
+			break;
+		}
 		default:
 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
-			    "qla_target(%d): CTIO with error status "
-			    "0x%x received (state %x, se_cmd %p\n",
+			    "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
 			    vha->vp_idx, status, cmd->state, se_cmd);
 			break;
 		}
 
-		if (cmd->state != QLA_TGT_STATE_NEED_DATA)
+
+		/* "cmd->state == QLA_TGT_STATE_ABORTED" means
+		 * cmd is already aborted/terminated, we don't
+		 * need to terminate again. The exchange is already
+		 * cleaned up/freed at FW level. Just clean up at the
+		 * driver level.
+		 */
+		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
+		    (cmd->state != QLA_TGT_STATE_ABORTED)) {
 			if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
 				return;
+		}
 	}
+skip_term:
 
 	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
 		ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
@@ -2444,7 +3045,8 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
 		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
 	}
 
-	if (unlikely(status != CTIO_SUCCESS)) {
+	if (unlikely(status != CTIO_SUCCESS) &&
+	    (cmd->state != QLA_TGT_STATE_ABORTED)) {
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
 		dump_stack();
 	}
@@ -2489,13 +3091,12 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
 /*
  * Process context for I/O path into tcm_qla2xxx code
  */
-static void qlt_do_work(struct work_struct *work)
+static void __qlt_do_work(struct qla_tgt_cmd *cmd)
 {
-	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
 	scsi_qla_host_t *vha = cmd->vha;
 	struct qla_hw_data *ha = vha->hw;
 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
-	struct qla_tgt_sess *sess = NULL;
+	struct qla_tgt_sess *sess = cmd->sess;
 	struct atio_from_isp *atio = &cmd->atio;
 	unsigned char *cdb;
 	unsigned long flags;
@@ -2505,41 +3106,6 @@ static void qlt_do_work(struct work_struct *work)
 	if (tgt->tgt_stop)
 		goto out_term;
 
-	spin_lock_irqsave(&ha->hardware_lock, flags);
-	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
-	    atio->u.isp24.fcp_hdr.s_id);
-	/* Do kref_get() before dropping qla_hw_data->hardware_lock. */
-	if (sess)
-		kref_get(&sess->se_sess->sess_kref);
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
-	if (unlikely(!sess)) {
-		uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id;
-
-		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
-		    "qla_target(%d): Unable to find wwn login"
-		    " (s_id %x:%x:%x), trying to create it manually\n",
-		    vha->vp_idx, s_id[0], s_id[1], s_id[2]);
-
-		if (atio->u.raw.entry_count > 1) {
-			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
-			    "Dropping multy entry cmd %p\n", cmd);
-			goto out_term;
-		}
-
-		mutex_lock(&vha->vha_tgt.tgt_mutex);
-		sess = qlt_make_local_sess(vha, s_id);
-		/* sess has an extra creation ref. */
-		mutex_unlock(&vha->vha_tgt.tgt_mutex);
-
-		if (!sess)
-			goto out_term;
-	}
-
-	cmd->sess = sess;
-	cmd->loop_id = sess->loop_id;
-	cmd->conf_compl_supported = sess->conf_compl_supported;
-
 	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
 	cmd->tag = atio->u.isp24.exchange_addr;
 	cmd->unpacked_lun = scsilun_to_int(
@@ -2563,11 +3129,12 @@ static void qlt_do_work(struct work_struct *work)
 	    atio->u.isp24.fcp_cmnd.add_cdb_len]));
 
 	ql_dbg(ql_dbg_tgt, vha, 0xe022,
-	    "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
-	    cmd, cmd->unpacked_lun, cmd->tag);
+	    "qla_target: START qla cmd: %p se_cmd %p lun: 0x%04x (tag %d) len(%d) ox_id %x\n",
+	    cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length,
+	    cmd->atio.u.isp24.fcp_hdr.ox_id);
 
-	ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
+	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
 	    fcp_task_attr, data_dir, bidi);
 	if (ret != 0)
 		goto out_term;
 	/*
@@ -2586,17 +3153,114 @@ out_term:
 	 */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
-	kmem_cache_free(qla_tgt_cmd_cachep, cmd);
-	if (sess)
+	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+	ha->tgt.tgt_ops->put_sess(sess);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void qlt_do_work(struct work_struct *work)
+{
+	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+	__qlt_do_work(cmd);
+}
+
+static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
+	struct qla_tgt_sess *sess,
+	struct atio_from_isp *atio)
+{
+	struct se_session *se_sess = sess->se_sess;
+	struct qla_tgt_cmd *cmd;
+	int tag;
+
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+	if (tag < 0)
+		return NULL;
+
+	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
+	memset(cmd, 0, sizeof(struct qla_tgt_cmd));
+
+	memcpy(&cmd->atio, atio, sizeof(*atio));
+	cmd->state = QLA_TGT_STATE_NEW;
+	cmd->tgt = vha->vha_tgt.qla_tgt;
+	cmd->vha = vha;
+	cmd->se_cmd.map_tag = tag;
+	cmd->sess = sess;
+	cmd->loop_id = sess->loop_id;
+	cmd->conf_compl_supported = sess->conf_compl_supported;
+
+	return cmd;
+}
+
+static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
+	uint16_t);
+
+static void qlt_create_sess_from_atio(struct work_struct *work)
+{
+	struct qla_tgt_sess_op *op = container_of(work,
+		struct qla_tgt_sess_op, work);
+	scsi_qla_host_t *vha = op->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt_sess *sess;
+	struct qla_tgt_cmd *cmd;
+	unsigned long flags;
+	uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
+	    "qla_target(%d): Unable to find wwn login"
+	    " (s_id %x:%x:%x), trying to create it manually\n",
+	    vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+
+	if (op->atio.u.raw.entry_count > 1) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
+		    "Dropping multy entry atio %p\n", &op->atio);
+		goto out_term;
+	}
+
+	mutex_lock(&vha->vha_tgt.tgt_mutex);
+	sess = qlt_make_local_sess(vha, s_id);
+	/* sess has an extra creation ref. */
+	mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+	if (!sess)
+		goto out_term;
+	/*
+	 * Now obtain a pre-allocated session tag using the original op->atio
+	 * packet header, and dispatch into __qlt_do_work() using the existing
+	 * process context.
+	 */
+	cmd = qlt_get_tag(vha, sess, &op->atio);
+	if (!cmd) {
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
 		ha->tgt.tgt_ops->put_sess(sess);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		kfree(op);
+		return;
+	}
+	/*
+	 * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release
+	 * the extra reference taken above by qlt_make_local_sess()
+	 */
+	__qlt_do_work(cmd);
+	kfree(op);
+	return;
+
+out_term:
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	qlt_send_term_exchange(vha, NULL, &op->atio, 1);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	kfree(op);
+
 }
 
 /* ha->hardware_lock supposed to be held on entry */
 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
 	struct atio_from_isp *atio)
 {
+	struct qla_hw_data *ha = vha->hw;
 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+	struct qla_tgt_sess *sess;
 	struct qla_tgt_cmd *cmd;
 
 	if (unlikely(tgt->tgt_stop)) {
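
qlt_get_tag() in the hunks above swaps the old per-command GFP_ATOMIC kmem_cache allocation for an index into a per-session array of pre-built commands keyed by a tag from percpu_ida, so nothing in the I/O path can fail an allocation; exhaustion degrades to a SAM BUSY instead. A rough standalone model of that allocate-by-tag pattern, with a trivial free-list standing in for percpu_ida (everything below is illustrative, not driver code):

#include <stdlib.h>
#include <string.h>

struct cmd { int tag; /* ... per-command state ... */ };

struct tag_pool {
	struct cmd *cmd_map;	/* pre-allocated, one slot per tag */
	int *free_tags;		/* stack of free tag numbers */
	int top, num_tags;
};

static int pool_init(struct tag_pool *p, int num_tags)
{
	p->cmd_map = calloc(num_tags, sizeof(*p->cmd_map));
	p->free_tags = malloc(num_tags * sizeof(int));
	if (!p->cmd_map || !p->free_tags)
		return -1;
	for (int i = 0; i < num_tags; i++)
		p->free_tags[i] = i;
	p->top = p->num_tags = num_tags;
	return 0;
}

/* Allocation is just "pop a tag, reset its slot": no heap allocation
 * in the hot path, and the tag doubles as the command's identity. */
static struct cmd *get_tag(struct tag_pool *p)
{
	if (p->top == 0)
		return NULL;		/* pool exhausted: report BUSY */
	int tag = p->free_tags[--p->top];
	struct cmd *c = &p->cmd_map[tag];
	memset(c, 0, sizeof(*c));
	c->tag = tag;
	return c;
}

static void free_tag(struct tag_pool *p, struct cmd *c)
{
	p->free_tags[p->top++] = c->tag;
}

int main(void)
{
	struct tag_pool pool;

	if (pool_init(&pool, 4))
		return 1;
	struct cmd *c = get_tag(&pool);
	free_tag(&pool, c);
	return 0;
}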
@@ -2605,18 +3269,31 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
 		return -EFAULT;
 	}
 
-	cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC);
+	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
+	if (unlikely(!sess)) {
+		struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
+						     GFP_ATOMIC);
+		if (!op)
+			return -ENOMEM;
+
+		memcpy(&op->atio, atio, sizeof(*atio));
+		INIT_WORK(&op->work, qlt_create_sess_from_atio);
+		queue_work(qla_tgt_wq, &op->work);
+		return 0;
+	}
+	/*
+	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
+	 */
+	kref_get(&sess->se_sess->sess_kref);
+
+	cmd = qlt_get_tag(vha, sess, atio);
 	if (!cmd) {
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
 		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
+		ha->tgt.tgt_ops->put_sess(sess);
 		return -ENOMEM;
 	}
 
-	memcpy(&cmd->atio, atio, sizeof(*atio));
-	cmd->state = QLA_TGT_STATE_NEW;
-	cmd->tgt = vha->vha_tgt.qla_tgt;
-	cmd->vha = vha;
-
 	INIT_WORK(&cmd->work, qlt_do_work);
 	queue_work(qla_tgt_wq, &cmd->work);
 	return 0;
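
The rewritten qlt_handle_cmd_for_atio() above splits the work: the lock-held fast path only looks up the session and grabs a tag, while unknown initiators are packaged into a qla_tgt_sess_op and bounced to qla_tgt_wq, where session creation is free to sleep. A toy single-threaded model of that defer-unknown-sessions shape (purely illustrative, assuming a 3-byte s_id key as in the driver):

#include <stdio.h>
#include <string.h>

#define MAX_SESS 8
#define MAX_DEFER 8

struct sess { unsigned char s_id[3]; int used; };

static struct sess sess_tbl[MAX_SESS];
static unsigned char deferred[MAX_DEFER][3];
static int n_deferred;

static struct sess *find_sess(const unsigned char s_id[3])
{
	for (int i = 0; i < MAX_SESS; i++)
		if (sess_tbl[i].used && !memcmp(sess_tbl[i].s_id, s_id, 3))
			return &sess_tbl[i];
	return NULL;
}

/* Fast path: may run in "interrupt" context, so it never creates a
 * session itself; unknown initiators are queued for the slow path. */
static int handle_cmd(const unsigned char s_id[3])
{
	if (find_sess(s_id))
		return 0;		/* dispatch immediately */
	if (n_deferred == MAX_DEFER)
		return -1;		/* drop / terminate exchange */
	memcpy(deferred[n_deferred++], s_id, 3);
	return 1;			/* handled later from a worker */
}

/* Slow path: process context, allowed to block while logging in. */
static void drain_deferred(void)
{
	while (n_deferred) {
		unsigned char *s_id = deferred[--n_deferred];
		for (int i = 0; i < MAX_SESS; i++) {
			if (!sess_tbl[i].used) {
				memcpy(sess_tbl[i].s_id, s_id, 3);
				sess_tbl[i].used = 1;
				break;
			}
		}
	}
}

int main(void)
{
	unsigned char id[3] = { 0x01, 0x02, 0x03 };

	printf("first cmd: %d\n", handle_cmd(id));	/* 1: deferred */
	drain_deferred();
	printf("second cmd: %d\n", handle_cmd(id));	/* 0: session hit */
	return 0;
}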
@@ -3527,11 +4204,11 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
 	switch (atio->u.raw.entry_type) {
 	case ATIO_TYPE7:
 		ql_dbg(ql_dbg_tgt, vha, 0xe02d,
-		    "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, "
-		    "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n",
+		    "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, cdb %x, add_cdb_len %x, data_length %04x, s_id %02x%02x%02x\n",
 		    vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
 		    atio->u.isp24.fcp_cmnd.rddata,
 		    atio->u.isp24.fcp_cmnd.wrdata,
+		    atio->u.isp24.fcp_cmnd.cdb[0],
 		    atio->u.isp24.fcp_cmnd.add_cdb_len,
 		    be32_to_cpu(get_unaligned((uint32_t *)
 			&atio->u.isp24.fcp_cmnd.add_cdb[
@@ -3629,11 +4306,13 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
 	tgt->irq_cmd_count++;
 
 	switch (pkt->entry_type) {
+	case CTIO_CRC2:
 	case CTIO_TYPE7:
 	{
 		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
-		ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n",
-		    vha->vp_idx);
+		ql_dbg(ql_dbg_tgt, vha, 0xe030,
+		    "CTIO[0x%x] 12/CTIO7 7A/CRC2: instance %d\n",
+		    entry->entry_type, vha->vp_idx);
 		qlt_do_ctio_completion(vha, entry->handle,
 		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
 		    entry);
@@ -4768,6 +5447,7 @@ qlt_24xx_process_response_error(struct scsi_qla_host *vha,
 	case ABTS_RESP_24XX:
 	case CTIO_TYPE7:
 	case NOTIFY_ACK_TYPE:
+	case CTIO_CRC2:
 		return 1;
 	default:
 		return 0;
@@ -4911,23 +5591,13 @@ int __init qlt_init(void)
 	if (!QLA_TGT_MODE_ENABLED())
 		return 0;
 
-	qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
-	    sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
-	    NULL);
-	if (!qla_tgt_cmd_cachep) {
-		ql_log(ql_log_fatal, NULL, 0xe06c,
-		    "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
-		return -ENOMEM;
-	}
-
 	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
 	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
 	    qla_tgt_mgmt_cmd), 0, NULL);
 	if (!qla_tgt_mgmt_cmd_cachep) {
 		ql_log(ql_log_fatal, NULL, 0xe06d,
 		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
-		ret = -ENOMEM;
-		goto out;
+		return -ENOMEM;
 	}
 
 	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
@@ -4955,8 +5625,6 @@ out_cmd_mempool:
 	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
 out_mgmt_cmd_cachep:
 	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
-out:
-	kmem_cache_destroy(qla_tgt_cmd_cachep);
 	return ret;
 }
4962 5630
@@ -4968,5 +5636,4 @@ void qlt_exit(void)
4968 destroy_workqueue(qla_tgt_wq); 5636 destroy_workqueue(qla_tgt_wq);
4969 mempool_destroy(qla_tgt_mgmt_cmd_mempool); 5637 mempool_destroy(qla_tgt_mgmt_cmd_mempool);
4970 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); 5638 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
4971 kmem_cache_destroy(qla_tgt_cmd_cachep);
4972} 5639}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index ce33d8c26406..d1d24fb0160a 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -293,6 +293,7 @@ struct ctio_to_2xxx {
 #define CTIO_ABORTED			0x02
 #define CTIO_INVALID_RX_ID		0x08
 #define CTIO_TIMEOUT			0x0B
+#define CTIO_DIF_ERROR			0x0C	/* DIF error detected */
 #define CTIO_LIP_RESET			0x0E
 #define CTIO_TARGET_RESET		0x17
 #define CTIO_PORT_UNAVAILABLE		0x28
@@ -315,7 +316,7 @@ struct fcp_hdr {
 	uint8_t  seq_id;
 	uint8_t  df_ctl;
 	uint16_t seq_cnt;
-	uint16_t ox_id;
+	__be16   ox_id;
 	uint16_t rx_id;
 	uint32_t parameter;
 } __packed;
@@ -440,9 +441,9 @@ struct ctio7_to_24xx {
 	union {
 		struct {
 			uint16_t reserved1;
-			uint16_t flags;
+			__le16 flags;
 			uint32_t residual;
-			uint16_t ox_id;
+			__le16 ox_id;
 			uint16_t scsi_status;
 			uint32_t relative_offset;
 			uint32_t reserved2;
@@ -457,7 +458,7 @@ struct ctio7_to_24xx {
 			uint16_t sense_length;
 			uint16_t flags;
 			uint32_t residual;
-			uint16_t ox_id;
+			__le16 ox_id;
 			uint16_t scsi_status;
 			uint16_t response_len;
 			uint16_t reserved;
@@ -498,11 +499,12 @@ struct ctio7_from_24xx {
 #define CTIO7_FLAGS_DONT_RET_CTIO	BIT_8
 #define CTIO7_FLAGS_STATUS_MODE_0	0
 #define CTIO7_FLAGS_STATUS_MODE_1	BIT_6
+#define CTIO7_FLAGS_STATUS_MODE_2	BIT_7
 #define CTIO7_FLAGS_EXPLICIT_CONFORM	BIT_5
 #define CTIO7_FLAGS_CONFIRM_SATISF	BIT_4
 #define CTIO7_FLAGS_DSD_PTR		BIT_2
-#define CTIO7_FLAGS_DATA_IN		BIT_1
-#define CTIO7_FLAGS_DATA_OUT		BIT_0
+#define CTIO7_FLAGS_DATA_IN		BIT_1	/* data to initiator */
+#define CTIO7_FLAGS_DATA_OUT		BIT_0	/* data from initiator */
 
 #define ELS_PLOGI			0x3
 #define ELS_FLOGI			0x4
@@ -514,6 +516,68 @@ struct ctio7_from_24xx {
 #define ELS_ADISC			0x52
 
 /*
+ * CTIO Type CRC_2 IOCB
+ */
+struct ctio_crc2_to_fw {
+	uint8_t entry_type;		/* Entry type. */
+#define CTIO_CRC2 0x7A
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+	uint16_t nport_handle;		/* N_PORT handle. */
+	__le16 timeout;			/* Command timeout. */
+
+	uint16_t dseg_count;		/* Data segment count. */
+	uint8_t  vp_index;
+	uint8_t  add_flags;		/* additional flags */
+#define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3
+
+	uint8_t  initiator_id[3];	/* initiator ID */
+	uint8_t  reserved1;
+	uint32_t exchange_addr;		/* rcv exchange address */
+	uint16_t reserved2;
+	__le16 flags;			/* refer to CTIO7 flags values */
+	uint32_t residual;
+	__le16 ox_id;
+	uint16_t scsi_status;
+	__le32 relative_offset;
+	uint32_t reserved5;
+	__le32 transfer_length;		/* total fc transfer length */
+	uint32_t reserved6;
+	__le32 crc_context_address[2];	/* Data segment address. */
+	uint16_t crc_context_len;	/* Data segment length. */
+	uint16_t reserved_1;		/* MUST be set to 0. */
+} __packed;
+
+/* CTIO Type CRC_x Status IOCB */
+struct ctio_crc_from_fw {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+	uint16_t status;
+	uint16_t timeout;		/* Command timeout. */
+	uint16_t dseg_count;		/* Data segment count. */
+	uint32_t reserved1;
+	uint16_t state_flags;
+#define CTIO_CRC_SF_DIF_CHOPPED BIT_4
+
+	uint32_t exchange_address;	/* rcv exchange address */
+	uint16_t reserved2;
+	uint16_t flags;
+	uint32_t resid_xfer_length;
+	uint16_t ox_id;
+	uint8_t  reserved3[12];
+	uint16_t runt_guard;		/* reported runt blk guard */
+	uint8_t  actual_dif[8];
+	uint8_t  expected_dif[8];
+} __packed;
+
+/*
  * ISP queue - ABTS received/response entries structure definition for 24xx.
  */
 #define ABTS_RECV_24XX		0x54 /* ABTS received (for 24xx) */
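
Both CRC IOCBs above are fixed-size ISP queue entries; summing the declared fields of ctio_crc2_to_fw gives exactly 64 bytes, which matches the usual 64-byte IOCB entry size for this hardware family (stated here as an assumption from convention, not from the patch text). A standalone compile-time check of that arithmetic on a mirrored struct:

#include <stdint.h>

/* Userspace mirror of the packed IOCB layout added in the hunk above;
 * __le16/__le32 collapse to plain fixed-width integers here. */
struct ctio_crc2_to_fw_model {
	uint8_t  entry_type, entry_count, sys_define, entry_status;
	uint32_t handle;
	uint16_t nport_handle, timeout;
	uint16_t dseg_count;
	uint8_t  vp_index, add_flags;
	uint8_t  initiator_id[3], reserved1;
	uint32_t exchange_addr;
	uint16_t reserved2, flags;
	uint32_t residual;
	uint16_t ox_id, scsi_status;
	uint32_t relative_offset, reserved5, transfer_length, reserved6;
	uint32_t crc_context_address[2];
	uint16_t crc_context_len, reserved_1;
} __attribute__((packed));

/* A layout mistake in a packed IOCB shows up immediately here. */
_Static_assert(sizeof(struct ctio_crc2_to_fw_model) == 64,
	       "CTIO CRC2 IOCB must be one 64-byte queue entry");

int main(void) { return 0; }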
@@ -641,6 +705,7 @@ struct qla_tgt_func_tmpl {
 	int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
 			unsigned char *, uint32_t, int, int, int);
 	void (*handle_data)(struct qla_tgt_cmd *);
+	void (*handle_dif_err)(struct qla_tgt_cmd *);
 	int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
 			uint32_t);
 	void (*free_cmd)(struct qla_tgt_cmd *);
@@ -805,6 +870,12 @@ struct qla_tgt {
 	struct list_head tgt_list_entry;
 };
 
+struct qla_tgt_sess_op {
+	struct scsi_qla_host *vha;
+	struct atio_from_isp atio;
+	struct work_struct work;
+};
+
 /*
  * Equivalent to IT Nexus (Initiator-Target)
  */
@@ -829,9 +900,9 @@ struct qla_tgt_sess {
 };
 
 struct qla_tgt_cmd {
+	struct se_cmd se_cmd;
 	struct qla_tgt_sess *sess;
 	int state;
-	struct se_cmd se_cmd;
 	struct work_struct free_work;
 	struct work_struct work;
 	/* Sense buffer that will be mapped into outgoing status */
@@ -843,6 +914,7 @@ struct qla_tgt_cmd {
 	unsigned int free_sg:1;
 	unsigned int aborted:1; /* Needed in case of SRR */
 	unsigned int write_data_transferred:1;
+	unsigned int ctx_dsd_alloced:1;
 
 	struct scatterlist *sg;	/* cmd data buffer SG vector */
 	int sg_cnt;		/* SG segments count */
@@ -857,6 +929,12 @@ struct qla_tgt_cmd {
 	struct scsi_qla_host *vha;
 
 	struct atio_from_isp atio;
+	/* t10dif */
+	struct scatterlist *prot_sg;
+	uint32_t prot_sg_cnt;
+	uint32_t blk_sz;
+	struct crc_context *ctx;
+
 };
 
 struct qla_tgt_sess_work_param {
@@ -901,6 +979,10 @@ struct qla_tgt_prm {
 	int sense_buffer_len;
 	int residual;
 	int add_status_pkt;
+	/* dif */
+	struct scatterlist *prot_sg;
+	uint16_t prot_seg_cnt;
+	uint16_t tot_dsds;
 };
 
 struct qla_tgt_srr_imm {
@@ -976,6 +1058,8 @@ extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *,
 extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
 extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
 extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
+extern int qlt_rdy_to_xfer_dif(struct qla_tgt_cmd *);
+extern int qlt_xmit_response_dif(struct qla_tgt_cmd *, int, uint8_t);
 extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
 extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
 extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index a804e9b744bb..cb9a0c4bc419 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -201,7 +201,6 @@ qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
 		ql_dbg(ql_dbg_misc, NULL, 0xd014,
 		    "%s: @%x\n", __func__, offset);
 	}
-	qla27xx_insert32(offset, buf, len);
 	qla27xx_read32(window, buf, len);
 }
 
@@ -220,7 +219,7 @@ qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
 
 static inline void
 qla27xx_read_window(__iomem struct device_reg_24xx *reg,
-	uint32_t base, uint offset, uint count, uint width, void *buf,
+	uint32_t addr, uint offset, uint count, uint width, void *buf,
 	ulong *len)
 {
 	void *window = (void *)reg + offset;
@@ -229,14 +228,14 @@ qla27xx_read_window(__iomem struct device_reg_24xx *reg,
 	if (buf) {
 		ql_dbg(ql_dbg_misc, NULL, 0xd016,
 		    "%s: base=%x offset=%x count=%x width=%x\n",
-		    __func__, base, offset, count, width);
+		    __func__, addr, offset, count, width);
 	}
-	qla27xx_write_reg(reg, IOBASE_ADDR, base, buf);
+	qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
 	while (count--) {
-		qla27xx_insert32(base, buf, len);
+		qla27xx_insert32(addr, buf, len);
 		readn(window, buf, len);
 		window += width;
-		base += width;
+		addr++;
 	}
 }
 
@@ -336,7 +335,8 @@ qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
 
 	ql_dbg(ql_dbg_misc, vha, 0xd204,
 	    "%s: rdpci [%lx]\n", __func__, *len);
-	qla27xx_read_reg(reg, ent->t260.pci_addr, buf, len);
+	qla27xx_insert32(ent->t260.pci_offset, buf, len);
+	qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len);
 
 	return false;
 }
@@ -349,7 +349,7 @@ qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
 
 	ql_dbg(ql_dbg_misc, vha, 0xd205,
 	    "%s: wrpci [%lx]\n", __func__, *len);
-	qla27xx_write_reg(reg, ent->t261.pci_addr, ent->t261.write_data, buf);
+	qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf);
 
 	return false;
 }
@@ -392,9 +392,9 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
 		goto done;
 	}
 
-	if (end < start) {
+	if (end < start || end == 0) {
 		ql_dbg(ql_dbg_misc, vha, 0xd023,
-		    "%s: bad range (start=%x end=%x)\n", __func__,
+		    "%s: unusable range (start=%x end=%x)\n", __func__,
 		    ent->t262.end_addr, ent->t262.start_addr);
 		qla27xx_skip_entry(ent, buf);
 		goto done;
@@ -452,17 +452,15 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
 		ql_dbg(ql_dbg_misc, vha, 0xd025,
 		    "%s: unsupported atio queue\n", __func__);
 		qla27xx_skip_entry(ent, buf);
-		goto done;
 	} else {
 		ql_dbg(ql_dbg_misc, vha, 0xd026,
 		    "%s: unknown queue %u\n", __func__, ent->t263.queue_type);
 		qla27xx_skip_entry(ent, buf);
-		goto done;
 	}
 
 	if (buf)
 		ent->t263.num_queues = count;
-done:
+
 	return false;
 }
 
@@ -503,7 +501,7 @@ qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
 	ql_dbg(ql_dbg_misc, vha, 0xd209,
 	    "%s: pause risc [%lx]\n", __func__, *len);
 	if (buf)
-		qla24xx_pause_risc(reg);
+		qla24xx_pause_risc(reg, vha->hw);
 
 	return false;
 }
@@ -590,7 +588,6 @@ qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
 	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
-	void *window = (void *)reg + 0xc4;
 	ulong dwords = ent->t270.count;
 	ulong addr = ent->t270.addr;
 
@@ -599,10 +596,9 @@ qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
 	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
 	while (dwords--) {
 		qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
-		qla27xx_read_reg(reg, 0xc4, buf, len);
 		qla27xx_insert32(addr, buf, len);
-		qla27xx_read32(window, buf, len);
-		addr++;
+		qla27xx_read_reg(reg, 0xc4, buf, len);
+		addr += sizeof(uint32_t);
 	}
 
 	return false;
@@ -614,12 +610,12 @@ qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
 {
 	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
 	ulong addr = ent->t271.addr;
+	ulong data = ent->t271.data;
 
 	ql_dbg(ql_dbg_misc, vha, 0xd20f,
 	    "%s: wrremreg [%lx]\n", __func__, *len);
 	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
-	qla27xx_read_reg(reg, 0xc4, buf, len);
-	qla27xx_insert32(addr, buf, len);
+	qla27xx_write_reg(reg, 0xc4, data, buf);
 	qla27xx_write_reg(reg, 0xc0, addr, buf);
 
 	return false;
@@ -662,9 +658,59 @@ qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
 		    "%s: failed pcicfg read at %lx\n", __func__, addr);
 		qla27xx_insert32(addr, buf, len);
 		qla27xx_insert32(value, buf, len);
-		addr += 4;
+		addr += sizeof(uint32_t);
+	}
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	uint count = 0;
+	uint i;
+
+	ql_dbg(ql_dbg_misc, vha, 0xd212,
+	    "%s: getqsh(%x) [%lx]\n", __func__, ent->t274.queue_type, *len);
+	if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
+		for (i = 0; i < vha->hw->max_req_queues; i++) {
+			struct req_que *req = vha->hw->req_q_map[i];
+			if (req || !buf) {
+				qla27xx_insert16(i, buf, len);
+				qla27xx_insert16(1, buf, len);
+				qla27xx_insert32(req && req->out_ptr ?
+				    *req->out_ptr : 0, buf, len);
+				count++;
+			}
+		}
+	} else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
+		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
+			struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+			if (rsp || !buf) {
+				qla27xx_insert16(i, buf, len);
+				qla27xx_insert16(1, buf, len);
+				qla27xx_insert32(rsp && rsp->in_ptr ?
+				    *rsp->in_ptr : 0, buf, len);
+				count++;
+			}
+		}
+	} else if (ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
+		ql_dbg(ql_dbg_misc, vha, 0xd02e,
+		    "%s: unsupported atio queue\n", __func__);
+		qla27xx_skip_entry(ent, buf);
+	} else {
+		ql_dbg(ql_dbg_misc, vha, 0xd02f,
+		    "%s: unknown queue %u\n", __func__, ent->t274.queue_type);
+		qla27xx_skip_entry(ent, buf);
 	}
 
+	if (buf)
+		ent->t274.num_queues = count;
+
+	if (!count)
+		qla27xx_skip_entry(ent, buf);
+
 	return false;
 }
 
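
Each queue the new t274 handler captures contributes one small record: a 16-bit queue id, a 16-bit entry count of 1, and the 32-bit shadow in/out pointer value, written through qla27xx_insert16/32. A standalone sketch of that record emission, including the buf == NULL sizing pass the template code appears to rely on (the helpers below are stand-ins, not the driver's):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Append scalars to the dump image, advancing *len. When buf is NULL
 * only *len grows, which is how the template walk can size the dump
 * before actually capturing it. */
static void insert16(uint16_t v, uint8_t *buf, unsigned long *len)
{
	if (buf)
		memcpy(buf + *len, &v, 2);	/* little-endian host assumed */
	*len += 2;
}

static void insert32(uint32_t v, uint8_t *buf, unsigned long *len)
{
	if (buf)
		memcpy(buf + *len, &v, 4);
	*len += 4;
}

int main(void)
{
	uint8_t buf[64];
	unsigned long len = 0;

	/* one shadow-queue record: id=0, count=1, shadow index 0x2a */
	insert16(0, buf, &len);
	insert16(1, buf, &len);
	insert32(0x2a, buf, &len);
	printf("record bytes: %lu\n", len);	/* 8 */
	return 0;
}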
@@ -709,6 +755,7 @@ static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = {
 	{ ENTRY_TYPE_WRREMREG		, qla27xx_fwdt_entry_t271  } ,
 	{ ENTRY_TYPE_RDREMRAM		, qla27xx_fwdt_entry_t272  } ,
 	{ ENTRY_TYPE_PCICFG		, qla27xx_fwdt_entry_t273  } ,
+	{ ENTRY_TYPE_GET_SHADOW		, qla27xx_fwdt_entry_t274  } ,
 	{ -1				, qla27xx_fwdt_entry_other }
 };
 
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
index c9d2fff4d964..1967424c8e64 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.h
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -52,6 +52,7 @@ struct __packed qla27xx_fwdt_template {
 #define ENTRY_TYPE_WRREMREG		271
 #define ENTRY_TYPE_RDREMRAM		272
 #define ENTRY_TYPE_PCICFG		273
+#define ENTRY_TYPE_GET_SHADOW		274
 
 #define CAPTURE_FLAG_PHYS_ONLY		BIT_0
 #define CAPTURE_FLAG_PHYS_VIRT		BIT_1
@@ -109,12 +110,12 @@ struct __packed qla27xx_fwdt_entry {
 	} t259;
 
 	struct __packed {
-		uint8_t pci_addr;
+		uint8_t pci_offset;
 		uint8_t reserved[3];
 	} t260;
 
 	struct __packed {
-		uint8_t pci_addr;
+		uint8_t pci_offset;
 		uint8_t reserved[3];
 		uint32_t write_data;
 	} t261;
@@ -186,6 +187,12 @@ struct __packed qla27xx_fwdt_entry {
 		uint32_t addr;
 		uint32_t count;
 	} t273;
+
+	struct __packed {
+		uint32_t num_queues;
+		uint8_t  queue_type;
+		uint8_t  reserved[3];
+	} t274;
 	};
 };
 
@@ -202,4 +209,8 @@ struct __packed qla27xx_fwdt_entry {
 #define T268_BUF_TYPE_EXCH_BUFOFF	2
 #define T268_BUF_TYPE_EXTD_LOGIN	3
 
+#define T274_QUEUE_TYPE_REQ_SHAD	1
+#define T274_QUEUE_TYPE_RSP_SHAD	2
+#define T274_QUEUE_TYPE_ATIO_SHAD	3
+
 #endif
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index e36b94712544..4d2c98cbec4f 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -1,13 +1,13 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.07.00.02-k"
+#define QLA2XXX_VERSION      "8.07.00.08-k"
 
 #define QLA_DRIVER_MAJOR_VER	8
 #define QLA_DRIVER_MINOR_VER	7
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 68fb66fdb757..e2beab962096 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -472,6 +472,11 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
 	cmd->sg_cnt = se_cmd->t_data_nents;
 	cmd->sg = se_cmd->t_data_sg;
 
+	cmd->prot_sg_cnt = se_cmd->t_prot_nents;
+	cmd->prot_sg = se_cmd->t_prot_sg;
+	cmd->blk_sz  = se_cmd->se_dev->dev_attrib.block_size;
+	se_cmd->pi_err = 0;
+
 	/*
 	 * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
 	 * the SGL mappings into PCIe memory for incoming FCP WRITE data.
@@ -567,8 +572,13 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
 		return;
 	}
 
-	transport_generic_request_failure(&cmd->se_cmd,
-	    TCM_CHECK_CONDITION_ABORT_CMD);
+	if (cmd->se_cmd.pi_err)
+		transport_generic_request_failure(&cmd->se_cmd,
+		    cmd->se_cmd.pi_err);
+	else
+		transport_generic_request_failure(&cmd->se_cmd,
+		    TCM_CHECK_CONDITION_ABORT_CMD);
+
 	return;
 }
 
@@ -584,6 +594,27 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
 	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
 }
 
+static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
+{
+	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+	/* Take an extra kref to prevent the cmd from being freed too
+	 * early: we need to wait for the SCSI status/check condition
+	 * generated by transport_generic_request_failure() to finish
+	 * being sent.
+	 */
+	kref_get(&cmd->se_cmd.cmd_kref);
+	transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err);
+}
+
+/*
+ * Called from qla_target.c:qlt_do_ctio_completion()
+ */
+static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd)
+{
+	INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work);
+	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+}
+
 /*
  * Called from qla_target.c:qlt_issue_task_mgmt()
  */
@@ -610,6 +641,11 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
 	cmd->sg = se_cmd->t_data_sg;
 	cmd->offset = 0;
 
+	cmd->prot_sg_cnt = se_cmd->t_prot_nents;
+	cmd->prot_sg = se_cmd->t_prot_sg;
+	cmd->blk_sz  = se_cmd->se_dev->dev_attrib.block_size;
+	se_cmd->pi_err = 0;
+
 	/*
 	 * Now queue completed DATA_IN the qla2xxx LLD and response ring
 	 */
@@ -1465,6 +1501,8 @@ static int tcm_qla2xxx_check_initiator_node_acl(
 	struct qla_tgt_sess *sess = qla_tgt_sess;
 	unsigned char port_name[36];
 	unsigned long flags;
+	int num_tags = (ha->fw_xcb_count) ? ha->fw_xcb_count :
+		       TCM_QLA2XXX_DEFAULT_TAGS;
 
 	lport = vha->vha_tgt.target_lport_ptr;
 	if (!lport) {
@@ -1482,7 +1520,9 @@ static int tcm_qla2xxx_check_initiator_node_acl(
 	}
 	se_tpg = &tpg->se_tpg;
 
-	se_sess = transport_init_session(TARGET_PROT_NORMAL);
+	se_sess = transport_init_session_tags(num_tags,
+					      sizeof(struct qla_tgt_cmd),
+					      TARGET_PROT_NORMAL);
 	if (IS_ERR(se_sess)) {
 		pr_err("Unable to initialize struct se_session\n");
 		return PTR_ERR(se_sess);
@@ -1600,6 +1640,7 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
 static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
 	.handle_cmd		= tcm_qla2xxx_handle_cmd,
 	.handle_data		= tcm_qla2xxx_handle_data,
+	.handle_dif_err		= tcm_qla2xxx_handle_dif_err,
 	.handle_tmr		= tcm_qla2xxx_handle_tmr,
 	.free_cmd		= tcm_qla2xxx_free_cmd,
 	.free_mcmd		= tcm_qla2xxx_free_mcmd,
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 33aaac8c7d59..10c002145648 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -4,6 +4,11 @@
 #define TCM_QLA2XXX_VERSION	"v0.1"
 /* length of ASCII WWPNs including pad */
 #define TCM_QLA2XXX_NAMELEN	32
+/*
+ * Number of pre-allocated per-session tags, based upon the worst-case
+ * per port number of iocbs
+ */
+#define TCM_QLA2XXX_DEFAULT_TAGS 2088
 
 #include "qla_target.h"
 
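
Since transport_init_session_tags() pre-allocates one struct qla_tgt_cmd per tag, the 2088-tag default above carries an up-front per-session memory cost that is easy to estimate. A back-of-envelope sketch (the 2 KB figure for sizeof(struct qla_tgt_cmd) is an assumed round number for illustration only):

#include <stdio.h>

int main(void)
{
	unsigned int num_tags = 2088;	/* TCM_QLA2XXX_DEFAULT_TAGS */
	unsigned int cmd_size = 2048;	/* assumed sizeof(struct qla_tgt_cmd) */

	/* one contiguous sess_cmd_map allocation per session */
	printf("per-session tag map: %u KiB\n",
	       num_tags * cmd_size / 1024);	/* ~4176 KiB */
	return 0;
}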
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index 2eba35365920..556c1525f881 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -249,110 +249,6 @@ void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
 	qla4_83xx_flash_unlock(ha);
 }
 
-/**
- * qla4_83xx_ms_mem_write_128b - Writes data to MS/off-chip memory
- * @ha: Pointer to adapter structure
- * @addr: Flash address to write to
- * @data: Data to be written
- * @count: word_count to be written
- *
- * Return: On success return QLA_SUCCESS
- *         On error return QLA_ERROR
- **/
-int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
-				uint32_t *data, uint32_t count)
-{
-	int i, j;
-	uint32_t agt_ctrl;
-	unsigned long flags;
-	int ret_val = QLA_SUCCESS;
-
-	/* Only 128-bit aligned access */
-	if (addr & 0xF) {
-		ret_val = QLA_ERROR;
-		goto exit_ms_mem_write;
-	}
-
-	write_lock_irqsave(&ha->hw_lock, flags);
-
-	/* Write address */
-	ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0);
-	if (ret_val == QLA_ERROR) {
-		ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n",
-			   __func__);
-		goto exit_ms_mem_write_unlock;
-	}
-
-	for (i = 0; i < count; i++, addr += 16) {
-		if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
-					     QLA8XXX_ADDR_QDR_NET_MAX)) ||
-		      (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
-					     QLA8XXX_ADDR_DDR_NET_MAX)))) {
-			ret_val = QLA_ERROR;
-			goto exit_ms_mem_write_unlock;
-		}
-
-		ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO,
-						    addr);
-		/* Write data */
-		ret_val |= qla4_83xx_wr_reg_indirect(ha,
-						     MD_MIU_TEST_AGT_WRDATA_LO,
-						     *data++);
-		ret_val |= qla4_83xx_wr_reg_indirect(ha,
-						     MD_MIU_TEST_AGT_WRDATA_HI,
-						     *data++);
-		ret_val |= qla4_83xx_wr_reg_indirect(ha,
-						     MD_MIU_TEST_AGT_WRDATA_ULO,
-						     *data++);
-		ret_val |= qla4_83xx_wr_reg_indirect(ha,
-						     MD_MIU_TEST_AGT_WRDATA_UHI,
-						     *data++);
-		if (ret_val == QLA_ERROR) {
-			ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n",
-				   __func__);
-			goto exit_ms_mem_write_unlock;
-		}
-
-		/* Check write status */
-		ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
-						    MIU_TA_CTL_WRITE_ENABLE);
-		ret_val |= qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
-						     MIU_TA_CTL_WRITE_START);
-		if (ret_val == QLA_ERROR) {
-			ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n",
-				   __func__);
-			goto exit_ms_mem_write_unlock;
-		}
-
-		for (j = 0; j < MAX_CTL_CHECK; j++) {
-			ret_val = qla4_83xx_rd_reg_indirect(ha,
-							    MD_MIU_TEST_AGT_CTRL,
-							    &agt_ctrl);
-			if (ret_val == QLA_ERROR) {
-				ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n",
-					   __func__);
-				goto exit_ms_mem_write_unlock;
-			}
-			if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
-				break;
-		}
-
-		/* Status check failed */
-		if (j >= MAX_CTL_CHECK) {
-			printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n",
-					   __func__);
-			ret_val = QLA_ERROR;
-			goto exit_ms_mem_write_unlock;
-		}
-	}
-
-exit_ms_mem_write_unlock:
-	write_unlock_irqrestore(&ha->hw_lock, flags);
-
-exit_ms_mem_write:
-	return ret_val;
-}
-
 #define INTENT_TO_RECOVER	0x01
 #define PROCEED_TO_RECOVER	0x02
 
@@ -760,7 +656,7 @@ static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha)
 			  __func__));
 
 	/* 128 bit/16 byte write to MS memory */
-	ret_val = qla4_83xx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,
+	ret_val = qla4_8xxx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,
 					      count);
 	if (ret_val == QLA_ERROR) {
 		ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n",
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.h b/drivers/scsi/qla4xxx/ql4_83xx.h
index a0de6e25ea5a..775fdf9fcc87 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.h
+++ b/drivers/scsi/qla4xxx/ql4_83xx.h
@@ -254,6 +254,50 @@ struct qla83xx_minidump_entry_pollrd {
 	uint32_t rsvd_1;
 };
 
+struct qla8044_minidump_entry_rddfe {
+	struct qla8xxx_minidump_entry_hdr h;
+	uint32_t addr_1;
+	uint32_t value;
+	uint8_t stride;
+	uint8_t stride2;
+	uint16_t count;
+	uint32_t poll;
+	uint32_t mask;
+	uint32_t modify_mask;
+	uint32_t data_size;
+	uint32_t rsvd;
+
+} __packed;
+
+struct qla8044_minidump_entry_rdmdio {
+	struct qla8xxx_minidump_entry_hdr h;
+
+	uint32_t addr_1;
+	uint32_t addr_2;
+	uint32_t value_1;
+	uint8_t stride_1;
+	uint8_t stride_2;
+	uint16_t count;
+	uint32_t poll;
+	uint32_t mask;
+	uint32_t value_2;
+	uint32_t data_size;
+
+} __packed;
+
+struct qla8044_minidump_entry_pollwr {
+	struct qla8xxx_minidump_entry_hdr h;
+	uint32_t addr_1;
+	uint32_t addr_2;
+	uint32_t value_1;
+	uint32_t value_2;
+	uint32_t poll;
+	uint32_t mask;
+	uint32_t data_size;
+	uint32_t rsvd;
+
+} __packed;
+
 /* RDMUX2 Entry */
 struct qla83xx_minidump_entry_rdmux2 {
 	struct qla8xxx_minidump_entry_hdr h;
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 73a502288bde..8f6d0fb2cd80 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -601,6 +601,7 @@ struct scsi_qla_host {
 #define DPC_HA_NEED_QUIESCENT		22 /* 0x00400000 ISP-82xx only*/
 #define DPC_POST_IDC_ACK		23 /* 0x00800000 */
 #define DPC_RESTORE_ACB			24 /* 0x01000000 */
+#define DPC_SYSFS_DDB_EXPORT		25 /* 0x02000000 */
 
 	struct Scsi_Host *host; /* pointer to host data */
 	uint32_t tot_ddbs;
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 209853ce0bbc..699575efc9ba 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -1415,6 +1415,9 @@ struct ql_iscsi_stats {
 #define QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN	16
 #define QLA83XX_SS_OCM_WNDREG_INDEX		3
 #define QLA83XX_SS_PCI_INDEX			0
+#define QLA8022_TEMPLATE_CAP_OFFSET		172
+#define QLA83XX_TEMPLATE_CAP_OFFSET		268
+#define QLA80XX_TEMPLATE_RESERVED_BITS		16
 
 struct qla4_8xxx_minidump_template_hdr {
 	uint32_t entry_type;
@@ -1434,6 +1437,7 @@ struct qla4_8xxx_minidump_template_hdr {
 	uint32_t saved_state_array[QLA8XXX_DBG_STATE_ARRAY_LEN];
 	uint32_t capture_size_array[QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN];
 	uint32_t ocm_window_reg[QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN];
+	uint32_t capabilities[QLA80XX_TEMPLATE_RESERVED_BITS];
 };
 
 #endif /* _QLA4X_FW_H */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index b1a19cd8d5b2..5f58b451327e 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -274,13 +274,14 @@ int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
 int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
 		    uint32_t acb_type, uint32_t len);
 int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config);
-int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha,
+int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha,
 				uint64_t addr, uint32_t *data, uint32_t count);
 uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state);
 int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config);
 int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config);
 int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha);
 int qla4_83xx_is_detached(struct scsi_qla_host *ha);
+int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha);
 
 extern int ql4xextended_error_logging;
 extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 28fbece7e08f..6f12f859b11d 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -282,6 +282,25 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
     return ipv4_wait|ipv6_wait;
 }

+static int qla4_80xx_is_minidump_dma_capable(struct scsi_qla_host *ha,
+        struct qla4_8xxx_minidump_template_hdr *md_hdr)
+{
+    int offset = (is_qla8022(ha)) ? QLA8022_TEMPLATE_CAP_OFFSET :
+                                    QLA83XX_TEMPLATE_CAP_OFFSET;
+    int rval = 1;
+    uint32_t *cap_offset;
+
+    cap_offset = (uint32_t *)((char *)md_hdr + offset);
+
+    if (!(le32_to_cpu(*cap_offset) & BIT_0)) {
+        ql4_printk(KERN_INFO, ha, "PEX DMA Not supported %d\n",
+                   *cap_offset);
+        rval = 0;
+    }
+
+    return rval;
+}
+
 /**
  * qla4xxx_alloc_fw_dump - Allocate memory for minidump data.
  * @ha: pointer to host adapter structure.
@@ -294,6 +313,7 @@ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)
     void *md_tmp;
     dma_addr_t md_tmp_dma;
     struct qla4_8xxx_minidump_template_hdr *md_hdr;
+    int dma_capable;

     if (ha->fw_dump) {
         ql4_printk(KERN_WARNING, ha,
@@ -326,13 +346,19 @@ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)

     md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp;

+    dma_capable = qla4_80xx_is_minidump_dma_capable(ha, md_hdr);
+
     capture_debug_level = md_hdr->capture_debug_level;

     /* Get capture mask based on module loadtime setting. */
-    if (ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F)
+    if ((ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F) ||
+        (ql4xmdcapmask == 0xFF && dma_capable)) {
         ha->fw_dump_capture_mask = ql4xmdcapmask;
-    else
+    } else {
+        if (ql4xmdcapmask == 0xFF)
+            ql4_printk(KERN_INFO, ha, "Falling back to default capture mask, as PEX DMA is not supported\n");
         ha->fw_dump_capture_mask = capture_debug_level;
+    }

     md_hdr->driver_capture_mask = ha->fw_dump_capture_mask;

@@ -864,6 +890,8 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
     if (status == QLA_SUCCESS) {
         if (test_and_clear_bit(AF_GET_CRASH_RECORD, &ha->flags))
             qla4xxx_get_crash_record(ha);
+
+        qla4xxx_init_rings(ha);
     } else {
         DEBUG(printk("scsi%ld: %s: Firmware has NOT started\n",
                      ha->host_no, __func__));
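
[Editor's note] A minimal user-space sketch (not part of the patch) of the capture-mask fallback added above. The function name and literal inputs are illustrative stand-ins for ql4xmdcapmask, the template's capture_debug_level, and the result of the new qla4_80xx_is_minidump_dma_capable() probe:

#include <stdio.h>

/* 0xFF is only honoured when the firmware template advertises PEX DMA
 * support; every other out-of-range value falls back to the template's
 * own capture_debug_level.
 */
static unsigned int choose_capture_mask(unsigned int ql4xmdcapmask,
                                        unsigned int capture_debug_level,
                                        int dma_capable)
{
    if ((ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F) ||
        (ql4xmdcapmask == 0xFF && dma_capable))
        return ql4xmdcapmask;
    return capture_debug_level;   /* fallback, incl. 0xFF without DMA */
}

int main(void)
{
    printf("0x%x\n", choose_capture_mask(0xFF, 0x1F, 0)); /* -> 0x1f */
    printf("0x%x\n", choose_capture_mask(0xFF, 0x1F, 1)); /* -> 0xff */
    return 0;
}
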
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index b1925d195f41..081b6b78d2c6 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -1526,7 +1526,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)

 int qla4xxx_request_irqs(struct scsi_qla_host *ha)
 {
-    int ret;
+    int ret = 0;
     int rval = QLA_ERROR;

     if (is_qla40XX(ha))
@@ -1580,15 +1580,13 @@ try_msi:
         }
     }

-    /*
-     * Prevent interrupts from falling back to INTx mode in cases where
-     * interrupts cannot get acquired through MSI-X or MSI mode.
-     */
+try_intx:
     if (is_qla8022(ha)) {
-        ql4_printk(KERN_WARNING, ha, "IRQ not attached -- %d.\n", ret);
+        ql4_printk(KERN_WARNING, ha, "%s: ISP82xx Legacy interrupt not supported\n",
+                   __func__);
         goto irq_not_attached;
     }
-try_intx:
+
     /* Trying INTx */
     ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
                       IRQF_SHARED, DRIVER_NAME, ha);
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 0a6b782d6fdb..0a3312c6dd6d 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -2381,7 +2381,7 @@ int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config)
         ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
                    __func__);
         rval = QLA_ERROR;
-        goto exit_config_acb;
+        goto exit_free_acb;
     }
     memcpy(ha->saved_acb, acb, acb_len);
     break;
@@ -2395,8 +2395,6 @@ int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config)
     }

     memcpy(acb, ha->saved_acb, acb_len);
-    kfree(ha->saved_acb);
-    ha->saved_acb = NULL;

     rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
     if (rval != QLA_SUCCESS)
@@ -2412,6 +2410,10 @@ exit_free_acb:
     dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), acb,
                       acb_dma);
 exit_config_acb:
+    if ((acb_config == ACB_CONFIG_SET) && ha->saved_acb) {
+        kfree(ha->saved_acb);
+        ha->saved_acb = NULL;
+    }
     DEBUG2(ql4_printk(KERN_INFO, ha,
                       "%s %s\n", __func__,
                       rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
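
[Editor's note] Illustrative-only sketch of the cleanup idiom this hunk adopts: releasing ha->saved_acb at the common exit label means both the success path and the new early goto path free it exactly once. The struct and function names below are hypothetical stand-ins, not the driver's types:

#include <stdlib.h>

struct ctx { void *saved_acb; };

/* Free the saved copy only on the SET path and only if it exists,
 * then clear the pointer so a repeated call is a safe no-op.
 */
static void config_acb_exit(struct ctx *ha, int acb_config_set)
{
    if (acb_config_set && ha->saved_acb) {
        free(ha->saved_acb);
        ha->saved_acb = NULL;   /* guard against double free */
    }
}

int main(void)
{
    struct ctx ha = { .saved_acb = malloc(16) };

    config_acb_exit(&ha, 1);
    config_acb_exit(&ha, 1);   /* second call is a no-op */
    return 0;
}
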
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 63328c812b70..9dbdb4be2d8f 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -14,6 +14,7 @@

 #include <asm-generic/io-64-nonatomic-lo-hi.h>

+#define TIMEOUT_100_MS	100
 #define MASK(n)		DMA_BIT_MASK(n)
 #define MN_WIN(addr)	(((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
 #define OCM_WIN(addr)	(((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
@@ -1176,6 +1177,112 @@ qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
     return 0;
 }

+/**
+ * qla4_8xxx_ms_mem_write_128b - Writes data to MS/off-chip memory
+ * @ha: Pointer to adapter structure
+ * @addr: MS memory address to write to (must be 128-bit aligned)
+ * @data: Data to be written
+ * @count: Number of 128-bit words to be written
+ *
+ * Return: On success return QLA_SUCCESS
+ *         On error return QLA_ERROR
+ **/
+int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
+                                uint32_t *data, uint32_t count)
+{
+    int i, j;
+    uint32_t agt_ctrl;
+    unsigned long flags;
+    int ret_val = QLA_SUCCESS;
+
+    /* Only 128-bit aligned access */
+    if (addr & 0xF) {
+        ret_val = QLA_ERROR;
+        goto exit_ms_mem_write;
+    }
+
+    write_lock_irqsave(&ha->hw_lock, flags);
+
+    /* Write address */
+    ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0);
+    if (ret_val == QLA_ERROR) {
+        ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n",
+                   __func__);
+        goto exit_ms_mem_write_unlock;
+    }
+
+    for (i = 0; i < count; i++, addr += 16) {
+        if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
+                                     QLA8XXX_ADDR_QDR_NET_MAX)) ||
+              (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
+                                     QLA8XXX_ADDR_DDR_NET_MAX)))) {
+            ret_val = QLA_ERROR;
+            goto exit_ms_mem_write_unlock;
+        }
+
+        ret_val = ha->isp_ops->wr_reg_indirect(ha,
+                                               MD_MIU_TEST_AGT_ADDR_LO,
+                                               addr);
+        /* Write data */
+        ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+                                                MD_MIU_TEST_AGT_WRDATA_LO,
+                                                *data++);
+        ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+                                                MD_MIU_TEST_AGT_WRDATA_HI,
+                                                *data++);
+        ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+                                                MD_MIU_TEST_AGT_WRDATA_ULO,
+                                                *data++);
+        ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+                                                MD_MIU_TEST_AGT_WRDATA_UHI,
+                                                *data++);
+        if (ret_val == QLA_ERROR) {
+            ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n",
+                       __func__);
+            goto exit_ms_mem_write_unlock;
+        }
+
+        /* Check write status */
+        ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
+                                               MIU_TA_CTL_WRITE_ENABLE);
+        ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+                                                MD_MIU_TEST_AGT_CTRL,
+                                                MIU_TA_CTL_WRITE_START);
+        if (ret_val == QLA_ERROR) {
+            ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n",
+                       __func__);
+            goto exit_ms_mem_write_unlock;
+        }
+
+        for (j = 0; j < MAX_CTL_CHECK; j++) {
+            ret_val = ha->isp_ops->rd_reg_indirect(ha,
+                                                   MD_MIU_TEST_AGT_CTRL,
+                                                   &agt_ctrl);
+            if (ret_val == QLA_ERROR) {
+                ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n",
+                           __func__);
+                goto exit_ms_mem_write_unlock;
+            }
+            if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
+                break;
+        }
+
+        /* Status check failed */
+        if (j >= MAX_CTL_CHECK) {
+            printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n",
+                               __func__);
+            ret_val = QLA_ERROR;
+            goto exit_ms_mem_write_unlock;
+        }
+    }
+
+exit_ms_mem_write_unlock:
+    write_unlock_irqrestore(&ha->hw_lock, flags);
+
+exit_ms_mem_write:
+    return ret_val;
+}
+
 static int
 qla4_82xx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
 {
@@ -1714,6 +1821,101 @@ void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha)
     qla4_82xx_rom_unlock(ha);
 }

+static uint32_t ql4_84xx_poll_wait_for_ready(struct scsi_qla_host *ha,
+                                             uint32_t addr1, uint32_t mask)
+{
+    unsigned long timeout;
+    uint32_t rval = QLA_SUCCESS;
+    uint32_t temp;
+
+    timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
+    do {
+        ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
+        if ((temp & mask) != 0)
+            break;
+
+        if (time_after_eq(jiffies, timeout)) {
+            ql4_printk(KERN_INFO, ha, "Error in processing rdmdio entry\n");
+            return QLA_ERROR;
+        }
+    } while (1);
+
+    return rval;
+}
+
+uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1,
+                                uint32_t addr3, uint32_t mask, uint32_t addr,
+                                uint32_t *data_ptr)
+{
+    int rval = QLA_SUCCESS;
+    uint32_t temp;
+    uint32_t data;
+
+    rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
+    if (rval)
+        goto exit_ipmdio_rd_reg;
+
+    temp = (0x40000000 | addr);
+    ha->isp_ops->wr_reg_indirect(ha, addr1, temp);
+
+    rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
+    if (rval)
+        goto exit_ipmdio_rd_reg;
+
+    ha->isp_ops->rd_reg_indirect(ha, addr3, &data);
+    *data_ptr = data;
+
+exit_ipmdio_rd_reg:
+    return rval;
+}
+
+
+static uint32_t ql4_84xx_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *ha,
+                                                   uint32_t addr1,
+                                                   uint32_t addr2,
+                                                   uint32_t addr3,
+                                                   uint32_t mask)
+{
+    unsigned long timeout;
+    uint32_t temp;
+    uint32_t rval = QLA_SUCCESS;
+
+    timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
+    do {
+        ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, mask, addr2, &temp);
+        if ((temp & 0x1) != 1)
+            break;
+        if (time_after_eq(jiffies, timeout)) {
+            ql4_printk(KERN_INFO, ha, "Error in processing mdiobus idle\n");
+            return QLA_ERROR;
+        }
+    } while (1);
+
+    return rval;
+}
+
+static int ql4_84xx_ipmdio_wr_reg(struct scsi_qla_host *ha,
+                                  uint32_t addr1, uint32_t addr3,
+                                  uint32_t mask, uint32_t addr,
+                                  uint32_t value)
+{
+    int rval = QLA_SUCCESS;
+
+    rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
+    if (rval)
+        goto exit_ipmdio_wr_reg;
+
+    ha->isp_ops->wr_reg_indirect(ha, addr3, value);
+    ha->isp_ops->wr_reg_indirect(ha, addr1, addr);
+
+    rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
+    if (rval)
+        goto exit_ipmdio_wr_reg;
+
+exit_ipmdio_wr_reg:
+    return rval;
+}
+
 static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
                 struct qla8xxx_minidump_entry_hdr *entry_hdr,
                 uint32_t **d_ptr)
@@ -1822,7 +2024,7 @@ error_exit:
     return rval;
 }

-static int qla4_83xx_minidump_pex_dma_read(struct scsi_qla_host *ha,
+static int qla4_8xxx_minidump_pex_dma_read(struct scsi_qla_host *ha,
                 struct qla8xxx_minidump_entry_hdr *entry_hdr,
                 uint32_t **d_ptr)
 {
@@ -1899,11 +2101,11 @@ static int qla4_83xx_minidump_pex_dma_read(struct scsi_qla_host *ha,
     dma_desc.cmd.read_data_size = size;

     /* Prepare: Write pex-dma descriptor to MS memory. */
-    rval = qla4_83xx_ms_mem_write_128b(ha,
+    rval = qla4_8xxx_ms_mem_write_128b(ha,
                 (uint64_t)m_hdr->desc_card_addr,
                 (uint32_t *)&dma_desc,
                 (sizeof(struct qla4_83xx_pex_dma_descriptor)/16));
-    if (rval == -1) {
+    if (rval != QLA_SUCCESS) {
         ql4_printk(KERN_INFO, ha,
                    "%s: Error writing rdmem-dma-init to MS !!!\n",
                    __func__);
@@ -2359,17 +2561,10 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
     uint32_t *data_ptr = *d_ptr;
     int rval = QLA_SUCCESS;

-    if (is_qla8032(ha) || is_qla8042(ha)) {
-        rval = qla4_83xx_minidump_pex_dma_read(ha, entry_hdr,
-                                               &data_ptr);
-        if (rval != QLA_SUCCESS) {
-            rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
-                                                      &data_ptr);
-        }
-    } else {
+    rval = qla4_8xxx_minidump_pex_dma_read(ha, entry_hdr, &data_ptr);
+    if (rval != QLA_SUCCESS)
         rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
                                                   &data_ptr);
-    }
     *d_ptr = data_ptr;
     return rval;
 }
@@ -2440,6 +2635,227 @@ exit_process_pollrd:
     return rval;
 }

+static uint32_t qla4_84xx_minidump_process_rddfe(struct scsi_qla_host *ha,
+                struct qla8xxx_minidump_entry_hdr *entry_hdr,
+                uint32_t **d_ptr)
+{
+    int loop_cnt;
+    uint32_t addr1, addr2, value, data, temp, wrval;
+    uint8_t stride, stride2;
+    uint16_t count;
+    uint32_t poll, mask, data_size, modify_mask;
+    uint32_t wait_count = 0;
+    uint32_t *data_ptr = *d_ptr;
+    struct qla8044_minidump_entry_rddfe *rddfe;
+    uint32_t rval = QLA_SUCCESS;
+
+    rddfe = (struct qla8044_minidump_entry_rddfe *)entry_hdr;
+    addr1 = le32_to_cpu(rddfe->addr_1);
+    value = le32_to_cpu(rddfe->value);
+    stride = le32_to_cpu(rddfe->stride);
+    stride2 = le32_to_cpu(rddfe->stride2);
+    count = le32_to_cpu(rddfe->count);
+
+    poll = le32_to_cpu(rddfe->poll);
+    mask = le32_to_cpu(rddfe->mask);
+    modify_mask = le32_to_cpu(rddfe->modify_mask);
+    data_size = le32_to_cpu(rddfe->data_size);
+
+    addr2 = addr1 + stride;
+
+    for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) {
+        ha->isp_ops->wr_reg_indirect(ha, addr1, (0x40000000 | value));
+
+        wait_count = 0;
+        while (wait_count < poll) {
+            ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
+            if ((temp & mask) != 0)
+                break;
+            wait_count++;
+        }
+
+        if (wait_count == poll) {
+            ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__);
+            rval = QLA_ERROR;
+            goto exit_process_rddfe;
+        } else {
+            ha->isp_ops->rd_reg_indirect(ha, addr2, &temp);
+            temp = temp & modify_mask;
+            temp = (temp | ((loop_cnt << 16) | loop_cnt));
+            wrval = ((temp << 16) | temp);
+
+            ha->isp_ops->wr_reg_indirect(ha, addr2, wrval);
+            ha->isp_ops->wr_reg_indirect(ha, addr1, value);
+
+            wait_count = 0;
+            while (wait_count < poll) {
+                ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
+                if ((temp & mask) != 0)
+                    break;
+                wait_count++;
+            }
+            if (wait_count == poll) {
+                ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
+                           __func__);
+                rval = QLA_ERROR;
+                goto exit_process_rddfe;
+            }
+
+            ha->isp_ops->wr_reg_indirect(ha, addr1,
+                                         ((0x40000000 | value) +
+                                          stride2));
+            wait_count = 0;
+            while (wait_count < poll) {
+                ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
+                if ((temp & mask) != 0)
+                    break;
+                wait_count++;
+            }
+
+            if (wait_count == poll) {
+                ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
+                           __func__);
+                rval = QLA_ERROR;
+                goto exit_process_rddfe;
+            }
+
+            ha->isp_ops->rd_reg_indirect(ha, addr2, &data);
+
+            *data_ptr++ = cpu_to_le32(wrval);
+            *data_ptr++ = cpu_to_le32(data);
+        }
+    }
+
+    *d_ptr = data_ptr;
+exit_process_rddfe:
+    return rval;
+}
+
+static uint32_t qla4_84xx_minidump_process_rdmdio(struct scsi_qla_host *ha,
+                struct qla8xxx_minidump_entry_hdr *entry_hdr,
+                uint32_t **d_ptr)
+{
+    int rval = QLA_SUCCESS;
+    uint32_t addr1, addr2, value1, value2, data, selval;
+    uint8_t stride1, stride2;
+    uint32_t addr3, addr4, addr5, addr6, addr7;
+    uint16_t count, loop_cnt;
+    uint32_t poll, mask;
+    uint32_t *data_ptr = *d_ptr;
+    struct qla8044_minidump_entry_rdmdio *rdmdio;
+
+    rdmdio = (struct qla8044_minidump_entry_rdmdio *)entry_hdr;
+    addr1 = le32_to_cpu(rdmdio->addr_1);
+    addr2 = le32_to_cpu(rdmdio->addr_2);
+    value1 = le32_to_cpu(rdmdio->value_1);
+    stride1 = le32_to_cpu(rdmdio->stride_1);
+    stride2 = le32_to_cpu(rdmdio->stride_2);
+    count = le32_to_cpu(rdmdio->count);
+
+    poll = le32_to_cpu(rdmdio->poll);
+    mask = le32_to_cpu(rdmdio->mask);
+    value2 = le32_to_cpu(rdmdio->value_2);
+
+    addr3 = addr1 + stride1;
+
+    for (loop_cnt = 0; loop_cnt < count; loop_cnt++) {
+        rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2,
+                                                  addr3, mask);
+        if (rval)
+            goto exit_process_rdmdio;
+
+        addr4 = addr2 - stride1;
+        rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr4,
+                                      value2);
+        if (rval)
+            goto exit_process_rdmdio;
+
+        addr5 = addr2 - (2 * stride1);
+        rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr5,
+                                      value1);
+        if (rval)
+            goto exit_process_rdmdio;
+
+        addr6 = addr2 - (3 * stride1);
+        rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask,
+                                      addr6, 0x2);
+        if (rval)
+            goto exit_process_rdmdio;
+
+        rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2,
+                                                  addr3, mask);
+        if (rval)
+            goto exit_process_rdmdio;
+
+        addr7 = addr2 - (4 * stride1);
+        rval = ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3,
+                                      mask, addr7, &data);
+        if (rval)
+            goto exit_process_rdmdio;
+
+        selval = (value2 << 18) | (value1 << 2) | 2;
+
+        stride2 = le32_to_cpu(rdmdio->stride_2);
+        *data_ptr++ = cpu_to_le32(selval);
+        *data_ptr++ = cpu_to_le32(data);
+
+        value1 = value1 + stride2;
+        *d_ptr = data_ptr;
+    }
+
+exit_process_rdmdio:
+    return rval;
+}
+
+static uint32_t qla4_84xx_minidump_process_pollwr(struct scsi_qla_host *ha,
+                struct qla8xxx_minidump_entry_hdr *entry_hdr,
+                uint32_t **d_ptr)
+{
+    uint32_t addr1, addr2, value1, value2, poll, mask, r_value;
+    struct qla8044_minidump_entry_pollwr *pollwr_hdr;
+    uint32_t wait_count = 0;
+    uint32_t rval = QLA_SUCCESS;
+
+    pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr;
+    addr1 = le32_to_cpu(pollwr_hdr->addr_1);
+    addr2 = le32_to_cpu(pollwr_hdr->addr_2);
+    value1 = le32_to_cpu(pollwr_hdr->value_1);
+    value2 = le32_to_cpu(pollwr_hdr->value_2);
+
+    poll = le32_to_cpu(pollwr_hdr->poll);
+    mask = le32_to_cpu(pollwr_hdr->mask);
+
+    while (wait_count < poll) {
+        ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value);
+
+        if ((r_value & poll) != 0)
+            break;
+
+        wait_count++;
+    }
+
+    if (wait_count == poll) {
+        ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__);
+        rval = QLA_ERROR;
+        goto exit_process_pollwr;
+    }
+
+    ha->isp_ops->wr_reg_indirect(ha, addr2, value2);
+    ha->isp_ops->wr_reg_indirect(ha, addr1, value1);
+
+    wait_count = 0;
+    while (wait_count < poll) {
+        ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value);
+
+        if ((r_value & poll) != 0)
+            break;
+        wait_count++;
+    }
+
+exit_process_pollwr:
+    return rval;
+}
+
 static void qla83xx_minidump_process_rdmux2(struct scsi_qla_host *ha,
                 struct qla8xxx_minidump_entry_hdr *entry_hdr,
                 uint32_t **d_ptr)
@@ -2753,6 +3169,24 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
             if (rval != QLA_SUCCESS)
                 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
             break;
+        case QLA8044_RDDFE:
+            rval = qla4_84xx_minidump_process_rddfe(ha, entry_hdr,
+                                                    &data_ptr);
+            if (rval != QLA_SUCCESS)
+                qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+            break;
+        case QLA8044_RDMDIO:
+            rval = qla4_84xx_minidump_process_rdmdio(ha, entry_hdr,
+                                                     &data_ptr);
+            if (rval != QLA_SUCCESS)
+                qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+            break;
+        case QLA8044_POLLWR:
+            rval = qla4_84xx_minidump_process_pollwr(ha, entry_hdr,
+                                                     &data_ptr);
+            if (rval != QLA_SUCCESS)
+                qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+            break;
         case QLA8XXX_RDNOP:
         default:
             qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
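
[Editor's note] A small stand-alone sketch of the two constraints qla4_8xxx_ms_mem_write_128b() enforces above: the target address must be 128-bit aligned, and count is the payload size in 128-bit words (as in the pex-dma caller's sizeof(struct qla4_83xx_pex_dma_descriptor)/16). The helper name below is hypothetical and does only caller-side validation:

#include <stdint.h>
#include <stdio.h>

/* Returns nonzero when (addr, bytes) is acceptable for a 128-bit
 * MS-memory write: 16-byte-aligned address, whole number of
 * 16-byte words.
 */
static int ms_mem_args_ok(uint64_t addr, size_t bytes)
{
    return (addr & 0xF) == 0 && (bytes % 16) == 0;
}

int main(void)
{
    printf("%d\n", ms_mem_args_ok(0x100010, 32)); /* 1: valid */
    printf("%d\n", ms_mem_args_ok(0x100018, 32)); /* 0: misaligned */
    printf("count = %zu\n", (size_t)32 / 16);     /* 2 x 128-bit words */
    return 0;
}
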
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index 14500a0f62cc..337d9fcf6417 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -858,6 +858,9 @@ struct crb_addr_pair {
 #define QLA83XX_POLLRD		35
 #define QLA83XX_RDMUX2		36
 #define QLA83XX_POLLRDMWR	37
+#define QLA8044_RDDFE		38
+#define QLA8044_RDMDIO		39
+#define QLA8044_POLLWR		40
 #define QLA8XXX_RDROM		71
 #define QLA8XXX_RDMEM		72
 #define QLA8XXX_CNTRL		98
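
[Editor's note] Hypothetical reduced model of how the three new ISP8044 opcodes are consumed by the switch extended in qla4_8xxx_collect_md_data() above; handler names are returned as strings purely for illustration:

#include <stdio.h>

enum { QLA8044_RDDFE = 38, QLA8044_RDMDIO = 39, QLA8044_POLLWR = 40 };

/* Each template entry type maps to one processing routine; anything
 * the driver does not recognise is marked skipped in the dump.
 */
static const char *entry_handler(int entry_type)
{
    switch (entry_type) {
    case QLA8044_RDDFE:  return "qla4_84xx_minidump_process_rddfe";
    case QLA8044_RDMDIO: return "qla4_84xx_minidump_process_rdmdio";
    case QLA8044_POLLWR: return "qla4_84xx_minidump_process_pollwr";
    default:             return "qla4_8xxx_mark_entry_skipped";
    }
}

int main(void)
{
    printf("%s\n", entry_handler(39));
    return 0;
}
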
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 459b9f7186fd..320206376206 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -83,12 +83,12 @@ MODULE_PARM_DESC(ql4xsess_recovery_tmo,
83 " Target Session Recovery Timeout.\n" 83 " Target Session Recovery Timeout.\n"
84 "\t\t Default: 120 sec."); 84 "\t\t Default: 120 sec.");
85 85
86int ql4xmdcapmask = 0x1F; 86int ql4xmdcapmask = 0;
87module_param(ql4xmdcapmask, int, S_IRUGO); 87module_param(ql4xmdcapmask, int, S_IRUGO);
88MODULE_PARM_DESC(ql4xmdcapmask, 88MODULE_PARM_DESC(ql4xmdcapmask,
89 " Set the Minidump driver capture mask level.\n" 89 " Set the Minidump driver capture mask level.\n"
90 "\t\t Default is 0x1F.\n" 90 "\t\t Default is 0 (firmware default capture mask)\n"
91 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F"); 91 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF");
92 92
93int ql4xenablemd = 1; 93int ql4xenablemd = 1;
94module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR); 94module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
@@ -1742,6 +1742,9 @@ static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
     struct sockaddr *dst_addr;
     struct scsi_qla_host *ha;

+    if (!qla_ep)
+        return -ENOTCONN;
+
     ha = to_qla_host(qla_ep->host);
     DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
                       ha->host_no));
@@ -1749,9 +1752,6 @@ static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
     switch (param) {
     case ISCSI_PARAM_CONN_PORT:
     case ISCSI_PARAM_CONN_ADDRESS:
-        if (!qla_ep)
-            return -ENOTCONN;
-
         dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
         if (!dst_addr)
             return -ENOTCONN;
@@ -2879,7 +2879,6 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
     struct iscsi_conn *conn;
     struct qla_conn *qla_conn;
     struct sockaddr *dst_addr;
-    int len = 0;

     conn = cls_conn->dd_data;
     qla_conn = conn->dd_data;
@@ -2893,9 +2892,6 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
     default:
         return iscsi_conn_get_param(cls_conn, param, buf);
     }
-
-    return len;
-
 }

 int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
@@ -3569,14 +3565,13 @@ static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess,
     if (test_bit(OPT_IPV6_DEVICE, &options)) {
         conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos;

-        conn->link_local_ipv6_addr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
+        conn->link_local_ipv6_addr = kmemdup(
+                    fw_ddb_entry->link_local_ipv6_addr,
+                    IPv6_ADDR_LEN, GFP_KERNEL);
         if (!conn->link_local_ipv6_addr) {
             rc = -ENOMEM;
             goto exit_copy;
         }
-
-        memcpy(conn->link_local_ipv6_addr,
-               fw_ddb_entry->link_local_ipv6_addr, IPv6_ADDR_LEN);
     } else {
         conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
     }
@@ -4565,6 +4560,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
         test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
         test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
         test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
+        test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) ||
         test_bit(DPC_AEN, &ha->dpc_flags)) {
         DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
                       " - dpc flags = 0x%lx\n",
@@ -4862,9 +4858,6 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
                       ha->host_no, __func__));
         status = ha->isp_ops->reset_firmware(ha);
         if (status == QLA_SUCCESS) {
-            if (!test_bit(AF_FW_RECOVERY, &ha->flags))
-                qla4xxx_cmd_wait(ha);
-
             ha->isp_ops->disable_intrs(ha);
             qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
             qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
@@ -5432,6 +5425,11 @@ dpc_post_reset_ha:
             qla4xxx_relogin_all_devices(ha);
         }
     }
+    if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) {
+        if (qla4xxx_sysfs_ddb_export(ha))
+            ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n",
+                       __func__);
+    }
 }

 /**
@@ -8409,7 +8407,7 @@ exit_ddb_del:
  *
  * Export the firmware DDB for all send targets and normal targets to sysfs.
  **/
-static int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
+int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
 {
     struct dev_db_entry *fw_ddb_entry = NULL;
     dma_addr_t fw_ddb_entry_dma;
@@ -8847,11 +8845,8 @@ skip_retry_init:
         ql4_printk(KERN_ERR, ha,
                    "%s: No iSCSI boot target configured\n", __func__);

-    if (qla4xxx_sysfs_ddb_export(ha))
-        ql4_printk(KERN_ERR, ha,
-                   "%s: Error exporting ddb to sysfs\n", __func__);
-
+    set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags);
     /* Perform the build ddb list and login to each */
     qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
     iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
     qla4xxx_wait_login_resp_boot_tgt(ha);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index c6ba0a6b8458..f11eaa773339 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */

-#define QLA4XXX_DRIVER_VERSION	"5.04.00-k4"
+#define QLA4XXX_DRIVER_VERSION	"5.04.00-k6"
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index f3e9cc038d1d..1328a2621070 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -130,6 +130,7 @@ static const char * scsi_debug_version_date = "20100324";
 #define SCSI_DEBUG_OPT_DIF_ERR		32
 #define SCSI_DEBUG_OPT_DIX_ERR		64
 #define SCSI_DEBUG_OPT_MAC_TIMEOUT	128
+#define SCSI_DEBUG_OPT_SHORT_TRANSFER	256
 /* When "every_nth" > 0 then modulo "every_nth" commands:
  * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
  * - a RECOVERED_ERROR is simulated on successful read and write
@@ -3583,6 +3584,7 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
     int inj_transport = 0;
     int inj_dif = 0;
     int inj_dix = 0;
+    int inj_short = 0;
     int delay_override = 0;
     int unmap = 0;

@@ -3628,6 +3630,8 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
             inj_dif = 1; /* to reads and writes below */
         else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
             inj_dix = 1; /* to reads and writes below */
+        else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & scsi_debug_opts)
+            inj_short = 1;
     }

     if (devip->wlun) {
@@ -3744,6 +3748,10 @@ read:
         if (scsi_debug_fake_rw)
             break;
         get_data_transfer_info(cmd, &lba, &num, &ei_lba);
+
+        if (inj_short)
+            num /= 2;
+
         errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
         if (inj_recovered && (0 == errsts)) {
             mk_sense_buffer(devip, RECOVERED_ERROR,
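
[Editor's note] Stand-alone illustration of the new SCSI_DEBUG_OPT_SHORT_TRANSFER injection: halving num makes resp_read() serve only half the requested blocks, so the midlayer observes a short transfer. The 512-byte sector size below is an assumption made for the arithmetic only:

#include <stdio.h>

#define SCSI_DEBUG_OPT_SHORT_TRANSFER 256

int main(void)
{
    unsigned int opts = SCSI_DEBUG_OPT_SHORT_TRANSFER;
    unsigned int requested = 8;         /* blocks in the READ */
    unsigned int num = requested;

    if (opts & SCSI_DEBUG_OPT_SHORT_TRANSFER)
        num /= 2;                       /* blocks actually served */

    /* residual the midlayer would see, assuming 512-byte sectors */
    printf("resid = %u bytes\n", (requested - num) * 512); /* 2048 */
    return 0;
}
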
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index f17aa7aa7879..7e957918f33f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -131,7 +131,7 @@ scmd_eh_abort_handler(struct work_struct *work)
131 "aborting command %p\n", scmd)); 131 "aborting command %p\n", scmd));
132 rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd); 132 rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
133 if (rtn == SUCCESS) { 133 if (rtn == SUCCESS) {
134 scmd->result |= DID_TIME_OUT << 16; 134 set_host_byte(scmd, DID_TIME_OUT);
135 if (scsi_host_eh_past_deadline(sdev->host)) { 135 if (scsi_host_eh_past_deadline(sdev->host)) {
136 SCSI_LOG_ERROR_RECOVERY(3, 136 SCSI_LOG_ERROR_RECOVERY(3,
137 scmd_printk(KERN_INFO, scmd, 137 scmd_printk(KERN_INFO, scmd,
@@ -167,7 +167,7 @@ scmd_eh_abort_handler(struct work_struct *work)
             scmd_printk(KERN_WARNING, scmd,
                         "scmd %p terminate "
                         "aborted command\n", scmd));
-        scmd->result |= DID_TIME_OUT << 16;
+        set_host_byte(scmd, DID_TIME_OUT);
         scsi_finish_command(scmd);
     }
 }
@@ -287,15 +287,15 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
     else if (host->hostt->eh_timed_out)
         rtn = host->hostt->eh_timed_out(scmd);

-    if (rtn == BLK_EH_NOT_HANDLED && !host->hostt->no_async_abort)
-        if (scsi_abort_command(scmd) == SUCCESS)
+    if (rtn == BLK_EH_NOT_HANDLED) {
+        if (!host->hostt->no_async_abort &&
+            scsi_abort_command(scmd) == SUCCESS)
             return BLK_EH_NOT_HANDLED;

-    scmd->result |= DID_TIME_OUT << 16;
-
-    if (unlikely(rtn == BLK_EH_NOT_HANDLED &&
-                 !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)))
-        rtn = BLK_EH_HANDLED;
+        set_host_byte(scmd, DID_TIME_OUT);
+        if (!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))
+            rtn = BLK_EH_HANDLED;
+    }

     return rtn;
 }
@@ -1029,6 +1029,7 @@ retry:
         rtn = NEEDS_RETRY;
     } else {
         timeleft = wait_for_completion_timeout(&done, timeout);
+        rtn = SUCCESS;
     }

     shost->eh_action = NULL;
@@ -1776,7 +1777,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
         break;
     case DID_ABORT:
         if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
-            scmd->result |= DID_TIME_OUT << 16;
+            set_host_byte(scmd, DID_TIME_OUT);
             return SUCCESS;
         }
     case DID_NO_CONNECT:
@@ -1951,6 +1952,8 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
      */
     req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);

+    blk_rq_set_block_pc(req);
+
     req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
     req->cmd[1] = 0;
     req->cmd[2] = 0;
@@ -1960,7 +1963,6 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)

     req->cmd_len = COMMAND_SIZE(req->cmd[0]);

-    req->cmd_type = REQ_TYPE_BLOCK_PC;
     req->cmd_flags |= REQ_QUIET;
     req->timeout = 10 * HZ;
     req->retries = 5;
@@ -2306,6 +2308,12 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
     }

     scmd = scsi_get_command(dev, GFP_KERNEL);
+    if (!scmd) {
+        rtn = FAILED;
+        put_device(&dev->sdev_gendev);
+        goto out_put_autopm_host;
+    }
+
     blk_rq_init(NULL, &req);
     scmd->request = &req;

diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a0c95cac91f0..f7e316368c99 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -195,6 +195,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
     req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
     if (!req)
         return ret;
+    blk_rq_set_block_pc(req);

     if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
                                    buffer, bufflen, __GFP_WAIT))
@@ -206,7 +207,6 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
     req->sense_len = 0;
     req->retries = retries;
     req->timeout = timeout;
-    req->cmd_type = REQ_TYPE_BLOCK_PC;
     req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

     /*
@@ -512,68 +512,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
         scsi_run_queue(sdev->request_queue);
 }

-static void __scsi_release_buffers(struct scsi_cmnd *, int);
-
-/*
- * Function:    scsi_end_request()
- *
- * Purpose:     Post-processing of completed commands (usually invoked at end
- *              of upper level post-processing and scsi_io_completion).
- *
- * Arguments:   cmd      - command that is complete.
- *              error    - 0 if I/O indicates success, < 0 for I/O error.
- *              bytes    - number of bytes of completed I/O
- *              requeue  - indicates whether we should requeue leftovers.
- *
- * Lock status: Assumed that lock is not held upon entry.
- *
- * Returns:     cmd if requeue required, NULL otherwise.
- *
- * Notes:       This is called for block device requests in order to
- *              mark some number of sectors as complete.
- *
- *              We are guaranteeing that the request queue will be goosed
- *              at some point during this call.
- * Notes:       If cmd was requeued, upon return it will be a stale pointer.
- */
-static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
-                                          int bytes, int requeue)
-{
-    struct request_queue *q = cmd->device->request_queue;
-    struct request *req = cmd->request;
-
-    /*
-     * If there are blocks left over at the end, set up the command
-     * to queue the remainder of them.
-     */
-    if (blk_end_request(req, error, bytes)) {
-        /* kill remainder if no retrys */
-        if (error && scsi_noretry_cmd(cmd))
-            blk_end_request_all(req, error);
-        else {
-            if (requeue) {
-                /*
-                 * Bleah.  Leftovers again.  Stick the
-                 * leftovers in the front of the
-                 * queue, and goose the queue again.
-                 */
-                scsi_release_buffers(cmd);
-                scsi_requeue_command(q, cmd);
-                cmd = NULL;
-            }
-            return cmd;
-        }
-    }
-
-    /*
-     * This will goose the queue request function at the end, so we don't
-     * need to worry about launching another command.
-     */
-    __scsi_release_buffers(cmd, 0);
-    scsi_next_command(cmd);
-    return NULL;
-}
-
 static inline unsigned int scsi_sgtable_index(unsigned short nents)
 {
     unsigned int index;
@@ -625,30 +563,10 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
     __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
 }

-static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
-{
-
-    if (cmd->sdb.table.nents)
-        scsi_free_sgtable(&cmd->sdb);
-
-    memset(&cmd->sdb, 0, sizeof(cmd->sdb));
-
-    if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
-        struct scsi_data_buffer *bidi_sdb =
-            cmd->request->next_rq->special;
-        scsi_free_sgtable(bidi_sdb);
-        kmem_cache_free(scsi_sdb_cache, bidi_sdb);
-        cmd->request->next_rq->special = NULL;
-    }
-
-    if (scsi_prot_sg_count(cmd))
-        scsi_free_sgtable(cmd->prot_sdb);
-}
-
 /*
  * Function:    scsi_release_buffers()
  *
- * Purpose:     Completion processing for block device I/O requests.
+ * Purpose:     Free resources allocated for a scsi_command.
  *
  * Arguments:   cmd - command that we are bailing.
  *
@@ -659,15 +577,29 @@ static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
  * Notes:       In the event that an upper level driver rejects a
  *              command, we must release resources allocated during
  *              the __init_io() function.  Primarily this would involve
- *              the scatter-gather table, and potentially any bounce
- *              buffers.
+ *              the scatter-gather table.
  */
 void scsi_release_buffers(struct scsi_cmnd *cmd)
 {
-    __scsi_release_buffers(cmd, 1);
+    if (cmd->sdb.table.nents)
+        scsi_free_sgtable(&cmd->sdb);
+
+    memset(&cmd->sdb, 0, sizeof(cmd->sdb));
+
+    if (scsi_prot_sg_count(cmd))
+        scsi_free_sgtable(cmd->prot_sdb);
 }
 EXPORT_SYMBOL(scsi_release_buffers);

+static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
+{
+    struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
+
+    scsi_free_sgtable(bidi_sdb);
+    kmem_cache_free(scsi_sdb_cache, bidi_sdb);
+    cmd->request->next_rq->special = NULL;
+}
+
 /**
  * __scsi_error_from_host_byte - translate SCSI error code into errno
  * @cmd: SCSI command (unused)
@@ -725,16 +657,9 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
  *
  * Returns:     Nothing
  *
- * Notes:       This function is matched in terms of capabilities to
- *              the function that created the scatter-gather list.
- *              In other words, if there are no bounce buffers
- *              (the normal case for most drivers), we don't need
- *              the logic to deal with cleaning up afterwards.
- *
- *              We must call scsi_end_request().  This will finish off
- *              the specified number of sectors.  If we are done, the
- *              command block will be released and the queue function
- *              will be goosed.  If we are not done then we have to
+ * Notes:       We will finish off the specified number of sectors.  If we
+ *              are done, the command block will be released and the queue
+ *              function will be goosed.  If we are not done then we have to
  *              figure out what to do next:
  *
  *              a) We can call scsi_requeue_command().  The request
@@ -743,7 +668,7 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
  *                 be used if we made forward progress, or if we want
  *                 to switch from READ(10) to READ(6) for example.
  *
- *              b) We can call scsi_queue_insert().  The request will
+ *              b) We can call __scsi_queue_insert().  The request will
  *                 be put back on the queue and retried using the same
  *                 command as before, possibly after a delay.
  *
@@ -801,6 +726,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
             req->next_rq->resid_len = scsi_in(cmd)->resid;

         scsi_release_buffers(cmd);
+        scsi_release_bidi_buffers(cmd);
+
         blk_end_request_all(req, 0);

         scsi_next_command(cmd);
@@ -840,12 +767,25 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
     }

     /*
-     * A number of bytes were successfully read.  If there
-     * are leftovers and there is some kind of error
-     * (result != 0), retry the rest.
+     * If we finished all bytes in the request we are done now.
      */
-    if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
-        return;
+    if (!blk_end_request(req, error, good_bytes))
+        goto next_command;
+
+    /*
+     * Kill remainder if no retries.
+     */
+    if (error && scsi_noretry_cmd(cmd)) {
+        blk_end_request_all(req, error);
+        goto next_command;
+    }
+
+    /*
+     * If there was no error, but we have leftover bytes in the
+     * request, just requeue the command.
+     */
+    if (result == 0)
+        goto requeue;

     error = __scsi_error_from_host_byte(cmd, result);

@@ -973,7 +913,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
     switch (action) {
     case ACTION_FAIL:
         /* Give up and fail the remainder of the request */
-        scsi_release_buffers(cmd);
         if (!(req->cmd_flags & REQ_QUIET)) {
             if (description)
                 scmd_printk(KERN_INFO, cmd, "%s\n",
@@ -983,12 +922,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
983 scsi_print_sense("", cmd); 922 scsi_print_sense("", cmd);
984 scsi_print_command(cmd); 923 scsi_print_command(cmd);
985 } 924 }
986 if (blk_end_request_err(req, error)) 925 if (!blk_end_request_err(req, error))
987 scsi_requeue_command(q, cmd); 926 goto next_command;
988 else 927 /*FALLTHRU*/
989 scsi_next_command(cmd);
990 break;
991 case ACTION_REPREP: 928 case ACTION_REPREP:
929 requeue:
992 /* Unprep the request and put it back at the head of the queue. 930 /* Unprep the request and put it back at the head of the queue.
993 * A new command will be prepared and issued. 931 * A new command will be prepared and issued.
994 */ 932 */
@@ -1004,6 +942,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
         __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
         break;
     }
+    return;
+
+next_command:
+    scsi_release_buffers(cmd);
+    scsi_next_command(cmd);
 }

 static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
@@ -1128,15 +1071,7 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,

 int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
 {
-    struct scsi_cmnd *cmd;
-    int ret = scsi_prep_state_check(sdev, req);
-
-    if (ret != BLKPREP_OK)
-        return ret;
-
-    cmd = scsi_get_cmd_from_req(sdev, req);
-    if (unlikely(!cmd))
-        return BLKPREP_DEFER;
+    struct scsi_cmnd *cmd = req->special;

     /*
      * BLOCK_PC requests may transfer data, in which case they must
@@ -1179,15 +1114,11 @@ EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
  */
 int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 {
-    struct scsi_cmnd *cmd;
-    int ret = scsi_prep_state_check(sdev, req);
-
-    if (ret != BLKPREP_OK)
-        return ret;
+    struct scsi_cmnd *cmd = req->special;

     if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
                  && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
-        ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
+        int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
         if (ret != BLKPREP_OK)
             return ret;
     }
@@ -1197,16 +1128,13 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
      */
     BUG_ON(!req->nr_phys_segments);

-    cmd = scsi_get_cmd_from_req(sdev, req);
-    if (unlikely(!cmd))
-        return BLKPREP_DEFER;
-
     memset(cmd->cmnd, 0, BLK_MAX_CDB);
     return scsi_init_io(cmd, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(scsi_setup_fs_cmnd);

-int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
+static int
+scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
 {
     int ret = BLKPREP_OK;

@@ -1258,9 +1186,9 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
     }
     return ret;
 }
-EXPORT_SYMBOL(scsi_prep_state_check);

-int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
+static int
+scsi_prep_return(struct request_queue *q, struct request *req, int ret)
 {
     struct scsi_device *sdev = q->queuedata;

@@ -1291,18 +1219,44 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)

     return ret;
 }
-EXPORT_SYMBOL(scsi_prep_return);

-int scsi_prep_fn(struct request_queue *q, struct request *req)
+static int scsi_prep_fn(struct request_queue *q, struct request *req)
 {
     struct scsi_device *sdev = q->queuedata;
-    int ret = BLKPREP_KILL;
+    struct scsi_cmnd *cmd;
+    int ret;

-    if (req->cmd_type == REQ_TYPE_BLOCK_PC)
+    ret = scsi_prep_state_check(sdev, req);
+    if (ret != BLKPREP_OK)
+        goto out;
+
+    cmd = scsi_get_cmd_from_req(sdev, req);
+    if (unlikely(!cmd)) {
+        ret = BLKPREP_DEFER;
+        goto out;
+    }
+
+    if (req->cmd_type == REQ_TYPE_FS)
+        ret = scsi_cmd_to_driver(cmd)->init_command(cmd);
+    else if (req->cmd_type == REQ_TYPE_BLOCK_PC)
         ret = scsi_setup_blk_pc_cmnd(sdev, req);
+    else
+        ret = BLKPREP_KILL;
+
+out:
     return scsi_prep_return(q, req, ret);
 }
-EXPORT_SYMBOL(scsi_prep_fn);
+
+static void scsi_unprep_fn(struct request_queue *q, struct request *req)
+{
+    if (req->cmd_type == REQ_TYPE_FS) {
+        struct scsi_cmnd *cmd = req->special;
+        struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
+
+        if (drv->uninit_command)
+            drv->uninit_command(cmd);
+    }
+}

 /*
  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
@@ -1723,6 +1677,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
         return NULL;

     blk_queue_prep_rq(q, scsi_prep_fn);
+    blk_queue_unprep_rq(q, scsi_unprep_fn);
     blk_queue_softirq_done(q, scsi_softirq_done);
     blk_queue_rq_timed_out(q, scsi_times_out);
     blk_queue_lld_busy(q, scsi_lld_busy);
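
[Editor's note] Reduced user-space model (not the kernel types) of the prep/unprep split introduced above: scsi_prep_fn() now routes REQ_TYPE_FS requests to a per-driver init_command() method, and scsi_unprep_fn() calls an optional uninit_command(), which sd wires to sd_init_command()/sd_uninit_command() in the sd.c hunks below:

#include <stdio.h>

/* Illustrative stand-in for the function-pointer pair added to
 * struct scsi_driver by this series.
 */
struct scsi_driver_model {
    int  (*init_command)(void *cmd);
    void (*uninit_command)(void *cmd);
};

static int sd_init_model(void *cmd)
{
    (void)cmd;
    puts("sd_init_command");
    return 0;
}

static void sd_uninit_model(void *cmd)
{
    (void)cmd;
    puts("sd_uninit_command");
}

static struct scsi_driver_model sd_model = {
    .init_command   = sd_init_model,
    .uninit_command = sd_uninit_model,
};

int main(void)
{
    int dummy_cmd;

    sd_model.init_command(&dummy_cmd);          /* prep path */
    if (sd_model.uninit_command)                /* unprep path */
        sd_model.uninit_command(&dummy_cmd);
    return 0;
}
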
diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c
index 2b6b93f7d8ef..546f16299ef9 100644
--- a/drivers/scsi/scsi_sysctl.c
+++ b/drivers/scsi/scsi_sysctl.c
@@ -12,7 +12,7 @@
12#include "scsi_priv.h" 12#include "scsi_priv.h"
13 13
14 14
15static ctl_table scsi_table[] = { 15static struct ctl_table scsi_table[] = {
16 { .procname = "logging_level", 16 { .procname = "logging_level",
17 .data = &scsi_logging_level, 17 .data = &scsi_logging_level,
18 .maxlen = sizeof(scsi_logging_level), 18 .maxlen = sizeof(scsi_logging_level),
@@ -21,14 +21,14 @@ static ctl_table scsi_table[] = {
21 { } 21 { }
22}; 22};
23 23
24static ctl_table scsi_dir_table[] = { 24static struct ctl_table scsi_dir_table[] = {
25 { .procname = "scsi", 25 { .procname = "scsi",
26 .mode = 0555, 26 .mode = 0555,
27 .child = scsi_table }, 27 .child = scsi_table },
28 { } 28 { }
29}; 29};
30 30
31static ctl_table scsi_root_table[] = { 31static struct ctl_table scsi_root_table[] = {
32 { .procname = "dev", 32 { .procname = "dev",
33 .mode = 0555, 33 .mode = 0555,
34 .child = scsi_dir_table }, 34 .child = scsi_dir_table },
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index f80908f74ca9..521f5838594b 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2549,6 +2549,7 @@ fc_rport_final_delete(struct work_struct *work)
             fc_flush_devloss(shost);
         if (!cancel_delayed_work(&rport->dev_loss_work))
             fc_flush_devloss(shost);
+        cancel_work_sync(&rport->scan_work);
         spin_lock_irqsave(shost->host_lock, flags);
         rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
     }
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 96af195224f2..6825eda1114a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -109,6 +109,8 @@ static int sd_suspend_system(struct device *);
 static int sd_suspend_runtime(struct device *);
 static int sd_resume(struct device *);
 static void sd_rescan(struct device *);
+static int sd_init_command(struct scsi_cmnd *SCpnt);
+static void sd_uninit_command(struct scsi_cmnd *SCpnt);
 static int sd_done(struct scsi_cmnd *);
 static int sd_eh_action(struct scsi_cmnd *, int);
 static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
@@ -503,6 +505,8 @@ static struct scsi_driver sd_template = {
 		.pm		= &sd_pm_ops,
 	},
 	.rescan			= sd_rescan,
+	.init_command		= sd_init_command,
+	.uninit_command		= sd_uninit_command,
 	.done			= sd_done,
 	.eh_action		= sd_eh_action,
 };
@@ -836,9 +840,9 @@ static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
 	return scsi_setup_blk_pc_cmnd(sdp, rq);
 }
 
-static void sd_unprep_fn(struct request_queue *q, struct request *rq)
+static void sd_uninit_command(struct scsi_cmnd *SCpnt)
 {
-	struct scsi_cmnd *SCpnt = rq->special;
+	struct request *rq = SCpnt->request;
 
 	if (rq->cmd_flags & REQ_DISCARD)
 		__free_page(rq->completion_data);
@@ -850,18 +854,10 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq)
 	}
 }
 
-/**
- *	sd_prep_fn - build a scsi (read or write) command from
- *	information in the request structure.
- *	@SCpnt: pointer to mid-level's per scsi command structure that
- *		contains request and into which the scsi command is written
- *
- *	Returns 1 if successful and 0 if error (or cannot be done now).
- **/
-static int sd_prep_fn(struct request_queue *q, struct request *rq)
+static int sd_init_command(struct scsi_cmnd *SCpnt)
 {
-	struct scsi_cmnd *SCpnt;
-	struct scsi_device *sdp = q->queuedata;
+	struct request *rq = SCpnt->request;
+	struct scsi_device *sdp = SCpnt->device;
 	struct gendisk *disk = rq->rq_disk;
 	struct scsi_disk *sdkp;
 	sector_t block = blk_rq_pos(rq);
@@ -883,12 +879,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
 	} else if (rq->cmd_flags & REQ_FLUSH) {
 		ret = scsi_setup_flush_cmnd(sdp, rq);
 		goto out;
-	} else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
-		ret = scsi_setup_blk_pc_cmnd(sdp, rq);
-		goto out;
-	} else if (rq->cmd_type != REQ_TYPE_FS) {
-		ret = BLKPREP_KILL;
-		goto out;
 	}
 	ret = scsi_setup_fs_cmnd(sdp, rq);
 	if (ret != BLKPREP_OK)
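
With type dispatch handled once in scsi_prep_fn, sd_init_command only ever sees REQ_TYPE_FS requests, so the BLOCK_PC and unknown-type branches above become dead code. It also returns a bare BLKPREP_* status now; requeue/kill policy stays in the midlayer's scsi_prep_return. The contract as a skeleton (the body is illustrative, not sd's real logic):

static int sd_init_command_skeleton(struct scsi_cmnd *SCpnt)
{
	if (!scsi_device_online(SCpnt->device))
		return BLKPREP_KILL;	/* unserviceable: fail the request */

	if (0 /* e.g. a transient resource shortage */)
		return BLKPREP_DEFER;	/* leave it queued, retry later */

	/* ... build the READ/WRITE CDB into SCpnt->cmnd ... */
	return BLKPREP_OK;		/* ready for dispatch */
}
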
@@ -900,11 +890,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
 	 * is used for a killable error condition */
 	ret = BLKPREP_KILL;
 
-	SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
-					"sd_prep_fn: block=%llu, "
-					"count=%d\n",
-					(unsigned long long)block,
-					this_count));
+	SCSI_LOG_HLQUEUE(1,
+		scmd_printk(KERN_INFO, SCpnt,
+			    "%s: block=%llu, count=%d\n",
+			    __func__, (unsigned long long)block, this_count));
 
 	if (!sdp || !scsi_device_online(sdp) ||
 	    block + blk_rq_sectors(rq) > get_capacity(disk)) {
@@ -1124,7 +1113,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
 	 */
 	ret = BLKPREP_OK;
  out:
-	return scsi_prep_return(q, rq, ret);
+	return ret;
 }
 
 /**
@@ -1686,12 +1675,12 @@ static int sd_done(struct scsi_cmnd *SCpnt)
 					  sshdr.ascq));
 	}
 #endif
+	sdkp->medium_access_timed_out = 0;
+
 	if (driver_byte(result) != DRIVER_SENSE &&
 	    (!sense_valid || sense_deferred))
 		goto out;
 
-	sdkp->medium_access_timed_out = 0;
-
 	switch (sshdr.sense_key) {
 	case HARDWARE_ERROR:
 	case MEDIUM_ERROR:
@@ -2452,7 +2441,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
 	}
 
 	sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
-	if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
+	if (sdp->broken_fua) {
+		sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
+		sdkp->DPOFUA = 0;
+	} else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
 		sd_first_printk(KERN_NOTICE, sdkp,
 			"Uses READ/WRITE(6), disabling FUA\n");
 		sdkp->DPOFUA = 0;
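
For context on the sd_read_cache_type hunk: DPOFUA is bit 4 of the device-specific parameter in the MODE SENSE header (hence the 0x10 mask), and when set sd adds the FUA bit to WRITE(10)/WRITE(16) CDBs so flagged writes reach the medium before completion. broken_fua is evidently a new scsi_device quirk for drives that advertise DPOFUA but mishandle the bit. Roughly how the flag is consumed later in the write path (a simplified sketch, not sd's verbatim code):

static void sd_apply_fua_sketch(struct scsi_cmnd *SCpnt, struct scsi_disk *sdkp,
				struct request *rq)
{
	if ((rq->cmd_flags & REQ_FUA) && sdkp->DPOFUA)
		SCpnt->cmnd[1] |= 0x08;	/* FUA bit, byte 1 of WRITE(10)/WRITE(16) */
}
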
@@ -2875,9 +2867,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
 
 	sd_revalidate_disk(gd);
 
-	blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
-	blk_queue_unprep_rq(sdp->request_queue, sd_unprep_fn);
-
 	gd->driverfs_dev = &sdp->sdev_gendev;
 	gd->flags = GENHD_FL_EXT_DEVT;
 	if (sdp->removable) {
@@ -3025,8 +3014,6 @@ static int sd_remove(struct device *dev)
 
 	async_synchronize_full_domain(&scsi_sd_pm_domain);
 	async_synchronize_full_domain(&scsi_sd_probe_domain);
-	blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn);
-	blk_queue_unprep_rq(sdkp->device->request_queue, NULL);
 	device_del(&sdkp->dev);
 	del_gendisk(sdkp->disk);
 	sd_shutdown(dev);
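
Why sd needs an uninit hook at all: discard requests borrow a page for the UNMAP/WRITE SAME payload and park it in rq->completion_data, and the sd_uninit_command above frees it once the request completes. The allocation side is not in these hunks; it looks roughly like this:

static int sd_setup_discard_sketch(struct request *rq)	/* illustrative only */
{
	struct page *page = alloc_page(GFP_ATOMIC | __GFP_ZERO);

	if (!page)
		return BLKPREP_DEFER;	/* retry when memory frees up */

	/* ... format the UNMAP parameter list into the page ... */
	rq->completion_data = page;	/* freed in sd_uninit_command() */
	return BLKPREP_OK;
}
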
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index df5e961484e1..53268aaba559 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1653,10 +1653,9 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
 	if (!rq)
 		return -ENOMEM;
 
+	blk_rq_set_block_pc(rq);
 	memcpy(rq->cmd, cmd, hp->cmd_len);
-
 	rq->cmd_len = hp->cmd_len;
-	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 
 	srp->rq = rq;
 	rq->end_io_data = srp;
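
sg here (and st below) stop open-coding the cmd_type assignment in favor of a new block-layer helper. A plausible shape for blk_rq_set_block_pc() in block/blk-core.c follows; treat the exact field list as an assumption. The point is a single canonical initializer for pass-through requests instead of scattered hand-rolled setup:

void blk_rq_set_block_pc(struct request *rq)
{
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	memset(rq->__cmd, 0, sizeof(rq->__cmd));
}
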
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 40d85929aefe..93cbd36c990b 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -79,6 +79,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM);
 static DEFINE_MUTEX(sr_mutex);
 static int sr_probe(struct device *);
 static int sr_remove(struct device *);
+static int sr_init_command(struct scsi_cmnd *SCpnt);
 static int sr_done(struct scsi_cmnd *);
 static int sr_runtime_suspend(struct device *dev);
 
@@ -94,6 +95,7 @@ static struct scsi_driver sr_template = {
 		.remove		= sr_remove,
 		.pm		= &sr_pm_ops,
 	},
+	.init_command		= sr_init_command,
 	.done			= sr_done,
 };
 
@@ -378,21 +380,14 @@ static int sr_done(struct scsi_cmnd *SCpnt)
 	return good_bytes;
 }
 
-static int sr_prep_fn(struct request_queue *q, struct request *rq)
+static int sr_init_command(struct scsi_cmnd *SCpnt)
 {
 	int block = 0, this_count, s_size;
 	struct scsi_cd *cd;
-	struct scsi_cmnd *SCpnt;
-	struct scsi_device *sdp = q->queuedata;
+	struct request *rq = SCpnt->request;
+	struct scsi_device *sdp = SCpnt->device;
 	int ret;
 
-	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
-		ret = scsi_setup_blk_pc_cmnd(sdp, rq);
-		goto out;
-	} else if (rq->cmd_type != REQ_TYPE_FS) {
-		ret = BLKPREP_KILL;
-		goto out;
-	}
 	ret = scsi_setup_fs_cmnd(sdp, rq);
 	if (ret != BLKPREP_OK)
 		goto out;
@@ -517,7 +512,7 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
 	 */
 	ret = BLKPREP_OK;
  out:
-	return scsi_prep_return(q, rq, ret);
+	return ret;
 }
 
 static int sr_block_open(struct block_device *bdev, fmode_t mode)
@@ -718,7 +713,6 @@ static int sr_probe(struct device *dev)
 
 	/* FIXME: need to handle a get_capabilities failure properly ?? */
 	get_capabilities(cd);
-	blk_queue_prep_rq(sdev->request_queue, sr_prep_fn);
 	sr_vendor_init(cd);
 
 	disk->driverfs_dev = &sdev->sdev_gendev;
@@ -993,7 +987,6 @@ static int sr_remove(struct device *dev)
 
 	scsi_autopm_get_device(cd->device);
 
-	blk_queue_prep_rq(cd->device->request_queue, scsi_prep_fn);
 	del_gendisk(cd->disk);
 
 	mutex_lock(&sr_ref_mutex);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index afc834e172c6..14eb4b256a03 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -484,7 +484,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
 	if (!req)
 		return DRIVER_ERROR << 24;
 
-	req->cmd_type = REQ_TYPE_BLOCK_PC;
+	blk_rq_set_block_pc(req);
 	req->cmd_flags |= REQ_QUIET;
 
 	mdata->null_mapped = 1;
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
index 636bbe0ea84c..88220794cc98 100644
--- a/drivers/scsi/sun3_NCR5380.c
+++ b/drivers/scsi/sun3_NCR5380.c
@@ -364,7 +364,7 @@ static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged)
 	return( 0 );
     if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >=
 	TagAlloc[cmd->device->id][cmd->device->lun].queue_size ) {
-	TAG_PRINTK( "scsi%d: target %d lun %d: no free tags\n",
+	dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n",
 		    H_NO(cmd), cmd->device->id, cmd->device->lun );
 	return( 1 );
     }
@@ -388,7 +388,7 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)
 	!setup_use_tagged_queuing || !cmd->device->tagged_supported) {
 	cmd->tag = TAG_NONE;
 	hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
-	TAG_PRINTK( "scsi%d: target %d lun %d now allocated by untagged "
+	dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged "
 		"command\n", H_NO(cmd), cmd->device->id, cmd->device->lun );
     }
     else {
@@ -397,7 +397,7 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)
 	cmd->tag = find_first_zero_bit( &ta->allocated, MAX_TAGS );
 	set_bit( cmd->tag, &ta->allocated );
 	ta->nr_allocated++;
-	TAG_PRINTK( "scsi%d: using tag %d for target %d lun %d "
+	dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d "
 		"(now %d tags in use)\n",
 		H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun,
 		ta->nr_allocated );
@@ -415,7 +415,7 @@ static void cmd_free_tag(struct scsi_cmnd *cmd)
 
     if (cmd->tag == TAG_NONE) {
 	hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
-	TAG_PRINTK( "scsi%d: target %d lun %d untagged cmd finished\n",
+	dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n",
 		H_NO(cmd), cmd->device->id, cmd->device->lun );
     }
     else if (cmd->tag >= MAX_TAGS) {
@@ -426,7 +426,7 @@ static void cmd_free_tag(struct scsi_cmnd *cmd)
 	TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
 	clear_bit( cmd->tag, &ta->allocated );
 	ta->nr_allocated--;
-	TAG_PRINTK( "scsi%d: freed tag %d for target %d lun %d\n",
+	dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n",
 		H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun );
     }
 }
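
From here on, every per-subsystem debug macro in this file (TAG_PRINTK, MAIN_PRINTK, INT_PRINTK, ...) collapses into one mask-tested dprintk shared with the other NCR5380 drivers, and NCR5380_dprint/NCR5380_dprint_phase take the host instance explicitly instead of relying on an in-scope variable. The macro is presumably defined in NCR5380.h along these lines (sketch):

#if NDEBUG
#define dprintk(flg, fmt, ...)					\
	do {							\
		if ((NDEBUG) & (flg))				\
			printk(fmt, ##__VA_ARGS__);		\
	} while (0)
#else
#define dprintk(flg, fmt, ...)	do {} while (0)
#endif
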
@@ -484,7 +484,7 @@ static __inline__ void initialize_SCp(struct scsi_cmnd *cmd)
 
 #include <linux/delay.h>
 
-#if 1
+#if NDEBUG
 static struct {
     unsigned char mask;
     const char * name;}
@@ -572,12 +572,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
     }
 }
 
-#else /* !NDEBUG */
-
-/* dummies... */
-__inline__ void NCR5380_print(struct Scsi_Host *instance) { };
-__inline__ void NCR5380_print_phase(struct Scsi_Host *instance) { };
-
 #endif
 
 /*
@@ -618,7 +612,7 @@ static inline void NCR5380_all_init (void)
 {
     static int done = 0;
     if (!done) {
-	INI_PRINTK("scsi : NCR5380_all_init()\n");
+	dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n");
 	done = 1;
     }
 }
@@ -681,8 +675,8 @@ static void NCR5380_print_status(struct Scsi_Host *instance)
     Scsi_Cmnd *ptr;
     unsigned long flags;
 
-    NCR_PRINT(NDEBUG_ANY);
-    NCR_PRINT_PHASE(NDEBUG_ANY);
+    NCR5380_dprint(NDEBUG_ANY, instance);
+    NCR5380_dprint_phase(NDEBUG_ANY, instance);
 
     hostdata = (struct NCR5380_hostdata *)instance->hostdata;
 
@@ -928,7 +922,7 @@ static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd,
 
     local_irq_restore(flags);
 
-    QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd),
+    dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd),
 	      (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");
 
     /* If queue_command() is called from an interrupt (real one or bottom
@@ -998,7 +992,7 @@ static void NCR5380_main (struct work_struct *bl)
 	done = 1;
 
 	if (!hostdata->connected) {
-	    MAIN_PRINTK( "scsi%d: not connected\n", HOSTNO );
+	    dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO );
 	    /*
 	     * Search through the issue_queue for a command destined
 	     * for a target that's not busy.
@@ -1012,12 +1006,8 @@ static void NCR5380_main (struct work_struct *bl)
 	    for (tmp = (struct scsi_cmnd *) hostdata->issue_queue,
 		 prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) {
 
-#if (NDEBUG & NDEBUG_LISTS)
 		if (prev != tmp)
-		    printk("MAIN tmp=%p target=%d busy=%d lun=%d\n",
-			   tmp, tmp->target, hostdata->busy[tmp->target],
-			   tmp->lun);
-#endif
+		    dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun);
 		/* When we find one, remove it from the issue queue. */
 		/* ++guenther: possible race with Falcon locking */
 		if (
@@ -1047,9 +1037,9 @@ static void NCR5380_main (struct work_struct *bl)
 		 * On failure, we must add the command back to the
 		 *   issue queue so we can keep trying.
 		 */
-		MAIN_PRINTK("scsi%d: main(): command for target %d "
+		dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d "
 			    "lun %d removed from issue_queue\n",
-			    HOSTNO, tmp->target, tmp->lun);
+			    HOSTNO, tmp->device->id, tmp->device->lun);
 		/*
 		 * REQUEST SENSE commands are issued without tagged
 		 * queueing, even on SCSI-II devices because the
@@ -1076,7 +1066,7 @@ static void NCR5380_main (struct work_struct *bl)
 		    cmd_free_tag( tmp );
 #endif
 		    local_irq_restore(flags);
-		    MAIN_PRINTK("scsi%d: main(): select() failed, "
+		    dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, "
 				"returned to issue_queue\n", HOSTNO);
 		    if (hostdata->connected)
 			break;
@@ -1090,10 +1080,10 @@ static void NCR5380_main (struct work_struct *bl)
 #endif
 	    ) {
 	    local_irq_restore(flags);
-	    MAIN_PRINTK("scsi%d: main: performing information transfer\n",
+	    dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n",
 			HOSTNO);
 	    NCR5380_information_transfer(instance);
-	    MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO);
+	    dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO);
 	    done = 0;
 	}
     } while (!done);
@@ -1130,7 +1120,7 @@ static void NCR5380_dma_complete( struct Scsi_Host *instance )
 	return;
     }
 
-    DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
+    dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
 	       HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
 	       NCR5380_read(STATUS_REG));
 
@@ -1189,27 +1179,27 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id)
     int done = 1, handled = 0;
     unsigned char basr;
 
-    INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO);
+    dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO);
 
     /* Look for pending interrupts */
     basr = NCR5380_read(BUS_AND_STATUS_REG);
-    INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr);
+    dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr);
     /* dispatch to appropriate routine if found and done=0 */
     if (basr & BASR_IRQ) {
-	NCR_PRINT(NDEBUG_INTR);
+	NCR5380_dprint(NDEBUG_INTR, instance);
 	if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {
 	    done = 0;
 //	    ENABLE_IRQ();
-	    INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO);
+	    dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO);
 	    NCR5380_reselect(instance);
 	    (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
 	}
 	else if (basr & BASR_PARITY_ERROR) {
-	    INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO);
+	    dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO);
 	    (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
 	}
 	else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
-	    INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO);
+	    dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO);
 	    (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
 	}
 	else {
@@ -1229,7 +1219,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id)
 	     ((basr & BASR_END_DMA_TRANSFER) ||
 	      !(basr & BASR_PHASE_MATCH))) {
 
-	    INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
+	    dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
 	    NCR5380_dma_complete( instance );
 	    done = 0;
 //	    ENABLE_IRQ();
@@ -1238,7 +1228,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id)
 	{
 /* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */
 	    if (basr & BASR_PHASE_MATCH)
-		INT_PRINTK("scsi%d: unknown interrupt, "
+		dprintk(NDEBUG_INTR, "scsi%d: unknown interrupt, "
 			   "BASR 0x%x, MR 0x%x, SR 0x%x\n",
 			   HOSTNO, basr, NCR5380_read(MODE_REG),
 			   NCR5380_read(STATUS_REG));
@@ -1262,7 +1252,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id)
     }
 
     if (!done) {
-	INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO);
+	dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO);
 	/* Put a call to NCR5380_main() on the queue... */
 	queue_main();
     }
@@ -1338,8 +1328,8 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
     unsigned long flags;
 
     hostdata->restart_select = 0;
-    NCR_PRINT(NDEBUG_ARBITRATION);
-    ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO,
+    NCR5380_dprint(NDEBUG_ARBITRATION, instance);
+    dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO,
 	       instance->this_id);
 
     /*
@@ -1385,7 +1375,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
 	   && !hostdata->connected);
 #endif
 
-    ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO);
+    dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO);
 
     if (hostdata->connected) {
 	NCR5380_write(MODE_REG, MR_BASE);
@@ -1406,7 +1396,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
 	(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
 	hostdata->connected) {
 	NCR5380_write(MODE_REG, MR_BASE);
-	ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
+	dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
 		   HOSTNO);
 	return -1;
     }
@@ -1421,7 +1411,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
 	hostdata->connected) {
 	NCR5380_write(MODE_REG, MR_BASE);
 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-	ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
+	dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
 		   HOSTNO);
 	return -1;
     }
@@ -1444,7 +1434,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
 	return -1;
     }
 
-    ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO);
+    dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO);
 
     /*
      * Now that we have won arbitration, start Selection process, asserting
@@ -1504,7 +1494,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
 
     udelay(1);
 
-    SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);
+    dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);
 
     /*
      * The SCSI specification calls for a 250 ms timeout for the actual
@@ -1559,7 +1549,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
 	printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO);
 	if (hostdata->restart_select)
 	    printk(KERN_NOTICE "\trestart select\n");
-	NCR_PRINT(NDEBUG_ANY);
+	NCR5380_dprint(NDEBUG_ANY, instance);
 	NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
 	return -1;
     }
@@ -1572,7 +1562,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
 #endif
 	cmd->scsi_done(cmd);
 	NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-	SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO);
+	dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO);
 	NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
 	return 0;
     }
@@ -1597,7 +1587,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
     /* Wait for start of REQ/ACK handshake */
     while (!(NCR5380_read(STATUS_REG) & SR_REQ));
 
-    SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
+    dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
 	       HOSTNO, cmd->device->id);
     tmp[0] = IDENTIFY(1, cmd->device->lun);
 
@@ -1617,7 +1607,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
     data = tmp;
     phase = PHASE_MSGOUT;
     NCR5380_transfer_pio(instance, &phase, &len, &data);
-    SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO);
+    dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO);
     /* XXX need to handle errors here */
     hostdata->connected = cmd;
 #ifndef SUPPORT_TAGS
@@ -1680,12 +1670,12 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance,
 	 */
 	while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));
 
-	HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO);
+	dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO);
 
 	/* Check for phase mismatch */
 	if ((tmp & PHASE_MASK) != p) {
-	    PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO);
-	    NCR_PRINT_PHASE(NDEBUG_PIO);
+	    dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO);
+	    NCR5380_dprint_phase(NDEBUG_PIO, instance);
 	    break;
 	}
 
@@ -1708,24 +1698,24 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance,
 	    if (!((p & SR_MSG) && c > 1)) {
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
 			      ICR_ASSERT_DATA);
-		NCR_PRINT(NDEBUG_PIO);
+		NCR5380_dprint(NDEBUG_PIO, instance);
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
 			      ICR_ASSERT_DATA | ICR_ASSERT_ACK);
 	    } else {
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
 			      ICR_ASSERT_DATA | ICR_ASSERT_ATN);
-		NCR_PRINT(NDEBUG_PIO);
+		NCR5380_dprint(NDEBUG_PIO, instance);
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
 			      ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
 	    }
 	} else {
-	    NCR_PRINT(NDEBUG_PIO);
+	    NCR5380_dprint(NDEBUG_PIO, instance);
 	    NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
 	}
 
 	while (NCR5380_read(STATUS_REG) & SR_REQ);
 
-	HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO);
+	dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO);
 
 /*
  * We have several special cases to consider during REQ/ACK handshaking :
@@ -1746,7 +1736,7 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance,
 	}
     } while (--c);
 
-    PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c);
+    dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c);
 
     *count = c;
     *data = d;
@@ -1854,7 +1844,7 @@ static int NCR5380_transfer_dma( struct Scsi_Host *instance,
     }
     hostdata->dma_len = c;
 
-    DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n",
+    dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
 	       HOSTNO, (p & SR_IO) ? "reading" : "writing",
 	       c, (p & SR_IO) ? "to" : "from", *data);
 
@@ -1931,7 +1921,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
 	    phase = (tmp & PHASE_MASK);
 	    if (phase != old_phase) {
 		old_phase = phase;
-		NCR_PRINT_PHASE(NDEBUG_INFORMATION);
+		NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
 	    }
 
 	    if(phase == PHASE_CMDOUT) {
@@ -1996,7 +1986,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
 			    --cmd->SCp.buffers_residual;
 			    cmd->SCp.this_residual = cmd->SCp.buffer->length;
 			    cmd->SCp.ptr = SGADDR(cmd->SCp.buffer);
-			    INF_PRINTK("scsi%d: %d bytes and %d buffers left\n",
+			    dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n",
 				       HOSTNO, cmd->SCp.this_residual,
 				       cmd->SCp.buffers_residual);
 			}
@@ -2088,7 +2078,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
 			/* Accept message by clearing ACK */
 			NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 
-			LNK_PRINTK("scsi%d: target %d lun %d linked command "
+			dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked command "
 				   "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun);
 
 			/* Enable reselect interrupts */
@@ -2113,7 +2103,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
 			 * and don't free it! */
 			cmd->next_link->tag = cmd->tag;
 			cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
-			LNK_PRINTK("scsi%d: target %d lun %d linked request "
+			dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked request "
 				   "done, calling scsi_done().\n",
 				   HOSTNO, cmd->device->id, cmd->device->lun);
 #ifdef NCR5380_STATS
@@ -2128,7 +2118,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
 			/* Accept message by clearing ACK */
 			NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 			hostdata->connected = NULL;
-			QU_PRINTK("scsi%d: command for target %d, lun %d "
+			dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %d "
 				  "completed\n", HOSTNO, cmd->device->id, cmd->device->lun);
 #ifdef SUPPORT_TAGS
 			cmd_free_tag( cmd );
@@ -2142,7 +2132,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
 			    /* ++Andreas: the mid level code knows about
 			       QUEUE_FULL now. */
 			    TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
-			    TAG_PRINTK("scsi%d: target %d lun %d returned "
+			    dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d returned "
 				       "QUEUE_FULL after %d commands\n",
 				       HOSTNO, cmd->device->id, cmd->device->lun,
 				       ta->nr_allocated);
@@ -2186,7 +2176,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
 			if ((cmd->cmnd[0] != REQUEST_SENSE) &&
 			    (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
 			    scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
-			    ASEN_PRINTK("scsi%d: performing request sense\n",
+			    dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n",
 					HOSTNO);
 			    /* this is initialized from initialize_SCp
 			    cmd->SCp.buffer = NULL;
@@ -2198,7 +2188,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
 			    SET_NEXT(cmd, hostdata->issue_queue);
 			    hostdata->issue_queue = (struct scsi_cmnd *) cmd;
 			    local_irq_restore(flags);
-			    QU_PRINTK("scsi%d: REQUEST SENSE added to head of "
+			    dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of "
 				      "issue queue\n", H_NO(cmd));
 			} else
 #endif /* def AUTOSENSE */
@@ -2238,7 +2228,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
 			    cmd->device->tagged_supported = 0;
 			    hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
 			    cmd->tag = TAG_NONE;
-			    TAG_PRINTK("scsi%d: target %d lun %d rejected "
+			    dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d rejected "
 				       "QUEUE_TAG message; tagged queuing "
 				       "disabled\n",
 				       HOSTNO, cmd->device->id, cmd->device->lun);
@@ -2255,7 +2245,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
 			hostdata->connected = NULL;
 			hostdata->disconnected_queue = cmd;
 			local_irq_restore(flags);
-			QU_PRINTK("scsi%d: command for target %d lun %d was "
+			dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %d was "
 				  "moved from connected to the "
 				  "disconnected_queue\n", HOSTNO,
 				  cmd->device->id, cmd->device->lun);
@@ -2308,13 +2298,13 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
 			/* Accept first byte by clearing ACK */
 			NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 
-			EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO);
+			dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO);
 
 			len = 2;
 			data = extended_msg + 1;
 			phase = PHASE_MSGIN;
 			NCR5380_transfer_pio(instance, &phase, &len, &data);
-			EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO,
+			dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO,
 				   (int)extended_msg[1], (int)extended_msg[2]);
 
 			if (!len && extended_msg[1] <=
@@ -2326,7 +2316,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
 			    phase = PHASE_MSGIN;
 
 			    NCR5380_transfer_pio(instance, &phase, &len, &data);
-			    EXT_PRINTK("scsi%d: message received, residual %d\n",
+			    dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n",
 				       HOSTNO, len);
 
 			    switch (extended_msg[2]) {
@@ -2416,7 +2406,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
 		    break;
 		default:
 		    printk("scsi%d: unknown phase\n", HOSTNO);
-		    NCR_PRINT(NDEBUG_ANY);
+		    NCR5380_dprint(NDEBUG_ANY, instance);
 		} /* switch(phase) */
 	    } /* if (tmp * SR_REQ) */
 	} /* while (1) */
@@ -2458,7 +2448,7 @@ static void NCR5380_reselect (struct Scsi_Host *instance)
 
     target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
 
-    RSL_PRINTK("scsi%d: reselect\n", HOSTNO);
+    dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO);
 
     /*
      * At this point, we have detected that our SCSI ID is on the bus,
@@ -2580,14 +2570,14 @@ static void NCR5380_reselect (struct Scsi_Host *instance)
 	if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
 	    msg[1] == SIMPLE_QUEUE_TAG)
 	    tag = msg[2];
-	TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at "
+	dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at "
 		   "reselection\n", HOSTNO, target_mask, lun, tag);
     }
 #endif
 
     hostdata->connected = tmp;
-    RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
-	       HOSTNO, tmp->target, tmp->lun, tmp->tag);
+    dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
+	       HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag);
 }
 
 
@@ -2622,7 +2612,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
 
     local_irq_save(flags);
 
-    ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
+    dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
 		NCR5380_read(BUS_AND_STATUS_REG),
 		NCR5380_read(STATUS_REG));
 
@@ -2635,7 +2625,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
 
     if (hostdata->connected == cmd) {
 
-	ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO);
+	dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO);
 /*
  * We should perform BSY checking, and make sure we haven't slipped
 * into BUS FREE.
@@ -2664,11 +2654,11 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
 #endif
 	  local_irq_restore(flags);
 	  cmd->scsi_done(cmd);
-	  return SCSI_ABORT_SUCCESS;
+	  return SUCCESS;
 	} else {
 /*	  local_irq_restore(flags); */
 	  printk("scsi%d: abort of connected command failed!\n", HOSTNO);
-	  return SCSI_ABORT_ERROR;
+	  return FAILED;
 	}
    }
 #endif
@@ -2686,12 +2676,12 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
 	    SET_NEXT(tmp, NULL);
 	    tmp->result = DID_ABORT << 16;
 	    local_irq_restore(flags);
-	    ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n",
+	    dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n",
 			HOSTNO);
 	    /* Tagged queuing note: no tag to free here, hasn't been assigned
 	     * yet... */
 	    tmp->scsi_done(tmp);
-	    return SCSI_ABORT_SUCCESS;
+	    return SUCCESS;
 	}
 
 /*
@@ -2707,8 +2697,8 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
 
     if (hostdata->connected) {
 	local_irq_restore(flags);
-	ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO);
-	return SCSI_ABORT_SNOOZE;
+	dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO);
+	return FAILED;
     }
 
 /*
@@ -2740,12 +2730,12 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
 	 tmp = NEXT(tmp))
 	if (cmd == tmp) {
 	    local_irq_restore(flags);
-	    ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO);
+	    dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO);
 
 	    if (NCR5380_select (instance, cmd, (int) cmd->tag))
-		return SCSI_ABORT_BUSY;
+		return FAILED;
 
-	    ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO);
+	    dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO);
 
 	    do_abort (instance);
 
@@ -2769,7 +2759,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
 #endif
 	    local_irq_restore(flags);
 	    tmp->scsi_done(tmp);
-	    return SCSI_ABORT_SUCCESS;
+	    return SUCCESS;
 	}
     }
 
@@ -2786,7 +2776,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
     local_irq_restore(flags);
     printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO);
 
-    return SCSI_ABORT_NOT_RUNNING;
+    return FAILED;
 }
 
 
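
The SCSI_ABORT_*/SCSI_RESET_* values being replaced date from the pre-2.6 error-handling API; the modern midlayer understands only SUCCESS and FAILED from eh callbacks and performs escalation itself (device reset, then bus reset, then host reset). The contract, sketched with hypothetical helpers:

static int my_eh_abort_handler(struct scsi_cmnd *cmd)	/* hypothetical driver */
{
	if (my_hw_abort(cmd))	/* hypothetical: try to stop the command in hardware */
		return SUCCESS;	/* midlayer completes or retries the command */
	return FAILED;		/* midlayer escalates to a stronger reset */
}
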
@@ -2795,7 +2785,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
  *
  * Purpose : reset the SCSI bus.
  *
- * Returns : SCSI_RESET_WAKEUP
+ * Returns : SUCCESS or FAILURE
  *
  */
 
@@ -2804,7 +2794,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
     SETUP_HOSTDATA(cmd->device->host);
     int i;
     unsigned long flags;
-#if 1
+#if defined(RESET_RUN_DONE)
     struct scsi_cmnd *connected, *disconnected_queue;
 #endif
 
@@ -2826,8 +2816,15 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
      * through anymore ... */
     (void)NCR5380_read( RESET_PARITY_INTERRUPT_REG );
 
-#if 1 /* XXX Should now be done by midlevel code, but it's broken XXX */
-    /* XXX see below XXX */
+    /* MSch 20140115 - looking at the generic NCR5380 driver, all of this
+     * should go.
+     * Catch-22: if we don't clear all queues, the SCSI driver lock will
+     * not be released by atari_scsi_reset()!
+     */
+
+#if defined(RESET_RUN_DONE)
+    /* XXX Should now be done by midlevel code, but it's broken XXX */
+    /* XXX see below XXX */
 
     /* MSch: old-style reset: actually abort all command processing here */
 
@@ -2857,7 +2854,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
      */
 
     if ((cmd = connected)) {
-	ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd));
+	dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
 	cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
 	cmd->scsi_done( cmd );
     }
@@ -2869,14 +2866,14 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
 	cmd->scsi_done( cmd );
     }
     if (i > 0)
-	ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i);
+	dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i);
 
 
     /* since all commands have been explicitly terminated, we need to tell
     * the midlevel code that the reset was SUCCESSFUL, and there is no
     * need to 'wake up' the commands by a request_sense
     */
-    return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET;
+    return SUCCESS;
 #else /* 1 */
 
     /* MSch: new-style reset handling: let the mid-level do what it can */
@@ -2903,11 +2900,11 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
     */
 
    if (hostdata->issue_queue)
-	ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
+	dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
    if (hostdata->connected)
-	ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd));
+	dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
   if (hostdata->disconnected_queue)
-	ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));
+	dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));
 
    local_irq_save(flags);
    hostdata->issue_queue = NULL;
@@ -2924,7 +2921,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
    local_irq_restore(flags);
 
    /* we did no complete reset of all commands, so a wakeup is required */
-   return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET;
+   return SUCCESS;
 #endif /* 1 */
 }
 
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index e2c009b033ce..9707b7494a89 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -3,6 +3,10 @@
  *
  * Sun3 DMA routines added by Sam Creasey (sammy@sammy.net)
  *
+ * VME support added by Sam Creasey
+ *
+ * TODO: modify this driver to support multiple Sun3 SCSI VME boards
+ *
  * Adapted from mac_scsinew.c:
  */
 /*
@@ -45,10 +49,6 @@
  * USLEEP - enable support for devices that don't disconnect.  Untested.
  */
 
-/*
- * $Log: sun3_NCR5380.c,v $
- */
-
 #define AUTOSENSE
 
 #include <linux/types.h>
@@ -69,23 +69,15 @@
 #include <asm/idprom.h>
 #include <asm/machines.h>
 
-#define NDEBUG 0
-
-#define NDEBUG_ABORT 0x00100000
-#define NDEBUG_TAGS 0x00200000
-#define NDEBUG_MERGING 0x00400000
-
 /* dma on! */
 #define REAL_DMA
 
 #include "scsi.h"
-#include "initio.h"
 #include <scsi/scsi_host.h>
 #include "sun3_scsi.h"
+#include "NCR5380.h"
 
-static void NCR5380_print(struct Scsi_Host *instance);
+extern int sun3_map_test(unsigned long, char *);
 
-/* #define OLDDMA */
-
 #define USE_WRAPPER
 /*#define RESET_BOOT */
@@ -101,7 +93,11 @@ static void NCR5380_print(struct Scsi_Host *instance);
 
 /* #define SUPPORT_TAGS */
 
+#ifdef SUN3_SCSI_VME
+#define ENABLE_IRQ()
+#else
 #define	ENABLE_IRQ()	enable_irq( IRQ_SUN3_SCSI );
+#endif
 
 
 static irqreturn_t scsi_sun3_intr(int irq, void *dummy);
@@ -123,6 +119,8 @@ module_param(setup_hostid, int, 0);
 
 static struct scsi_cmnd *sun3_dma_setup_done = NULL;
 
+#define	RESET_RUN_DONE
+
 #define	AFTER_RESET_DELAY	(HZ/2)
 
 /* ms to wait after hitting dma regs */
@@ -136,10 +134,9 @@ static struct scsi_cmnd *sun3_dma_setup_done = NULL;
 
 static volatile unsigned char *sun3_scsi_regp;
 static volatile struct sun3_dma_regs *dregs;
-#ifdef OLDDMA
-static unsigned char *dmabuf = NULL; /* dma memory buffer */
-#endif
+#ifndef SUN3_SCSI_VME
 static struct sun3_udc_regs *udc_regs = NULL;
+#endif
 static unsigned char *sun3_dma_orig_addr = NULL;
 static unsigned long sun3_dma_orig_count = 0;
 static int sun3_dma_active = 0;
@@ -159,6 +156,7 @@ static inline void sun3scsi_write(int reg, int value)
 	sun3_scsi_regp[reg] = value;
 }
 
+#ifndef SUN3_SCSI_VME
 /* dma controller register access functions */
 
 static inline unsigned short sun3_udc_read(unsigned char reg)
@@ -180,6 +178,7 @@ static inline void sun3_udc_write(unsigned short val, unsigned char reg)
 	dregs->udc_data = val;
 	udelay(SUN3_DMA_DELAY);
 }
+#endif
 
 /*
  * XXX: status debug
@@ -198,17 +197,32 @@ static struct Scsi_Host *default_instance;
  *
  */
 
-int __init sun3scsi_detect(struct scsi_host_template * tpnt)
+static int __init sun3scsi_detect(struct scsi_host_template *tpnt)
 {
-	unsigned long ioaddr;
+	unsigned long ioaddr, irq;
 	static int called = 0;
 	struct Scsi_Host *instance;
+#ifdef SUN3_SCSI_VME
+	int i;
+	unsigned long addrs[3] = { IOBASE_SUN3_VMESCSI,
+				   IOBASE_SUN3_VMESCSI + 0x4000,
+				   0 };
+	unsigned long vecs[3] = { SUN3_VEC_VMESCSI0,
+				  SUN3_VEC_VMESCSI1,
+				  0 };
+#endif
 
 	/* check that this machine has an onboard 5380 */
 	switch(idprom->id_machtype) {
+#ifdef SUN3_SCSI_VME
+	case SM_SUN3|SM_3_160:
+	case SM_SUN3|SM_3_260:
+		break;
+#else
 	case SM_SUN3|SM_3_50:
 	case SM_SUN3|SM_3_60:
 		break;
+#endif
 
 	default:
 		return 0;
@@ -217,7 +231,11 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)
 	if(called)
 		return 0;
 
+#ifdef SUN3_SCSI_VME
+	tpnt->proc_name = "Sun3 5380 VME SCSI";
+#else
 	tpnt->proc_name = "Sun3 5380 SCSI";
+#endif
 
 	/* setup variables */
 	tpnt->can_queue =
@@ -234,6 +252,38 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)
234 tpnt->this_id = 7; 252 tpnt->this_id = 7;
235 } 253 }
236 254
255#ifdef SUN3_SCSI_VME
256 ioaddr = 0;
257 for (i = 0; addrs[i] != 0; i++) {
258 unsigned char x;
259
260 ioaddr = (unsigned long)sun3_ioremap(addrs[i], PAGE_SIZE,
261 SUN3_PAGE_TYPE_VME16);
262 irq = vecs[i];
263 sun3_scsi_regp = (unsigned char *)ioaddr;
264
265 dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8);
266
267 if (sun3_map_test((unsigned long)dregs, &x)) {
268 unsigned short oldcsr;
269
270 oldcsr = dregs->csr;
271 dregs->csr = 0;
272 udelay(SUN3_DMA_DELAY);
273 if (dregs->csr == 0x1400)
274 break;
275
276 dregs->csr = oldcsr;
277 }
278
279 iounmap((void *)ioaddr);
280 ioaddr = 0;
281 }
282
283 if (!ioaddr)
284 return 0;
285#else
286 irq = IRQ_SUN3_SCSI;
237 ioaddr = (unsigned long)ioremap(IOBASE_SUN3_SCSI, PAGE_SIZE); 287 ioaddr = (unsigned long)ioremap(IOBASE_SUN3_SCSI, PAGE_SIZE);
238 sun3_scsi_regp = (unsigned char *)ioaddr; 288 sun3_scsi_regp = (unsigned char *)ioaddr;
239 289
@@ -244,11 +294,6 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)
244 printk("SUN3 Scsi couldn't allocate DVMA memory!\n"); 294 printk("SUN3 Scsi couldn't allocate DVMA memory!\n");
245 return 0; 295 return 0;
246 } 296 }
247#ifdef OLDDMA
248 if((dmabuf = dvma_malloc_align(SUN3_DVMA_BUFSIZE, 0x10000)) == NULL) {
249 printk("SUN3 Scsi couldn't allocate DVMA memory!\n");
250 return 0;
251 }
252#endif 297#endif
253#ifdef SUPPORT_TAGS 298#ifdef SUPPORT_TAGS
254 if (setup_use_tagged_queuing < 0) 299 if (setup_use_tagged_queuing < 0)
@@ -262,7 +307,7 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)
262 default_instance = instance; 307 default_instance = instance;
263 308
264 instance->io_port = (unsigned long) ioaddr; 309 instance->io_port = (unsigned long) ioaddr;
265	instance->irq = IRQ_SUN3_SCSI;
310	instance->irq = irq;
266 311
267 NCR5380_init(instance, 0); 312 NCR5380_init(instance, 0);
268 313
@@ -283,7 +328,8 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)
283#endif 328#endif
284 } 329 }
285 330
286	printk("scsi%d: Sun3 5380 at port %lX irq", instance->host_no, instance->io_port);
331	pr_info("scsi%d: %s at port %lX irq", instance->host_no,
332 tpnt->proc_name, instance->io_port);
287 if (instance->irq == SCSI_IRQ_NONE) 333 if (instance->irq == SCSI_IRQ_NONE)
288 printk ("s disabled"); 334 printk ("s disabled");
289 else 335 else
@@ -300,6 +346,15 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)
300 dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR; 346 dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR;
301 udelay(SUN3_DMA_DELAY); 347 udelay(SUN3_DMA_DELAY);
302 dregs->fifo_count = 0; 348 dregs->fifo_count = 0;
349#ifdef SUN3_SCSI_VME
350 dregs->fifo_count_hi = 0;
351 dregs->dma_addr_hi = 0;
352 dregs->dma_addr_lo = 0;
353 dregs->dma_count_hi = 0;
354 dregs->dma_count_lo = 0;
355
356 dregs->ivect = VME_DATA24 | (instance->irq & 0xff);
357#endif
303 358
304 called = 1; 359 called = 1;
305 360
@@ -367,7 +422,8 @@ static void sun3_scsi_reset_boot(struct Scsi_Host *instance)
367} 422}
368#endif 423#endif
369 424
370const char * sun3scsi_info (struct Scsi_Host *spnt) {
425static const char *sun3scsi_info(struct Scsi_Host *spnt)
426{
371 return ""; 427 return "";
372} 428}
373 429
@@ -379,6 +435,10 @@ static irqreturn_t scsi_sun3_intr(int irq, void *dummy)
379 unsigned short csr = dregs->csr; 435 unsigned short csr = dregs->csr;
380 int handled = 0; 436 int handled = 0;
381 437
438#ifdef SUN3_SCSI_VME
439 dregs->csr &= ~CSR_DMA_ENABLE;
440#endif
441
382 if(csr & ~CSR_GOOD) { 442 if(csr & ~CSR_GOOD) {
383 if(csr & CSR_DMA_BUSERR) { 443 if(csr & CSR_DMA_BUSERR) {
384 printk("scsi%d: bus error in dma\n", default_instance->host_no); 444 printk("scsi%d: bus error in dma\n", default_instance->host_no);
@@ -422,31 +482,28 @@ void sun3_sun3_debug (void)
422/* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */ 482/* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */
423static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int write_flag) 483static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int write_flag)
424{ 484{
425#ifdef OLDDMA
426 if(write_flag)
427 memcpy(dmabuf, data, count);
428 else {
429 sun3_dma_orig_addr = data;
430 sun3_dma_orig_count = count;
431 }
432#else
433 void *addr; 485 void *addr;
434 486
435 if(sun3_dma_orig_addr != NULL) 487 if(sun3_dma_orig_addr != NULL)
436 dvma_unmap(sun3_dma_orig_addr); 488 dvma_unmap(sun3_dma_orig_addr);
437 489
438//	addr = sun3_dvma_page((unsigned long)data, (unsigned long)dmabuf);
490#ifdef SUN3_SCSI_VME
491 addr = (void *)dvma_map_vme((unsigned long) data, count);
492#else
439 addr = (void *)dvma_map((unsigned long) data, count); 493 addr = (void *)dvma_map((unsigned long) data, count);
494#endif
440 495
441 sun3_dma_orig_addr = addr; 496 sun3_dma_orig_addr = addr;
442 sun3_dma_orig_count = count; 497 sun3_dma_orig_count = count;
443#endif
498
499#ifndef SUN3_SCSI_VME
444 dregs->fifo_count = 0; 500 dregs->fifo_count = 0;
445 sun3_udc_write(UDC_RESET, UDC_CSR); 501 sun3_udc_write(UDC_RESET, UDC_CSR);
446 502
447 /* reset fifo */ 503 /* reset fifo */
448 dregs->csr &= ~CSR_FIFO; 504 dregs->csr &= ~CSR_FIFO;
449 dregs->csr |= CSR_FIFO; 505 dregs->csr |= CSR_FIFO;
506#endif
450 507
451 /* set direction */ 508 /* set direction */
452 if(write_flag) 509 if(write_flag)
@@ -454,6 +511,17 @@ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int wri
454 else 511 else
455 dregs->csr &= ~CSR_SEND; 512 dregs->csr &= ~CSR_SEND;
456 513
514#ifdef SUN3_SCSI_VME
515 dregs->csr |= CSR_PACK_ENABLE;
516
517 dregs->dma_addr_hi = ((unsigned long)addr >> 16);
518 dregs->dma_addr_lo = ((unsigned long)addr & 0xffff);
519
520 dregs->dma_count_hi = 0;
521 dregs->dma_count_lo = 0;
522 dregs->fifo_count_hi = 0;
523 dregs->fifo_count = 0;
524#else
457 /* byte count for fifo */ 525 /* byte count for fifo */
458 dregs->fifo_count = count; 526 dregs->fifo_count = count;
459 527
@@ -467,17 +535,12 @@ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int wri
467 printk("scsi%d: fifo_mismatch %04x not %04x\n", 535 printk("scsi%d: fifo_mismatch %04x not %04x\n",
468 default_instance->host_no, dregs->fifo_count, 536 default_instance->host_no, dregs->fifo_count,
469 (unsigned int) count); 537 (unsigned int) count);
470		NCR5380_print(default_instance);
538		NCR5380_dprint(NDEBUG_DMA, default_instance);
471 } 539 }
472 540
473 /* setup udc */ 541 /* setup udc */
474#ifdef OLDDMA
475 udc_regs->addr_hi = ((dvma_vtob(dmabuf) & 0xff0000) >> 8);
476 udc_regs->addr_lo = (dvma_vtob(dmabuf) & 0xffff);
477#else
478 udc_regs->addr_hi = (((unsigned long)(addr) & 0xff0000) >> 8); 542 udc_regs->addr_hi = (((unsigned long)(addr) & 0xff0000) >> 8);
479 udc_regs->addr_lo = ((unsigned long)(addr) & 0xffff); 543 udc_regs->addr_lo = ((unsigned long)(addr) & 0xffff);
480#endif
481 udc_regs->count = count/2; /* count in words */ 544 udc_regs->count = count/2; /* count in words */
482 udc_regs->mode_hi = UDC_MODE_HIWORD; 545 udc_regs->mode_hi = UDC_MODE_HIWORD;
483 if(write_flag) { 546 if(write_flag) {
@@ -501,11 +564,13 @@ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int wri
501 564
502 /* interrupt enable */ 565 /* interrupt enable */
503 sun3_udc_write(UDC_INT_ENABLE, UDC_CSR); 566 sun3_udc_write(UDC_INT_ENABLE, UDC_CSR);
567#endif
504 568
505 return count; 569 return count;
506 570
507} 571}
508 572
573#ifndef SUN3_SCSI_VME
509static inline unsigned long sun3scsi_dma_count(struct Scsi_Host *instance) 574static inline unsigned long sun3scsi_dma_count(struct Scsi_Host *instance)
510{ 575{
511 unsigned short resid; 576 unsigned short resid;
@@ -518,6 +583,7 @@ static inline unsigned long sun3scsi_dma_count(struct Scsi_Host *instance)
518 583
519 return (unsigned long) resid; 584 return (unsigned long) resid;
520} 585}
586#endif
521 587
522static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance) 588static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance)
523{ 589{
@@ -536,8 +602,23 @@ static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted,
536 602
537static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data) 603static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data)
538{ 604{
605#ifdef SUN3_SCSI_VME
606 unsigned short csr;
607
608 csr = dregs->csr;
539 609
610 dregs->dma_count_hi = (sun3_dma_orig_count >> 16);
611 dregs->dma_count_lo = (sun3_dma_orig_count & 0xffff);
612
613 dregs->fifo_count_hi = (sun3_dma_orig_count >> 16);
614 dregs->fifo_count = (sun3_dma_orig_count & 0xffff);
615
616/* if(!(csr & CSR_DMA_ENABLE))
617 * dregs->csr |= CSR_DMA_ENABLE;
618 */
619#else
540 sun3_udc_write(UDC_CHN_START, UDC_CSR); 620 sun3_udc_write(UDC_CHN_START, UDC_CSR);
621#endif
541 622
542 return 0; 623 return 0;
543} 624}
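
The VME controller programmed above exposes only 16-bit registers, so the 32-bit transfer count has to be written as hi/lo halves. A minimal sketch of that split, with hypothetical register names rather than the driver's:

#include <linux/types.h>

struct split_regs {
	volatile u16 count_hi;	/* bits 31..16 of the count */
	volatile u16 count_lo;	/* bits 15..0 of the count */
};

/* Program a 32-bit count into paired 16-bit halves, high half first,
 * mirroring the dma_count/fifo_count writes in the hunk above. */
static void set_count32(struct split_regs *r, u32 count)
{
	r->count_hi = count >> 16;
	r->count_lo = count & 0xffff;
}
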
@@ -545,12 +626,46 @@ static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data)
545/* clean up after our dma is done */ 626/* clean up after our dma is done */
546static int sun3scsi_dma_finish(int write_flag) 627static int sun3scsi_dma_finish(int write_flag)
547{ 628{
548	unsigned short count;
629	unsigned short __maybe_unused count;
549 unsigned short fifo; 630 unsigned short fifo;
550 int ret = 0; 631 int ret = 0;
551 632
552 sun3_dma_active = 0; 633 sun3_dma_active = 0;
553#if 1
634
635#ifdef SUN3_SCSI_VME
636 dregs->csr &= ~CSR_DMA_ENABLE;
637
638 fifo = dregs->fifo_count;
639 if (write_flag) {
640 if ((fifo > 0) && (fifo < sun3_dma_orig_count))
641 fifo++;
642 }
643
644 last_residual = fifo;
645 /* empty bytes from the fifo which didn't make it */
646 if ((!write_flag) && (dregs->csr & CSR_LEFT)) {
647 unsigned char *vaddr;
648
649 vaddr = (unsigned char *)dvma_vmetov(sun3_dma_orig_addr);
650
651 vaddr += (sun3_dma_orig_count - fifo);
652 vaddr--;
653
654 switch (dregs->csr & CSR_LEFT) {
655 case CSR_LEFT_3:
656 *vaddr = (dregs->bpack_lo & 0xff00) >> 8;
657 vaddr--;
658
659 case CSR_LEFT_2:
660 *vaddr = (dregs->bpack_hi & 0x00ff);
661 vaddr--;
662
663 case CSR_LEFT_1:
664 *vaddr = (dregs->bpack_hi & 0xff00) >> 8;
665 break;
666 }
667 }
668#else
554 // check to empty the fifo on a read 669 // check to empty the fifo on a read
555 if(!write_flag) { 670 if(!write_flag) {
556 int tmo = 20000; /* .2 sec */ 671 int tmo = 20000; /* .2 sec */
@@ -566,28 +681,8 @@ static int sun3scsi_dma_finish(int write_flag)
566 udelay(10); 681 udelay(10);
567 } 682 }
568 } 683 }
569
570#endif
571 684
572 count = sun3scsi_dma_count(default_instance); 685 count = sun3scsi_dma_count(default_instance);
573#ifdef OLDDMA
574
575 /* if we've finished a read, copy out the data we read */
576 if(sun3_dma_orig_addr) {
577 /* check for residual bytes after dma end */
578 if(count && (NCR5380_read(BUS_AND_STATUS_REG) &
579 (BASR_PHASE_MATCH | BASR_ACK))) {
580 printk("scsi%d: sun3_scsi_finish: read overrun baby... ", default_instance->host_no);
581 printk("basr now %02x\n", NCR5380_read(BUS_AND_STATUS_REG));
582 ret = count;
583 }
584
585 /* copy in what we dma'd no matter what */
586 memcpy(sun3_dma_orig_addr, dmabuf, sun3_dma_orig_count);
587 sun3_dma_orig_addr = NULL;
588
589 }
590#else
591 686
592 fifo = dregs->fifo_count; 687 fifo = dregs->fifo_count;
593 last_residual = fifo; 688 last_residual = fifo;
@@ -605,10 +700,23 @@ static int sun3scsi_dma_finish(int write_flag)
605 vaddr[-2] = (data & 0xff00) >> 8; 700 vaddr[-2] = (data & 0xff00) >> 8;
606 vaddr[-1] = (data & 0xff); 701 vaddr[-1] = (data & 0xff);
607 } 702 }
703#endif
608 704
609 dvma_unmap(sun3_dma_orig_addr); 705 dvma_unmap(sun3_dma_orig_addr);
610 sun3_dma_orig_addr = NULL; 706 sun3_dma_orig_addr = NULL;
611#endif
707
708#ifdef SUN3_SCSI_VME
709 dregs->dma_addr_hi = 0;
710 dregs->dma_addr_lo = 0;
711 dregs->dma_count_hi = 0;
712 dregs->dma_count_lo = 0;
713
714 dregs->fifo_count = 0;
715 dregs->fifo_count_hi = 0;
716
717 dregs->csr &= ~CSR_SEND;
718/* dregs->csr |= CSR_DMA_ENABLE; */
719#else
612 sun3_udc_write(UDC_RESET, UDC_CSR); 720 sun3_udc_write(UDC_RESET, UDC_CSR);
613 dregs->fifo_count = 0; 721 dregs->fifo_count = 0;
614 dregs->csr &= ~CSR_SEND; 722 dregs->csr &= ~CSR_SEND;
@@ -616,6 +724,7 @@ static int sun3scsi_dma_finish(int write_flag)
616 /* reset fifo */ 724 /* reset fifo */
617 dregs->csr &= ~CSR_FIFO; 725 dregs->csr &= ~CSR_FIFO;
618 dregs->csr |= CSR_FIFO; 726 dregs->csr |= CSR_FIFO;
727#endif
619 728
620 sun3_dma_setup_done = NULL; 729 sun3_dma_setup_done = NULL;
621 730
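
The CSR_LEFT block added to sun3scsi_dma_finish() above relies on deliberate switch fall-through: with three leftover bytes, all three case bodies execute, writing backwards from the tail of the buffer. The same idea in isolation, with the fall-through made explicit; the function and parameter names are illustrative, only the bpack masking follows the hunk:

/* Flush the 1-3 bytes still sitting in the byte-pack register after
 * a read; 'end' points at the last byte position DMA did not fill. */
static void flush_leftover(unsigned char *end, unsigned int left,
			   unsigned short bpack_hi, unsigned short bpack_lo)
{
	switch (left) {
	case 3:
		*end-- = (bpack_lo & 0xff00) >> 8;
		/* fall through */
	case 2:
		*end-- = bpack_hi & 0x00ff;
		/* fall through */
	case 1:
		*end = (bpack_hi & 0xff00) >> 8;
		break;
	}
}
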
diff --git a/drivers/scsi/sun3_scsi.h b/drivers/scsi/sun3_scsi.h
index a8da9c710fea..e96a37cf06ac 100644
--- a/drivers/scsi/sun3_scsi.h
+++ b/drivers/scsi/sun3_scsi.h
@@ -29,12 +29,8 @@
29 * 1+ (800) 334-5454 29 * 1+ (800) 334-5454
30 */ 30 */
31 31
32/*
33 * $Log: cumana_NCR5380.h,v $
32#ifndef SUN3_SCSI_H
33#define SUN3_SCSI_H
34 */
35
36#ifndef SUN3_NCR5380_H
37#define SUN3_NCR5380_H
38 34
39#define SUN3SCSI_PUBLIC_RELEASE 1 35#define SUN3SCSI_PUBLIC_RELEASE 1
40 36
@@ -82,8 +78,6 @@ static int sun3scsi_release (struct Scsi_Host *);
82#define SUN3_SCSI_NAME "Sun3 NCR5380 SCSI" 78#define SUN3_SCSI_NAME "Sun3 NCR5380 SCSI"
83#endif 79#endif
84 80
85#ifndef HOSTS_C
86
87#define NCR5380_implementation_fields \ 81#define NCR5380_implementation_fields \
88 int port, ctrl 82 int port, ctrl
89 83
@@ -108,9 +102,6 @@ static int sun3scsi_release (struct Scsi_Host *);
108#define NCR5380_dma_read_setup(instance, data, count) sun3scsi_dma_setup(data, count, 0) 102#define NCR5380_dma_read_setup(instance, data, count) sun3scsi_dma_setup(data, count, 0)
109#define NCR5380_dma_residual sun3scsi_dma_residual 103#define NCR5380_dma_residual sun3scsi_dma_residual
110 104
111#define BOARD_NORMAL 0
112#define BOARD_NCR53C400 1
113
114/* additional registers - mainly DMA control regs */ 105/* additional registers - mainly DMA control regs */
115/* these start at regbase + 8 -- directly after the NCR regs */ 106/* these start at regbase + 8 -- directly after the NCR regs */
116struct sun3_dma_regs { 107struct sun3_dma_regs {
@@ -191,189 +182,5 @@ struct sun3_udc_regs {
191 182
192#define VME_DATA24 0x3d00 183#define VME_DATA24 0x3d00
193 184
194// debugging printk's, taken from atari_scsi.h
185#endif /* SUN3_SCSI_H */
195/* Debugging printk definitions:
196 *
197 * ARB -> arbitration
198 * ASEN -> auto-sense
199 * DMA -> DMA
200 * HSH -> PIO handshake
201 * INF -> information transfer
202 * INI -> initialization
203 * INT -> interrupt
204 * LNK -> linked commands
205 * MAIN -> NCR5380_main() control flow
206 * NDAT -> no data-out phase
207 * NWR -> no write commands
208 * PIO -> PIO transfers
209 * PDMA -> pseudo DMA (unused on Atari)
210 * QU -> queues
211 * RSL -> reselections
212 * SEL -> selections
213 * USL -> usleep code (unused on Atari)
214 * LBS -> last byte sent (unused on Atari)
215 * RSS -> restarting of selections
216 * EXT -> extended messages
217 * ABRT -> aborting and resetting
218 * TAG -> queue tag handling
219 * MER -> merging of consec. buffers
220 *
221 */
222
223#include "NCR5380.h"
224
225#if NDEBUG & NDEBUG_ARBITRATION
226#define ARB_PRINTK(format, args...) \
227 printk(KERN_DEBUG format , ## args)
228#else
229#define ARB_PRINTK(format, args...)
230#endif
231#if NDEBUG & NDEBUG_AUTOSENSE
232#define ASEN_PRINTK(format, args...) \
233 printk(KERN_DEBUG format , ## args)
234#else
235#define ASEN_PRINTK(format, args...)
236#endif
237#if NDEBUG & NDEBUG_DMA
238#define DMA_PRINTK(format, args...) \
239 printk(KERN_DEBUG format , ## args)
240#else
241#define DMA_PRINTK(format, args...)
242#endif
243#if NDEBUG & NDEBUG_HANDSHAKE
244#define HSH_PRINTK(format, args...) \
245 printk(KERN_DEBUG format , ## args)
246#else
247#define HSH_PRINTK(format, args...)
248#endif
249#if NDEBUG & NDEBUG_INFORMATION
250#define INF_PRINTK(format, args...) \
251 printk(KERN_DEBUG format , ## args)
252#else
253#define INF_PRINTK(format, args...)
254#endif
255#if NDEBUG & NDEBUG_INIT
256#define INI_PRINTK(format, args...) \
257 printk(KERN_DEBUG format , ## args)
258#else
259#define INI_PRINTK(format, args...)
260#endif
261#if NDEBUG & NDEBUG_INTR
262#define INT_PRINTK(format, args...) \
263 printk(KERN_DEBUG format , ## args)
264#else
265#define INT_PRINTK(format, args...)
266#endif
267#if NDEBUG & NDEBUG_LINKED
268#define LNK_PRINTK(format, args...) \
269 printk(KERN_DEBUG format , ## args)
270#else
271#define LNK_PRINTK(format, args...)
272#endif
273#if NDEBUG & NDEBUG_MAIN
274#define MAIN_PRINTK(format, args...) \
275 printk(KERN_DEBUG format , ## args)
276#else
277#define MAIN_PRINTK(format, args...)
278#endif
279#if NDEBUG & NDEBUG_NO_DATAOUT
280#define NDAT_PRINTK(format, args...) \
281 printk(KERN_DEBUG format , ## args)
282#else
283#define NDAT_PRINTK(format, args...)
284#endif
285#if NDEBUG & NDEBUG_NO_WRITE
286#define NWR_PRINTK(format, args...) \
287 printk(KERN_DEBUG format , ## args)
288#else
289#define NWR_PRINTK(format, args...)
290#endif
291#if NDEBUG & NDEBUG_PIO
292#define PIO_PRINTK(format, args...) \
293 printk(KERN_DEBUG format , ## args)
294#else
295#define PIO_PRINTK(format, args...)
296#endif
297#if NDEBUG & NDEBUG_PSEUDO_DMA
298#define PDMA_PRINTK(format, args...) \
299 printk(KERN_DEBUG format , ## args)
300#else
301#define PDMA_PRINTK(format, args...)
302#endif
303#if NDEBUG & NDEBUG_QUEUES
304#define QU_PRINTK(format, args...) \
305 printk(KERN_DEBUG format , ## args)
306#else
307#define QU_PRINTK(format, args...)
308#endif
309#if NDEBUG & NDEBUG_RESELECTION
310#define RSL_PRINTK(format, args...) \
311 printk(KERN_DEBUG format , ## args)
312#else
313#define RSL_PRINTK(format, args...)
314#endif
315#if NDEBUG & NDEBUG_SELECTION
316#define SEL_PRINTK(format, args...) \
317 printk(KERN_DEBUG format , ## args)
318#else
319#define SEL_PRINTK(format, args...)
320#endif
321#if NDEBUG & NDEBUG_USLEEP
322#define USL_PRINTK(format, args...) \
323 printk(KERN_DEBUG format , ## args)
324#else
325#define USL_PRINTK(format, args...)
326#endif
327#if NDEBUG & NDEBUG_LAST_BYTE_SENT
328#define LBS_PRINTK(format, args...) \
329 printk(KERN_DEBUG format , ## args)
330#else
331#define LBS_PRINTK(format, args...)
332#endif
333#if NDEBUG & NDEBUG_RESTART_SELECT
334#define RSS_PRINTK(format, args...) \
335 printk(KERN_DEBUG format , ## args)
336#else
337#define RSS_PRINTK(format, args...)
338#endif
339#if NDEBUG & NDEBUG_EXTENDED
340#define EXT_PRINTK(format, args...) \
341 printk(KERN_DEBUG format , ## args)
342#else
343#define EXT_PRINTK(format, args...)
344#endif
345#if NDEBUG & NDEBUG_ABORT
346#define ABRT_PRINTK(format, args...) \
347 printk(KERN_DEBUG format , ## args)
348#else
349#define ABRT_PRINTK(format, args...)
350#endif
351#if NDEBUG & NDEBUG_TAGS
352#define TAG_PRINTK(format, args...) \
353 printk(KERN_DEBUG format , ## args)
354#else
355#define TAG_PRINTK(format, args...)
356#endif
357#if NDEBUG & NDEBUG_MERGING
358#define MER_PRINTK(format, args...) \
359 printk(KERN_DEBUG format , ## args)
360#else
361#define MER_PRINTK(format, args...)
362#endif
363
364/* conditional macros for NCR5380_print_{,phase,status} */
365
366#define NCR_PRINT(mask) \
367 ((NDEBUG & (mask)) ? NCR5380_print(instance) : (void)0)
368
369#define NCR_PRINT_PHASE(mask) \
370 ((NDEBUG & (mask)) ? NCR5380_print_phase(instance) : (void)0)
371
372#define NCR_PRINT_STATUS(mask) \
373 ((NDEBUG & (mask)) ? NCR5380_print_status(instance) : (void)0)
374
375
376
377#endif /* ndef HOSTS_C */
378#endif /* SUN3_NCR5380_H */
379 186
diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c
index a3dd55d1d2fd..1eeece6e2040 100644
--- a/drivers/scsi/sun3_scsi_vme.c
+++ b/drivers/scsi/sun3_scsi_vme.c
@@ -1,589 +1,3 @@
1 /*
2 * Sun3 SCSI stuff by Erik Verbruggen (erik@bigmama.xtdnet.nl)
3 *
4 * Sun3 DMA routines added by Sam Creasey (sammy@sammy.net)
5 *
6 * VME support added by Sam Creasey
7 *
8 * Adapted from sun3_scsi.c -- see there for other headers
9 *
10 * TODO: modify this driver to support multiple Sun3 SCSI VME boards
11 *
12 */
13
14#define AUTOSENSE
15
16#include <linux/types.h>
17#include <linux/stddef.h>
18#include <linux/ctype.h>
19#include <linux/delay.h>
20
21#include <linux/module.h>
22#include <linux/signal.h>
23#include <linux/ioport.h>
24#include <linux/init.h>
25#include <linux/blkdev.h>
26
27#include <asm/io.h>
28
29#include <asm/sun3ints.h>
30#include <asm/dvma.h>
31#include <asm/idprom.h>
32#include <asm/machines.h>
33
34#define SUN3_SCSI_VME
1#define SUN3_SCSI_VME
35 2
36#undef SUN3_SCSI_DEBUG
3#include "sun3_scsi.c"
37
38/* dma on! */
39#define REAL_DMA
40
41#define NDEBUG 0
42
43#define NDEBUG_ABORT 0x00100000
44#define NDEBUG_TAGS 0x00200000
45#define NDEBUG_MERGING 0x00400000
46
47#include "scsi.h"
48#include "initio.h"
49#include <scsi/scsi_host.h>
50#include "sun3_scsi.h"
51
52extern int sun3_map_test(unsigned long, char *);
53
54#define USE_WRAPPER
55/*#define RESET_BOOT */
56#define DRIVER_SETUP
57
58/*
59 * BUG can be used to trigger a strange code-size related hang on 2.1 kernels
60 */
61#ifdef BUG
62#undef RESET_BOOT
63#undef DRIVER_SETUP
64#endif
65
66/* #define SUPPORT_TAGS */
67
68//#define ENABLE_IRQ() enable_irq( SUN3_VEC_VMESCSI0 );
69#define ENABLE_IRQ()
70
71
72static irqreturn_t scsi_sun3_intr(int irq, void *dummy);
73static inline unsigned char sun3scsi_read(int reg);
74static inline void sun3scsi_write(int reg, int value);
75
76static int setup_can_queue = -1;
77module_param(setup_can_queue, int, 0);
78static int setup_cmd_per_lun = -1;
79module_param(setup_cmd_per_lun, int, 0);
80static int setup_sg_tablesize = -1;
81module_param(setup_sg_tablesize, int, 0);
82#ifdef SUPPORT_TAGS
83static int setup_use_tagged_queuing = -1;
84module_param(setup_use_tagged_queuing, int, 0);
85#endif
86static int setup_hostid = -1;
87module_param(setup_hostid, int, 0);
88
89static struct scsi_cmnd *sun3_dma_setup_done = NULL;
90
91#define AFTER_RESET_DELAY (HZ/2)
92
93/* ms to wait after hitting dma regs */
94#define SUN3_DMA_DELAY 10
95
96/* dvma buffer to allocate -- 32k should hopefully be more than sufficient */
97#define SUN3_DVMA_BUFSIZE 0xe000
98
99/* minimum number of bytes to do dma on */
100#define SUN3_DMA_MINSIZE 128
101
102static volatile unsigned char *sun3_scsi_regp;
103static volatile struct sun3_dma_regs *dregs;
104#ifdef OLDDMA
105static unsigned char *dmabuf = NULL; /* dma memory buffer */
106#endif
107static unsigned char *sun3_dma_orig_addr = NULL;
108static unsigned long sun3_dma_orig_count = 0;
109static int sun3_dma_active = 0;
110static unsigned long last_residual = 0;
111
112/*
113 * NCR 5380 register access functions
114 */
115
116static inline unsigned char sun3scsi_read(int reg)
117{
118 return( sun3_scsi_regp[reg] );
119}
120
121static inline void sun3scsi_write(int reg, int value)
122{
123 sun3_scsi_regp[reg] = value;
124}
125
126/*
127 * XXX: status debug
128 */
129static struct Scsi_Host *default_instance;
130
131/*
132 * Function : int sun3scsi_detect(struct scsi_host_template * tpnt)
133 *
134 * Purpose : initializes mac NCR5380 driver based on the
135 * command line / compile time port and irq definitions.
136 *
137 * Inputs : tpnt - template for this SCSI adapter.
138 *
139 * Returns : 1 if a host adapter was found, 0 if not.
140 *
141 */
142
143static int __init sun3scsi_detect(struct scsi_host_template * tpnt)
144{
145 unsigned long ioaddr, irq = 0;
146 static int called = 0;
147 struct Scsi_Host *instance;
148 int i;
149 unsigned long addrs[3] = { IOBASE_SUN3_VMESCSI,
150 IOBASE_SUN3_VMESCSI + 0x4000,
151 0 };
152 unsigned long vecs[3] = { SUN3_VEC_VMESCSI0,
153 SUN3_VEC_VMESCSI1,
154 0 };
155 /* check that this machine has an onboard 5380 */
156 switch(idprom->id_machtype) {
157 case SM_SUN3|SM_3_160:
158 case SM_SUN3|SM_3_260:
159 break;
160
161 default:
162 return 0;
163 }
164
165 if(called)
166 return 0;
167
168 tpnt->proc_name = "Sun3 5380 VME SCSI";
169
170 /* setup variables */
171 tpnt->can_queue =
172 (setup_can_queue > 0) ? setup_can_queue : CAN_QUEUE;
173 tpnt->cmd_per_lun =
174 (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : CMD_PER_LUN;
175 tpnt->sg_tablesize =
176 (setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_TABLESIZE;
177
178 if (setup_hostid >= 0)
179 tpnt->this_id = setup_hostid;
180 else {
181 /* use 7 as default */
182 tpnt->this_id = 7;
183 }
184
185 ioaddr = 0;
186 for(i = 0; addrs[i] != 0; i++) {
187 unsigned char x;
188
189 ioaddr = (unsigned long)sun3_ioremap(addrs[i], PAGE_SIZE,
190 SUN3_PAGE_TYPE_VME16);
191 irq = vecs[i];
192 sun3_scsi_regp = (unsigned char *)ioaddr;
193
194 dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8);
195
196 if(sun3_map_test((unsigned long)dregs, &x)) {
197 unsigned short oldcsr;
198
199 oldcsr = dregs->csr;
200 dregs->csr = 0;
201 udelay(SUN3_DMA_DELAY);
202 if(dregs->csr == 0x1400)
203 break;
204
205 dregs->csr = oldcsr;
206 }
207
208 iounmap((void *)ioaddr);
209 ioaddr = 0;
210 }
211
212 if(!ioaddr)
213 return 0;
214
215#ifdef SUPPORT_TAGS
216 if (setup_use_tagged_queuing < 0)
217 setup_use_tagged_queuing = USE_TAGGED_QUEUING;
218#endif
219
220 instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
221 if(instance == NULL)
222 return 0;
223
224 default_instance = instance;
225
226 instance->io_port = (unsigned long) ioaddr;
227 instance->irq = irq;
228
229 NCR5380_init(instance, 0);
230
231 instance->n_io_port = 32;
232
233 ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0;
234
235 if (request_irq(instance->irq, scsi_sun3_intr,
236 0, "Sun3SCSI-5380VME", instance)) {
237#ifndef REAL_DMA
238 printk("scsi%d: IRQ%d not free, interrupts disabled\n",
239 instance->host_no, instance->irq);
240 instance->irq = SCSI_IRQ_NONE;
241#else
242 printk("scsi%d: IRQ%d not free, bailing out\n",
243 instance->host_no, instance->irq);
244 return 0;
245#endif
246 }
247
248 printk("scsi%d: Sun3 5380 VME at port %lX irq", instance->host_no, instance->io_port);
249 if (instance->irq == SCSI_IRQ_NONE)
250 printk ("s disabled");
251 else
252 printk (" %d", instance->irq);
253 printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
254 instance->can_queue, instance->cmd_per_lun,
255 SUN3SCSI_PUBLIC_RELEASE);
256 printk("\nscsi%d:", instance->host_no);
257 NCR5380_print_options(instance);
258 printk("\n");
259
260 dregs->csr = 0;
261 udelay(SUN3_DMA_DELAY);
262 dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR;
263 udelay(SUN3_DMA_DELAY);
264 dregs->fifo_count = 0;
265 dregs->fifo_count_hi = 0;
266 dregs->dma_addr_hi = 0;
267 dregs->dma_addr_lo = 0;
268 dregs->dma_count_hi = 0;
269 dregs->dma_count_lo = 0;
270
271 dregs->ivect = VME_DATA24 | (instance->irq & 0xff);
272
273 called = 1;
274
275#ifdef RESET_BOOT
276 sun3_scsi_reset_boot(instance);
277#endif
278
279 return 1;
280}
281
282int sun3scsi_release (struct Scsi_Host *shpnt)
283{
284 if (shpnt->irq != SCSI_IRQ_NONE)
285 free_irq(shpnt->irq, shpnt);
286
287 iounmap((void *)sun3_scsi_regp);
288
289 NCR5380_exit(shpnt);
290 return 0;
291}
292
293#ifdef RESET_BOOT
294/*
295 * Our 'bus reset on boot' function
296 */
297
298static void sun3_scsi_reset_boot(struct Scsi_Host *instance)
299{
300 unsigned long end;
301
302 NCR5380_local_declare();
303 NCR5380_setup(instance);
304
305 /*
306 * Do a SCSI reset to clean up the bus during initialization. No
307 * messing with the queues, interrupts, or locks necessary here.
308 */
309
310 printk( "Sun3 SCSI: resetting the SCSI bus..." );
311
312 /* switch off SCSI IRQ - catch an interrupt without IRQ bit set else */
313// sun3_disable_irq( IRQ_SUN3_SCSI );
314
315 /* get in phase */
316 NCR5380_write( TARGET_COMMAND_REG,
317 PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) ));
318
319 /* assert RST */
320 NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST );
321
322 /* The min. reset hold time is 25us, so 40us should be enough */
323 udelay( 50 );
324
325 /* reset RST and interrupt */
326 NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE );
327 NCR5380_read( RESET_PARITY_INTERRUPT_REG );
328
329 for( end = jiffies + AFTER_RESET_DELAY; time_before(jiffies, end); )
330 barrier();
331
332 /* switch on SCSI IRQ again */
333// sun3_enable_irq( IRQ_SUN3_SCSI );
334
335 printk( " done\n" );
336}
337#endif
338
339static const char * sun3scsi_info (struct Scsi_Host *spnt) {
340 return "";
341}
342
343// safe bits for the CSR
344#define CSR_GOOD 0x060f
345
346static irqreturn_t scsi_sun3_intr(int irq, void *dummy)
347{
348 unsigned short csr = dregs->csr;
349 int handled = 0;
350
351 dregs->csr &= ~CSR_DMA_ENABLE;
352
353
354#ifdef SUN3_SCSI_DEBUG
355 printk("scsi_intr csr %x\n", csr);
356#endif
357
358 if(csr & ~CSR_GOOD) {
359 if(csr & CSR_DMA_BUSERR) {
360 printk("scsi%d: bus error in dma\n", default_instance->host_no);
361#ifdef SUN3_SCSI_DEBUG
362 printk("scsi: residual %x count %x addr %p dmaaddr %x\n",
363 dregs->fifo_count,
364 dregs->dma_count_lo | (dregs->dma_count_hi << 16),
365 sun3_dma_orig_addr,
366 dregs->dma_addr_lo | (dregs->dma_addr_hi << 16));
367#endif
368 }
369
370 if(csr & CSR_DMA_CONFLICT) {
371 printk("scsi%d: dma conflict\n", default_instance->host_no);
372 }
373 handled = 1;
374 }
375
376 if(csr & (CSR_SDB_INT | CSR_DMA_INT)) {
377 NCR5380_intr(irq, dummy);
378 handled = 1;
379 }
380
381 return IRQ_RETVAL(handled);
382}
383
384/*
385 * Debug stuff - to be called on NMI, or sysrq key. Use at your own risk;
386 * reentering NCR5380_print_status seems to have ugly side effects
387 */
388
389/* this doesn't seem to get used at all -- sam */
390#if 0
391void sun3_sun3_debug (void)
392{
393 unsigned long flags;
394 NCR5380_local_declare();
395
396 if (default_instance) {
397 local_irq_save(flags);
398 NCR5380_print_status(default_instance);
399 local_irq_restore(flags);
400 }
401}
402#endif
403
404
405/* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */
406static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int write_flag)
407{
408 void *addr;
409
410 if(sun3_dma_orig_addr != NULL)
411 dvma_unmap(sun3_dma_orig_addr);
412
413// addr = sun3_dvma_page((unsigned long)data, (unsigned long)dmabuf);
414 addr = (void *)dvma_map_vme((unsigned long) data, count);
415
416 sun3_dma_orig_addr = addr;
417 sun3_dma_orig_count = count;
418
419#ifdef SUN3_SCSI_DEBUG
420 printk("scsi: dma_setup addr %p count %x\n", addr, count);
421#endif
422
423// dregs->fifo_count = 0;
424#if 0
425 /* reset fifo */
426 dregs->csr &= ~CSR_FIFO;
427 dregs->csr |= CSR_FIFO;
428#endif
429 /* set direction */
430 if(write_flag)
431 dregs->csr |= CSR_SEND;
432 else
433 dregs->csr &= ~CSR_SEND;
434
435 /* reset fifo */
436// dregs->csr &= ~CSR_FIFO;
437// dregs->csr |= CSR_FIFO;
438
439 dregs->csr |= CSR_PACK_ENABLE;
440
441 dregs->dma_addr_hi = ((unsigned long)addr >> 16);
442 dregs->dma_addr_lo = ((unsigned long)addr & 0xffff);
443
444 dregs->dma_count_hi = 0;
445 dregs->dma_count_lo = 0;
446 dregs->fifo_count_hi = 0;
447 dregs->fifo_count = 0;
448
449#ifdef SUN3_SCSI_DEBUG
450 printk("scsi: dma_setup done csr %x\n", dregs->csr);
451#endif
452 return count;
453
454}
455
456static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance)
457{
458 return last_residual;
459}
460
461static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted,
462 struct scsi_cmnd *cmd,
463 int write_flag)
464{
465 if (cmd->request->cmd_type == REQ_TYPE_FS)
466 return wanted;
467 else
468 return 0;
469}
470
471static int sun3scsi_dma_start(unsigned long count, char *data)
472{
473
474 unsigned short csr;
475
476 csr = dregs->csr;
477#ifdef SUN3_SCSI_DEBUG
478 printk("scsi: dma_start data %p count %x csr %x fifo %x\n", data, count, csr, dregs->fifo_count);
479#endif
480
481 dregs->dma_count_hi = (sun3_dma_orig_count >> 16);
482 dregs->dma_count_lo = (sun3_dma_orig_count & 0xffff);
483
484 dregs->fifo_count_hi = (sun3_dma_orig_count >> 16);
485 dregs->fifo_count = (sun3_dma_orig_count & 0xffff);
486
487// if(!(csr & CSR_DMA_ENABLE))
488// dregs->csr |= CSR_DMA_ENABLE;
489
490 return 0;
491}
492
493/* clean up after our dma is done */
494static int sun3scsi_dma_finish(int write_flag)
495{
496 unsigned short fifo;
497 int ret = 0;
498
499 sun3_dma_active = 0;
500
501 dregs->csr &= ~CSR_DMA_ENABLE;
502
503 fifo = dregs->fifo_count;
504 if(write_flag) {
505 if((fifo > 0) && (fifo < sun3_dma_orig_count))
506 fifo++;
507 }
508
509 last_residual = fifo;
510#ifdef SUN3_SCSI_DEBUG
511 printk("scsi: residual %x total %x\n", fifo, sun3_dma_orig_count);
512#endif
513 /* empty bytes from the fifo which didn't make it */
514 if((!write_flag) && (dregs->csr & CSR_LEFT)) {
515 unsigned char *vaddr;
516
517#ifdef SUN3_SCSI_DEBUG
518 printk("scsi: got left over bytes\n");
519#endif
520
521 vaddr = (unsigned char *)dvma_vmetov(sun3_dma_orig_addr);
522
523 vaddr += (sun3_dma_orig_count - fifo);
524 vaddr--;
525
526 switch(dregs->csr & CSR_LEFT) {
527 case CSR_LEFT_3:
528 *vaddr = (dregs->bpack_lo & 0xff00) >> 8;
529 vaddr--;
530
531 case CSR_LEFT_2:
532 *vaddr = (dregs->bpack_hi & 0x00ff);
533 vaddr--;
534
535 case CSR_LEFT_1:
536 *vaddr = (dregs->bpack_hi & 0xff00) >> 8;
537 break;
538 }
539
540
541 }
542
543 dvma_unmap(sun3_dma_orig_addr);
544 sun3_dma_orig_addr = NULL;
545
546 dregs->dma_addr_hi = 0;
547 dregs->dma_addr_lo = 0;
548 dregs->dma_count_hi = 0;
549 dregs->dma_count_lo = 0;
550
551 dregs->fifo_count = 0;
552 dregs->fifo_count_hi = 0;
553
554 dregs->csr &= ~CSR_SEND;
555
556// dregs->csr |= CSR_DMA_ENABLE;
557
558#if 0
559 /* reset fifo */
560 dregs->csr &= ~CSR_FIFO;
561 dregs->csr |= CSR_FIFO;
562#endif
563 sun3_dma_setup_done = NULL;
564
565 return ret;
566
567}
568
569#include "sun3_NCR5380.c"
570
571static struct scsi_host_template driver_template = {
572 .name = SUN3_SCSI_NAME,
573 .detect = sun3scsi_detect,
574 .release = sun3scsi_release,
575 .info = sun3scsi_info,
576 .queuecommand = sun3scsi_queue_command,
577 .eh_abort_handler = sun3scsi_abort,
578 .eh_bus_reset_handler = sun3scsi_bus_reset,
579 .can_queue = CAN_QUEUE,
580 .this_id = 7,
581 .sg_tablesize = SG_TABLESIZE,
582 .cmd_per_lun = CMD_PER_LUN,
583 .use_clustering = DISABLE_CLUSTERING
584};
585
586
587#include "scsi_module.c"
588
589MODULE_LICENSE("GPL");
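
After this rewrite, sun3_scsi_vme.c is a pure wrapper translation unit: the whole VME driver is sun3_scsi.c recompiled with SUN3_SCSI_VME defined. The pattern in miniature, with invented file and macro names; note that if both objects are linked into one image, the shared symbols need per-variant prefixes:

/* core_driver.c -- shared body, specialized per translation unit */
#ifdef BUILD_VARIANT_B
#define BUS_NAME "bus B"
#else
#define BUS_NAME "bus A"
#endif

const char *bus_name(void)
{
	return BUS_NAME;	/* resolved at compile time */
}

/* driver_b.c -- the entire variant-specific source file */
#define BUILD_VARIANT_B 1
#include "core_driver.c"
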
diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c
index a4abce9d526e..8cc80931df14 100644
--- a/drivers/scsi/t128.c
+++ b/drivers/scsi/t128.c
@@ -102,10 +102,6 @@
102 * 15 9-11 102 * 15 9-11
103 */ 103 */
104 104
105/*
106 * $Log: t128.c,v $
107 */
108
109#include <linux/signal.h> 105#include <linux/signal.h>
110#include <linux/io.h> 106#include <linux/io.h>
111#include <linux/blkdev.h> 107#include <linux/blkdev.h>
diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h
index 1df82c28e56d..fd68cecc62af 100644
--- a/drivers/scsi/t128.h
+++ b/drivers/scsi/t128.h
@@ -34,10 +34,6 @@
34 * 1+ (800) 334-5454 34 * 1+ (800) 334-5454
35 */ 35 */
36 36
37/*
38 * $Log: t128.h,v $
39 */
40
41#ifndef T128_H 37#ifndef T128_H
42#define T128_H 38#define T128_H
43 39
@@ -107,8 +103,6 @@ static int t128_bus_reset(struct scsi_cmnd *);
107#define CAN_QUEUE 32 103#define CAN_QUEUE 32
108#endif 104#endif
109 105
110#ifndef HOSTS_C
111
112#define NCR5380_implementation_fields \ 106#define NCR5380_implementation_fields \
113 void __iomem *base 107 void __iomem *base
114 108
@@ -148,6 +142,5 @@ static int t128_bus_reset(struct scsi_cmnd *);
148 142
149#define T128_IRQS 0xc4a8 143#define T128_IRQS 0xc4a8
150 144
151#endif /* else def HOSTS_C */
152#endif /* ndef ASM */ 145#endif /* ndef ASM */
153#endif /* T128_H */ 146#endif /* T128_H */
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 721050090520..f42d1cee652a 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -196,9 +196,9 @@ enum {
196 * @dword_2: UPIU header DW-2 196 * @dword_2: UPIU header DW-2
197 */ 197 */
198struct utp_upiu_header { 198struct utp_upiu_header {
199	u32 dword_0;
200	u32 dword_1;
201	u32 dword_2;
199	__be32 dword_0;
200	__be32 dword_1;
201	__be32 dword_2;
202}; 202};
203 203
204/** 204/**
@@ -207,7 +207,7 @@ struct utp_upiu_header {
207 * @cdb: Command Descriptor Block CDB DW-4 to DW-7 207 * @cdb: Command Descriptor Block CDB DW-4 to DW-7
208 */ 208 */
209struct utp_upiu_cmd { 209struct utp_upiu_cmd {
210	u32 exp_data_transfer_len;
210	__be32 exp_data_transfer_len;
211 u8 cdb[MAX_CDB_SIZE]; 211 u8 cdb[MAX_CDB_SIZE];
212}; 212};
213 213
@@ -228,10 +228,10 @@ struct utp_upiu_query {
228 u8 idn; 228 u8 idn;
229 u8 index; 229 u8 index;
230 u8 selector; 230 u8 selector;
231	u16 reserved_osf;
232	u16 length;
233	u32 value;
234	u32 reserved[2];
231	__be16 reserved_osf;
232	__be16 length;
233	__be32 value;
234	__be32 reserved[2];
235}; 235};
236 236
237/** 237/**
@@ -256,9 +256,9 @@ struct utp_upiu_req {
256 * @sense_data: Sense data field DW-8 to DW-12 256 * @sense_data: Sense data field DW-8 to DW-12
257 */ 257 */
258struct utp_cmd_rsp { 258struct utp_cmd_rsp {
259	u32 residual_transfer_count;
260	u32 reserved[4];
261	u16 sense_data_len;
259	__be32 residual_transfer_count;
260	__be32 reserved[4];
261	__be16 sense_data_len;
262 u8 sense_data[18]; 262 u8 sense_data[18];
263}; 263};
264 264
@@ -286,10 +286,10 @@ struct utp_upiu_rsp {
286 */ 286 */
287struct utp_upiu_task_req { 287struct utp_upiu_task_req {
288 struct utp_upiu_header header; 288 struct utp_upiu_header header;
289	u32 input_param1;
290	u32 input_param2;
291	u32 input_param3;
292	u32 reserved[2];
289	__be32 input_param1;
290	__be32 input_param2;
291	__be32 input_param3;
292	__be32 reserved[2];
293}; 293};
294 294
295/** 295/**
@@ -301,9 +301,9 @@ struct utp_upiu_task_req {
301 */ 301 */
302struct utp_upiu_task_rsp { 302struct utp_upiu_task_rsp {
303 struct utp_upiu_header header; 303 struct utp_upiu_header header;
304	u32 output_param1;
305	u32 output_param2;
306	u32 reserved[3];
304	__be32 output_param1;
305	__be32 output_param2;
306	__be32 reserved[3];
307}; 307};
308 308
309/** 309/**
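
The u32 -> __be32 and u16 -> __be16 conversions above are not cosmetic: UPIU fields are big-endian on the wire, and the sparse annotations let `make C=1` flag any access that misses a byte-order conversion. The matching access pattern, sketched in isolation (the struct and helper names are illustrative; __be32, be32_to_cpu() and cpu_to_be32() are the kernel's):

#include <linux/types.h>
#include <asm/byteorder.h>

struct wire_hdr {
	__be32 dword_0;			/* big-endian on the wire */
};

static u32 hdr_get(const struct wire_hdr *h)
{
	return be32_to_cpu(h->dword_0);	/* convert on every read ... */
}

static void hdr_set(struct wire_hdr *h, u32 val)
{
	h->dword_0 = cpu_to_be32(val);	/* ... and on every write */
}
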
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 04884d663e4e..0c2877251251 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -55,6 +55,9 @@
55/* Query request timeout */ 55/* Query request timeout */
56#define QUERY_REQ_TIMEOUT 30 /* msec */ 56#define QUERY_REQ_TIMEOUT 30 /* msec */
57 57
58/* Task management command timeout */
59#define TM_CMD_TIMEOUT 100 /* msecs */
60
58/* Expose the flag value from utp_upiu_query.value */ 61/* Expose the flag value from utp_upiu_query.value */
59#define MASK_QUERY_UPIU_FLAG_LOC 0xFF 62#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
60 63
@@ -71,9 +74,22 @@ enum {
71 74
72/* UFSHCD states */ 75/* UFSHCD states */
73enum { 76enum {
74 UFSHCD_STATE_OPERATIONAL,
75 UFSHCD_STATE_RESET, 77 UFSHCD_STATE_RESET,
76 UFSHCD_STATE_ERROR, 78 UFSHCD_STATE_ERROR,
79 UFSHCD_STATE_OPERATIONAL,
80};
81
82/* UFSHCD error handling flags */
83enum {
84 UFSHCD_EH_IN_PROGRESS = (1 << 0),
85};
86
87/* UFSHCD UIC layer error flags */
88enum {
89 UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
90 UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
91 UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
92 UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
77}; 93};
78 94
79/* Interrupt configuration options */ 95/* Interrupt configuration options */
@@ -83,6 +99,18 @@ enum {
83 UFSHCD_INT_CLEAR, 99 UFSHCD_INT_CLEAR,
84}; 100};
85 101
102#define ufshcd_set_eh_in_progress(h) \
103 (h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
104#define ufshcd_eh_in_progress(h) \
105 (h->eh_flags & UFSHCD_EH_IN_PROGRESS)
106#define ufshcd_clear_eh_in_progress(h) \
107 (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
108
109static void ufshcd_tmc_handler(struct ufs_hba *hba);
110static void ufshcd_async_scan(void *data, async_cookie_t cookie);
111static int ufshcd_reset_and_restore(struct ufs_hba *hba);
112static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
113
86/* 114/*
87 * ufshcd_wait_for_register - wait for register value to change 115 * ufshcd_wait_for_register - wait for register value to change
88 * @hba - per-adapter interface 116 * @hba - per-adapter interface
@@ -163,7 +191,7 @@ static inline int ufshcd_is_device_present(u32 reg_hcs)
163 */ 191 */
164static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) 192static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
165{ 193{
166	return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS;
194	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
167} 195}
168 196
169/** 197/**
@@ -176,19 +204,41 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
176static inline int 204static inline int
177ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp) 205ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
178{ 206{
179	return task_req_descp->header.dword_2 & MASK_OCS;
207	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
180} 208}
181 209
182/** 210/**
183 * ufshcd_get_tm_free_slot - get a free slot for task management request 211 * ufshcd_get_tm_free_slot - get a free slot for task management request
184 * @hba: per adapter instance 212 * @hba: per adapter instance
213 * @free_slot: pointer to variable with available slot value
185 * 214 *
186 * Returns maximum number of task management request slots in case of
187 * task management queue full or returns the free slot number
215 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
216 * Returns 0 if free slot is not available, else return 1 with tag value
217 * in @free_slot.
188 */ 218 */
189static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba)
219static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
190{ 220{
191	return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs);
221	int tag;
222 bool ret = false;
223
224 if (!free_slot)
225 goto out;
226
227 do {
228 tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
229 if (tag >= hba->nutmrs)
230 goto out;
231 } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
232
233 *free_slot = tag;
234 ret = true;
235out:
236 return ret;
237}
238
239static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
240{
241 clear_bit_unlock(slot, &hba->tm_slots_in_use);
192} 242}
193 243
194/** 244/**
@@ -390,26 +440,6 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
390} 440}
391 441
392/** 442/**
393 * ufshcd_query_to_cpu() - formats the buffer to native cpu endian
394 * @response: upiu query response to convert
395 */
396static inline void ufshcd_query_to_cpu(struct utp_upiu_query *response)
397{
398 response->length = be16_to_cpu(response->length);
399 response->value = be32_to_cpu(response->value);
400}
401
402/**
403 * ufshcd_query_to_be() - formats the buffer to big endian
404 * @request: upiu query request to convert
405 */
406static inline void ufshcd_query_to_be(struct utp_upiu_query *request)
407{
408 request->length = cpu_to_be16(request->length);
409 request->value = cpu_to_be32(request->value);
410}
411
412/**
413 * ufshcd_copy_query_response() - Copy the Query Response and the data 443 * ufshcd_copy_query_response() - Copy the Query Response and the data
414 * descriptor 444 * descriptor
415 * @hba: per adapter instance 445 * @hba: per adapter instance
@@ -425,7 +455,6 @@ void ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
425 UPIU_RSP_CODE_OFFSET; 455 UPIU_RSP_CODE_OFFSET;
426 456
427 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); 457 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
428 ufshcd_query_to_cpu(&query_res->upiu_res);
429 458
430 459
431 /* Get the descriptor */ 460 /* Get the descriptor */
@@ -749,7 +778,7 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
749{ 778{
750 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; 779 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
751 struct ufs_query *query = &hba->dev_cmd.query; 780 struct ufs_query *query = &hba->dev_cmd.query;
752	u16 len = query->request.upiu_req.length;
781	u16 len = be16_to_cpu(query->request.upiu_req.length);
753 u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE; 782 u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
754 783
755 /* Query request header */ 784 /* Query request header */
@@ -766,7 +795,6 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
766 /* Copy the Query Request buffer as is */ 795 /* Copy the Query Request buffer as is */
767 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, 796 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
768 QUERY_OSF_SIZE); 797 QUERY_OSF_SIZE);
769 ufshcd_query_to_be(&ucd_req_ptr->qr);
770 798
771 /* Copy the Descriptor */ 799 /* Copy the Descriptor */
772 if ((len > 0) && (query->request.upiu_req.opcode == 800 if ((len > 0) && (query->request.upiu_req.opcode ==
@@ -853,10 +881,25 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
853 881
854 tag = cmd->request->tag; 882 tag = cmd->request->tag;
855 883
856	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
884	spin_lock_irqsave(hba->host->host_lock, flags);
885 switch (hba->ufshcd_state) {
886 case UFSHCD_STATE_OPERATIONAL:
887 break;
888 case UFSHCD_STATE_RESET:
857 err = SCSI_MLQUEUE_HOST_BUSY; 889 err = SCSI_MLQUEUE_HOST_BUSY;
858		goto out;
890		goto out_unlock;
891 case UFSHCD_STATE_ERROR:
892 set_host_byte(cmd, DID_ERROR);
893 cmd->scsi_done(cmd);
894 goto out_unlock;
895 default:
896 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
897 __func__, hba->ufshcd_state);
898 set_host_byte(cmd, DID_BAD_TARGET);
899 cmd->scsi_done(cmd);
900 goto out_unlock;
859 } 901 }
902 spin_unlock_irqrestore(hba->host->host_lock, flags);
860 903
861 /* acquire the tag to make sure device cmds don't use it */ 904 /* acquire the tag to make sure device cmds don't use it */
862 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) { 905 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
@@ -893,6 +936,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
893 /* issue command to the controller */ 936 /* issue command to the controller */
894 spin_lock_irqsave(hba->host->host_lock, flags); 937 spin_lock_irqsave(hba->host->host_lock, flags);
895 ufshcd_send_command(hba, tag); 938 ufshcd_send_command(hba, tag);
939out_unlock:
896 spin_unlock_irqrestore(hba->host->host_lock, flags); 940 spin_unlock_irqrestore(hba->host->host_lock, flags);
897out: 941out:
898 return err; 942 return err;
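
A note on the queuecommand change above: the adapter state is now sampled under host_lock, so the verdict cannot race with the error handler flipping the state, and each non-operational state gets a distinct disposition. Condensed from the hunk itself (an annotated excerpt, not a standalone unit):

	spin_lock_irqsave(hba->host->host_lock, flags);
	switch (hba->ufshcd_state) {
	case UFSHCD_STATE_OPERATIONAL:
		break;				/* fall out and issue normally */
	case UFSHCD_STATE_RESET:
		err = SCSI_MLQUEUE_HOST_BUSY;	/* midlayer requeues and retries */
		goto out_unlock;
	case UFSHCD_STATE_ERROR:
		set_host_byte(cmd, DID_ERROR);	/* dead adapter: fail outright */
		cmd->scsi_done(cmd);
		goto out_unlock;
	default:				/* unexpected state: warn once */
		set_host_byte(cmd, DID_BAD_TARGET);
		cmd->scsi_done(cmd);
		goto out_unlock;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
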
@@ -1151,7 +1195,7 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
1151 } 1195 }
1152 1196
1153 if (flag_res) 1197 if (flag_res)
1154		*flag_res = (response->upiu_res.value &
1198		*flag_res = (be32_to_cpu(response->upiu_res.value) &
1155 MASK_QUERY_UPIU_FLAG_LOC) & 0x1; 1199 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
1156 1200
1157out_unlock: 1201out_unlock:
@@ -1170,7 +1214,7 @@ out_unlock:
1170 * 1214 *
1171 * Returns 0 for success, non-zero in case of failure 1215 * Returns 0 for success, non-zero in case of failure
1172*/ 1216*/
1173int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
1217static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
1174 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) 1218 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
1175{ 1219{
1176 struct ufs_query_req *request; 1220 struct ufs_query_req *request;
@@ -1195,7 +1239,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
1195 switch (opcode) { 1239 switch (opcode) {
1196 case UPIU_QUERY_OPCODE_WRITE_ATTR: 1240 case UPIU_QUERY_OPCODE_WRITE_ATTR:
1197 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; 1241 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1198		request->upiu_req.value = *attr_val;
1242		request->upiu_req.value = cpu_to_be32(*attr_val);
1199 break; 1243 break;
1200 case UPIU_QUERY_OPCODE_READ_ATTR: 1244 case UPIU_QUERY_OPCODE_READ_ATTR:
1201 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; 1245 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
@@ -1222,7 +1266,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
1222 goto out_unlock; 1266 goto out_unlock;
1223 } 1267 }
1224 1268
1225	*attr_val = response->upiu_res.value;
1269	*attr_val = be32_to_cpu(response->upiu_res.value);
1226 1270
1227out_unlock: 1271out_unlock:
1228 mutex_unlock(&hba->dev_cmd.lock); 1272 mutex_unlock(&hba->dev_cmd.lock);
@@ -1481,7 +1525,7 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
1481 * 1525 *
1482 * Returns 0 on success, non-zero value on failure 1526 * Returns 0 on success, non-zero value on failure
1483 */ 1527 */
1484int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
1528static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
1485{ 1529{
1486 struct uic_command uic_cmd = {0}; 1530 struct uic_command uic_cmd = {0};
1487 struct completion pwr_done; 1531 struct completion pwr_done;
@@ -1701,11 +1745,6 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
1701 goto out; 1745 goto out;
1702 } 1746 }
1703 1747
1704 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
1705 scsi_unblock_requests(hba->host);
1706
1707 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
1708
1709out: 1748out:
1710 return err; 1749 return err;
1711} 1750}
@@ -1831,66 +1870,6 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
1831} 1870}
1832 1871
1833/** 1872/**
1834 * ufshcd_do_reset - reset the host controller
1835 * @hba: per adapter instance
1836 *
1837 * Returns SUCCESS/FAILED
1838 */
1839static int ufshcd_do_reset(struct ufs_hba *hba)
1840{
1841 struct ufshcd_lrb *lrbp;
1842 unsigned long flags;
1843 int tag;
1844
1845 /* block commands from midlayer */
1846 scsi_block_requests(hba->host);
1847
1848 spin_lock_irqsave(hba->host->host_lock, flags);
1849 hba->ufshcd_state = UFSHCD_STATE_RESET;
1850
1851 /* send controller to reset state */
1852 ufshcd_hba_stop(hba);
1853 spin_unlock_irqrestore(hba->host->host_lock, flags);
1854
1855 /* abort outstanding commands */
1856 for (tag = 0; tag < hba->nutrs; tag++) {
1857 if (test_bit(tag, &hba->outstanding_reqs)) {
1858 lrbp = &hba->lrb[tag];
1859 if (lrbp->cmd) {
1860 scsi_dma_unmap(lrbp->cmd);
1861 lrbp->cmd->result = DID_RESET << 16;
1862 lrbp->cmd->scsi_done(lrbp->cmd);
1863 lrbp->cmd = NULL;
1864 clear_bit_unlock(tag, &hba->lrb_in_use);
1865 }
1866 }
1867 }
1868
1869 /* complete device management command */
1870 if (hba->dev_cmd.complete)
1871 complete(hba->dev_cmd.complete);
1872
1873 /* clear outstanding request/task bit maps */
1874 hba->outstanding_reqs = 0;
1875 hba->outstanding_tasks = 0;
1876
1877 /* Host controller enable */
1878 if (ufshcd_hba_enable(hba)) {
1879 dev_err(hba->dev,
1880 "Reset: Controller initialization failed\n");
1881 return FAILED;
1882 }
1883
1884 if (ufshcd_link_startup(hba)) {
1885 dev_err(hba->dev,
1886 "Reset: Link start-up failed\n");
1887 return FAILED;
1888 }
1889
1890 return SUCCESS;
1891}
1892
1893/**
1894 * ufshcd_slave_alloc - handle initial SCSI device configurations 1873 * ufshcd_slave_alloc - handle initial SCSI device configurations
1895 * @sdev: pointer to SCSI device 1874 * @sdev: pointer to SCSI device
1896 * 1875 *
@@ -1907,6 +1886,9 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
1907 sdev->use_10_for_ms = 1; 1886 sdev->use_10_for_ms = 1;
1908 scsi_set_tag_type(sdev, MSG_SIMPLE_TAG); 1887 scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
1909 1888
1889 /* allow SCSI layer to restart the device in case of errors */
1890 sdev->allow_restart = 1;
1891
1910 /* 1892 /*
1911 * Inform SCSI Midlayer that the LUN queue depth is same as the 1893 * Inform SCSI Midlayer that the LUN queue depth is same as the
1912 * controller queue depth. If a LUN queue depth is less than the 1894 * controller queue depth. If a LUN queue depth is less than the
@@ -1934,10 +1916,11 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
1934 * ufshcd_task_req_compl - handle task management request completion 1916 * ufshcd_task_req_compl - handle task management request completion
1935 * @hba: per adapter instance 1917 * @hba: per adapter instance
1936 * @index: index of the completed request 1918 * @index: index of the completed request
1919 * @resp: task management service response
1937 * 1920 *
1938 * Returns SUCCESS/FAILED
1921 * Returns non-zero value on error, zero on success
1939 */ 1922 */
1940static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
1923static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
1941{ 1924{
1942 struct utp_task_req_desc *task_req_descp; 1925 struct utp_task_req_desc *task_req_descp;
1943 struct utp_upiu_task_rsp *task_rsp_upiup; 1926 struct utp_upiu_task_rsp *task_rsp_upiup;
@@ -1958,19 +1941,15 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
1958 task_req_descp[index].task_rsp_upiu; 1941 task_req_descp[index].task_rsp_upiu;
1959 task_result = be32_to_cpu(task_rsp_upiup->header.dword_1); 1942 task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
1960 task_result = ((task_result & MASK_TASK_RESPONSE) >> 8); 1943 task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
1961
1962		if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
1963		    task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
1964			task_result = FAILED;
1965		else
1966			task_result = SUCCESS;
1944		if (resp)
1945			*resp = (u8)task_result;
1967 } else { 1946 } else {
1968		task_result = FAILED;
1969		dev_err(hba->dev,
1970			"trc: Invalid ocs = %x\n", ocs_value);
1947		dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
1948				__func__, ocs_value);
1971 } 1949 }
1972	spin_unlock_irqrestore(hba->host->host_lock, flags); 1950	spin_unlock_irqrestore(hba->host->host_lock, flags);
1973	return task_result;
1951
1952 return ocs_value;
1974} 1953}
1975 1954
1976/** 1955/**
@@ -2105,6 +2084,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2105 case OCS_ABORTED: 2084 case OCS_ABORTED:
2106 result |= DID_ABORT << 16; 2085 result |= DID_ABORT << 16;
2107 break; 2086 break;
2087 case OCS_INVALID_COMMAND_STATUS:
2088 result |= DID_REQUEUE << 16;
2089 break;
2108 case OCS_INVALID_CMD_TABLE_ATTR: 2090 case OCS_INVALID_CMD_TABLE_ATTR:
2109 case OCS_INVALID_PRDT_ATTR: 2091 case OCS_INVALID_PRDT_ATTR:
2110 case OCS_MISMATCH_DATA_BUF_SIZE: 2092 case OCS_MISMATCH_DATA_BUF_SIZE:
@@ -2422,41 +2404,145 @@ out:
2422} 2404}
2423 2405
2424/** 2406/**
2425 * ufshcd_fatal_err_handler - handle fatal errors
2426 * @hba: per adapter instance
2407 * ufshcd_err_handler - handle UFS errors that require s/w attention
2408 * @work: pointer to work structure
2427 */ 2409 */
2428static void ufshcd_fatal_err_handler(struct work_struct *work)
2410static void ufshcd_err_handler(struct work_struct *work)
2429{ 2411{
2430 struct ufs_hba *hba; 2412 struct ufs_hba *hba;
2431	hba = container_of(work, struct ufs_hba, feh_workq);
2413	unsigned long flags;
2414 u32 err_xfer = 0;
2415 u32 err_tm = 0;
2416 int err = 0;
2417 int tag;
2418
2419 hba = container_of(work, struct ufs_hba, eh_work);
2432 2420
2433 pm_runtime_get_sync(hba->dev); 2421 pm_runtime_get_sync(hba->dev);
2434	/* check if reset is already in progress */
2435	if (hba->ufshcd_state != UFSHCD_STATE_RESET)
2436		ufshcd_do_reset(hba);
2422
2423	spin_lock_irqsave(hba->host->host_lock, flags);
2424	if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
2425 spin_unlock_irqrestore(hba->host->host_lock, flags);
2426 goto out;
2427 }
2428
2429 hba->ufshcd_state = UFSHCD_STATE_RESET;
2430 ufshcd_set_eh_in_progress(hba);
2431
2432 /* Complete requests that have door-bell cleared by h/w */
2433 ufshcd_transfer_req_compl(hba);
2434 ufshcd_tmc_handler(hba);
2435 spin_unlock_irqrestore(hba->host->host_lock, flags);
2436
2437 /* Clear pending transfer requests */
2438 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
2439 if (ufshcd_clear_cmd(hba, tag))
2440 err_xfer |= 1 << tag;
2441
2442 /* Clear pending task management requests */
2443 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
2444 if (ufshcd_clear_tm_cmd(hba, tag))
2445 err_tm |= 1 << tag;
2446
2447 /* Complete the requests that are cleared by s/w */
2448 spin_lock_irqsave(hba->host->host_lock, flags);
2449 ufshcd_transfer_req_compl(hba);
2450 ufshcd_tmc_handler(hba);
2451 spin_unlock_irqrestore(hba->host->host_lock, flags);
2452
2453 /* Fatal errors need reset */
2454 if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
2455 ((hba->saved_err & UIC_ERROR) &&
2456 (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
2457 err = ufshcd_reset_and_restore(hba);
2458 if (err) {
2459 dev_err(hba->dev, "%s: reset and restore failed\n",
2460 __func__);
2461 hba->ufshcd_state = UFSHCD_STATE_ERROR;
2462 }
2463 /*
2464 * Inform scsi mid-layer that we did reset and allow to handle
2465 * Unit Attention properly.
2466 */
2467 scsi_report_bus_reset(hba->host, 0);
2468 hba->saved_err = 0;
2469 hba->saved_uic_err = 0;
2470 }
2471 ufshcd_clear_eh_in_progress(hba);
2472
2473out:
2474 scsi_unblock_requests(hba->host);
2437 pm_runtime_put_sync(hba->dev); 2475 pm_runtime_put_sync(hba->dev);
2438} 2476}
2439 2477
2440/** 2478/**
2441 * ufshcd_err_handler - Check for fatal errors 2479 * ufshcd_update_uic_error - check and set fatal UIC error flags.
2442 * @work: pointer to a work queue structure 2480 * @hba: per-adapter instance
2443 */ 2481 */
2444static void ufshcd_err_handler(struct ufs_hba *hba) 2482static void ufshcd_update_uic_error(struct ufs_hba *hba)
2445{ 2483{
2446 u32 reg; 2484 u32 reg;
2447 2485
2486 /* PA_INIT_ERROR is fatal and needs UIC reset */
2487 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
2488 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
2489 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
2490
2491 /* UIC NL/TL/DME errors needs software retry */
2492 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
2493 if (reg)
2494 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
2495
2496 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
2497 if (reg)
2498 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
2499
2500 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
2501 if (reg)
2502 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
2503
2504 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
2505 __func__, hba->uic_error);
2506}
2507
2508/**
2509 * ufshcd_check_errors - Check for errors that need s/w attention
2510 * @hba: per-adapter instance
2511 */
2512static void ufshcd_check_errors(struct ufs_hba *hba)
2513{
2514 bool queue_eh_work = false;
2515
2448 if (hba->errors & INT_FATAL_ERRORS) 2516 if (hba->errors & INT_FATAL_ERRORS)
2449 goto fatal_eh; 2517 queue_eh_work = true;
2450 2518
2451 if (hba->errors & UIC_ERROR) { 2519 if (hba->errors & UIC_ERROR) {
2452 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); 2520 hba->uic_error = 0;
2453 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) 2521 ufshcd_update_uic_error(hba);
2454 goto fatal_eh; 2522 if (hba->uic_error)
2523 queue_eh_work = true;
2455 } 2524 }
2456 return; 2525
2457fatal_eh: 2526 if (queue_eh_work) {
2458 hba->ufshcd_state = UFSHCD_STATE_ERROR; 2527 /* handle fatal errors only when link is functional */
2459 schedule_work(&hba->feh_workq); 2528 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
2529 /* block commands from scsi mid-layer */
2530 scsi_block_requests(hba->host);
2531
2532 /* transfer error masks to sticky bits */
2533 hba->saved_err |= hba->errors;
2534 hba->saved_uic_err |= hba->uic_error;
2535
2536 hba->ufshcd_state = UFSHCD_STATE_ERROR;
2537 schedule_work(&hba->eh_work);
2538 }
2539 }
2540 /*
2541 * if (!queue_eh_work) -
2542 * Other errors are either non-fatal where host recovers
2543 * itself without s/w intervention or errors that will be
2544 * handled by the SCSI core layer.
2545 */
2460} 2546}
2461 2547
2462/** 2548/**
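ufshcd_set_eh_in_progress(), ufshcd_eh_in_progress() and ufshcd_clear_eh_in_progress() operate on the new eh_flags field but are not shown in these hunks; plausibly they are one-bit flag helpers along these lines (bit value assumed):

#define UFSHCD_EH_IN_PROGRESS	(1 << 0)	/* assumed flag bit */
#define ufshcd_set_eh_in_progress(h) \
	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)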
@@ -2469,7 +2555,7 @@ static void ufshcd_tmc_handler(struct ufs_hba *hba)
 
 	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
 	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
-	wake_up_interruptible(&hba->ufshcd_tm_wait_queue);
+	wake_up(&hba->tm_wq);
 }
 
 /**
@@ -2481,7 +2567,7 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
 {
 	hba->errors = UFSHCD_ERROR_MASK & intr_status;
 	if (hba->errors)
-		ufshcd_err_handler(hba);
+		ufshcd_check_errors(hba);
 
 	if (intr_status & UFSHCD_UIC_MASK)
 		ufshcd_uic_cmd_compl(hba, intr_status);
@@ -2519,38 +2605,58 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
 	return retval;
 }
 
+static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
+{
+	int err = 0;
+	u32 mask = 1 << tag;
+	unsigned long flags;
+
+	if (!test_bit(tag, &hba->outstanding_tasks))
+		goto out;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/* poll for max. 1 sec to clear door bell register by h/w */
+	err = ufshcd_wait_for_register(hba,
+			REG_UTP_TASK_REQ_DOOR_BELL,
+			mask, 0, 1000, 1000);
+out:
+	return err;
+}
+
 /**
  * ufshcd_issue_tm_cmd - issues task management commands to controller
  * @hba: per adapter instance
- * @lrbp: pointer to local reference block
+ * @lun_id: LUN ID to which TM command is sent
+ * @task_id: task ID to which the TM command is applicable
+ * @tm_function: task management function opcode
+ * @tm_response: task management service response return value
  *
- * Returns SUCCESS/FAILED
+ * Returns non-zero value on error, zero on success.
  */
-static int
-ufshcd_issue_tm_cmd(struct ufs_hba *hba,
-		    struct ufshcd_lrb *lrbp,
-		    u8 tm_function)
+static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
+		u8 tm_function, u8 *tm_response)
 {
 	struct utp_task_req_desc *task_req_descp;
 	struct utp_upiu_task_req *task_req_upiup;
 	struct Scsi_Host *host;
 	unsigned long flags;
-	int free_slot = 0;
+	int free_slot;
 	int err;
+	int task_tag;
 
 	host = hba->host;
 
-	spin_lock_irqsave(host->host_lock, flags);
-
-	/* If task management queue is full */
-	free_slot = ufshcd_get_tm_free_slot(hba);
-	if (free_slot >= hba->nutmrs) {
-		spin_unlock_irqrestore(host->host_lock, flags);
-		dev_err(hba->dev, "Task management queue full\n");
-		err = FAILED;
-		goto out;
-	}
+	/*
+	 * Get free slot, sleep if slots are unavailable.
+	 * Even though we use wait_event() which sleeps indefinitely,
+	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
+	 */
+	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
 
+	spin_lock_irqsave(host->host_lock, flags);
 	task_req_descp = hba->utmrdl_base_addr;
 	task_req_descp += free_slot;
 
@@ -2562,18 +2668,15 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 	/* Configure task request UPIU */
 	task_req_upiup =
 		(struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
+	task_tag = hba->nutrs + free_slot;
 	task_req_upiup->header.dword_0 =
 		UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
-				  lrbp->lun, lrbp->task_tag);
+				  lun_id, task_tag);
 	task_req_upiup->header.dword_1 =
 		UPIU_HEADER_DWORD(0, tm_function, 0, 0);
 
-	task_req_upiup->input_param1 = lrbp->lun;
-	task_req_upiup->input_param1 =
-		cpu_to_be32(task_req_upiup->input_param1);
-	task_req_upiup->input_param2 = lrbp->task_tag;
-	task_req_upiup->input_param2 =
-		cpu_to_be32(task_req_upiup->input_param2);
+	task_req_upiup->input_param1 = cpu_to_be32(lun_id);
+	task_req_upiup->input_param2 = cpu_to_be32(task_id);
 
 	/* send command to the controller */
 	__set_bit(free_slot, &hba->outstanding_tasks);
@@ -2582,91 +2685,88 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 	spin_unlock_irqrestore(host->host_lock, flags);
 
 	/* wait until the task management command is completed */
-	err =
-	wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue,
-					 (test_bit(free_slot,
-					 &hba->tm_condition) != 0),
-					 60 * HZ);
+	err = wait_event_timeout(hba->tm_wq,
+			test_bit(free_slot, &hba->tm_condition),
+			msecs_to_jiffies(TM_CMD_TIMEOUT));
 	if (!err) {
-		dev_err(hba->dev,
-			"Task management command timed-out\n");
-		err = FAILED;
-		goto out;
+		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
+				__func__, tm_function);
+		if (ufshcd_clear_tm_cmd(hba, free_slot))
+			dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
+					__func__, free_slot);
+		err = -ETIMEDOUT;
+	} else {
+		err = ufshcd_task_req_compl(hba, free_slot, tm_response);
 	}
+
 	clear_bit(free_slot, &hba->tm_condition);
-	err = ufshcd_task_req_compl(hba, free_slot);
-out:
+	ufshcd_put_tm_slot(hba, free_slot);
+	wake_up(&hba->tm_tag_wq);
+
 	return err;
 }
 
 /**
- * ufshcd_device_reset - reset device and abort all the pending commands
+ * ufshcd_eh_device_reset_handler - device reset handler registered to
+ * scsi layer.
  * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
-static int ufshcd_device_reset(struct scsi_cmnd *cmd)
+static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 {
 	struct Scsi_Host *host;
 	struct ufs_hba *hba;
 	unsigned int tag;
 	u32 pos;
 	int err;
+	u8 resp = 0xF;
+	struct ufshcd_lrb *lrbp;
+	unsigned long flags;
 
 	host = cmd->device->host;
 	hba = shost_priv(host);
 	tag = cmd->request->tag;
 
-	err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET);
-	if (err == FAILED)
+	lrbp = &hba->lrb[tag];
+	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
+	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+		if (!err)
+			err = resp;
 		goto out;
+	}
 
-	for (pos = 0; pos < hba->nutrs; pos++) {
-		if (test_bit(pos, &hba->outstanding_reqs) &&
-		    (hba->lrb[tag].lun == hba->lrb[pos].lun)) {
-
-			/* clear the respective UTRLCLR register bit */
-			ufshcd_utrl_clear(hba, pos);
-
-			clear_bit(pos, &hba->outstanding_reqs);
-
-			if (hba->lrb[pos].cmd) {
-				scsi_dma_unmap(hba->lrb[pos].cmd);
-				hba->lrb[pos].cmd->result =
-					DID_ABORT << 16;
-				hba->lrb[pos].cmd->scsi_done(cmd);
-				hba->lrb[pos].cmd = NULL;
-				clear_bit_unlock(pos, &hba->lrb_in_use);
-				wake_up(&hba->dev_cmd.tag_wq);
-			}
+	/* clear the commands that were pending for corresponding LUN */
+	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
+		if (hba->lrb[pos].lun == lrbp->lun) {
+			err = ufshcd_clear_cmd(hba, pos);
+			if (err)
+				break;
 		}
-	} /* end of for */
+	}
+	spin_lock_irqsave(host->host_lock, flags);
+	ufshcd_transfer_req_compl(hba);
+	spin_unlock_irqrestore(host->host_lock, flags);
 out:
+	if (!err) {
+		err = SUCCESS;
+	} else {
+		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+		err = FAILED;
+	}
 	return err;
 }
 
 /**
- * ufshcd_host_reset - Main reset function registered with scsi layer
- * @cmd: SCSI command pointer
- *
- * Returns SUCCESS/FAILED
- */
-static int ufshcd_host_reset(struct scsi_cmnd *cmd)
-{
-	struct ufs_hba *hba;
-
-	hba = shost_priv(cmd->device->host);
-
-	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
-		return SUCCESS;
-
-	return ufshcd_do_reset(hba);
-}
-
-/**
  * ufshcd_abort - abort a specific command
  * @cmd: SCSI command pointer
 *
+ * Abort the pending command in device by sending UFS_ABORT_TASK task management
+ * command, and in host controller by clearing the door-bell register. There can
+ * be race between controller sending the command to the device while abort is
+ * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
+ * really issued and then try to abort it.
+ *
 * Returns SUCCESS/FAILED
 */
 static int ufshcd_abort(struct scsi_cmnd *cmd)
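TM_CMD_TIMEOUT, referenced by the wait_event_timeout() call above, is a new driver constant not visible in these hunks; presumably something like:

#define TM_CMD_TIMEOUT	100	/* msecs; assumed value */

With the reworked contract, a caller distinguishes a transport failure (non-zero return such as -ETIMEDOUT) from the device's UPIU service response, which comes back through the tm_response pointer, as the handlers below demonstrate.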
@@ -2675,33 +2775,68 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 	struct ufs_hba *hba;
 	unsigned long flags;
 	unsigned int tag;
-	int err;
+	int err = 0;
+	int poll_cnt;
+	u8 resp = 0xF;
+	struct ufshcd_lrb *lrbp;
 
 	host = cmd->device->host;
 	hba = shost_priv(host);
 	tag = cmd->request->tag;
 
-	spin_lock_irqsave(host->host_lock, flags);
+	/* If command is already aborted/completed, return SUCCESS */
+	if (!(test_bit(tag, &hba->outstanding_reqs)))
+		goto out;
 
-	/* check if command is still pending */
-	if (!(test_bit(tag, &hba->outstanding_reqs))) {
-		err = FAILED;
-		spin_unlock_irqrestore(host->host_lock, flags);
+	lrbp = &hba->lrb[tag];
+	for (poll_cnt = 100; poll_cnt; poll_cnt--) {
+		err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
+				UFS_QUERY_TASK, &resp);
+		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
+			/* cmd pending in the device */
+			break;
+		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+			u32 reg;
+
+			/*
+			 * cmd not pending in the device, check if it is
+			 * in transition.
+			 */
+			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+			if (reg & (1 << tag)) {
+				/* sleep for max. 200us to stabilize */
+				usleep_range(100, 200);
+				continue;
+			}
+			/* command completed already */
+			goto out;
+		} else {
+			if (!err)
+				err = resp; /* service response error */
+			goto out;
+		}
+	}
+
+	if (!poll_cnt) {
+		err = -EBUSY;
 		goto out;
 	}
-	spin_unlock_irqrestore(host->host_lock, flags);
 
-	err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK);
-	if (err == FAILED)
+	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
+			UFS_ABORT_TASK, &resp);
+	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+		if (!err)
+			err = resp; /* service response error */
+		goto out;
+	}
+
+	err = ufshcd_clear_cmd(hba, tag);
+	if (err)
 		goto out;
 
 	scsi_dma_unmap(cmd);
 
 	spin_lock_irqsave(host->host_lock, flags);
-
-	/* clear the respective UTRLCLR register bit */
-	ufshcd_utrl_clear(hba, tag);
-
 	__clear_bit(tag, &hba->outstanding_reqs);
 	hba->lrb[tag].cmd = NULL;
 	spin_unlock_irqrestore(host->host_lock, flags);
@@ -2709,6 +2844,129 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 	clear_bit_unlock(tag, &hba->lrb_in_use);
 	wake_up(&hba->dev_cmd.tag_wq);
 out:
+	if (!err) {
+		err = SUCCESS;
+	} else {
+		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+		err = FAILED;
+	}
+
+	return err;
+}
+
+/**
+ * ufshcd_host_reset_and_restore - reset and restore host controller
+ * @hba: per-adapter instance
+ *
+ * Note that host controller reset may issue DME_RESET to
+ * local and remote (device) Uni-Pro stack and the attributes
+ * are reset to default state.
+ *
+ * Returns zero on success, non-zero on failure
+ */
+static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
+{
+	int err;
+	async_cookie_t cookie;
+	unsigned long flags;
+
+	/* Reset the host controller */
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_hba_stop(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	err = ufshcd_hba_enable(hba);
+	if (err)
+		goto out;
+
+	/* Establish the link again and restore the device */
+	cookie = async_schedule(ufshcd_async_scan, hba);
+	/* wait for async scan to be completed */
+	async_synchronize_cookie(++cookie);
+	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+		err = -EIO;
+out:
+	if (err)
+		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
+
+	return err;
+}
+
+/**
+ * ufshcd_reset_and_restore - reset and re-initialize host/device
+ * @hba: per-adapter instance
+ *
+ * Reset and recover device, host and re-establish link. This
+ * is helpful to recover the communication in fatal error conditions.
+ *
+ * Returns zero on success, non-zero on failure
+ */
+static int ufshcd_reset_and_restore(struct ufs_hba *hba)
+{
+	int err = 0;
+	unsigned long flags;
+
+	err = ufshcd_host_reset_and_restore(hba);
+
+	/*
+	 * After reset the door-bell might be cleared, complete
+	 * outstanding requests in s/w here.
+	 */
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_transfer_req_compl(hba);
+	ufshcd_tmc_handler(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	return err;
+}
+
+/**
+ * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
+ * @cmd - SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
+{
+	int err;
+	unsigned long flags;
+	struct ufs_hba *hba;
+
+	hba = shost_priv(cmd->device->host);
+
+	/*
+	 * Check if there is any race with fatal error handling.
+	 * If so, wait for it to complete. Even though fatal error
+	 * handling does reset and restore in some cases, don't assume
+	 * anything out of it. We are just avoiding race here.
+	 */
+	do {
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		if (!(work_pending(&hba->eh_work) ||
+				hba->ufshcd_state == UFSHCD_STATE_RESET))
+			break;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+		flush_work(&hba->eh_work);
+	} while (1);
+
+	hba->ufshcd_state = UFSHCD_STATE_RESET;
+	ufshcd_set_eh_in_progress(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	err = ufshcd_reset_and_restore(hba);
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (!err) {
+		err = SUCCESS;
+		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+	} else {
+		err = FAILED;
+		hba->ufshcd_state = UFSHCD_STATE_ERROR;
+	}
+	ufshcd_clear_eh_in_progress(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
 	return err;
 }
 
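ufshcd_hba_stop(), called under the host lock above, is not part of these hunks; presumably it simply clears the controller-enable register, e.g. (register names from ufshci.h, body assumed):

static inline void ufshcd_hba_stop(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
}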
@@ -2737,8 +2995,13 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
 		goto out;
 
 	ufshcd_force_reset_auto_bkops(hba);
-	scsi_scan_host(hba->host);
-	pm_runtime_put_sync(hba->dev);
+	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+
+	/* If we are in error handling context no need to scan the host */
+	if (!ufshcd_eh_in_progress(hba)) {
+		scsi_scan_host(hba->host);
+		pm_runtime_put_sync(hba->dev);
+	}
 out:
 	return;
 }
@@ -2751,8 +3014,8 @@ static struct scsi_host_template ufshcd_driver_template = {
 	.slave_alloc = ufshcd_slave_alloc,
 	.slave_destroy = ufshcd_slave_destroy,
 	.eh_abort_handler = ufshcd_abort,
-	.eh_device_reset_handler = ufshcd_device_reset,
-	.eh_host_reset_handler = ufshcd_host_reset,
+	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
+	.eh_host_reset_handler = ufshcd_eh_host_reset_handler,
 	.this_id = -1,
 	.sg_tablesize = SG_ALL,
 	.cmd_per_lun = UFSHCD_CMD_PER_LUN,
@@ -2916,10 +3179,11 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
 	host->max_cmd_len = MAX_CDB_SIZE;
 
 	/* Initailize wait queue for task management */
-	init_waitqueue_head(&hba->ufshcd_tm_wait_queue);
+	init_waitqueue_head(&hba->tm_wq);
+	init_waitqueue_head(&hba->tm_tag_wq);
 
 	/* Initialize work queues */
-	INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
+	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
 	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
 
 	/* Initialize UIC command mutex */
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 577679a2d189..acf318e338ed 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -174,15 +174,21 @@ struct ufs_dev_cmd {
  * @irq: Irq number of the controller
  * @active_uic_cmd: handle of active UIC command
  * @uic_cmd_mutex: mutex for uic command
- * @ufshcd_tm_wait_queue: wait queue for task management
+ * @tm_wq: wait queue for task management
+ * @tm_tag_wq: wait queue for free task management slots
+ * @tm_slots_in_use: bit map of task management request slots in use
  * @pwr_done: completion for power mode change
  * @tm_condition: condition variable for task management
  * @ufshcd_state: UFSHCD states
+ * @eh_flags: Error handling flags
  * @intr_mask: Interrupt Mask Bits
  * @ee_ctrl_mask: Exception event control mask
- * @feh_workq: Work queue for fatal controller error handling
+ * @eh_work: Worker to handle UFS errors that require s/w attention
  * @eeh_work: Worker to handle exception events
  * @errors: HBA errors
+ * @uic_error: UFS interconnect layer error status
+ * @saved_err: sticky error mask
+ * @saved_uic_err: sticky UIC error mask
  * @dev_cmd: ufs device management command information
  * @auto_bkops_enabled: to track whether bkops is enabled in device
 */
@@ -217,21 +223,27 @@ struct ufs_hba {
 	struct uic_command *active_uic_cmd;
 	struct mutex uic_cmd_mutex;
 
-	wait_queue_head_t ufshcd_tm_wait_queue;
+	wait_queue_head_t tm_wq;
+	wait_queue_head_t tm_tag_wq;
 	unsigned long tm_condition;
+	unsigned long tm_slots_in_use;
 
 	struct completion *pwr_done;
 
 	u32 ufshcd_state;
+	u32 eh_flags;
 	u32 intr_mask;
 	u16 ee_ctrl_mask;
 
 	/* Work Queues */
-	struct work_struct feh_workq;
+	struct work_struct eh_work;
 	struct work_struct eeh_work;
 
 	/* HBA Errors */
 	u32 errors;
+	u32 uic_error;
+	u32 saved_err;
+	u32 saved_uic_err;
 
 	/* Device management request data */
 	struct ufs_dev_cmd dev_cmd;
@@ -263,6 +275,8 @@ static inline void check_upiu_size(void)
 		GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
 }
 
+extern int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state);
+extern int ufshcd_resume(struct ufs_hba *hba);
 extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
 extern int ufshcd_runtime_resume(struct ufs_hba *hba);
 extern int ufshcd_runtime_idle(struct ufs_hba *hba);
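The new uic_error field is populated by ufshcd_update_uic_error() using UFSHCD_UIC_*_ERROR flags defined elsewhere in the driver; plausibly distinct bits, e.g. (values assumed):

#define UFSHCD_UIC_DL_PA_INIT_ERROR	(1 << 0)	/* fatal, needs reset */
#define UFSHCD_UIC_NL_ERROR		(1 << 1)
#define UFSHCD_UIC_TL_ERROR		(1 << 2)
#define UFSHCD_UIC_DME_ERROR		(1 << 3)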
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 0475c6619a68..9abc7e32b43d 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -304,10 +304,10 @@ enum {
  * @size: size of physical segment DW-3
 */
 struct ufshcd_sg_entry {
-	u32 base_addr;
-	u32 upper_addr;
-	u32 reserved;
-	u32 size;
+	__le32 base_addr;
+	__le32 upper_addr;
+	__le32 reserved;
+	__le32 size;
 };
 
 /**
@@ -330,10 +330,10 @@ struct utp_transfer_cmd_desc {
  * @dword3: Descriptor Header DW3
 */
 struct request_desc_header {
-	u32 dword_0;
-	u32 dword_1;
-	u32 dword_2;
-	u32 dword_3;
+	__le32 dword_0;
+	__le32 dword_1;
+	__le32 dword_2;
+	__le32 dword_3;
 };
 
 /**
@@ -352,16 +352,16 @@ struct utp_transfer_req_desc {
 	struct request_desc_header header;
 
 	/* DW 4-5*/
-	u32  command_desc_base_addr_lo;
-	u32  command_desc_base_addr_hi;
+	__le32  command_desc_base_addr_lo;
+	__le32  command_desc_base_addr_hi;
 
 	/* DW 6 */
-	u16  response_upiu_length;
-	u16  response_upiu_offset;
+	__le16  response_upiu_length;
+	__le16  response_upiu_offset;
 
 	/* DW 7 */
-	u16  prd_table_length;
-	u16  prd_table_offset;
+	__le16  prd_table_length;
+	__le16  prd_table_offset;
 };
 
 /**
@@ -376,10 +376,10 @@ struct utp_task_req_desc {
 	struct request_desc_header header;
 
 	/* DW 4-11 */
-	u32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS];
+	__le32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS];
 
 	/* DW 12-19 */
-	u32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS];
+	__le32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS];
 };
 
 #endif /* End of Header */
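Retyping the DMA-visible descriptor fields as __le16/__le32 lets sparse catch missing byte-order conversions; CPU-side accesses must now go through the cpu_to_le*/le*_to_cpu helpers. The resulting access pattern looks like this (field names from the structs above, surrounding code assumed):

utrd->prd_table_length = cpu_to_le16(sg_segments);	/* CPU -> wire */
dword = le32_to_cpu(utrd->header.dword_2);		/* wire -> CPU */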
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index db3b494e5926..308256b5e4cb 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -23,6 +23,7 @@
 #include <linux/virtio_config.h>
 #include <linux/virtio_scsi.h>
 #include <linux/cpu.h>
+#include <linux/blkdev.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_cmnd.h>
@@ -37,6 +38,7 @@ struct virtio_scsi_cmd {
 	struct completion *comp;
 	union {
 		struct virtio_scsi_cmd_req       cmd;
+		struct virtio_scsi_cmd_req_pi    cmd_pi;
 		struct virtio_scsi_ctrl_tmf_req  tmf;
 		struct virtio_scsi_ctrl_an_req   an;
 	} req;
@@ -73,17 +75,12 @@ struct virtio_scsi_vq {
  * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
  * (each virtqueue's affinity is set to the CPU that "owns" the queue).
 *
- * An interesting effect of this policy is that only writes to req_vq need to
- * take the tgt_lock. Read can be done outside the lock because:
+ * tgt_lock is held to serialize reading and writing req_vq. Reading req_vq
+ * could be done locklessly, but we do not do it yet.
 *
- * - writes of req_vq only occur when atomic_inc_return(&tgt->reqs) returns 1.
- *   In that case, no other CPU is reading req_vq: even if they were in
- *   virtscsi_queuecommand_multi, they would be spinning on tgt_lock.
- *
- * - reads of req_vq only occur when the target is not idle (reqs != 0).
- *   A CPU that enters virtscsi_queuecommand_multi will not modify req_vq.
- *
- * Similarly, decrements of reqs are never concurrent with writes of req_vq.
+ * Decrements of reqs are never concurrent with writes of req_vq: before the
+ * decrement reqs will be != 0; after the decrement the virtqueue completion
+ * routine will not use the req_vq so it can be changed by a new request.
 * Thus they can happen outside the tgt_lock, provided of course we make reqs
 * an atomic_t.
 */
@@ -204,7 +201,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
 		set_driver_byte(sc, DRIVER_SENSE);
 	}
 
-	mempool_free(cmd, virtscsi_cmd_pool);
 	sc->scsi_done(sc);
 
 	atomic_dec(&tgt->reqs);
@@ -238,49 +234,25 @@ static void virtscsi_req_done(struct virtqueue *vq)
 	int index = vq->index - VIRTIO_SCSI_VQ_BASE;
 	struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];
 
-	/*
-	 * Read req_vq before decrementing the reqs field in
-	 * virtscsi_complete_cmd.
-	 *
-	 * With barriers:
-	 *
-	 *       CPU #0           virtscsi_queuecommand_multi (CPU #1)
-	 *       ------------------------------------------------------------
-	 *       lock vq_lock
-	 *       read req_vq
-	 *       read reqs (reqs = 1)
-	 *       write reqs (reqs = 0)
-	 *                        increment reqs (reqs = 1)
-	 *                        write req_vq
-	 *
-	 * Possible reordering without barriers:
-	 *
-	 *       CPU #0           virtscsi_queuecommand_multi (CPU #1)
-	 *       ------------------------------------------------------------
-	 *       lock vq_lock
-	 *       read reqs (reqs = 1)
-	 *       write reqs (reqs = 0)
-	 *                        increment reqs (reqs = 1)
-	 *                        write req_vq
-	 *       read (wrong) req_vq
-	 *
-	 * We do not need a full smp_rmb, because req_vq is required to get
-	 * to tgt->reqs: tgt is &vscsi->tgt[sc->device->id], where sc is stored
-	 * in the virtqueue as the user token.
-	 */
-	smp_read_barrier_depends();
-
 	virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
 };
 
+static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
+{
+	int i, num_vqs;
+
+	num_vqs = vscsi->num_queues;
+	for (i = 0; i < num_vqs; i++)
+		virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
+				 virtscsi_complete_cmd);
+}
+
 static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
 {
 	struct virtio_scsi_cmd *cmd = buf;
 
 	if (cmd->comp)
 		complete_all(cmd->comp);
-	else
-		mempool_free(cmd, virtscsi_cmd_pool);
 }
 
 static void virtscsi_ctrl_done(struct virtqueue *vq)
@@ -291,6 +263,8 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)
 	virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
 };
 
+static void virtscsi_handle_event(struct work_struct *work);
+
 static int virtscsi_kick_event(struct virtio_scsi *vscsi,
 			       struct virtio_scsi_event_node *event_node)
 {
@@ -298,6 +272,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
 	struct scatterlist sg;
 	unsigned long flags;
 
+	INIT_WORK(&event_node->work, virtscsi_handle_event);
 	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
 
 	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
@@ -415,7 +390,6 @@ static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
 {
 	struct virtio_scsi_event_node *event_node = buf;
 
-	INIT_WORK(&event_node->work, virtscsi_handle_event);
 	schedule_work(&event_node->work);
 }
 
@@ -433,14 +407,13 @@ static void virtscsi_event_done(struct virtqueue *vq)
  * @cmd		: command structure
  * @req_size	: size of the request buffer
  * @resp_size	: size of the response buffer
- * @gfp		: flags to use for memory allocations
 */
 static int virtscsi_add_cmd(struct virtqueue *vq,
 			    struct virtio_scsi_cmd *cmd,
-			    size_t req_size, size_t resp_size, gfp_t gfp)
+			    size_t req_size, size_t resp_size)
 {
 	struct scsi_cmnd *sc = cmd->sc;
-	struct scatterlist *sgs[4], req, resp;
+	struct scatterlist *sgs[6], req, resp;
 	struct sg_table *out, *in;
 	unsigned out_num = 0, in_num = 0;
 
@@ -458,30 +431,38 @@ static int virtscsi_add_cmd(struct virtqueue *vq,
 	sgs[out_num++] = &req;
 
 	/* Data-out buffer. */
-	if (out)
+	if (out) {
+		/* Place WRITE protection SGLs before Data OUT payload */
+		if (scsi_prot_sg_count(sc))
+			sgs[out_num++] = scsi_prot_sglist(sc);
 		sgs[out_num++] = out->sgl;
+	}
 
 	/* Response header. */
 	sg_init_one(&resp, &cmd->resp, resp_size);
 	sgs[out_num + in_num++] = &resp;
 
 	/* Data-in buffer */
-	if (in)
+	if (in) {
+		/* Place READ protection SGLs before Data IN payload */
+		if (scsi_prot_sg_count(sc))
+			sgs[out_num + in_num++] = scsi_prot_sglist(sc);
 		sgs[out_num + in_num++] = in->sgl;
+	}
 
-	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, gfp);
+	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
 }
 
 static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
 			     struct virtio_scsi_cmd *cmd,
-			     size_t req_size, size_t resp_size, gfp_t gfp)
+			     size_t req_size, size_t resp_size)
 {
 	unsigned long flags;
 	int err;
 	bool needs_kick = false;
 
 	spin_lock_irqsave(&vq->vq_lock, flags);
-	err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size, gfp);
+	err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
 	if (!err)
 		needs_kick = virtqueue_kick_prepare(vq->vq);
 
@@ -492,14 +473,46 @@ static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
 	return err;
 }
 
+static void virtio_scsi_init_hdr(struct virtio_scsi_cmd_req *cmd,
+				 struct scsi_cmnd *sc)
+{
+	cmd->lun[0] = 1;
+	cmd->lun[1] = sc->device->id;
+	cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
+	cmd->lun[3] = sc->device->lun & 0xff;
+	cmd->tag = (unsigned long)sc;
+	cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
+	cmd->prio = 0;
+	cmd->crn = 0;
+}
+
+static void virtio_scsi_init_hdr_pi(struct virtio_scsi_cmd_req_pi *cmd_pi,
+				    struct scsi_cmnd *sc)
+{
+	struct request *rq = sc->request;
+	struct blk_integrity *bi;
+
+	virtio_scsi_init_hdr((struct virtio_scsi_cmd_req *)cmd_pi, sc);
+
+	if (!rq || !scsi_prot_sg_count(sc))
+		return;
+
+	bi = blk_get_integrity(rq->rq_disk);
+
+	if (sc->sc_data_direction == DMA_TO_DEVICE)
+		cmd_pi->pi_bytesout = blk_rq_sectors(rq) * bi->tuple_size;
+	else if (sc->sc_data_direction == DMA_FROM_DEVICE)
+		cmd_pi->pi_bytesin = blk_rq_sectors(rq) * bi->tuple_size;
+}
+
 static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
 				 struct virtio_scsi_vq *req_vq,
 				 struct scsi_cmnd *sc)
 {
-	struct virtio_scsi_cmd *cmd;
-	int ret;
-
 	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+	int req_size;
+
 	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
 
 	/* TODO: check feature bit and fail if unsupported? */
@@ -508,36 +521,24 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
 	dev_dbg(&sc->device->sdev_gendev,
 		"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
 
-	ret = SCSI_MLQUEUE_HOST_BUSY;
-	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC);
-	if (!cmd)
-		goto out;
-
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->sc = sc;
-	cmd->req.cmd = (struct virtio_scsi_cmd_req){
-		.lun[0] = 1,
-		.lun[1] = sc->device->id,
-		.lun[2] = (sc->device->lun >> 8) | 0x40,
-		.lun[3] = sc->device->lun & 0xff,
-		.tag = (unsigned long)sc,
-		.task_attr = VIRTIO_SCSI_S_SIMPLE,
-		.prio = 0,
-		.crn = 0,
-	};
 
 	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
-	memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
 
-	if (virtscsi_kick_cmd(req_vq, cmd,
-			      sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
-			      GFP_ATOMIC) == 0)
-		ret = 0;
-	else
-		mempool_free(cmd, virtscsi_cmd_pool);
+	if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
+		virtio_scsi_init_hdr_pi(&cmd->req.cmd_pi, sc);
+		memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
+		req_size = sizeof(cmd->req.cmd_pi);
+	} else {
+		virtio_scsi_init_hdr(&cmd->req.cmd, sc);
+		memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
+		req_size = sizeof(cmd->req.cmd);
+	}
 
-out:
-	return ret;
+	if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
+		return SCSI_MLQUEUE_HOST_BUSY;
+	return 0;
 }
 
 static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
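The mempool is gone for regular commands because the request now lives in per-command private data: the .cmd_size entries added to the host templates below make the SCSI mid-layer allocate sizeof(struct virtio_scsi_cmd) extra bytes with every scsi_cmnd, and scsi_cmd_priv() hands back that area. Presumably the helper is plain pointer arithmetic, along these lines:

static inline void *scsi_cmd_priv(struct scsi_cmnd *cmd)
{
	return cmd + 1;	/* private area follows the scsi_cmnd itself */
}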
@@ -560,12 +561,8 @@ static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
 
 	spin_lock_irqsave(&tgt->tgt_lock, flags);
 
-	/*
-	 * The memory barrier after atomic_inc_return matches
-	 * the smp_read_barrier_depends() in virtscsi_req_done.
-	 */
 	if (atomic_inc_return(&tgt->reqs) > 1)
-		vq = ACCESS_ONCE(tgt->req_vq);
+		vq = tgt->req_vq;
 	else {
 		queue_num = smp_processor_id();
 		while (unlikely(queue_num >= vscsi->num_queues))
@@ -596,8 +593,7 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
 
 	cmd->comp = &comp;
 	if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
-			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
-			      GFP_NOIO) < 0)
+			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0)
 		goto out;
 
 	wait_for_completion(&comp);
@@ -605,6 +601,18 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
 	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
 		ret = SUCCESS;
 
+	/*
+	 * The spec guarantees that all requests related to the TMF have
+	 * been completed, but the callback might not have run yet if
+	 * we're using independent interrupts (e.g. MSI). Poll the
+	 * virtqueues once.
+	 *
+	 * In the abort case, sc->scsi_done will do nothing, because
+	 * the block layer must have detected a timeout and as a result
+	 * REQ_ATOM_COMPLETE has been set.
+	 */
+	virtscsi_poll_requests(vscsi);
+
 out:
 	mempool_free(cmd, virtscsi_cmd_pool);
 	return ret;
@@ -683,6 +691,7 @@ static struct scsi_host_template virtscsi_host_template_single = {
 	.name = "Virtio SCSI HBA",
 	.proc_name = "virtio_scsi",
 	.this_id = -1,
+	.cmd_size = sizeof(struct virtio_scsi_cmd),
 	.queuecommand = virtscsi_queuecommand_single,
 	.eh_abort_handler = virtscsi_abort,
 	.eh_device_reset_handler = virtscsi_device_reset,
@@ -699,6 +708,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
 	.name = "Virtio SCSI HBA",
 	.proc_name = "virtio_scsi",
 	.this_id = -1,
+	.cmd_size = sizeof(struct virtio_scsi_cmd),
 	.queuecommand = virtscsi_queuecommand_multi,
 	.eh_abort_handler = virtscsi_abort,
 	.eh_device_reset_handler = virtscsi_device_reset,
@@ -875,7 +885,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
 {
 	struct Scsi_Host *shost;
 	struct virtio_scsi *vscsi;
-	int err;
+	int err, host_prot;
 	u32 sg_elems, num_targets;
 	u32 cmd_per_lun;
 	u32 num_queues;
@@ -925,6 +935,16 @@ static int virtscsi_probe(struct virtio_device *vdev)
 	shost->max_id = num_targets;
 	shost->max_channel = 0;
 	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
+
+	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
+		host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
+			    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
+			    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
+
+		scsi_host_set_prot(shost, host_prot);
+		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+	}
+
 	err = scsi_add_host(shost, &vdev->dev);
 	if (err)
 		goto scsi_add_host_failed;
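The pi_bytesout/pi_bytesin values computed in virtio_scsi_init_hdr_pi() above scale the request's 512-byte sector count by the integrity tuple size, so for a 4 KiB write with classic 8-byte T10 DIF tuples (illustrative numbers):

pi_bytes = blk_rq_sectors(rq) * bi->tuple_size;	/* 8 sectors * 8 = 64 bytes of PI */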
@@ -994,6 +1014,7 @@ static struct virtio_device_id id_table[] = {
 static unsigned int features[] = {
 	VIRTIO_SCSI_F_HOTPLUG,
 	VIRTIO_SCSI_F_CHANGE,
+	VIRTIO_SCSI_F_T10_PI,
 };
 
 static struct virtio_driver virtio_scsi_driver = {