-rw-r--r--  Documentation/scsi/ChangeLog.megaraid_sas   |  10
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_pci.c          |   2
-rw-r--r--  drivers/scsi/esas2r/esas2r_flash.c          |  11
-rw-r--r--  drivers/scsi/esas2r/esas2r_init.c           |   8
-rw-r--r--  drivers/scsi/esas2r/esas2r_ioctl.c          |   2
-rw-r--r--  drivers/scsi/esas2r/esas2r_vda.c            |   7
-rw-r--r--  drivers/scsi/fnic/fnic.h                    |   8
-rw-r--r--  drivers/scsi/fnic/fnic_main.c               | 145
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c               | 141
-rw-r--r--  drivers/scsi/fnic/vnic_scsi.h               |   4
-rw-r--r--  drivers/scsi/hpsa.c                         |  54
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c              |  15
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c            | 154
-rw-r--r--  drivers/scsi/ibmvscsi/viosrp.h              |  46
-rw-r--r--  drivers/scsi/lpfc/lpfc.h                    |   1
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c               |  19
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c                |  90
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c            |  11
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c               |  92
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c               |  55
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c                |  33
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h                |   4
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h               |   2
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h            |   2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h        | 117
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c   | 417
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fp.c     | 145
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 200
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.h |  31
-rw-r--r--  drivers/scsi/mpt3sas/Makefile               |   2
-rw-r--r--  drivers/scsi/sd.c                           |  11
-rw-r--r--  drivers/scsi/ufs/ufs.h                      |   1
-rw-r--r--  drivers/scsi/ufs/ufshcd.c                   | 328
-rw-r--r--  drivers/scsi/ufs/ufshcd.h                   |  54
-rw-r--r--  drivers/scsi/ufs/ufshci.h                   |  22
-rw-r--r--  drivers/scsi/ufs/unipro.h                   | 151
-rw-r--r--  include/linux/pci_ids.h                     |   1
37 files changed, 1718 insertions(+), 678 deletions(-)
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index cc92ca8c8963..6edaa65b0818 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,13 @@
+Release Date    : Sat. Aug 31, 2013 17:00:00 PST 2013 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Adam Radford
+			Kashyap Desai
+			Sumit Saxena
+Current Version : 06.700.06.00-rc1
+Old Version     : 06.600.18.00-rc1
+    1. Add High Availability clustering support using shared Logical Disks.
+    2. Version and Changelog update.
+-------------------------------------------------------------------------------
 Release Date    : Wed. May 15, 2013 17:00:00 PST 2013 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Adam Radford
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 6917b4f5ac9e..22d5a949ec83 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -692,7 +692,7 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
 	 * ID as valid.
 	 */
 	if (ahc_get_pci_function(pci) > 0
-	 && ahc_9005_subdevinfo_valid(vendor, device, subvendor, subdevice)
+	 && ahc_9005_subdevinfo_valid(device, vendor, subdevice, subvendor)
 	 && SUBID_9005_MFUNCENB(subdevice) == 0)
 		return (NULL);
 
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
index 8582929b1fef..2ec3c23275b8 100644
--- a/drivers/scsi/esas2r/esas2r_flash.c
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -860,8 +860,13 @@ bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
 		return false;
 	}
 
+	if (fsc->command >= cmdcnt) {
+		fs->status = ATTO_STS_INV_FUNC;
+		return false;
+	}
+
 	func = cmd_to_fls_func[fsc->command];
-	if (fsc->command >= cmdcnt || func == 0xFF) {
+	if (func == 0xFF) {
 		fs->status = ATTO_STS_INV_FUNC;
 		return false;
 	}
@@ -1355,7 +1360,7 @@ void esas2r_nvram_set_defaults(struct esas2r_adapter *a)
 	u32 time = jiffies_to_msecs(jiffies);
 
 	esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);
-	memcpy(n, &default_sas_nvram, sizeof(struct esas2r_sas_nvram));
+	*n = default_sas_nvram;
 	n->sas_addr[3] |= 0x0F;
 	n->sas_addr[4] = HIBYTE(LOWORD(time));
 	n->sas_addr[5] = LOBYTE(LOWORD(time));
@@ -1373,7 +1378,7 @@ void esas2r_nvram_get_defaults(struct esas2r_adapter *a,
 	 * address out first.
 	 */
 	memcpy(&sas_addr[0], a->nvram->sas_addr, 8);
-	memcpy(nvram, &default_sas_nvram, sizeof(struct esas2r_sas_nvram));
+	*nvram = default_sas_nvram;
 	memcpy(&nvram->sas_addr[0], &sas_addr[0], 8);
 }
 
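
The first esas2r_flash.c hunk above is the substantive fix: the old code indexed cmd_to_fls_func[] before checking fsc->command against cmdcnt, so an oversized command value read past the end of the table before the range test ever ran. A minimal standalone sketch of the validate-then-index pattern (table contents and names are hypothetical, not from the driver):

	#include <stdbool.h>
	#include <stddef.h>

	/* Hypothetical dispatch table, analogous to cmd_to_fls_func[]. */
	static const unsigned char cmd_table[] = { 0x01, 0x02, 0xFF, 0x04 };

	static bool lookup_cmd(size_t command, unsigned char *out)
	{
		/* Range-check the index BEFORE touching the array; doing the
		 * lookup first is an out-of-bounds read even if the result
		 * is discarded afterwards. */
		if (command >= sizeof(cmd_table))
			return false;
		if (cmd_table[command] == 0xFF)	/* 0xFF marks an unsupported slot */
			return false;
		*out = cmd_table[command];
		return true;
	}
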
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index 3a798e7d5c56..da1869df2408 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -665,7 +665,7 @@ void esas2r_kill_adapter(int i)
 
 int esas2r_cleanup(struct Scsi_Host *host)
 {
-	struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
+	struct esas2r_adapter *a;
 	int index;
 
 	if (host == NULL) {
@@ -678,6 +678,7 @@ int esas2r_cleanup(struct Scsi_Host *host)
 	}
 
 	esas2r_debug("esas2r_cleanup called for host %p", host);
+	a = (struct esas2r_adapter *)host->hostdata;
 	index = a->index;
 	esas2r_kill_adapter(index);
 	return index;
@@ -808,7 +809,7 @@ static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
 	int pcie_cap_reg;
 
 	pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
-	if (0xffff && pcie_cap_reg) {
+	if (0xffff & pcie_cap_reg) {
 		u16 devcontrol;
 
 		pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
@@ -1550,8 +1551,7 @@ void esas2r_reset_chip(struct esas2r_adapter *a)
 	 * to not overwrite a previous crash that was saved.
 	 */
 	if ((a->flags2 & AF2_COREDUMP_AVAIL)
-	    && !(a->flags2 & AF2_COREDUMP_SAVED)
-	    && a->fw_coredump_buff) {
+	    && !(a->flags2 & AF2_COREDUMP_SAVED)) {
 		esas2r_read_mem_block(a,
 				      a->fw_coredump_buff,
 				      MW_DATA_ADDR_SRAM + 0x80000,
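
The esas2r_init.c PCIe hunk corrects a logical-vs-bitwise operator typo: "0xffff && pcie_cap_reg" evaluates to true for any non-zero capability offset, while the intent was a mask. A compile-and-run sketch of the difference (the offset value is illustrative only):

	#include <stdio.h>

	int main(void)
	{
		int reg = 0x10000;	/* illustrative value with no low bits set */

		/* Logical AND: 1 whenever both operands are non-zero. */
		printf("0xffff && reg = %d\n", 0xffff && reg);	/* prints 1 */

		/* Bitwise AND: keeps only the low 16 bits, which are 0 here. */
		printf("0xffff &  reg = %d\n", 0xffff & reg);	/* prints 0 */
		return 0;
	}
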
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index f3d0cb885972..e5b09027e066 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -415,7 +415,7 @@ static int csmi_ioctl_callback(struct esas2r_adapter *a,
 		lun = tm->lun;
 	}
 
-	if (path > 0 || tid > ESAS2R_MAX_ID) {
+	if (path > 0) {
 		rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(
 			CSMI_STS_INV_PARAM);
 		return false;
diff --git a/drivers/scsi/esas2r/esas2r_vda.c b/drivers/scsi/esas2r/esas2r_vda.c
index f8ec6d636846..fd1392879647 100644
--- a/drivers/scsi/esas2r/esas2r_vda.c
+++ b/drivers/scsi/esas2r/esas2r_vda.c
@@ -302,6 +302,7 @@ static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
 	if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) {
 		struct atto_ioctl_vda_cfg_cmd *cfg = &vi->cmd.cfg;
 		struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp;
+		char buf[sizeof(cfg->data.init.fw_release) + 1];
 
 		cfg->data_length =
 			cpu_to_le32(sizeof(struct atto_vda_cfg_init));
@@ -309,11 +310,13 @@ static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
 			le32_to_cpu(rsp->vda_version);
 		cfg->data.init.fw_build = rsp->fw_build;
 
-		sprintf((char *)&cfg->data.init.fw_release,
-			"%1d.%02d",
+		snprintf(buf, sizeof(buf), "%1d.%02d",
 			 (int)LOBYTE(le16_to_cpu(rsp->fw_release)),
 			 (int)HIBYTE(le16_to_cpu(rsp->fw_release)));
 
+		memcpy(&cfg->data.init.fw_release, buf,
+		       sizeof(cfg->data.init.fw_release));
+
 		if (LOWORD(LOBYTE(cfg->data.init.fw_build)) == 'A')
 			cfg->data.init.fw_version =
 				cfg->data.init.fw_build;
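
The esas2r_vda.c hunk replaces sprintf() into a fixed-width firmware-release field with snprintf() into a one-byte-larger local buffer followed by a bounded memcpy(): snprintf() always writes a terminating NUL, which would spill past a field sized exactly for the text. The shape of the fix, sketched with a hypothetical struct:

	#include <stdio.h>
	#include <string.h>

	struct fw_info {
		char fw_release[8];	/* fixed-width field, not NUL-terminated */
	};

	static void set_release(struct fw_info *info, int major, int minor)
	{
		/* Format into a scratch buffer sized for the text plus the
		 * NUL, then copy only the field's width into place. */
		char buf[sizeof(info->fw_release) + 1];

		snprintf(buf, sizeof(buf), "%1d.%02d", major, minor);
		memcpy(info->fw_release, buf, sizeof(info->fw_release));
	}
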
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index c18c68150e9f..e4dd3d7cd236 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -43,6 +43,8 @@
 #define DFX DRV_NAME "%d: "
 
 #define DESC_CLEAN_LOW_WATERMARK 8
+#define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */
+#define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */
 #define FNIC_MAX_IO_REQ 2048 /* scsi_cmnd tag map entries */
 #define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
 #define FNIC_DFLT_QUEUE_DEPTH 32
@@ -154,6 +156,9 @@ do { \
 	FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \
 		shost_printk(kern_level, host, fmt, ##args);)
 
+#define FNIC_MAIN_NOTE(kern_level, host, fmt, args...) \
+	shost_printk(kern_level, host, fmt, ##args)
+
 extern const char *fnic_state_str[];
 
 enum fnic_intx_intr_index {
@@ -215,10 +220,12 @@ struct fnic {
 
 	struct vnic_stats *stats;
 	unsigned long stats_time; /* time of stats update */
+	unsigned long stats_reset_time; /* time of stats reset */
 	struct vnic_nic_cfg *nic_cfg;
 	char name[IFNAMSIZ];
 	struct timer_list notify_timer; /* used for MSI interrupts */
 
+	unsigned int fnic_max_tag_id;
 	unsigned int err_intr_offset;
 	unsigned int link_intr_offset;
 
@@ -359,4 +366,5 @@ fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
 	return ((fnic->state_flags & st_flags) == st_flags);
 }
 void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
+void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *);
 #endif /* _FNIC_H_ */
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 42e15ee6e1bb..bbf81ea3a252 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -74,6 +74,10 @@ module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
 					"for fnic trace buffer");
 
+static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH;
+module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN");
+
 static struct libfc_function_template fnic_transport_template = {
 	.frame_send = fnic_send,
 	.lport_set_port_id = fnic_set_port_id,
@@ -91,7 +95,7 @@ static int fnic_slave_alloc(struct scsi_device *sdev)
 	if (!rport || fc_remote_port_chkready(rport))
 		return -ENXIO;
 
-	scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH);
+	scsi_activate_tcq(sdev, fnic_max_qdepth);
 	return 0;
 }
 
@@ -126,6 +130,7 @@ fnic_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
 static void fnic_get_host_speed(struct Scsi_Host *shost);
 static struct scsi_transport_template *fnic_fc_transport;
 static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *);
+static void fnic_reset_host_stats(struct Scsi_Host *);
 
 static struct fc_function_template fnic_fc_functions = {
 
@@ -153,6 +158,7 @@ static struct fc_function_template fnic_fc_functions = {
 	.set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo,
 	.issue_fc_host_lip = fnic_reset,
 	.get_fc_host_stats = fnic_get_stats,
+	.reset_fc_host_stats = fnic_reset_host_stats,
 	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
 	.terminate_rport_io = fnic_terminate_rport_io,
 	.bsg_request = fc_lport_bsg_request,
@@ -206,13 +212,116 @@ static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
 	stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
 	stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
 	stats->invalid_crc_count = vs->rx.rx_crc_errors;
-	stats->seconds_since_last_reset = (jiffies - lp->boot_time) / HZ;
+	stats->seconds_since_last_reset =
+			(jiffies - fnic->stats_reset_time) / HZ;
 	stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
 	stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);
 
 	return stats;
 }
 
+/*
+ * fnic_dump_fchost_stats
+ * note : dumps fc_statistics into system logs
+ */
+void fnic_dump_fchost_stats(struct Scsi_Host *host,
+				struct fc_host_statistics *stats)
+{
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: seconds since last reset = %llu\n",
+			stats->seconds_since_last_reset);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: tx frames = %llu\n",
+			stats->tx_frames);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: tx words = %llu\n",
+			stats->tx_words);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: rx frames = %llu\n",
+			stats->rx_frames);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: rx words = %llu\n",
+			stats->rx_words);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: lip count = %llu\n",
+			stats->lip_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: nos count = %llu\n",
+			stats->nos_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: error frames = %llu\n",
+			stats->error_frames);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: dumped frames = %llu\n",
+			stats->dumped_frames);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: link failure count = %llu\n",
+			stats->link_failure_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: loss of sync count = %llu\n",
+			stats->loss_of_sync_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: loss of signal count = %llu\n",
+			stats->loss_of_signal_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: prim seq protocol err count = %llu\n",
+			stats->prim_seq_protocol_err_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: invalid tx word count= %llu\n",
+			stats->invalid_tx_word_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: invalid crc count = %llu\n",
+			stats->invalid_crc_count);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: fcp input requests = %llu\n",
+			stats->fcp_input_requests);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: fcp output requests = %llu\n",
+			stats->fcp_output_requests);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: fcp control requests = %llu\n",
+			stats->fcp_control_requests);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: fcp input megabytes = %llu\n",
+			stats->fcp_input_megabytes);
+	FNIC_MAIN_NOTE(KERN_NOTICE, host,
+			"fnic: fcp output megabytes = %llu\n",
+			stats->fcp_output_megabytes);
+	return;
+}
+
+/*
+ * fnic_reset_host_stats : clears host stats
+ * note : called when reset_statistics set under sysfs dir
+ */
+static void fnic_reset_host_stats(struct Scsi_Host *host)
+{
+	int ret;
+	struct fc_lport *lp = shost_priv(host);
+	struct fnic *fnic = lport_priv(lp);
+	struct fc_host_statistics *stats;
+	unsigned long flags;
+
+	/* dump current stats, before clearing them */
+	stats = fnic_get_stats(host);
+	fnic_dump_fchost_stats(host, stats);
+
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	ret = vnic_dev_stats_clear(fnic->vdev);
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+	if (ret) {
+		FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
+				"fnic: Reset vnic stats failed"
+				" 0x%x", ret);
+		return;
+	}
+	fnic->stats_reset_time = jiffies;
+	memset(stats, 0, sizeof(*stats));
+
+	return;
+}
+
 void fnic_log_q_error(struct fnic *fnic)
 {
 	unsigned int i;
@@ -447,13 +556,6 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	host->transportt = fnic_fc_transport;
 
-	err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ);
-	if (err) {
-		shost_printk(KERN_ERR, fnic->lport->host,
-			     "Unable to alloc shared tag map\n");
-		goto err_out_free_hba;
-	}
-
 	/* Setup PCI resources */
 	pci_set_drvdata(pdev, fnic);
 
@@ -476,10 +578,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	pci_set_master(pdev);
 
 	/* Query PCI controller on system for DMA addressing
-	 * limitation for the device. Try 40-bit first, and
+	 * limitation for the device. Try 64-bit first, and
 	 * fail to 32-bit.
 	 */
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 	if (err) {
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
@@ -496,10 +598,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			goto err_out_release_regions;
 		}
 	} else {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 		if (err) {
 			shost_printk(KERN_ERR, fnic->lport->host,
-				     "Unable to obtain 40-bit DMA "
+				     "Unable to obtain 64-bit DMA "
 				     "for consistent allocations, aborting.\n");
 			goto err_out_release_regions;
 		}
@@ -566,6 +668,22 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			     "aborting.\n");
 		goto err_out_dev_close;
 	}
+
+	/* Configure Maximum Outstanding IO reqs*/
+	if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) {
+		host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
+					max_t(u32, FNIC_MIN_IO_REQ,
+					fnic->config.io_throttle_count));
+	}
+	fnic->fnic_max_tag_id = host->can_queue;
+
+	err = scsi_init_shared_tag_map(host, fnic->fnic_max_tag_id);
+	if (err) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+			     "Unable to alloc shared tag map\n");
+		goto err_out_dev_close;
+	}
+
 	host->max_lun = fnic->config.luns_per_tgt;
 	host->max_id = FNIC_MAX_FCP_TARGET;
 	host->max_cmd_len = FCOE_MAX_CMD_LEN;
@@ -719,6 +837,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	fc_lport_init_stats(lp);
+	fnic->stats_reset_time = jiffies;
 
 	fc_lport_config(lp);
 
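
The fnic_main.c statistics rework replaces the boot-time baseline with a per-adapter stats_reset_time, so seconds_since_last_reset restarts from zero whenever the stats are cleared. A simplified sketch of the jiffies bookkeeping (struct and function names are stand-ins, not the driver's API; jiffies, HZ and u64 come from the usual kernel headers):

	struct adapter {
		unsigned long stats_reset_time;	/* jiffies at last stats clear */
	};

	static u64 seconds_since_last_reset(struct adapter *a)
	{
		/* jiffies wraps, but unsigned subtraction keeps the delta
		 * correct for intervals shorter than one wrap period. */
		return (u64)(jiffies - a->stats_reset_time) / HZ;
	}

	static void reset_stats(struct adapter *a)
	{
		a->stats_reset_time = jiffies;	/* start a new baseline */
	}
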
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index a97e6e584f8c..d014aae19134 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -111,6 +111,12 @@ static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
 	return &fnic->io_req_lock[hash];
 }
 
+static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
+					   int tag)
+{
+	return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
+}
+
 /*
  * Unmap the data buffer and sense buffer for an io_req,
  * also unmap and free the device-private scatter/gather list.
@@ -730,7 +736,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 	fcpio_tag_id_dec(&tag, &id);
 	icmnd_cmpl = &desc->u.icmnd_cmpl;
 
-	if (id >= FNIC_MAX_IO_REQ) {
+	if (id >= fnic->fnic_max_tag_id) {
 		shost_printk(KERN_ERR, fnic->lport->host,
 			"Tag out of range tag %x hdr status = %s\n",
 			id, fnic_fcpio_status_to_str(hdr_status));
@@ -818,38 +824,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 		if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
 			xfer_len -= icmnd_cmpl->residual;
 
-		/*
-		 * If queue_full, then try to reduce queue depth for all
-		 * LUNS on the target. Todo: this should be accompanied
-		 * by a periodic queue_depth rampup based on successful
-		 * IO completion.
-		 */
-		if (icmnd_cmpl->scsi_status == QUEUE_FULL) {
-			struct scsi_device *t_sdev;
-			int qd = 0;
-
-			shost_for_each_device(t_sdev, sc->device->host) {
-				if (t_sdev->id != sc->device->id)
-					continue;
-
-				if (t_sdev->queue_depth > 1) {
-					qd = scsi_track_queue_full
-						(t_sdev,
-						 t_sdev->queue_depth - 1);
-					if (qd == -1)
-						qd = t_sdev->host->cmd_per_lun;
-					shost_printk(KERN_INFO,
-						     fnic->lport->host,
-						     "scsi[%d:%d:%d:%d"
-						     "] queue full detected,"
-						     "new depth = %d\n",
-						     t_sdev->host->host_no,
-						     t_sdev->channel,
-						     t_sdev->id, t_sdev->lun,
-						     t_sdev->queue_depth);
-				}
-			}
-		}
 		break;
 
 	case FCPIO_TIMEOUT: /* request was timed out */
@@ -939,7 +913,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
 	fcpio_tag_id_dec(&tag, &id);
 
-	if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) {
+	if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
 		shost_printk(KERN_ERR, fnic->lport->host,
 			"Tag out of range tag %x hdr status = %s\n",
 			id, fnic_fcpio_status_to_str(hdr_status));
@@ -988,9 +962,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 			spin_unlock_irqrestore(io_lock, flags);
 			return;
 		}
-		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
 		CMD_ABTS_STATUS(sc) = hdr_status;
-
 		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			      "abts cmpl recd. id %d status %s\n",
@@ -1148,23 +1120,25 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
 
 static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
 {
-	unsigned int i;
+	int i;
 	struct fnic_io_req *io_req;
 	unsigned long flags = 0;
 	struct scsi_cmnd *sc;
 	spinlock_t *io_lock;
 	unsigned long start_time = 0;
 
-	for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
+	for (i = 0; i < fnic->fnic_max_tag_id; i++) {
 		if (i == exclude_id)
 			continue;
 
+		io_lock = fnic_io_lock_tag(fnic, i);
+		spin_lock_irqsave(io_lock, flags);
 		sc = scsi_host_find_tag(fnic->lport->host, i);
-		if (!sc)
+		if (!sc) {
+			spin_unlock_irqrestore(io_lock, flags);
 			continue;
+		}
 
-		io_lock = fnic_io_lock_hash(fnic, sc);
-		spin_lock_irqsave(io_lock, flags);
 		io_req = (struct fnic_io_req *)CMD_SP(sc);
 		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
 		    !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
@@ -1236,7 +1210,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
 	fcpio_tag_id_dec(&desc->hdr.tag, &id);
 	id &= FNIC_TAG_MASK;
 
-	if (id >= FNIC_MAX_IO_REQ)
+	if (id >= fnic->fnic_max_tag_id)
 		return;
 
 	sc = scsi_host_find_tag(fnic->lport->host, id);
@@ -1340,14 +1314,15 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
 	if (fnic->in_remove)
 		return;
 
-	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
 		abt_tag = tag;
+		io_lock = fnic_io_lock_tag(fnic, tag);
+		spin_lock_irqsave(io_lock, flags);
 		sc = scsi_host_find_tag(fnic->lport->host, tag);
-		if (!sc)
+		if (!sc) {
+			spin_unlock_irqrestore(io_lock, flags);
 			continue;
-
-		io_lock = fnic_io_lock_hash(fnic, sc);
-		spin_lock_irqsave(io_lock, flags);
+		}
 
 		io_req = (struct fnic_io_req *)CMD_SP(sc);
 
@@ -1441,12 +1416,29 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
 	unsigned long flags;
 	struct scsi_cmnd *sc;
 	struct scsi_lun fc_lun;
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
-	struct fc_lport *lport = rdata->local_port;
-	struct fnic *fnic = lport_priv(lport);
+	struct fc_rport_libfc_priv *rdata;
+	struct fc_lport *lport;
+	struct fnic *fnic;
 	struct fc_rport *cmd_rport;
 	enum fnic_ioreq_state old_ioreq_state;
 
+	if (!rport) {
+		printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
+		return;
+	}
+	rdata = rport->dd_data;
+
+	if (!rdata) {
+		printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
+		return;
+	}
+	lport = rdata->local_port;
+
+	if (!lport) {
+		printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
+		return;
+	}
+	fnic = lport_priv(lport);
 	FNIC_SCSI_DBG(KERN_DEBUG,
 		      fnic->lport->host, "fnic_terminate_rport_io called"
 		      " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
@@ -1456,18 +1448,21 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
 	if (fnic->in_remove)
 		return;
 
-	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
 		abt_tag = tag;
+		io_lock = fnic_io_lock_tag(fnic, tag);
+		spin_lock_irqsave(io_lock, flags);
 		sc = scsi_host_find_tag(fnic->lport->host, tag);
-		if (!sc)
+		if (!sc) {
+			spin_unlock_irqrestore(io_lock, flags);
 			continue;
+		}
 
 		cmd_rport = starget_to_rport(scsi_target(sc->device));
-		if (rport != cmd_rport)
+		if (rport != cmd_rport) {
+			spin_unlock_irqrestore(io_lock, flags);
 			continue;
-
-		io_lock = fnic_io_lock_hash(fnic, sc);
-		spin_lock_irqsave(io_lock, flags);
+		}
 
 		io_req = (struct fnic_io_req *)CMD_SP(sc);
 
@@ -1680,13 +1675,15 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 	io_req->abts_done = NULL;
 
 	/* fw did not complete abort, timed out */
-	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+	if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
 		spin_unlock_irqrestore(io_lock, flags);
 		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
 		ret = FAILED;
 		goto fnic_abort_cmd_end;
 	}
 
+	CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+
 	/*
 	 * firmware completed the abort, check the status,
 	 * free the io_req irrespective of failure or success
@@ -1784,17 +1781,18 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
 	DECLARE_COMPLETION_ONSTACK(tm_done);
 	enum fnic_ioreq_state old_ioreq_state;
 
-	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
+		io_lock = fnic_io_lock_tag(fnic, tag);
+		spin_lock_irqsave(io_lock, flags);
 		sc = scsi_host_find_tag(fnic->lport->host, tag);
 		/*
 		 * ignore this lun reset cmd or cmds that do not belong to
 		 * this lun
 		 */
-		if (!sc || sc == lr_sc || sc->device != lun_dev)
+		if (!sc || sc == lr_sc || sc->device != lun_dev) {
+			spin_unlock_irqrestore(io_lock, flags);
 			continue;
-
-		io_lock = fnic_io_lock_hash(fnic, sc);
-		spin_lock_irqsave(io_lock, flags);
+		}
 
 		io_req = (struct fnic_io_req *)CMD_SP(sc);
 
@@ -1823,6 +1821,11 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
 			spin_unlock_irqrestore(io_lock, flags);
 			continue;
 		}
+
+		if (io_req->abts_done)
+			shost_printk(KERN_ERR, fnic->lport->host,
+				     "%s: io_req->abts_done is set state is %s\n",
+				     __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
 		old_ioreq_state = CMD_STATE(sc);
 		/*
 		 * Any pending IO issued prior to reset is expected to be
@@ -1833,11 +1836,6 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
 		 */
 		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
 
-		if (io_req->abts_done)
-			shost_printk(KERN_ERR, fnic->lport->host,
-				"%s: io_req->abts_done is set state is %s\n",
-				__func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
-
 		BUG_ON(io_req->abts_done);
 
 		abt_tag = tag;
@@ -1890,12 +1888,13 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
 		io_req->abts_done = NULL;
 
 		/* if abort is still pending with fw, fail */
-		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+		if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
 			spin_unlock_irqrestore(io_lock, flags);
 			CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
 			ret = 1;
 			goto clean_pending_aborts_end;
 		}
+		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
 		CMD_SP(sc) = NULL;
 		spin_unlock_irqrestore(io_lock, flags);
 
@@ -2093,8 +2092,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 	spin_unlock_irqrestore(io_lock, flags);
 	int_to_scsilun(sc->device->lun, &fc_lun);
 	/*
-	 * Issue abort and terminate on the device reset request.
-	 * If q'ing of the abort fails, retry issue it after a delay.
+	 * Issue abort and terminate on device reset request.
+	 * If q'ing of terminate fails, retry it after a delay.
 	 */
 	while (1) {
 		spin_lock_irqsave(io_lock, flags);
@@ -2405,7 +2404,7 @@ int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
 	lun_dev = lr_sc->device;
 
 	/* walk again to check, if IOs are still pending in fw */
-	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
 		sc = scsi_host_find_tag(fnic->lport->host, tag);
 		/*
 		 * ignore this lun reset cmd or cmds that do not belong to
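
The recurring pattern in the fnic_scsi.c hunks is a lock-ordering fix: every tag-walking loop now derives the per-IO spinlock from the tag (fnic_io_lock_tag) and takes it BEFORE calling scsi_host_find_tag(), closing the window in which a completing command could free its io_req between lookup and locking. Condensed into a sketch (lock_for_tag() and lookup() are hypothetical stand-ins for fnic_io_lock_tag() and scsi_host_find_tag()):

	for (tag = 0; tag < max_tag_id; tag++) {
		spinlock_t *io_lock = lock_for_tag(tag);

		/* Lock first: the command can no longer complete and free
		 * its private data while we inspect it. */
		spin_lock_irqsave(io_lock, flags);
		sc = lookup(host, tag);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		/* ... examine or abort the IO under io_lock ... */
		spin_unlock_irqrestore(io_lock, flags);
	}
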
diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h
index fbb55364e272..e343e1d0f801 100644
--- a/drivers/scsi/fnic/vnic_scsi.h
+++ b/drivers/scsi/fnic/vnic_scsi.h
@@ -54,8 +54,8 @@
 #define VNIC_FNIC_PLOGI_TIMEOUT_MIN	1000
 #define VNIC_FNIC_PLOGI_TIMEOUT_MAX	255000
 
-#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN	256
-#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX	4096
+#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN	1
+#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX	2048
 
 #define VNIC_FNIC_LINK_DOWN_TIMEOUT_MIN	0
 #define VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX	240000
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index fac8cf5832dd..891c86b66253 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -54,7 +54,7 @@
 #include "hpsa.h"
 
 /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
-#define HPSA_DRIVER_VERSION "2.0.2-1"
+#define HPSA_DRIVER_VERSION "3.4.0-1"
 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
 #define HPSA "hpsa"
 
@@ -89,13 +89,14 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
-	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
-	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334D},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
@@ -107,7 +108,19 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
-	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334d},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
 	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
 		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
 	{0,}
@@ -125,24 +138,35 @@ static struct board_type products[] = {
 	{0x3245103C, "Smart Array P410i", &SA5_access},
 	{0x3247103C, "Smart Array P411", &SA5_access},
 	{0x3249103C, "Smart Array P812", &SA5_access},
-	{0x324a103C, "Smart Array P712m", &SA5_access},
-	{0x324b103C, "Smart Array P711m", &SA5_access},
+	{0x324A103C, "Smart Array P712m", &SA5_access},
+	{0x324B103C, "Smart Array P711m", &SA5_access},
 	{0x3350103C, "Smart Array P222", &SA5_access},
 	{0x3351103C, "Smart Array P420", &SA5_access},
 	{0x3352103C, "Smart Array P421", &SA5_access},
 	{0x3353103C, "Smart Array P822", &SA5_access},
+	{0x334D103C, "Smart Array P822se", &SA5_access},
 	{0x3354103C, "Smart Array P420i", &SA5_access},
 	{0x3355103C, "Smart Array P220i", &SA5_access},
 	{0x3356103C, "Smart Array P721m", &SA5_access},
-	{0x1920103C, "Smart Array", &SA5_access},
-	{0x1921103C, "Smart Array", &SA5_access},
-	{0x1922103C, "Smart Array", &SA5_access},
-	{0x1923103C, "Smart Array", &SA5_access},
-	{0x1924103C, "Smart Array", &SA5_access},
-	{0x1925103C, "Smart Array", &SA5_access},
-	{0x1926103C, "Smart Array", &SA5_access},
-	{0x1928103C, "Smart Array", &SA5_access},
-	{0x334d103C, "Smart Array P822se", &SA5_access},
+	{0x1921103C, "Smart Array P830i", &SA5_access},
+	{0x1922103C, "Smart Array P430", &SA5_access},
+	{0x1923103C, "Smart Array P431", &SA5_access},
+	{0x1924103C, "Smart Array P830", &SA5_access},
+	{0x1926103C, "Smart Array P731m", &SA5_access},
+	{0x1928103C, "Smart Array P230i", &SA5_access},
+	{0x1929103C, "Smart Array P530", &SA5_access},
+	{0x21BD103C, "Smart Array", &SA5_access},
+	{0x21BE103C, "Smart Array", &SA5_access},
+	{0x21BF103C, "Smart Array", &SA5_access},
+	{0x21C0103C, "Smart Array", &SA5_access},
+	{0x21C1103C, "Smart Array", &SA5_access},
+	{0x21C2103C, "Smart Array", &SA5_access},
+	{0x21C3103C, "Smart Array", &SA5_access},
+	{0x21C4103C, "Smart Array", &SA5_access},
+	{0x21C5103C, "Smart Array", &SA5_access},
+	{0x21C7103C, "Smart Array", &SA5_access},
+	{0x21C8103C, "Smart Array", &SA5_access},
+	{0x21C9103C, "Smart Array", &SA5_access},
 	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
 };
 
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 4e31caa21ddf..23f5ba5e6472 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2208,7 +2208,10 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
 
 	if (rsp_rc != 0) {
 		sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
-		return -EIO;
+		/* If failure is received, the host adapter is most likely going
+		 through reset, return success so the caller will wait for the command
+		 being cancelled to get returned */
+		return 0;
 	}
 
 	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
@@ -2221,7 +2224,15 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
 
 	if (status != IBMVFC_MAD_SUCCESS) {
 		sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
-		return -EIO;
+		switch (status) {
+		case IBMVFC_MAD_DRIVER_FAILED:
+		case IBMVFC_MAD_CRQ_ERROR:
+			/* Host adapter most likely going through reset, return success to
+			 the caller will wait for the command being cancelled to get returned */
+			return 0;
+		default:
+			return -EIO;
+		};
 	}
 
 	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index d0fa4b6c551f..fa764406df68 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -241,7 +241,7 @@ static void gather_partition_info(void)
 	struct device_node *rootdn;
 
 	const char *ppartition_name;
-	const unsigned int *p_number_ptr;
+	const __be32 *p_number_ptr;
 
 	/* Retrieve information about this partition */
 	rootdn = of_find_node_by_path("/");
@@ -255,7 +255,7 @@ static void gather_partition_info(void)
 			 sizeof(partition_name));
 	p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
 	if (p_number_ptr)
-		partition_number = *p_number_ptr;
+		partition_number = of_read_number(p_number_ptr, 1);
 	of_node_put(rootdn);
 }
 
@@ -270,10 +270,11 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
 	strncpy(hostdata->madapter_info.partition_name, partition_name,
 			sizeof(hostdata->madapter_info.partition_name));
 
-	hostdata->madapter_info.partition_number = partition_number;
+	hostdata->madapter_info.partition_number =
+					cpu_to_be32(partition_number);
 
-	hostdata->madapter_info.mad_version = 1;
-	hostdata->madapter_info.os_type = 2;
+	hostdata->madapter_info.mad_version = cpu_to_be32(1);
+	hostdata->madapter_info.os_type = cpu_to_be32(2);
 }
 
 /**
@@ -464,9 +465,9 @@ static int initialize_event_pool(struct event_pool *pool,
 		memset(&evt->crq, 0x00, sizeof(evt->crq));
 		atomic_set(&evt->free, 1);
 		evt->crq.valid = 0x80;
-		evt->crq.IU_length = sizeof(*evt->xfer_iu);
-		evt->crq.IU_data_ptr = pool->iu_token +
-			sizeof(*evt->xfer_iu) * i;
+		evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu));
+		evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token +
+			sizeof(*evt->xfer_iu) * i);
 		evt->xfer_iu = pool->iu_storage + i;
 		evt->hostdata = hostdata;
 		evt->ext_list = NULL;
@@ -588,7 +589,7 @@ static void init_event_struct(struct srp_event_struct *evt_struct,
 	evt_struct->cmnd_done = NULL;
 	evt_struct->sync_srp = NULL;
 	evt_struct->crq.format = format;
-	evt_struct->crq.timeout = timeout;
+	evt_struct->crq.timeout = cpu_to_be16(timeout);
 	evt_struct->done = done;
 }
 
@@ -659,8 +660,8 @@ static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
 
 	scsi_for_each_sg(cmd, sg, nseg, i) {
 		struct srp_direct_buf *descr = md + i;
-		descr->va = sg_dma_address(sg);
-		descr->len = sg_dma_len(sg);
+		descr->va = cpu_to_be64(sg_dma_address(sg));
+		descr->len = cpu_to_be32(sg_dma_len(sg));
 		descr->key = 0;
 		total_length += sg_dma_len(sg);
 	}
@@ -703,13 +704,14 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 	}
 
 	indirect->table_desc.va = 0;
-	indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
+	indirect->table_desc.len = cpu_to_be32(sg_mapped *
+					       sizeof(struct srp_direct_buf));
 	indirect->table_desc.key = 0;
 
 	if (sg_mapped <= MAX_INDIRECT_BUFS) {
 		total_length = map_sg_list(cmd, sg_mapped,
 					   &indirect->desc_list[0]);
-		indirect->len = total_length;
+		indirect->len = cpu_to_be32(total_length);
 		return 1;
 	}
 
@@ -731,9 +733,10 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 
 	total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);
 
-	indirect->len = total_length;
-	indirect->table_desc.va = evt_struct->ext_list_token;
-	indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
+	indirect->len = cpu_to_be32(total_length);
+	indirect->table_desc.va = cpu_to_be64(evt_struct->ext_list_token);
+	indirect->table_desc.len = cpu_to_be32(sg_mapped *
+					       sizeof(indirect->desc_list[0]));
 	memcpy(indirect->desc_list, evt_struct->ext_list,
 	       MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
 	return 1;
@@ -849,7 +852,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 			   struct ibmvscsi_host_data *hostdata,
 			   unsigned long timeout)
 {
-	u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
+	__be64 *crq_as_u64 = (__be64 *)&evt_struct->crq;
 	int request_status = 0;
 	int rc;
 	int srp_req = 0;
@@ -920,8 +923,9 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 		add_timer(&evt_struct->timer);
 	}
 
-	if ((rc =
-	     ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
+	rc = ibmvscsi_send_crq(hostdata, be64_to_cpu(crq_as_u64[0]),
+			       be64_to_cpu(crq_as_u64[1]));
+	if (rc != 0) {
 		list_del(&evt_struct->list);
 		del_timer(&evt_struct->timer);
 
@@ -987,15 +991,16 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
 		if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
 			memcpy(cmnd->sense_buffer,
 			       rsp->data,
-			       rsp->sense_data_len);
+			       be32_to_cpu(rsp->sense_data_len));
 		unmap_cmd_data(&evt_struct->iu.srp.cmd,
 			       evt_struct,
 			       evt_struct->hostdata->dev);
 
 		if (rsp->flags & SRP_RSP_FLAG_DOOVER)
-			scsi_set_resid(cmnd, rsp->data_out_res_cnt);
+			scsi_set_resid(cmnd,
+				       be32_to_cpu(rsp->data_out_res_cnt));
 		else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
-			scsi_set_resid(cmnd, rsp->data_in_res_cnt);
+			scsi_set_resid(cmnd, be32_to_cpu(rsp->data_in_res_cnt));
 	}
 
 	if (evt_struct->cmnd_done)
@@ -1037,7 +1042,7 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
 	memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
 	srp_cmd->opcode = SRP_CMD;
 	memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
-	srp_cmd->lun = ((u64) lun) << 48;
+	srp_cmd->lun = cpu_to_be64(((u64)lun) << 48);
 
 	if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
 		if (!firmware_has_feature(FW_FEATURE_CMO))
@@ -1062,9 +1067,10 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
 	if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
 	     out_fmt == SRP_DATA_DESC_INDIRECT) &&
 	    indirect->table_desc.va == 0) {
-		indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
+		indirect->table_desc.va =
+			cpu_to_be64(be64_to_cpu(evt_struct->crq.IU_data_ptr) +
 			offsetof(struct srp_cmd, add_data) +
-			offsetof(struct srp_indirect_buf, desc_list);
+			offsetof(struct srp_indirect_buf, desc_list));
 	}
 
 	return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
@@ -1158,7 +1164,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
 	 * request_limit could have been set to -1 by this client.
 	 */
 	atomic_set(&hostdata->request_limit,
-		   evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);
+		   be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta));
 
 	/* If we had any pending I/Os, kick them */
 	scsi_unblock_requests(hostdata->host);
@@ -1184,8 +1190,9 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
 	login = &evt_struct->iu.srp.login_req;
 	memset(login, 0, sizeof(*login));
 	login->opcode = SRP_LOGIN_REQ;
-	login->req_it_iu_len = sizeof(union srp_iu);
-	login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
+	login->req_it_iu_len = cpu_to_be32(sizeof(union srp_iu));
+	login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+					 SRP_BUF_FORMAT_INDIRECT);
 
 	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	/* Start out with a request limit of 0, since this is negotiated in
@@ -1214,12 +1221,13 @@ static void capabilities_rsp(struct srp_event_struct *evt_struct)
 		dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
 			evt_struct->xfer_iu->mad.capabilities.common.status);
 	} else {
-		if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP)
+		if (hostdata->caps.migration.common.server_support !=
+		    cpu_to_be16(SERVER_SUPPORTS_CAP))
 			dev_info(hostdata->dev, "Partition migration not supported\n");
 
 		if (client_reserve) {
 			if (hostdata->caps.reserve.common.server_support ==
-			    SERVER_SUPPORTS_CAP)
+			    cpu_to_be16(SERVER_SUPPORTS_CAP))
 				dev_info(hostdata->dev, "Client reserve enabled\n");
 			else
 				dev_info(hostdata->dev, "Client reserve not supported\n");
@@ -1251,9 +1259,9 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
1251 req = &evt_struct->iu.mad.capabilities; 1259 req = &evt_struct->iu.mad.capabilities;
1252 memset(req, 0, sizeof(*req)); 1260 memset(req, 0, sizeof(*req));
1253 1261
1254 hostdata->caps.flags = CAP_LIST_SUPPORTED; 1262 hostdata->caps.flags = cpu_to_be32(CAP_LIST_SUPPORTED);
1255 if (hostdata->client_migrated) 1263 if (hostdata->client_migrated)
1256 hostdata->caps.flags |= CLIENT_MIGRATED; 1264 hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED);
1257 1265
1258 strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev), 1266 strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
1259 sizeof(hostdata->caps.name)); 1267 sizeof(hostdata->caps.name));
@@ -1264,22 +1272,31 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
1264 strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc)); 1272 strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
1265 hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0'; 1273 hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
1266 1274
1267 req->common.type = VIOSRP_CAPABILITIES_TYPE; 1275 req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE);
1268 req->buffer = hostdata->caps_addr; 1276 req->buffer = cpu_to_be64(hostdata->caps_addr);
1269 1277
1270 hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES; 1278 hostdata->caps.migration.common.cap_type =
1271 hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration); 1279 cpu_to_be32(MIGRATION_CAPABILITIES);
1272 hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP; 1280 hostdata->caps.migration.common.length =
1273 hostdata->caps.migration.ecl = 1; 1281 cpu_to_be16(sizeof(hostdata->caps.migration));
1282 hostdata->caps.migration.common.server_support =
1283 cpu_to_be16(SERVER_SUPPORTS_CAP);
1284 hostdata->caps.migration.ecl = cpu_to_be32(1);
1274 1285
1275 if (client_reserve) { 1286 if (client_reserve) {
1276 hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES; 1287 hostdata->caps.reserve.common.cap_type =
1277 hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve); 1288 cpu_to_be32(RESERVATION_CAPABILITIES);
1278 hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP; 1289 hostdata->caps.reserve.common.length =
1279 hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2; 1290 cpu_to_be16(sizeof(hostdata->caps.reserve));
1280 req->common.length = sizeof(hostdata->caps); 1291 hostdata->caps.reserve.common.server_support =
1292 cpu_to_be16(SERVER_SUPPORTS_CAP);
1293 hostdata->caps.reserve.type =
1294 cpu_to_be32(CLIENT_RESERVE_SCSI_2);
1295 req->common.length =
1296 cpu_to_be16(sizeof(hostdata->caps));
1281 } else 1297 } else
1282 req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve); 1298 req->common.length = cpu_to_be16(sizeof(hostdata->caps) -
1299 sizeof(hostdata->caps.reserve));
1283 1300
1284 spin_lock_irqsave(hostdata->host->host_lock, flags); 1301 spin_lock_irqsave(hostdata->host->host_lock, flags);
1285 if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) 1302 if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
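[Editor's note] One subtlety in the flags hunk above: using |= with a cpu_to_be32() constant is endian-safe because byte swapping distributes over bitwise OR, so no round trip through CPU order is needed. A sketch with made-up flag values:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	#define CAP_LIST_SUPPORTED	0x01	/* illustrative values only */
	#define CLIENT_MIGRATED		0x02

	static void set_cap_flags(__be32 *flags, bool migrated)
	{
		*flags = cpu_to_be32(CAP_LIST_SUPPORTED);
		if (migrated)
			/* Equivalent to cpu_to_be32(be32_to_cpu(*flags) |
			 * CLIENT_MIGRATED), with no conversion round trip. */
			*flags |= cpu_to_be32(CLIENT_MIGRATED);
	}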
@@ -1297,7 +1314,7 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
1297static void fast_fail_rsp(struct srp_event_struct *evt_struct) 1314static void fast_fail_rsp(struct srp_event_struct *evt_struct)
1298{ 1315{
1299 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; 1316 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
1300 u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status; 1317 u16 status = be16_to_cpu(evt_struct->xfer_iu->mad.fast_fail.common.status);
1301 1318
1302 if (status == VIOSRP_MAD_NOT_SUPPORTED) 1319 if (status == VIOSRP_MAD_NOT_SUPPORTED)
1303 dev_err(hostdata->dev, "fast_fail not supported in server\n"); 1320 dev_err(hostdata->dev, "fast_fail not supported in server\n");
@@ -1334,8 +1351,8 @@ static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
1334 1351
1335 fast_fail_mad = &evt_struct->iu.mad.fast_fail; 1352 fast_fail_mad = &evt_struct->iu.mad.fast_fail;
1336 memset(fast_fail_mad, 0, sizeof(*fast_fail_mad)); 1353 memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
1337 fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL; 1354 fast_fail_mad->common.type = cpu_to_be32(VIOSRP_ENABLE_FAST_FAIL);
1338 fast_fail_mad->common.length = sizeof(*fast_fail_mad); 1355 fast_fail_mad->common.length = cpu_to_be16(sizeof(*fast_fail_mad));
1339 1356
1340 spin_lock_irqsave(hostdata->host->host_lock, flags); 1357 spin_lock_irqsave(hostdata->host->host_lock, flags);
1341 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); 1358 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
@@ -1362,15 +1379,15 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
1362 "host partition %s (%d), OS %d, max io %u\n", 1379 "host partition %s (%d), OS %d, max io %u\n",
1363 hostdata->madapter_info.srp_version, 1380 hostdata->madapter_info.srp_version,
1364 hostdata->madapter_info.partition_name, 1381 hostdata->madapter_info.partition_name,
1365 hostdata->madapter_info.partition_number, 1382 be32_to_cpu(hostdata->madapter_info.partition_number),
1366 hostdata->madapter_info.os_type, 1383 be32_to_cpu(hostdata->madapter_info.os_type),
1367 hostdata->madapter_info.port_max_txu[0]); 1384 be32_to_cpu(hostdata->madapter_info.port_max_txu[0]));
1368 1385
1369 if (hostdata->madapter_info.port_max_txu[0]) 1386 if (hostdata->madapter_info.port_max_txu[0])
1370 hostdata->host->max_sectors = 1387 hostdata->host->max_sectors =
1371 hostdata->madapter_info.port_max_txu[0] >> 9; 1388 be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9;
1372 1389
1373 if (hostdata->madapter_info.os_type == 3 && 1390 if (be32_to_cpu(hostdata->madapter_info.os_type) == 3 &&
1374 strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) { 1391 strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
1375 dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n", 1392 dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
1376 hostdata->madapter_info.srp_version); 1393 hostdata->madapter_info.srp_version);
@@ -1379,7 +1396,7 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
1379 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; 1396 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
1380 } 1397 }
1381 1398
1382 if (hostdata->madapter_info.os_type == 3) { 1399 if (be32_to_cpu(hostdata->madapter_info.os_type) == 3) {
1383 enable_fast_fail(hostdata); 1400 enable_fast_fail(hostdata);
1384 return; 1401 return;
1385 } 1402 }
@@ -1414,9 +1431,9 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
1414 req = &evt_struct->iu.mad.adapter_info; 1431 req = &evt_struct->iu.mad.adapter_info;
1415 memset(req, 0x00, sizeof(*req)); 1432 memset(req, 0x00, sizeof(*req));
1416 1433
1417 req->common.type = VIOSRP_ADAPTER_INFO_TYPE; 1434 req->common.type = cpu_to_be32(VIOSRP_ADAPTER_INFO_TYPE);
1418 req->common.length = sizeof(hostdata->madapter_info); 1435 req->common.length = cpu_to_be16(sizeof(hostdata->madapter_info));
1419 req->buffer = hostdata->adapter_info_addr; 1436 req->buffer = cpu_to_be64(hostdata->adapter_info_addr);
1420 1437
1421 spin_lock_irqsave(hostdata->host->host_lock, flags); 1438 spin_lock_irqsave(hostdata->host->host_lock, flags);
1422 if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) 1439 if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
@@ -1501,7 +1518,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
1501 /* Set up an abort SRP command */ 1518 /* Set up an abort SRP command */
1502 memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); 1519 memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
1503 tsk_mgmt->opcode = SRP_TSK_MGMT; 1520 tsk_mgmt->opcode = SRP_TSK_MGMT;
1504 tsk_mgmt->lun = ((u64) lun) << 48; 1521 tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);
1505 tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK; 1522 tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
1506 tsk_mgmt->task_tag = (u64) found_evt; 1523 tsk_mgmt->task_tag = (u64) found_evt;
1507 1524
@@ -1624,7 +1641,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1624 /* Set up a lun reset SRP command */ 1641 /* Set up a lun reset SRP command */
1625 memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); 1642 memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
1626 tsk_mgmt->opcode = SRP_TSK_MGMT; 1643 tsk_mgmt->opcode = SRP_TSK_MGMT;
1627 tsk_mgmt->lun = ((u64) lun) << 48; 1644 tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);
1628 tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET; 1645 tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
1629 1646
1630 evt->sync_srp = &srp_rsp; 1647 evt->sync_srp = &srp_rsp;
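[Editor's note] Both task-management hunks encode the LUN the same way: per the SRP usage here, a single-level LUN sits in the most significant 16 bits of the 8-byte LUN field, so the shift happens in CPU order and the byte swap happens once at the end. Condensed:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Pack a single-level SCSI LUN into an SRP 8-byte LUN field. */
	static __be64 srp_pack_lun(unsigned int lun)
	{
		return cpu_to_be64((u64)lun << 48);
	}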
@@ -1735,8 +1752,9 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1735{ 1752{
1736 long rc; 1753 long rc;
1737 unsigned long flags; 1754 unsigned long flags;
1755 /* The hypervisor copies our tag value here so no byteswapping */
1738 struct srp_event_struct *evt_struct = 1756 struct srp_event_struct *evt_struct =
1739 (struct srp_event_struct *)crq->IU_data_ptr; 1757 (__force struct srp_event_struct *)crq->IU_data_ptr;
1740 switch (crq->valid) { 1758 switch (crq->valid) {
1741 case 0xC0: /* initialization */ 1759 case 0xC0: /* initialization */
1742 switch (crq->format) { 1760 switch (crq->format) {
@@ -1792,18 +1810,18 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1792 */ 1810 */
1793 if (!valid_event_struct(&hostdata->pool, evt_struct)) { 1811 if (!valid_event_struct(&hostdata->pool, evt_struct)) {
1794 dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n", 1812 dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
1795 (void *)crq->IU_data_ptr); 1813 evt_struct);
1796 return; 1814 return;
1797 } 1815 }
1798 1816
1799 if (atomic_read(&evt_struct->free)) { 1817 if (atomic_read(&evt_struct->free)) {
1800 dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n", 1818 dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
1801 (void *)crq->IU_data_ptr); 1819 evt_struct);
1802 return; 1820 return;
1803 } 1821 }
1804 1822
1805 if (crq->format == VIOSRP_SRP_FORMAT) 1823 if (crq->format == VIOSRP_SRP_FORMAT)
1806 atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta, 1824 atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta),
1807 &hostdata->request_limit); 1825 &hostdata->request_limit);
1808 1826
1809 del_timer(&evt_struct->timer); 1827 del_timer(&evt_struct->timer);
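[Editor's note] The __force cast above is the idiomatic way to tell sparse that a byte-order "violation" is deliberate: the tag is the driver's own pointer, echoed back verbatim by the hypervisor, so swapping it would actually break it. A sketch with a hypothetical event type:

	#include <linux/types.h>

	struct my_event;	/* hypothetical correlation target */

	/* The token round-trips through the hypervisor untouched, so it
	 * is recovered without a byte swap; __force suppresses the
	 * sparse warning for the intentional __be64 cast. */
	static struct my_event *token_to_event(__be64 token)
	{
		return (struct my_event *)(__force unsigned long)token;
	}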
@@ -1856,13 +1874,11 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1856 1874
1857 /* Set up a lun reset SRP command */ 1875 /* Set up a lun reset SRP command */
1858 memset(host_config, 0x00, sizeof(*host_config)); 1876 memset(host_config, 0x00, sizeof(*host_config));
1859 host_config->common.type = VIOSRP_HOST_CONFIG_TYPE; 1877 host_config->common.type = cpu_to_be32(VIOSRP_HOST_CONFIG_TYPE);
1860 host_config->common.length = length; 1878 host_config->common.length = cpu_to_be16(length);
1861 host_config->buffer = addr = dma_map_single(hostdata->dev, buffer, 1879 addr = dma_map_single(hostdata->dev, buffer, length, DMA_BIDIRECTIONAL);
1862 length,
1863 DMA_BIDIRECTIONAL);
1864 1880
1865 if (dma_mapping_error(hostdata->dev, host_config->buffer)) { 1881 if (dma_mapping_error(hostdata->dev, addr)) {
1866 if (!firmware_has_feature(FW_FEATURE_CMO)) 1882 if (!firmware_has_feature(FW_FEATURE_CMO))
1867 dev_err(hostdata->dev, 1883 dev_err(hostdata->dev,
1868 "dma_mapping error getting host config\n"); 1884 "dma_mapping error getting host config\n");
@@ -1870,6 +1886,8 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1870 return -1; 1886 return -1;
1871 } 1887 }
1872 1888
1889 host_config->buffer = cpu_to_be64(addr);
1890
1873 init_completion(&evt_struct->comp); 1891 init_completion(&evt_struct->comp);
1874 spin_lock_irqsave(hostdata->host->host_lock, flags); 1892 spin_lock_irqsave(hostdata->host->host_lock, flags);
1875 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); 1893 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
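[Editor's note] The host-config hunk also fixes an ordering hazard: dma_mapping_error() must see the CPU-order handle, so the address is byte-swapped into the wire field only after the check succeeds. The shape of the fix, with placeholder names:

	#include <linux/types.h>
	#include <asm/byteorder.h>
	#include <linux/dma-mapping.h>

	static int map_req_buffer(struct device *dev, void *buf, size_t len,
				  __be64 *wire_addr)
	{
		dma_addr_t addr;

		addr = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, addr))
			return -ENOMEM;

		/* Swap into wire format only once the handle is known good. */
		*wire_addr = cpu_to_be64(addr);
		return 0;
	}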
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h
index 2cd735d1d196..116243087622 100644
--- a/drivers/scsi/ibmvscsi/viosrp.h
+++ b/drivers/scsi/ibmvscsi/viosrp.h
@@ -75,9 +75,9 @@ struct viosrp_crq {
75 u8 format; /* SCSI vs out-of-band */ 75 u8 format; /* SCSI vs out-of-band */
76 u8 reserved; 76 u8 reserved;
77 u8 status; /* non-scsi failure? (e.g. DMA failure) */ 77 u8 status; /* non-scsi failure? (e.g. DMA failure) */
78 u16 timeout; /* in seconds */ 78 __be16 timeout; /* in seconds */
79 u16 IU_length; /* in bytes */ 79 __be16 IU_length; /* in bytes */
80 u64 IU_data_ptr; /* the TCE for transferring data */ 80 __be64 IU_data_ptr; /* the TCE for transferring data */
81}; 81};
82 82
83/* MADs are Management requests above and beyond the IUs defined in the SRP 83/* MADs are Management requests above and beyond the IUs defined in the SRP
@@ -124,10 +124,10 @@ enum viosrp_capability_flag {
124 * Common MAD header 124 * Common MAD header
125 */ 125 */
126struct mad_common { 126struct mad_common {
127 u32 type; 127 __be32 type;
128 u16 status; 128 __be16 status;
129 u16 length; 129 __be16 length;
130 u64 tag; 130 __be64 tag;
131}; 131};
132 132
133/* 133/*
@@ -139,23 +139,23 @@ struct mad_common {
139 */ 139 */
140struct viosrp_empty_iu { 140struct viosrp_empty_iu {
141 struct mad_common common; 141 struct mad_common common;
142 u64 buffer; 142 __be64 buffer;
143 u32 port; 143 __be32 port;
144}; 144};
145 145
146struct viosrp_error_log { 146struct viosrp_error_log {
147 struct mad_common common; 147 struct mad_common common;
148 u64 buffer; 148 __be64 buffer;
149}; 149};
150 150
151struct viosrp_adapter_info { 151struct viosrp_adapter_info {
152 struct mad_common common; 152 struct mad_common common;
153 u64 buffer; 153 __be64 buffer;
154}; 154};
155 155
156struct viosrp_host_config { 156struct viosrp_host_config {
157 struct mad_common common; 157 struct mad_common common;
158 u64 buffer; 158 __be64 buffer;
159}; 159};
160 160
161struct viosrp_fast_fail { 161struct viosrp_fast_fail {
@@ -164,27 +164,27 @@ struct viosrp_fast_fail {
164 164
165struct viosrp_capabilities { 165struct viosrp_capabilities {
166 struct mad_common common; 166 struct mad_common common;
167 u64 buffer; 167 __be64 buffer;
168}; 168};
169 169
170struct mad_capability_common { 170struct mad_capability_common {
171 u32 cap_type; 171 __be32 cap_type;
172 u16 length; 172 __be16 length;
173 u16 server_support; 173 __be16 server_support;
174}; 174};
175 175
176struct mad_reserve_cap { 176struct mad_reserve_cap {
177 struct mad_capability_common common; 177 struct mad_capability_common common;
178 u32 type; 178 __be32 type;
179}; 179};
180 180
181struct mad_migration_cap { 181struct mad_migration_cap {
182 struct mad_capability_common common; 182 struct mad_capability_common common;
183 u32 ecl; 183 __be32 ecl;
184}; 184};
185 185
186struct capabilities{ 186struct capabilities{
187 u32 flags; 187 __be32 flags;
188 char name[SRP_MAX_LOC_LEN]; 188 char name[SRP_MAX_LOC_LEN];
189 char loc[SRP_MAX_LOC_LEN]; 189 char loc[SRP_MAX_LOC_LEN];
190 struct mad_migration_cap migration; 190 struct mad_migration_cap migration;
@@ -208,10 +208,10 @@ union viosrp_iu {
208struct mad_adapter_info_data { 208struct mad_adapter_info_data {
209 char srp_version[8]; 209 char srp_version[8];
210 char partition_name[96]; 210 char partition_name[96];
211 u32 partition_number; 211 __be32 partition_number;
212 u32 mad_version; 212 __be32 mad_version;
213 u32 os_type; 213 __be32 os_type;
214 u32 port_max_txu[8]; /* per-port maximum transfer */ 214 __be32 port_max_txu[8]; /* per-port maximum transfer */
215}; 215};
216 216
217#endif 217#endif
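[Editor's note] The viosrp.h change is what makes the .c conversions checkable: once a field is declared __be16/__be32/__be64, sparse (make C=1) warns wherever it is mixed with CPU-order values without a conversion. A minimal, hypothetical wire struct showing the idiom:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct example_mad {		/* hypothetical, not from viosrp.h */
		__be32 type;
		__be16 status;
		__be16 length;
		__be64 tag;
	};

	static u16 example_mad_status(const struct example_mad *mad)
	{
		return be16_to_cpu(mad->status);	/* explicit conversion */
	}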
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index df43bfe6d573..4e1b75ca7451 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -708,6 +708,7 @@ struct lpfc_hba {
708 uint32_t cfg_multi_ring_type; 708 uint32_t cfg_multi_ring_type;
709 uint32_t cfg_poll; 709 uint32_t cfg_poll;
710 uint32_t cfg_poll_tmo; 710 uint32_t cfg_poll_tmo;
711 uint32_t cfg_task_mgmt_tmo;
711 uint32_t cfg_use_msi; 712 uint32_t cfg_use_msi;
712 uint32_t cfg_fcp_imax; 713 uint32_t cfg_fcp_imax;
713 uint32_t cfg_fcp_cpu_map; 714 uint32_t cfg_fcp_cpu_map;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 16498e030c70..00656fc92b93 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1865,8 +1865,10 @@ lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
1865{ \ 1865{ \
1866 if (val >= minval && val <= maxval) {\ 1866 if (val >= minval && val <= maxval) {\
1867 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ 1867 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
1868 "3053 lpfc_" #attr " changed from %d to %d\n", \ 1868 "3053 lpfc_" #attr \
1869 vport->cfg_##attr, val); \ 1869 " changed from %d (x%x) to %d (x%x)\n", \
1870 vport->cfg_##attr, vport->cfg_##attr, \
1871 val, val); \
1870 vport->cfg_##attr = val;\ 1872 vport->cfg_##attr = val;\
1871 return 0;\ 1873 return 0;\
1872 }\ 1874 }\
@@ -4011,8 +4013,11 @@ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
4011# For [0], FCP commands are issued to Work Queues in a round robin fashion. 4013# For [0], FCP commands are issued to Work Queues in a round robin fashion.
4012# For [1], FCP commands are issued to a Work Queue associated with the 4014# For [1], FCP commands are issued to a Work Queue associated with the
4013# current CPU. 4015# current CPU.
4016# The driver sets this to 1 if it is able to set up CPU affinity for FCP
4017# I/Os through the Work Queue associated with the current CPU; otherwise,
4018# round-robin scheduling of FCP I/Os through the WQs is used.
4014*/ 4019*/
4015LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algrithmn for " 4020LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for "
4016 "issuing commands [0] - Round Robin, [1] - Current CPU"); 4021 "issuing commands [0] - Round Robin, [1] - Current CPU");
4017 4022
4018/* 4023/*
@@ -4110,6 +4115,12 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
4110 "Milliseconds driver will wait between polling FCP ring"); 4115 "Milliseconds driver will wait between polling FCP ring");
4111 4116
4112/* 4117/*
4118# lpfc_task_mgmt_tmo: Maximum time to wait for task management commands
4119# to complete in seconds. Value range is [5,180], default value is 60.
4120*/
4121LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
4122 "Maximum time to wait for task management commands to complete");
4123/*
4113# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that 4124# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
4114# support this feature 4125# support this feature
4115# 0 = MSI disabled 4126# 0 = MSI disabled
@@ -4295,6 +4306,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
4295 &dev_attr_issue_reset, 4306 &dev_attr_issue_reset,
4296 &dev_attr_lpfc_poll, 4307 &dev_attr_lpfc_poll,
4297 &dev_attr_lpfc_poll_tmo, 4308 &dev_attr_lpfc_poll_tmo,
4309 &dev_attr_lpfc_task_mgmt_tmo,
4298 &dev_attr_lpfc_use_msi, 4310 &dev_attr_lpfc_use_msi,
4299 &dev_attr_lpfc_fcp_imax, 4311 &dev_attr_lpfc_fcp_imax,
4300 &dev_attr_lpfc_fcp_cpu_map, 4312 &dev_attr_lpfc_fcp_cpu_map,
@@ -5274,6 +5286,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
5274 lpfc_topology_init(phba, lpfc_topology); 5286 lpfc_topology_init(phba, lpfc_topology);
5275 lpfc_link_speed_init(phba, lpfc_link_speed); 5287 lpfc_link_speed_init(phba, lpfc_link_speed);
5276 lpfc_poll_tmo_init(phba, lpfc_poll_tmo); 5288 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
5289 lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo);
5277 lpfc_enable_npiv_init(phba, lpfc_enable_npiv); 5290 lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
5278 lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy); 5291 lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
5279 lpfc_enable_rrq_init(phba, lpfc_enable_rrq); 5292 lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
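[Editor's note] For reference, the set routine generated by LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180, ...) follows the lpfc_##attr##_set shape edited earlier in this file. A rough sketch of the expansion, assuming the hba-level variant stores into phba->cfg_task_mgmt_tmo and with the error-logging path elided:

	static int lpfc_task_mgmt_tmo_set(struct lpfc_hba *phba, uint val)
	{
		if (val >= 5 && val <= 180) {
			phba->cfg_task_mgmt_tmo = val;
			return 0;
		}
		return -EINVAL;	/* out of range */
	}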
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 79c13c3263f1..b92aec989d60 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -317,6 +317,11 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
317 } 317 }
318 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 318 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
319 319
320 /* Close the timeout handler abort window */
321 spin_lock_irqsave(&phba->hbalock, flags);
322 cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
323 spin_unlock_irqrestore(&phba->hbalock, flags);
324
320 iocb = &dd_data->context_un.iocb; 325 iocb = &dd_data->context_un.iocb;
321 ndlp = iocb->ndlp; 326 ndlp = iocb->ndlp;
322 rmp = iocb->rmp; 327 rmp = iocb->rmp;
@@ -387,6 +392,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
387 int request_nseg; 392 int request_nseg;
388 int reply_nseg; 393 int reply_nseg;
389 struct bsg_job_data *dd_data; 394 struct bsg_job_data *dd_data;
395 unsigned long flags;
390 uint32_t creg_val; 396 uint32_t creg_val;
391 int rc = 0; 397 int rc = 0;
392 int iocb_stat; 398 int iocb_stat;
@@ -501,14 +507,24 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
501 } 507 }
502 508
503 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); 509 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
504 if (iocb_stat == IOCB_SUCCESS) 510
511 if (iocb_stat == IOCB_SUCCESS) {
512 spin_lock_irqsave(&phba->hbalock, flags);
513 /* make sure the I/O had not been completed yet */
514 if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
515 /* open up abort window to timeout handler */
516 cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
517 }
518 spin_unlock_irqrestore(&phba->hbalock, flags);
505 return 0; /* done for now */ 519 return 0; /* done for now */
506 else if (iocb_stat == IOCB_BUSY) 520 } else if (iocb_stat == IOCB_BUSY) {
507 rc = -EAGAIN; 521 rc = -EAGAIN;
508 else 522 } else {
509 rc = -EIO; 523 rc = -EIO;
524 }
510 525
511 /* iocb failed so cleanup */ 526 /* iocb failed so cleanup */
527 job->dd_data = NULL;
512 528
513free_rmp: 529free_rmp:
514 lpfc_free_bsg_buffers(phba, rmp); 530 lpfc_free_bsg_buffers(phba, rmp);
@@ -577,6 +593,11 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
577 } 593 }
578 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 594 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
579 595
596 /* Close the timeout handler abort window */
597 spin_lock_irqsave(&phba->hbalock, flags);
598 cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
599 spin_unlock_irqrestore(&phba->hbalock, flags);
600
580 rsp = &rspiocbq->iocb; 601 rsp = &rspiocbq->iocb;
581 pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2; 602 pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
582 prsp = (struct lpfc_dmabuf *)pcmd->list.next; 603 prsp = (struct lpfc_dmabuf *)pcmd->list.next;
@@ -639,6 +660,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
639 struct lpfc_iocbq *cmdiocbq; 660 struct lpfc_iocbq *cmdiocbq;
640 uint16_t rpi = 0; 661 uint16_t rpi = 0;
641 struct bsg_job_data *dd_data; 662 struct bsg_job_data *dd_data;
663 unsigned long flags;
642 uint32_t creg_val; 664 uint32_t creg_val;
643 int rc = 0; 665 int rc = 0;
644 666
@@ -721,15 +743,25 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
721 743
722 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); 744 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
723 745
724 if (rc == IOCB_SUCCESS) 746 if (rc == IOCB_SUCCESS) {
747 spin_lock_irqsave(&phba->hbalock, flags);
748 /* make sure the I/O had not been completed/released */
749 if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
750 /* open up abort window to timeout handler */
751 cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
752 }
753 spin_unlock_irqrestore(&phba->hbalock, flags);
725 return 0; /* done for now */ 754 return 0; /* done for now */
726 else if (rc == IOCB_BUSY) 755 } else if (rc == IOCB_BUSY) {
727 rc = -EAGAIN; 756 rc = -EAGAIN;
728 else 757 } else {
729 rc = -EIO; 758 rc = -EIO;
759 }
730 760
731linkdown_err: 761 /* iocb failed so cleanup */
762 job->dd_data = NULL;
732 763
764linkdown_err:
733 cmdiocbq->context1 = ndlp; 765 cmdiocbq->context1 = ndlp;
734 lpfc_els_free_iocb(phba, cmdiocbq); 766 lpfc_els_free_iocb(phba, cmdiocbq);
735 767
@@ -1249,7 +1281,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
1249 struct lpfc_hba *phba = vport->phba; 1281 struct lpfc_hba *phba = vport->phba;
1250 struct get_ct_event *event_req; 1282 struct get_ct_event *event_req;
1251 struct get_ct_event_reply *event_reply; 1283 struct get_ct_event_reply *event_reply;
1252 struct lpfc_bsg_event *evt; 1284 struct lpfc_bsg_event *evt, *evt_next;
1253 struct event_data *evt_dat = NULL; 1285 struct event_data *evt_dat = NULL;
1254 unsigned long flags; 1286 unsigned long flags;
1255 uint32_t rc = 0; 1287 uint32_t rc = 0;
@@ -1269,7 +1301,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
1269 event_reply = (struct get_ct_event_reply *) 1301 event_reply = (struct get_ct_event_reply *)
1270 job->reply->reply_data.vendor_reply.vendor_rsp; 1302 job->reply->reply_data.vendor_reply.vendor_rsp;
1271 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1303 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1272 list_for_each_entry(evt, &phba->ct_ev_waiters, node) { 1304 list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
1273 if (evt->reg_id == event_req->ev_reg_id) { 1305 if (evt->reg_id == event_req->ev_reg_id) {
1274 if (list_empty(&evt->events_to_get)) 1306 if (list_empty(&evt->events_to_get))
1275 break; 1307 break;
@@ -1370,6 +1402,11 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1370 } 1402 }
1371 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1403 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1372 1404
1405 /* Close the timeout handler abort window */
1406 spin_lock_irqsave(&phba->hbalock, flags);
1407 cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
1408 spin_unlock_irqrestore(&phba->hbalock, flags);
1409
1373 ndlp = dd_data->context_un.iocb.ndlp; 1410 ndlp = dd_data->context_un.iocb.ndlp;
1374 cmp = cmdiocbq->context2; 1411 cmp = cmdiocbq->context2;
1375 bmp = cmdiocbq->context3; 1412 bmp = cmdiocbq->context3;
@@ -1433,6 +1470,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1433 int rc = 0; 1470 int rc = 0;
1434 struct lpfc_nodelist *ndlp = NULL; 1471 struct lpfc_nodelist *ndlp = NULL;
1435 struct bsg_job_data *dd_data; 1472 struct bsg_job_data *dd_data;
1473 unsigned long flags;
1436 uint32_t creg_val; 1474 uint32_t creg_val;
1437 1475
1438 /* allocate our bsg tracking structure */ 1476 /* allocate our bsg tracking structure */
@@ -1542,8 +1580,19 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1542 1580
1543 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 1581 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
1544 1582
1545 if (rc == IOCB_SUCCESS) 1583 if (rc == IOCB_SUCCESS) {
1584 spin_lock_irqsave(&phba->hbalock, flags);
1585 /* make sure the I/O had not been completed/released */
1586 if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
1587 /* open up abort window to timeout handler */
1588 ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
1589 }
1590 spin_unlock_irqrestore(&phba->hbalock, flags);
1546 return 0; /* done for now */ 1591 return 0; /* done for now */
1592 }
1593
1594 /* iocb failed so cleanup */
1595 job->dd_data = NULL;
1547 1596
1548issue_ct_rsp_exit: 1597issue_ct_rsp_exit:
1549 lpfc_sli_release_iocbq(phba, ctiocb); 1598 lpfc_sli_release_iocbq(phba, ctiocb);
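[Editor's note] All three BSG submit paths above now bracket the I/O with the same "abort window": it opens under hbalock only if the completion has not already run, the completion handlers close it before touching the job, and the timeout handler below refuses to abort once it is closed. Condensed to one helper (a sketch over lpfc internals, not code from the patch):

	static int issue_with_abort_window(struct lpfc_hba *phba,
					   struct lpfc_iocbq *cmdiocbq)
	{
		unsigned long flags;
		int rc;

		rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
		if (rc == IOCB_SUCCESS) {
			spin_lock_irqsave(&phba->hbalock, flags);
			/* Completion may already have freed the iocb; only
			 * open the window while the driver still owns it. */
			if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)
				cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return 0;
		}
		return rc == IOCB_BUSY ? -EAGAIN : -EIO;
	}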
@@ -5284,9 +5333,15 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
5284 * remove it from the txq queue and call cancel iocbs. 5333 * remove it from the txq queue and call cancel iocbs.
5285 * Otherwise, call abort iotag 5334 * Otherwise, call abort iotag
5286 */ 5335 */
5287
5288 cmdiocb = dd_data->context_un.iocb.cmdiocbq; 5336 cmdiocb = dd_data->context_un.iocb.cmdiocbq;
5289 spin_lock_irq(&phba->hbalock); 5337 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5338
5339 spin_lock_irqsave(&phba->hbalock, flags);
5340 /* make sure the I/O abort window is still open */
5341 if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
5342 spin_unlock_irqrestore(&phba->hbalock, flags);
5343 return -EAGAIN;
5344 }
5290 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, 5345 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
5291 list) { 5346 list) {
5292 if (check_iocb == cmdiocb) { 5347 if (check_iocb == cmdiocb) {
@@ -5296,8 +5351,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
5296 } 5351 }
5297 if (list_empty(&completions)) 5352 if (list_empty(&completions))
5298 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); 5353 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
5299 spin_unlock_irq(&phba->hbalock); 5354 spin_unlock_irqrestore(&phba->hbalock, flags);
5300 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5301 if (!list_empty(&completions)) { 5355 if (!list_empty(&completions)) {
5302 lpfc_sli_cancel_iocbs(phba, &completions, 5356 lpfc_sli_cancel_iocbs(phba, &completions,
5303 IOSTAT_LOCAL_REJECT, 5357 IOSTAT_LOCAL_REJECT,
@@ -5321,9 +5375,10 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
5321 * remove it from the txq queue and call cancel iocbs. 5375 * remove it from the txq queue and call cancel iocbs.
5322 * Otherwise, call abort iotag. 5376 * Otherwise, call abort iotag.
5323 */ 5377 */
5324
5325 cmdiocb = dd_data->context_un.menlo.cmdiocbq; 5378 cmdiocb = dd_data->context_un.menlo.cmdiocbq;
5326 spin_lock_irq(&phba->hbalock); 5379 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5380
5381 spin_lock_irqsave(&phba->hbalock, flags);
5327 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, 5382 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
5328 list) { 5383 list) {
5329 if (check_iocb == cmdiocb) { 5384 if (check_iocb == cmdiocb) {
@@ -5333,8 +5388,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
5333 } 5388 }
5334 if (list_empty(&completions)) 5389 if (list_empty(&completions))
5335 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); 5390 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
5336 spin_unlock_irq(&phba->hbalock); 5391 spin_unlock_irqrestore(&phba->hbalock, flags);
5337 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5338 if (!list_empty(&completions)) { 5392 if (!list_empty(&completions)) {
5339 lpfc_sli_cancel_iocbs(phba, &completions, 5393 lpfc_sli_cancel_iocbs(phba, &completions,
5340 IOSTAT_LOCAL_REJECT, 5394 IOSTAT_LOCAL_REJECT,
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 60d6ca2f68c2..7801601aa5d9 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4437,6 +4437,7 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4437 if (!ndlp) 4437 if (!ndlp)
4438 return; 4438 return;
4439 lpfc_issue_els_logo(vport, ndlp, 0); 4439 lpfc_issue_els_logo(vport, ndlp, 0);
4440 mempool_free(pmb, phba->mbox_mem_pool);
4440} 4441}
4441 4442
4442/* 4443/*
@@ -4456,7 +4457,15 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4456 int rc; 4457 int rc;
4457 uint16_t rpi; 4458 uint16_t rpi;
4458 4459
4459 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { 4460 if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
4461 ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
4462 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
4463 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
4464 "3366 RPI x%x needs to be "
4465 "unregistered nlp_flag x%x "
4466 "did x%x\n",
4467 ndlp->nlp_rpi, ndlp->nlp_flag,
4468 ndlp->nlp_DID);
4460 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4469 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4461 if (mbox) { 4470 if (mbox) {
4462 /* SLI4 ports require the physical rpi value. */ 4471 /* SLI4 ports require the physical rpi value. */
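[Editor's note] The one-line change in lpfc_nlp_logo_unreg plugs a mailbox leak: the fix implies that a mailbox completion handler is responsible for returning the LPFC_MBOXQ_t to its pool. The general shape, as a sketch:

	static void example_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
	{
		/* ... consume the mailbox response and context fields ... */

		/* Return the mailbox to its pool on every path, or it is
		 * leaked for the lifetime of the adapter. */
		mempool_free(pmb, phba->mbox_mem_pool);
	}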
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 501147c4a147..647f5bfb3bd3 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3031,10 +3031,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
3031 phba->sli4_hba.scsi_xri_max); 3031 phba->sli4_hba.scsi_xri_max);
3032 3032
3033 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3033 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3034 spin_lock_irq(&phba->scsi_buf_list_put_lock); 3034 spin_lock(&phba->scsi_buf_list_put_lock);
3035 list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); 3035 list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
3036 list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); 3036 list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
3037 spin_unlock_irq(&phba->scsi_buf_list_put_lock); 3037 spin_unlock(&phba->scsi_buf_list_put_lock);
3038 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 3038 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3039 3039
3040 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { 3040 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
@@ -3070,10 +3070,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
3070 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3070 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3071 } 3071 }
3072 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3072 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3073 spin_lock_irq(&phba->scsi_buf_list_put_lock); 3073 spin_lock(&phba->scsi_buf_list_put_lock);
3074 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get); 3074 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
3075 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 3075 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
3076 spin_unlock_irq(&phba->scsi_buf_list_put_lock); 3076 spin_unlock(&phba->scsi_buf_list_put_lock);
3077 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 3077 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3078 3078
3079 return 0; 3079 return 0;
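[Editor's note] Both hunks above make the inner acquisition a plain spin_lock(): interrupts are already disabled by the outer spin_lock_irq(), and a nested spin_unlock_irq() on the inner lock would re-enable interrupts while the outer lock is still held. The shape, with stand-in locks and lists:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(get_lock);
	static DEFINE_SPINLOCK(put_lock);

	static void splice_both_lists(struct list_head *get,
				      struct list_head *put,
				      struct list_head *out)
	{
		spin_lock_irq(&get_lock);
		spin_lock(&put_lock);	/* irqs already off; plain lock nests */
		list_splice_init(get, out);
		list_splice_init(put, out);
		spin_unlock(&put_lock);
		spin_unlock_irq(&get_lock);
	}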
@@ -4859,6 +4859,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4859 struct lpfc_mqe *mqe; 4859 struct lpfc_mqe *mqe;
4860 int longs; 4860 int longs;
4861 4861
4862 /* Get all the module params for configuring this host */
4863 lpfc_get_cfgparam(phba);
4864
4862 /* Before proceeding, wait for POST done and device ready */ 4865 /* Before proceeding, wait for POST done and device ready */
4863 rc = lpfc_sli4_post_status_check(phba); 4866 rc = lpfc_sli4_post_status_check(phba);
4864 if (rc) 4867 if (rc)
@@ -4902,15 +4905,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4902 sizeof(struct lpfc_mbox_ext_buf_ctx)); 4905 sizeof(struct lpfc_mbox_ext_buf_ctx));
4903 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 4906 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4904 4907
4905 /*
4906 * We need to do a READ_CONFIG mailbox command here before
4907 * calling lpfc_get_cfgparam. For VFs this will report the
4908 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
4909 * All of the resources allocated
4910 * for this Port are tied to these values.
4911 */
4912 /* Get all the module params for configuring this host */
4913 lpfc_get_cfgparam(phba);
4914 phba->max_vpi = LPFC_MAX_VPI; 4908 phba->max_vpi = LPFC_MAX_VPI;
4915 4909
4916 /* This will be set to correct value after the read_config mbox */ 4910 /* This will be set to correct value after the read_config mbox */
@@ -7141,19 +7135,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
7141 phba->sli4_hba.fcp_wq = NULL; 7135 phba->sli4_hba.fcp_wq = NULL;
7142 } 7136 }
7143 7137
7144 if (phba->pci_bar0_memmap_p) {
7145 iounmap(phba->pci_bar0_memmap_p);
7146 phba->pci_bar0_memmap_p = NULL;
7147 }
7148 if (phba->pci_bar2_memmap_p) {
7149 iounmap(phba->pci_bar2_memmap_p);
7150 phba->pci_bar2_memmap_p = NULL;
7151 }
7152 if (phba->pci_bar4_memmap_p) {
7153 iounmap(phba->pci_bar4_memmap_p);
7154 phba->pci_bar4_memmap_p = NULL;
7155 }
7156
7157 /* Release FCP CQ mapping array */ 7138 /* Release FCP CQ mapping array */
7158 if (phba->sli4_hba.fcp_cq_map != NULL) { 7139 if (phba->sli4_hba.fcp_cq_map != NULL) {
7159 kfree(phba->sli4_hba.fcp_cq_map); 7140 kfree(phba->sli4_hba.fcp_cq_map);
@@ -7942,9 +7923,9 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
7942 * particular PCI BARs regions is dependent on the type of 7923 * particular PCI BARs regions is dependent on the type of
7943 * SLI4 device. 7924 * SLI4 device.
7944 */ 7925 */
7945 if (pci_resource_start(pdev, 0)) { 7926 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
7946 phba->pci_bar0_map = pci_resource_start(pdev, 0); 7927 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
7947 bar0map_len = pci_resource_len(pdev, 0); 7928 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
7948 7929
7949 /* 7930 /*
7950 * Map SLI4 PCI Config Space Register base to a kernel virtual 7931 * Map SLI4 PCI Config Space Register base to a kernel virtual
@@ -7958,6 +7939,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
7958 "registers.\n"); 7939 "registers.\n");
7959 goto out; 7940 goto out;
7960 } 7941 }
7942 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
7961 /* Set up BAR0 PCI config space register memory map */ 7943 /* Set up BAR0 PCI config space register memory map */
7962 lpfc_sli4_bar0_register_memmap(phba, if_type); 7944 lpfc_sli4_bar0_register_memmap(phba, if_type);
7963 } else { 7945 } else {
@@ -7980,13 +7962,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
7980 } 7962 }
7981 7963
7982 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 7964 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7983 (pci_resource_start(pdev, 2))) { 7965 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
7984 /* 7966 /*
7985 * Map SLI4 if type 0 HBA Control Register base to a kernel 7967 * Map SLI4 if type 0 HBA Control Register base to a kernel
7986 * virtual address and setup the registers. 7968 * virtual address and setup the registers.
7987 */ 7969 */
7988 phba->pci_bar1_map = pci_resource_start(pdev, 2); 7970 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
7989 bar1map_len = pci_resource_len(pdev, 2); 7971 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
7990 phba->sli4_hba.ctrl_regs_memmap_p = 7972 phba->sli4_hba.ctrl_regs_memmap_p =
7991 ioremap(phba->pci_bar1_map, bar1map_len); 7973 ioremap(phba->pci_bar1_map, bar1map_len);
7992 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 7974 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
@@ -7994,17 +7976,18 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
7994 "ioremap failed for SLI4 HBA control registers.\n"); 7976 "ioremap failed for SLI4 HBA control registers.\n");
7995 goto out_iounmap_conf; 7977 goto out_iounmap_conf;
7996 } 7978 }
7979 phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
7997 lpfc_sli4_bar1_register_memmap(phba); 7980 lpfc_sli4_bar1_register_memmap(phba);
7998 } 7981 }
7999 7982
8000 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 7983 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
8001 (pci_resource_start(pdev, 4))) { 7984 (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
8002 /* 7985 /*
8003 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel 7986 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
8004 * virtual address and setup the registers. 7987 * virtual address and setup the registers.
8005 */ 7988 */
8006 phba->pci_bar2_map = pci_resource_start(pdev, 4); 7989 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
8007 bar2map_len = pci_resource_len(pdev, 4); 7990 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
8008 phba->sli4_hba.drbl_regs_memmap_p = 7991 phba->sli4_hba.drbl_regs_memmap_p =
8009 ioremap(phba->pci_bar2_map, bar2map_len); 7992 ioremap(phba->pci_bar2_map, bar2map_len);
8010 if (!phba->sli4_hba.drbl_regs_memmap_p) { 7993 if (!phba->sli4_hba.drbl_regs_memmap_p) {
@@ -8012,6 +7995,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
8012 "ioremap failed for SLI4 HBA doorbell registers.\n"); 7995 "ioremap failed for SLI4 HBA doorbell registers.\n");
8013 goto out_iounmap_ctrl; 7996 goto out_iounmap_ctrl;
8014 } 7997 }
7998 phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
8015 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 7999 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
8016 if (error) 8000 if (error)
8017 goto out_iounmap_all; 8001 goto out_iounmap_all;
@@ -8405,7 +8389,8 @@ static int
8405lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) 8389lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
8406{ 8390{
8407 int i, idx, saved_chann, used_chann, cpu, phys_id; 8391 int i, idx, saved_chann, used_chann, cpu, phys_id;
8408 int max_phys_id, num_io_channel, first_cpu; 8392 int max_phys_id, min_phys_id;
8393 int num_io_channel, first_cpu, chan;
8409 struct lpfc_vector_map_info *cpup; 8394 struct lpfc_vector_map_info *cpup;
8410#ifdef CONFIG_X86 8395#ifdef CONFIG_X86
8411 struct cpuinfo_x86 *cpuinfo; 8396 struct cpuinfo_x86 *cpuinfo;
@@ -8423,6 +8408,7 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
8423 phba->sli4_hba.num_present_cpu)); 8408 phba->sli4_hba.num_present_cpu));
8424 8409
8425 max_phys_id = 0; 8410 max_phys_id = 0;
8411 min_phys_id = 0xff;
8426 phys_id = 0; 8412 phys_id = 0;
8427 num_io_channel = 0; 8413 num_io_channel = 0;
8428 first_cpu = LPFC_VECTOR_MAP_EMPTY; 8414 first_cpu = LPFC_VECTOR_MAP_EMPTY;
@@ -8446,9 +8432,12 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
8446 8432
8447 if (cpup->phys_id > max_phys_id) 8433 if (cpup->phys_id > max_phys_id)
8448 max_phys_id = cpup->phys_id; 8434 max_phys_id = cpup->phys_id;
8435 if (cpup->phys_id < min_phys_id)
8436 min_phys_id = cpup->phys_id;
8449 cpup++; 8437 cpup++;
8450 } 8438 }
8451 8439
8440 phys_id = min_phys_id;
8452 /* Now associate the HBA vectors with specific CPUs */ 8441 /* Now associate the HBA vectors with specific CPUs */
8453 for (idx = 0; idx < vectors; idx++) { 8442 for (idx = 0; idx < vectors; idx++) {
8454 cpup = phba->sli4_hba.cpu_map; 8443 cpup = phba->sli4_hba.cpu_map;
@@ -8459,13 +8448,25 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
8459 for (i = 1; i < max_phys_id; i++) { 8448 for (i = 1; i < max_phys_id; i++) {
8460 phys_id++; 8449 phys_id++;
8461 if (phys_id > max_phys_id) 8450 if (phys_id > max_phys_id)
8462 phys_id = 0; 8451 phys_id = min_phys_id;
8463 cpu = lpfc_find_next_cpu(phba, phys_id); 8452 cpu = lpfc_find_next_cpu(phba, phys_id);
8464 if (cpu == LPFC_VECTOR_MAP_EMPTY) 8453 if (cpu == LPFC_VECTOR_MAP_EMPTY)
8465 continue; 8454 continue;
8466 goto found; 8455 goto found;
8467 } 8456 }
8468 8457
8458 /* Use round robin for scheduling */
8459 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
8460 chan = 0;
8461 cpup = phba->sli4_hba.cpu_map;
8462 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
8463 cpup->channel_id = chan;
8464 cpup++;
8465 chan++;
8466 if (chan >= phba->cfg_fcp_io_channel)
8467 chan = 0;
8468 }
8469
8469 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8470 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8470 "3329 Cannot set affinity:" 8471 "3329 Cannot set affinity:"
8471 "Error mapping vector %d (%d)\n", 8472 "Error mapping vector %d (%d)\n",
@@ -8503,7 +8504,7 @@ found:
8503 /* Spread vector mapping across multiple physical CPU nodes */ 8504 /* Spread vector mapping across multiple physical CPU nodes */
8504 phys_id++; 8505 phys_id++;
8505 if (phys_id > max_phys_id) 8506 if (phys_id > max_phys_id)
8506 phys_id = 0; 8507 phys_id = min_phys_id;
8507 } 8508 }
8508 8509
8509 /* 8510 /*
@@ -8513,7 +8514,7 @@ found:
8513 * Base the remaining IO channel assignments on IO channels already 8514 * Base the remaining IO channel assignments on IO channels already
8514 * assigned to other CPUs on the same phys_id. 8515 * assigned to other CPUs on the same phys_id.
8515 */ 8516 */
8516 for (i = 0; i <= max_phys_id; i++) { 8517 for (i = min_phys_id; i <= max_phys_id; i++) {
8517 /* 8518 /*
8518 * If there are no io channels already mapped to 8519 * If there are no io channels already mapped to
8519 * this phys_id, just round robin thru the io_channels. 8520 * this phys_id, just round robin thru the io_channels.
@@ -8595,10 +8596,11 @@ out:
8595 if (num_io_channel != phba->sli4_hba.num_present_cpu) 8596 if (num_io_channel != phba->sli4_hba.num_present_cpu)
8596 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8597 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8597 "3333 Set affinity mismatch:" 8598 "3333 Set affinity mismatch:"
8598 "%d chann != %d cpus: %d vactors\n", 8599 "%d chann != %d cpus: %d vectors\n",
8599 num_io_channel, phba->sli4_hba.num_present_cpu, 8600 num_io_channel, phba->sli4_hba.num_present_cpu,
8600 vectors); 8601 vectors);
8601 8602
8603 /* Enable using cpu affinity for scheduling */
8602 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU; 8604 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
8603 return 1; 8605 return 1;
8604} 8606}
@@ -8689,9 +8691,12 @@ enable_msix_vectors:
8689 8691
8690cfg_fail_out: 8692cfg_fail_out:
8691 /* free the irq already requested */ 8693 /* free the irq already requested */
8692 for (--index; index >= 0; index--) 8694 for (--index; index >= 0; index--) {
8695 irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
8696 vector, NULL);
8693 free_irq(phba->sli4_hba.msix_entries[index].vector, 8697 free_irq(phba->sli4_hba.msix_entries[index].vector,
8694 &phba->sli4_hba.fcp_eq_hdl[index]); 8698 &phba->sli4_hba.fcp_eq_hdl[index]);
8699 }
8695 8700
8696msi_fail_out: 8701msi_fail_out:
8697 /* Unconfigure MSI-X capability structure */ 8702 /* Unconfigure MSI-X capability structure */
@@ -8712,9 +8717,12 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
8712 int index; 8717 int index;
8713 8718
8714 /* Free up MSI-X multi-message vectors */ 8719 /* Free up MSI-X multi-message vectors */
8715 for (index = 0; index < phba->cfg_fcp_io_channel; index++) 8720 for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
8721 irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
8722 vector, NULL);
8716 free_irq(phba->sli4_hba.msix_entries[index].vector, 8723 free_irq(phba->sli4_hba.msix_entries[index].vector,
8717 &phba->sli4_hba.fcp_eq_hdl[index]); 8724 &phba->sli4_hba.fcp_eq_hdl[index]);
8725 }
8718 8726
8719 /* Disable MSI-X */ 8727 /* Disable MSI-X */
8720 pci_disable_msix(phba->pcidev); 8728 pci_disable_msix(phba->pcidev);
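[Editor's note] Both MSI-X teardown paths now clear the affinity hint before free_irq(), mirroring the hint set when the vector was requested; freeing an IRQ with a stale hint can trip a kernel warning. A sketch, where "entries" and "handles" stand in for the driver's per-vector bookkeeping:

	#include <linux/interrupt.h>
	#include <linux/pci.h>

	static void free_msix_vectors(struct msix_entry *entries,
				      void **handles, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			irq_set_affinity_hint(entries[i].vector, NULL);
			free_irq(entries[i].vector, handles[i]);
		}
	}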
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 1242b6c4308b..c913e8cc3b26 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -926,10 +926,10 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
926 926
927 /* get all SCSI buffers that need to be reposted onto a local list */ 927 /* get all SCSI buffers that need to be reposted onto a local list */
928 spin_lock_irq(&phba->scsi_buf_list_get_lock); 928 spin_lock_irq(&phba->scsi_buf_list_get_lock);
929 spin_lock_irq(&phba->scsi_buf_list_put_lock); 929 spin_lock(&phba->scsi_buf_list_put_lock);
930 list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist); 930 list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
931 list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist); 931 list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
932 spin_unlock_irq(&phba->scsi_buf_list_put_lock); 932 spin_unlock(&phba->scsi_buf_list_put_lock);
933 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 933 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
934 934
935 /* post the list of scsi buffer sgls to port if available */ 935 /* post the list of scsi buffer sgls to port if available */
@@ -1000,9 +1000,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
1000 } 1000 }
1001 memset(psb->data, 0, phba->cfg_sg_dma_buf_size); 1001 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
1002 1002
1003 /* Page alignment is CRITICAL, double check to be sure */ 1003 /*
1004 if (((unsigned long)(psb->data) & 1004 * 4K Page alignment is CRITICAL to BlockGuard, double check
1005 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) { 1005 * to be sure.
1006 */
1007 if (phba->cfg_enable_bg && (((unsigned long)(psb->data) &
1008 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
1006 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 1009 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
1007 psb->data, psb->dma_handle); 1010 psb->data, psb->dma_handle);
1008 kfree(psb); 1011 kfree(psb);
@@ -1134,22 +1137,21 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1134{ 1137{
1135 struct lpfc_scsi_buf * lpfc_cmd = NULL; 1138 struct lpfc_scsi_buf * lpfc_cmd = NULL;
1136 struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get; 1139 struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
1137 unsigned long gflag = 0; 1140 unsigned long iflag = 0;
1138 unsigned long pflag = 0;
1139 1141
1140 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag); 1142 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
1141 list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf, 1143 list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
1142 list); 1144 list);
1143 if (!lpfc_cmd) { 1145 if (!lpfc_cmd) {
1144 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag); 1146 spin_lock(&phba->scsi_buf_list_put_lock);
1145 list_splice(&phba->lpfc_scsi_buf_list_put, 1147 list_splice(&phba->lpfc_scsi_buf_list_put,
1146 &phba->lpfc_scsi_buf_list_get); 1148 &phba->lpfc_scsi_buf_list_get);
1147 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 1149 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
1148 list_remove_head(scsi_buf_list_get, lpfc_cmd, 1150 list_remove_head(scsi_buf_list_get, lpfc_cmd,
1149 struct lpfc_scsi_buf, list); 1151 struct lpfc_scsi_buf, list);
1150 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag); 1152 spin_unlock(&phba->scsi_buf_list_put_lock);
1151 } 1153 }
1152 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag); 1154 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
1153 return lpfc_cmd; 1155 return lpfc_cmd;
1154} 1156}
1155/** 1157/**
@@ -1167,11 +1169,10 @@ static struct lpfc_scsi_buf*
1167lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 1169lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1168{ 1170{
1169 struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next; 1171 struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
1170 unsigned long gflag = 0; 1172 unsigned long iflag = 0;
1171 unsigned long pflag = 0;
1172 int found = 0; 1173 int found = 0;
1173 1174
1174 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag); 1175 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
1175 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, 1176 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
1176 &phba->lpfc_scsi_buf_list_get, list) { 1177 &phba->lpfc_scsi_buf_list_get, list) {
1177 if (lpfc_test_rrq_active(phba, ndlp, 1178 if (lpfc_test_rrq_active(phba, ndlp,
@@ -1182,11 +1183,11 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1182 break; 1183 break;
1183 } 1184 }
1184 if (!found) { 1185 if (!found) {
1185 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag); 1186 spin_lock(&phba->scsi_buf_list_put_lock);
1186 list_splice(&phba->lpfc_scsi_buf_list_put, 1187 list_splice(&phba->lpfc_scsi_buf_list_put,
1187 &phba->lpfc_scsi_buf_list_get); 1188 &phba->lpfc_scsi_buf_list_get);
1188 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 1189 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
1189 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag); 1190 spin_unlock(&phba->scsi_buf_list_put_lock);
1190 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, 1191 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
1191 &phba->lpfc_scsi_buf_list_get, list) { 1192 &phba->lpfc_scsi_buf_list_get, list) {
1192 if (lpfc_test_rrq_active( 1193 if (lpfc_test_rrq_active(
@@ -1197,7 +1198,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1197 break; 1198 break;
1198 } 1199 }
1199 } 1200 }
1200 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag); 1201 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
1201 if (!found) 1202 if (!found)
1202 return NULL; 1203 return NULL;
1203 return lpfc_cmd; 1204 return lpfc_cmd;
@@ -3966,11 +3967,11 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3966 3967
3967 /* 3968 /*
3968 * Check SLI validation that all the transfer was actually done 3969 * Check SLI validation that all the transfer was actually done
3969 * (fcpi_parm should be zero). 3970 * (fcpi_parm should be zero). Apply check only to reads.
3970 */ 3971 */
3971 } else if (fcpi_parm) { 3972 } else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
3972 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, 3973 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
3973 "9029 FCP Data Transfer Check Error: " 3974 "9029 FCP Read Check Error Data: "
3974 "x%x x%x x%x x%x x%x\n", 3975 "x%x x%x x%x x%x x%x\n",
3975 be32_to_cpu(fcpcmd->fcpDl), 3976 be32_to_cpu(fcpcmd->fcpDl),
3976 be32_to_cpu(fcprsp->rspResId), 3977 be32_to_cpu(fcprsp->rspResId),
@@ -4342,6 +4343,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
4342 char tag[2]; 4343 char tag[2];
4343 uint8_t *ptr; 4344 uint8_t *ptr;
4344 bool sli4; 4345 bool sli4;
4346 uint32_t fcpdl;
4345 4347
4346 if (!pnode || !NLP_CHK_NODE_ACT(pnode)) 4348 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
4347 return; 4349 return;
@@ -4389,8 +4391,12 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
4389 iocb_cmd->ulpPU = PARM_READ_CHECK; 4391 iocb_cmd->ulpPU = PARM_READ_CHECK;
4390 if (vport->cfg_first_burst_size && 4392 if (vport->cfg_first_burst_size &&
4391 (pnode->nlp_flag & NLP_FIRSTBURST)) { 4393 (pnode->nlp_flag & NLP_FIRSTBURST)) {
4392 piocbq->iocb.un.fcpi.fcpi_XRdy = 4394 fcpdl = scsi_bufflen(scsi_cmnd);
4393 vport->cfg_first_burst_size; 4395 if (fcpdl < vport->cfg_first_burst_size)
4396 piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
4397 else
4398 piocbq->iocb.un.fcpi.fcpi_XRdy =
4399 vport->cfg_first_burst_size;
4394 } 4400 }
4395 fcp_cmnd->fcpCntl3 = WRITE_DATA; 4401 fcp_cmnd->fcpCntl3 = WRITE_DATA;
4396 phba->fc4OutputRequests++; 4402 phba->fc4OutputRequests++;
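[Editor's note] The write-path hunk is a clamp: the XFER_RDY value advertised for first burst must never exceed the data the command actually carries. Reduced to its essence:

	#include <linux/kernel.h>

	static u32 first_burst_len(u32 fcpdl, u32 cfg_first_burst_size)
	{
		/* Never offer more first-burst data than the command holds. */
		return min(fcpdl, cfg_first_burst_size);
	}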
@@ -4878,6 +4884,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4878 goto out_unlock; 4884 goto out_unlock;
4879 } 4885 }
4880 4886
4887 /* Indicate the IO is being aborted by the driver. */
4888 iocb->iocb_flag |= LPFC_DRIVER_ABORTED;
4889
4881 /* 4890 /*
4882 * The scsi command cannot be in txq and it is in flight because the 4891 * The scsi command cannot be in txq and it is in flight because the
4883 * pCmd is still pointing at the SCSI command we have to abort. There 4892 * pCmd is still pointing at the SCSI command we have to abort. There
@@ -5006,7 +5015,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
5006 lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode); 5015 lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
5007 if (lpfc_cmd == NULL) 5016 if (lpfc_cmd == NULL)
5008 return FAILED; 5017 return FAILED;
5009 lpfc_cmd->timeout = 60; 5018 lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
5010 lpfc_cmd->rdata = rdata; 5019 lpfc_cmd->rdata = rdata;
5011 5020
5012 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, 5021 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 0392e114531c..612f48973ff2 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -9831,6 +9831,13 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
9831 abort_cmd) != 0) 9831 abort_cmd) != 0)
9832 continue; 9832 continue;
9833 9833
9834 /*
9835 * If the iocbq is already being aborted, don't take a second
9836 * action, but do count it.
9837 */
9838 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
9839 continue;
9840
9834 /* issue ABTS for this IOCB based on iotag */ 9841 /* issue ABTS for this IOCB based on iotag */
9835 abtsiocb = lpfc_sli_get_iocbq(phba); 9842 abtsiocb = lpfc_sli_get_iocbq(phba);
9836 if (abtsiocb == NULL) { 9843 if (abtsiocb == NULL) {
@@ -9838,6 +9845,9 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
9838 continue; 9845 continue;
9839 } 9846 }
9840 9847
9848 /* indicate the IO is being aborted by the driver. */
9849 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
9850
9841 cmd = &iocbq->iocb; 9851 cmd = &iocbq->iocb;
9842 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 9852 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
9843 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 9853 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
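
[Editor's note] Taken together, the lpfc hunks above implement an abort-once protocol: LPFC_DRIVER_ABORTED is set the first time the driver issues an ABTS for an iocb, and any later pass over the ring skips flagged entries instead of aborting them twice. A hedged sketch of the pattern, assuming the caller already holds the ring lock (loop body condensed; the surrounding iteration is illustrative):

    /* for each outstanding iocbq that matched the abort criteria ... */
    if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
            continue;       /* abort already in flight; just count it */

    iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
    /* ... allocate abtsiocb and issue the ABTS as in the hunk above ... */
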
@@ -9847,7 +9857,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
9847 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 9857 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
9848 abtsiocb->iocb.ulpLe = 1; 9858 abtsiocb->iocb.ulpLe = 1;
9849 abtsiocb->iocb.ulpClass = cmd->ulpClass; 9859 abtsiocb->iocb.ulpClass = cmd->ulpClass;
9850 abtsiocb->vport = phba->pport; 9860 abtsiocb->vport = vport;
9851 9861
9852 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 9862 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
9853 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; 9863 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
@@ -12233,7 +12243,6 @@ static void __iomem *
12233lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) 12243lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
12234{ 12244{
12235 struct pci_dev *pdev; 12245 struct pci_dev *pdev;
12236 unsigned long bar_map, bar_map_len;
12237 12246
12238 if (!phba->pcidev) 12247 if (!phba->pcidev)
12239 return NULL; 12248 return NULL;
@@ -12242,25 +12251,10 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
12242 12251
12243 switch (pci_barset) { 12252 switch (pci_barset) {
12244 case WQ_PCI_BAR_0_AND_1: 12253 case WQ_PCI_BAR_0_AND_1:
12245 if (!phba->pci_bar0_memmap_p) {
12246 bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
12247 bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
12248 phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len);
12249 }
12250 return phba->pci_bar0_memmap_p; 12254 return phba->pci_bar0_memmap_p;
12251 case WQ_PCI_BAR_2_AND_3: 12255 case WQ_PCI_BAR_2_AND_3:
12252 if (!phba->pci_bar2_memmap_p) {
12253 bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
12254 bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
12255 phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len);
12256 }
12257 return phba->pci_bar2_memmap_p; 12256 return phba->pci_bar2_memmap_p;
12258 case WQ_PCI_BAR_4_AND_5: 12257 case WQ_PCI_BAR_4_AND_5:
12259 if (!phba->pci_bar4_memmap_p) {
12260 bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
12261 bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
12262 phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len);
12263 }
12264 return phba->pci_bar4_memmap_p; 12258 return phba->pci_bar4_memmap_p;
12265 default: 12259 default:
12266 break; 12260 break;
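
[Editor's note] The hunk above strips the lazy, per-call ioremap() from the BAR lookup, leaving a pure table lookup; the cached pointers are expected to be populated once during adapter setup. A hedged sketch of that one-time mapping, error handling elided (this assumes the mapping now happens in the SLI4 PCI setup path, which this diff does not show):

    phba->pci_bar0_memmap_p =
            ioremap(pci_resource_start(pdev, PCI_64BIT_BAR0),
                    pci_resource_len(pdev, PCI_64BIT_BAR0));
    if (!phba->pci_bar0_memmap_p)
            return -ENODEV;
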
@@ -15808,7 +15802,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
15808void 15802void
15809lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 15803lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
15810{ 15804{
15811 struct lpfc_fcf_pri *fcf_pri; 15805 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
15812 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 15806 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15813 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 15807 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15814 "2762 FCF (x%x) reached driver's book " 15808 "2762 FCF (x%x) reached driver's book "
@@ -15818,7 +15812,8 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
15818 } 15812 }
15819 /* Clear the eligible FCF record index bmask */ 15813 /* Clear the eligible FCF record index bmask */
15820 spin_lock_irq(&phba->hbalock); 15814 spin_lock_irq(&phba->hbalock);
15821 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 15815 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
15816 list) {
15822 if (fcf_pri->fcf_rec.fcf_index == fcf_index) { 15817 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
15823 list_del_init(&fcf_pri->list); 15818 list_del_init(&fcf_pri->list);
15824 break; 15819 break;
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 97617996206d..6b0f2478706e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -58,7 +58,7 @@ struct lpfc_iocbq {
58 58
59 IOCB_t iocb; /* IOCB cmd */ 59 IOCB_t iocb; /* IOCB cmd */
60 uint8_t retry; /* retry counter for IOCB cmd - if needed */ 60 uint8_t retry; /* retry counter for IOCB cmd - if needed */
61 uint16_t iocb_flag; 61 uint32_t iocb_flag;
62#define LPFC_IO_LIBDFC 1 /* libdfc iocb */ 62#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
63#define LPFC_IO_WAKE 2 /* Synchronous I/O completed */ 63#define LPFC_IO_WAKE 2 /* Synchronous I/O completed */
64#define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */ 64#define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */
@@ -73,11 +73,11 @@ struct lpfc_iocbq {
73#define LPFC_IO_DIF_PASS 0x400 /* T10 DIF IO pass-thru prot */ 73#define LPFC_IO_DIF_PASS 0x400 /* T10 DIF IO pass-thru prot */
74#define LPFC_IO_DIF_STRIP 0x800 /* T10 DIF IO strip prot */ 74#define LPFC_IO_DIF_STRIP 0x800 /* T10 DIF IO strip prot */
75#define LPFC_IO_DIF_INSERT 0x1000 /* T10 DIF IO insert prot */ 75#define LPFC_IO_DIF_INSERT 0x1000 /* T10 DIF IO insert prot */
76#define LPFC_IO_CMD_OUTSTANDING 0x2000 /* timeout handler abort window */
76 77
77#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */ 78#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
78#define LPFC_FIP_ELS_ID_SHIFT 14 79#define LPFC_FIP_ELS_ID_SHIFT 14
79 80
80 uint8_t rsvd2;
81 uint32_t drvrTimeout; /* driver timeout in seconds */ 81 uint32_t drvrTimeout; /* driver timeout in seconds */
82 uint32_t fcp_wqidx; /* index to FCP work queue */ 82 uint32_t fcp_wqidx; /* index to FCP work queue */
83 struct lpfc_vport *vport;/* virtual port pointer */ 83 struct lpfc_vport *vport;/* virtual port pointer */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 5bcc38223ac9..85120b77aa0e 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -523,7 +523,7 @@ struct lpfc_sli4_hba {
523 struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ 523 struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
524 struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ 524 struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
525 525
526 uint8_t fw_func_mode; /* FW function protocol mode */ 526 uint32_t fw_func_mode; /* FW function protocol mode */
527 uint32_t ulp0_mode; /* ULP0 protocol mode */ 527 uint32_t ulp0_mode; /* ULP0 protocol mode */
528 uint32_t ulp1_mode; /* ULP1 protocol mode */ 528 uint32_t ulp1_mode; /* ULP1 protocol mode */
529 529
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 21859d2006ce..f58f18342bc3 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.41" 21#define LPFC_DRIVER_VERSION "8.3.42"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23 23
24/* Used for SLI 2/3 */ 24/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 04a42a505852..0c73ba4bf451 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
33/* 33/*
34 * MegaRAID SAS Driver meta data 34 * MegaRAID SAS Driver meta data
35 */ 35 */
36#define MEGASAS_VERSION "06.600.18.00-rc1" 36#define MEGASAS_VERSION "06.700.06.00-rc1"
37#define MEGASAS_RELDATE "May. 15, 2013" 37#define MEGASAS_RELDATE "Aug. 31, 2013"
38#define MEGASAS_EXT_VERSION "Wed. May. 15 17:00:00 PDT 2013" 38#define MEGASAS_EXT_VERSION "Sat. Aug. 31 17:00:00 PDT 2013"
39 39
40/* 40/*
41 * Device IDs 41 * Device IDs
@@ -170,6 +170,7 @@
170 170
171#define MR_DCMD_CTRL_GET_INFO 0x01010000 171#define MR_DCMD_CTRL_GET_INFO 0x01010000
172#define MR_DCMD_LD_GET_LIST 0x03010000 172#define MR_DCMD_LD_GET_LIST 0x03010000
173#define MR_DCMD_LD_LIST_QUERY 0x03010100
173 174
174#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000 175#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000
175#define MR_FLUSH_CTRL_CACHE 0x01 176#define MR_FLUSH_CTRL_CACHE 0x01
@@ -345,6 +346,15 @@ enum MR_PD_QUERY_TYPE {
345 MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5, 346 MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5,
346}; 347};
347 348
349enum MR_LD_QUERY_TYPE {
350 MR_LD_QUERY_TYPE_ALL = 0,
351 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST = 1,
352 MR_LD_QUERY_TYPE_USED_TGT_IDS = 2,
353 MR_LD_QUERY_TYPE_CLUSTER_ACCESS = 3,
354 MR_LD_QUERY_TYPE_CLUSTER_LOCALE = 4,
355};
356
357
348#define MR_EVT_CFG_CLEARED 0x0004 358#define MR_EVT_CFG_CLEARED 0x0004
349#define MR_EVT_LD_STATE_CHANGE 0x0051 359#define MR_EVT_LD_STATE_CHANGE 0x0051
350#define MR_EVT_PD_INSERTED 0x005b 360#define MR_EVT_PD_INSERTED 0x005b
@@ -435,6 +445,14 @@ struct MR_LD_LIST {
435 } ldList[MAX_LOGICAL_DRIVES]; 445 } ldList[MAX_LOGICAL_DRIVES];
436} __packed; 446} __packed;
437 447
448struct MR_LD_TARGETID_LIST {
449 u32 size;
450 u32 count;
451 u8 pad[3];
452 u8 targetId[MAX_LOGICAL_DRIVES];
453};
454
455
438/* 456/*
439 * SAS controller properties 457 * SAS controller properties
440 */ 458 */
@@ -474,21 +492,39 @@ struct megasas_ctrl_prop {
474 * a bit in the following structure. 492 * a bit in the following structure.
475 */ 493 */
476 struct { 494 struct {
477 u32 copyBackDisabled : 1; 495#if defined(__BIG_ENDIAN_BITFIELD)
478 u32 SMARTerEnabled : 1; 496 u32 reserved:18;
479 u32 prCorrectUnconfiguredAreas : 1; 497 u32 enableJBOD:1;
480 u32 useFdeOnly : 1; 498 u32 disableSpinDownHS:1;
481 u32 disableNCQ : 1; 499 u32 allowBootWithPinnedCache:1;
482 u32 SSDSMARTerEnabled : 1; 500 u32 disableOnlineCtrlReset:1;
483 u32 SSDPatrolReadEnabled : 1; 501 u32 enableSecretKeyControl:1;
484 u32 enableSpinDownUnconfigured : 1; 502 u32 autoEnhancedImport:1;
485 u32 autoEnhancedImport : 1; 503 u32 enableSpinDownUnconfigured:1;
486 u32 enableSecretKeyControl : 1; 504 u32 SSDPatrolReadEnabled:1;
487 u32 disableOnlineCtrlReset : 1; 505 u32 SSDSMARTerEnabled:1;
488 u32 allowBootWithPinnedCache : 1; 506 u32 disableNCQ:1;
489 u32 disableSpinDownHS : 1; 507 u32 useFdeOnly:1;
490 u32 enableJBOD : 1; 508 u32 prCorrectUnconfiguredAreas:1;
491 u32 reserved :18; 509 u32 SMARTerEnabled:1;
510 u32 copyBackDisabled:1;
511#else
512 u32 copyBackDisabled:1;
513 u32 SMARTerEnabled:1;
514 u32 prCorrectUnconfiguredAreas:1;
515 u32 useFdeOnly:1;
516 u32 disableNCQ:1;
517 u32 SSDSMARTerEnabled:1;
518 u32 SSDPatrolReadEnabled:1;
519 u32 enableSpinDownUnconfigured:1;
520 u32 autoEnhancedImport:1;
521 u32 enableSecretKeyControl:1;
522 u32 disableOnlineCtrlReset:1;
523 u32 allowBootWithPinnedCache:1;
524 u32 disableSpinDownHS:1;
525 u32 enableJBOD:1;
526 u32 reserved:18;
527#endif
492 } OnOffProperties; 528 } OnOffProperties;
493 u8 autoSnapVDSpace; 529 u8 autoSnapVDSpace;
494 u8 viewSpace; 530 u8 viewSpace;
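
[Editor's note] This is the first of several hunks that re-declare firmware-defined words as endianness-aware bitfields: C gives no portable bit order inside a storage unit, so the same wire layout needs the member list reversed under __BIG_ENDIAN_BITFIELD. A minimal sketch of the convention, with hypothetical field names:

    struct fw_word {
    #if defined(__BIG_ENDIAN_BITFIELD)
            u32 reserved:30;
            u32 feature_b:1;
            u32 feature_a:1;        /* wire bit 0 on either build */
    #else
            u32 feature_a:1;        /* wire bit 0 on either build */
            u32 feature_b:1;
            u32 reserved:30;
    #endif
    };
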
@@ -802,6 +838,30 @@ struct megasas_ctrl_info {
802 u16 cacheMemorySize; /*7A2h */ 838 u16 cacheMemorySize; /*7A2h */
803 839
804 struct { /*7A4h */ 840 struct { /*7A4h */
841#if defined(__BIG_ENDIAN_BITFIELD)
842 u32 reserved:11;
843 u32 supportUnevenSpans:1;
844 u32 dedicatedHotSparesLimited:1;
845 u32 headlessMode:1;
846 u32 supportEmulatedDrives:1;
847 u32 supportResetNow:1;
848 u32 realTimeScheduler:1;
849 u32 supportSSDPatrolRead:1;
850 u32 supportPerfTuning:1;
851 u32 disableOnlinePFKChange:1;
852 u32 supportJBOD:1;
853 u32 supportBootTimePFKChange:1;
854 u32 supportSetLinkSpeed:1;
855 u32 supportEmergencySpares:1;
856 u32 supportSuspendResumeBGops:1;
857 u32 blockSSDWriteCacheChange:1;
858 u32 supportShieldState:1;
859 u32 supportLdBBMInfo:1;
860 u32 supportLdPIType3:1;
861 u32 supportLdPIType2:1;
862 u32 supportLdPIType1:1;
863 u32 supportPIcontroller:1;
864#else
805 u32 supportPIcontroller:1; 865 u32 supportPIcontroller:1;
806 u32 supportLdPIType1:1; 866 u32 supportLdPIType1:1;
807 u32 supportLdPIType2:1; 867 u32 supportLdPIType2:1;
@@ -827,6 +887,7 @@ struct megasas_ctrl_info {
827 887
828 u32 supportUnevenSpans:1; 888 u32 supportUnevenSpans:1;
829 u32 reserved:11; 889 u32 reserved:11;
890#endif
830 } adapterOperations2; 891 } adapterOperations2;
831 892
832 u8 driverVersion[32]; /*7A8h */ 893 u8 driverVersion[32]; /*7A8h */
@@ -863,7 +924,7 @@ struct megasas_ctrl_info {
863 * =============================== 924 * ===============================
864 */ 925 */
865#define MEGASAS_MAX_PD_CHANNELS 2 926#define MEGASAS_MAX_PD_CHANNELS 2
866#define MEGASAS_MAX_LD_CHANNELS 2 927#define MEGASAS_MAX_LD_CHANNELS 1
867#define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \ 928#define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \
868 MEGASAS_MAX_LD_CHANNELS) 929 MEGASAS_MAX_LD_CHANNELS)
869#define MEGASAS_MAX_DEV_PER_CHANNEL 128 930#define MEGASAS_MAX_DEV_PER_CHANNEL 128
@@ -1051,9 +1112,15 @@ union megasas_sgl_frame {
1051 1112
1052typedef union _MFI_CAPABILITIES { 1113typedef union _MFI_CAPABILITIES {
1053 struct { 1114 struct {
1115#if defined(__BIG_ENDIAN_BITFIELD)
1116 u32 reserved:30;
1117 u32 support_additional_msix:1;
1118 u32 support_fp_remote_lun:1;
1119#else
1054 u32 support_fp_remote_lun:1; 1120 u32 support_fp_remote_lun:1;
1055 u32 support_additional_msix:1; 1121 u32 support_additional_msix:1;
1056 u32 reserved:30; 1122 u32 reserved:30;
1123#endif
1057 } mfi_capabilities; 1124 } mfi_capabilities;
1058 u32 reg; 1125 u32 reg;
1059} MFI_CAPABILITIES; 1126} MFI_CAPABILITIES;
@@ -1656,4 +1723,16 @@ struct megasas_mgmt_info {
1656 int max_index; 1723 int max_index;
1657}; 1724};
1658 1725
1726u8
1727MR_BuildRaidContext(struct megasas_instance *instance,
1728 struct IO_REQUEST_INFO *io_info,
1729 struct RAID_CONTEXT *pRAID_Context,
1730 struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN);
1731u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
1732struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
1733u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map);
1734u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map);
1735u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map);
1736u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
1737
1659#endif /*LSI_MEGARAID_SAS_H */ 1738#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 1f0ca68409d4..3020921a4746 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_base.c 20 * FILE: megaraid_sas_base.c
21 * Version : 06.600.18.00-rc1 21 * Version : 06.700.06.00-rc1
22 * 22 *
23 * Authors: LSI Corporation 23 * Authors: LSI Corporation
24 * Sreenivas Bagalkote 24 * Sreenivas Bagalkote
@@ -92,6 +92,8 @@ MODULE_DESCRIPTION("LSI MegaRAID SAS Driver");
92 92
93int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); 93int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
94static int megasas_get_pd_list(struct megasas_instance *instance); 94static int megasas_get_pd_list(struct megasas_instance *instance);
95static int megasas_ld_list_query(struct megasas_instance *instance,
96 u8 query_type);
95static int megasas_issue_init_mfi(struct megasas_instance *instance); 97static int megasas_issue_init_mfi(struct megasas_instance *instance);
96static int megasas_register_aen(struct megasas_instance *instance, 98static int megasas_register_aen(struct megasas_instance *instance,
97 u32 seq_num, u32 class_locale_word); 99 u32 seq_num, u32 class_locale_word);
@@ -374,13 +376,11 @@ static int
374megasas_check_reset_xscale(struct megasas_instance *instance, 376megasas_check_reset_xscale(struct megasas_instance *instance,
375 struct megasas_register_set __iomem *regs) 377 struct megasas_register_set __iomem *regs)
376{ 378{
377 u32 consumer;
378 consumer = *instance->consumer;
379 379
380 if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) && 380 if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
381 (*instance->consumer == MEGASAS_ADPRESET_INPROG_SIGN)) { 381 (le32_to_cpu(*instance->consumer) ==
382 MEGASAS_ADPRESET_INPROG_SIGN))
382 return 1; 383 return 1;
383 }
384 return 0; 384 return 0;
385} 385}
386 386
@@ -629,9 +629,10 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
629{ 629{
630 unsigned long flags; 630 unsigned long flags;
631 spin_lock_irqsave(&instance->hba_lock, flags); 631 spin_lock_irqsave(&instance->hba_lock, flags);
632 writel(0, &(regs)->inbound_high_queue_port); 632 writel(upper_32_bits(frame_phys_addr),
633 writel((frame_phys_addr | (frame_count<<1))|1, 633 &(regs)->inbound_high_queue_port);
634 &(regs)->inbound_low_queue_port); 634 writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
635 &(regs)->inbound_low_queue_port);
635 spin_unlock_irqrestore(&instance->hba_lock, flags); 636 spin_unlock_irqrestore(&instance->hba_lock, flags);
636} 637}
637 638
@@ -879,8 +880,8 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
879 880
880 struct megasas_header *frame_hdr = &cmd->frame->hdr; 881 struct megasas_header *frame_hdr = &cmd->frame->hdr;
881 882
882 frame_hdr->cmd_status = 0xFF; 883 frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
883 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 884 frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
884 885
885 /* 886 /*
886 * Issue the frame using inbound queue port 887 * Issue the frame using inbound queue port
@@ -944,10 +945,12 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
944 */ 945 */
945 abort_fr->cmd = MFI_CMD_ABORT; 946 abort_fr->cmd = MFI_CMD_ABORT;
946 abort_fr->cmd_status = 0xFF; 947 abort_fr->cmd_status = 0xFF;
947 abort_fr->flags = 0; 948 abort_fr->flags = cpu_to_le16(0);
948 abort_fr->abort_context = cmd_to_abort->index; 949 abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
949 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; 950 abort_fr->abort_mfi_phys_addr_lo =
950 abort_fr->abort_mfi_phys_addr_hi = 0; 951 cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
952 abort_fr->abort_mfi_phys_addr_hi =
953 cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
951 954
952 cmd->sync_cmd = 1; 955 cmd->sync_cmd = 1;
953 cmd->cmd_status = 0xFF; 956 cmd->cmd_status = 0xFF;
@@ -986,8 +989,8 @@ megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
986 989
987 if (sge_count) { 990 if (sge_count) {
988 scsi_for_each_sg(scp, os_sgl, sge_count, i) { 991 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
989 mfi_sgl->sge32[i].length = sg_dma_len(os_sgl); 992 mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
990 mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl); 993 mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
991 } 994 }
992 } 995 }
993 return sge_count; 996 return sge_count;
@@ -1015,8 +1018,8 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1015 1018
1016 if (sge_count) { 1019 if (sge_count) {
1017 scsi_for_each_sg(scp, os_sgl, sge_count, i) { 1020 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1018 mfi_sgl->sge64[i].length = sg_dma_len(os_sgl); 1021 mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1019 mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl); 1022 mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1020 } 1023 }
1021 } 1024 }
1022 return sge_count; 1025 return sge_count;
@@ -1043,10 +1046,11 @@ megasas_make_sgl_skinny(struct megasas_instance *instance,
1043 1046
1044 if (sge_count) { 1047 if (sge_count) {
1045 scsi_for_each_sg(scp, os_sgl, sge_count, i) { 1048 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1046 mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl); 1049 mfi_sgl->sge_skinny[i].length =
1050 cpu_to_le32(sg_dma_len(os_sgl));
1047 mfi_sgl->sge_skinny[i].phys_addr = 1051 mfi_sgl->sge_skinny[i].phys_addr =
1048 sg_dma_address(os_sgl); 1052 cpu_to_le64(sg_dma_address(os_sgl));
1049 mfi_sgl->sge_skinny[i].flag = 0; 1053 mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1050 } 1054 }
1051 } 1055 }
1052 return sge_count; 1056 return sge_count;
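
[Editor's note] The three SGL builders above (sge32, sge64, skinny) all gain the same treatment: each length/address pair coming out of the mapped scatterlist is stored in the firmware's little-endian layout. The 64-bit case, condensed (not a drop-in replacement; loop counters and setup as in the functions above):

    scsi_for_each_sg(scp, os_sgl, sge_count, i) {
            mfi_sgl->sge64[i].length    = cpu_to_le32(sg_dma_len(os_sgl));
            mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
    }
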
@@ -1155,8 +1159,8 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1155 pthru->cdb_len = scp->cmd_len; 1159 pthru->cdb_len = scp->cmd_len;
1156 pthru->timeout = 0; 1160 pthru->timeout = 0;
1157 pthru->pad_0 = 0; 1161 pthru->pad_0 = 0;
1158 pthru->flags = flags; 1162 pthru->flags = cpu_to_le16(flags);
1159 pthru->data_xfer_len = scsi_bufflen(scp); 1163 pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1160 1164
1161 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); 1165 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1162 1166
@@ -1168,18 +1172,18 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1168 if ((scp->request->timeout / HZ) > 0xFFFF) 1172 if ((scp->request->timeout / HZ) > 0xFFFF)
1169 pthru->timeout = 0xFFFF; 1173 pthru->timeout = 0xFFFF;
1170 else 1174 else
1171 pthru->timeout = scp->request->timeout / HZ; 1175 pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1172 } 1176 }
1173 1177
1174 /* 1178 /*
1175 * Construct SGL 1179 * Construct SGL
1176 */ 1180 */
1177 if (instance->flag_ieee == 1) { 1181 if (instance->flag_ieee == 1) {
1178 pthru->flags |= MFI_FRAME_SGL64; 1182 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1179 pthru->sge_count = megasas_make_sgl_skinny(instance, scp, 1183 pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1180 &pthru->sgl); 1184 &pthru->sgl);
1181 } else if (IS_DMA64) { 1185 } else if (IS_DMA64) {
1182 pthru->flags |= MFI_FRAME_SGL64; 1186 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1183 pthru->sge_count = megasas_make_sgl64(instance, scp, 1187 pthru->sge_count = megasas_make_sgl64(instance, scp,
1184 &pthru->sgl); 1188 &pthru->sgl);
1185 } else 1189 } else
@@ -1196,8 +1200,10 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1196 * Sense info specific 1200 * Sense info specific
1197 */ 1201 */
1198 pthru->sense_len = SCSI_SENSE_BUFFERSIZE; 1202 pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1199 pthru->sense_buf_phys_addr_hi = 0; 1203 pthru->sense_buf_phys_addr_hi =
1200 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; 1204 cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1205 pthru->sense_buf_phys_addr_lo =
1206 cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1201 1207
1202 /* 1208 /*
1203 * Compute the total number of frames this command consumes. FW uses 1209 * Compute the total number of frames this command consumes. FW uses
@@ -1248,7 +1254,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1248 ldio->timeout = 0; 1254 ldio->timeout = 0;
1249 ldio->reserved_0 = 0; 1255 ldio->reserved_0 = 0;
1250 ldio->pad_0 = 0; 1256 ldio->pad_0 = 0;
1251 ldio->flags = flags; 1257 ldio->flags = cpu_to_le16(flags);
1252 ldio->start_lba_hi = 0; 1258 ldio->start_lba_hi = 0;
1253 ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0; 1259 ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1254 1260
@@ -1256,52 +1262,59 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1256 * 6-byte READ(0x08) or WRITE(0x0A) cdb 1262 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1257 */ 1263 */
1258 if (scp->cmd_len == 6) { 1264 if (scp->cmd_len == 6) {
1259 ldio->lba_count = (u32) scp->cmnd[4]; 1265 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1260 ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) | 1266 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1261 ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3]; 1267 ((u32) scp->cmnd[2] << 8) |
1268 (u32) scp->cmnd[3]);
1262 1269
1263 ldio->start_lba_lo &= 0x1FFFFF; 1270 ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1264 } 1271 }
1265 1272
1266 /* 1273 /*
1267 * 10-byte READ(0x28) or WRITE(0x2A) cdb 1274 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1268 */ 1275 */
1269 else if (scp->cmd_len == 10) { 1276 else if (scp->cmd_len == 10) {
1270 ldio->lba_count = (u32) scp->cmnd[8] | 1277 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1271 ((u32) scp->cmnd[7] << 8); 1278 ((u32) scp->cmnd[7] << 8));
1272 ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) | 1279 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1273 ((u32) scp->cmnd[3] << 16) | 1280 ((u32) scp->cmnd[3] << 16) |
1274 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; 1281 ((u32) scp->cmnd[4] << 8) |
1282 (u32) scp->cmnd[5]);
1275 } 1283 }
1276 1284
1277 /* 1285 /*
1278 * 12-byte READ(0xA8) or WRITE(0xAA) cdb 1286 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1279 */ 1287 */
1280 else if (scp->cmd_len == 12) { 1288 else if (scp->cmd_len == 12) {
1281 ldio->lba_count = ((u32) scp->cmnd[6] << 24) | 1289 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1282 ((u32) scp->cmnd[7] << 16) | 1290 ((u32) scp->cmnd[7] << 16) |
1283 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; 1291 ((u32) scp->cmnd[8] << 8) |
1292 (u32) scp->cmnd[9]);
1284 1293
1285 ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) | 1294 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1286 ((u32) scp->cmnd[3] << 16) | 1295 ((u32) scp->cmnd[3] << 16) |
1287 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; 1296 ((u32) scp->cmnd[4] << 8) |
1297 (u32) scp->cmnd[5]);
1288 } 1298 }
1289 1299
1290 /* 1300 /*
1291 * 16-byte READ(0x88) or WRITE(0x8A) cdb 1301 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1292 */ 1302 */
1293 else if (scp->cmd_len == 16) { 1303 else if (scp->cmd_len == 16) {
1294 ldio->lba_count = ((u32) scp->cmnd[10] << 24) | 1304 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1295 ((u32) scp->cmnd[11] << 16) | 1305 ((u32) scp->cmnd[11] << 16) |
1296 ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13]; 1306 ((u32) scp->cmnd[12] << 8) |
1307 (u32) scp->cmnd[13]);
1297 1308
1298 ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) | 1309 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1299 ((u32) scp->cmnd[7] << 16) | 1310 ((u32) scp->cmnd[7] << 16) |
1300 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; 1311 ((u32) scp->cmnd[8] << 8) |
1312 (u32) scp->cmnd[9]);
1301 1313
1302 ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) | 1314 ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1303 ((u32) scp->cmnd[3] << 16) | 1315 ((u32) scp->cmnd[3] << 16) |
1304 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; 1316 ((u32) scp->cmnd[4] << 8) |
1317 (u32) scp->cmnd[5]);
1305 1318
1306 } 1319 }
1307 1320
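
[Editor's note] Each CDB branch above performs the big-endian field extraction the SCSI spec defines, now wrapped in cpu_to_le32() for the firmware. As a hedged aside, the 10-byte case could equivalently be written with the kernel's unaligned helpers; the shift chains in the diff compute the same values:

    #include <asm/unaligned.h>

    /* READ(10)/WRITE(10): LBA in bytes 2..5, length in bytes 7..8. */
    u32 lba   = get_unaligned_be32(&scp->cmnd[2]);
    u16 count = get_unaligned_be16(&scp->cmnd[7]);

    ldio->start_lba_lo = cpu_to_le32(lba);
    ldio->lba_count    = cpu_to_le32(count);
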
@@ -1309,11 +1322,11 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1309 * Construct SGL 1322 * Construct SGL
1310 */ 1323 */
1311 if (instance->flag_ieee) { 1324 if (instance->flag_ieee) {
1312 ldio->flags |= MFI_FRAME_SGL64; 1325 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1313 ldio->sge_count = megasas_make_sgl_skinny(instance, scp, 1326 ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1314 &ldio->sgl); 1327 &ldio->sgl);
1315 } else if (IS_DMA64) { 1328 } else if (IS_DMA64) {
1316 ldio->flags |= MFI_FRAME_SGL64; 1329 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1317 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); 1330 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1318 } else 1331 } else
1319 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); 1332 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
@@ -1329,7 +1342,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1329 */ 1342 */
1330 ldio->sense_len = SCSI_SENSE_BUFFERSIZE; 1343 ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1331 ldio->sense_buf_phys_addr_hi = 0; 1344 ldio->sense_buf_phys_addr_hi = 0;
1332 ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr; 1345 ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1333 1346
1334 /* 1347 /*
1335 * Compute the total number of frames this command consumes. FW uses 1348 * Compute the total number of frames this command consumes. FW uses
@@ -1400,20 +1413,32 @@ megasas_dump_pending_frames(struct megasas_instance *instance)
1400 ldio = (struct megasas_io_frame *)cmd->frame; 1413 ldio = (struct megasas_io_frame *)cmd->frame;
1401 mfi_sgl = &ldio->sgl; 1414 mfi_sgl = &ldio->sgl;
1402 sgcount = ldio->sge_count; 1415 sgcount = ldio->sge_count;
1403 printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no, cmd->frame_count,ldio->cmd,ldio->target_id, ldio->start_lba_lo,ldio->start_lba_hi,ldio->sense_buf_phys_addr_lo,sgcount); 1416 printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1417 " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1418 instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1419 le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1420 le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1404 } 1421 }
1405 else { 1422 else {
1406 pthru = (struct megasas_pthru_frame *) cmd->frame; 1423 pthru = (struct megasas_pthru_frame *) cmd->frame;
1407 mfi_sgl = &pthru->sgl; 1424 mfi_sgl = &pthru->sgl;
1408 sgcount = pthru->sge_count; 1425 sgcount = pthru->sge_count;
1409 printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no,cmd->frame_count,pthru->cmd,pthru->target_id,pthru->lun,pthru->cdb_len , pthru->data_xfer_len,pthru->sense_buf_phys_addr_lo,sgcount); 1426 printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1427 "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1428 instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1429 pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1430 le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1410 } 1431 }
1411 if(megasas_dbg_lvl & MEGASAS_DBG_LVL){ 1432 if(megasas_dbg_lvl & MEGASAS_DBG_LVL){
1412 for (n = 0; n < sgcount; n++){ 1433 for (n = 0; n < sgcount; n++){
1413 if (IS_DMA64) 1434 if (IS_DMA64)
1414 printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%08lx ",mfi_sgl->sge64[n].length , (unsigned long)mfi_sgl->sge64[n].phys_addr) ; 1435 printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%llx ",
1436 le32_to_cpu(mfi_sgl->sge64[n].length),
1437 le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1415 else 1438 else
1416 printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",mfi_sgl->sge32[n].length , mfi_sgl->sge32[n].phys_addr) ; 1439 printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",
1440 le32_to_cpu(mfi_sgl->sge32[n].length),
1441 le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1417 } 1442 }
1418 } 1443 }
1419 printk(KERN_ERR "\n"); 1444 printk(KERN_ERR "\n");
@@ -1674,11 +1699,11 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
1674 1699
1675 spin_lock_irqsave(&instance->completion_lock, flags); 1700 spin_lock_irqsave(&instance->completion_lock, flags);
1676 1701
1677 producer = *instance->producer; 1702 producer = le32_to_cpu(*instance->producer);
1678 consumer = *instance->consumer; 1703 consumer = le32_to_cpu(*instance->consumer);
1679 1704
1680 while (consumer != producer) { 1705 while (consumer != producer) {
1681 context = instance->reply_queue[consumer]; 1706 context = le32_to_cpu(instance->reply_queue[consumer]);
1682 if (context >= instance->max_fw_cmds) { 1707 if (context >= instance->max_fw_cmds) {
1683 printk(KERN_ERR "Unexpected context value %x\n", 1708 printk(KERN_ERR "Unexpected context value %x\n",
1684 context); 1709 context);
@@ -1695,7 +1720,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
1695 } 1720 }
1696 } 1721 }
1697 1722
1698 *instance->consumer = producer; 1723 *instance->consumer = cpu_to_le32(producer);
1699 1724
1700 spin_unlock_irqrestore(&instance->completion_lock, flags); 1725 spin_unlock_irqrestore(&instance->completion_lock, flags);
1701 1726
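
[Editor's note] The DPC hunk converts every shared-memory load in the reply loop: the producer/consumer indices and the reply-queue entries are firmware-owned little-endian words. The loop's shape, condensed with validation and the completion call elided into a comment:

    producer = le32_to_cpu(*instance->producer);
    consumer = le32_to_cpu(*instance->consumer);

    while (consumer != producer) {
            context = le32_to_cpu(instance->reply_queue[consumer]);
            /* ... validate context and complete cmd_list[context] ... */
            if (++consumer == (instance->max_fw_cmds + 1))
                    consumer = 0;
    }
    *instance->consumer = cpu_to_le32(producer);
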
@@ -1716,7 +1741,7 @@ void megasas_do_ocr(struct megasas_instance *instance)
1716 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 1741 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
1717 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 1742 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
1718 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 1743 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
1719 *instance->consumer = MEGASAS_ADPRESET_INPROG_SIGN; 1744 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
1720 } 1745 }
1721 instance->instancet->disable_intr(instance); 1746 instance->instancet->disable_intr(instance);
1722 instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; 1747 instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
@@ -2186,6 +2211,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
2186 struct megasas_header *hdr = &cmd->frame->hdr; 2211 struct megasas_header *hdr = &cmd->frame->hdr;
2187 unsigned long flags; 2212 unsigned long flags;
2188 struct fusion_context *fusion = instance->ctrl_context; 2213 struct fusion_context *fusion = instance->ctrl_context;
2214 u32 opcode;
2189 2215
2190 /* flag for the retry reset */ 2216 /* flag for the retry reset */
2191 cmd->retry_for_fw_reset = 0; 2217 cmd->retry_for_fw_reset = 0;
@@ -2287,9 +2313,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
2287 case MFI_CMD_SMP: 2313 case MFI_CMD_SMP:
2288 case MFI_CMD_STP: 2314 case MFI_CMD_STP:
2289 case MFI_CMD_DCMD: 2315 case MFI_CMD_DCMD:
2316 opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
2290 /* Check for LD map update */ 2317 /* Check for LD map update */
2291 if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) && 2318 if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
2292 (cmd->frame->dcmd.mbox.b[1] == 1)) { 2319 && (cmd->frame->dcmd.mbox.b[1] == 1)) {
2293 fusion->fast_path_io = 0; 2320 fusion->fast_path_io = 0;
2294 spin_lock_irqsave(instance->host->host_lock, flags); 2321 spin_lock_irqsave(instance->host->host_lock, flags);
2295 if (cmd->frame->hdr.cmd_status != 0) { 2322 if (cmd->frame->hdr.cmd_status != 0) {
@@ -2323,8 +2350,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
2323 flags); 2350 flags);
2324 break; 2351 break;
2325 } 2352 }
2326 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO || 2353 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
2327 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) { 2354 opcode == MR_DCMD_CTRL_EVENT_GET) {
2328 spin_lock_irqsave(&poll_aen_lock, flags); 2355 spin_lock_irqsave(&poll_aen_lock, flags);
2329 megasas_poll_wait_aen = 0; 2356 megasas_poll_wait_aen = 0;
2330 spin_unlock_irqrestore(&poll_aen_lock, flags); 2357 spin_unlock_irqrestore(&poll_aen_lock, flags);
@@ -2333,7 +2360,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
2333 /* 2360 /*
2334 * See if got an event notification 2361 * See if got an event notification
2335 */ 2362 */
2336 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) 2363 if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
2337 megasas_service_aen(instance, cmd); 2364 megasas_service_aen(instance, cmd);
2338 else 2365 else
2339 megasas_complete_int_cmd(instance, cmd); 2366 megasas_complete_int_cmd(instance, cmd);
@@ -2606,7 +2633,7 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
2606 PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 2633 PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2607 2634
2608 *instance->consumer = 2635 *instance->consumer =
2609 MEGASAS_ADPRESET_INPROG_SIGN; 2636 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2610 } 2637 }
2611 2638
2612 2639
@@ -2983,7 +3010,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
2983 } 3010 }
2984 3011
2985 memset(cmd->frame, 0, total_sz); 3012 memset(cmd->frame, 0, total_sz);
2986 cmd->frame->io.context = cmd->index; 3013 cmd->frame->io.context = cpu_to_le32(cmd->index);
2987 cmd->frame->io.pad_0 = 0; 3014 cmd->frame->io.pad_0 = 0;
2988 if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) && 3015 if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
2989 (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) && 3016 (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
@@ -3143,13 +3170,13 @@ megasas_get_pd_list(struct megasas_instance *instance)
3143 dcmd->cmd = MFI_CMD_DCMD; 3170 dcmd->cmd = MFI_CMD_DCMD;
3144 dcmd->cmd_status = 0xFF; 3171 dcmd->cmd_status = 0xFF;
3145 dcmd->sge_count = 1; 3172 dcmd->sge_count = 1;
3146 dcmd->flags = MFI_FRAME_DIR_READ; 3173 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
3147 dcmd->timeout = 0; 3174 dcmd->timeout = 0;
3148 dcmd->pad_0 = 0; 3175 dcmd->pad_0 = 0;
3149 dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST); 3176 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
3150 dcmd->opcode = MR_DCMD_PD_LIST_QUERY; 3177 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
3151 dcmd->sgl.sge32[0].phys_addr = ci_h; 3178 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
3152 dcmd->sgl.sge32[0].length = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST); 3179 dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
3153 3180
3154 if (!megasas_issue_polled(instance, cmd)) { 3181 if (!megasas_issue_polled(instance, cmd)) {
3155 ret = 0; 3182 ret = 0;
@@ -3164,16 +3191,16 @@ megasas_get_pd_list(struct megasas_instance *instance)
3164 pd_addr = ci->addr; 3191 pd_addr = ci->addr;
3165 3192
3166 if ( ret == 0 && 3193 if ( ret == 0 &&
3167 (ci->count < 3194 (le32_to_cpu(ci->count) <
3168 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) { 3195 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
3169 3196
3170 memset(instance->pd_list, 0, 3197 memset(instance->pd_list, 0,
3171 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 3198 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
3172 3199
3173 for (pd_index = 0; pd_index < ci->count; pd_index++) { 3200 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
3174 3201
3175 instance->pd_list[pd_addr->deviceId].tid = 3202 instance->pd_list[pd_addr->deviceId].tid =
3176 pd_addr->deviceId; 3203 le16_to_cpu(pd_addr->deviceId);
3177 instance->pd_list[pd_addr->deviceId].driveType = 3204 instance->pd_list[pd_addr->deviceId].driveType =
3178 pd_addr->scsiDevType; 3205 pd_addr->scsiDevType;
3179 instance->pd_list[pd_addr->deviceId].driveState = 3206 instance->pd_list[pd_addr->deviceId].driveState =
@@ -3207,6 +3234,7 @@ megasas_get_ld_list(struct megasas_instance *instance)
3207 struct megasas_dcmd_frame *dcmd; 3234 struct megasas_dcmd_frame *dcmd;
3208 struct MR_LD_LIST *ci; 3235 struct MR_LD_LIST *ci;
3209 dma_addr_t ci_h = 0; 3236 dma_addr_t ci_h = 0;
3237 u32 ld_count;
3210 3238
3211 cmd = megasas_get_cmd(instance); 3239 cmd = megasas_get_cmd(instance);
3212 3240
@@ -3233,12 +3261,12 @@ megasas_get_ld_list(struct megasas_instance *instance)
3233 dcmd->cmd = MFI_CMD_DCMD; 3261 dcmd->cmd = MFI_CMD_DCMD;
3234 dcmd->cmd_status = 0xFF; 3262 dcmd->cmd_status = 0xFF;
3235 dcmd->sge_count = 1; 3263 dcmd->sge_count = 1;
3236 dcmd->flags = MFI_FRAME_DIR_READ; 3264 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
3237 dcmd->timeout = 0; 3265 dcmd->timeout = 0;
3238 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST); 3266 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
3239 dcmd->opcode = MR_DCMD_LD_GET_LIST; 3267 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
3240 dcmd->sgl.sge32[0].phys_addr = ci_h; 3268 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
3241 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST); 3269 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
3242 dcmd->pad_0 = 0; 3270 dcmd->pad_0 = 0;
3243 3271
3244 if (!megasas_issue_polled(instance, cmd)) { 3272 if (!megasas_issue_polled(instance, cmd)) {
@@ -3247,12 +3275,14 @@ megasas_get_ld_list(struct megasas_instance *instance)
3247 ret = -1; 3275 ret = -1;
3248 } 3276 }
3249 3277
3278 ld_count = le32_to_cpu(ci->ldCount);
3279
3250 /* the following function will get the instance PD LIST */ 3280 /* the following function will get the instance PD LIST */
3251 3281
3252 if ((ret == 0) && (ci->ldCount <= MAX_LOGICAL_DRIVES)) { 3282 if ((ret == 0) && (ld_count <= MAX_LOGICAL_DRIVES)) {
3253 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 3283 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
3254 3284
3255 for (ld_index = 0; ld_index < ci->ldCount; ld_index++) { 3285 for (ld_index = 0; ld_index < ld_count; ld_index++) {
3256 if (ci->ldList[ld_index].state != 0) { 3286 if (ci->ldList[ld_index].state != 0) {
3257 ids = ci->ldList[ld_index].ref.targetId; 3287 ids = ci->ldList[ld_index].ref.targetId;
3258 instance->ld_ids[ids] = 3288 instance->ld_ids[ids] =
@@ -3271,6 +3301,87 @@ megasas_get_ld_list(struct megasas_instance *instance)
3271} 3301}
3272 3302
3273/** 3303/**
3304 * megasas_ld_list_query - Returns FW's ld_list structure
3305 * @instance: Adapter soft state
 3306 * @query_type: ld_list query type
 3307 *
 3308 * Issues an internal command (DCMD) to get the FW's logical drive
 3309 * list structure. This information is mainly used to find out the
 3310 * LDs exposed to the host by the FW.
3311 */
3312static int
3313megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
3314{
3315 int ret = 0, ld_index = 0, ids = 0;
3316 struct megasas_cmd *cmd;
3317 struct megasas_dcmd_frame *dcmd;
3318 struct MR_LD_TARGETID_LIST *ci;
3319 dma_addr_t ci_h = 0;
3320 u32 tgtid_count;
3321
3322 cmd = megasas_get_cmd(instance);
3323
3324 if (!cmd) {
3325 printk(KERN_WARNING
3326 "megasas:(megasas_ld_list_query): Failed to get cmd\n");
3327 return -ENOMEM;
3328 }
3329
3330 dcmd = &cmd->frame->dcmd;
3331
3332 ci = pci_alloc_consistent(instance->pdev,
3333 sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
3334
3335 if (!ci) {
3336 printk(KERN_WARNING
3337 "megasas: Failed to alloc mem for ld_list_query\n");
3338 megasas_return_cmd(instance, cmd);
3339 return -ENOMEM;
3340 }
3341
3342 memset(ci, 0, sizeof(*ci));
3343 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3344
3345 dcmd->mbox.b[0] = query_type;
3346
3347 dcmd->cmd = MFI_CMD_DCMD;
3348 dcmd->cmd_status = 0xFF;
3349 dcmd->sge_count = 1;
3350 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
3351 dcmd->timeout = 0;
3352 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
3353 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
3354 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
3355 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
3356 dcmd->pad_0 = 0;
3357
3358 if (!megasas_issue_polled(instance, cmd) && !dcmd->cmd_status) {
3359 ret = 0;
3360 } else {
3361 /* On failure, call older LD list DCMD */
3362 ret = 1;
3363 }
3364
3365 tgtid_count = le32_to_cpu(ci->count);
3366
3367 if ((ret == 0) && (tgtid_count <= (MAX_LOGICAL_DRIVES))) {
3368 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
3369 for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
3370 ids = ci->targetId[ld_index];
3371 instance->ld_ids[ids] = ci->targetId[ld_index];
3372 }
3373
3374 }
3375
3376 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
3377 ci, ci_h);
3378
3379 megasas_return_cmd(instance, cmd);
3380
3381 return ret;
3382}
3383
3384/**
3274 * megasas_get_controller_info - Returns FW's controller structure 3385 * megasas_get_controller_info - Returns FW's controller structure
3275 * @instance: Adapter soft state 3386 * @instance: Adapter soft state
3276 * @ctrl_info: Controller information structure 3387 * @ctrl_info: Controller information structure
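
[Editor's note] A short usage note on the new helper: its non-zero return doubles as a fallback signal, so callers chain it with the legacy full-list DCMD, which is exactly the pattern the init and AEN-polling hunks below adopt:

    if (megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
            megasas_get_ld_list(instance);  /* firmware lacks the query DCMD */
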
@@ -3313,13 +3424,13 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
3313 dcmd->cmd = MFI_CMD_DCMD; 3424 dcmd->cmd = MFI_CMD_DCMD;
3314 dcmd->cmd_status = 0xFF; 3425 dcmd->cmd_status = 0xFF;
3315 dcmd->sge_count = 1; 3426 dcmd->sge_count = 1;
3316 dcmd->flags = MFI_FRAME_DIR_READ; 3427 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
3317 dcmd->timeout = 0; 3428 dcmd->timeout = 0;
3318 dcmd->pad_0 = 0; 3429 dcmd->pad_0 = 0;
3319 dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info); 3430 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
3320 dcmd->opcode = MR_DCMD_CTRL_GET_INFO; 3431 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
3321 dcmd->sgl.sge32[0].phys_addr = ci_h; 3432 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
3322 dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info); 3433 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
3323 3434
3324 if (!megasas_issue_polled(instance, cmd)) { 3435 if (!megasas_issue_polled(instance, cmd)) {
3325 ret = 0; 3436 ret = 0;
@@ -3375,17 +3486,20 @@ megasas_issue_init_mfi(struct megasas_instance *instance)
3375 memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); 3486 memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
3376 init_frame->context = context; 3487 init_frame->context = context;
3377 3488
3378 initq_info->reply_queue_entries = instance->max_fw_cmds + 1; 3489 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
3379 initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h; 3490 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
3380 3491
3381 initq_info->producer_index_phys_addr_lo = instance->producer_h; 3492 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
3382 initq_info->consumer_index_phys_addr_lo = instance->consumer_h; 3493 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
3383 3494
3384 init_frame->cmd = MFI_CMD_INIT; 3495 init_frame->cmd = MFI_CMD_INIT;
3385 init_frame->cmd_status = 0xFF; 3496 init_frame->cmd_status = 0xFF;
3386 init_frame->queue_info_new_phys_addr_lo = initq_info_h; 3497 init_frame->queue_info_new_phys_addr_lo =
3498 cpu_to_le32(lower_32_bits(initq_info_h));
3499 init_frame->queue_info_new_phys_addr_hi =
3500 cpu_to_le32(upper_32_bits(initq_info_h));
3387 3501
3388 init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info); 3502 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
3389 3503
3390 /* 3504 /*
3391 * disable the intr before firing the init frame to FW 3505 * disable the intr before firing the init frame to FW
@@ -3648,7 +3762,9 @@ static int megasas_init_fw(struct megasas_instance *instance)
3648 megasas_get_pd_list(instance); 3762 megasas_get_pd_list(instance);
3649 3763
3650 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 3764 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
3651 megasas_get_ld_list(instance); 3765 if (megasas_ld_list_query(instance,
3766 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
3767 megasas_get_ld_list(instance);
3652 3768
3653 ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL); 3769 ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
3654 3770
@@ -3665,8 +3781,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
3665 if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) { 3781 if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {
3666 3782
3667 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * 3783 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
3668 ctrl_info->max_strips_per_io; 3784 le16_to_cpu(ctrl_info->max_strips_per_io);
3669 max_sectors_2 = ctrl_info->max_request_size; 3785 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
3670 3786
3671 tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2); 3787 tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
3672 3788
@@ -3675,14 +3791,18 @@ static int megasas_init_fw(struct megasas_instance *instance)
3675 instance->is_imr = 0; 3791 instance->is_imr = 0;
3676 dev_info(&instance->pdev->dev, "Controller type: MR," 3792 dev_info(&instance->pdev->dev, "Controller type: MR,"
3677 "Memory size is: %dMB\n", 3793 "Memory size is: %dMB\n",
3678 ctrl_info->memory_size); 3794 le16_to_cpu(ctrl_info->memory_size));
3679 } else { 3795 } else {
3680 instance->is_imr = 1; 3796 instance->is_imr = 1;
3681 dev_info(&instance->pdev->dev, 3797 dev_info(&instance->pdev->dev,
3682 "Controller type: iMR\n"); 3798 "Controller type: iMR\n");
3683 } 3799 }
 3800 /* OnOffProperties are converted to CPU byte order */
 3801 le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
3684 instance->disableOnlineCtrlReset = 3802 instance->disableOnlineCtrlReset =
3685 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; 3803 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
 3804 /* adapterOperations2 are converted to CPU byte order */
 3805 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
3686 instance->UnevenSpanSupport = 3806 instance->UnevenSpanSupport =
3687 ctrl_info->adapterOperations2.supportUnevenSpans; 3807 ctrl_info->adapterOperations2.supportUnevenSpans;
3688 if (instance->UnevenSpanSupport) { 3808 if (instance->UnevenSpanSupport) {
@@ -3696,7 +3816,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
3696 3816
3697 } 3817 }
3698 } 3818 }
3699
3700 instance->max_sectors_per_req = instance->max_num_sge * 3819 instance->max_sectors_per_req = instance->max_num_sge *
3701 PAGE_SIZE / 512; 3820 PAGE_SIZE / 512;
3702 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) 3821 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
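
[Editor's note] The two le32_to_cpus() calls above do an in-place swap: on big-endian hosts the 32-bit word is byte-reversed in memory, on little-endian hosts the call is a no-op, and either way the endianness-aware bitfield structs defined in the header can then be read directly. A minimal illustration, with names as in the hunk:

    le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
    /* Bitfields are now valid in CPU byte order. */
    instance->UnevenSpanSupport =
            ctrl_info->adapterOperations2.supportUnevenSpans;
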
@@ -3802,20 +3921,24 @@ megasas_get_seq_num(struct megasas_instance *instance,
3802 dcmd->cmd = MFI_CMD_DCMD; 3921 dcmd->cmd = MFI_CMD_DCMD;
3803 dcmd->cmd_status = 0x0; 3922 dcmd->cmd_status = 0x0;
3804 dcmd->sge_count = 1; 3923 dcmd->sge_count = 1;
3805 dcmd->flags = MFI_FRAME_DIR_READ; 3924 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
3806 dcmd->timeout = 0; 3925 dcmd->timeout = 0;
3807 dcmd->pad_0 = 0; 3926 dcmd->pad_0 = 0;
3808 dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info); 3927 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
3809 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; 3928 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
3810 dcmd->sgl.sge32[0].phys_addr = el_info_h; 3929 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
3811 dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info); 3930 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
3812 3931
3813 megasas_issue_blocked_cmd(instance, cmd); 3932 megasas_issue_blocked_cmd(instance, cmd);
3814 3933
3815 /* 3934 /*
3816 * Copy the data back into callers buffer 3935 * Copy the data back into callers buffer
3817 */ 3936 */
3818 memcpy(eli, el_info, sizeof(struct megasas_evt_log_info)); 3937 eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num);
3938 eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num);
3939 eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num);
3940 eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num);
3941 eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num);
3819 3942
3820 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), 3943 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
3821 el_info, el_info_h); 3944 el_info, el_info_h);
@@ -3862,6 +3985,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
3862 if (instance->aen_cmd) { 3985 if (instance->aen_cmd) {
3863 3986
3864 prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1]; 3987 prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
3988 prev_aen.members.locale = le16_to_cpu(prev_aen.members.locale);
3865 3989
3866 /* 3990 /*
3867 * A class whose enum value is smaller is inclusive of all 3991 * A class whose enum value is smaller is inclusive of all
@@ -3874,7 +3998,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
3874 * values 3998 * values
3875 */ 3999 */
3876 if ((prev_aen.members.class <= curr_aen.members.class) && 4000 if ((prev_aen.members.class <= curr_aen.members.class) &&
3877 !((prev_aen.members.locale & curr_aen.members.locale) ^ 4001 !((le16_to_cpu(prev_aen.members.locale) & curr_aen.members.locale) ^
3878 curr_aen.members.locale)) { 4002 curr_aen.members.locale)) {
3879 /* 4003 /*
3880 * Previously issued event registration includes 4004 * Previously issued event registration includes
@@ -3882,7 +4006,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
3882 */ 4006 */
3883 return 0; 4007 return 0;
3884 } else { 4008 } else {
3885 curr_aen.members.locale |= prev_aen.members.locale; 4009 curr_aen.members.locale |= le16_to_cpu(prev_aen.members.locale);
3886 4010
3887 if (prev_aen.members.class < curr_aen.members.class) 4011 if (prev_aen.members.class < curr_aen.members.class)
3888 curr_aen.members.class = prev_aen.members.class; 4012 curr_aen.members.class = prev_aen.members.class;
@@ -3917,16 +4041,16 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
3917 dcmd->cmd = MFI_CMD_DCMD; 4041 dcmd->cmd = MFI_CMD_DCMD;
3918 dcmd->cmd_status = 0x0; 4042 dcmd->cmd_status = 0x0;
3919 dcmd->sge_count = 1; 4043 dcmd->sge_count = 1;
3920 dcmd->flags = MFI_FRAME_DIR_READ; 4044 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
3921 dcmd->timeout = 0; 4045 dcmd->timeout = 0;
3922 dcmd->pad_0 = 0; 4046 dcmd->pad_0 = 0;
4047 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
4048 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
4049 dcmd->mbox.w[0] = cpu_to_le32(seq_num);
3923 instance->last_seq_num = seq_num; 4050 instance->last_seq_num = seq_num;
3924 dcmd->data_xfer_len = sizeof(struct megasas_evt_detail); 4051 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
3925 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; 4052 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h);
3926 dcmd->mbox.w[0] = seq_num; 4053 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail));
3927 dcmd->mbox.w[1] = curr_aen.word;
3928 dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h;
3929 dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail);
3930 4054
3931 if (instance->aen_cmd != NULL) { 4055 if (instance->aen_cmd != NULL) {
3932 megasas_return_cmd(instance, cmd); 4056 megasas_return_cmd(instance, cmd);
@@ -3972,8 +4096,9 @@ static int megasas_start_aen(struct megasas_instance *instance)
3972 class_locale.members.locale = MR_EVT_LOCALE_ALL; 4096 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3973 class_locale.members.class = MR_EVT_CLASS_DEBUG; 4097 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3974 4098
3975 return megasas_register_aen(instance, eli.newest_seq_num + 1, 4099 return megasas_register_aen(instance,
3976 class_locale.word); 4100 le32_to_cpu(eli.newest_seq_num) + 1,
4101 class_locale.word);
3977} 4102}
3978 4103
3979/** 4104/**
@@ -4068,6 +4193,7 @@ megasas_set_dma_mask(struct pci_dev *pdev)
4068 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 4193 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
4069 goto fail_set_dma_mask; 4194 goto fail_set_dma_mask;
4070 } 4195 }
4196
4071 return 0; 4197 return 0;
4072 4198
4073fail_set_dma_mask: 4199fail_set_dma_mask:
@@ -4386,11 +4512,11 @@ static void megasas_flush_cache(struct megasas_instance *instance)
4386 dcmd->cmd = MFI_CMD_DCMD; 4512 dcmd->cmd = MFI_CMD_DCMD;
4387 dcmd->cmd_status = 0x0; 4513 dcmd->cmd_status = 0x0;
4388 dcmd->sge_count = 0; 4514 dcmd->sge_count = 0;
4389 dcmd->flags = MFI_FRAME_DIR_NONE; 4515 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
4390 dcmd->timeout = 0; 4516 dcmd->timeout = 0;
4391 dcmd->pad_0 = 0; 4517 dcmd->pad_0 = 0;
4392 dcmd->data_xfer_len = 0; 4518 dcmd->data_xfer_len = 0;
4393 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; 4519 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
4394 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 4520 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
4395 4521
4396 megasas_issue_blocked_cmd(instance, cmd); 4522 megasas_issue_blocked_cmd(instance, cmd);
@@ -4431,11 +4557,11 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
4431 dcmd->cmd = MFI_CMD_DCMD; 4557 dcmd->cmd = MFI_CMD_DCMD;
4432 dcmd->cmd_status = 0x0; 4558 dcmd->cmd_status = 0x0;
4433 dcmd->sge_count = 0; 4559 dcmd->sge_count = 0;
4434 dcmd->flags = MFI_FRAME_DIR_NONE; 4560 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
4435 dcmd->timeout = 0; 4561 dcmd->timeout = 0;
4436 dcmd->pad_0 = 0; 4562 dcmd->pad_0 = 0;
4437 dcmd->data_xfer_len = 0; 4563 dcmd->data_xfer_len = 0;
4438 dcmd->opcode = opcode; 4564 dcmd->opcode = cpu_to_le32(opcode);
4439 4565
4440 megasas_issue_blocked_cmd(instance, cmd); 4566 megasas_issue_blocked_cmd(instance, cmd);
4441 4567
@@ -4850,10 +4976,11 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
4850 * alone separately 4976 * alone separately
4851 */ 4977 */
4852 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); 4978 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
4853 cmd->frame->hdr.context = cmd->index; 4979 cmd->frame->hdr.context = cpu_to_le32(cmd->index);
4854 cmd->frame->hdr.pad_0 = 0; 4980 cmd->frame->hdr.pad_0 = 0;
4855 cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 | 4981 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
4856 MFI_FRAME_SENSE64); 4982 MFI_FRAME_SGL64 |
4983 MFI_FRAME_SENSE64));
4857 4984
4858 /* 4985 /*
4859 * The management interface between applications and the fw uses 4986 * The management interface between applications and the fw uses
@@ -4887,8 +5014,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
4887 * We don't change the dma_coherent_mask, so 5014 * We don't change the dma_coherent_mask, so
4888 * pci_alloc_consistent only returns 32bit addresses 5015 * pci_alloc_consistent only returns 32bit addresses
4889 */ 5016 */
4890 kern_sge32[i].phys_addr = (u32) buf_handle; 5017 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
4891 kern_sge32[i].length = ioc->sgl[i].iov_len; 5018 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
4892 5019
4893 /* 5020 /*
4894 * We created a kernel buffer corresponding to the 5021 * We created a kernel buffer corresponding to the
@@ -4911,7 +5038,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
4911 5038
4912 sense_ptr = 5039 sense_ptr =
4913 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); 5040 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
4914 *sense_ptr = sense_handle; 5041 *sense_ptr = cpu_to_le32(sense_handle);
4915 } 5042 }
4916 5043
4917 /* 5044 /*
@@ -4971,9 +5098,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
4971 for (i = 0; i < ioc->sge_count; i++) { 5098 for (i = 0; i < ioc->sge_count; i++) {
4972 if (kbuff_arr[i]) 5099 if (kbuff_arr[i])
4973 dma_free_coherent(&instance->pdev->dev, 5100 dma_free_coherent(&instance->pdev->dev,
4974 kern_sge32[i].length, 5101 le32_to_cpu(kern_sge32[i].length),
4975 kbuff_arr[i], 5102 kbuff_arr[i],
4976 kern_sge32[i].phys_addr); 5103 le32_to_cpu(kern_sge32[i].phys_addr));
4977 } 5104 }
4978 5105
4979 megasas_return_cmd(instance, cmd); 5106 megasas_return_cmd(instance, cmd);
@@ -5327,7 +5454,7 @@ megasas_aen_polling(struct work_struct *work)
5327 host = instance->host; 5454 host = instance->host;
5328 if (instance->evt_detail) { 5455 if (instance->evt_detail) {
5329 5456
5330 switch (instance->evt_detail->code) { 5457 switch (le32_to_cpu(instance->evt_detail->code)) {
5331 case MR_EVT_PD_INSERTED: 5458 case MR_EVT_PD_INSERTED:
5332 if (megasas_get_pd_list(instance) == 0) { 5459 if (megasas_get_pd_list(instance) == 0) {
5333 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { 5460 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
@@ -5389,7 +5516,9 @@ megasas_aen_polling(struct work_struct *work)
5389 case MR_EVT_LD_OFFLINE: 5516 case MR_EVT_LD_OFFLINE:
5390 case MR_EVT_CFG_CLEARED: 5517 case MR_EVT_CFG_CLEARED:
5391 case MR_EVT_LD_DELETED: 5518 case MR_EVT_LD_DELETED:
5392 megasas_get_ld_list(instance); 5519 if (megasas_ld_list_query(instance,
5520 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
5521 megasas_get_ld_list(instance);
5393 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { 5522 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
5394 for (j = 0; 5523 for (j = 0;
5395 j < MEGASAS_MAX_DEV_PER_CHANNEL; 5524 j < MEGASAS_MAX_DEV_PER_CHANNEL;
@@ -5399,7 +5528,7 @@ megasas_aen_polling(struct work_struct *work)
5399 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 5528 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
5400 5529
5401 sdev1 = scsi_device_lookup(host, 5530 sdev1 = scsi_device_lookup(host,
5402 i + MEGASAS_MAX_LD_CHANNELS, 5531 MEGASAS_MAX_PD_CHANNELS + i,
5403 j, 5532 j,
5404 0); 5533 0);
5405 5534
@@ -5418,7 +5547,9 @@ megasas_aen_polling(struct work_struct *work)
5418 doscan = 0; 5547 doscan = 0;
5419 break; 5548 break;
5420 case MR_EVT_LD_CREATED: 5549 case MR_EVT_LD_CREATED:
5421 megasas_get_ld_list(instance); 5550 if (megasas_ld_list_query(instance,
5551 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
5552 megasas_get_ld_list(instance);
5422 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { 5553 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
5423 for (j = 0; 5554 for (j = 0;
5424 j < MEGASAS_MAX_DEV_PER_CHANNEL; 5555 j < MEGASAS_MAX_DEV_PER_CHANNEL;
@@ -5427,14 +5558,14 @@ megasas_aen_polling(struct work_struct *work)
5427 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 5558 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
5428 5559
5429 sdev1 = scsi_device_lookup(host, 5560 sdev1 = scsi_device_lookup(host,
5430 i+MEGASAS_MAX_LD_CHANNELS, 5561 MEGASAS_MAX_PD_CHANNELS + i,
5431 j, 0); 5562 j, 0);
5432 5563
5433 if (instance->ld_ids[ld_index] != 5564 if (instance->ld_ids[ld_index] !=
5434 0xff) { 5565 0xff) {
5435 if (!sdev1) { 5566 if (!sdev1) {
5436 scsi_add_device(host, 5567 scsi_add_device(host,
5437 i + 2, 5568 MEGASAS_MAX_PD_CHANNELS + i,
5438 j, 0); 5569 j, 0);
5439 } 5570 }
5440 } 5571 }
@@ -5483,18 +5614,20 @@ megasas_aen_polling(struct work_struct *work)
5483 } 5614 }
5484 } 5615 }
5485 5616
5486 megasas_get_ld_list(instance); 5617 if (megasas_ld_list_query(instance,
5618 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
5619 megasas_get_ld_list(instance);
5487 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { 5620 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
5488 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 5621 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
5489 ld_index = 5622 ld_index =
5490 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 5623 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
5491 5624
5492 sdev1 = scsi_device_lookup(host, 5625 sdev1 = scsi_device_lookup(host,
5493 i+MEGASAS_MAX_LD_CHANNELS, j, 0); 5626 MEGASAS_MAX_PD_CHANNELS + i, j, 0);
5494 if (instance->ld_ids[ld_index] != 0xff) { 5627 if (instance->ld_ids[ld_index] != 0xff) {
5495 if (!sdev1) { 5628 if (!sdev1) {
5496 scsi_add_device(host, 5629 scsi_add_device(host,
5497 i+2, 5630 MEGASAS_MAX_PD_CHANNELS + i,
5498 j, 0); 5631 j, 0);
5499 } else { 5632 } else {
5500 scsi_device_put(sdev1); 5633 scsi_device_put(sdev1);
@@ -5514,7 +5647,7 @@ megasas_aen_polling(struct work_struct *work)
5514 return ; 5647 return ;
5515 } 5648 }
5516 5649
5517 seq_num = instance->evt_detail->seq_num + 1; 5650 seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
5518 5651
5519 /* Register AEN with FW for latest sequence number plus 1 */ 5652 /* Register AEN with FW for latest sequence number plus 1 */
5520 class_locale.members.reserved = 0; 5653 class_locale.members.reserved = 0;
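
[Editor's note] That closes the megaraid_sas_base.c changes. Nearly every line in them applies one rule: a field that crosses the host/firmware boundary stays little-endian in memory and is converted exactly at the access site, cpu_to_le16/32 on the way out and le16/32_to_cpu on the way in, so the driver behaves identically on big-endian hosts. A self-contained userspace sketch of the same discipline, using glibc's <endian.h> helpers in place of the kernel ones (the constant values here are illustrative):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Firmware-visible frame: both fields are little-endian on the
     * wire, whatever the host byte order is. */
    struct fw_dcmd {
            uint16_t flags;
            uint32_t opcode;
    };

    int main(void)
    {
            struct fw_dcmd d;

            d.flags  = htole16(0x0010);     /* like cpu_to_le16(MFI_FRAME_DIR_READ) */
            d.opcode = htole32(0x01040100); /* like cpu_to_le32(<some MR_DCMD opcode>) */

            /* Reads always go through the inverse helper, mirroring
             * le32_to_cpu(instance->evt_detail->seq_num) above. */
            printf("opcode = 0x%08x\n", (unsigned)le32toh(d.opcode));
            return 0;
    }
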
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 4f401f753f8e..e24b6eb645b5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -126,17 +126,17 @@ static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_FW_RAID_MAP_ALL *map)
126 return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]; 126 return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
127} 127}
128 128
129static u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map) 129u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map)
130{ 130{
131 return map->raidMap.arMapInfo[ar].pd[arm]; 131 return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
132} 132}
133 133
134static u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map) 134u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map)
135{ 135{
136 return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef; 136 return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
137} 137}
138 138
139static u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map) 139u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
140{ 140{
141 return map->raidMap.devHndlInfo[pd].curDevHdl; 141 return map->raidMap.devHndlInfo[pd].curDevHdl;
142} 142}
@@ -148,7 +148,7 @@ u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
148 148
149u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map) 149u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
150{ 150{
151 return map->raidMap.ldTgtIdToLd[ldTgtId]; 151 return le16_to_cpu(map->raidMap.ldTgtIdToLd[ldTgtId]);
152} 152}
153 153
154static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span, 154static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
@@ -167,18 +167,22 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
167 struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info; 167 struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info;
168 PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span; 168 PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
169 struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap; 169 struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
170 struct MR_LD_RAID *raid;
171 int ldCount, num_lds;
172 u16 ld;
173
170 174
171 if (pFwRaidMap->totalSize != 175 if (le32_to_cpu(pFwRaidMap->totalSize) !=
172 (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) + 176 (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) +
173 (sizeof(struct MR_LD_SPAN_MAP) *pFwRaidMap->ldCount))) { 177 (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pFwRaidMap->ldCount)))) {
174 printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n", 178 printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n",
175 (unsigned int)((sizeof(struct MR_FW_RAID_MAP) - 179 (unsigned int)((sizeof(struct MR_FW_RAID_MAP) -
176 sizeof(struct MR_LD_SPAN_MAP)) + 180 sizeof(struct MR_LD_SPAN_MAP)) +
177 (sizeof(struct MR_LD_SPAN_MAP) * 181 (sizeof(struct MR_LD_SPAN_MAP) *
178 pFwRaidMap->ldCount))); 182 le32_to_cpu(pFwRaidMap->ldCount))));
179 printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize " 183 printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize "
180 ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP), 184 ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
181 pFwRaidMap->totalSize); 185 le32_to_cpu(pFwRaidMap->totalSize));
182 return 0; 186 return 0;
183 } 187 }
184 188
@@ -187,6 +191,15 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
187 191
188 mr_update_load_balance_params(map, lbInfo); 192 mr_update_load_balance_params(map, lbInfo);
189 193
194 num_lds = le32_to_cpu(map->raidMap.ldCount);
195
 196 /* Convert RAID capability values to CPU byte order */
197 for (ldCount = 0; ldCount < num_lds; ldCount++) {
198 ld = MR_TargetIdToLdGet(ldCount, map);
199 raid = MR_LdRaidGet(ld, map);
200 le32_to_cpus((u32 *)&raid->capability);
201 }
202
190 return 1; 203 return 1;
191} 204}
192 205
@@ -200,23 +213,20 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
200 213
201 for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) { 214 for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
202 215
203 for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) { 216 for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) {
204 quad = &pSpanBlock->block_span_info.quad[j]; 217 quad = &pSpanBlock->block_span_info.quad[j];
205 218
206 if (quad->diff == 0) 219 if (le32_to_cpu(quad->diff) == 0)
207 return SPAN_INVALID; 220 return SPAN_INVALID;
208 if (quad->logStart <= row && row <= quad->logEnd && 221 if (le64_to_cpu(quad->logStart) <= row && row <=
209 (mega_mod64(row-quad->logStart, quad->diff)) == 0) { 222 le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
223 le32_to_cpu(quad->diff))) == 0) {
210 if (span_blk != NULL) { 224 if (span_blk != NULL) {
211 u64 blk, debugBlk; 225 u64 blk, debugBlk;
212 blk = 226 blk = mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
213 mega_div64_32(
214 (row-quad->logStart),
215 quad->diff);
216 debugBlk = blk; 227 debugBlk = blk;
217 228
218 blk = (blk + quad->offsetInSpan) << 229 blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
219 raid->stripeShift;
220 *span_blk = blk; 230 *span_blk = blk;
221 } 231 }
222 return span; 232 return span;
@@ -257,8 +267,8 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
257 for (span = 0; span < raid->spanDepth; span++) 267 for (span = 0; span < raid->spanDepth; span++)
258 dev_dbg(&instance->pdev->dev, "Span=%x," 268 dev_dbg(&instance->pdev->dev, "Span=%x,"
259 " number of quads=%x\n", span, 269 " number of quads=%x\n", span,
260 map->raidMap.ldSpanMap[ld].spanBlock[span]. 270 le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
261 block_span_info.noElements); 271 block_span_info.noElements));
262 for (element = 0; element < MAX_QUAD_DEPTH; element++) { 272 for (element = 0; element < MAX_QUAD_DEPTH; element++) {
263 span_set = &(ldSpanInfo[ld].span_set[element]); 273 span_set = &(ldSpanInfo[ld].span_set[element]);
264 if (span_set->span_row_data_width == 0) 274 if (span_set->span_row_data_width == 0)
@@ -286,22 +296,22 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
286 (long unsigned int)span_set->data_strip_end); 296 (long unsigned int)span_set->data_strip_end);
287 297
288 for (span = 0; span < raid->spanDepth; span++) { 298 for (span = 0; span < raid->spanDepth; span++) {
289 if (map->raidMap.ldSpanMap[ld].spanBlock[span]. 299 if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
290 block_span_info.noElements >= 300 block_span_info.noElements) >=
291 element + 1) { 301 element + 1) {
292 quad = &map->raidMap.ldSpanMap[ld]. 302 quad = &map->raidMap.ldSpanMap[ld].
293 spanBlock[span].block_span_info. 303 spanBlock[span].block_span_info.
294 quad[element]; 304 quad[element];
295 dev_dbg(&instance->pdev->dev, "Span=%x," 305 dev_dbg(&instance->pdev->dev, "Span=%x,"
296 "Quad=%x, diff=%x\n", span, 306 "Quad=%x, diff=%x\n", span,
297 element, quad->diff); 307 element, le32_to_cpu(quad->diff));
298 dev_dbg(&instance->pdev->dev, 308 dev_dbg(&instance->pdev->dev,
299 "offset_in_span=0x%08lx\n", 309 "offset_in_span=0x%08lx\n",
300 (long unsigned int)quad->offsetInSpan); 310 (long unsigned int)le64_to_cpu(quad->offsetInSpan));
301 dev_dbg(&instance->pdev->dev, 311 dev_dbg(&instance->pdev->dev,
302 "logical start=0x%08lx, end=0x%08lx\n", 312 "logical start=0x%08lx, end=0x%08lx\n",
303 (long unsigned int)quad->logStart, 313 (long unsigned int)le64_to_cpu(quad->logStart),
304 (long unsigned int)quad->logEnd); 314 (long unsigned int)le64_to_cpu(quad->logEnd));
305 } 315 }
306 } 316 }
307 } 317 }
@@ -348,23 +358,23 @@ u32 mr_spanset_get_span_block(struct megasas_instance *instance,
348 continue; 358 continue;
349 359
350 for (span = 0; span < raid->spanDepth; span++) 360 for (span = 0; span < raid->spanDepth; span++)
351 if (map->raidMap.ldSpanMap[ld].spanBlock[span]. 361 if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
352 block_span_info.noElements >= info+1) { 362 block_span_info.noElements) >= info+1) {
353 quad = &map->raidMap.ldSpanMap[ld]. 363 quad = &map->raidMap.ldSpanMap[ld].
354 spanBlock[span]. 364 spanBlock[span].
355 block_span_info.quad[info]; 365 block_span_info.quad[info];
 356 if (quad->diff == 0) 366 if (le32_to_cpu(quad->diff) == 0)
357 return SPAN_INVALID; 367 return SPAN_INVALID;
358 if (quad->logStart <= row && 368 if (le64_to_cpu(quad->logStart) <= row &&
359 row <= quad->logEnd && 369 row <= le64_to_cpu(quad->logEnd) &&
360 (mega_mod64(row - quad->logStart, 370 (mega_mod64(row - le64_to_cpu(quad->logStart),
361 quad->diff)) == 0) { 371 le32_to_cpu(quad->diff))) == 0) {
362 if (span_blk != NULL) { 372 if (span_blk != NULL) {
363 u64 blk; 373 u64 blk;
364 blk = mega_div64_32 374 blk = mega_div64_32
365 ((row - quad->logStart), 375 ((row - le64_to_cpu(quad->logStart)),
366 quad->diff); 376 le32_to_cpu(quad->diff));
367 blk = (blk + quad->offsetInSpan) 377 blk = (blk + le64_to_cpu(quad->offsetInSpan))
368 << raid->stripeShift; 378 << raid->stripeShift;
369 *span_blk = blk; 379 *span_blk = blk;
370 } 380 }
@@ -415,8 +425,8 @@ static u64 get_row_from_strip(struct megasas_instance *instance,
415 span_set_Row = mega_div64_32(span_set_Strip, 425 span_set_Row = mega_div64_32(span_set_Strip,
416 span_set->span_row_data_width) * span_set->diff; 426 span_set->span_row_data_width) * span_set->diff;
417 for (span = 0, span_offset = 0; span < raid->spanDepth; span++) 427 for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
418 if (map->raidMap.ldSpanMap[ld].spanBlock[span]. 428 if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
 419 block_span_info.noElements >= info+1) { 429 block_span_info.noElements) >= info+1) {
420 if (strip_offset >= 430 if (strip_offset >=
421 span_set->strip_offset[span]) 431 span_set->strip_offset[span])
422 span_offset++; 432 span_offset++;
@@ -480,18 +490,18 @@ static u64 get_strip_from_row(struct megasas_instance *instance,
480 continue; 490 continue;
481 491
482 for (span = 0; span < raid->spanDepth; span++) 492 for (span = 0; span < raid->spanDepth; span++)
483 if (map->raidMap.ldSpanMap[ld].spanBlock[span]. 493 if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
484 block_span_info.noElements >= info+1) { 494 block_span_info.noElements) >= info+1) {
485 quad = &map->raidMap.ldSpanMap[ld]. 495 quad = &map->raidMap.ldSpanMap[ld].
486 spanBlock[span].block_span_info.quad[info]; 496 spanBlock[span].block_span_info.quad[info];
487 if (quad->logStart <= row && 497 if (le64_to_cpu(quad->logStart) <= row &&
488 row <= quad->logEnd && 498 row <= le64_to_cpu(quad->logEnd) &&
489 mega_mod64((row - quad->logStart), 499 mega_mod64((row - le64_to_cpu(quad->logStart)),
490 quad->diff) == 0) { 500 le32_to_cpu(quad->diff)) == 0) {
491 strip = mega_div64_32 501 strip = mega_div64_32
492 (((row - span_set->data_row_start) 502 (((row - span_set->data_row_start)
493 - quad->logStart), 503 - le64_to_cpu(quad->logStart)),
494 quad->diff); 504 le32_to_cpu(quad->diff));
495 strip *= span_set->span_row_data_width; 505 strip *= span_set->span_row_data_width;
496 strip += span_set->data_strip_start; 506 strip += span_set->data_strip_start;
497 strip += span_set->strip_offset[span]; 507 strip += span_set->strip_offset[span];
@@ -543,8 +553,8 @@ static u32 get_arm_from_strip(struct megasas_instance *instance,
543 span_set->span_row_data_width); 553 span_set->span_row_data_width);
544 554
545 for (span = 0, span_offset = 0; span < raid->spanDepth; span++) 555 for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
546 if (map->raidMap.ldSpanMap[ld].spanBlock[span]. 556 if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
547 block_span_info.noElements >= info+1) { 557 block_span_info.noElements) >= info+1) {
548 if (strip_offset >= 558 if (strip_offset >=
549 span_set->strip_offset[span]) 559 span_set->strip_offset[span])
550 span_offset = 560 span_offset =
@@ -669,7 +679,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
669 } 679 }
670 } 680 }
671 681
672 *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk; 682 *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
673 pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | 683 pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
674 physArm; 684 physArm;
675 return retval; 685 return retval;
@@ -765,7 +775,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
765 } 775 }
766 } 776 }
767 777
768 *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk; 778 *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
769 pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | 779 pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
770 physArm; 780 physArm;
771 return retval; 781 return retval;
@@ -784,7 +794,7 @@ u8
784MR_BuildRaidContext(struct megasas_instance *instance, 794MR_BuildRaidContext(struct megasas_instance *instance,
785 struct IO_REQUEST_INFO *io_info, 795 struct IO_REQUEST_INFO *io_info,
786 struct RAID_CONTEXT *pRAID_Context, 796 struct RAID_CONTEXT *pRAID_Context,
787 struct MR_FW_RAID_MAP_ALL *map) 797 struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN)
788{ 798{
789 struct MR_LD_RAID *raid; 799 struct MR_LD_RAID *raid;
790 u32 ld, stripSize, stripe_mask; 800 u32 ld, stripSize, stripe_mask;
@@ -965,7 +975,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
965 regSize += stripSize; 975 regSize += stripSize;
966 } 976 }
967 977
968 pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec; 978 pRAID_Context->timeoutValue = cpu_to_le16(map->raidMap.fpPdIoTimeoutSec);
969 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 979 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
970 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) 980 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
971 pRAID_Context->regLockFlags = (isRead) ? 981 pRAID_Context->regLockFlags = (isRead) ?
@@ -974,9 +984,12 @@ MR_BuildRaidContext(struct megasas_instance *instance,
974 pRAID_Context->regLockFlags = (isRead) ? 984 pRAID_Context->regLockFlags = (isRead) ?
975 REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite; 985 REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
976 pRAID_Context->VirtualDiskTgtId = raid->targetId; 986 pRAID_Context->VirtualDiskTgtId = raid->targetId;
977 pRAID_Context->regLockRowLBA = regStart; 987 pRAID_Context->regLockRowLBA = cpu_to_le64(regStart);
978 pRAID_Context->regLockLength = regSize; 988 pRAID_Context->regLockLength = cpu_to_le32(regSize);
979 pRAID_Context->configSeqNum = raid->seqNum; 989 pRAID_Context->configSeqNum = raid->seqNum;
990 /* save pointer to raid->LUN array */
991 *raidLUN = raid->LUN;
992
980 993
981 /*Get Phy Params only if FP capable, or else leave it to MR firmware 994 /*Get Phy Params only if FP capable, or else leave it to MR firmware
982 to do the calculation.*/ 995 to do the calculation.*/
@@ -1047,8 +1060,8 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
1047 raid = MR_LdRaidGet(ld, map); 1060 raid = MR_LdRaidGet(ld, map);
1048 for (element = 0; element < MAX_QUAD_DEPTH; element++) { 1061 for (element = 0; element < MAX_QUAD_DEPTH; element++) {
1049 for (span = 0; span < raid->spanDepth; span++) { 1062 for (span = 0; span < raid->spanDepth; span++) {
1050 if (map->raidMap.ldSpanMap[ld].spanBlock[span]. 1063 if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
1051 block_span_info.noElements < 1064 block_span_info.noElements) <
1052 element + 1) 1065 element + 1)
1053 continue; 1066 continue;
1054 span_set = &(ldSpanInfo[ld].span_set[element]); 1067 span_set = &(ldSpanInfo[ld].span_set[element]);
@@ -1056,14 +1069,14 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
1056 spanBlock[span].block_span_info. 1069 spanBlock[span].block_span_info.
1057 quad[element]; 1070 quad[element];
1058 1071
1059 span_set->diff = quad->diff; 1072 span_set->diff = le32_to_cpu(quad->diff);
1060 1073
1061 for (count = 0, span_row_width = 0; 1074 for (count = 0, span_row_width = 0;
1062 count < raid->spanDepth; count++) { 1075 count < raid->spanDepth; count++) {
1063 if (map->raidMap.ldSpanMap[ld]. 1076 if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
1064 spanBlock[count]. 1077 spanBlock[count].
1065 block_span_info. 1078 block_span_info.
1066 noElements >= element + 1) { 1079 noElements) >= element + 1) {
1067 span_set->strip_offset[count] = 1080 span_set->strip_offset[count] =
1068 span_row_width; 1081 span_row_width;
1069 span_row_width += 1082 span_row_width +=
@@ -1077,9 +1090,9 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
1077 } 1090 }
1078 1091
1079 span_set->span_row_data_width = span_row_width; 1092 span_set->span_row_data_width = span_row_width;
1080 span_row = mega_div64_32(((quad->logEnd - 1093 span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
1081 quad->logStart) + quad->diff), 1094 le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)),
1082 quad->diff); 1095 le32_to_cpu(quad->diff));
1083 1096
1084 if (element == 0) { 1097 if (element == 0) {
1085 span_set->log_start_lba = 0; 1098 span_set->log_start_lba = 0;
@@ -1096,7 +1109,7 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
1096 1109
1097 span_set->data_row_start = 0; 1110 span_set->data_row_start = 0;
1098 span_set->data_row_end = 1111 span_set->data_row_end =
1099 (span_row * quad->diff) - 1; 1112 (span_row * le32_to_cpu(quad->diff)) - 1;
1100 } else { 1113 } else {
1101 span_set_prev = &(ldSpanInfo[ld]. 1114 span_set_prev = &(ldSpanInfo[ld].
1102 span_set[element - 1]); 1115 span_set[element - 1]);
@@ -1122,7 +1135,7 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
1122 span_set_prev->data_row_end + 1; 1135 span_set_prev->data_row_end + 1;
1123 span_set->data_row_end = 1136 span_set->data_row_end =
1124 span_set->data_row_start + 1137 span_set->data_row_start +
1125 (span_row * quad->diff) - 1; 1138 (span_row * le32_to_cpu(quad->diff)) - 1;
1126 } 1139 }
1127 break; 1140 break;
1128 } 1141 }
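
[Editor's note] End of the megaraid_sas_fp.c changes. The row/quad arithmetic that the le64/le32 conversions above feed into is compact enough to restate: a row belongs to a quad when it lies in [logStart, logEnd] and sits a whole number of diff steps past logStart; the block is then ((row - logStart) / diff + offsetInSpan) << stripeShift. A plain C restatement with all values already in CPU byte order (names mirror the map fields; this is the math, not the driver's exact code):

    #include <stdint.h>

    struct quad {
            uint64_t log_start;
            uint64_t log_end;
            uint32_t diff;          /* row stride inside the quad */
            uint64_t offset_in_span;
    };

    /* Returns 1 and fills *blk when row maps into this quad, else 0. */
    static int quad_row_to_block(const struct quad *q, uint64_t row,
                                 uint8_t stripe_shift, uint64_t *blk)
    {
            if (q->diff == 0)
                    return 0;                       /* invalid quad */
            if (row < q->log_start || row > q->log_end)
                    return 0;                       /* outside this quad */
            if ((row - q->log_start) % q->diff)
                    return 0;                       /* not on a stride boundary */

            *blk = ((row - q->log_start) / q->diff + q->offset_in_span)
                    << stripe_shift;
            return 1;
    }
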
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 417d5f167aa2..f6555921fd7a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -72,17 +72,6 @@ megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
72int 72int
73megasas_issue_polled(struct megasas_instance *instance, 73megasas_issue_polled(struct megasas_instance *instance,
74 struct megasas_cmd *cmd); 74 struct megasas_cmd *cmd);
75
76u8
77MR_BuildRaidContext(struct megasas_instance *instance,
78 struct IO_REQUEST_INFO *io_info,
79 struct RAID_CONTEXT *pRAID_Context,
80 struct MR_FW_RAID_MAP_ALL *map);
81u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
82struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
83
84u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
85
86void 75void
87megasas_check_and_restore_queue_depth(struct megasas_instance *instance); 76megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
88 77
@@ -626,23 +615,20 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
626 615
627 IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT; 616 IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
628 IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER; 617 IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
629 IOCInitMessage->MsgVersion = MPI2_VERSION; 618 IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION);
630 IOCInitMessage->HeaderVersion = MPI2_HEADER_VERSION; 619 IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
631 IOCInitMessage->SystemRequestFrameSize = 620 IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
632 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4; 621
633 622 IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
634 IOCInitMessage->ReplyDescriptorPostQueueDepth = fusion->reply_q_depth; 623 IOCInitMessage->ReplyDescriptorPostQueueAddress = cpu_to_le64(fusion->reply_frames_desc_phys);
635 IOCInitMessage->ReplyDescriptorPostQueueAddress = 624 IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
636 fusion->reply_frames_desc_phys;
637 IOCInitMessage->SystemRequestFrameBaseAddress =
638 fusion->io_request_frames_phys;
639 IOCInitMessage->HostMSIxVectors = instance->msix_vectors; 625 IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
640 init_frame = (struct megasas_init_frame *)cmd->frame; 626 init_frame = (struct megasas_init_frame *)cmd->frame;
641 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 627 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
642 628
643 frame_hdr = &cmd->frame->hdr; 629 frame_hdr = &cmd->frame->hdr;
644 frame_hdr->cmd_status = 0xFF; 630 frame_hdr->cmd_status = 0xFF;
645 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 631 frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
646 632
647 init_frame->cmd = MFI_CMD_INIT; 633 init_frame->cmd = MFI_CMD_INIT;
648 init_frame->cmd_status = 0xFF; 634 init_frame->cmd_status = 0xFF;
@@ -652,17 +638,24 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
652 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) 638 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
653 init_frame->driver_operations. 639 init_frame->driver_operations.
654 mfi_capabilities.support_additional_msix = 1; 640 mfi_capabilities.support_additional_msix = 1;
641 /* driver supports HA / Remote LUN over Fast Path interface */
642 init_frame->driver_operations.mfi_capabilities.support_fp_remote_lun
643 = 1;
644 /* Convert capability to LE32 */
645 cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
655 646
656 init_frame->queue_info_new_phys_addr_lo = ioc_init_handle; 647 init_frame->queue_info_new_phys_addr_lo = cpu_to_le32((u32)ioc_init_handle);
657 init_frame->data_xfer_len = sizeof(struct MPI2_IOC_INIT_REQUEST); 648 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
658 649
659 req_desc = 650 req_desc =
660 (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc; 651 (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc;
661 652
662 req_desc->Words = cmd->frame_phys_addr; 653 req_desc->Words = 0;
663 req_desc->MFAIo.RequestFlags = 654 req_desc->MFAIo.RequestFlags =
664 (MEGASAS_REQ_DESCRIPT_FLAGS_MFA << 655 (MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
665 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 656 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
657 cpu_to_le32s((u32 *)&req_desc->MFAIo);
658 req_desc->Words |= cpu_to_le64(cmd->frame_phys_addr);
666 659
667 /* 660 /*
668 * disable the intr before firing the init frame 661 * disable the intr before firing the init frame
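
[Editor's note] Note the ordering in the IOC-init hunk above: Words is zeroed, the MFA RequestFlags are composed in CPU byte order, cpu_to_le32s() byte-swaps that word in place, and only then is the frame's physical address OR-ed in as a little-endian 64-bit value; the OR works because MFI frames are aligned, so the address's low flag bits are zero. A userspace sketch of the sequence (the field layout is illustrative, not the exact MPI2 descriptor):

    #include <endian.h>
    #include <stdint.h>

    union req_desc {
            uint64_t words;
            struct {
                    uint32_t flags;         /* low 32 bits: request flags */
                    uint32_t addr_hi;
            } mfa;
    };

    static void build_mfa_descriptor(union req_desc *d,
                                     uint64_t frame_addr, uint32_t flags)
    {
            d->words = 0;
            d->mfa.flags = flags;                   /* compose in CPU order */
            d->mfa.flags = htole32(d->mfa.flags);   /* in-place swap, like cpu_to_le32s() */
            /* frame_addr is aligned, so its low flag bits are clear */
            d->words |= htole64(frame_addr);
    }
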
@@ -753,13 +746,13 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
753 dcmd->cmd = MFI_CMD_DCMD; 746 dcmd->cmd = MFI_CMD_DCMD;
754 dcmd->cmd_status = 0xFF; 747 dcmd->cmd_status = 0xFF;
755 dcmd->sge_count = 1; 748 dcmd->sge_count = 1;
756 dcmd->flags = MFI_FRAME_DIR_READ; 749 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
757 dcmd->timeout = 0; 750 dcmd->timeout = 0;
758 dcmd->pad_0 = 0; 751 dcmd->pad_0 = 0;
759 dcmd->data_xfer_len = size_map_info; 752 dcmd->data_xfer_len = cpu_to_le32(size_map_info);
760 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; 753 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
761 dcmd->sgl.sge32[0].phys_addr = ci_h; 754 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
762 dcmd->sgl.sge32[0].length = size_map_info; 755 dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
763 756
764 if (!megasas_issue_polled(instance, cmd)) 757 if (!megasas_issue_polled(instance, cmd))
765 ret = 0; 758 ret = 0;
@@ -828,7 +821,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
828 821
829 map = fusion->ld_map[instance->map_id & 1]; 822 map = fusion->ld_map[instance->map_id & 1];
830 823
831 num_lds = map->raidMap.ldCount; 824 num_lds = le32_to_cpu(map->raidMap.ldCount);
832 825
833 dcmd = &cmd->frame->dcmd; 826 dcmd = &cmd->frame->dcmd;
834 827
@@ -856,15 +849,15 @@ megasas_sync_map_info(struct megasas_instance *instance)
856 dcmd->cmd = MFI_CMD_DCMD; 849 dcmd->cmd = MFI_CMD_DCMD;
857 dcmd->cmd_status = 0xFF; 850 dcmd->cmd_status = 0xFF;
858 dcmd->sge_count = 1; 851 dcmd->sge_count = 1;
859 dcmd->flags = MFI_FRAME_DIR_WRITE; 852 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
860 dcmd->timeout = 0; 853 dcmd->timeout = 0;
861 dcmd->pad_0 = 0; 854 dcmd->pad_0 = 0;
862 dcmd->data_xfer_len = size_map_info; 855 dcmd->data_xfer_len = cpu_to_le32(size_map_info);
863 dcmd->mbox.b[0] = num_lds; 856 dcmd->mbox.b[0] = num_lds;
864 dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG; 857 dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
865 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; 858 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
866 dcmd->sgl.sge32[0].phys_addr = ci_h; 859 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
867 dcmd->sgl.sge32[0].length = size_map_info; 860 dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
868 861
869 instance->map_update_cmd = cmd; 862 instance->map_update_cmd = cmd;
870 863
@@ -1067,9 +1060,8 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
1067 1060
1068 spin_lock_irqsave(&instance->hba_lock, flags); 1061 spin_lock_irqsave(&instance->hba_lock, flags);
1069 1062
1070 writel(req_desc_lo, 1063 writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port);
1071 &(regs)->inbound_low_queue_port); 1064 writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port);
1072 writel(req_desc_hi, &(regs)->inbound_high_queue_port);
1073 spin_unlock_irqrestore(&instance->hba_lock, flags); 1065 spin_unlock_irqrestore(&instance->hba_lock, flags);
1074} 1066}
1075 1067
@@ -1157,8 +1149,8 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
1157 return sge_count; 1149 return sge_count;
1158 1150
1159 scsi_for_each_sg(scp, os_sgl, sge_count, i) { 1151 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1160 sgl_ptr->Length = sg_dma_len(os_sgl); 1152 sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
1161 sgl_ptr->Address = sg_dma_address(os_sgl); 1153 sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
1162 sgl_ptr->Flags = 0; 1154 sgl_ptr->Flags = 0;
1163 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 1155 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
1164 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { 1156 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
@@ -1177,9 +1169,9 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
1177 PCI_DEVICE_ID_LSI_INVADER) || 1169 PCI_DEVICE_ID_LSI_INVADER) ||
1178 (instance->pdev->device == 1170 (instance->pdev->device ==
1179 PCI_DEVICE_ID_LSI_FURY)) { 1171 PCI_DEVICE_ID_LSI_FURY)) {
1180 if ((cmd->io_request->IoFlags & 1172 if ((le16_to_cpu(cmd->io_request->IoFlags) &
1181 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != 1173 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1182 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) 1174 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1183 cmd->io_request->ChainOffset = 1175 cmd->io_request->ChainOffset =
1184 fusion-> 1176 fusion->
1185 chain_offset_io_request; 1177 chain_offset_io_request;
@@ -1201,9 +1193,8 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
1201 sg_chain->Flags = 1193 sg_chain->Flags =
1202 (IEEE_SGE_FLAGS_CHAIN_ELEMENT | 1194 (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1203 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR); 1195 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
1204 sg_chain->Length = (sizeof(union MPI2_SGE_IO_UNION) 1196 sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
1205 *(sge_count - sg_processed)); 1197 sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);
1206 sg_chain->Address = cmd->sg_frame_phys_addr;
1207 1198
1208 sgl_ptr = 1199 sgl_ptr =
1209 (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame; 1200 (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
@@ -1261,7 +1252,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1261 io_request->CDB.EEDP32.PrimaryReferenceTag = 1252 io_request->CDB.EEDP32.PrimaryReferenceTag =
1262 cpu_to_be32(ref_tag); 1253 cpu_to_be32(ref_tag);
1263 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff; 1254 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;
1264 io_request->IoFlags = 32; /* Specify 32-byte cdb */ 1255 io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
1265 1256
1266 /* Transfer length */ 1257 /* Transfer length */
1267 cdb[28] = (u8)((num_blocks >> 24) & 0xff); 1258 cdb[28] = (u8)((num_blocks >> 24) & 0xff);
@@ -1271,19 +1262,19 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1271 1262
1272 /* set SCSI IO EEDPFlags */ 1263 /* set SCSI IO EEDPFlags */
1273 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) { 1264 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
1274 io_request->EEDPFlags = 1265 io_request->EEDPFlags = cpu_to_le16(
1275 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 1266 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1276 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | 1267 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1277 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP | 1268 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
1278 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG | 1269 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
1279 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; 1270 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1280 } else { 1271 } else {
1281 io_request->EEDPFlags = 1272 io_request->EEDPFlags = cpu_to_le16(
1282 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 1273 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1283 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; 1274 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
1284 } 1275 }
1285 io_request->Control |= (0x4 << 26); 1276 io_request->Control |= cpu_to_le32((0x4 << 26));
1286 io_request->EEDPBlockSize = scp->device->sector_size; 1277 io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
1287 } else { 1278 } else {
1288 /* Some drives don't support 16/12 byte CDB's, convert to 10 */ 1279 /* Some drives don't support 16/12 byte CDB's, convert to 10 */
1289 if (((cdb_len == 12) || (cdb_len == 16)) && 1280 if (((cdb_len == 12) || (cdb_len == 16)) &&
@@ -1311,7 +1302,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1311 cdb[8] = (u8)(num_blocks & 0xff); 1302 cdb[8] = (u8)(num_blocks & 0xff);
1312 cdb[7] = (u8)((num_blocks >> 8) & 0xff); 1303 cdb[7] = (u8)((num_blocks >> 8) & 0xff);
1313 1304
1314 io_request->IoFlags = 10; /* Specify 10-byte cdb */ 1305 io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
1315 cdb_len = 10; 1306 cdb_len = 10;
1316 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) { 1307 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
1317 /* Convert to 16 byte CDB for large LBA's */ 1308 /* Convert to 16 byte CDB for large LBA's */
@@ -1349,7 +1340,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1349 cdb[11] = (u8)((num_blocks >> 16) & 0xff); 1340 cdb[11] = (u8)((num_blocks >> 16) & 0xff);
1350 cdb[10] = (u8)((num_blocks >> 24) & 0xff); 1341 cdb[10] = (u8)((num_blocks >> 24) & 0xff);
1351 1342
1352 io_request->IoFlags = 16; /* Specify 16-byte cdb */ 1343 io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
1353 cdb_len = 16; 1344 cdb_len = 16;
1354 } 1345 }
1355 1346
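
[Editor's note] megasas_set_pd_lba() above rewrites the CDB so the LBA and transfer length land in the right bytes for the 10-, 16- and 32-byte variants; SCSI CDB fields are big-endian, hence the byte-at-a-time shifts. A sketch of the 16-byte case (READ(16)/WRITE(16): LBA in bytes 2-9 MSB first, block count in bytes 10-13), equivalent to the shift chains above, just folded into loops:

    #include <stdint.h>

    /* Fill the LBA and transfer-length fields of a 16-byte CDB. */
    static void cdb16_set_lba(uint8_t cdb[16], uint64_t lba, uint32_t nblocks)
    {
            int i;

            for (i = 0; i < 8; i++)         /* bytes 2..9: LBA, MSB first */
                    cdb[2 + i] = (uint8_t)(lba >> (8 * (7 - i)));
            for (i = 0; i < 4; i++)         /* bytes 10..13: block count */
                    cdb[10 + i] = (uint8_t)(nblocks >> (8 * (3 - i)));
    }
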
@@ -1410,13 +1401,14 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1410 struct IO_REQUEST_INFO io_info; 1401 struct IO_REQUEST_INFO io_info;
1411 struct fusion_context *fusion; 1402 struct fusion_context *fusion;
1412 struct MR_FW_RAID_MAP_ALL *local_map_ptr; 1403 struct MR_FW_RAID_MAP_ALL *local_map_ptr;
1404 u8 *raidLUN;
1413 1405
1414 device_id = MEGASAS_DEV_INDEX(instance, scp); 1406 device_id = MEGASAS_DEV_INDEX(instance, scp);
1415 1407
1416 fusion = instance->ctrl_context; 1408 fusion = instance->ctrl_context;
1417 1409
1418 io_request = cmd->io_request; 1410 io_request = cmd->io_request;
1419 io_request->RaidContext.VirtualDiskTgtId = device_id; 1411 io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
1420 io_request->RaidContext.status = 0; 1412 io_request->RaidContext.status = 0;
1421 io_request->RaidContext.exStatus = 0; 1413 io_request->RaidContext.exStatus = 0;
1422 1414
@@ -1480,7 +1472,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1480 io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; 1472 io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
1481 io_info.numBlocks = datalength; 1473 io_info.numBlocks = datalength;
1482 io_info.ldTgtId = device_id; 1474 io_info.ldTgtId = device_id;
1483 io_request->DataLength = scsi_bufflen(scp); 1475 io_request->DataLength = cpu_to_le32(scsi_bufflen(scp));
1484 1476
1485 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 1477 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1486 io_info.isRead = 1; 1478 io_info.isRead = 1;
@@ -1494,7 +1486,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1494 } else { 1486 } else {
1495 if (MR_BuildRaidContext(instance, &io_info, 1487 if (MR_BuildRaidContext(instance, &io_info,
1496 &io_request->RaidContext, 1488 &io_request->RaidContext,
1497 local_map_ptr)) 1489 local_map_ptr, &raidLUN))
1498 fp_possible = io_info.fpOkForIo; 1490 fp_possible = io_info.fpOkForIo;
1499 } 1491 }
1500 1492
@@ -1520,8 +1512,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1520 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1512 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1521 io_request->RaidContext.Type = MPI2_TYPE_CUDA; 1513 io_request->RaidContext.Type = MPI2_TYPE_CUDA;
1522 io_request->RaidContext.nseg = 0x1; 1514 io_request->RaidContext.nseg = 0x1;
1523 io_request->IoFlags |= 1515 io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
1524 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1525 io_request->RaidContext.regLockFlags |= 1516 io_request->RaidContext.regLockFlags |=
1526 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | 1517 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1527 MR_RL_FLAGS_SEQ_NUM_ENABLE); 1518 MR_RL_FLAGS_SEQ_NUM_ENABLE);
@@ -1537,9 +1528,11 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1537 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; 1528 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
1538 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; 1529 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
1539 io_request->DevHandle = io_info.devHandle; 1530 io_request->DevHandle = io_info.devHandle;
1531 /* populate the LUN field */
1532 memcpy(io_request->LUN, raidLUN, 8);
1540 } else { 1533 } else {
1541 io_request->RaidContext.timeoutValue = 1534 io_request->RaidContext.timeoutValue =
1542 local_map_ptr->raidMap.fpPdIoTimeoutSec; 1535 cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
1543 cmd->request_desc->SCSIIO.RequestFlags = 1536 cmd->request_desc->SCSIIO.RequestFlags =
1544 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 1537 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
1545 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1538 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -1557,7 +1550,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1557 io_request->RaidContext.nseg = 0x1; 1550 io_request->RaidContext.nseg = 0x1;
1558 } 1551 }
1559 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 1552 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1560 io_request->DevHandle = device_id; 1553 io_request->DevHandle = cpu_to_le16(device_id);
1561 } /* Not FP */ 1554 } /* Not FP */
1562} 1555}
1563 1556
@@ -1579,6 +1572,11 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
1579 u16 pd_index = 0; 1572 u16 pd_index = 0;
1580 struct MR_FW_RAID_MAP_ALL *local_map_ptr; 1573 struct MR_FW_RAID_MAP_ALL *local_map_ptr;
1581 struct fusion_context *fusion = instance->ctrl_context; 1574 struct fusion_context *fusion = instance->ctrl_context;
1575 u8 span, physArm;
1576 u16 devHandle;
1577 u32 ld, arRef, pd;
1578 struct MR_LD_RAID *raid;
1579 struct RAID_CONTEXT *pRAID_Context;
1582 1580
1583 io_request = cmd->io_request; 1581 io_request = cmd->io_request;
1584 device_id = MEGASAS_DEV_INDEX(instance, scmd); 1582 device_id = MEGASAS_DEV_INDEX(instance, scmd);
@@ -1586,6 +1584,9 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
1586 +scmd->device->id; 1584 +scmd->device->id;
1587 local_map_ptr = fusion->ld_map[(instance->map_id & 1)]; 1585 local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
1588 1586
1587 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
1588
1589
1589 /* Check if this is a system PD I/O */ 1590 /* Check if this is a system PD I/O */
1590 if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS && 1591 if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
1591 instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { 1592 instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
@@ -1623,15 +1624,62 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
1623 scmd->request->timeout / HZ; 1624 scmd->request->timeout / HZ;
1624 } 1625 }
1625 } else { 1626 } else {
1627 if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS)
1628 goto NonFastPath;
1629
1630 ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1631 if ((ld >= MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io))
1632 goto NonFastPath;
1633
1634 raid = MR_LdRaidGet(ld, local_map_ptr);
1635
1636 /* check if this LD is FP capable */
1637 if (!(raid->capability.fpNonRWCapable))
1638 /* not FP capable, send as non-FP */
1639 goto NonFastPath;
1640
1641 /* get RAID_Context pointer */
1642 pRAID_Context = &io_request->RaidContext;
1643
1644 /* set RAID context values */
1645 pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
1646 pRAID_Context->timeoutValue = raid->fpIoTimeoutForLd;
1647 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
1648 pRAID_Context->regLockRowLBA = 0;
1649 pRAID_Context->regLockLength = 0;
1650 pRAID_Context->configSeqNum = raid->seqNum;
1651
1652 /* get the DevHandle for the PD (since this is
1653 fpNonRWCapable, this is a single disk RAID0) */
1654 span = physArm = 0;
1655 arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr);
1656 pd = MR_ArPdGet(arRef, physArm, local_map_ptr);
1657 devHandle = MR_PdDevHandleGet(pd, local_map_ptr);
1658
1659 /* build request descriptor */
1660 cmd->request_desc->SCSIIO.RequestFlags =
1661 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1662 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1663 cmd->request_desc->SCSIIO.DevHandle = devHandle;
1664
1665 /* populate the LUN field */
1666 memcpy(io_request->LUN, raid->LUN, 8);
1667
1668 /* build the raidScsiIO structure */
1669 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1670 io_request->DevHandle = devHandle;
1671
1672 return;
1673
1674NonFastPath:
1626 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 1675 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1627 io_request->DevHandle = device_id; 1676 io_request->DevHandle = cpu_to_le16(device_id);
1628 cmd->request_desc->SCSIIO.RequestFlags = 1677 cmd->request_desc->SCSIIO.RequestFlags =
1629 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 1678 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1630 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1679 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1631 } 1680 }
1632 io_request->RaidContext.VirtualDiskTgtId = device_id; 1681 io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
1633 io_request->LUN[1] = scmd->device->lun; 1682 io_request->LUN[1] = scmd->device->lun;
1634 io_request->DataLength = scsi_bufflen(scmd);
1635} 1683}
1636 1684
1637/** 1685/**
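
[Editor's note] The new non-read/write fast-path branch above resolves a device handle in three map hops: target id to logical drive (MR_TargetIdToLdGet), LD plus span to array reference (MR_LdSpanArrayGet), array plus arm to physical drive (MR_ArPdGet), then PD to devHandle (MR_PdDevHandleGet); for an fpNonRWCapable LD this is a single-disk RAID0, so span and arm are both zero. A toy, runnable condensation of the chain (the tables and values are made up; the real accessors read struct MR_FW_RAID_MAP_ALL and convert endianness internally):

    #include <stdint.h>
    #include <stdio.h>

    static const uint16_t tgt_to_ld[4]     = { 0, 1, 2, 3 };
    static const uint16_t ld_to_array[4]   = { 7, 8, 9, 10 };   /* span 0 */
    static const uint16_t array_to_pd[16]  = { [7] = 21, [8] = 22,
                                               [9] = 23, [10] = 24 };
    static const uint16_t pd_to_handle[32] = { [21] = 0x11, [22] = 0x12,
                                               [23] = 0x13, [24] = 0x14 };

    /* Non-RW fast path: single-disk RAID0, span = arm = 0, so the
     * lookup collapses to tgt -> ld -> array -> pd -> devHandle. */
    static uint16_t fp_nonrw_devhandle(uint16_t tgt_id)
    {
            uint16_t ld = tgt_to_ld[tgt_id];
            uint16_t ar = ld_to_array[ld];
            uint16_t pd = array_to_pd[ar];

            return pd_to_handle[pd];
    }

    int main(void)
    {
            printf("devHandle for target 2 = 0x%x\n",
                   (unsigned)fp_nonrw_devhandle(2));
            return 0;
    }
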
@@ -1670,7 +1718,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
1670 * Just the CDB length,rest of the Flags are zero 1718 * Just the CDB length,rest of the Flags are zero
1671 * This will be modified for FP in build_ldio_fusion 1719 * This will be modified for FP in build_ldio_fusion
1672 */ 1720 */
1673 io_request->IoFlags = scp->cmd_len; 1721 io_request->IoFlags = cpu_to_le16(scp->cmd_len);
1674 1722
1675 if (megasas_is_ldio(scp)) 1723 if (megasas_is_ldio(scp))
1676 megasas_build_ldio_fusion(instance, scp, cmd); 1724 megasas_build_ldio_fusion(instance, scp, cmd);
@@ -1695,17 +1743,17 @@ megasas_build_io_fusion(struct megasas_instance *instance,
1695 1743
1696 io_request->RaidContext.numSGE = sge_count; 1744 io_request->RaidContext.numSGE = sge_count;
1697 1745
1698 io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING; 1746 io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1699 1747
1700 if (scp->sc_data_direction == PCI_DMA_TODEVICE) 1748 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1701 io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE; 1749 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
1702 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 1750 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1703 io_request->Control |= MPI2_SCSIIO_CONTROL_READ; 1751 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
1704 1752
1705 io_request->SGLOffset0 = 1753 io_request->SGLOffset0 =
1706 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4; 1754 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
1707 1755
1708 io_request->SenseBufferLowAddress = cmd->sense_phys_addr; 1756 io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
1709 io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; 1757 io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
1710 1758
1711 cmd->scmd = scp; 1759 cmd->scmd = scp;
@@ -1770,7 +1818,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
1770 } 1818 }
1771 1819
1772 req_desc = cmd->request_desc; 1820 req_desc = cmd->request_desc;
1773 req_desc->SCSIIO.SMID = index; 1821 req_desc->SCSIIO.SMID = cpu_to_le16(index);
1774 1822
1775 if (cmd->io_request->ChainOffset != 0 && 1823 if (cmd->io_request->ChainOffset != 0 &&
1776 cmd->io_request->ChainOffset != 0xF) 1824 cmd->io_request->ChainOffset != 0xF)
@@ -1832,7 +1880,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
1832 num_completed = 0; 1880 num_completed = 0;
1833 1881
1834 while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) { 1882 while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) {
1835 smid = reply_desc->SMID; 1883 smid = le16_to_cpu(reply_desc->SMID);
1836 1884
1837 cmd_fusion = fusion->cmd_list[smid - 1]; 1885 cmd_fusion = fusion->cmd_list[smid - 1];
1838 1886
@@ -2050,12 +2098,12 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
2050 SGL) / 4; 2098 SGL) / 4;
2051 io_req->ChainOffset = fusion->chain_offset_mfi_pthru; 2099 io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
2052 2100
2053 mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr; 2101 mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr);
2054 2102
2055 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | 2103 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2056 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; 2104 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
2057 2105
2058 mpi25_ieee_chain->Length = MEGASAS_MAX_SZ_CHAIN_FRAME; 2106 mpi25_ieee_chain->Length = cpu_to_le32(MEGASAS_MAX_SZ_CHAIN_FRAME);
2059 2107
2060 return 0; 2108 return 0;
2061} 2109}
@@ -2088,7 +2136,7 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
2088 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 2136 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2089 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2137 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2090 2138
2091 req_desc->SCSIIO.SMID = index; 2139 req_desc->SCSIIO.SMID = cpu_to_le16(index);
2092 2140
2093 return req_desc; 2141 return req_desc;
2094} 2142}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 4eb84011cb07..35a51397b364 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -93,8 +93,13 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
93 */ 93 */
94 94
95struct RAID_CONTEXT { 95struct RAID_CONTEXT {
96#if defined(__BIG_ENDIAN_BITFIELD)
97 u8 nseg:4;
98 u8 Type:4;
99#else
96 u8 Type:4; 100 u8 Type:4;
97 u8 nseg:4; 101 u8 nseg:4;
102#endif
98 u8 resvd0; 103 u8 resvd0;
99 u16 timeoutValue; 104 u16 timeoutValue;
100 u8 regLockFlags; 105 u8 regLockFlags;
@@ -298,8 +303,13 @@ struct MPI2_RAID_SCSI_IO_REQUEST {
298 * MPT RAID MFA IO Descriptor. 303 * MPT RAID MFA IO Descriptor.
299 */ 304 */
300struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR { 305struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
306#if defined(__BIG_ENDIAN_BITFIELD)
307 u32 MessageAddress1:24; /* bits 31:8*/
308 u32 RequestFlags:8;
309#else
301 u32 RequestFlags:8; 310 u32 RequestFlags:8;
302 u32 MessageAddress1:24; /* bits 31:8*/ 311 u32 MessageAddress1:24; /* bits 31:8*/
312#endif
303 u32 MessageAddress2; /* bits 61:32 */ 313 u32 MessageAddress2; /* bits 61:32 */
304}; 314};
305 315
@@ -518,6 +528,19 @@ struct MR_SPAN_BLOCK_INFO {
518 528
519struct MR_LD_RAID { 529struct MR_LD_RAID {
520 struct { 530 struct {
531#if defined(__BIG_ENDIAN_BITFIELD)
532 u32 reserved4:7;
533 u32 fpNonRWCapable:1;
534 u32 fpReadAcrossStripe:1;
535 u32 fpWriteAcrossStripe:1;
536 u32 fpReadCapable:1;
537 u32 fpWriteCapable:1;
538 u32 encryptionType:8;
539 u32 pdPiMode:4;
540 u32 ldPiMode:4;
541 u32 reserved5:3;
542 u32 fpCapable:1;
543#else
521 u32 fpCapable:1; 544 u32 fpCapable:1;
522 u32 reserved5:3; 545 u32 reserved5:3;
523 u32 ldPiMode:4; 546 u32 ldPiMode:4;
@@ -527,7 +550,9 @@ struct MR_LD_RAID {
527 u32 fpReadCapable:1; 550 u32 fpReadCapable:1;
528 u32 fpWriteAcrossStripe:1; 551 u32 fpWriteAcrossStripe:1;
529 u32 fpReadAcrossStripe:1; 552 u32 fpReadAcrossStripe:1;
530 u32 reserved4:8; 553 u32 fpNonRWCapable:1;
554 u32 reserved4:7;
555#endif
531 } capability; 556 } capability;
532 u32 reserved6; 557 u32 reserved6;
533 u64 size; 558 u64 size;
@@ -551,7 +576,9 @@ struct MR_LD_RAID {
551 u32 reserved:31; 576 u32 reserved:31;
552 } flags; 577 } flags;
553 578
554 u8 reserved3[0x5C]; 579 u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */
580 u8 fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/
581 u8 reserved3[0x80-0x2D]; /* 0x2D */
555}; 582};
556 583
557struct MR_LD_SPAN_MAP { 584struct MR_LD_SPAN_MAP {
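
[Editor's note] That ends the fusion.h changes. The pattern repeated in all three structs is the standard kernel answer to C's unspecified bitfield ordering: on big-endian ABIs the first-declared member takes the most significant bits, so each firmware-shared bitfield is declared twice, mirrored under __BIG_ENDIAN_BITFIELD, keeping the in-memory bytes identical to the little-endian firmware's view. A minimal illustration of the idiom (userspace code would key off __BYTE_ORDER instead of the kernel's macro):

    #include <stdint.h>

    /* Same trick as struct RAID_CONTEXT: one byte carrying two 4-bit
     * fields whose layout must match little-endian firmware. */
    struct ctx_byte0 {
    #if defined(__BIG_ENDIAN_BITFIELD)
            uint8_t nseg:4;         /* first-declared = high nibble on BE */
            uint8_t type:4;
    #else
            uint8_t type:4;         /* low nibble on LE, as firmware expects */
            uint8_t nseg:4;
    #endif
    };
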
diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile
index 4c1d2e7a1176..efb0c4c2e310 100644
--- a/drivers/scsi/mpt3sas/Makefile
+++ b/drivers/scsi/mpt3sas/Makefile
@@ -1,5 +1,5 @@
1# mpt3sas makefile 1# mpt3sas makefile
2obj-m += mpt3sas.o 2obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o
3mpt3sas-y += mpt3sas_base.o \ 3mpt3sas-y += mpt3sas_base.o \
4 mpt3sas_config.o \ 4 mpt3sas_config.o \
5 mpt3sas_scsih.o \ 5 mpt3sas_scsih.o \
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b58e8f815a00..e62d17d41d4e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2420,14 +2420,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2420 } 2420 }
2421 } 2421 }
2422 2422
2423 if (modepage == 0x3F) { 2423 sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
2424 sd_printk(KERN_ERR, sdkp, "No Caching mode page " 2424 goto defaults;
2425 "present\n"); 2425
2426 goto defaults;
2427 } else if ((buffer[offset] & 0x3f) != modepage) {
2428 sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
2429 goto defaults;
2430 }
2431 Page_found: 2426 Page_found:
2432 if (modepage == 8) { 2427 if (modepage == 8) {
2433 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); 2428 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index bce09a6898c4..721050090520 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -177,6 +177,7 @@ enum {
177 MASK_TASK_RESPONSE = 0xFF00, 177 MASK_TASK_RESPONSE = 0xFF00,
178 MASK_RSP_UPIU_RESULT = 0xFFFF, 178 MASK_RSP_UPIU_RESULT = 0xFFFF,
179 MASK_QUERY_DATA_SEG_LEN = 0xFFFF, 179 MASK_QUERY_DATA_SEG_LEN = 0xFFFF,
180 MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF,
180 MASK_RSP_EXCEPTION_EVENT = 0x10000, 181 MASK_RSP_EXCEPTION_EVENT = 0x10000,
181}; 182};
182 183
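
[Editor's note] MASK_RSP_UPIU_DATA_SEG_LEN selects the low 16 bits of the response UPIU header's third dword. UPIU headers are big-endian on the wire, so the dword is converted before masking; the ufshcd.c hunk below adds exactly such a helper. A userspace equivalent of the extraction:

    #include <endian.h>
    #include <stdint.h>

    #define MASK_RSP_UPIU_DATA_SEG_LEN 0xFFFF

    /* dword_2 as stored in the UPIU header: big-endian on the wire. */
    static inline unsigned int rsp_data_seg_len(uint32_t dword_2_be)
    {
            return be32toh(dword_2_be) & MASK_RSP_UPIU_DATA_SEG_LEN;
    }
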
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index b36ca9a2dfbb..04884d663e4e 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -36,9 +36,11 @@
36#include <linux/async.h> 36#include <linux/async.h>
37 37
38#include "ufshcd.h" 38#include "ufshcd.h"
39#include "unipro.h"
39 40
40#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ 41#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
41 UTP_TASK_REQ_COMPL |\ 42 UTP_TASK_REQ_COMPL |\
43 UIC_POWER_MODE |\
42 UFSHCD_ERROR_MASK) 44 UFSHCD_ERROR_MASK)
43/* UIC command timeout, unit: ms */ 45/* UIC command timeout, unit: ms */
44#define UIC_CMD_TIMEOUT 500 46#define UIC_CMD_TIMEOUT 500
@@ -56,6 +58,9 @@
56/* Expose the flag value from utp_upiu_query.value */ 58/* Expose the flag value from utp_upiu_query.value */
57#define MASK_QUERY_UPIU_FLAG_LOC 0xFF 59#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
58 60
61/* Interrupt aggregation default timeout, unit: 40us */
62#define INT_AGGR_DEF_TO 0x02
63
59enum { 64enum {
60 UFSHCD_MAX_CHANNEL = 0, 65 UFSHCD_MAX_CHANNEL = 0,
61 UFSHCD_MAX_ID = 1, 66 UFSHCD_MAX_ID = 1,
@@ -78,12 +83,6 @@ enum {
78 UFSHCD_INT_CLEAR, 83 UFSHCD_INT_CLEAR,
79}; 84};
80 85
81/* Interrupt aggregation options */
82enum {
83 INT_AGGR_RESET,
84 INT_AGGR_CONFIG,
85};
86
87/* 86/*
88 * ufshcd_wait_for_register - wait for register value to change 87 * ufshcd_wait_for_register - wait for register value to change
89 * @hba - per-adapter interface 88 * @hba - per-adapter interface
@@ -238,6 +237,18 @@ static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
238} 237}
239 238
240/** 239/**
240 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
241 * @hba: Pointer to adapter instance
242 *
 243 * This function reads UIC command argument3
 244 * Returns the attribute value held in UIC command argument3
245 */
246static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
247{
248 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
249}
250
251/**
241 * ufshcd_get_req_rsp - returns the TR response transaction type 252 * ufshcd_get_req_rsp - returns the TR response transaction type
242 * @ucd_rsp_ptr: pointer to response UPIU 253 * @ucd_rsp_ptr: pointer to response UPIU
243 */ 254 */
@@ -260,6 +271,20 @@ ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
260 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT; 271 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
261} 272}
262 273
274/*
275 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
276 * from response UPIU
277 * @ucd_rsp_ptr: pointer to response UPIU
278 *
279 * Return the data segment length.
280 */
281static inline unsigned int
282ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
283{
284 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
285 MASK_RSP_UPIU_DATA_SEG_LEN;
286}
287
263/** 288/**
264 * ufshcd_is_exception_event - Check if the device raised an exception event 289 * ufshcd_is_exception_event - Check if the device raised an exception event
265 * @ucd_rsp_ptr: pointer to response UPIU 290 * @ucd_rsp_ptr: pointer to response UPIU
@@ -276,30 +301,30 @@ static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
276} 301}
277 302
278/** 303/**
279 * ufshcd_config_int_aggr - Configure interrupt aggregation values. 304 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
280 * Currently there is no use case where we want to configure
281 * interrupt aggregation dynamically. So to configure interrupt
282 * aggregation, #define INT_AGGR_COUNTER_THRESHOLD_VALUE and
283 * INT_AGGR_TIMEOUT_VALUE are used.
284 * @hba: per adapter instance 305 * @hba: per adapter instance
285 * @option: Interrupt aggregation option
286 */ 306 */
287static inline void 307static inline void
288ufshcd_config_int_aggr(struct ufs_hba *hba, int option) 308ufshcd_reset_intr_aggr(struct ufs_hba *hba)
289{ 309{
290 switch (option) { 310 ufshcd_writel(hba, INT_AGGR_ENABLE |
291 case INT_AGGR_RESET: 311 INT_AGGR_COUNTER_AND_TIMER_RESET,
292 ufshcd_writel(hba, INT_AGGR_ENABLE | 312 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
293 INT_AGGR_COUNTER_AND_TIMER_RESET, 313}
294 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); 314
295 break; 315/**
296 case INT_AGGR_CONFIG: 316 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
297 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE | 317 * @hba: per adapter instance
298 INT_AGGR_COUNTER_THRESHOLD_VALUE | 318 * @cnt: Interrupt aggregation counter threshold
299 INT_AGGR_TIMEOUT_VALUE, 319 * @tmout: Interrupt aggregation timeout value
300 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); 320 */
301 break; 321static inline void
302 } 322ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
323{
324 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
325 INT_AGGR_COUNTER_THLD_VAL(cnt) |
326 INT_AGGR_TIMEOUT_VAL(tmout),
327 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
303} 328}
304 329
305/** 330/**
@@ -355,7 +380,8 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
355static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) 380static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
356{ 381{
357 int len; 382 int len;
358 if (lrbp->sense_buffer) { 383 if (lrbp->sense_buffer &&
384 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
359 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len); 385 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
360 memcpy(lrbp->sense_buffer, 386 memcpy(lrbp->sense_buffer,
361 lrbp->ucd_rsp_ptr->sr.sense_data, 387 lrbp->ucd_rsp_ptr->sr.sense_data,
@@ -446,6 +472,18 @@ static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
446} 472}
447 473
448/** 474/**
475 * ufshcd_get_upmcrs - Get the power mode change request status
476 * @hba: Pointer to adapter instance
477 *
478 * This function gets the UPMCRS field of HCS register
479 * Returns value of UPMCRS field
480 */
481static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
482{
483 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
484}
485
486/**
449 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers 487 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
450 * @hba: per adapter instance 488 * @hba: per adapter instance
451 * @uic_cmd: UIC command 489 * @uic_cmd: UIC command
@@ -1362,6 +1400,202 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
1362} 1400}
1363 1401
1364/** 1402/**
1403 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
1404 * @hba: per adapter instance
1405 * @attr_sel: uic command argument1
1406 * @attr_set: attribute set type as uic command argument2
1407 * @mib_val: setting value as uic command argument3
1408 * @peer: indicate whether peer or local
1409 *
1410 * Returns 0 on success, non-zero value on failure
1411 */
1412int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
1413 u8 attr_set, u32 mib_val, u8 peer)
1414{
1415 struct uic_command uic_cmd = {0};
1416 static const char *const action[] = {
1417 "dme-set",
1418 "dme-peer-set"
1419 };
1420 const char *set = action[!!peer];
1421 int ret;
1422
1423 uic_cmd.command = peer ?
1424 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
1425 uic_cmd.argument1 = attr_sel;
1426 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
1427 uic_cmd.argument3 = mib_val;
1428
1429 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
1430 if (ret)
1431 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
1432 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
1433
1434 return ret;
1435}
1436EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
1437
1438/**
1439 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
1440 * @hba: per adapter instance
1441 * @attr_sel: uic command argument1
1442 * @mib_val: the value of the attribute as returned by the UIC command
 1443 * @peer: indicates whether the access is local or to the peer device
1444 *
1445 * Returns 0 on success, non-zero value on failure
1446 */
1447int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
1448 u32 *mib_val, u8 peer)
1449{
1450 struct uic_command uic_cmd = {0};
1451 static const char *const action[] = {
1452 "dme-get",
1453 "dme-peer-get"
1454 };
1455 const char *get = action[!!peer];
1456 int ret;
1457
1458 uic_cmd.command = peer ?
1459 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
1460 uic_cmd.argument1 = attr_sel;
1461
1462 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
1463 if (ret) {
1464 dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
1465 get, UIC_GET_ATTR_ID(attr_sel), ret);
1466 goto out;
1467 }
1468
1469 if (mib_val)
1470 *mib_val = uic_cmd.argument3;
1471out:
1472 return ret;
1473}
1474EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
1475
1476/**
 1477 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
1478 * using DME_SET primitives.
1479 * @hba: per adapter instance
 1480 * @mode: power mode value
1481 *
1482 * Returns 0 on success, non-zero value on failure
1483 */
1484int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
1485{
1486 struct uic_command uic_cmd = {0};
1487 struct completion pwr_done;
1488 unsigned long flags;
1489 u8 status;
1490 int ret;
1491
1492 uic_cmd.command = UIC_CMD_DME_SET;
1493 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
1494 uic_cmd.argument3 = mode;
1495 init_completion(&pwr_done);
1496
1497 mutex_lock(&hba->uic_cmd_mutex);
1498
1499 spin_lock_irqsave(hba->host->host_lock, flags);
1500 hba->pwr_done = &pwr_done;
1501 spin_unlock_irqrestore(hba->host->host_lock, flags);
1502 ret = __ufshcd_send_uic_cmd(hba, &uic_cmd);
1503 if (ret) {
1504 dev_err(hba->dev,
1505 "pwr mode change with mode 0x%x uic error %d\n",
1506 mode, ret);
1507 goto out;
1508 }
1509
1510 if (!wait_for_completion_timeout(hba->pwr_done,
1511 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
1512 dev_err(hba->dev,
1513 "pwr mode change with mode 0x%x completion timeout\n",
1514 mode);
1515 ret = -ETIMEDOUT;
1516 goto out;
1517 }
1518
1519 status = ufshcd_get_upmcrs(hba);
1520 if (status != PWR_LOCAL) {
1521 dev_err(hba->dev,
1522 "pwr mode change failed, host umpcrs:0x%x\n",
1523 status);
1524 ret = (status != PWR_OK) ? status : -1;
1525 }
1526out:
1527 spin_lock_irqsave(hba->host->host_lock, flags);
1528 hba->pwr_done = NULL;
1529 spin_unlock_irqrestore(hba->host->host_lock, flags);
1530 mutex_unlock(&hba->uic_cmd_mutex);
1531 return ret;
1532}
1533
1534/**
1535 * ufshcd_config_max_pwr_mode - Set & Change power mode with
1536 * maximum capability attribute information.
1537 * @hba: per adapter instance
1538 *
1539 * Returns 0 on success, non-zero value on failure
1540 */
1541static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
1542{
1543 enum {RX = 0, TX = 1};
1544 u32 lanes[] = {1, 1};
1545 u32 gear[] = {1, 1};
1546 u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE};
1547 int ret;
1548
1549 /* Get the connected lane count */
1550 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]);
1551 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]);
1552
1553 /*
1554 * First, get the maximum gears of HS speed.
 1555 * A zero value means there is no HS gear capability;
 1556 * in that case, fall back to the maximum PWM gear.
1557 */
1558 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]);
1559 if (!gear[RX]) {
1560 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]);
1561 pwr[RX] = SLOWAUTO_MODE;
1562 }
1563
1564 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]);
1565 if (!gear[TX]) {
1566 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
1567 &gear[TX]);
1568 pwr[TX] = SLOWAUTO_MODE;
1569 }
1570
1571 /*
 1572 * Configure the following attributes for the power mode change:
1573 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
1574 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
1575 * - PA_HSSERIES
1576 */
1577 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]);
1578 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]);
1579 if (pwr[RX] == FASTAUTO_MODE)
1580 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
1581
1582 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]);
1583 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]);
1584 if (pwr[TX] == FASTAUTO_MODE)
1585 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
1586
1587 if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE)
1588 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B);
1589
1590 ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]);
1591 if (ret)
1592 dev_err(hba->dev,
1593 "pwr_mode: power mode change failed %d\n", ret);
1594
1595 return ret;
1596}
1597
1598/**
1365 * ufshcd_complete_dev_init() - checks device readiness 1599 * ufshcd_complete_dev_init() - checks device readiness
 1366 * @hba: per-adapter instance 1600 * @hba: per-adapter instance
1367 * 1601 *
@@ -1442,7 +1676,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
1442 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); 1676 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
1443 1677
1444 /* Configure interrupt aggregation */ 1678 /* Configure interrupt aggregation */
1445 ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG); 1679 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
1446 1680
1447 /* Configure UTRL and UTMRL base address registers */ 1681 /* Configure UTRL and UTMRL base address registers */
1448 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), 1682 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
@@ -1788,32 +2022,24 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
1788 int result = 0; 2022 int result = 0;
1789 2023
1790 switch (scsi_status) { 2024 switch (scsi_status) {
1791 case SAM_STAT_GOOD:
1792 result |= DID_OK << 16 |
1793 COMMAND_COMPLETE << 8 |
1794 SAM_STAT_GOOD;
1795 break;
1796 case SAM_STAT_CHECK_CONDITION: 2025 case SAM_STAT_CHECK_CONDITION:
2026 ufshcd_copy_sense_data(lrbp);
2027 case SAM_STAT_GOOD:
1797 result |= DID_OK << 16 | 2028 result |= DID_OK << 16 |
1798 COMMAND_COMPLETE << 8 | 2029 COMMAND_COMPLETE << 8 |
1799 SAM_STAT_CHECK_CONDITION; 2030 scsi_status;
1800 ufshcd_copy_sense_data(lrbp);
1801 break;
1802 case SAM_STAT_BUSY:
1803 result |= SAM_STAT_BUSY;
1804 break; 2031 break;
1805 case SAM_STAT_TASK_SET_FULL: 2032 case SAM_STAT_TASK_SET_FULL:
1806
1807 /* 2033 /*
1808 * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue 2034 * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
1809 * depth needs to be adjusted to the exact number of 2035 * depth needs to be adjusted to the exact number of
1810 * outstanding commands the LUN can handle at any given time. 2036 * outstanding commands the LUN can handle at any given time.
1811 */ 2037 */
1812 ufshcd_adjust_lun_qdepth(lrbp->cmd); 2038 ufshcd_adjust_lun_qdepth(lrbp->cmd);
1813 result |= SAM_STAT_TASK_SET_FULL; 2039 case SAM_STAT_BUSY:
1814 break;
1815 case SAM_STAT_TASK_ABORTED: 2040 case SAM_STAT_TASK_ABORTED:
1816 result |= SAM_STAT_TASK_ABORTED; 2041 ufshcd_copy_sense_data(lrbp);
2042 result |= scsi_status;
1817 break; 2043 break;
1818 default: 2044 default:
1819 result |= DID_ERROR << 16; 2045 result |= DID_ERROR << 16;
@@ -1898,14 +2124,20 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1898/** 2124/**
1899 * ufshcd_uic_cmd_compl - handle completion of uic command 2125 * ufshcd_uic_cmd_compl - handle completion of uic command
1900 * @hba: per adapter instance 2126 * @hba: per adapter instance
2127 * @intr_status: interrupt status generated by the controller
1901 */ 2128 */
1902static void ufshcd_uic_cmd_compl(struct ufs_hba *hba) 2129static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
1903{ 2130{
1904 if (hba->active_uic_cmd) { 2131 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
1905 hba->active_uic_cmd->argument2 |= 2132 hba->active_uic_cmd->argument2 |=
1906 ufshcd_get_uic_cmd_result(hba); 2133 ufshcd_get_uic_cmd_result(hba);
2134 hba->active_uic_cmd->argument3 =
2135 ufshcd_get_dme_attr_val(hba);
1907 complete(&hba->active_uic_cmd->done); 2136 complete(&hba->active_uic_cmd->done);
1908 } 2137 }
2138
2139 if ((intr_status & UIC_POWER_MODE) && hba->pwr_done)
2140 complete(hba->pwr_done);
1909} 2141}
1910 2142
1911/** 2143/**
@@ -1960,7 +2192,7 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
1960 2192
1961 /* Reset interrupt aggregation counters */ 2193 /* Reset interrupt aggregation counters */
1962 if (int_aggr_reset) 2194 if (int_aggr_reset)
1963 ufshcd_config_int_aggr(hba, INT_AGGR_RESET); 2195 ufshcd_reset_intr_aggr(hba);
1964} 2196}
1965 2197
1966/** 2198/**
@@ -2251,8 +2483,8 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
2251 if (hba->errors) 2483 if (hba->errors)
2252 ufshcd_err_handler(hba); 2484 ufshcd_err_handler(hba);
2253 2485
2254 if (intr_status & UIC_COMMAND_COMPL) 2486 if (intr_status & UFSHCD_UIC_MASK)
2255 ufshcd_uic_cmd_compl(hba); 2487 ufshcd_uic_cmd_compl(hba, intr_status);
2256 2488
2257 if (intr_status & UTP_TASK_REQ_COMPL) 2489 if (intr_status & UTP_TASK_REQ_COMPL)
2258 ufshcd_tmc_handler(hba); 2490 ufshcd_tmc_handler(hba);
@@ -2494,6 +2726,8 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
2494 if (ret) 2726 if (ret)
2495 goto out; 2727 goto out;
2496 2728
2729 ufshcd_config_max_pwr_mode(hba);
2730
2497 ret = ufshcd_verify_dev_init(hba); 2731 ret = ufshcd_verify_dev_init(hba);
2498 if (ret) 2732 if (ret)
2499 goto out; 2733 goto out;
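
Two encodings in the new power-mode path are easy to miss:
ufshcd_uic_change_pwr_mode() takes the RX mode in bits 7:4 and the TX mode
in bits 3:0 (hence pwr[RX] << 4 | pwr[TX]), and ufshcd_get_upmcrs()
extracts the 3-bit UPMCRS field at bits 10:8 of the host controller status
register. Note also that the TX gear is probed with
ufshcd_dme_peer_get(PA_MAXRXHSGEAR): the peer's receiver capability bounds
the local transmitter. A standalone sketch of both encodings, with the
constants copied from this patch:

#include <stdint.h>
#include <stdio.h>

enum { FASTAUTO_MODE = 4, SLOWAUTO_MODE = 5 };	/* from unipro.h */
enum { PWR_OK = 0, PWR_LOCAL = 1 };		/* from ufshci.h */

static uint8_t pa_pwrmode(uint8_t rx, uint8_t tx)
{
	return (uint8_t)(rx << 4 | tx);	/* argument3 of the DME_SET */
}

static uint8_t upmcrs(uint32_t hcs)
{
	return (hcs >> 8) & 0x7;	/* UPMCRS field of HCS */
}

int main(void)
{
	printf("0x%02x\n", pa_pwrmode(FASTAUTO_MODE, FASTAUTO_MODE));	/* 0x44 */
	printf("%u\n", upmcrs(0x00000100));	/* 1 == PWR_LOCAL, i.e. success */
	return 0;
}
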
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 59c9c4848be1..577679a2d189 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -175,6 +175,7 @@ struct ufs_dev_cmd {
175 * @active_uic_cmd: handle of active UIC command 175 * @active_uic_cmd: handle of active UIC command
176 * @uic_cmd_mutex: mutex for uic command 176 * @uic_cmd_mutex: mutex for uic command
177 * @ufshcd_tm_wait_queue: wait queue for task management 177 * @ufshcd_tm_wait_queue: wait queue for task management
178 * @pwr_done: completion for power mode change
178 * @tm_condition: condition variable for task management 179 * @tm_condition: condition variable for task management
179 * @ufshcd_state: UFSHCD states 180 * @ufshcd_state: UFSHCD states
180 * @intr_mask: Interrupt Mask Bits 181 * @intr_mask: Interrupt Mask Bits
@@ -219,6 +220,8 @@ struct ufs_hba {
219 wait_queue_head_t ufshcd_tm_wait_queue; 220 wait_queue_head_t ufshcd_tm_wait_queue;
220 unsigned long tm_condition; 221 unsigned long tm_condition;
221 222
223 struct completion *pwr_done;
224
222 u32 ufshcd_state; 225 u32 ufshcd_state;
223 u32 intr_mask; 226 u32 intr_mask;
224 u16 ee_ctrl_mask; 227 u16 ee_ctrl_mask;
@@ -263,4 +266,55 @@ static inline void check_upiu_size(void)
263extern int ufshcd_runtime_suspend(struct ufs_hba *hba); 266extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
264extern int ufshcd_runtime_resume(struct ufs_hba *hba); 267extern int ufshcd_runtime_resume(struct ufs_hba *hba);
265extern int ufshcd_runtime_idle(struct ufs_hba *hba); 268extern int ufshcd_runtime_idle(struct ufs_hba *hba);
269extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
270 u8 attr_set, u32 mib_val, u8 peer);
271extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
272 u32 *mib_val, u8 peer);
273
274/* UIC command interfaces for DME primitives */
275#define DME_LOCAL 0
276#define DME_PEER 1
277#define ATTR_SET_NOR 0 /* NORMAL */
278#define ATTR_SET_ST 1 /* STATIC */
279
280static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
281 u32 mib_val)
282{
283 return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
284 mib_val, DME_LOCAL);
285}
286
287static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
288 u32 mib_val)
289{
290 return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
291 mib_val, DME_LOCAL);
292}
293
294static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
295 u32 mib_val)
296{
297 return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
298 mib_val, DME_PEER);
299}
300
301static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
302 u32 mib_val)
303{
304 return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
305 mib_val, DME_PEER);
306}
307
308static inline int ufshcd_dme_get(struct ufs_hba *hba,
309 u32 attr_sel, u32 *mib_val)
310{
311 return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
312}
313
314static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
315 u32 attr_sel, u32 *mib_val)
316{
317 return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
318}
319
266#endif /* End of Header */ 320#endif /* End of Header */
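
The inline wrappers above exist so that callers never pass the
ATTR_SET_*/DME_* selectors by hand. A hypothetical caller, not part of
this patch, reading the connected lane counts through them (assumes
ufshcd.h pulls in the UIC_ARG_MIB definition and unipro.h the attribute
IDs):

#include "ufshcd.h"
#include "unipro.h"

/* Illustrative only: each call expands to
 * ufshcd_dme_get_attr(hba, sel, val, DME_LOCAL). */
static int example_read_lanes(struct ufs_hba *hba, u32 *rx, u32 *tx)
{
	int ret;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), rx);
	if (ret)
		return ret;
	return ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx);
}
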
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index f1e1b7459107..0475c6619a68 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -124,6 +124,9 @@ enum {
124#define CONTROLLER_FATAL_ERROR UFS_BIT(16) 124#define CONTROLLER_FATAL_ERROR UFS_BIT(16)
125#define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17) 125#define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17)
126 126
127#define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL |\
128 UIC_POWER_MODE)
129
127#define UFSHCD_ERROR_MASK (UIC_ERROR |\ 130#define UFSHCD_ERROR_MASK (UIC_ERROR |\
128 DEVICE_FATAL_ERROR |\ 131 DEVICE_FATAL_ERROR |\
129 CONTROLLER_FATAL_ERROR |\ 132 CONTROLLER_FATAL_ERROR |\
@@ -142,6 +145,15 @@ enum {
142#define DEVICE_ERROR_INDICATOR UFS_BIT(5) 145#define DEVICE_ERROR_INDICATOR UFS_BIT(5)
143#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8) 146#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
144 147
148enum {
149 PWR_OK = 0x0,
150 PWR_LOCAL = 0x01,
151 PWR_REMOTE = 0x02,
152 PWR_BUSY = 0x03,
153 PWR_ERROR_CAP = 0x04,
154 PWR_FATAL_ERROR = 0x05,
155};
156
145/* HCE - Host Controller Enable 34h */ 157/* HCE - Host Controller Enable 34h */
146#define CONTROLLER_ENABLE UFS_BIT(0) 158#define CONTROLLER_ENABLE UFS_BIT(0)
147#define CONTROLLER_DISABLE 0x0 159#define CONTROLLER_DISABLE 0x0
@@ -191,6 +203,12 @@ enum {
191#define CONFIG_RESULT_CODE_MASK 0xFF 203#define CONFIG_RESULT_CODE_MASK 0xFF
192#define GENERIC_ERROR_CODE_MASK 0xFF 204#define GENERIC_ERROR_CODE_MASK 0xFF
193 205
206#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\
207 ((sel) & 0xFFFF))
208#define UIC_ARG_MIB(attr) UIC_ARG_MIB_SEL(attr, 0)
209#define UIC_ARG_ATTR_TYPE(t) (((t) & 0xFF) << 16)
210#define UIC_GET_ATTR_ID(v) (((v) >> 16) & 0xFFFF)
211
194/* UIC Commands */ 212/* UIC Commands */
195enum { 213enum {
196 UIC_CMD_DME_GET = 0x01, 214 UIC_CMD_DME_GET = 0x01,
@@ -226,8 +244,8 @@ enum {
226 244
227#define MASK_UIC_COMMAND_RESULT 0xFF 245#define MASK_UIC_COMMAND_RESULT 0xFF
228 246
229#define INT_AGGR_COUNTER_THRESHOLD_VALUE (0x1F << 8) 247#define INT_AGGR_COUNTER_THLD_VAL(c) (((c) & 0x1F) << 8)
230#define INT_AGGR_TIMEOUT_VALUE (0x02) 248#define INT_AGGR_TIMEOUT_VAL(t) (((t) & 0xFF) << 0)
231 249
232/* Interrupt disable masks */ 250/* Interrupt disable masks */
233enum { 251enum {
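
The parameterized INT_AGGR_*_VAL macros replace the old fixed constants so
the counter threshold can track the queue depth (hba->nutrs - 1) while the
timeout keeps its 0x02 default (units of 40us). UIC_ARG_MIB_SEL likewise
packs an attribute ID into bits 31:16 and a GenSelectorIndex into bits
15:0; every caller in this patch uses selector 0 via UIC_ARG_MIB. A
standalone check of the values these macros produce; the INT_AGGR_ENABLE
and INT_AGGR_PARAM_WRITE bit positions are defined elsewhere in ufshci.h
and are restated here as assumptions:

#include <stdint.h>
#include <stdio.h>

#define UIC_ARG_MIB_SEL(attr, sel)	((((attr) & 0xFFFF) << 16) |\
					 ((sel) & 0xFFFF))
#define UIC_ARG_MIB(attr)		UIC_ARG_MIB_SEL(attr, 0)
#define INT_AGGR_COUNTER_THLD_VAL(c)	(((uint32_t)(c) & 0x1F) << 8)
#define INT_AGGR_TIMEOUT_VAL(t)		(((uint32_t)(t) & 0xFF) << 0)
#define INT_AGGR_PARAM_WRITE		(1U << 24)	/* assumed bit position */
#define INT_AGGR_ENABLE			(1U << 31)	/* assumed bit position */
#define PA_PWRMODE			0x1571		/* from unipro.h */

int main(void)
{
	/* 32-slot queue: threshold 31, default timeout 0x02 (80us) */
	uint32_t utriacr = INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
			   INT_AGGR_COUNTER_THLD_VAL(32 - 1) |
			   INT_AGGR_TIMEOUT_VAL(0x02);

	printf("UTRIACR  = 0x%08x\n", utriacr);	/* 0x81001f02 */
	printf("UIC arg1 = 0x%08x\n",
	       (uint32_t)UIC_ARG_MIB(PA_PWRMODE));	/* 0x15710000 */
	return 0;
}
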
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
new file mode 100644
index 000000000000..0bb8041c047a
--- /dev/null
+++ b/drivers/scsi/ufs/unipro.h
@@ -0,0 +1,151 @@
1/*
2 * drivers/scsi/ufs/unipro.h
3 *
4 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef _UNIPRO_H_
13#define _UNIPRO_H_
14
15/*
 16 * PHY Adapter attributes
17 */
18#define PA_ACTIVETXDATALANES 0x1560
19#define PA_ACTIVERXDATALANES 0x1580
20#define PA_TXTRAILINGCLOCKS 0x1564
21#define PA_PHY_TYPE 0x1500
22#define PA_AVAILTXDATALANES 0x1520
23#define PA_AVAILRXDATALANES 0x1540
24#define PA_MINRXTRAILINGCLOCKS 0x1543
25#define PA_TXPWRSTATUS 0x1567
26#define PA_RXPWRSTATUS 0x1582
27#define PA_TXFORCECLOCK 0x1562
28#define PA_TXPWRMODE 0x1563
29#define PA_LEGACYDPHYESCDL 0x1570
30#define PA_MAXTXSPEEDFAST 0x1521
31#define PA_MAXTXSPEEDSLOW 0x1522
32#define PA_MAXRXSPEEDFAST 0x1541
33#define PA_MAXRXSPEEDSLOW 0x1542
34#define PA_TXLINKSTARTUPHS 0x1544
35#define PA_TXSPEEDFAST 0x1565
36#define PA_TXSPEEDSLOW 0x1566
37#define PA_REMOTEVERINFO 0x15A0
38#define PA_TXGEAR 0x1568
39#define PA_TXTERMINATION 0x1569
40#define PA_HSSERIES 0x156A
41#define PA_PWRMODE 0x1571
42#define PA_RXGEAR 0x1583
43#define PA_RXTERMINATION 0x1584
44#define PA_MAXRXPWMGEAR 0x1586
45#define PA_MAXRXHSGEAR 0x1587
46#define PA_RXHSUNTERMCAP 0x15A5
47#define PA_RXLSTERMCAP 0x15A6
48#define PA_PACPREQTIMEOUT 0x1590
49#define PA_PACPREQEOBTIMEOUT 0x1591
50#define PA_HIBERN8TIME 0x15A7
51#define PA_LOCALVERINFO 0x15A9
52#define PA_TACTIVATE 0x15A8
53#define PA_PACPFRAMECOUNT 0x15C0
54#define PA_PACPERRORCOUNT 0x15C1
55#define PA_PHYTESTCONTROL 0x15C2
56#define PA_PWRMODEUSERDATA0 0x15B0
57#define PA_PWRMODEUSERDATA1 0x15B1
58#define PA_PWRMODEUSERDATA2 0x15B2
59#define PA_PWRMODEUSERDATA3 0x15B3
60#define PA_PWRMODEUSERDATA4 0x15B4
61#define PA_PWRMODEUSERDATA5 0x15B5
62#define PA_PWRMODEUSERDATA6 0x15B6
63#define PA_PWRMODEUSERDATA7 0x15B7
64#define PA_PWRMODEUSERDATA8 0x15B8
65#define PA_PWRMODEUSERDATA9 0x15B9
66#define PA_PWRMODEUSERDATA10 0x15BA
67#define PA_PWRMODEUSERDATA11 0x15BB
68#define PA_CONNECTEDTXDATALANES 0x1561
69#define PA_CONNECTEDRXDATALANES 0x1581
70#define PA_LOGICALLANEMAP 0x15A1
71#define PA_SLEEPNOCONFIGTIME 0x15A2
72#define PA_STALLNOCONFIGTIME 0x15A3
73#define PA_SAVECONFIGTIME 0x15A4
74
75/* PA power modes */
76enum {
77 FAST_MODE = 1,
78 SLOW_MODE = 2,
79 FASTAUTO_MODE = 4,
80 SLOWAUTO_MODE = 5,
81 UNCHANGED = 7,
82};
83
84/* PA TX/RX Frequency Series */
85enum {
86 PA_HS_MODE_A = 1,
87 PA_HS_MODE_B = 2,
88};
89
90/*
91 * Data Link Layer Attributes
92 */
93#define DL_TC0TXFCTHRESHOLD 0x2040
94#define DL_FC0PROTTIMEOUTVAL 0x2041
95#define DL_TC0REPLAYTIMEOUTVAL 0x2042
96#define DL_AFC0REQTIMEOUTVAL 0x2043
97#define DL_AFC0CREDITTHRESHOLD 0x2044
98#define DL_TC0OUTACKTHRESHOLD 0x2045
99#define DL_TC1TXFCTHRESHOLD 0x2060
100#define DL_FC1PROTTIMEOUTVAL 0x2061
101#define DL_TC1REPLAYTIMEOUTVAL 0x2062
102#define DL_AFC1REQTIMEOUTVAL 0x2063
103#define DL_AFC1CREDITTHRESHOLD 0x2064
104#define DL_TC1OUTACKTHRESHOLD 0x2065
105#define DL_TXPREEMPTIONCAP 0x2000
106#define DL_TC0TXMAXSDUSIZE 0x2001
107#define DL_TC0RXINITCREDITVAL 0x2002
108#define DL_TC0TXBUFFERSIZE 0x2005
109#define DL_PEERTC0PRESENT 0x2046
110#define DL_PEERTC0RXINITCREVAL 0x2047
111#define DL_TC1TXMAXSDUSIZE 0x2003
112#define DL_TC1RXINITCREDITVAL 0x2004
113#define DL_TC1TXBUFFERSIZE 0x2006
114#define DL_PEERTC1PRESENT 0x2066
115#define DL_PEERTC1RXINITCREVAL 0x2067
116
117/*
118 * Network Layer Attributes
119 */
120#define N_DEVICEID 0x3000
121#define N_DEVICEID_VALID 0x3001
122#define N_TC0TXMAXSDUSIZE 0x3020
123#define N_TC1TXMAXSDUSIZE 0x3021
124
125/*
126 * Transport Layer Attributes
127 */
128#define T_NUMCPORTS 0x4000
129#define T_NUMTESTFEATURES 0x4001
130#define T_CONNECTIONSTATE 0x4020
131#define T_PEERDEVICEID 0x4021
132#define T_PEERCPORTID 0x4022
133#define T_TRAFFICCLASS 0x4023
134#define T_PROTOCOLID 0x4024
135#define T_CPORTFLAGS 0x4025
136#define T_TXTOKENVALUE 0x4026
137#define T_RXTOKENVALUE 0x4027
138#define T_LOCALBUFFERSPACE 0x4028
139#define T_PEERBUFFERSPACE 0x4029
140#define T_CREDITSTOSEND 0x402A
141#define T_CPORTMODE 0x402B
142#define T_TC0TXMAXSDUSIZE 0x4060
143#define T_TC1TXMAXSDUSIZE 0x4061
144
145/* Boolean attribute values */
146enum {
147 FALSE = 0,
148 TRUE,
149};
150
151#endif /* _UNIPRO_H_ */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index bc95b2b391bf..97fbecdd7a40 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -758,6 +758,7 @@
758#define PCI_DEVICE_ID_HP_CISSE 0x323a 758#define PCI_DEVICE_ID_HP_CISSE 0x323a
759#define PCI_DEVICE_ID_HP_CISSF 0x323b 759#define PCI_DEVICE_ID_HP_CISSF 0x323b
760#define PCI_DEVICE_ID_HP_CISSH 0x323c 760#define PCI_DEVICE_ID_HP_CISSH 0x323c
761#define PCI_DEVICE_ID_HP_CISSI 0x3239
761#define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031 762#define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031
762 763
763#define PCI_VENDOR_ID_PCTECH 0x1042 764#define PCI_VENDOR_ID_PCTECH 0x1042
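
The new PCI_DEVICE_ID_HP_CISSI entry is consumed by the hpsa driver that
appears in the diffstat. A hypothetical match-table entry of the usual
shape (the table name is illustrative; the real table lives in
drivers/scsi/hpsa.c and also matches on subsystem IDs):

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_hpsa_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI) },
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, example_hpsa_ids);
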