aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/message/fusion/mptbase.c6
-rw-r--r--drivers/misc/enclosure.c1
-rw-r--r--drivers/scsi/3w-9xxx.c11
-rw-r--r--drivers/scsi/Kconfig10
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c12
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h1
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c51
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c16
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.c24
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_pdu.c4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c135
-rw-r--r--drivers/scsi/hpsa.c3531
-rw-r--r--drivers/scsi/hpsa.h273
-rw-r--r--drivers/scsi/hpsa_cmd.h326
-rw-r--r--drivers/scsi/ipr.c1
-rw-r--r--drivers/scsi/libfc/fc_fcp.c65
-rw-r--r--drivers/scsi/libfc/fc_lport.c7
-rw-r--r--drivers/scsi/libfc/fc_rport.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c16
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c14
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c5
-rw-r--r--drivers/scsi/mvsas/mv_init.c1
-rw-r--r--drivers/scsi/osd/osd_initiator.c88
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.h10
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c149
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.h3
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c19
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c57
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h32
-rw-r--r--drivers/scsi/pmcraid.c34
-rw-r--r--drivers/scsi/pmcraid.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c103
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c75
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/scsi_lib.c1
-rw-r--r--drivers/scsi/scsi_transport_fc.c17
-rw-r--r--drivers/scsi/sd.c107
-rw-r--r--drivers/scsi/sd.h2
-rw-r--r--drivers/scsi/st.c23
-rw-r--r--drivers/scsi/st.h1
-rw-r--r--include/linux/enclosure.h2
-rw-r--r--include/scsi/osd_initiator.h5
49 files changed, 4914 insertions, 360 deletions
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 610e914abe6c..85bc6a685e36 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1587,7 +1587,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
1587{ 1587{
1588 u8 __iomem *mem; 1588 u8 __iomem *mem;
1589 int ii; 1589 int ii;
1590 unsigned long mem_phys; 1590 resource_size_t mem_phys;
1591 unsigned long port; 1591 unsigned long port;
1592 u32 msize; 1592 u32 msize;
1593 u32 psize; 1593 u32 psize;
@@ -1677,8 +1677,8 @@ mpt_mapresources(MPT_ADAPTER *ioc)
1677 return -EINVAL; 1677 return -EINVAL;
1678 } 1678 }
1679 ioc->memmap = mem; 1679 ioc->memmap = mem;
1680 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %lx\n", 1680 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %llx\n",
1681 ioc->name, mem, mem_phys)); 1681 ioc->name, mem, (unsigned long long)mem_phys));
1682 1682
1683 ioc->mem_phys = mem_phys; 1683 ioc->mem_phys = mem_phys;
1684 ioc->chip = (SYSIF_REGS __iomem *)mem; 1684 ioc->chip = (SYSIF_REGS __iomem *)mem;
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index e9eae4a78402..1eac626e710a 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -391,6 +391,7 @@ static const char *const enclosure_status [] = {
391 [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed", 391 [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed",
392 [ENCLOSURE_STATUS_UNKNOWN] = "unknown", 392 [ENCLOSURE_STATUS_UNKNOWN] = "unknown",
393 [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable", 393 [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable",
394 [ENCLOSURE_STATUS_MAX] = NULL,
394}; 395};
395 396
396static const char *const enclosure_type [] = { 397static const char *const enclosure_type [] = {
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 3bf75924741f..84d3bbaa95e7 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -76,6 +76,7 @@
76 Fix bug in twa_get_param() on 4GB+. 76 Fix bug in twa_get_param() on 4GB+.
77 Use pci_resource_len() for ioremap(). 77 Use pci_resource_len() for ioremap().
78 2.26.02.012 - Add power management support. 78 2.26.02.012 - Add power management support.
79 2.26.02.013 - Fix bug in twa_load_sgl().
79*/ 80*/
80 81
81#include <linux/module.h> 82#include <linux/module.h>
@@ -100,7 +101,7 @@
100#include "3w-9xxx.h" 101#include "3w-9xxx.h"
101 102
102/* Globals */ 103/* Globals */
103#define TW_DRIVER_VERSION "2.26.02.012" 104#define TW_DRIVER_VERSION "2.26.02.013"
104static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; 105static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
105static unsigned int twa_device_extension_count; 106static unsigned int twa_device_extension_count;
106static int twa_major = -1; 107static int twa_major = -1;
@@ -1382,10 +1383,12 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
1382 newcommand = &full_command_packet->command.newcommand; 1383 newcommand = &full_command_packet->command.newcommand;
1383 newcommand->request_id__lunl = 1384 newcommand->request_id__lunl =
1384 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id)); 1385 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1385 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1); 1386 if (length) {
1386 newcommand->sg_list[0].length = cpu_to_le32(length); 1387 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1388 newcommand->sg_list[0].length = cpu_to_le32(length);
1389 }
1387 newcommand->sgl_entries__lunh = 1390 newcommand->sgl_entries__lunh =
1388 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), 1)); 1391 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
1389 } else { 1392 } else {
1390 oldcommand = &full_command_packet->command.oldcommand; 1393 oldcommand = &full_command_packet->command.oldcommand;
1391 oldcommand->request_id = request_id; 1394 oldcommand->request_id = request_id;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 36900c71a592..9191d1ea6451 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -388,6 +388,16 @@ config BLK_DEV_3W_XXXX_RAID
388 Please read the comments at the top of 388 Please read the comments at the top of
389 <file:drivers/scsi/3w-xxxx.c>. 389 <file:drivers/scsi/3w-xxxx.c>.
390 390
391config SCSI_HPSA
392 tristate "HP Smart Array SCSI driver"
393 depends on PCI && SCSI
394 help
395 This driver supports HP Smart Array Controllers (circa 2009).
396 It is a SCSI alternative to the cciss driver, which is a block
397 driver. Anyone wishing to use HP Smart Array controllers who
398 would prefer the devices be presented to linux as SCSI devices,
399 rather than as generic block devices should say Y here.
400
391config SCSI_3W_9XXX 401config SCSI_3W_9XXX
392 tristate "3ware 9xxx SATA-RAID support" 402 tristate "3ware 9xxx SATA-RAID support"
393 depends on PCI && SCSI 403 depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 280d3c657d60..92a8c500b23d 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -91,6 +91,7 @@ obj-$(CONFIG_SCSI_BFA_FC) += bfa/
91obj-$(CONFIG_SCSI_PAS16) += pas16.o 91obj-$(CONFIG_SCSI_PAS16) += pas16.o
92obj-$(CONFIG_SCSI_T128) += t128.o 92obj-$(CONFIG_SCSI_T128) += t128.o
93obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o 93obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
94obj-$(CONFIG_SCSI_HPSA) += hpsa.o
94obj-$(CONFIG_SCSI_DTC3280) += dtc.o 95obj-$(CONFIG_SCSI_DTC3280) += dtc.o
95obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/ 96obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
96obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o 97obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 698a527d6cca..f008708f1b08 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -135,11 +135,15 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
135 while ((compl = be_mcc_compl_get(phba))) { 135 while ((compl = be_mcc_compl_get(phba))) {
136 if (compl->flags & CQE_FLAGS_ASYNC_MASK) { 136 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
137 /* Interpret flags as an async trailer */ 137 /* Interpret flags as an async trailer */
138 BUG_ON(!is_link_state_evt(compl->flags)); 138 if (is_link_state_evt(compl->flags))
139 /* Interpret compl as a async link evt */
140 beiscsi_async_link_state_process(phba,
141 (struct be_async_event_link_state *) compl);
142 else
143 SE_DEBUG(DBG_LVL_1,
144 " Unsupported Async Event, flags"
145 " = 0x%08x \n", compl->flags);
139 146
140 /* Interpret compl as a async link evt */
141 beiscsi_async_link_state_process(phba,
142 (struct be_async_event_link_state *) compl);
143 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { 147 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
144 status = be_mcc_compl_process(ctrl, compl); 148 status = be_mcc_compl_process(ctrl, compl);
145 atomic_dec(&phba->ctrl.mcc_obj.q.used); 149 atomic_dec(&phba->ctrl.mcc_obj.q.used);
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 2b973f3c2eb2..6cf9dc37d78b 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -684,6 +684,7 @@ extern unsigned int error_mask1, error_mask2;
684extern u64 iscsi_error_mask; 684extern u64 iscsi_error_mask;
685extern unsigned int en_tcp_dack; 685extern unsigned int en_tcp_dack;
686extern unsigned int event_coal_div; 686extern unsigned int event_coal_div;
687extern unsigned int event_coal_min;
687 688
688extern struct scsi_transport_template *bnx2i_scsi_xport_template; 689extern struct scsi_transport_template *bnx2i_scsi_xport_template;
689extern struct iscsi_transport bnx2i_iscsi_transport; 690extern struct iscsi_transport bnx2i_iscsi_transport;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 5c8d7630c13e..1af578dec276 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -133,20 +133,38 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
133{ 133{
134 struct bnx2i_5771x_cq_db *cq_db; 134 struct bnx2i_5771x_cq_db *cq_db;
135 u16 cq_index; 135 u16 cq_index;
136 u16 next_index;
137 u32 num_active_cmds;
136 138
139
140 /* Coalesce CQ entries only on 10G devices */
137 if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) 141 if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
138 return; 142 return;
139 143
144 /* Do not update CQ DB multiple times before firmware writes
145 * '0xFFFF' to CQDB->SQN field. Deviation may cause spurious
146 * interrupts and other unwanted results
147 */
148 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
149 if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
150 return;
151
140 if (action == CNIC_ARM_CQE) { 152 if (action == CNIC_ARM_CQE) {
141 cq_index = ep->qp.cqe_exp_seq_sn + 153 num_active_cmds = ep->num_active_cmds;
142 ep->num_active_cmds / event_coal_div; 154 if (num_active_cmds <= event_coal_min)
143 cq_index %= (ep->qp.cqe_size * 2 + 1); 155 next_index = 1;
144 if (!cq_index) { 156 else
157 next_index = event_coal_min +
158 (num_active_cmds - event_coal_min) / event_coal_div;
159 if (!next_index)
160 next_index = 1;
161 cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
162 if (cq_index > ep->qp.cqe_size * 2)
163 cq_index -= ep->qp.cqe_size * 2;
164 if (!cq_index)
145 cq_index = 1; 165 cq_index = 1;
146 cq_db = (struct bnx2i_5771x_cq_db *) 166
147 ep->qp.cq_pgtbl_virt; 167 cq_db->sqn[0] = cq_index;
148 cq_db->sqn[0] = cq_index;
149 }
150 } 168 }
151} 169}
152 170
@@ -366,6 +384,7 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
366 struct bnx2i_cmd *bnx2i_cmd; 384 struct bnx2i_cmd *bnx2i_cmd;
367 struct bnx2i_tmf_request *tmfabort_wqe; 385 struct bnx2i_tmf_request *tmfabort_wqe;
368 u32 dword; 386 u32 dword;
387 u32 scsi_lun[2];
369 388
370 bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data; 389 bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
371 tmfabort_hdr = (struct iscsi_tm *)mtask->hdr; 390 tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
@@ -376,27 +395,35 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
376 tmfabort_wqe->op_attr = 0; 395 tmfabort_wqe->op_attr = 0;
377 tmfabort_wqe->op_attr = 396 tmfabort_wqe->op_attr =
378 ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK; 397 ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
379 tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]);
380 tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]);
381 398
382 tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14)); 399 tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
383 tmfabort_wqe->reserved2 = 0; 400 tmfabort_wqe->reserved2 = 0;
384 tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn); 401 tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
385 402
386 ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt); 403 ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
387 if (!ctask || ctask->sc) 404 if (!ctask || !ctask->sc)
388 /* 405 /*
389 * the iscsi layer must have completed the cmd while this 406 * the iscsi layer must have completed the cmd while this
390 * was starting up. 407 * was starting up.
408 *
409 * Note: In the case of a SCSI cmd timeout, the task's sc
410 * is still active; hence ctask->sc != 0
411 * In this case, the task must be aborted
391 */ 412 */
392 return 0; 413 return 0;
414
393 ref_sc = ctask->sc; 415 ref_sc = ctask->sc;
394 416
417 /* Retrieve LUN directly from the ref_sc */
418 int_to_scsilun(ref_sc->device->lun, (struct scsi_lun *) scsi_lun);
419 tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]);
420 tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]);
421
395 if (ref_sc->sc_data_direction == DMA_TO_DEVICE) 422 if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
396 dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT); 423 dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
397 else 424 else
398 dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT); 425 dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
399 tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt); 426 tmfabort_wqe->ref_itt = (dword | (tmfabort_hdr->rtt & ISCSI_ITT_MASK));
400 tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn); 427 tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
401 428
402 tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; 429 tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 0c4210d48ee8..6d8172e781cf 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -17,8 +17,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
17static u32 adapter_count; 17static u32 adapter_count;
18 18
19#define DRV_MODULE_NAME "bnx2i" 19#define DRV_MODULE_NAME "bnx2i"
20#define DRV_MODULE_VERSION "2.0.1e" 20#define DRV_MODULE_VERSION "2.1.0"
21#define DRV_MODULE_RELDATE "June 22, 2009" 21#define DRV_MODULE_RELDATE "Dec 06, 2009"
22 22
23static char version[] __devinitdata = 23static char version[] __devinitdata =
24 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ 24 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -32,6 +32,10 @@ MODULE_VERSION(DRV_MODULE_VERSION);
32 32
33static DEFINE_MUTEX(bnx2i_dev_lock); 33static DEFINE_MUTEX(bnx2i_dev_lock);
34 34
35unsigned int event_coal_min = 24;
36module_param(event_coal_min, int, 0664);
37MODULE_PARM_DESC(event_coal_min, "Event Coalescing Minimum Commands");
38
35unsigned int event_coal_div = 1; 39unsigned int event_coal_div = 1;
36module_param(event_coal_div, int, 0664); 40module_param(event_coal_div, int, 0664);
37MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor"); 41MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
@@ -83,8 +87,12 @@ void bnx2i_identify_device(struct bnx2i_hba *hba)
83 set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type); 87 set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
84 hba->mail_queue_access = BNX2I_MQ_BIN_MODE; 88 hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
85 } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 || 89 } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
86 hba->pci_did == PCI_DEVICE_ID_NX2_57711) 90 hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
91 hba->pci_did == PCI_DEVICE_ID_NX2_57711E)
87 set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type); 92 set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
93 else
94 printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n",
95 hba->pci_did);
88} 96}
89 97
90 98
@@ -363,7 +371,7 @@ static int __init bnx2i_mod_init(void)
363 371
364 printk(KERN_INFO "%s", version); 372 printk(KERN_INFO "%s", version);
365 373
366 if (!is_power_of_2(sq_size)) 374 if (sq_size && !is_power_of_2(sq_size))
367 sq_size = roundup_pow_of_two(sq_size); 375 sq_size = roundup_pow_of_two(sq_size);
368 376
369 mutex_init(&bnx2i_dev_lock); 377 mutex_init(&bnx2i_dev_lock);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 132898c88d5e..33b2294625bb 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -485,7 +485,6 @@ static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
485 struct iscsi_task *task = session->cmds[i]; 485 struct iscsi_task *task = session->cmds[i];
486 struct bnx2i_cmd *cmd = task->dd_data; 486 struct bnx2i_cmd *cmd = task->dd_data;
487 487
488 /* Anil */
489 task->hdr = &cmd->hdr; 488 task->hdr = &cmd->hdr;
490 task->hdr_max = sizeof(struct iscsi_hdr); 489 task->hdr_max = sizeof(struct iscsi_hdr);
491 490
@@ -765,7 +764,6 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
765 hba->pci_svid = hba->pcidev->subsystem_vendor; 764 hba->pci_svid = hba->pcidev->subsystem_vendor;
766 hba->pci_func = PCI_FUNC(hba->pcidev->devfn); 765 hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
767 hba->pci_devno = PCI_SLOT(hba->pcidev->devfn); 766 hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
768 bnx2i_identify_device(hba);
769 767
770 bnx2i_identify_device(hba); 768 bnx2i_identify_device(hba);
771 bnx2i_setup_host_queue_size(hba, shost); 769 bnx2i_setup_host_queue_size(hba, shost);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index c1d5be4adf9c..26ffdcd5a437 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -291,7 +291,7 @@ static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
291 c3cn_hold(c3cn); 291 c3cn_hold(c3cn);
292 spin_lock_bh(&c3cn->lock); 292 spin_lock_bh(&c3cn->lock);
293 if (c3cn->state == C3CN_STATE_CONNECTING) 293 if (c3cn->state == C3CN_STATE_CONNECTING)
294 fail_act_open(c3cn, EHOSTUNREACH); 294 fail_act_open(c3cn, -EHOSTUNREACH);
295 spin_unlock_bh(&c3cn->lock); 295 spin_unlock_bh(&c3cn->lock);
296 c3cn_put(c3cn); 296 c3cn_put(c3cn);
297 __kfree_skb(skb); 297 __kfree_skb(skb);
@@ -792,18 +792,18 @@ static int act_open_rpl_status_to_errno(int status)
792{ 792{
793 switch (status) { 793 switch (status) {
794 case CPL_ERR_CONN_RESET: 794 case CPL_ERR_CONN_RESET:
795 return ECONNREFUSED; 795 return -ECONNREFUSED;
796 case CPL_ERR_ARP_MISS: 796 case CPL_ERR_ARP_MISS:
797 return EHOSTUNREACH; 797 return -EHOSTUNREACH;
798 case CPL_ERR_CONN_TIMEDOUT: 798 case CPL_ERR_CONN_TIMEDOUT:
799 return ETIMEDOUT; 799 return -ETIMEDOUT;
800 case CPL_ERR_TCAM_FULL: 800 case CPL_ERR_TCAM_FULL:
801 return ENOMEM; 801 return -ENOMEM;
802 case CPL_ERR_CONN_EXIST: 802 case CPL_ERR_CONN_EXIST:
803 cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n"); 803 cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n");
804 return EADDRINUSE; 804 return -EADDRINUSE;
805 default: 805 default:
806 return EIO; 806 return -EIO;
807 } 807 }
808} 808}
809 809
@@ -817,7 +817,7 @@ static void act_open_retry_timer(unsigned long data)
817 spin_lock_bh(&c3cn->lock); 817 spin_lock_bh(&c3cn->lock);
818 skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC); 818 skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC);
819 if (!skb) 819 if (!skb)
820 fail_act_open(c3cn, ENOMEM); 820 fail_act_open(c3cn, -ENOMEM);
821 else { 821 else {
822 skb->sk = (struct sock *)c3cn; 822 skb->sk = (struct sock *)c3cn;
823 set_arp_failure_handler(skb, act_open_req_arp_failure); 823 set_arp_failure_handler(skb, act_open_req_arp_failure);
@@ -966,14 +966,14 @@ static int abort_status_to_errno(struct s3_conn *c3cn, int abort_reason,
966 case CPL_ERR_BAD_SYN: /* fall through */ 966 case CPL_ERR_BAD_SYN: /* fall through */
967 case CPL_ERR_CONN_RESET: 967 case CPL_ERR_CONN_RESET:
968 return c3cn->state > C3CN_STATE_ESTABLISHED ? 968 return c3cn->state > C3CN_STATE_ESTABLISHED ?
969 EPIPE : ECONNRESET; 969 -EPIPE : -ECONNRESET;
970 case CPL_ERR_XMIT_TIMEDOUT: 970 case CPL_ERR_XMIT_TIMEDOUT:
971 case CPL_ERR_PERSIST_TIMEDOUT: 971 case CPL_ERR_PERSIST_TIMEDOUT:
972 case CPL_ERR_FINWAIT2_TIMEDOUT: 972 case CPL_ERR_FINWAIT2_TIMEDOUT:
973 case CPL_ERR_KEEPALIVE_TIMEDOUT: 973 case CPL_ERR_KEEPALIVE_TIMEDOUT:
974 return ETIMEDOUT; 974 return -ETIMEDOUT;
975 default: 975 default:
976 return EIO; 976 return -EIO;
977 } 977 }
978} 978}
979 979
@@ -1563,7 +1563,7 @@ free_tid:
1563 s3_free_atid(cdev, c3cn->tid); 1563 s3_free_atid(cdev, c3cn->tid);
1564 c3cn->tid = 0; 1564 c3cn->tid = 0;
1565out_err: 1565out_err:
1566 return -1; 1566 return -EINVAL;
1567} 1567}
1568 1568
1569 1569
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
index 709105071177..1fe3b0f1f3c9 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
@@ -388,8 +388,8 @@ int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
388 if (err > 0) { 388 if (err > 0) {
389 int pdulen = err; 389 int pdulen = err;
390 390
391 cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n", 391 cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
392 task, skb, skb->len, skb->data_len, err); 392 task, skb, skb->len, skb->data_len, err);
393 393
394 if (task->conn->hdrdgst_en) 394 if (task->conn->hdrdgst_en)
395 pdulen += ISCSI_DIGEST_SIZE; 395 pdulen += ISCSI_DIGEST_SIZE;
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 47cfe1c49c3e..1a660191a905 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -748,6 +748,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
748 {"IBM", "1724"}, 748 {"IBM", "1724"},
749 {"IBM", "1726"}, 749 {"IBM", "1726"},
750 {"IBM", "1742"}, 750 {"IBM", "1742"},
751 {"IBM", "1745"},
752 {"IBM", "1746"},
751 {"IBM", "1814"}, 753 {"IBM", "1814"},
752 {"IBM", "1815"}, 754 {"IBM", "1815"},
753 {"IBM", "1818"}, 755 {"IBM", "1818"},
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index a30ffaa1222c..e3896fcb06e3 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -101,6 +101,8 @@ static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
101 101
102static int fcoe_create(const char *, struct kernel_param *); 102static int fcoe_create(const char *, struct kernel_param *);
103static int fcoe_destroy(const char *, struct kernel_param *); 103static int fcoe_destroy(const char *, struct kernel_param *);
104static int fcoe_enable(const char *, struct kernel_param *);
105static int fcoe_disable(const char *, struct kernel_param *);
104 106
105static struct fc_seq *fcoe_elsct_send(struct fc_lport *, 107static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
106 u32 did, struct fc_frame *, 108 u32 did, struct fc_frame *,
@@ -115,10 +117,16 @@ static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
115 117
116module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR); 118module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
117__MODULE_PARM_TYPE(create, "string"); 119__MODULE_PARM_TYPE(create, "string");
118MODULE_PARM_DESC(create, "Create fcoe fcoe using net device passed in."); 120MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface");
119module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR); 121module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
120__MODULE_PARM_TYPE(destroy, "string"); 122__MODULE_PARM_TYPE(destroy, "string");
121MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe"); 123MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface");
124module_param_call(enable, fcoe_enable, NULL, NULL, S_IWUSR);
125__MODULE_PARM_TYPE(enable, "string");
126MODULE_PARM_DESC(enable, " Enables fcoe on a ethernet interface.");
127module_param_call(disable, fcoe_disable, NULL, NULL, S_IWUSR);
128__MODULE_PARM_TYPE(disable, "string");
129MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface.");
122 130
123/* notification function for packets from net device */ 131/* notification function for packets from net device */
124static struct notifier_block fcoe_notifier = { 132static struct notifier_block fcoe_notifier = {
@@ -545,6 +553,23 @@ static void fcoe_queue_timer(ulong lport)
545} 553}
546 554
547/** 555/**
556 * fcoe_get_wwn() - Get the world wide name from LLD if it supports it
557 * @netdev: the associated net device
558 * @wwn: the output WWN
559 * @type: the type of WWN (WWPN or WWNN)
560 *
561 * Returns: 0 for success
562 */
563static int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
564{
565 const struct net_device_ops *ops = netdev->netdev_ops;
566
567 if (ops->ndo_fcoe_get_wwn)
568 return ops->ndo_fcoe_get_wwn(netdev, wwn, type);
569 return -EINVAL;
570}
571
572/**
548 * fcoe_netdev_config() - Set up net devive for SW FCoE 573 * fcoe_netdev_config() - Set up net devive for SW FCoE
549 * @lport: The local port that is associated with the net device 574 * @lport: The local port that is associated with the net device
550 * @netdev: The associated net device 575 * @netdev: The associated net device
@@ -611,9 +636,13 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
611 */ 636 */
612 if (netdev->priv_flags & IFF_802_1Q_VLAN) 637 if (netdev->priv_flags & IFF_802_1Q_VLAN)
613 vid = vlan_dev_vlan_id(netdev); 638 vid = vlan_dev_vlan_id(netdev);
614 wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0); 639
640 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
641 wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
615 fc_set_wwnn(lport, wwnn); 642 fc_set_wwnn(lport, wwnn);
616 wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 2, vid); 643 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
644 wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
645 2, vid);
617 fc_set_wwpn(lport, wwpn); 646 fc_set_wwpn(lport, wwpn);
618 } 647 }
619 648
@@ -1838,6 +1867,104 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer)
1838} 1867}
1839 1868
1840/** 1869/**
1870 * fcoe_disable() - Disables a FCoE interface
1871 * @buffer: The name of the Ethernet interface to be disabled
1872 * @kp: The associated kernel parameter
1873 *
1874 * Called from sysfs.
1875 *
1876 * Returns: 0 for success
1877 */
1878static int fcoe_disable(const char *buffer, struct kernel_param *kp)
1879{
1880 struct fcoe_interface *fcoe;
1881 struct net_device *netdev;
1882 int rc = 0;
1883
1884 mutex_lock(&fcoe_config_mutex);
1885#ifdef CONFIG_FCOE_MODULE
1886 /*
1887 * Make sure the module has been initialized, and is not about to be
1888 * removed. Module paramter sysfs files are writable before the
1889 * module_init function is called and after module_exit.
1890 */
1891 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1892 rc = -ENODEV;
1893 goto out_nodev;
1894 }
1895#endif
1896
1897 netdev = fcoe_if_to_netdev(buffer);
1898 if (!netdev) {
1899 rc = -ENODEV;
1900 goto out_nodev;
1901 }
1902
1903 rtnl_lock();
1904 fcoe = fcoe_hostlist_lookup_port(netdev);
1905 rtnl_unlock();
1906
1907 if (fcoe)
1908 fc_fabric_logoff(fcoe->ctlr.lp);
1909 else
1910 rc = -ENODEV;
1911
1912 dev_put(netdev);
1913out_nodev:
1914 mutex_unlock(&fcoe_config_mutex);
1915 return rc;
1916}
1917
1918/**
1919 * fcoe_enable() - Enables a FCoE interface
1920 * @buffer: The name of the Ethernet interface to be enabled
1921 * @kp: The associated kernel parameter
1922 *
1923 * Called from sysfs.
1924 *
1925 * Returns: 0 for success
1926 */
1927static int fcoe_enable(const char *buffer, struct kernel_param *kp)
1928{
1929 struct fcoe_interface *fcoe;
1930 struct net_device *netdev;
1931 int rc = 0;
1932
1933 mutex_lock(&fcoe_config_mutex);
1934#ifdef CONFIG_FCOE_MODULE
1935 /*
1936 * Make sure the module has been initialized, and is not about to be
1937 * removed. Module paramter sysfs files are writable before the
1938 * module_init function is called and after module_exit.
1939 */
1940 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1941 rc = -ENODEV;
1942 goto out_nodev;
1943 }
1944#endif
1945
1946 netdev = fcoe_if_to_netdev(buffer);
1947 if (!netdev) {
1948 rc = -ENODEV;
1949 goto out_nodev;
1950 }
1951
1952 rtnl_lock();
1953 fcoe = fcoe_hostlist_lookup_port(netdev);
1954 rtnl_unlock();
1955
1956 if (fcoe)
1957 rc = fc_fabric_login(fcoe->ctlr.lp);
1958 else
1959 rc = -ENODEV;
1960
1961 dev_put(netdev);
1962out_nodev:
1963 mutex_unlock(&fcoe_config_mutex);
1964 return rc;
1965}
1966
1967/**
1841 * fcoe_destroy() - Destroy a FCoE interface 1968 * fcoe_destroy() - Destroy a FCoE interface
1842 * @buffer: The name of the Ethernet interface to be destroyed 1969 * @buffer: The name of the Ethernet interface to be destroyed
1843 * @kp: The associated kernel parameter 1970 * @kp: The associated kernel parameter
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
new file mode 100644
index 000000000000..bb96fdd58e23
--- /dev/null
+++ b/drivers/scsi/hpsa.c
@@ -0,0 +1,3531 @@
1/*
2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 *
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
19 *
20 */
21
22#include <linux/module.h>
23#include <linux/interrupt.h>
24#include <linux/types.h>
25#include <linux/pci.h>
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/delay.h>
29#include <linux/fs.h>
30#include <linux/timer.h>
31#include <linux/seq_file.h>
32#include <linux/init.h>
33#include <linux/spinlock.h>
34#include <linux/smp_lock.h>
35#include <linux/compat.h>
36#include <linux/blktrace_api.h>
37#include <linux/uaccess.h>
38#include <linux/io.h>
39#include <linux/dma-mapping.h>
40#include <linux/completion.h>
41#include <linux/moduleparam.h>
42#include <scsi/scsi.h>
43#include <scsi/scsi_cmnd.h>
44#include <scsi/scsi_device.h>
45#include <scsi/scsi_host.h>
46#include <linux/cciss_ioctl.h>
47#include <linux/string.h>
48#include <linux/bitmap.h>
49#include <asm/atomic.h>
50#include <linux/kthread.h>
51#include "hpsa_cmd.h"
52#include "hpsa.h"
53
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "1.0.0"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/*define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

/* Writable at runtime via sysfs. NOTE(review): presumably gates binding to
 * the catch-all PCI-class entry in hpsa_pci_device_id[] below — confirm in
 * the probe path (not visible in this chunk). */
static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
	"Allow hpsa driver to access unknown HP Smart Array hardware");
77
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
	/* Catch-all: any HP device whose PCI class is storage/RAID */
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}	/* terminating entry */
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
96
/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3223103C, "Smart Array P800", &SA5_access},
	{0x3234103C, "Smart Array P400", &SA5_access},
	{0x323d103c, "Smart Array P700M", &SA5_access},
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324a103C, "Smart Array P712m", &SA5_access},
	{0x324b103C, "Smart Array P711m", &SA5_access},
	/* Fallback used for boards matched by the catch-all PCI-class entry */
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
114
/* Count of controllers this driver has claimed so far */
static int number_of_controllers;

static irqreturn_t do_hpsa_intr(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

/* Command-block allocation/free helpers */
static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr,
	int cmd_type);

/* SCSI midlayer entry points */
static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
	void (*done)(struct scsi_cmnd *));

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

/* sysfs show/store handlers and rescan plumbing */
static ssize_t raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t lunid_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static ssize_t host_store_rescan(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);

/* Per-device attributes are read-only; the per-host rescan attribute is
 * write-only and triggers a topology rescan. */
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	NULL,
};
170
/* SCSI host template registered with the midlayer for every controller */
static struct scsi_host_template hpsa_driver_template = {
	.module = THIS_MODULE,
	.name = "hpsa",
	.proc_name = "hpsa",
	.queuecommand = hpsa_scsi_queue_command,
	.can_queue = 512,	/* commands outstanding per host */
	.this_id = -1,
	.sg_tablesize = MAXSGENTRIES,
	.cmd_per_lun = 512,
	.use_clustering = ENABLE_CLUSTERING,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl = hpsa_ioctl,
	.slave_alloc = hpsa_slave_alloc,
	.slave_destroy = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl = hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
};
191
192static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
193{
194 unsigned long *priv = shost_priv(sdev->host);
195 return (struct ctlr_info *) *priv;
196}
197
/* Single kernel thread shared by all controllers for topology rescans */
static struct task_struct *hpsa_scan_thread;
/* Protects hpsa_scan_q */
static DEFINE_MUTEX(hpsa_scan_mutex);
/* Queue of controllers waiting to be rescanned */
static LIST_HEAD(hpsa_scan_q);
static int hpsa_scan_func(void *data);
202
203/**
204 * add_to_scan_list() - add controller to rescan queue
205 * @h: Pointer to the controller.
206 *
207 * Adds the controller to the rescan queue if not already on the queue.
208 *
209 * returns 1 if added to the queue, 0 if skipped (could be on the
210 * queue already, or the controller could be initializing or shutting
211 * down).
212 **/
213static int add_to_scan_list(struct ctlr_info *h)
214{
215 struct ctlr_info *test_h;
216 int found = 0;
217 int ret = 0;
218
219 if (h->busy_initializing)
220 return 0;
221
222 /*
223 * If we don't get the lock, it means the driver is unloading
224 * and there's no point in scheduling a new scan.
225 */
226 if (!mutex_trylock(&h->busy_shutting_down))
227 return 0;
228
229 mutex_lock(&hpsa_scan_mutex);
230 list_for_each_entry(test_h, &hpsa_scan_q, scan_list) {
231 if (test_h == h) {
232 found = 1;
233 break;
234 }
235 }
236 if (!found && !h->busy_scanning) {
237 INIT_COMPLETION(h->scan_wait);
238 list_add_tail(&h->scan_list, &hpsa_scan_q);
239 ret = 1;
240 }
241 mutex_unlock(&hpsa_scan_mutex);
242 mutex_unlock(&h->busy_shutting_down);
243
244 return ret;
245}
246
247/**
248 * remove_from_scan_list() - remove controller from rescan queue
249 * @h: Pointer to the controller.
250 *
251 * Removes the controller from the rescan queue if present. Blocks if
252 * the controller is currently conducting a rescan. The controller
253 * can be in one of three states:
254 * 1. Doesn't need a scan
255 * 2. On the scan list, but not scanning yet (we remove it)
256 * 3. Busy scanning (and not on the list). In this case we want to wait for
257 * the scan to complete to make sure the scanning thread for this
258 * controller is completely idle.
259 **/
260static void remove_from_scan_list(struct ctlr_info *h)
261{
262 struct ctlr_info *test_h, *tmp_h;
263
264 mutex_lock(&hpsa_scan_mutex);
265 list_for_each_entry_safe(test_h, tmp_h, &hpsa_scan_q, scan_list) {
266 if (test_h == h) { /* state 2. */
267 list_del(&h->scan_list);
268 complete_all(&h->scan_wait);
269 mutex_unlock(&hpsa_scan_mutex);
270 return;
271 }
272 }
273 if (h->busy_scanning) { /* state 3. */
274 mutex_unlock(&hpsa_scan_mutex);
275 wait_for_completion(&h->scan_wait);
276 } else { /* state 1, nothing to do. */
277 mutex_unlock(&hpsa_scan_mutex);
278 }
279}
280
/* hpsa_scan_func() - kernel thread used to rescan controllers
 * @data: Ignored.
 *
 * A kernel thread used scan for drive topology changes on
 * controllers. The thread processes only one controller at a time
 * using a queue.  Controllers are added to the queue using
 * add_to_scan_list() and removed from the queue either after done
 * processing or using remove_from_scan_list().
 *
 * returns 0.
 **/
static int hpsa_scan_func(__attribute__((unused)) void *data)
{
	struct ctlr_info *h;
	int host_no;

	while (1) {
		/* Sleep until woken by host_store_rescan() or kthread_stop().
		 * NOTE(review): kthread_should_stop() is only tested after
		 * schedule(); kthread_stop() wakes the thread so this does
		 * terminate, but the usual idiom also checks before sleeping
		 * — confirm no stop request can be missed here. */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		if (kthread_should_stop())
			break;

		/* Drain the scan queue, one controller at a time */
		while (1) {
			mutex_lock(&hpsa_scan_mutex);
			if (list_empty(&hpsa_scan_q)) {
				mutex_unlock(&hpsa_scan_mutex);
				break;
			}
			h = list_entry(hpsa_scan_q.next, struct ctlr_info,
					scan_list);
			list_del(&h->scan_list);
			/* mark busy so add/remove_from_scan_list see state 3 */
			h->busy_scanning = 1;
			mutex_unlock(&hpsa_scan_mutex);
			host_no = h->scsi_host ?  h->scsi_host->host_no : -1;
			/* scan runs without the mutex held */
			hpsa_update_scsi_devices(h, host_no);
			complete_all(&h->scan_wait);
			mutex_lock(&hpsa_scan_mutex);
			h->busy_scanning = 0;
			mutex_unlock(&hpsa_scan_mutex);
		}
	}
	return 0;
}
324
325static int check_for_unit_attention(struct ctlr_info *h,
326 struct CommandList *c)
327{
328 if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
329 return 0;
330
331 switch (c->err_info->SenseInfo[12]) {
332 case STATE_CHANGED:
333 dev_warn(&h->pdev->dev, "hpsa%d: a state change "
334 "detected, command retried\n", h->ctlr);
335 break;
336 case LUN_FAILED:
337 dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
338 "detected, action required\n", h->ctlr);
339 break;
340 case REPORT_LUNS_CHANGED:
341 dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
342 "changed\n", h->ctlr);
343 /*
344 * Here, we could call add_to_scan_list and wake up the scan thread,
345 * except that it's quite likely that we will get more than one
346 * REPORT_LUNS_CHANGED condition in quick succession, which means
347 * that those which occur after the first one will likely happen
348 * *during* the hpsa_scan_thread's rescan. And the rescan code is not
349 * robust enough to restart in the middle, undoing what it has already
350 * done, and it's not clear that it's even possible to do this, since
351 * part of what it does is notify the SCSI mid layer, which starts
352 * doing it's own i/o to read partition tables and so on, and the
353 * driver doesn't have visibility to know what might need undoing.
354 * In any event, if possible, it is horribly complicated to get right
355 * so we just don't do it for now.
356 *
357 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
358 */
359 break;
360 case POWER_OR_RESET:
361 dev_warn(&h->pdev->dev, "hpsa%d: a power on "
362 "or device reset detected\n", h->ctlr);
363 break;
364 case UNIT_ATTENTION_CLEARED:
365 dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
366 "cleared by another initiator\n", h->ctlr);
367 break;
368 default:
369 dev_warn(&h->pdev->dev, "hpsa%d: unknown "
370 "unit attention detected\n", h->ctlr);
371 break;
372 }
373 return 1;
374}
375
/* sysfs 'rescan' store handler: queue this controller for a topology
 * rescan and block (interruptibly) until the scan thread finishes it. */
static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned long *priv = shost_priv(shost);
	h = (struct ctlr_info *) *priv;
	/* only wait if we actually queued it; add_to_scan_list returns 0
	 * when the controller is already queued, scanning, or shutting down */
	if (add_to_scan_list(h)) {
		wake_up_process(hpsa_scan_thread);
		wait_for_completion_interruptible(&h->scan_wait);
	}
	return count;
}
390
/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct hlist_head *list, struct CommandList *c)
{
	/* push onto the head of the given command list */
	hlist_add_head(&c->list, list);
}
396
/* Queue a command on the controller's request queue and kick off
 * submission to the hardware.  h->lock protects reqQ and Qdepth;
 * start_io() is deliberately called with the lock still held. */
static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	start_io(h);
	spin_unlock_irqrestore(&h->lock, flags);
}
407
/* Unlink a command from whichever queue it is on.  WARNs (and bails)
 * if the command is not actually queued — that would indicate a
 * double-removal or completion of a never-queued command. */
static inline void removeQ(struct CommandList *c)
{
	if (WARN_ON(hlist_unhashed(&c->list)))
		return;
	hlist_del_init(&c->list);
}
414
415static inline int is_hba_lunid(unsigned char scsi3addr[])
416{
417 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
418}
419
/* A logical (RAID volume) address is flagged by the top two bits of
 * byte 3 of the 8-byte LUN address being 01b. */
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	unsigned char mode_bits = scsi3addr[3] & 0xC0;

	return mode_bits == 0x40;
}
424
/* Human-readable RAID level names, indexed by hpsa_scsi_dev_t.raid_level;
 * the final "UNKNOWN" entry is the clamp target for out-of-range values. */
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"UNKNOWN"
};
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
429
/* sysfs 'raid_level' show handler: report the device's RAID level,
 * or "N/A" for devices that are not logical volumes. */
static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	int rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	/* NOTE(review): this takes h->lock, while the h->dev[] mutators
	 * elsewhere in this file document "assumes h->devlock is held" —
	 * confirm which lock is meant to protect sdev->hostdata. */
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	/* copy out under the lock, format after dropping it */
	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel < 0 || rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}
463
/* sysfs 'lunid' show handler: print the 8-byte SCSI3 LUN address as
 * "0x" + 16 hex digits + newline (19 chars + NUL, hence size 20). */
static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	/* snapshot under the lock; format after dropping it */
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}
487
/* sysfs 'unique_id' show handler: print the 16-byte device id as
 * 32 hex digits + newline (33 chars + NUL, hence size 16*2+2). */
static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	/* snapshot under the lock; format after dropping it */
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}
515
516static int hpsa_find_target_lun(struct ctlr_info *h,
517 unsigned char scsi3addr[], int bus, int *target, int *lun)
518{
519 /* finds an unused bus, target, lun for a new physical device
520 * assumes h->devlock is held
521 */
522 int i, found = 0;
523 DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);
524
525 memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);
526
527 for (i = 0; i < h->ndevices; i++) {
528 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
529 set_bit(h->dev[i]->target, lun_taken);
530 }
531
532 for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
533 if (!test_bit(i, lun_taken)) {
534 /* *bus = 1; */
535 *target = i;
536 *lun = 0;
537 found = 1;
538 break;
539 }
540 }
541 return !found;
542}
543
/* Add an entry into h->dev[] array.
 * On success the device is appended to h->dev[] and to added[] (with
 * *nadded incremented).  Returns 0 on success, -1 if the table is full
 * or no target/lun could be assigned.
 */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device a non-zero lun of a multi-lun device
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	/* still -1 means no lun-0 sibling was found above */
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
			return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
			scsi_device_type(device->devtype), hostno,
			device->bus, device->target, device->lun);
	return 0;
}
622
/* Remove an entry from h->dev[] array.
 * The removed device is recorded in removed[] (with *nremoved
 * incremented) so the caller can notify the SCSI midlayer and free it.
 */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	if (entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA)
		BUG();

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/* shift the tail of the array down over the removed slot */
	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}
645
/* Compare two 8-byte SCSI3 addresses for equality, highest byte first. */
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])
655
static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	/* NOTE(review): this takes h->lock, but the other h->dev[]
	 * mutators in this file run under h->devlock — confirm the
	 * intended lock for the device table. */
	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			/* close the gap left by the failed entry */
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}
677
/* Return 1 if dev1 and dev2 describe the same device, 0 otherwise. */
static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Fast path: when both addresses are fully assigned (logical
	 * device, or both luns known) and devtype is not 0x0C, compare the
	 * whole struct at once.
	 * NOTE(review): a whole-struct memcmp also compares any padding
	 * bytes in hpsa_scsi_dev_t — confirm the struct has none, or that
	 * both sides are zero-initialized, else identical devices could
	 * compare unequal. */
	if ((is_logical_dev_addr_mode(dev1->scsi3addr) ||
		(dev1->lun != -1 && dev2->lun != -1)) &&
		dev1->devtype != 0x0C)
		return (memcmp(dev1, dev2, sizeof(*dev1)) == 0);

	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (memcmp(dev1->revision, dev2->revision, sizeof(dev1->revision)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->raid_level != dev2->raid_level)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}
710
711/* Find needle in haystack. If exact match found, return DEVICE_SAME,
712 * and return needle location in *index. If scsi3addr matches, but not
713 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
714 * location in *index. If needle not found, return DEVICE_NOT_FOUND.
715 */
716static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
717 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
718 int *index)
719{
720 int i;
721#define DEVICE_NOT_FOUND 0
722#define DEVICE_CHANGED 1
723#define DEVICE_SAME 2
724 for (i = 0; i < haystack_size; i++) {
725 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
726 *index = i;
727 if (device_is_the_same(needle, haystack[i]))
728 return DEVICE_SAME;
729 else
730 return DEVICE_CHANGED;
731 }
732 }
733 *index = -1;
734 return DEVICE_NOT_FOUND;
735}
736
/* Reconcile the driver's device table (h->dev[]) with a freshly scanned
 * view of reality (sd[]), then tell the SCSI midlayer about any devices
 * that came or went.  Entries of sd[] that are adopted into h->dev[]
 * are NULLed out so the caller does not free them. */
static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
		GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
		GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			(void) hpsa_scsi_add_entry(h, hostno, sd[entry],
				added, &nadded);
			/* add can't fail, we just removed one. */
			sd[entry] = NULL; /* prevent it from being freed */
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */

	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	/* midlayer notification below runs without devlock held */
	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
	return 0;
}
865
866/*
867 * Lookup bus/target/lun and retrun corresponding struct hpsa_scsi_dev_t *
868 * Assume's h->devlock is held.
869 */
870static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
871 int bus, int target, int lun)
872{
873 int i;
874 struct hpsa_scsi_dev_t *sd;
875
876 for (i = 0; i < h->ndevices; i++) {
877 sd = h->dev[i];
878 if (sd->bus == bus && sd->target == target && sd->lun == lun)
879 return sd;
880 }
881 return NULL;
882}
883
884/* link sdev->hostdata to our per-device structure. */
885static int hpsa_slave_alloc(struct scsi_device *sdev)
886{
887 struct hpsa_scsi_dev_t *sd;
888 unsigned long flags;
889 struct ctlr_info *h;
890
891 h = sdev_to_hba(sdev);
892 spin_lock_irqsave(&h->devlock, flags);
893 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
894 sdev_id(sdev), sdev->lun);
895 if (sd != NULL)
896 sdev->hostdata = sd;
897 spin_unlock_irqrestore(&h->devlock, flags);
898 return 0;
899}
900
/* SCSI midlayer slave_destroy hook: no per-sdev state to tear down. */
static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}
905
906static void hpsa_scsi_setup(struct ctlr_info *h)
907{
908 h->ndevices = 0;
909 h->scsi_host = NULL;
910 spin_lock_init(&h->devlock);
911 return;
912}
913
914static void complete_scsi_command(struct CommandList *cp,
915 int timeout, __u32 tag)
916{
917 struct scsi_cmnd *cmd;
918 struct ctlr_info *h;
919 struct ErrorInfo *ei;
920
921 unsigned char sense_key;
922 unsigned char asc; /* additional sense code */
923 unsigned char ascq; /* additional sense code qualifier */
924
925 ei = cp->err_info;
926 cmd = (struct scsi_cmnd *) cp->scsi_cmd;
927 h = cp->h;
928
929 scsi_dma_unmap(cmd); /* undo the DMA mappings */
930
931 cmd->result = (DID_OK << 16); /* host byte */
932 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
933 cmd->result |= (ei->ScsiStatus << 1);
934
935 /* copy the sense data whether we need to or not. */
936 memcpy(cmd->sense_buffer, ei->SenseInfo,
937 ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
938 SCSI_SENSE_BUFFERSIZE :
939 ei->SenseLen);
940 scsi_set_resid(cmd, ei->ResidualCnt);
941
942 if (ei->CommandStatus == 0) {
943 cmd->scsi_done(cmd);
944 cmd_free(h, cp);
945 return;
946 }
947
948 /* an error has occurred */
949 switch (ei->CommandStatus) {
950
951 case CMD_TARGET_STATUS:
952 if (ei->ScsiStatus) {
953 /* Get sense key */
954 sense_key = 0xf & ei->SenseInfo[2];
955 /* Get additional sense code */
956 asc = ei->SenseInfo[12];
957 /* Get addition sense code qualifier */
958 ascq = ei->SenseInfo[13];
959 }
960
961 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
962 if (check_for_unit_attention(h, cp)) {
963 cmd->result = DID_SOFT_ERROR << 16;
964 break;
965 }
966 if (sense_key == ILLEGAL_REQUEST) {
967 /*
968 * SCSI REPORT_LUNS is commonly unsupported on
969 * Smart Array. Suppress noisy complaint.
970 */
971 if (cp->Request.CDB[0] == REPORT_LUNS)
972 break;
973
974 /* If ASC/ASCQ indicate Logical Unit
975 * Not Supported condition,
976 */
977 if ((asc == 0x25) && (ascq == 0x0)) {
978 dev_warn(&h->pdev->dev, "cp %p "
979 "has check condition\n", cp);
980 break;
981 }
982 }
983
984 if (sense_key == NOT_READY) {
985 /* If Sense is Not Ready, Logical Unit
986 * Not ready, Manual Intervention
987 * required
988 */
989 if ((asc == 0x04) && (ascq == 0x03)) {
990 cmd->result = DID_NO_CONNECT << 16;
991 dev_warn(&h->pdev->dev, "cp %p "
992 "has check condition: unit "
993 "not ready, manual "
994 "intervention required\n", cp);
995 break;
996 }
997 }
998
999
1000 /* Must be some other type of check condition */
1001 dev_warn(&h->pdev->dev, "cp %p has check condition: "
1002 "unknown type: "
1003 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1004 "Returning result: 0x%x, "
1005 "cmd=[%02x %02x %02x %02x %02x "
1006 "%02x %02x %02x %02x %02x]\n",
1007 cp, sense_key, asc, ascq,
1008 cmd->result,
1009 cmd->cmnd[0], cmd->cmnd[1],
1010 cmd->cmnd[2], cmd->cmnd[3],
1011 cmd->cmnd[4], cmd->cmnd[5],
1012 cmd->cmnd[6], cmd->cmnd[7],
1013 cmd->cmnd[8], cmd->cmnd[9]);
1014 break;
1015 }
1016
1017
1018 /* Problem was not a check condition
1019 * Pass it up to the upper layers...
1020 */
1021 if (ei->ScsiStatus) {
1022 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1023 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1024 "Returning result: 0x%x\n",
1025 cp, ei->ScsiStatus,
1026 sense_key, asc, ascq,
1027 cmd->result);
1028 } else { /* scsi status is zero??? How??? */
1029 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
1030 "Returning no connection.\n", cp),
1031
1032 /* Ordinarily, this case should never happen,
1033 * but there is a bug in some released firmware
1034 * revisions that allows it to happen if, for
1035 * example, a 4100 backplane loses power and
1036 * the tape drive is in it. We assume that
1037 * it's a fatal error of some kind because we
1038 * can't show that it wasn't. We will make it
1039 * look like selection timeout since that is
1040 * the most common reason for this to occur,
1041 * and it's severe enough.
1042 */
1043
1044 cmd->result = DID_NO_CONNECT << 16;
1045 }
1046 break;
1047
1048 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1049 break;
1050 case CMD_DATA_OVERRUN:
1051 dev_warn(&h->pdev->dev, "cp %p has"
1052 " completed with data overrun "
1053 "reported\n", cp);
1054 break;
1055 case CMD_INVALID: {
1056 /* print_bytes(cp, sizeof(*cp), 1, 0);
1057 print_cmd(cp); */
1058 /* We get CMD_INVALID if you address a non-existent device
1059 * instead of a selection timeout (no response). You will
1060 * see this if you yank out a drive, then try to access it.
1061 * This is kind of a shame because it means that any other
1062 * CMD_INVALID (e.g. driver bug) will get interpreted as a
1063 * missing target. */
1064 cmd->result = DID_NO_CONNECT << 16;
1065 }
1066 break;
1067 case CMD_PROTOCOL_ERR:
1068 dev_warn(&h->pdev->dev, "cp %p has "
1069 "protocol error \n", cp);
1070 break;
1071 case CMD_HARDWARE_ERR:
1072 cmd->result = DID_ERROR << 16;
1073 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
1074 break;
1075 case CMD_CONNECTION_LOST:
1076 cmd->result = DID_ERROR << 16;
1077 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
1078 break;
1079 case CMD_ABORTED:
1080 cmd->result = DID_ABORT << 16;
1081 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
1082 cp, ei->ScsiStatus);
1083 break;
1084 case CMD_ABORT_FAILED:
1085 cmd->result = DID_ERROR << 16;
1086 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1087 break;
1088 case CMD_UNSOLICITED_ABORT:
1089 cmd->result = DID_ABORT << 16;
1090 dev_warn(&h->pdev->dev, "cp %p aborted do to an unsolicited "
1091 "abort\n", cp);
1092 break;
1093 case CMD_TIMEOUT:
1094 cmd->result = DID_TIME_OUT << 16;
1095 dev_warn(&h->pdev->dev, "cp %p timedout\n", cp);
1096 break;
1097 default:
1098 cmd->result = DID_ERROR << 16;
1099 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1100 cp, ei->CommandStatus);
1101 }
1102 cmd->scsi_done(cmd);
1103 cmd_free(h, cp);
1104}
1105
1106static int hpsa_scsi_detect(struct ctlr_info *h)
1107{
1108 struct Scsi_Host *sh;
1109 int error;
1110
1111 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
1112 if (sh == NULL)
1113 goto fail;
1114
1115 sh->io_port = 0;
1116 sh->n_io_port = 0;
1117 sh->this_id = -1;
1118 sh->max_channel = 3;
1119 sh->max_cmd_len = MAX_COMMAND_SIZE;
1120 sh->max_lun = HPSA_MAX_LUN;
1121 sh->max_id = HPSA_MAX_LUN;
1122 h->scsi_host = sh;
1123 sh->hostdata[0] = (unsigned long) h;
1124 sh->irq = h->intr[SIMPLE_MODE_INT];
1125 sh->unique_id = sh->irq;
1126 error = scsi_add_host(sh, &h->pdev->dev);
1127 if (error)
1128 goto fail_host_put;
1129 scsi_scan_host(sh);
1130 return 0;
1131
1132 fail_host_put:
1133 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
1134 " failed for controller %d\n", h->ctlr);
1135 scsi_host_put(sh);
1136 return -1;
1137 fail:
1138 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
1139 " failed for controller %d\n", h->ctlr);
1140 return -1;
1141}
1142
1143static void hpsa_pci_unmap(struct pci_dev *pdev,
1144 struct CommandList *c, int sg_used, int data_direction)
1145{
1146 int i;
1147 union u64bit addr64;
1148
1149 for (i = 0; i < sg_used; i++) {
1150 addr64.val32.lower = c->SG[i].Addr.lower;
1151 addr64.val32.upper = c->SG[i].Addr.upper;
1152 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
1153 data_direction);
1154 }
1155}
1156
1157static void hpsa_map_one(struct pci_dev *pdev,
1158 struct CommandList *cp,
1159 unsigned char *buf,
1160 size_t buflen,
1161 int data_direction)
1162{
1163 __u64 addr64;
1164
1165 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1166 cp->Header.SGList = 0;
1167 cp->Header.SGTotal = 0;
1168 return;
1169 }
1170
1171 addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
1172 cp->SG[0].Addr.lower =
1173 (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
1174 cp->SG[0].Addr.upper =
1175 (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
1176 cp->SG[0].Len = buflen;
1177 cp->Header.SGList = (__u8) 1; /* no. SGs contig in this cmd */
1178 cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
1179}
1180
/* Submit command c to the controller and block until the completion
 * path signals c->waiting.  Caller owns c and any mapped data buffer.
 */
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	/* completed from interrupt context when the command finishes */
	c->waiting = &wait;
	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);
}
1190
1191static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
1192 struct CommandList *c, int data_direction)
1193{
1194 int retry_count = 0;
1195
1196 do {
1197 memset(c->err_info, 0, sizeof(c->err_info));
1198 hpsa_scsi_do_simple_cmd_core(h, c);
1199 retry_count++;
1200 } while (check_for_unit_attention(h, c) && retry_count <= 3);
1201 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
1202}
1203
/* Log a human-readable explanation of the error recorded in
 * cp->err_info.  Used for internally generated commands (inquiry,
 * report-luns, reset); purely diagnostic, never retries or modifies cp.
 */
static void hpsa_scsi_interpret_error(struct CommandList *cp)
{
	struct ErrorInfo *ei;
	struct device *d = &cp->h->pdev->dev;

	ei = cp->err_info;
	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		/* target returned a SCSI status; report it verbatim */
		dev_warn(d, "cmd %p has completed with errors\n", cp);
		dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
				ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero. "
			"(probably indicates selection timeout "
			"reported incorrectly due to a known "
			"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
			dev_info(d, "UNDERRUN\n");
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(d, "cp %p has completed with data overrun\n", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		dev_warn(d, "cp %p is reported invalid (probably means "
			"target device no longer present)\n", cp);
		/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
		print_cmd(cp);  */
		}
		break;
	case CMD_PROTOCOL_ERR:
		dev_warn(d, "cp %p has protocol error \n", cp);
		break;
	case CMD_HARDWARE_ERR:
		/* cmd->result = DID_ERROR << 16; */
		dev_warn(d, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		dev_warn(d, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		dev_warn(d, "cp %p was aborted\n", cp);
		break;
	case CMD_ABORT_FAILED:
		dev_warn(d, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
		break;
	case CMD_TIMEOUT:
		dev_warn(d, "cp %p timed out\n", cp);
		break;
	default:
		dev_warn(d, "cp %p returned unknown status %x\n", cp,
				ei->CommandStatus);
	}
}
1264
1265static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
1266 unsigned char page, unsigned char *buf,
1267 unsigned char bufsize)
1268{
1269 int rc = IO_OK;
1270 struct CommandList *c;
1271 struct ErrorInfo *ei;
1272
1273 c = cmd_special_alloc(h);
1274
1275 if (c == NULL) { /* trouble... */
1276 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1277 return -1;
1278 }
1279
1280 fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
1281 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1282 ei = c->err_info;
1283 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
1284 hpsa_scsi_interpret_error(c);
1285 rc = -1;
1286 }
1287 cmd_special_free(h, c);
1288 return rc;
1289}
1290
1291static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
1292{
1293 int rc = IO_OK;
1294 struct CommandList *c;
1295 struct ErrorInfo *ei;
1296
1297 c = cmd_special_alloc(h);
1298
1299 if (c == NULL) { /* trouble... */
1300 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1301 return -1;
1302 }
1303
1304 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
1305 hpsa_scsi_do_simple_cmd_core(h, c);
1306 /* no unmap needed here because no data xfer. */
1307
1308 ei = c->err_info;
1309 if (ei->CommandStatus != 0) {
1310 hpsa_scsi_interpret_error(c);
1311 rc = -1;
1312 }
1313 cmd_special_free(h, c);
1314 return rc;
1315}
1316
1317static void hpsa_get_raid_level(struct ctlr_info *h,
1318 unsigned char *scsi3addr, unsigned char *raid_level)
1319{
1320 int rc;
1321 unsigned char *buf;
1322
1323 *raid_level = RAID_UNKNOWN;
1324 buf = kzalloc(64, GFP_KERNEL);
1325 if (!buf)
1326 return;
1327 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
1328 if (rc == 0)
1329 *raid_level = buf[8];
1330 if (*raid_level > RAID_UNKNOWN)
1331 *raid_level = RAID_UNKNOWN;
1332 kfree(buf);
1333 return;
1334}
1335
1336/* Get the device id from inquiry page 0x83 */
1337static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
1338 unsigned char *device_id, int buflen)
1339{
1340 int rc;
1341 unsigned char *buf;
1342
1343 if (buflen > 16)
1344 buflen = 16;
1345 buf = kzalloc(64, GFP_KERNEL);
1346 if (!buf)
1347 return -1;
1348 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
1349 if (rc == 0)
1350 memcpy(device_id, &buf[8], buflen);
1351 kfree(buf);
1352 return rc != 0;
1353}
1354
1355static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
1356 struct ReportLUNdata *buf, int bufsize,
1357 int extended_response)
1358{
1359 int rc = IO_OK;
1360 struct CommandList *c;
1361 unsigned char scsi3addr[8];
1362 struct ErrorInfo *ei;
1363
1364 c = cmd_special_alloc(h);
1365 if (c == NULL) { /* trouble... */
1366 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1367 return -1;
1368 }
1369
1370 memset(&scsi3addr[0], 0, 8); /* address the controller */
1371
1372 fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
1373 buf, bufsize, 0, scsi3addr, TYPE_CMD);
1374 if (extended_response)
1375 c->Request.CDB[1] = extended_response;
1376 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1377 ei = c->err_info;
1378 if (ei->CommandStatus != 0 &&
1379 ei->CommandStatus != CMD_DATA_UNDERRUN) {
1380 hpsa_scsi_interpret_error(c);
1381 rc = -1;
1382 }
1383 cmd_special_free(h, c);
1384 return rc;
1385}
1386
/* Convenience wrapper: CISS_REPORT_PHYS (physical LUNs). */
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf,
		int bufsize, int extended_response)
{
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
}
1393
/* Convenience wrapper: CISS_REPORT_LOG (logical LUNs), no extension. */
static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}
1399
/* Record the (bus, target, lun) address assigned to a device entry. */
static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}
1407
1408static int hpsa_update_device_info(struct ctlr_info *h,
1409 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
1410{
1411#define OBDR_TAPE_INQ_SIZE 49
1412 unsigned char *inq_buff = NULL;
1413
1414 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1415 if (!inq_buff)
1416 goto bail_out;
1417
1418 memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
1419 /* Do an inquiry to the device to see what it is. */
1420 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
1421 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
1422 /* Inquiry failed (msg printed already) */
1423 dev_err(&h->pdev->dev,
1424 "hpsa_update_device_info: inquiry failed\n");
1425 goto bail_out;
1426 }
1427
1428 /* As a side effect, record the firmware version number
1429 * if we happen to be talking to the RAID controller.
1430 */
1431 if (is_hba_lunid(scsi3addr))
1432 memcpy(h->firm_ver, &inq_buff[32], 4);
1433
1434 this_device->devtype = (inq_buff[0] & 0x1f);
1435 memcpy(this_device->scsi3addr, scsi3addr, 8);
1436 memcpy(this_device->vendor, &inq_buff[8],
1437 sizeof(this_device->vendor));
1438 memcpy(this_device->model, &inq_buff[16],
1439 sizeof(this_device->model));
1440 memcpy(this_device->revision, &inq_buff[32],
1441 sizeof(this_device->revision));
1442 memset(this_device->device_id, 0,
1443 sizeof(this_device->device_id));
1444 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
1445 sizeof(this_device->device_id));
1446
1447 if (this_device->devtype == TYPE_DISK &&
1448 is_logical_dev_addr_mode(scsi3addr))
1449 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
1450 else
1451 this_device->raid_level = RAID_UNKNOWN;
1452
1453 kfree(inq_buff);
1454 return 0;
1455
1456bail_out:
1457 kfree(inq_buff);
1458 return 1;
1459}
1460
1461static unsigned char *msa2xxx_model[] = {
1462 "MSA2012",
1463 "MSA2024",
1464 "MSA2312",
1465 "MSA2324",
1466 NULL,
1467};
1468
1469static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1470{
1471 int i;
1472
1473 for (i = 0; msa2xxx_model[i]; i++)
1474 if (strncmp(device->model, msa2xxx_model[i],
1475 strlen(msa2xxx_model[i])) == 0)
1476 return 1;
1477 return 0;
1478}
1479
1480/* Helper function to assign bus, target, lun mapping of devices.
1481 * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
1482 * volumes on bus 1, physical devices on bus 2. and the hba on bus 3.
1483 * Logical drive target and lun are assigned at this time, but
1484 * physical device lun and target assignment are deferred (assigned
1485 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
1486 */
1487static void figure_bus_target_lun(struct ctlr_info *h,
1488 __u8 *lunaddrbytes, int *bus, int *target, int *lun,
1489 struct hpsa_scsi_dev_t *device)
1490{
1491
1492 __u32 lunid;
1493
1494 if (is_logical_dev_addr_mode(lunaddrbytes)) {
1495 /* logical device */
1496 memcpy(&lunid, lunaddrbytes, sizeof(lunid));
1497 lunid = le32_to_cpu(lunid);
1498
1499 if (is_msa2xxx(h, device)) {
1500 *bus = 1;
1501 *target = (lunid >> 16) & 0x3fff;
1502 *lun = lunid & 0x00ff;
1503 } else {
1504 *bus = 0;
1505 *lun = 0;
1506 *target = lunid & 0x3fff;
1507 }
1508 } else {
1509 /* physical device */
1510 if (is_hba_lunid(lunaddrbytes))
1511 *bus = 3;
1512 else
1513 *bus = 2;
1514 *target = -1;
1515 *lun = -1; /* we will fill these in later. */
1516 }
1517}
1518
1519/*
1520 * If there is no lun 0 on a target, linux won't find any devices.
1521 * For the MSA2xxx boxes, we have to manually detect the enclosure
1522 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
1523 * it for some reason. *tmpdevice is the target we're adding,
1524 * this_device is a pointer into the current element of currentsd[]
1525 * that we're building up in update_scsi_devices(), below.
1526 * lunzerobits is a bitmap that tracks which targets already have a
1527 * lun 0 assigned.
1528 * Returns 1 if an enclosure was added, 0 if not.
1529 */
1530static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
1531 struct hpsa_scsi_dev_t *tmpdevice,
1532 struct hpsa_scsi_dev_t *this_device, __u8 *lunaddrbytes,
1533 int bus, int target, int lun, unsigned long lunzerobits[],
1534 int *nmsa2xxx_enclosures)
1535{
1536 unsigned char scsi3addr[8];
1537
1538 if (test_bit(target, lunzerobits))
1539 return 0; /* There is already a lun 0 on this target. */
1540
1541 if (!is_logical_dev_addr_mode(lunaddrbytes))
1542 return 0; /* It's the logical targets that may lack lun 0. */
1543
1544 if (!is_msa2xxx(h, tmpdevice))
1545 return 0; /* It's only the MSA2xxx that have this problem. */
1546
1547 if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
1548 return 0;
1549
1550 if (is_hba_lunid(scsi3addr))
1551 return 0; /* Don't add the RAID controller here. */
1552
1553#define MAX_MSA2XXX_ENCLOSURES 32
1554 if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
1555 dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
1556 "enclosures exceeded. Check your hardware "
1557 "configuration.");
1558 return 0;
1559 }
1560
1561 memset(scsi3addr, 0, 8);
1562 scsi3addr[3] = target;
1563 if (hpsa_update_device_info(h, scsi3addr, this_device))
1564 return 0;
1565 (*nmsa2xxx_enclosures)++;
1566 hpsa_set_bus_target_lun(this_device, bus, target, 0);
1567 set_bit(target, lunzerobits);
1568 return 1;
1569}
1570
1571/*
1572 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
1573 * logdev. The number of luns in physdev and logdev are returned in
1574 * *nphysicals and *nlogicals, respectively.
1575 * Returns 0 on success, -1 otherwise.
1576 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	int reportlunsize,
	struct ReportLUNdata *physdev, __u32 *nphysicals,
	struct ReportLUNdata *logdev, __u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	/* LUNListLength is a big-endian byte count; 8 bytes per LUN entry */
	memcpy(nphysicals, &physdev->LUNListLength[0], sizeof(*nphysicals));
	*nphysicals = be32_to_cpu(*nphysicals) / 8;
#ifdef DEBUG
	dev_info(&h->pdev->dev, "number of physical luns is %d\n", *nphysicals);
#endif
	/* clamp to what we can track; excess physical LUNs are dropped */
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
			" %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	/* same big-endian byte count -> entry count conversion for logicals */
	memcpy(nlogicals, &logdev->LUNListLength[0], sizeof(*nlogicals));
	*nlogicals = be32_to_cpu(*nlogicals) / 8;
#ifdef DEBUG
	dev_info(&h->pdev->dev, "number of logical luns is %d\n", *nlogicals);
#endif
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded.  "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
			*nlogicals = HPSA_MAX_LUN;
		}
	/* combined total must also fit; trim logicals to make room */
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}
1623
/* Rescan the controller and rebuild the driver's device table,
 * then reconcile it with the SCSI midlayer's view via
 * adjust_hpsa_scsi_table().
 */
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
{
	/* the idea here is we could get notified
	 * that some devices have changed, so we do a report
	 * physical luns and report logical luns cmd, and adjust
	 * our list of devices accordingly.
	 *
	 * The scsi3addr's of devices won't change so long as the
	 * adapter is not reset.  That means we can rescan and
	 * tell which devices we already know about, vs. new
	 * devices, vs. disappearing devices.
	 */
	struct ReportLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	unsigned char *inq_buff = NULL;
	__u32 nphysicals = 0;
	__u32 nlogicals = 0;
	__u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
	int i, nmsa2xxx_enclosures, ndevs_to_allocate;
	int bus, target, lun;
	DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);

	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
		GFP_KERNEL);
	physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
	logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
	/* NOTE(review): inq_buff is allocated here but nothing in this
	 * function ever writes into it before the OBDR signature check
	 * below reads inq_buff[43] -- looks like a read of uninitialized
	 * memory; confirm against hpsa_update_device_info(), which uses
	 * its own private inquiry buffer.
	 */
	inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);

	if (!currentsd || !physdev_list || !logdev_list ||
		!inq_buff || !tmpdevice) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

	if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
			logdev_list, &nlogicals))
		goto out;

	/* We might see up to 32 MSA2xxx enclosures, actually 8 of them
	 * but each of them 4 times through different paths.  The plus 1
	 * is for the RAID controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;

	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
				__FILE__, __LINE__);
			goto out;
		}
		ndev_allocated++;
	}

	/* adjust our table of devices */
	nmsa2xxx_enclosures = 0;
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		__u8 *lunaddrbytes;

		/* Figure out where the LUN ID info is coming from:
		 * physicals first, then logicals, then the controller.
		 */
		if (i < nphysicals)
			lunaddrbytes = &physdev_list->LUN[i][0];
		else
			if (i < nphysicals + nlogicals)
				lunaddrbytes =
					&logdev_list->LUN[i-nphysicals][0];
			else /* jam in the RAID controller at the end */
				lunaddrbytes = RAID_CTLR_LUNID;

		/* skip masked physical devices. */
		if (lunaddrbytes[3] & 0xC0 && i < nphysicals)
			continue;

		/* Get device type, vendor, model, device id */
		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice))
			continue; /* skip it if we can't talk to it. */
		figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
			tmpdevice);
		this_device = currentsd[ncurrent];

		/*
		 * For the msa2xxx boxes, we have to insert a LUN 0 which
		 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
		 * is nonetheless an enclosure device there.  We have to
		 * present that otherwise linux won't find anything if
		 * there is no lun 0.
		 */
		if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
				lunaddrbytes, bus, target, lun, lunzerobits,
				&nmsa2xxx_enclosures)) {
			ncurrent++;
			this_device = currentsd[ncurrent];
		}

		*this_device = *tmpdevice;
		hpsa_set_bus_target_lun(this_device, bus, target, lun);

		/* decide whether this device is exposed to the midlayer */
		switch (this_device->devtype) {
		case TYPE_ROM: {
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			char obdr_sig[7];
#define OBDR_TAPE_SIG "$DR-10"
			/* see NOTE(review) above: inq_buff appears never
			 * to be populated before this read */
			strncpy(obdr_sig, &inq_buff[43], 6);
			obdr_sig[6] = '\0';
			if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
				/* Not OBDR device, ignore it. */
				break;
		}
		ncurrent++;
		break;
		case TYPE_DISK:
			/* physical disks are not exposed individually */
			if (i < nphysicals)
				break;
			ncurrent++;
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
			ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
			break;
	}
	adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(inq_buff);
	kfree(physdev_list);
	kfree(logdev_list);
	return;
}
1782
1783/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
1784 * dma mapping and fills in the scatter gather entries of the
1785 * hpsa command, cp.
1786 */
1787static int hpsa_scatter_gather(struct pci_dev *pdev,
1788 struct CommandList *cp,
1789 struct scsi_cmnd *cmd)
1790{
1791 unsigned int len;
1792 struct scatterlist *sg;
1793 __u64 addr64;
1794 int use_sg, i;
1795
1796 BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
1797
1798 use_sg = scsi_dma_map(cmd);
1799 if (use_sg < 0)
1800 return use_sg;
1801
1802 if (!use_sg)
1803 goto sglist_finished;
1804
1805 scsi_for_each_sg(cmd, sg, use_sg, i) {
1806 addr64 = (__u64) sg_dma_address(sg);
1807 len = sg_dma_len(sg);
1808 cp->SG[i].Addr.lower =
1809 (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
1810 cp->SG[i].Addr.upper =
1811 (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
1812 cp->SG[i].Len = len;
1813 cp->SG[i].Ext = 0; /* we are not chaining */
1814 }
1815
1816sglist_finished:
1817
1818 cp->Header.SGList = (__u8) use_sg; /* no. SGs contig in this cmd */
1819 cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */
1820 return 0;
1821}
1822
1823
/* SCSI midlayer queuecommand entry point: allocate a CommandList from
 * the pre-allocated pool, translate the scsi_cmnd into it, and start
 * I/O.  Completion arrives via the interrupt handler, which invokes
 * complete_scsi_command() and ultimately cmd->scsi_done().
 */
static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
	void (*done)(struct scsi_cmnd *))
{
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	unsigned char scsi3addr[8];
	struct CommandList *c;
	unsigned long flags;

	/* Get the ptr to our adapter structure out of cmd->host. */
	h = sdev_to_hba(cmd->device);
	dev = cmd->device->hostdata;
	if (!dev) {
		/* device went away; fail the command immediately */
		cmd->result = DID_NO_CONNECT << 16;
		done(cmd);
		return 0;
	}
	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));

	/* Need a lock as this is being allocated from the pool */
	spin_lock_irqsave(&h->lock, flags);
	c = cmd_alloc(h);
	spin_unlock_irqrestore(&h->lock, flags);
	if (c == NULL) { /* trouble... */
		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* Fill in the command list header */

	cmd->scsi_done = done; /* save this for use by completion code */

	/* save c in case we have to abort it */
	cmd->host_scribble = (unsigned char *) c;

	c->cmd_type = CMD_SCSI;
	c->scsi_cmd = cmd;
	c->Header.ReplyQueue = 0; /* unused in simple mode */
	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
	c->Header.Tag.lower = c->busaddr; /* Use k. address of cmd as tag */

	/* Fill in the request block... */

	c->Request.Timeout = 0;
	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
	c->Request.CDBLen = cmd->cmd_len;
	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
	c->Request.Type.Type = TYPE_CMD;
	c->Request.Type.Attribute = ATTR_SIMPLE;
	/* map the midlayer's DMA direction onto the controller's */
	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		c->Request.Type.Direction = XFER_WRITE;
		break;
	case DMA_FROM_DEVICE:
		c->Request.Type.Direction = XFER_READ;
		break;
	case DMA_NONE:
		c->Request.Type.Direction = XFER_NONE;
		break;
	case DMA_BIDIRECTIONAL:
		/* This can happen if a buggy application does a scsi passthru
		 * and sets both inlen and outlen to non-zero. ( see
		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
		 */

		c->Request.Type.Direction = XFER_RSVD;
		/* This is technically wrong, and hpsa controllers should
		 * reject it with CMD_INVALID, which is the most correct
		 * response, but non-fibre backends appear to let it
		 * slide by, and give the same results as if this field
		 * were set correctly.  Either way is acceptable for
		 * our purposes here.
		 */

		break;

	default:
		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
		BUG();
		break;
	}

	if (hpsa_scatter_gather(h->pdev, c, cmd) < 0) { /* Fill SG list */
		cmd_free(h, c);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	enqueue_cmd_and_start_io(h, c);
	/* the cmd'll come back via intr handler in complete_scsi_command()  */
	return 0;
}
1916
/* Tear down the SCSI host: remove it from the midlayer and drop our
 * reference.  Called on driver unload.
 */
static void hpsa_unregister_scsi(struct ctlr_info *h)
{
	/* we are being forcibly unloaded, and may not refuse. */
	scsi_remove_host(h->scsi_host);
	scsi_host_put(h->scsi_host);
	h->scsi_host = NULL;
}
1924
/* One-time registration with the SCSI midlayer: build the initial
 * device table, then allocate and add the Scsi_Host.
 * Returns 0 on success, nonzero on failure.
 */
static int hpsa_register_scsi(struct ctlr_info *h)
{
	int rc;

	/* hostno of -1: no host number has been assigned yet */
	hpsa_update_scsi_devices(h, -1);
	rc = hpsa_scsi_detect(h);
	if (rc != 0)
		dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
			" hpsa_scsi_detect(), rc is %d\n", rc);
	return rc;
}
1936
/* After a device reset, poll the LUN with TEST UNIT READY until it
 * answers sanely or HPSA_TUR_RETRY_LIMIT attempts are exhausted, with
 * exponentially increasing (capped) sleeps between attempts.
 * Returns 0 when the device is ready, nonzero otherwise.
 */
static int wait_for_device_to_become_ready(struct ctlr_info *h,
	unsigned char lunaddr[])
{
	int rc = 0;
	int count = 0;
	int waittime = 1; /* seconds */
	struct CommandList *c;

	c = cmd_special_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"wait_for_device_to_become_ready.\n");
		return IO_ERROR;
	}

	/* Send test unit ready until device ready, or give up. */
	while (count < HPSA_TUR_RETRY_LIMIT) {

		/* Wait for a bit.  do this first, because if we send
		 * the TUR right away, the reset will just abort it.
		 */
		msleep(1000 * waittime);
		count++;

		/* Increase wait time with each try, up to a point. */
		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
			waittime = waittime * 2;

		/* Send the Test Unit Ready */
		fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
		hpsa_scsi_do_simple_cmd_core(h, c);
		/* no unmap needed here because no data xfer. */

		if (c->err_info->CommandStatus == CMD_SUCCESS)
			break;

		/* A check condition with NO SENSE or UNIT ATTENTION also
		 * counts as "awake enough" -- the device is responding.
		 */
		if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
			(c->err_info->SenseInfo[2] == NO_SENSE ||
			c->err_info->SenseInfo[2] == UNIT_ATTENTION))
			break;

		dev_warn(&h->pdev->dev, "waiting %d secs "
			"for device to become ready.\n", waittime);
		rc = 1; /* device not ready. */
	}

	if (rc)
		dev_warn(&h->pdev->dev, "giving up on device.\n");
	else
		dev_warn(&h->pdev->dev, "device is ready.\n");

	cmd_special_free(h, c);
	return rc;
}
1992
1993/* Need at least one of these error handlers to keep ../scsi/hosts.c from
1994 * complaining. Doing a host- or bus-reset can't do anything good here.
1995 */
1996static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
1997{
1998 int rc;
1999 struct ctlr_info *h;
2000 struct hpsa_scsi_dev_t *dev;
2001
2002 /* find the controller to which the command to be aborted was sent */
2003 h = sdev_to_hba(scsicmd->device);
2004 if (h == NULL) /* paranoia */
2005 return FAILED;
2006 dev_warn(&h->pdev->dev, "resetting drive\n");
2007
2008 dev = scsicmd->device->hostdata;
2009 if (!dev) {
2010 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
2011 "device lookup failed.\n");
2012 return FAILED;
2013 }
2014 /* send a reset to the SCSI LUN which the command was sent to */
2015 rc = hpsa_send_reset(h, dev->scsi3addr);
2016 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
2017 return SUCCESS;
2018
2019 dev_warn(&h->pdev->dev, "resetting device failed.\n");
2020 return FAILED;
2021}
2022
2023/*
2024 * For operations that cannot sleep, a command block is allocated at init,
2025 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
2026 * which ones are free or in use. Lock must be held when calling this.
2027 * cmd_free() is the complement.
2028 */
/* Grab a free CommandList slot from the pre-allocated pool (caller
 * holds the lock per the comment above) and initialize its DMA-visible
 * fields.  Returns NULL when the pool is exhausted.
 */
static struct CommandList *cmd_alloc(struct ctlr_info *h)
{
	struct CommandList *c;
	int i;
	union u64bit temp64;
	dma_addr_t cmd_dma_handle, err_dma_handle;

	/* loop guards against a racing claim of the bit we just found */
	do {
		i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
		if (i == h->nr_cmds)
			return NULL;
	} while (test_and_set_bit
		 (i & (BITS_PER_LONG - 1),
		  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
	c = h->cmd_pool + i;
	memset(c, 0, sizeof(*c));
	/* slot i's DMA addresses are fixed offsets into the pool regions */
	cmd_dma_handle = h->cmd_pool_dhandle
	    + i * sizeof(*c);
	c->err_info = h->errinfo_pool + i;
	memset(c->err_info, 0, sizeof(*c->err_info));
	err_dma_handle = h->errinfo_pool_dhandle
	    + i * sizeof(*c->err_info);
	h->nr_allocs++;

	c->cmdindex = i;

	INIT_HLIST_NODE(&c->list);
	/* the controller addresses the command and its error block by
	 * these bus addresses */
	c->busaddr = (__u32) cmd_dma_handle;
	temp64.val = (__u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(*c->err_info);

	c->h = h;
	return c;
}
2065
2066/* For operations that can wait for kmalloc to possibly sleep,
2067 * this routine can be called. Lock need not be held to call
2068 * cmd_special_alloc. cmd_special_free() is the complement.
2069 */
2070static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
2071{
2072 struct CommandList *c;
2073 union u64bit temp64;
2074 dma_addr_t cmd_dma_handle, err_dma_handle;
2075
2076 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
2077 if (c == NULL)
2078 return NULL;
2079 memset(c, 0, sizeof(*c));
2080
2081 c->cmdindex = -1;
2082
2083 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
2084 &err_dma_handle);
2085
2086 if (c->err_info == NULL) {
2087 pci_free_consistent(h->pdev,
2088 sizeof(*c), c, cmd_dma_handle);
2089 return NULL;
2090 }
2091 memset(c->err_info, 0, sizeof(*c->err_info));
2092
2093 INIT_HLIST_NODE(&c->list);
2094 c->busaddr = (__u32) cmd_dma_handle;
2095 temp64.val = (__u64) err_dma_handle;
2096 c->ErrDesc.Addr.lower = temp64.val32.lower;
2097 c->ErrDesc.Addr.upper = temp64.val32.upper;
2098 c->ErrDesc.Len = sizeof(*c->err_info);
2099
2100 c->h = h;
2101 return c;
2102}
2103
2104static void cmd_free(struct ctlr_info *h, struct CommandList *c)
2105{
2106 int i;
2107
2108 i = c - h->cmd_pool;
2109 clear_bit(i & (BITS_PER_LONG - 1),
2110 h->cmd_pool_bits + (i / BITS_PER_LONG));
2111 h->nr_frees++;
2112}
2113
2114static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
2115{
2116 union u64bit temp64;
2117
2118 temp64.val32.lower = c->ErrDesc.Addr.lower;
2119 temp64.val32.upper = c->ErrDesc.Addr.upper;
2120 pci_free_consistent(h->pdev, sizeof(*c->err_info),
2121 c->err_info, (dma_addr_t) temp64.val);
2122 pci_free_consistent(h->pdev, sizeof(*c),
2123 c, (dma_addr_t) c->busaddr);
2124}
2125
2126#ifdef CONFIG_COMPAT
2127
/* Run the native ioctl handler under the big kernel lock on behalf of
 * the 32-bit compat entry points below.
 */
static int do_ioctl(struct scsi_device *dev, int cmd, void *arg)
{
	int ret;

	lock_kernel();
	ret = hpsa_ioctl(dev, cmd, arg);
	unlock_kernel();
	return ret;
}
2137
2138static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg);
2139static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2140 int cmd, void *arg);
2141
/* 32-bit compat ioctl dispatcher.  Most commands are forwarded to the
 * native handler unchanged (their argument structs are evidently
 * layout-compatible between 32- and 64-bit ABIs); the two passthru
 * commands embed user pointers and need translation.
 */
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		/* No translation needed; pass straight through. */
		return do_ioctl(dev, cmd, arg);

	case CCISS_PASSTHRU32:
		return hpsa_ioctl32_passthru(dev, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return hpsa_ioctl32_big_passthru(dev, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}
2171
/* Compat handler for CCISS_PASSTHRU32: rebuild a native
 * IOCTL_Command_struct in compat-allocated user space from the caller's
 * 32-bit struct, run the normal ioctl path on it, then copy the error
 * info back into the 32-bit struct.
 */
static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	/* copy_from_user()/get_user() return nonzero on fault; OR all the
	 * results together and test once below.
	 */
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	/* The 32-bit buffer pointer must be widened via compat_ptr(). */
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = do_ioctl(dev, CCISS_PASSTHRU, (void *)p);
	if (err)
		return err;
	/* Propagate the controller's error info back to the 32-bit
	 * caller (user space to user space copy).
	 */
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
2205
/* Compat handler for CCISS_BIG_PASSTHRU32: same translation scheme as
 * hpsa_ioctl32_passthru(), plus the extra malloc_size field carried by
 * the big variant.
 */
static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
	int cmd, void *arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	/* Accumulate fault indications from all user copies; check once. */
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	/* Widen the 32-bit user buffer pointer. */
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = do_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
	if (err)
		return err;
	/* Copy the error info back into the caller's 32-bit struct. */
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
2242#endif
2243
2244static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
2245{
2246 struct hpsa_pci_info pciinfo;
2247
2248 if (!argp)
2249 return -EINVAL;
2250 pciinfo.domain = pci_domain_nr(h->pdev->bus);
2251 pciinfo.bus = h->pdev->bus->number;
2252 pciinfo.dev_fn = h->pdev->devfn;
2253 pciinfo.board_id = h->board_id;
2254 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
2255 return -EFAULT;
2256 return 0;
2257}
2258
2259static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
2260{
2261 DriverVer_type DriverVer;
2262 unsigned char vmaj, vmin, vsubmin;
2263 int rc;
2264
2265 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
2266 &vmaj, &vmin, &vsubmin);
2267 if (rc != 3) {
2268 dev_info(&h->pdev->dev, "driver version string '%s' "
2269 "unrecognized.", HPSA_DRIVER_VERSION);
2270 vmaj = 0;
2271 vmin = 0;
2272 vsubmin = 0;
2273 }
2274 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
2275 if (!argp)
2276 return -EINVAL;
2277 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
2278 return -EFAULT;
2279 return 0;
2280}
2281
2282static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2283{
2284 IOCTL_Command_struct iocommand;
2285 struct CommandList *c;
2286 char *buff = NULL;
2287 union u64bit temp64;
2288
2289 if (!argp)
2290 return -EINVAL;
2291 if (!capable(CAP_SYS_RAWIO))
2292 return -EPERM;
2293 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
2294 return -EFAULT;
2295 if ((iocommand.buf_size < 1) &&
2296 (iocommand.Request.Type.Direction != XFER_NONE)) {
2297 return -EINVAL;
2298 }
2299 if (iocommand.buf_size > 0) {
2300 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
2301 if (buff == NULL)
2302 return -EFAULT;
2303 }
2304 if (iocommand.Request.Type.Direction == XFER_WRITE) {
2305 /* Copy the data into the buffer we created */
2306 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
2307 kfree(buff);
2308 return -EFAULT;
2309 }
2310 } else
2311 memset(buff, 0, iocommand.buf_size);
2312 c = cmd_special_alloc(h);
2313 if (c == NULL) {
2314 kfree(buff);
2315 return -ENOMEM;
2316 }
2317 /* Fill in the command type */
2318 c->cmd_type = CMD_IOCTL_PEND;
2319 /* Fill in Command Header */
2320 c->Header.ReplyQueue = 0; /* unused in simple mode */
2321 if (iocommand.buf_size > 0) { /* buffer to fill */
2322 c->Header.SGList = 1;
2323 c->Header.SGTotal = 1;
2324 } else { /* no buffers to fill */
2325 c->Header.SGList = 0;
2326 c->Header.SGTotal = 0;
2327 }
2328 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
2329 /* use the kernel address the cmd block for tag */
2330 c->Header.Tag.lower = c->busaddr;
2331
2332 /* Fill in Request block */
2333 memcpy(&c->Request, &iocommand.Request,
2334 sizeof(c->Request));
2335
2336 /* Fill in the scatter gather information */
2337 if (iocommand.buf_size > 0) {
2338 temp64.val = pci_map_single(h->pdev, buff,
2339 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
2340 c->SG[0].Addr.lower = temp64.val32.lower;
2341 c->SG[0].Addr.upper = temp64.val32.upper;
2342 c->SG[0].Len = iocommand.buf_size;
2343 c->SG[0].Ext = 0; /* we are not chaining*/
2344 }
2345 hpsa_scsi_do_simple_cmd_core(h, c);
2346 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
2347 check_ioctl_unit_attention(h, c);
2348
2349 /* Copy the error information out */
2350 memcpy(&iocommand.error_info, c->err_info,
2351 sizeof(iocommand.error_info));
2352 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
2353 kfree(buff);
2354 cmd_special_free(h, c);
2355 return -EFAULT;
2356 }
2357
2358 if (iocommand.Request.Type.Direction == XFER_READ) {
2359 /* Copy the data out of the buffer we created */
2360 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
2361 kfree(buff);
2362 cmd_special_free(h, c);
2363 return -EFAULT;
2364 }
2365 }
2366 kfree(buff);
2367 cmd_special_free(h, c);
2368 return 0;
2369}
2370
2371static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2372{
2373 BIG_IOCTL_Command_struct *ioc;
2374 struct CommandList *c;
2375 unsigned char **buff = NULL;
2376 int *buff_size = NULL;
2377 union u64bit temp64;
2378 BYTE sg_used = 0;
2379 int status = 0;
2380 int i;
2381 __u32 left;
2382 __u32 sz;
2383 BYTE __user *data_ptr;
2384
2385 if (!argp)
2386 return -EINVAL;
2387 if (!capable(CAP_SYS_RAWIO))
2388 return -EPERM;
2389 ioc = (BIG_IOCTL_Command_struct *)
2390 kmalloc(sizeof(*ioc), GFP_KERNEL);
2391 if (!ioc) {
2392 status = -ENOMEM;
2393 goto cleanup1;
2394 }
2395 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
2396 status = -EFAULT;
2397 goto cleanup1;
2398 }
2399 if ((ioc->buf_size < 1) &&
2400 (ioc->Request.Type.Direction != XFER_NONE)) {
2401 status = -EINVAL;
2402 goto cleanup1;
2403 }
2404 /* Check kmalloc limits using all SGs */
2405 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
2406 status = -EINVAL;
2407 goto cleanup1;
2408 }
2409 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
2410 status = -EINVAL;
2411 goto cleanup1;
2412 }
2413 buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
2414 if (!buff) {
2415 status = -ENOMEM;
2416 goto cleanup1;
2417 }
2418 buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
2419 if (!buff_size) {
2420 status = -ENOMEM;
2421 goto cleanup1;
2422 }
2423 left = ioc->buf_size;
2424 data_ptr = ioc->buf;
2425 while (left) {
2426 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
2427 buff_size[sg_used] = sz;
2428 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
2429 if (buff[sg_used] == NULL) {
2430 status = -ENOMEM;
2431 goto cleanup1;
2432 }
2433 if (ioc->Request.Type.Direction == XFER_WRITE) {
2434 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
2435 status = -ENOMEM;
2436 goto cleanup1;
2437 }
2438 } else
2439 memset(buff[sg_used], 0, sz);
2440 left -= sz;
2441 data_ptr += sz;
2442 sg_used++;
2443 }
2444 c = cmd_special_alloc(h);
2445 if (c == NULL) {
2446 status = -ENOMEM;
2447 goto cleanup1;
2448 }
2449 c->cmd_type = CMD_IOCTL_PEND;
2450 c->Header.ReplyQueue = 0;
2451
2452 if (ioc->buf_size > 0) {
2453 c->Header.SGList = sg_used;
2454 c->Header.SGTotal = sg_used;
2455 } else {
2456 c->Header.SGList = 0;
2457 c->Header.SGTotal = 0;
2458 }
2459 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
2460 c->Header.Tag.lower = c->busaddr;
2461 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
2462 if (ioc->buf_size > 0) {
2463 int i;
2464 for (i = 0; i < sg_used; i++) {
2465 temp64.val = pci_map_single(h->pdev, buff[i],
2466 buff_size[i], PCI_DMA_BIDIRECTIONAL);
2467 c->SG[i].Addr.lower = temp64.val32.lower;
2468 c->SG[i].Addr.upper = temp64.val32.upper;
2469 c->SG[i].Len = buff_size[i];
2470 /* we are not chaining */
2471 c->SG[i].Ext = 0;
2472 }
2473 }
2474 hpsa_scsi_do_simple_cmd_core(h, c);
2475 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
2476 check_ioctl_unit_attention(h, c);
2477 /* Copy the error information out */
2478 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
2479 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
2480 cmd_special_free(h, c);
2481 status = -EFAULT;
2482 goto cleanup1;
2483 }
2484 if (ioc->Request.Type.Direction == XFER_READ) {
2485 /* Copy the data out of the buffer we created */
2486 BYTE __user *ptr = ioc->buf;
2487 for (i = 0; i < sg_used; i++) {
2488 if (copy_to_user(ptr, buff[i], buff_size[i])) {
2489 cmd_special_free(h, c);
2490 status = -EFAULT;
2491 goto cleanup1;
2492 }
2493 ptr += buff_size[i];
2494 }
2495 }
2496 cmd_special_free(h, c);
2497 status = 0;
2498cleanup1:
2499 if (buff) {
2500 for (i = 0; i < sg_used; i++)
2501 kfree(buff[i]);
2502 kfree(buff);
2503 }
2504 kfree(buff_size);
2505 kfree(ioc);
2506 return status;
2507}
2508
2509static void check_ioctl_unit_attention(struct ctlr_info *h,
2510 struct CommandList *c)
2511{
2512 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2513 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
2514 (void) check_for_unit_attention(h, c);
2515}
2516/*
2517 * ioctl
2518 */
2519static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
2520{
2521 struct ctlr_info *h;
2522 void __user *argp = (void __user *)arg;
2523
2524 h = sdev_to_hba(dev);
2525
2526 switch (cmd) {
2527 case CCISS_DEREGDISK:
2528 case CCISS_REGNEWDISK:
2529 case CCISS_REGNEWD:
2530 hpsa_update_scsi_devices(h, dev->host->host_no);
2531 return 0;
2532 case CCISS_GETPCIINFO:
2533 return hpsa_getpciinfo_ioctl(h, argp);
2534 case CCISS_GETDRIVVER:
2535 return hpsa_getdrivver_ioctl(h, argp);
2536 case CCISS_PASSTHRU:
2537 return hpsa_passthru_ioctl(h, argp);
2538 case CCISS_BIG_PASSTHRU:
2539 return hpsa_big_passthru_ioctl(h, argp);
2540 default:
2541 return -ENOTTY;
2542 }
2543}
2544
2545static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h,
2546 void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr,
2547 int cmd_type)
2548{
2549 int pci_dir = XFER_NONE;
2550
2551 c->cmd_type = CMD_IOCTL_PEND;
2552 c->Header.ReplyQueue = 0;
2553 if (buff != NULL && size > 0) {
2554 c->Header.SGList = 1;
2555 c->Header.SGTotal = 1;
2556 } else {
2557 c->Header.SGList = 0;
2558 c->Header.SGTotal = 0;
2559 }
2560 c->Header.Tag.lower = c->busaddr;
2561 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
2562
2563 c->Request.Type.Type = cmd_type;
2564 if (cmd_type == TYPE_CMD) {
2565 switch (cmd) {
2566 case HPSA_INQUIRY:
2567 /* are we trying to read a vital product page */
2568 if (page_code != 0) {
2569 c->Request.CDB[1] = 0x01;
2570 c->Request.CDB[2] = page_code;
2571 }
2572 c->Request.CDBLen = 6;
2573 c->Request.Type.Attribute = ATTR_SIMPLE;
2574 c->Request.Type.Direction = XFER_READ;
2575 c->Request.Timeout = 0;
2576 c->Request.CDB[0] = HPSA_INQUIRY;
2577 c->Request.CDB[4] = size & 0xFF;
2578 break;
2579 case HPSA_REPORT_LOG:
2580 case HPSA_REPORT_PHYS:
2581 /* Talking to controller so It's a physical command
2582 mode = 00 target = 0. Nothing to write.
2583 */
2584 c->Request.CDBLen = 12;
2585 c->Request.Type.Attribute = ATTR_SIMPLE;
2586 c->Request.Type.Direction = XFER_READ;
2587 c->Request.Timeout = 0;
2588 c->Request.CDB[0] = cmd;
2589 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
2590 c->Request.CDB[7] = (size >> 16) & 0xFF;
2591 c->Request.CDB[8] = (size >> 8) & 0xFF;
2592 c->Request.CDB[9] = size & 0xFF;
2593 break;
2594
2595 case HPSA_READ_CAPACITY:
2596 c->Request.CDBLen = 10;
2597 c->Request.Type.Attribute = ATTR_SIMPLE;
2598 c->Request.Type.Direction = XFER_READ;
2599 c->Request.Timeout = 0;
2600 c->Request.CDB[0] = cmd;
2601 break;
2602 case HPSA_CACHE_FLUSH:
2603 c->Request.CDBLen = 12;
2604 c->Request.Type.Attribute = ATTR_SIMPLE;
2605 c->Request.Type.Direction = XFER_WRITE;
2606 c->Request.Timeout = 0;
2607 c->Request.CDB[0] = BMIC_WRITE;
2608 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
2609 break;
2610 case TEST_UNIT_READY:
2611 c->Request.CDBLen = 6;
2612 c->Request.Type.Attribute = ATTR_SIMPLE;
2613 c->Request.Type.Direction = XFER_NONE;
2614 c->Request.Timeout = 0;
2615 break;
2616 default:
2617 dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
2618 BUG();
2619 return;
2620 }
2621 } else if (cmd_type == TYPE_MSG) {
2622 switch (cmd) {
2623
2624 case HPSA_DEVICE_RESET_MSG:
2625 c->Request.CDBLen = 16;
2626 c->Request.Type.Type = 1; /* It is a MSG not a CMD */
2627 c->Request.Type.Attribute = ATTR_SIMPLE;
2628 c->Request.Type.Direction = XFER_NONE;
2629 c->Request.Timeout = 0; /* Don't time out */
2630 c->Request.CDB[0] = 0x01; /* RESET_MSG is 0x01 */
2631 c->Request.CDB[1] = 0x03; /* Reset target above */
2632 /* If bytes 4-7 are zero, it means reset the */
2633 /* LunID device */
2634 c->Request.CDB[4] = 0x00;
2635 c->Request.CDB[5] = 0x00;
2636 c->Request.CDB[6] = 0x00;
2637 c->Request.CDB[7] = 0x00;
2638 break;
2639
2640 default:
2641 dev_warn(&h->pdev->dev, "unknown message type %d\n",
2642 cmd);
2643 BUG();
2644 }
2645 } else {
2646 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
2647 BUG();
2648 }
2649
2650 switch (c->Request.Type.Direction) {
2651 case XFER_READ:
2652 pci_dir = PCI_DMA_FROMDEVICE;
2653 break;
2654 case XFER_WRITE:
2655 pci_dir = PCI_DMA_TODEVICE;
2656 break;
2657 case XFER_NONE:
2658 pci_dir = PCI_DMA_NONE;
2659 break;
2660 default:
2661 pci_dir = PCI_DMA_BIDIRECTIONAL;
2662 }
2663
2664 hpsa_map_one(h->pdev, c, buff, size, pci_dir);
2665
2666 return;
2667}
2668
2669/*
2670 * Map (physical) PCI mem into (virtual) kernel space
2671 */
2672static void __iomem *remap_pci_mem(ulong base, ulong size)
2673{
2674 ulong page_base = ((ulong) base) & PAGE_MASK;
2675 ulong page_offs = ((ulong) base) - page_base;
2676 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2677
2678 return page_remapped ? (page_remapped + page_offs) : NULL;
2679}
2680
2681/* Takes cmds off the submission queue and sends them to the hardware,
2682 * then puts them on the queue of cmds waiting for completion.
2683 */
static void start_io(struct ctlr_info *h)
{
	struct CommandList *c;

	/* Drain the request queue until it is empty or the hardware
	 * fifo fills up; each command moves to the completion queue to
	 * wait for its interrupt.
	 */
	while (!hlist_empty(&h->reqQ)) {
		c = hlist_entry(h->reqQ.first, struct CommandList, list);
		/* can't do anything if fifo is full */
		if ((h->access.fifo_full(h))) {
			dev_warn(&h->pdev->dev, "fifo full\n");
			break;
		}

		/* Get the first entry from the Request Q */
		removeQ(c);
		h->Qdepth--;

		/* Tell the controller execute command */
		h->access.submit_command(h, c);

		/* Put job onto the completed Q */
		addQ(&h->cmpQ, c);
	}
}
2707
/* Pop the next completed-command tag from the controller. */
static inline unsigned long get_next_completion(struct ctlr_info *h)
{
	return h->access.command_completed(h);
}
2712
/* Nonzero if the controller is signalling an interrupt. */
static inline int interrupt_pending(struct ctlr_info *h)
{
	return h->access.intr_pending(h);
}
2717
2718static inline long interrupt_not_for_us(struct ctlr_info *h)
2719{
2720 return ((h->access.intr_pending(h) == 0) ||
2721 (h->interrupts_enabled == 0));
2722}
2723
2724static inline int bad_tag(struct ctlr_info *h, __u32 tag_index,
2725 __u32 raw_tag)
2726{
2727 if (unlikely(tag_index >= h->nr_cmds)) {
2728 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
2729 return 1;
2730 }
2731 return 0;
2732}
2733
/* Take a completed command off the completion queue and hand it to the
 * right completion routine for its type.
 */
static inline void finish_cmd(struct CommandList *c, __u32 raw_tag)
{
	removeQ(c);
	if (likely(c->cmd_type == CMD_SCSI))
		complete_scsi_command(c, 0, raw_tag);
	else if (c->cmd_type == CMD_IOCTL_PEND)
		complete(c->waiting);	/* wake the sleeping ioctl caller */
}
2742
2743static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
2744{
2745 struct ctlr_info *h = dev_id;
2746 struct CommandList *c;
2747 unsigned long flags;
2748 __u32 raw_tag, tag, tag_index;
2749 struct hlist_node *tmp;
2750
2751 if (interrupt_not_for_us(h))
2752 return IRQ_NONE;
2753 spin_lock_irqsave(&h->lock, flags);
2754 while (interrupt_pending(h)) {
2755 while ((raw_tag = get_next_completion(h)) != FIFO_EMPTY) {
2756 if (likely(HPSA_TAG_CONTAINS_INDEX(raw_tag))) {
2757 tag_index = HPSA_TAG_TO_INDEX(raw_tag);
2758 if (bad_tag(h, tag_index, raw_tag))
2759 return IRQ_HANDLED;
2760 c = h->cmd_pool + tag_index;
2761 finish_cmd(c, raw_tag);
2762 continue;
2763 }
2764 tag = HPSA_TAG_DISCARD_ERROR_BITS(raw_tag);
2765 c = NULL;
2766 hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
2767 if (c->busaddr == tag) {
2768 finish_cmd(c, raw_tag);
2769 break;
2770 }
2771 }
2772 }
2773 }
2774 spin_unlock_irqrestore(&h->lock, flags);
2775 return IRQ_HANDLED;
2776}
2777
2778/* Send a message CDB to the firmware. */
static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
	unsigned char type)
{
	/* Minimal on-the-wire command layout used before the driver's
	 * normal command pool exists (e.g. during early reset).
	 */
	struct Command {
		struct CommandListHeader CommandHeader;
		struct RequestBlock Request;
		struct ErrDescriptor ErrorDescriptor;
	};
	struct Command *cmd;
	/* NOTE(review): the extra space after the struct is sized as
	 * sizeof(cmd->ErrorDescriptor), but ErrorDescriptor.Len below
	 * advertises sizeof(struct ErrorInfo) -- verify these agree.
	 */
	static const size_t cmd_sz = sizeof(*cmd) +
	    sizeof(cmd->ErrorDescriptor);
	dma_addr_t paddr64;
	uint32_t paddr32, tag;
	void __iomem *vaddr;
	int i, err;

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL)
		return -ENOMEM;

	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
	 * CCISS commands, so they must be allocated from the lower 4GiB of
	 * memory.
	 */
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		iounmap(vaddr);
		return -ENOMEM;
	}

	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
	if (cmd == NULL) {
		iounmap(vaddr);
		return -ENOMEM;
	}

	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
	 * although there's no guarantee, we assume that the address is at
	 * least 4-byte aligned (most likely, it's page-aligned).
	 */
	paddr32 = paddr64;

	cmd->CommandHeader.ReplyQueue = 0;
	cmd->CommandHeader.SGList = 0;
	cmd->CommandHeader.SGTotal = 0;
	/* Tag the command with its own physical address so we can
	 * recognize its completion below.
	 */
	cmd->CommandHeader.Tag.lower = paddr32;
	cmd->CommandHeader.Tag.upper = 0;
	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);

	cmd->Request.CDBLen = 16;
	cmd->Request.Type.Type = TYPE_MSG;
	cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
	cmd->Request.Type.Direction = XFER_NONE;
	cmd->Request.Timeout = 0; /* Don't time out */
	cmd->Request.CDB[0] = opcode;
	cmd->Request.CDB[1] = type;
	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
	cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
	cmd->ErrorDescriptor.Addr.upper = 0;
	cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);

	/* Post the command directly to the inbound queue register. */
	writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);

	/* Poll the reply port for our tag, with a bounded retry count. */
	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
		if (HPSA_TAG_DISCARD_ERROR_BITS(tag) == paddr32)
			break;
		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
	}

	iounmap(vaddr);

	/* we leak the DMA buffer here ... no choice since the controller could
	 * still complete the command.
	 */
	if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
			opcode, type);
		return -ETIMEDOUT;
	}

	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);

	if (tag & HPSA_ERROR_BIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
			opcode, type);
		return -EIO;
	}

	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
		opcode, type);
	return 0;
}
2872
2873#define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
2874#define hpsa_noop(p) hpsa_message(p, 3, 0)
2875
/* Clear the MSI and MSI-X enable bits directly in PCI config space.
 * Presumably needed when the controller was left with message-signalled
 * interrupts enabled (e.g. across a kexec) -- confirm against callers.
 */
static __devinit int hpsa_reset_msi(struct pci_dev *pdev)
{
/* the #defines are stolen from drivers/pci/msi.h. */
#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
#define PCI_MSIX_FLAGS_ENABLE (1 << 15)

	int pos;
	u16 control = 0;

	/* Disable MSI if its capability exists and it is enabled. */
	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
	if (pos) {
		pci_read_config_word(pdev, msi_control_reg(pos), &control);
		if (control & PCI_MSI_FLAGS_ENABLE) {
			dev_info(&pdev->dev, "resetting MSI\n");
			pci_write_config_word(pdev, msi_control_reg(pos),
					control & ~PCI_MSI_FLAGS_ENABLE);
		}
	}

	/* Likewise for MSI-X. */
	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(pdev, msi_control_reg(pos), &control);
		if (control & PCI_MSIX_FLAGS_ENABLE) {
			dev_info(&pdev->dev, "resetting MSI-X\n");
			pci_write_config_word(pdev, msi_control_reg(pos),
					control & ~PCI_MSIX_FLAGS_ENABLE);
		}
	}

	return 0;
}
2907
2908/* This does a hard reset of the controller using PCI power management
2909 * states.
2910 */
static __devinit int hpsa_hard_reset_controller(struct pci_dev *pdev)
{
	/* 32 16-bit words = the first 64 bytes of config space. */
	u16 pmcsr, saved_config_space[32];
	int i, pos;

	dev_info(&pdev->dev, "using PCI PM to reset controller\n");

	/* This is very nearly the same thing as
	 *
	 * pci_save_state(pci_dev);
	 * pci_set_power_state(pci_dev, PCI_D3hot);
	 * pci_set_power_state(pci_dev, PCI_D0);
	 * pci_restore_state(pci_dev);
	 *
	 * but we can't use these nice canned kernel routines on
	 * kexec, because they also check the MSI/MSI-X state in PCI
	 * configuration space and do the wrong thing when it is
	 * set/cleared.  Also, the pci_save/restore_state functions
	 * violate the ordering requirements for restoring the
	 * configuration space from the CCISS document (see the
	 * comment below).  So we roll our own ....
	 */

	for (i = 0; i < 32; i++)
		pci_read_config_word(pdev, 2*i, &saved_config_space[i]);

	pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pos == 0) {
		dev_err(&pdev->dev,
			"hpsa_reset_controller: PCI PM not supported\n");
		return -ENODEV;
	}

	/* Quoting from the Open CISS Specification: "The Power
	 * Management Control/Status Register (CSR) controls the power
	 * state of the device.  The normal operating state is D0,
	 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
	 * the controller, place the interface device in D3 then to
	 * D0, this causes a secondary PCI reset which will reset the
	 * controller."
	 */

	/* enter the D3hot power management state */
	pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pmcsr |= PCI_D3hot;
	pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);

	msleep(500);

	/* enter the D0 power management state */
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pmcsr |= PCI_D0;
	pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);

	msleep(500);

	/* Restore the PCI configuration space.  The Open CISS
	 * Specification says, "Restore the PCI Configuration
	 * Registers, offsets 00h through 60h. It is important to
	 * restore the command register, 16-bits at offset 04h,
	 * last. Do not restore the configuration status register,
	 * 16-bits at offset 06h."  Note that the offset is 2*i.
	 */
	for (i = 0; i < 32; i++) {
		/* Skip word 2 (command reg, restored last, below) and
		 * word 3 (status reg, never restored).
		 */
		if (i == 2 || i == 3)
			continue;
		pci_write_config_word(pdev, 2*i, saved_config_space[i]);
	}
	wmb();
	/* Command register (offset 04h) goes last, per the spec. */
	pci_write_config_word(pdev, 4, saved_config_space[2]);

	return 0;
}
2985
2986/*
2987 * We cannot read the structure directly, for portability we must use
2988 * the io functions.
2989 * This is for debug only.
2990 */
2991#ifdef HPSA_DEBUG
2992static void print_cfg_table(struct device *dev, struct CfgTable *tb)
2993{
2994 int i;
2995 char temp_name[17];
2996
2997 dev_info(dev, "Controller Configuration information\n");
2998 dev_info(dev, "------------------------------------\n");
2999 for (i = 0; i < 4; i++)
3000 temp_name[i] = readb(&(tb->Signature[i]));
3001 temp_name[4] = '\0';
3002 dev_info(dev, " Signature = %s\n", temp_name);
3003 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
3004 dev_info(dev, " Transport methods supported = 0x%x\n",
3005 readl(&(tb->TransportSupport)));
3006 dev_info(dev, " Transport methods active = 0x%x\n",
3007 readl(&(tb->TransportActive)));
3008 dev_info(dev, " Requested transport Method = 0x%x\n",
3009 readl(&(tb->HostWrite.TransportRequest)));
3010 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
3011 readl(&(tb->HostWrite.CoalIntDelay)));
3012 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
3013 readl(&(tb->HostWrite.CoalIntCount)));
3014 dev_info(dev, " Max outstanding commands = 0x%d\n",
3015 readl(&(tb->CmdsOutMax)));
3016 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
3017 for (i = 0; i < 16; i++)
3018 temp_name[i] = readb(&(tb->ServerName[i]));
3019 temp_name[16] = '\0';
3020 dev_info(dev, " Server Name = %s\n", temp_name);
3021 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
3022 readl(&(tb->HeartBeat)));
3023}
3024#endif /* HPSA_DEBUG */
3025
3026static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3027{
3028 int i, offset, mem_type, bar_type;
3029
3030 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
3031 return 0;
3032 offset = 0;
3033 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3034 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
3035 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
3036 offset += 4;
3037 else {
3038 mem_type = pci_resource_flags(pdev, i) &
3039 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
3040 switch (mem_type) {
3041 case PCI_BASE_ADDRESS_MEM_TYPE_32:
3042 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
3043 offset += 4; /* 32 bit */
3044 break;
3045 case PCI_BASE_ADDRESS_MEM_TYPE_64:
3046 offset += 8;
3047 break;
3048 default: /* reserved in PCI 2.2 */
3049 dev_warn(&pdev->dev,
3050 "base address is invalid\n");
3051 return -1;
3052 break;
3053 }
3054 }
3055 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
3056 return i + 1;
3057 }
3058 return -1;
3059}
3060
3061/* If MSI/MSI-X is supported by the kernel we will try to enable it on
3062 * controllers that are capable. If not, we use IO-APIC mode.
3063 */
3064
static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
					   struct pci_dev *pdev, __u32 board_id)
{
#ifdef CONFIG_PCI_MSI
	int err;
	struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
	{0, 2}, {0, 3}
	};

	/* Some boards advertise MSI but don't really support it */
	if ((board_id == 0x40700E11) ||
	    (board_id == 0x40800E11) ||
	    (board_id == 0x40820E11) || (board_id == 0x40830E11))
		goto default_int_mode;
	/* Preference order: MSI-X (4 vectors), then MSI, then IO-APIC. */
	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
		dev_info(&pdev->dev, "MSIX\n");
		err = pci_enable_msix(pdev, hpsa_msix_entries, 4);
		if (!err) {
			h->intr[0] = hpsa_msix_entries[0].vector;
			h->intr[1] = hpsa_msix_entries[1].vector;
			h->intr[2] = hpsa_msix_entries[2].vector;
			h->intr[3] = hpsa_msix_entries[3].vector;
			h->msix_vector = 1;
			return;
		}
		/* err > 0 means fewer vectors available than requested;
		 * err < 0 means the enable itself failed.  Either way,
		 * fall back to simpler interrupt modes.
		 */
		if (err > 0) {
			dev_warn(&pdev->dev, "only %d MSI-X vectors "
			       "available\n", err);
			goto default_int_mode;
		} else {
			dev_warn(&pdev->dev, "MSI-X init failed %d\n",
			       err);
			goto default_int_mode;
		}
	}
	if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
		dev_info(&pdev->dev, "MSI\n");
		if (!pci_enable_msi(pdev))
			h->msi_vector = 1;
		else
			dev_warn(&pdev->dev, "MSI init failed\n");
	}
default_int_mode:
#endif				/* CONFIG_PCI_MSI */
	/* if we get here we're going to use the default interrupt mode */
	h->intr[SIMPLE_MODE_INT] = pdev->irq;
	return;
}
3113
3114static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
3115{
3116 ushort subsystem_vendor_id, subsystem_device_id, command;
3117 __u32 board_id, scratchpad = 0;
3118 __u64 cfg_offset;
3119 __u32 cfg_base_addr;
3120 __u64 cfg_base_addr_index;
3121 int i, prod_index, err;
3122
3123 subsystem_vendor_id = pdev->subsystem_vendor;
3124 subsystem_device_id = pdev->subsystem_device;
3125 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
3126 subsystem_vendor_id);
3127
3128 for (i = 0; i < ARRAY_SIZE(products); i++)
3129 if (board_id == products[i].board_id)
3130 break;
3131
3132 prod_index = i;
3133
3134 if (prod_index == ARRAY_SIZE(products)) {
3135 prod_index--;
3136 if (subsystem_vendor_id != PCI_VENDOR_ID_HP ||
3137 !hpsa_allow_any) {
3138 dev_warn(&pdev->dev, "unrecognized board ID:"
3139 " 0x%08lx, ignoring.\n",
3140 (unsigned long) board_id);
3141 return -ENODEV;
3142 }
3143 }
3144 /* check to see if controller has been disabled
3145 * BEFORE trying to enable it
3146 */
3147 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
3148 if (!(command & 0x02)) {
3149 dev_warn(&pdev->dev, "controller appears to be disabled\n");
3150 return -ENODEV;
3151 }
3152
3153 err = pci_enable_device(pdev);
3154 if (err) {
3155 dev_warn(&pdev->dev, "unable to enable PCI device\n");
3156 return err;
3157 }
3158
3159 err = pci_request_regions(pdev, "hpsa");
3160 if (err) {
3161 dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
3162 return err;
3163 }
3164
3165 /* If the kernel supports MSI/MSI-X we will try to enable that,
3166 * else we use the IO-APIC interrupt assigned to us by system ROM.
3167 */
3168 hpsa_interrupt_mode(h, pdev, board_id);
3169
3170 /* find the memory BAR */
3171 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3172 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM)
3173 break;
3174 }
3175 if (i == DEVICE_COUNT_RESOURCE) {
3176 dev_warn(&pdev->dev, "no memory BAR found\n");
3177 err = -ENODEV;
3178 goto err_out_free_res;
3179 }
3180
3181 h->paddr = pci_resource_start(pdev, i); /* addressing mode bits
3182 * already removed
3183 */
3184
3185 h->vaddr = remap_pci_mem(h->paddr, 0x250);
3186
3187 /* Wait for the board to become ready. */
3188 for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
3189 scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
3190 if (scratchpad == HPSA_FIRMWARE_READY)
3191 break;
3192 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
3193 }
3194 if (scratchpad != HPSA_FIRMWARE_READY) {
3195 dev_warn(&pdev->dev, "board not ready, timed out.\n");
3196 err = -ENODEV;
3197 goto err_out_free_res;
3198 }
3199
3200 /* get the address index number */
3201 cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET);
3202 cfg_base_addr &= (__u32) 0x0000ffff;
3203 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
3204 if (cfg_base_addr_index == -1) {
3205 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
3206 err = -ENODEV;
3207 goto err_out_free_res;
3208 }
3209
3210 cfg_offset = readl(h->vaddr + SA5_CTMEM_OFFSET);
3211 h->cfgtable = remap_pci_mem(pci_resource_start(pdev,
3212 cfg_base_addr_index) + cfg_offset,
3213 sizeof(h->cfgtable));
3214 h->board_id = board_id;
3215
3216 /* Query controller for max supported commands: */
3217 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
3218
3219 h->product_name = products[prod_index].product_name;
3220 h->access = *(products[prod_index].access);
3221 /* Allow room for some ioctls */
3222 h->nr_cmds = h->max_commands - 4;
3223
3224 if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
3225 (readb(&h->cfgtable->Signature[1]) != 'I') ||
3226 (readb(&h->cfgtable->Signature[2]) != 'S') ||
3227 (readb(&h->cfgtable->Signature[3]) != 'S')) {
3228 dev_warn(&pdev->dev, "not a valid CISS config table\n");
3229 err = -ENODEV;
3230 goto err_out_free_res;
3231 }
3232#ifdef CONFIG_X86
3233 {
3234 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3235 __u32 prefetch;
3236 prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
3237 prefetch |= 0x100;
3238 writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
3239 }
3240#endif
3241
3242 /* Disabling DMA prefetch for the P600
3243 * An ASIC bug may result in a prefetch beyond
3244 * physical memory.
3245 */
3246 if (board_id == 0x3225103C) {
3247 __u32 dma_prefetch;
3248 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
3249 dma_prefetch |= 0x8000;
3250 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
3251 }
3252
3253 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
3254 /* Update the field, and then ring the doorbell */
3255 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
3256 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3257
3258 /* under certain very rare conditions, this can take awhile.
3259 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3260 * as we enter this code.)
3261 */
3262 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3263 if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3264 break;
3265 /* delay and try again */
3266 msleep(10);
3267 }
3268
3269#ifdef HPSA_DEBUG
3270 print_cfg_table(&pdev->dev, h->cfgtable);
3271#endif /* HPSA_DEBUG */
3272
3273 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3274 dev_warn(&pdev->dev, "unable to get board into simple mode\n");
3275 err = -ENODEV;
3276 goto err_out_free_res;
3277 }
3278 return 0;
3279
3280err_out_free_res:
3281 /*
3282 * Deliberately omit pci_disable_device(): it does something nasty to
3283 * Smart Array controllers that pci_enable_device does not undo
3284 */
3285 pci_release_regions(pdev);
3286 return err;
3287}
3288
3289static int __devinit hpsa_init_one(struct pci_dev *pdev,
3290 const struct pci_device_id *ent)
3291{
3292 int i;
3293 int dac;
3294 struct ctlr_info *h;
3295
3296 if (number_of_controllers == 0)
3297 printk(KERN_INFO DRIVER_NAME "\n");
3298 if (reset_devices) {
3299 /* Reset the controller with a PCI power-cycle */
3300 if (hpsa_hard_reset_controller(pdev) || hpsa_reset_msi(pdev))
3301 return -ENODEV;
3302
3303 /* Some devices (notably the HP Smart Array 5i Controller)
3304 need a little pause here */
3305 msleep(HPSA_POST_RESET_PAUSE_MSECS);
3306
3307 /* Now try to get the controller to respond to a no-op */
3308 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
3309 if (hpsa_noop(pdev) == 0)
3310 break;
3311 else
3312 dev_warn(&pdev->dev, "no-op failed%s\n",
3313 (i < 11 ? "; re-trying" : ""));
3314 }
3315 }
3316
3317 BUILD_BUG_ON(sizeof(struct CommandList) % 8);
3318 h = kzalloc(sizeof(*h), GFP_KERNEL);
3319 if (!h)
3320 return -1;
3321
3322 h->busy_initializing = 1;
3323 INIT_HLIST_HEAD(&h->cmpQ);
3324 INIT_HLIST_HEAD(&h->reqQ);
3325 mutex_init(&h->busy_shutting_down);
3326 init_completion(&h->scan_wait);
3327 if (hpsa_pci_init(h, pdev) != 0)
3328 goto clean1;
3329
3330 sprintf(h->devname, "hpsa%d", number_of_controllers);
3331 h->ctlr = number_of_controllers;
3332 number_of_controllers++;
3333 h->pdev = pdev;
3334
3335 /* configure PCI DMA stuff */
3336 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
3337 dac = 1;
3338 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
3339 dac = 0;
3340 else {
3341 dev_err(&pdev->dev, "no suitable DMA available\n");
3342 goto clean1;
3343 }
3344
3345 /* make sure the board interrupts are off */
3346 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3347 if (request_irq(h->intr[SIMPLE_MODE_INT], do_hpsa_intr,
3348 IRQF_DISABLED | IRQF_SHARED, h->devname, h)) {
3349 dev_err(&pdev->dev, "unable to get irq %d for %s\n",
3350 h->intr[SIMPLE_MODE_INT], h->devname);
3351 goto clean2;
3352 }
3353
3354 dev_info(&pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3355 h->devname, pdev->device, pci_name(pdev),
3356 h->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3357
3358 h->cmd_pool_bits =
3359 kmalloc(((h->nr_cmds + BITS_PER_LONG -
3360 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3361 h->cmd_pool = pci_alloc_consistent(h->pdev,
3362 h->nr_cmds * sizeof(*h->cmd_pool),
3363 &(h->cmd_pool_dhandle));
3364 h->errinfo_pool = pci_alloc_consistent(h->pdev,
3365 h->nr_cmds * sizeof(*h->errinfo_pool),
3366 &(h->errinfo_pool_dhandle));
3367 if ((h->cmd_pool_bits == NULL)
3368 || (h->cmd_pool == NULL)
3369 || (h->errinfo_pool == NULL)) {
3370 dev_err(&pdev->dev, "out of memory");
3371 goto clean4;
3372 }
3373 spin_lock_init(&h->lock);
3374
3375 pci_set_drvdata(pdev, h);
3376 memset(h->cmd_pool_bits, 0,
3377 ((h->nr_cmds + BITS_PER_LONG -
3378 1) / BITS_PER_LONG) * sizeof(unsigned long));
3379
3380 hpsa_scsi_setup(h);
3381
3382 /* Turn the interrupts on so we can service requests */
3383 h->access.set_intr_mask(h, HPSA_INTR_ON);
3384
3385 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
3386 h->busy_initializing = 0;
3387 return 1;
3388
3389clean4:
3390 kfree(h->cmd_pool_bits);
3391 if (h->cmd_pool)
3392 pci_free_consistent(h->pdev,
3393 h->nr_cmds * sizeof(struct CommandList),
3394 h->cmd_pool, h->cmd_pool_dhandle);
3395 if (h->errinfo_pool)
3396 pci_free_consistent(h->pdev,
3397 h->nr_cmds * sizeof(struct ErrorInfo),
3398 h->errinfo_pool,
3399 h->errinfo_pool_dhandle);
3400 free_irq(h->intr[SIMPLE_MODE_INT], h);
3401clean2:
3402clean1:
3403 h->busy_initializing = 0;
3404 kfree(h);
3405 return -1;
3406}
3407
3408static void hpsa_flush_cache(struct ctlr_info *h)
3409{
3410 char *flush_buf;
3411 struct CommandList *c;
3412
3413 flush_buf = kzalloc(4, GFP_KERNEL);
3414 if (!flush_buf)
3415 return;
3416
3417 c = cmd_special_alloc(h);
3418 if (!c) {
3419 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
3420 goto out_of_memory;
3421 }
3422 fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
3423 RAID_CTLR_LUNID, TYPE_CMD);
3424 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
3425 if (c->err_info->CommandStatus != 0)
3426 dev_warn(&h->pdev->dev,
3427 "error flushing cache on controller\n");
3428 cmd_special_free(h, c);
3429out_of_memory:
3430 kfree(flush_buf);
3431}
3432
3433static void hpsa_shutdown(struct pci_dev *pdev)
3434{
3435 struct ctlr_info *h;
3436
3437 h = pci_get_drvdata(pdev);
3438 /* Turn board interrupts off and send the flush cache command
3439 * sendcmd will turn off interrupt, and send the flush...
3440 * To write all data in the battery backed cache to disks
3441 */
3442 hpsa_flush_cache(h);
3443 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3444 free_irq(h->intr[2], h);
3445#ifdef CONFIG_PCI_MSI
3446 if (h->msix_vector)
3447 pci_disable_msix(h->pdev);
3448 else if (h->msi_vector)
3449 pci_disable_msi(h->pdev);
3450#endif /* CONFIG_PCI_MSI */
3451}
3452
/* PCI remove hook: unhook the controller from the SCSI layer, shut the
 * hardware down, and free all per-controller resources.  Teardown order
 * matters: stop new work (scan list, SCSI) before hpsa_shutdown() frees
 * the IRQ, and only free the DMA pools after no commands can be issued.
 */
static void __devexit hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device \n");
		return;
	}
	h = pci_get_drvdata(pdev);
	/* Hold off a concurrent shutdown/ioctl until teardown completes. */
	mutex_lock(&h->busy_shutting_down);
	remove_from_scan_list(h);
	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
	hpsa_shutdown(pdev);		/* flush cache, mask irqs, free irq */
	iounmap(h->vaddr);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct CommandList),
		h->cmd_pool, h->cmd_pool_dhandle);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct ErrorInfo),
		h->errinfo_pool, h->errinfo_pool_dhandle);
	kfree(h->cmd_pool_bits);
	/*
	 * Deliberately omit pci_disable_device(): it does something nasty to
	 * Smart Array controllers that pci_enable_device does not undo
	 */
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	mutex_unlock(&h->busy_shutting_down);
	kfree(h);
}
3483
3484static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
3485 __attribute__((unused)) pm_message_t state)
3486{
3487 return -ENOSYS;
3488}
3489
3490static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
3491{
3492 return -ENOSYS;
3493}
3494
3495static struct pci_driver hpsa_pci_driver = {
3496 .name = "hpsa",
3497 .probe = hpsa_init_one,
3498 .remove = __devexit_p(hpsa_remove_one),
3499 .id_table = hpsa_pci_device_id, /* id_table */
3500 .shutdown = hpsa_shutdown,
3501 .suspend = hpsa_suspend,
3502 .resume = hpsa_resume,
3503};
3504
3505/*
3506 * This is it. Register the PCI driver information for the cards we control
3507 * the OS will call our registered routines when it finds one of our cards.
3508 */
3509static int __init hpsa_init(void)
3510{
3511 int err;
3512 /* Start the scan thread */
3513 hpsa_scan_thread = kthread_run(hpsa_scan_func, NULL, "hpsa_scan");
3514 if (IS_ERR(hpsa_scan_thread)) {
3515 err = PTR_ERR(hpsa_scan_thread);
3516 return -ENODEV;
3517 }
3518 err = pci_register_driver(&hpsa_pci_driver);
3519 if (err)
3520 kthread_stop(hpsa_scan_thread);
3521 return err;
3522}
3523
/* Module exit: unregister the PCI driver (tearing down all controllers
 * via hpsa_remove_one) and then stop the background scan thread.
 */
static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
	kthread_stop(hpsa_scan_thread);
}
3529
3530module_init(hpsa_init);
3531module_exit(hpsa_cleanup);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
new file mode 100644
index 000000000000..6bd1949144b5
--- /dev/null
+++ b/drivers/scsi/hpsa.h
@@ -0,0 +1,273 @@
1/*
2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 *
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
19 *
20 */
21#ifndef HPSA_H
22#define HPSA_H
23
24#include <scsi/scsicam.h>
25
26#define IO_OK 0
27#define IO_ERROR 1
28
29struct ctlr_info;
30
/* Per-transport hardware access vtable.  One instance exists per board
 * access method; h->access is a copy of the table for the active board.
 */
struct access_method {
	/* Hand a built command (by bus address) to the controller. */
	void (*submit_command)(struct ctlr_info *h,
		struct CommandList *c);
	/* Enable (nonzero val) or disable (zero) board interrupts. */
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
	/* Nonzero when the controller cannot accept another command. */
	unsigned long (*fifo_full)(struct ctlr_info *h);
	/* Nonzero when this controller is asserting an interrupt. */
	unsigned long (*intr_pending)(struct ctlr_info *h);
	/* Pop one completed command tag (FIFO_EMPTY when none). */
	unsigned long (*command_completed)(struct ctlr_info *h);
};
39
/* One logical or physical device exposed by a controller, as discovered
 * by inquiry; maps the OS-visible bus/target/lun to the 8-byte hardware
 * address used in commands.
 */
struct hpsa_scsi_dev_t {
	int devtype;		/* SCSI device type (TYPE_DISK etc.) */
	int bus, target, lun;		/* as presented to the OS */
	unsigned char scsi3addr[8];	/* as presented to the HW */
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
	unsigned char device_id[16];	/* from inquiry pg. 0x83 */
	unsigned char vendor[8];	/* bytes 8-15 of inquiry data */
	unsigned char model[16];	/* bytes 16-31 of inquiry data */
	unsigned char revision[4];	/* bytes 32-35 of inquiry data */
	unsigned char raid_level;	/* from inquiry page 0xC1 */
};
51
/* Per-controller state for one Smart Array board. */
struct ctlr_info {
	int	ctlr;			/* index into global controller table */
	char	devname[8];		/* "hpsaN" */
	char    *product_name;
	char	firm_ver[4];		/* Firmware version */
	struct pci_dev *pdev;
	__u32	board_id;		/* subsystem device/vendor id pair */
	void __iomem *vaddr;		/* mapped controller registers */
	unsigned long paddr;		/* bus address of register BAR */
	int 	nr_cmds; /* Number of commands allowed on this controller */
	struct CfgTable __iomem *cfgtable;	/* mapped CISS config table */
	int	interrupts_enabled;
	int	major;
	int 	max_commands;		/* from cfgtable->CmdsOutMax */
	int	commands_outstanding;
	int 	max_outstanding; /* Debug */
	int	usage_count;  /* number of opens of all minor devices */
#	define DOORBELL_INT	0
#	define PERF_MODE_INT	1
#	define SIMPLE_MODE_INT	2
#	define MEMQ_MODE_INT	3
	unsigned int intr[4];		/* irq vectors, indexed by mode above */
	unsigned int msix_vector;	/* nonzero when MSI-X is enabled */
	unsigned int msi_vector;	/* nonzero when MSI is enabled */
	struct access_method access;	/* hardware access vtable for board */

	/* queue and queue Info */
	struct hlist_head reqQ;		/* commands waiting to be submitted */
	struct hlist_head cmpQ;		/* commands submitted, awaiting completion */
	unsigned int Qdepth;
	unsigned int maxQsinceinit;
	unsigned int maxSG;
	spinlock_t lock;		/* protects the queues above */

	/* pointers to command and error info pool */
	struct CommandList 	*cmd_pool;
	dma_addr_t		cmd_pool_dhandle;
	struct ErrorInfo 	*errinfo_pool;
	dma_addr_t		errinfo_pool_dhandle;
	unsigned long  		*cmd_pool_bits;	/* allocation bitmap for cmd_pool */
	int 			nr_allocs;
	int 			nr_frees;
	int			busy_initializing;	/* set during hpsa_init_one */
	int			busy_scanning;
	struct mutex		busy_shutting_down;	/* serializes remove vs. users */
	struct list_head	scan_list;
	struct completion	scan_wait;

	struct Scsi_Host *scsi_host;
	spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
	int ndevices; /* number of used elements in .dev[] array. */
#define HPSA_MAX_SCSI_DEVS_PER_HBA 256
	struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA];
};
106#define HPSA_ABORT_MSG 0
107#define HPSA_DEVICE_RESET_MSG 1
108#define HPSA_BUS_RESET_MSG 2
109#define HPSA_HOST_RESET_MSG 3
110#define HPSA_MSG_SEND_RETRY_LIMIT 10
111#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS 1000
112
113/* Maximum time in seconds driver will wait for command completions
114 * when polling before giving up.
115 */
116#define HPSA_MAX_POLL_TIME_SECS (20)
117
118/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
119 * how many times to retry TEST UNIT READY on a device
120 * while waiting for it to become ready before giving up.
121 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
122 * between sending TURs while waiting for a device
123 * to become ready.
124 */
125#define HPSA_TUR_RETRY_LIMIT (20)
126#define HPSA_MAX_WAIT_INTERVAL_SECS (30)
127
128/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
129 * to become ready, in seconds, before giving up on it.
130 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS * is how long to wait
131 * between polling the board to see if it is ready, in
132 * milliseconds. HPSA_BOARD_READY_POLL_INTERVAL and
133 * HPSA_BOARD_READY_ITERATIONS are derived from those.
134 */
135#define HPSA_BOARD_READY_WAIT_SECS (120)
136#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
137#define HPSA_BOARD_READY_POLL_INTERVAL \
138 ((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
139#define HPSA_BOARD_READY_ITERATIONS \
140 ((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
141 HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
142#define HPSA_POST_RESET_PAUSE_MSECS (3000)
143#define HPSA_POST_RESET_NOOP_RETRIES (12)
144
/* Definitions for the different access methods */
146/*
147 * Memory mapped FIFO interface (SMART 53xx cards)
148 */
149#define SA5_DOORBELL 0x20
150#define SA5_REQUEST_PORT_OFFSET 0x40
151#define SA5_REPLY_INTR_MASK_OFFSET 0x34
152#define SA5_REPLY_PORT_OFFSET 0x44
153#define SA5_INTR_STATUS 0x30
154#define SA5_SCRATCHPAD_OFFSET 0xB0
155
156#define SA5_CTCFG_OFFSET 0xB4
157#define SA5_CTMEM_OFFSET 0xB8
158
159#define SA5_INTR_OFF 0x08
160#define SA5B_INTR_OFF 0x04
161#define SA5_INTR_PENDING 0x08
162#define SA5B_INTR_PENDING 0x04
163#define FIFO_EMPTY 0xffffffff
164#define HPSA_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */
165
166#define HPSA_ERROR_BIT 0x02
167#define HPSA_TAG_CONTAINS_INDEX(tag) ((tag) & 0x04)
168#define HPSA_TAG_TO_INDEX(tag) ((tag) >> 3)
169#define HPSA_TAG_DISCARD_ERROR_BITS(tag) ((tag) & ~3)
170
171#define HPSA_INTR_ON 1
172#define HPSA_INTR_OFF 0
173/*
174 Send the command to the hardware
175*/
/* Hand one command to the controller by writing its bus address to the
 * request FIFO register, and track the outstanding-command statistics.
 */
static void SA5_submit_command(struct ctlr_info *h,
	struct CommandList *c)
{
#ifdef HPSA_DEBUG
	printk(KERN_WARNING "hpsa: Sending %x - down to controller\n",
		c->busaddr);
#endif /* HPSA_DEBUG */
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
	/* Record the high-water mark of outstanding commands (debug). */
	h->commands_outstanding++;
	if (h->commands_outstanding > h->max_outstanding)
		h->max_outstanding = h->commands_outstanding;
}
188
189/*
190 * This card is the opposite of the other cards.
191 * 0 turns interrupts on...
192 * 0x08 turns them off...
193 */
/* Enable (val != 0) or disable (val == 0) the board's reply interrupts.
 * Note the hardware polarity is inverted: writing 0 to the mask register
 * turns interrupts ON; writing SA5_INTR_OFF (0x08) turns them off.
 */
static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}
205/*
206 * Returns true if fifo is full.
207 *
208 */
209static unsigned long SA5_fifo_full(struct ctlr_info *h)
210{
211 if (h->commands_outstanding >= h->max_commands)
212 return 1;
213 else
214 return 0;
215
216}
217/*
218 * returns value read from hardware.
219 * returns FIFO_EMPTY if there is nothing to read
220 */
/* Pop one completed command tag from the reply FIFO.
 * Returns the raw tag value read from hardware, or FIFO_EMPTY
 * (0xffffffff) when there is nothing to read.
 */
static unsigned long SA5_completed(struct ctlr_info *h)
{
	unsigned long register_value
		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);

	/* A real tag means one fewer command is outstanding. */
	if (register_value != FIFO_EMPTY)
		h->commands_outstanding--;

#ifdef HPSA_DEBUG
	if (register_value != FIFO_EMPTY)
		printk(KERN_INFO "hpsa: Read %lx back from board\n",
			register_value);
	else
		printk(KERN_INFO "hpsa: FIFO Empty read\n");
#endif

	return register_value;
}
239/*
240 * Returns true if an interrupt is pending..
241 */
242static unsigned long SA5_intr_pending(struct ctlr_info *h)
243{
244 unsigned long register_value =
245 readl(h->vaddr + SA5_INTR_STATUS);
246#ifdef HPSA_DEBUG
247 printk(KERN_INFO "hpsa: intr_pending %lx\n", register_value);
248#endif /* HPSA_DEBUG */
249 if (register_value & SA5_INTR_PENDING)
250 return 1;
251 return 0 ;
252}
253
254
255static struct access_method SA5_access = {
256 SA5_submit_command,
257 SA5_intr_mask,
258 SA5_fifo_full,
259 SA5_intr_pending,
260 SA5_completed,
261};
262
/* Maps a board_id (subsystem device/vendor id pair) to its product name
 * and hardware access method; used to build the products[] table.
 */
struct board_type {
	__u32	board_id;
	char	*product_name;
	struct access_method *access;
};
268
269
270/* end of old hpsa_scsi.h file */
271
272#endif /* HPSA_H */
273
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
new file mode 100644
index 000000000000..12d71387ed9a
--- /dev/null
+++ b/drivers/scsi/hpsa_cmd.h
@@ -0,0 +1,326 @@
1/*
2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 *
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
19 *
20 */
21#ifndef HPSA_CMD_H
22#define HPSA_CMD_H
23
/* general boundary definitions */
25#define SENSEINFOBYTES 32 /* may vary between hbas */
26#define MAXSGENTRIES 31
27#define MAXREPLYQS 256
28
29/* Command Status value */
30#define CMD_SUCCESS 0x0000
31#define CMD_TARGET_STATUS 0x0001
32#define CMD_DATA_UNDERRUN 0x0002
33#define CMD_DATA_OVERRUN 0x0003
34#define CMD_INVALID 0x0004
35#define CMD_PROTOCOL_ERR 0x0005
36#define CMD_HARDWARE_ERR 0x0006
37#define CMD_CONNECTION_LOST 0x0007
38#define CMD_ABORTED 0x0008
39#define CMD_ABORT_FAILED 0x0009
40#define CMD_UNSOLICITED_ABORT 0x000A
41#define CMD_TIMEOUT 0x000B
42#define CMD_UNABORTABLE 0x000C
43
44/* Unit Attentions ASC's as defined for the MSA2012sa */
45#define POWER_OR_RESET 0x29
46#define STATE_CHANGED 0x2a
47#define UNIT_ATTENTION_CLEARED 0x2f
48#define LUN_FAILED 0x3e
49#define REPORT_LUNS_CHANGED 0x3f
50
51/* Unit Attentions ASCQ's as defined for the MSA2012sa */
52
53 /* These ASCQ's defined for ASC = POWER_OR_RESET */
54#define POWER_ON_RESET 0x00
55#define POWER_ON_REBOOT 0x01
56#define SCSI_BUS_RESET 0x02
57#define MSA_TARGET_RESET 0x03
58#define CONTROLLER_FAILOVER 0x04
59#define TRANSCEIVER_SE 0x05
60#define TRANSCEIVER_LVD 0x06
61
62 /* These ASCQ's defined for ASC = STATE_CHANGED */
63#define RESERVATION_PREEMPTED 0x03
64#define ASYM_ACCESS_CHANGED 0x06
65#define LUN_CAPACITY_CHANGED 0x09
66
67/* transfer direction */
68#define XFER_NONE 0x00
69#define XFER_WRITE 0x01
70#define XFER_READ 0x02
71#define XFER_RSVD 0x03
72
73/* task attribute */
74#define ATTR_UNTAGGED 0x00
75#define ATTR_SIMPLE 0x04
76#define ATTR_HEADOFQUEUE 0x05
77#define ATTR_ORDERED 0x06
78#define ATTR_ACA 0x07
79
80/* cdb type */
81#define TYPE_CMD 0x00
82#define TYPE_MSG 0x01
83
84/* config space register offsets */
85#define CFG_VENDORID 0x00
86#define CFG_DEVICEID 0x02
87#define CFG_I2OBAR 0x10
88#define CFG_MEM1BAR 0x14
89
90/* i2o space register offsets */
91#define I2O_IBDB_SET 0x20
92#define I2O_IBDB_CLEAR 0x70
93#define I2O_INT_STATUS 0x30
94#define I2O_INT_MASK 0x34
95#define I2O_IBPOST_Q 0x40
96#define I2O_OBPOST_Q 0x44
97#define I2O_DMA1_CFG 0x214
98
99/* Configuration Table */
100#define CFGTBL_ChangeReq 0x00000001l
101#define CFGTBL_AccCmds 0x00000001l
102
103#define CFGTBL_Trans_Simple 0x00000002l
104
105#define CFGTBL_BusType_Ultra2 0x00000001l
106#define CFGTBL_BusType_Ultra3 0x00000002l
107#define CFGTBL_BusType_Fibre1G 0x00000100l
108#define CFGTBL_BusType_Fibre2G 0x00000200l
109struct vals32 {
110 __u32 lower;
111 __u32 upper;
112};
113
114union u64bit {
115 struct vals32 val32;
116 __u64 val;
117};
118
119/* FIXME this is a per controller value (barf!) */
120#define HPSA_MAX_TARGETS_PER_CTLR 16
121#define HPSA_MAX_LUN 256
122#define HPSA_MAX_PHYS_LUN 1024
123
124/* SCSI-3 Commands */
125#pragma pack(1)
126
127#define HPSA_INQUIRY 0x12
128struct InquiryData {
129 __u8 data_byte[36];
130};
131
132#define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */
133#define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */
134struct ReportLUNdata {
135 __u8 LUNListLength[4];
136 __u32 reserved;
137 __u8 LUN[HPSA_MAX_LUN][8];
138};
139
140struct ReportExtendedLUNdata {
141 __u8 LUNListLength[4];
142 __u8 extended_response_flag;
143 __u8 reserved[3];
144 __u8 LUN[HPSA_MAX_LUN][24];
145};
146
147struct SenseSubsystem_info {
148 __u8 reserved[36];
149 __u8 portname[8];
150 __u8 reserved1[1108];
151};
152
153#define HPSA_READ_CAPACITY 0x25 /* Read Capacity */
154struct ReadCapdata {
155 __u8 total_size[4]; /* Total size in blocks */
156 __u8 block_size[4]; /* Size of blocks in bytes */
157};
158
159#if 0
160/* 12 byte commands not implemented in firmware yet. */
161#define HPSA_READ 0xa8
162#define HPSA_WRITE 0xaa
163#endif
164
165#define HPSA_READ 0x28 /* Read(10) */
166#define HPSA_WRITE 0x2a /* Write(10) */
167
168/* BMIC commands */
169#define BMIC_READ 0x26
170#define BMIC_WRITE 0x27
171#define BMIC_CACHE_FLUSH 0xc2
172#define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */
173
174/* Command List Structure */
175union SCSI3Addr {
176 struct {
177 __u8 Dev;
178 __u8 Bus:6;
179 __u8 Mode:2; /* b00 */
180 } PeripDev;
181 struct {
182 __u8 DevLSB;
183 __u8 DevMSB:6;
184 __u8 Mode:2; /* b01 */
185 } LogDev;
186 struct {
187 __u8 Dev:5;
188 __u8 Bus:3;
189 __u8 Targ:6;
190 __u8 Mode:2; /* b10 */
191 } LogUnit;
192};
193
194struct PhysDevAddr {
195 __u32 TargetId:24;
196 __u32 Bus:6;
197 __u32 Mode:2;
198 /* 2 level target device addr */
199 union SCSI3Addr Target[2];
200};
201
202struct LogDevAddr {
203 __u32 VolId:30;
204 __u32 Mode:2;
205 __u8 reserved[4];
206};
207
208union LUNAddr {
209 __u8 LunAddrBytes[8];
210 union SCSI3Addr SCSI3Lun[4];
211 struct PhysDevAddr PhysDev;
212 struct LogDevAddr LogDev;
213};
214
/* Fixed header of every command sent to the controller. */
struct CommandListHeader {
	__u8              ReplyQueue;	/* which reply queue completes this cmd */
	__u8              SGList;	/* SG entries in this command */
	__u16             SGTotal;	/* SG entries including chained blocks */
	struct vals32     Tag;		/* driver-chosen tag echoed on completion */
	union LUNAddr     LUN;		/* addressed device */
};
222
/* The CDB and its attributes within a command. */
struct RequestBlock {
	__u8   CDBLen;		/* valid bytes in CDB[] */
	struct {
		__u8 Type:3;		/* TYPE_CMD or TYPE_MSG */
		__u8 Attribute:3;	/* ATTR_SIMPLE etc. (task attribute) */
		__u8 Direction:2;	/* XFER_NONE/WRITE/READ/RSVD */
	} Type;
	__u16  Timeout;		/* firmware timeout; 0 = none */
	__u8   CDB[16];
};
233
234struct ErrDescriptor {
235 struct vals32 Addr;
236 __u32 Len;
237};
238
/* One scatter-gather element: 64-bit bus address, length, extension. */
struct SGDescriptor {
	struct vals32 Addr;
	__u32  Len;
	__u32  Ext;
};
244
245union MoreErrInfo {
246 struct {
247 __u8 Reserved[3];
248 __u8 Type;
249 __u32 ErrorInfo;
250 } Common_Info;
251 struct {
252 __u8 Reserved[2];
253 __u8 offense_size; /* size of offending entry */
254 __u8 offense_num; /* byte # of offense 0-base */
255 __u32 offense_value;
256 } Invalid_Cmd;
257};
/* Per-command error record filled in by the controller on failure. */
struct ErrorInfo {
	__u8               ScsiStatus;		/* SCSI status byte from target */
	__u8               SenseLen;		/* valid bytes in SenseInfo[] */
	__u16              CommandStatus;	/* CMD_* code (see top of file) */
	__u32              ResidualCnt;		/* bytes NOT transferred */
	union MoreErrInfo  MoreErrInfo;
	__u8               SenseInfo[SENSEINFOBYTES];
};
266/* Command types */
267#define CMD_IOCTL_PEND 0x01
268#define CMD_SCSI 0x03
269
270struct ctlr_info; /* defined in hpsa.h */
/* The size of this structure needs to be divisible by 8
 * on all architectures, because the controller uses the 2
 * lower bits of the bus address, and the driver uses 1 lower
 * bit (3 bits total) — enforced by BUILD_BUG_ON in hpsa_init_one.
 */
struct CommandList {
	struct CommandListHeader Header;
	struct RequestBlock      Request;
	struct ErrDescriptor     ErrDesc;	/* where HW writes ErrorInfo */
	struct SGDescriptor      SG[MAXSGENTRIES];
	/* information associated with the command */
	__u32			   busaddr; /* physical addr of this record */
	struct ErrorInfo *err_info; /* pointer to the allocated mem */
	struct ctlr_info	   *h;
	int			   cmd_type;	/* CMD_IOCTL_PEND or CMD_SCSI */
	long			   cmdindex;	/* index into h->cmd_pool */
	struct hlist_node list;		/* linkage on reqQ/cmpQ */
	struct CommandList *prev;
	struct CommandList *next;
	struct request *rq;
	struct completion *waiting;	/* completed for synchronous callers */
	int	 retry_count;
	void   *scsi_cmd;		/* associated struct scsi_cmnd, if any */
};
295
296/* Configuration Table Structure */
/* Host-writable portion of the config table (transport request and
 * interrupt coalescing parameters).
 */
struct HostWrite {
	__u32 TransportRequest;		/* e.g. CFGTBL_Trans_Simple */
	__u32 Reserved;
	__u32 CoalIntDelay;
	__u32 CoalIntCount;
};
303
/* The CISS configuration table, mapped from controller memory
 * (located via SA5_CTCFG/SA5_CTMEM registers). Signature must be "CISS".
 */
struct CfgTable {
	__u8             Signature[4];		/* "CISS" */
	__u32            SpecValence;
	__u32            TransportSupport;	/* supported CFGTBL_Trans_* modes */
	__u32            TransportActive;	/* currently active mode */
	struct HostWrite HostWrite;		/* host-writable section */
	__u32            CmdsOutMax;		/* max outstanding commands */
	__u32            BusTypes;
	__u32            Reserved;
	__u8             ServerName[16];
	__u32            HeartBeat;
	__u32            SCSI_Prefetch;
};
317
318struct hpsa_pci_info {
319 unsigned char bus;
320 unsigned char dev_fn;
321 unsigned short domain;
322 __u32 board_id;
323};
324
325#pragma pack()
326#endif /* HPSA_CMD_H */
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 8643f5089361..9e52d16c7c39 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6521,6 +6521,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
6521 int rc; 6521 int rc;
6522 6522
6523 ENTER; 6523 ENTER;
6524 ioa_cfg->pdev->state_saved = true;
6524 rc = pci_restore_state(ioa_cfg->pdev); 6525 rc = pci_restore_state(ioa_cfg->pdev);
6525 6526
6526 if (rc != PCIBIOS_SUCCESSFUL) { 6527 if (rc != PCIBIOS_SUCCESSFUL) {
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index c4b58d042f6f..881d5dfe8c74 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -68,18 +68,20 @@ struct kmem_cache *scsi_pkt_cachep;
68 68
69/** 69/**
70 * struct fc_fcp_internal - FCP layer internal data 70 * struct fc_fcp_internal - FCP layer internal data
71 * @scsi_pkt_pool: Memory pool to draw FCP packets from 71 * @scsi_pkt_pool: Memory pool to draw FCP packets from
72 * @scsi_queue_lock: Protects the scsi_pkt_queue
72 * @scsi_pkt_queue: Current FCP packets 73 * @scsi_pkt_queue: Current FCP packets
73 * @last_can_queue_ramp_down_time: ramp down time 74 * @last_can_queue_ramp_down_time: ramp down time
74 * @last_can_queue_ramp_up_time: ramp up time 75 * @last_can_queue_ramp_up_time: ramp up time
75 * @max_can_queue: max can_queue size 76 * @max_can_queue: max can_queue size
76 */ 77 */
77struct fc_fcp_internal { 78struct fc_fcp_internal {
78 mempool_t *scsi_pkt_pool; 79 mempool_t *scsi_pkt_pool;
79 struct list_head scsi_pkt_queue; 80 spinlock_t scsi_queue_lock;
80 unsigned long last_can_queue_ramp_down_time; 81 struct list_head scsi_pkt_queue;
81 unsigned long last_can_queue_ramp_up_time; 82 unsigned long last_can_queue_ramp_down_time;
82 int max_can_queue; 83 unsigned long last_can_queue_ramp_up_time;
84 int max_can_queue;
83}; 85};
84 86
85#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv) 87#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)
@@ -410,12 +412,14 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
410 unsigned long flags; 412 unsigned long flags;
411 413
412 fp = fc_frame_alloc(lport, len); 414 fp = fc_frame_alloc(lport, len);
413 if (!fp) { 415 if (likely(fp))
414 spin_lock_irqsave(lport->host->host_lock, flags); 416 return fp;
415 fc_fcp_can_queue_ramp_down(lport); 417
416 spin_unlock_irqrestore(lport->host->host_lock, flags); 418 /* error case */
417 } 419 spin_lock_irqsave(lport->host->host_lock, flags);
418 return fp; 420 fc_fcp_can_queue_ramp_down(lport);
421 spin_unlock_irqrestore(lport->host->host_lock, flags);
422 return NULL;
419} 423}
420 424
421/** 425/**
@@ -990,7 +994,7 @@ static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id,
990 struct scsi_cmnd *sc_cmd; 994 struct scsi_cmnd *sc_cmd;
991 unsigned long flags; 995 unsigned long flags;
992 996
993 spin_lock_irqsave(lport->host->host_lock, flags); 997 spin_lock_irqsave(&si->scsi_queue_lock, flags);
994restart: 998restart:
995 list_for_each_entry(fsp, &si->scsi_pkt_queue, list) { 999 list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
996 sc_cmd = fsp->cmd; 1000 sc_cmd = fsp->cmd;
@@ -1001,7 +1005,7 @@ restart:
1001 continue; 1005 continue;
1002 1006
1003 fc_fcp_pkt_hold(fsp); 1007 fc_fcp_pkt_hold(fsp);
1004 spin_unlock_irqrestore(lport->host->host_lock, flags); 1008 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1005 1009
1006 if (!fc_fcp_lock_pkt(fsp)) { 1010 if (!fc_fcp_lock_pkt(fsp)) {
1007 fc_fcp_cleanup_cmd(fsp, error); 1011 fc_fcp_cleanup_cmd(fsp, error);
@@ -1010,14 +1014,14 @@ restart:
1010 } 1014 }
1011 1015
1012 fc_fcp_pkt_release(fsp); 1016 fc_fcp_pkt_release(fsp);
1013 spin_lock_irqsave(lport->host->host_lock, flags); 1017 spin_lock_irqsave(&si->scsi_queue_lock, flags);
1014 /* 1018 /*
1015 * while we dropped the lock multiple pkts could 1019 * while we dropped the lock multiple pkts could
1016 * have been released, so we have to start over. 1020 * have been released, so we have to start over.
1017 */ 1021 */
1018 goto restart; 1022 goto restart;
1019 } 1023 }
1020 spin_unlock_irqrestore(lport->host->host_lock, flags); 1024 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1021} 1025}
1022 1026
1023/** 1027/**
@@ -1035,11 +1039,12 @@ static void fc_fcp_abort_io(struct fc_lport *lport)
1035 * @fsp: The FCP packet to send 1039 * @fsp: The FCP packet to send
1036 * 1040 *
1037 * Return: Zero for success and -1 for failure 1041 * Return: Zero for success and -1 for failure
1038 * Locks: Called with the host lock and irqs disabled. 1042 * Locks: Called without locks held
1039 */ 1043 */
1040static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp) 1044static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
1041{ 1045{
1042 struct fc_fcp_internal *si = fc_get_scsi_internal(lport); 1046 struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
1047 unsigned long flags;
1043 int rc; 1048 int rc;
1044 1049
1045 fsp->cmd->SCp.ptr = (char *)fsp; 1050 fsp->cmd->SCp.ptr = (char *)fsp;
@@ -1049,13 +1054,16 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
1049 int_to_scsilun(fsp->cmd->device->lun, 1054 int_to_scsilun(fsp->cmd->device->lun,
1050 (struct scsi_lun *)fsp->cdb_cmd.fc_lun); 1055 (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
1051 memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len); 1056 memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
1052 list_add_tail(&fsp->list, &si->scsi_pkt_queue);
1053 1057
1054 spin_unlock_irq(lport->host->host_lock); 1058 spin_lock_irqsave(&si->scsi_queue_lock, flags);
1059 list_add_tail(&fsp->list, &si->scsi_pkt_queue);
1060 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1055 rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv); 1061 rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
1056 spin_lock_irq(lport->host->host_lock); 1062 if (unlikely(rc)) {
1057 if (rc) 1063 spin_lock_irqsave(&si->scsi_queue_lock, flags);
1058 list_del(&fsp->list); 1064 list_del(&fsp->list);
1065 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1066 }
1059 1067
1060 return rc; 1068 return rc;
1061} 1069}
@@ -1752,6 +1760,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1752 struct fcoe_dev_stats *stats; 1760 struct fcoe_dev_stats *stats;
1753 1761
1754 lport = shost_priv(sc_cmd->device->host); 1762 lport = shost_priv(sc_cmd->device->host);
1763 spin_unlock_irq(lport->host->host_lock);
1755 1764
1756 rval = fc_remote_port_chkready(rport); 1765 rval = fc_remote_port_chkready(rport);
1757 if (rval) { 1766 if (rval) {
@@ -1834,6 +1843,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1834 rc = SCSI_MLQUEUE_HOST_BUSY; 1843 rc = SCSI_MLQUEUE_HOST_BUSY;
1835 } 1844 }
1836out: 1845out:
1846 spin_lock_irq(lport->host->host_lock);
1837 return rc; 1847 return rc;
1838} 1848}
1839EXPORT_SYMBOL(fc_queuecommand); 1849EXPORT_SYMBOL(fc_queuecommand);
@@ -1864,11 +1874,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1864 1874
1865 lport = fsp->lp; 1875 lport = fsp->lp;
1866 si = fc_get_scsi_internal(lport); 1876 si = fc_get_scsi_internal(lport);
1867 spin_lock_irqsave(lport->host->host_lock, flags); 1877 if (!fsp->cmd)
1868 if (!fsp->cmd) {
1869 spin_unlock_irqrestore(lport->host->host_lock, flags);
1870 return; 1878 return;
1871 }
1872 1879
1873 /* 1880 /*
1874 * if can_queue ramp down is done then try can_queue ramp up 1881 * if can_queue ramp down is done then try can_queue ramp up
@@ -1880,10 +1887,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1880 sc_cmd = fsp->cmd; 1887 sc_cmd = fsp->cmd;
1881 fsp->cmd = NULL; 1888 fsp->cmd = NULL;
1882 1889
1883 if (!sc_cmd->SCp.ptr) { 1890 if (!sc_cmd->SCp.ptr)
1884 spin_unlock_irqrestore(lport->host->host_lock, flags);
1885 return; 1891 return;
1886 }
1887 1892
1888 CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status; 1893 CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
1889 switch (fsp->status_code) { 1894 switch (fsp->status_code) {
@@ -1945,10 +1950,11 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1945 break; 1950 break;
1946 } 1951 }
1947 1952
1953 spin_lock_irqsave(&si->scsi_queue_lock, flags);
1948 list_del(&fsp->list); 1954 list_del(&fsp->list);
1955 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1949 sc_cmd->SCp.ptr = NULL; 1956 sc_cmd->SCp.ptr = NULL;
1950 sc_cmd->scsi_done(sc_cmd); 1957 sc_cmd->scsi_done(sc_cmd);
1951 spin_unlock_irqrestore(lport->host->host_lock, flags);
1952 1958
1953 /* release ref from initial allocation in queue command */ 1959 /* release ref from initial allocation in queue command */
1954 fc_fcp_pkt_release(fsp); 1960 fc_fcp_pkt_release(fsp);
@@ -2216,6 +2222,7 @@ int fc_fcp_init(struct fc_lport *lport)
2216 lport->scsi_priv = si; 2222 lport->scsi_priv = si;
2217 si->max_can_queue = lport->host->can_queue; 2223 si->max_can_queue = lport->host->can_queue;
2218 INIT_LIST_HEAD(&si->scsi_pkt_queue); 2224 INIT_LIST_HEAD(&si->scsi_pkt_queue);
2225 spin_lock_init(&si->scsi_queue_lock);
2219 2226
2220 si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep); 2227 si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
2221 if (!si->scsi_pkt_pool) { 2228 if (!si->scsi_pkt_pool) {
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 74338c83ad0a..0b165024a219 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -537,7 +537,9 @@ int fc_fabric_login(struct fc_lport *lport)
537 int rc = -1; 537 int rc = -1;
538 538
539 mutex_lock(&lport->lp_mutex); 539 mutex_lock(&lport->lp_mutex);
540 if (lport->state == LPORT_ST_DISABLED) { 540 if (lport->state == LPORT_ST_DISABLED ||
541 lport->state == LPORT_ST_LOGO) {
542 fc_lport_state_enter(lport, LPORT_ST_RESET);
541 fc_lport_enter_reset(lport); 543 fc_lport_enter_reset(lport);
542 rc = 0; 544 rc = 0;
543 } 545 }
@@ -967,6 +969,9 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
967 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n", 969 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
968 fc_lport_state(lport)); 970 fc_lport_state(lport));
969 971
972 if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
973 return;
974
970 if (lport->vport) { 975 if (lport->vport) {
971 if (lport->link_up) 976 if (lport->link_up)
972 fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING); 977 fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 35ca0e72df46..02300523b234 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -310,6 +310,7 @@ static void fc_rport_work(struct work_struct *work)
310 restart = 1; 310 restart = 1;
311 else 311 else
312 list_del(&rdata->peers); 312 list_del(&rdata->peers);
313 rdata->event = RPORT_EV_NONE;
313 mutex_unlock(&rdata->rp_mutex); 314 mutex_unlock(&rdata->rp_mutex);
314 mutex_unlock(&lport->disc.disc_mutex); 315 mutex_unlock(&lport->disc.disc_mutex);
315 } 316 }
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 226920d15ea1..d4da6bdd0e73 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4506,9 +4506,13 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
4506 pdev = phba->pcidev; 4506 pdev = phba->pcidev;
4507 4507
4508 /* Set the device DMA mask size */ 4508 /* Set the device DMA mask size */
4509 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) 4509 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
4510 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 4510 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
4511 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
4512 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
4511 return error; 4513 return error;
4514 }
4515 }
4512 4516
4513 /* Get the bus address of Bar0 and Bar2 and the number of bytes 4517 /* Get the bus address of Bar0 and Bar2 and the number of bytes
4514 * required by each mapping. 4518 * required by each mapping.
@@ -6021,9 +6025,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
6021 pdev = phba->pcidev; 6025 pdev = phba->pcidev;
6022 6026
6023 /* Set the device DMA mask size */ 6027 /* Set the device DMA mask size */
6024 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) 6028 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
6025 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 6029 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
6030 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6031 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
6026 return error; 6032 return error;
6033 }
6034 }
6027 6035
6028 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the 6036 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
6029 * number of bytes required by each mapping. They are actually 6037 * number of bytes required by each mapping. They are actually
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 134c63ef6d38..99ff99e45bee 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -2501,7 +2501,9 @@ static int megasas_init_mfi(struct megasas_instance *instance)
2501 instance->base_addr = pci_resource_start(instance->pdev, 0); 2501 instance->base_addr = pci_resource_start(instance->pdev, 0);
2502 } 2502 }
2503 2503
2504 if (pci_request_regions(instance->pdev, "megasas: LSI")) { 2504 if (pci_request_selected_regions(instance->pdev,
2505 pci_select_bars(instance->pdev, IORESOURCE_MEM),
2506 "megasas: LSI")) {
2505 printk(KERN_DEBUG "megasas: IO memory region busy!\n"); 2507 printk(KERN_DEBUG "megasas: IO memory region busy!\n");
2506 return -EBUSY; 2508 return -EBUSY;
2507 } 2509 }
@@ -2642,7 +2644,8 @@ static int megasas_init_mfi(struct megasas_instance *instance)
2642 iounmap(instance->reg_set); 2644 iounmap(instance->reg_set);
2643 2645
2644 fail_ioremap: 2646 fail_ioremap:
2645 pci_release_regions(instance->pdev); 2647 pci_release_selected_regions(instance->pdev,
2648 pci_select_bars(instance->pdev, IORESOURCE_MEM));
2646 2649
2647 return -EINVAL; 2650 return -EINVAL;
2648} 2651}
@@ -2662,7 +2665,8 @@ static void megasas_release_mfi(struct megasas_instance *instance)
2662 2665
2663 iounmap(instance->reg_set); 2666 iounmap(instance->reg_set);
2664 2667
2665 pci_release_regions(instance->pdev); 2668 pci_release_selected_regions(instance->pdev,
2669 pci_select_bars(instance->pdev, IORESOURCE_MEM));
2666} 2670}
2667 2671
2668/** 2672/**
@@ -2971,7 +2975,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2971 /* 2975 /*
2972 * PCI prepping: enable device set bus mastering and dma mask 2976 * PCI prepping: enable device set bus mastering and dma mask
2973 */ 2977 */
2974 rval = pci_enable_device(pdev); 2978 rval = pci_enable_device_mem(pdev);
2975 2979
2976 if (rval) { 2980 if (rval) {
2977 return rval; 2981 return rval;
@@ -3276,7 +3280,7 @@ megasas_resume(struct pci_dev *pdev)
3276 /* 3280 /*
3277 * PCI prepping: enable device set bus mastering and dma mask 3281 * PCI prepping: enable device set bus mastering and dma mask
3278 */ 3282 */
3279 rval = pci_enable_device(pdev); 3283 rval = pci_enable_device_mem(pdev);
3280 3284
3281 if (rval) { 3285 if (rval) {
3282 printk(KERN_ERR "megasas: Enable device failed\n"); 3286 printk(KERN_ERR "megasas: Enable device failed\n");
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 6422e258fd52..89d02401b9ec 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -3583,6 +3583,11 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3583 ioc->transport_cmds.status = MPT2_CMD_NOT_USED; 3583 ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
3584 mutex_init(&ioc->transport_cmds.mutex); 3584 mutex_init(&ioc->transport_cmds.mutex);
3585 3585
3586 /* scsih internal command bits */
3587 ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3588 ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
3589 mutex_init(&ioc->scsih_cmds.mutex);
3590
3586 /* task management internal command bits */ 3591 /* task management internal command bits */
3587 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 3592 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3588 ioc->tm_cmds.status = MPT2_CMD_NOT_USED; 3593 ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index c790d45876c4..cae6b2cf492f 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -657,6 +657,7 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = {
657 { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 }, 657 { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
658 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 }, 658 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
659 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 }, 659 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
660 { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
660 661
661 { } /* terminate list */ 662 { } /* terminate list */
662}; 663};
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 950202a70bcf..24223473f573 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -432,30 +432,23 @@ static void _osd_free_seg(struct osd_request *or __unused,
432 seg->alloc_size = 0; 432 seg->alloc_size = 0;
433} 433}
434 434
435static void _put_request(struct request *rq , bool is_async) 435static void _put_request(struct request *rq)
436{ 436{
437 if (is_async) { 437 /*
438 WARN_ON(rq->bio); 438 * If osd_finalize_request() was called but the request was not
439 __blk_put_request(rq->q, rq); 439 * executed through the block layer, then we must release BIOs.
440 } else { 440 * TODO: Keep error code in or->async_error. Need to audit all
441 /* 441 * code paths.
442 * If osd_finalize_request() was called but the request was not 442 */
443 * executed through the block layer, then we must release BIOs. 443 if (unlikely(rq->bio))
444 * TODO: Keep error code in or->async_error. Need to audit all 444 blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
445 * code paths. 445 else
446 */ 446 blk_put_request(rq);
447 if (unlikely(rq->bio))
448 blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
449 else
450 blk_put_request(rq);
451 }
452} 447}
453 448
454void osd_end_request(struct osd_request *or) 449void osd_end_request(struct osd_request *or)
455{ 450{
456 struct request *rq = or->request; 451 struct request *rq = or->request;
457 /* IMPORTANT: make sure this agrees with osd_execute_request_async */
458 bool is_async = (or->request->end_io_data == or);
459 452
460 _osd_free_seg(or, &or->set_attr); 453 _osd_free_seg(or, &or->set_attr);
461 _osd_free_seg(or, &or->enc_get_attr); 454 _osd_free_seg(or, &or->enc_get_attr);
@@ -463,20 +456,34 @@ void osd_end_request(struct osd_request *or)
463 456
464 if (rq) { 457 if (rq) {
465 if (rq->next_rq) { 458 if (rq->next_rq) {
466 _put_request(rq->next_rq, is_async); 459 _put_request(rq->next_rq);
467 rq->next_rq = NULL; 460 rq->next_rq = NULL;
468 } 461 }
469 462
470 _put_request(rq, is_async); 463 _put_request(rq);
471 } 464 }
472 _osd_request_free(or); 465 _osd_request_free(or);
473} 466}
474EXPORT_SYMBOL(osd_end_request); 467EXPORT_SYMBOL(osd_end_request);
475 468
469static void _set_error_resid(struct osd_request *or, struct request *req,
470 int error)
471{
472 or->async_error = error;
473 or->req_errors = req->errors ? : error;
474 or->sense_len = req->sense_len;
475 if (or->out.req)
476 or->out.residual = or->out.req->resid_len;
477 if (or->in.req)
478 or->in.residual = or->in.req->resid_len;
479}
480
476int osd_execute_request(struct osd_request *or) 481int osd_execute_request(struct osd_request *or)
477{ 482{
478 return or->async_error = 483 int error = blk_execute_rq(or->request->q, NULL, or->request, 0);
479 blk_execute_rq(or->request->q, NULL, or->request, 0); 484
485 _set_error_resid(or, or->request, error);
486 return error;
480} 487}
481EXPORT_SYMBOL(osd_execute_request); 488EXPORT_SYMBOL(osd_execute_request);
482 489
@@ -484,15 +491,17 @@ static void osd_request_async_done(struct request *req, int error)
484{ 491{
485 struct osd_request *or = req->end_io_data; 492 struct osd_request *or = req->end_io_data;
486 493
487 or->async_error = error; 494 _set_error_resid(or, req, error);
488 495 if (req->next_rq) {
489 if (unlikely(error)) { 496 __blk_put_request(req->q, req->next_rq);
490 OSD_DEBUG("osd_request_async_done error recieved %d " 497 req->next_rq = NULL;
491 "errors 0x%x\n", error, req->errors);
492 if (!req->errors) /* don't miss out on this one */
493 req->errors = error;
494 } 498 }
495 499
500 __blk_put_request(req->q, req);
501 or->request = NULL;
502 or->in.req = NULL;
503 or->out.req = NULL;
504
496 if (or->async_done) 505 if (or->async_done)
497 or->async_done(or, or->async_private); 506 or->async_done(or, or->async_private);
498 else 507 else
@@ -1489,21 +1498,18 @@ int osd_req_decode_sense_full(struct osd_request *or,
1489#endif 1498#endif
1490 int ret; 1499 int ret;
1491 1500
1492 if (likely(!or->request->errors)) { 1501 if (likely(!or->req_errors))
1493 osi->out_resid = 0;
1494 osi->in_resid = 0;
1495 return 0; 1502 return 0;
1496 }
1497 1503
1498 osi = osi ? : &local_osi; 1504 osi = osi ? : &local_osi;
1499 memset(osi, 0, sizeof(*osi)); 1505 memset(osi, 0, sizeof(*osi));
1500 1506
1501 ssdb = or->request->sense; 1507 ssdb = (typeof(ssdb))or->sense;
1502 sense_len = or->request->sense_len; 1508 sense_len = or->sense_len;
1503 if ((sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key)) { 1509 if ((sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key)) {
1504 OSD_ERR("Block-layer returned error(0x%x) but " 1510 OSD_ERR("Block-layer returned error(0x%x) but "
1505 "sense_len(%u) || key(%d) is empty\n", 1511 "sense_len(%u) || key(%d) is empty\n",
1506 or->request->errors, sense_len, ssdb->sense_key); 1512 or->req_errors, sense_len, ssdb->sense_key);
1507 goto analyze; 1513 goto analyze;
1508 } 1514 }
1509 1515
@@ -1525,7 +1531,7 @@ int osd_req_decode_sense_full(struct osd_request *or,
1525 "additional_code=0x%x async_error=%d errors=0x%x\n", 1531 "additional_code=0x%x async_error=%d errors=0x%x\n",
1526 osi->key, original_sense_len, sense_len, 1532 osi->key, original_sense_len, sense_len,
1527 osi->additional_code, or->async_error, 1533 osi->additional_code, or->async_error,
1528 or->request->errors); 1534 or->req_errors);
1529 1535
1530 if (original_sense_len < sense_len) 1536 if (original_sense_len < sense_len)
1531 sense_len = original_sense_len; 1537 sense_len = original_sense_len;
@@ -1695,10 +1701,10 @@ analyze:
1695 ret = -EIO; 1701 ret = -EIO;
1696 } 1702 }
1697 1703
1698 if (or->out.req) 1704 if (!or->out.residual)
1699 osi->out_resid = or->out.req->resid_len ?: or->out.total_bytes; 1705 or->out.residual = or->out.total_bytes;
1700 if (or->in.req) 1706 if (!or->in.residual)
1701 osi->in_resid = or->in.req->resid_len ?: or->in.total_bytes; 1707 or->in.residual = or->in.total_bytes;
1702 1708
1703 return ret; 1709 return ret;
1704} 1710}
diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h
index 22644de26399..63ad4aa0c422 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.h
+++ b/drivers/scsi/pm8001/pm8001_ctl.h
@@ -45,16 +45,6 @@
45#define HEADER_LEN 28 45#define HEADER_LEN 28
46#define SIZE_OFFSET 16 46#define SIZE_OFFSET 16
47 47
48struct pm8001_ioctl_payload {
49 u32 signature;
50 u16 major_function;
51 u16 minor_function;
52 u16 length;
53 u16 status;
54 u16 offset;
55 u16 id;
56 u8 func_specific[1];
57};
58 48
59#define FLASH_OK 0x000000 49#define FLASH_OK 0x000000
60#define FAIL_OPEN_BIOS_FILE 0x000100 50#define FAIL_OPEN_BIOS_FILE 0x000100
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index a3de306b9045..9b44c6f1b10e 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -373,10 +373,7 @@ static int bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
373static void __devinit 373static void __devinit
374mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit) 374mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
375{ 375{
376 u32 offset; 376 u32 value, offset, i;
377 u32 value;
378 u32 i, j;
379 u32 bit_cnt;
380 377
381#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000 378#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
382#define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000 379#define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000
@@ -392,55 +389,35 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
392 */ 389 */
393 if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR)) 390 if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR))
394 return; 391 return;
395 /* set SSC bit of PHY 0 - 3 */ 392
396 for (i = 0; i < 4; i++) { 393 for (i = 0; i < 4; i++) {
397 offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i; 394 offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i;
398 value = pm8001_cr32(pm8001_ha, 2, offset); 395 pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
399 if (SSCbit) {
400 value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT;
401 value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT);
402 } else {
403 value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT;
404 value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT);
405 }
406 bit_cnt = 0;
407 for (j = 0; j < 31; j++)
408 if ((value >> j) & 0x00000001)
409 bit_cnt++;
410 if (bit_cnt % 2)
411 value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY);
412 else
413 value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY;
414
415 pm8001_cw32(pm8001_ha, 2, offset, value);
416 } 396 }
417
418 /* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */ 397 /* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */
419 if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR)) 398 if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR))
420 return; 399 return;
421
422 /* set SSC bit of PHY 4 - 7 */
423 for (i = 4; i < 8; i++) { 400 for (i = 4; i < 8; i++) {
424 offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4); 401 offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
425 value = pm8001_cr32(pm8001_ha, 2, offset); 402 pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
426 if (SSCbit) {
427 value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT;
428 value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT);
429 } else {
430 value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT;
431 value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT);
432 }
433 bit_cnt = 0;
434 for (j = 0; j < 31; j++)
435 if ((value >> j) & 0x00000001)
436 bit_cnt++;
437 if (bit_cnt % 2)
438 value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY);
439 else
440 value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY;
441
442 pm8001_cw32(pm8001_ha, 2, offset, value);
443 } 403 }
404 /*************************************************************
405 Change the SSC upspreading value to 0x0 so that upspreading is disabled.
406 Device MABC SMOD0 Controls
407 Address: (via MEMBASE-III):
408 Using shifted destination address 0x0_0000: with Offset 0xD8
409
410 31:28 R/W Reserved Do not change
411 27:24 R/W SAS_SMOD_SPRDUP 0000
412 23:20 R/W SAS_SMOD_SPRDDN 0000
413 19:0 R/W Reserved Do not change
414 Upon power-up this register will read as 0x8990c016,
415 and I would like you to change the SAS_SMOD_SPRDUP bits to 0b0000
416 so that the written value will be 0x8090c016.
417 This will ensure only down-spreading SSC is enabled on the SPC.
418 *************************************************************/
419 value = pm8001_cr32(pm8001_ha, 2, 0xd8);
420 pm8001_cw32(pm8001_ha, 2, 0xd8, 0x8000C016);
444 421
445 /*set the shifted destination address to 0x0 to avoid error operation */ 422 /*set the shifted destination address to 0x0 to avoid error operation */
446 bar4_shift(pm8001_ha, 0x0); 423 bar4_shift(pm8001_ha, 0x0);
@@ -1901,7 +1878,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
1901{ 1878{
1902 struct sas_task *t; 1879 struct sas_task *t;
1903 struct pm8001_ccb_info *ccb; 1880 struct pm8001_ccb_info *ccb;
1904 unsigned long flags; 1881 unsigned long flags = 0;
1905 u32 param; 1882 u32 param;
1906 u32 status; 1883 u32 status;
1907 u32 tag; 1884 u32 tag;
@@ -2040,7 +2017,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2040 ts->stat = SAS_QUEUE_FULL; 2017 ts->stat = SAS_QUEUE_FULL;
2041 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2018 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2042 mb();/*in order to force CPU ordering*/ 2019 mb();/*in order to force CPU ordering*/
2020 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2043 t->task_done(t); 2021 t->task_done(t);
2022 spin_lock_irqsave(&pm8001_ha->lock, flags);
2044 return; 2023 return;
2045 } 2024 }
2046 break; 2025 break;
@@ -2058,7 +2037,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2058 ts->stat = SAS_QUEUE_FULL; 2037 ts->stat = SAS_QUEUE_FULL;
2059 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2038 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2060 mb();/*ditto*/ 2039 mb();/*ditto*/
2040 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2061 t->task_done(t); 2041 t->task_done(t);
2042 spin_lock_irqsave(&pm8001_ha->lock, flags);
2062 return; 2043 return;
2063 } 2044 }
2064 break; 2045 break;
@@ -2084,7 +2065,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2084 ts->stat = SAS_QUEUE_FULL; 2065 ts->stat = SAS_QUEUE_FULL;
2085 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2066 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2086 mb();/* ditto*/ 2067 mb();/* ditto*/
2068 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2087 t->task_done(t); 2069 t->task_done(t);
2070 spin_lock_irqsave(&pm8001_ha->lock, flags);
2088 return; 2071 return;
2089 } 2072 }
2090 break; 2073 break;
@@ -2149,7 +2132,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2149 ts->stat = SAS_QUEUE_FULL; 2132 ts->stat = SAS_QUEUE_FULL;
2150 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2133 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2151 mb();/*ditto*/ 2134 mb();/*ditto*/
2135 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2152 t->task_done(t); 2136 t->task_done(t);
2137 spin_lock_irqsave(&pm8001_ha->lock, flags);
2153 return; 2138 return;
2154 } 2139 }
2155 break; 2140 break;
@@ -2171,7 +2156,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2171 ts->stat = SAS_QUEUE_FULL; 2156 ts->stat = SAS_QUEUE_FULL;
2172 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2157 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2173 mb();/*ditto*/ 2158 mb();/*ditto*/
2159 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2174 t->task_done(t); 2160 t->task_done(t);
2161 spin_lock_irqsave(&pm8001_ha->lock, flags);
2175 return; 2162 return;
2176 } 2163 }
2177 break; 2164 break;
@@ -2200,11 +2187,20 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2200 " resp 0x%x stat 0x%x but aborted by upper layer!\n", 2187 " resp 0x%x stat 0x%x but aborted by upper layer!\n",
2201 t, status, ts->resp, ts->stat)); 2188 t, status, ts->resp, ts->stat));
2202 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2189 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2203 } else { 2190 } else if (t->uldd_task) {
2204 spin_unlock_irqrestore(&t->task_state_lock, flags); 2191 spin_unlock_irqrestore(&t->task_state_lock, flags);
2205 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2192 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2206 mb();/* ditto */ 2193 mb();/* ditto */
2194 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2207 t->task_done(t); 2195 t->task_done(t);
2196 spin_lock_irqsave(&pm8001_ha->lock, flags);
2197 } else if (!t->uldd_task) {
2198 spin_unlock_irqrestore(&t->task_state_lock, flags);
2199 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2200 mb();/*ditto*/
2201 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2202 t->task_done(t);
2203 spin_lock_irqsave(&pm8001_ha->lock, flags);
2208 } 2204 }
2209} 2205}
2210 2206
@@ -2212,7 +2208,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2212static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) 2208static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
2213{ 2209{
2214 struct sas_task *t; 2210 struct sas_task *t;
2215 unsigned long flags; 2211 unsigned long flags = 0;
2216 struct task_status_struct *ts; 2212 struct task_status_struct *ts;
2217 struct pm8001_ccb_info *ccb; 2213 struct pm8001_ccb_info *ccb;
2218 struct pm8001_device *pm8001_dev; 2214 struct pm8001_device *pm8001_dev;
@@ -2292,7 +2288,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
2292 ts->stat = SAS_QUEUE_FULL; 2288 ts->stat = SAS_QUEUE_FULL;
2293 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2289 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2294 mb();/*ditto*/ 2290 mb();/*ditto*/
2291 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2295 t->task_done(t); 2292 t->task_done(t);
2293 spin_lock_irqsave(&pm8001_ha->lock, flags);
2296 return; 2294 return;
2297 } 2295 }
2298 break; 2296 break;
@@ -2401,11 +2399,20 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
2401 " resp 0x%x stat 0x%x but aborted by upper layer!\n", 2399 " resp 0x%x stat 0x%x but aborted by upper layer!\n",
2402 t, event, ts->resp, ts->stat)); 2400 t, event, ts->resp, ts->stat));
2403 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2401 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2404 } else { 2402 } else if (t->uldd_task) {
2405 spin_unlock_irqrestore(&t->task_state_lock, flags); 2403 spin_unlock_irqrestore(&t->task_state_lock, flags);
2406 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2404 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2407 mb();/* in order to force CPU ordering */ 2405 mb();/* ditto */
2406 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2408 t->task_done(t); 2407 t->task_done(t);
2408 spin_lock_irqsave(&pm8001_ha->lock, flags);
2409 } else if (!t->uldd_task) {
2410 spin_unlock_irqrestore(&t->task_state_lock, flags);
2411 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2412 mb();/*ditto*/
2413 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2414 t->task_done(t);
2415 spin_lock_irqsave(&pm8001_ha->lock, flags);
2409 } 2416 }
2410} 2417}
2411 2418
@@ -2876,15 +2883,20 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
2876 le32_to_cpu(pPayload->lr_evt_status_phyid_portid); 2883 le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
2877 u8 link_rate = 2884 u8 link_rate =
2878 (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28); 2885 (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
2886 u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
2879 u8 phy_id = 2887 u8 phy_id =
2880 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); 2888 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
2889 u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
2890 u8 portstate = (u8)(npip_portstate & 0x0000000F);
2891 struct pm8001_port *port = &pm8001_ha->port[port_id];
2881 struct sas_ha_struct *sas_ha = pm8001_ha->sas; 2892 struct sas_ha_struct *sas_ha = pm8001_ha->sas;
2882 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; 2893 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
2883 unsigned long flags; 2894 unsigned long flags;
2884 u8 deviceType = pPayload->sas_identify.dev_type; 2895 u8 deviceType = pPayload->sas_identify.dev_type;
2885 2896 port->port_state = portstate;
2886 PM8001_MSG_DBG(pm8001_ha, 2897 PM8001_MSG_DBG(pm8001_ha,
2887 pm8001_printk("HW_EVENT_SAS_PHY_UP \n")); 2898 pm8001_printk("HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n",
2899 port_id, phy_id));
2888 2900
2889 switch (deviceType) { 2901 switch (deviceType) {
2890 case SAS_PHY_UNUSED: 2902 case SAS_PHY_UNUSED:
@@ -2895,16 +2907,19 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
2895 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n")); 2907 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
2896 pm8001_chip_phy_ctl_req(pm8001_ha, phy_id, 2908 pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
2897 PHY_NOTIFY_ENABLE_SPINUP); 2909 PHY_NOTIFY_ENABLE_SPINUP);
2910 port->port_attached = 1;
2898 get_lrate_mode(phy, link_rate); 2911 get_lrate_mode(phy, link_rate);
2899 break; 2912 break;
2900 case SAS_EDGE_EXPANDER_DEVICE: 2913 case SAS_EDGE_EXPANDER_DEVICE:
2901 PM8001_MSG_DBG(pm8001_ha, 2914 PM8001_MSG_DBG(pm8001_ha,
2902 pm8001_printk("expander device.\n")); 2915 pm8001_printk("expander device.\n"));
2916 port->port_attached = 1;
2903 get_lrate_mode(phy, link_rate); 2917 get_lrate_mode(phy, link_rate);
2904 break; 2918 break;
2905 case SAS_FANOUT_EXPANDER_DEVICE: 2919 case SAS_FANOUT_EXPANDER_DEVICE:
2906 PM8001_MSG_DBG(pm8001_ha, 2920 PM8001_MSG_DBG(pm8001_ha,
2907 pm8001_printk("fanout expander device.\n")); 2921 pm8001_printk("fanout expander device.\n"));
2922 port->port_attached = 1;
2908 get_lrate_mode(phy, link_rate); 2923 get_lrate_mode(phy, link_rate);
2909 break; 2924 break;
2910 default: 2925 default:
@@ -2946,11 +2961,20 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
2946 le32_to_cpu(pPayload->lr_evt_status_phyid_portid); 2961 le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
2947 u8 link_rate = 2962 u8 link_rate =
2948 (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28); 2963 (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
2964 u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
2949 u8 phy_id = 2965 u8 phy_id =
2950 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); 2966 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
2967 u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
2968 u8 portstate = (u8)(npip_portstate & 0x0000000F);
2969 struct pm8001_port *port = &pm8001_ha->port[port_id];
2951 struct sas_ha_struct *sas_ha = pm8001_ha->sas; 2970 struct sas_ha_struct *sas_ha = pm8001_ha->sas;
2952 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; 2971 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
2953 unsigned long flags; 2972 unsigned long flags;
2973 PM8001_MSG_DBG(pm8001_ha,
2974 pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"
2975 " phy id = %d\n", port_id, phy_id));
2976 port->port_state = portstate;
2977 port->port_attached = 1;
2954 get_lrate_mode(phy, link_rate); 2978 get_lrate_mode(phy, link_rate);
2955 phy->phy_type |= PORT_TYPE_SATA; 2979 phy->phy_type |= PORT_TYPE_SATA;
2956 phy->phy_attached = 1; 2980 phy->phy_attached = 1;
@@ -2984,7 +3008,13 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
2984 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); 3008 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
2985 u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate); 3009 u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
2986 u8 portstate = (u8)(npip_portstate & 0x0000000F); 3010 u8 portstate = (u8)(npip_portstate & 0x0000000F);
2987 3011 struct pm8001_port *port = &pm8001_ha->port[port_id];
3012 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
3013 port->port_state = portstate;
3014 phy->phy_type = 0;
3015 phy->identify.device_type = 0;
3016 phy->phy_attached = 0;
3017 memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
2988 switch (portstate) { 3018 switch (portstate) {
2989 case PORT_VALID: 3019 case PORT_VALID:
2990 break; 3020 break;
@@ -2993,26 +3023,30 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
2993 pm8001_printk(" PortInvalid portID %d \n", port_id)); 3023 pm8001_printk(" PortInvalid portID %d \n", port_id));
2994 PM8001_MSG_DBG(pm8001_ha, 3024 PM8001_MSG_DBG(pm8001_ha,
2995 pm8001_printk(" Last phy Down and port invalid\n")); 3025 pm8001_printk(" Last phy Down and port invalid\n"));
3026 port->port_attached = 0;
2996 pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, 3027 pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
2997 port_id, phy_id, 0, 0); 3028 port_id, phy_id, 0, 0);
2998 break; 3029 break;
2999 case PORT_IN_RESET: 3030 case PORT_IN_RESET:
3000 PM8001_MSG_DBG(pm8001_ha, 3031 PM8001_MSG_DBG(pm8001_ha,
3001 pm8001_printk(" PortInReset portID %d \n", port_id)); 3032 pm8001_printk(" Port In Reset portID %d \n", port_id));
3002 break; 3033 break;
3003 case PORT_NOT_ESTABLISHED: 3034 case PORT_NOT_ESTABLISHED:
3004 PM8001_MSG_DBG(pm8001_ha, 3035 PM8001_MSG_DBG(pm8001_ha,
3005 pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n")); 3036 pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
3037 port->port_attached = 0;
3006 break; 3038 break;
3007 case PORT_LOSTCOMM: 3039 case PORT_LOSTCOMM:
3008 PM8001_MSG_DBG(pm8001_ha, 3040 PM8001_MSG_DBG(pm8001_ha,
3009 pm8001_printk(" phy Down and PORT_LOSTCOMM\n")); 3041 pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
3010 PM8001_MSG_DBG(pm8001_ha, 3042 PM8001_MSG_DBG(pm8001_ha,
3011 pm8001_printk(" Last phy Down and port invalid\n")); 3043 pm8001_printk(" Last phy Down and port invalid\n"));
3044 port->port_attached = 0;
3012 pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, 3045 pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
3013 port_id, phy_id, 0, 0); 3046 port_id, phy_id, 0, 0);
3014 break; 3047 break;
3015 default: 3048 default:
3049 port->port_attached = 0;
3016 PM8001_MSG_DBG(pm8001_ha, 3050 PM8001_MSG_DBG(pm8001_ha,
3017 pm8001_printk(" phy Down and(default) = %x\n", 3051 pm8001_printk(" phy Down and(default) = %x\n",
3018 portstate)); 3052 portstate));
@@ -3770,7 +3804,8 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
3770 u32 opc = OPC_INB_SSPINIIOSTART; 3804 u32 opc = OPC_INB_SSPINIIOSTART;
3771 memset(&ssp_cmd, 0, sizeof(ssp_cmd)); 3805 memset(&ssp_cmd, 0, sizeof(ssp_cmd));
3772 memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); 3806 memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
3773 ssp_cmd.dir_m_tlr = data_dir_flags[task->data_dir] << 8 | 0x0;/*0 for 3807 ssp_cmd.dir_m_tlr =
3808 cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);/*0 for
3774 SAS 1.1 compatible TLR*/ 3809 SAS 1.1 compatible TLR*/
3775 ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len); 3810 ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
3776 ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id); 3811 ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
@@ -3841,7 +3876,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
3841 } 3876 }
3842 } 3877 }
3843 if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) 3878 if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag))
3844 ncg_tag = cpu_to_le32(hdr_tag); 3879 ncg_tag = hdr_tag;
3845 dir = data_dir_flags[task->data_dir] << 8; 3880 dir = data_dir_flags[task->data_dir] << 8;
3846 sata_cmd.tag = cpu_to_le32(tag); 3881 sata_cmd.tag = cpu_to_le32(tag);
3847 sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); 3882 sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
@@ -3986,7 +4021,7 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
3986 ((stp_sspsmp_sata & 0x03) * 0x10000000)); 4021 ((stp_sspsmp_sata & 0x03) * 0x10000000));
3987 payload.firstburstsize_ITNexustimeout = 4022 payload.firstburstsize_ITNexustimeout =
3988 cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); 4023 cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
3989 memcpy(&payload.sas_addr_hi, pm8001_dev->sas_device->sas_addr, 4024 memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
3990 SAS_ADDR_SIZE); 4025 SAS_ADDR_SIZE);
3991 rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); 4026 rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
3992 return rc; 4027 return rc;
@@ -4027,7 +4062,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
4027 struct inbound_queue_table *circularQ; 4062 struct inbound_queue_table *circularQ;
4028 int ret; 4063 int ret;
4029 u32 opc = OPC_INB_LOCAL_PHY_CONTROL; 4064 u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
4030 memset((u8 *)&payload, 0, sizeof(payload)); 4065 memset(&payload, 0, sizeof(payload));
4031 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4066 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4032 payload.tag = 1; 4067 payload.tag = 1;
4033 payload.phyop_phyid = 4068 payload.phyop_phyid =
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index 96e4daa68b8f..833a5201eda4 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -242,8 +242,7 @@ struct reg_dev_req {
242 __le32 phyid_portid; 242 __le32 phyid_portid;
243 __le32 dtype_dlr_retry; 243 __le32 dtype_dlr_retry;
244 __le32 firstburstsize_ITNexustimeout; 244 __le32 firstburstsize_ITNexustimeout;
245 u32 sas_addr_hi; 245 u8 sas_addr[SAS_ADDR_SIZE];
246 u32 sas_addr_low;
247 __le32 upper_device_id; 246 __le32 upper_device_id;
248 u32 reserved[8]; 247 u32 reserved[8];
249} __attribute__((packed, aligned(4))); 248} __attribute__((packed, aligned(4)));
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 42ebe725d5a5..c2f1032496cb 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -200,8 +200,13 @@ static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
200{ 200{
201 int i; 201 int i;
202 spin_lock_init(&pm8001_ha->lock); 202 spin_lock_init(&pm8001_ha->lock);
203 for (i = 0; i < pm8001_ha->chip->n_phy; i++) 203 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
204 pm8001_phy_init(pm8001_ha, i); 204 pm8001_phy_init(pm8001_ha, i);
205 pm8001_ha->port[i].wide_port_phymap = 0;
206 pm8001_ha->port[i].port_attached = 0;
207 pm8001_ha->port[i].port_state = 0;
208 INIT_LIST_HEAD(&pm8001_ha->port[i].list);
209 }
205 210
206 pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL); 211 pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL);
207 if (!pm8001_ha->tags) 212 if (!pm8001_ha->tags)
@@ -511,19 +516,23 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
511 u8 i; 516 u8 i;
512#ifdef PM8001_READ_VPD 517#ifdef PM8001_READ_VPD
513 DECLARE_COMPLETION_ONSTACK(completion); 518 DECLARE_COMPLETION_ONSTACK(completion);
519 struct pm8001_ioctl_payload payload;
514 pm8001_ha->nvmd_completion = &completion; 520 pm8001_ha->nvmd_completion = &completion;
515 PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, 0, 0); 521 payload.minor_function = 0;
522 payload.length = 128;
523 payload.func_specific = kzalloc(128, GFP_KERNEL);
524 PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
516 wait_for_completion(&completion); 525 wait_for_completion(&completion);
517 for (i = 0; i < pm8001_ha->chip->n_phy; i++) { 526 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
518 memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr, 527 memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr,
519 SAS_ADDR_SIZE); 528 SAS_ADDR_SIZE);
520 PM8001_INIT_DBG(pm8001_ha, 529 PM8001_INIT_DBG(pm8001_ha,
521 pm8001_printk("phy %d sas_addr = %x \n", i, 530 pm8001_printk("phy %d sas_addr = %016llx \n", i,
522 (u64)pm8001_ha->phy[i].dev_sas_addr)); 531 pm8001_ha->phy[i].dev_sas_addr));
523 } 532 }
524#else 533#else
525 for (i = 0; i < pm8001_ha->chip->n_phy; i++) { 534 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
526 pm8001_ha->phy[i].dev_sas_addr = 0x500e004010000004ULL; 535 pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL;
527 pm8001_ha->phy[i].dev_sas_addr = 536 pm8001_ha->phy[i].dev_sas_addr =
528 cpu_to_be64((u64) 537 cpu_to_be64((u64)
529 (*(u64 *)&pm8001_ha->phy[i].dev_sas_addr)); 538 (*(u64 *)&pm8001_ha->phy[i].dev_sas_addr));
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 1f767a0e727a..7f9c83a76390 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -329,6 +329,23 @@ int pm8001_slave_configure(struct scsi_device *sdev)
329 } 329 }
330 return 0; 330 return 0;
331} 331}
332 /* Find the local port id that's attached to this device */
333static int sas_find_local_port_id(struct domain_device *dev)
334{
335 struct domain_device *pdev = dev->parent;
336
337 /* Directly attached device */
338 if (!pdev)
339 return dev->port->id;
340 while (pdev) {
341 struct domain_device *pdev_p = pdev->parent;
342 if (!pdev_p)
343 return pdev->port->id;
344 pdev = pdev->parent;
345 }
346 return 0;
347}
348
332/** 349/**
333 * pm8001_task_exec - queue the task(ssp, smp && ata) to the hardware. 350 * pm8001_task_exec - queue the task(ssp, smp && ata) to the hardware.
334 * @task: the task to be execute. 351 * @task: the task to be execute.
@@ -346,11 +363,12 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
346 struct domain_device *dev = task->dev; 363 struct domain_device *dev = task->dev;
347 struct pm8001_hba_info *pm8001_ha; 364 struct pm8001_hba_info *pm8001_ha;
348 struct pm8001_device *pm8001_dev; 365 struct pm8001_device *pm8001_dev;
366 struct pm8001_port *port = NULL;
349 struct sas_task *t = task; 367 struct sas_task *t = task;
350 struct pm8001_ccb_info *ccb; 368 struct pm8001_ccb_info *ccb;
351 u32 tag = 0xdeadbeef, rc, n_elem = 0; 369 u32 tag = 0xdeadbeef, rc, n_elem = 0;
352 u32 n = num; 370 u32 n = num;
353 unsigned long flags = 0; 371 unsigned long flags = 0, flags_libsas = 0;
354 372
355 if (!dev->port) { 373 if (!dev->port) {
356 struct task_status_struct *tsm = &t->task_status; 374 struct task_status_struct *tsm = &t->task_status;
@@ -379,6 +397,35 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
379 rc = SAS_PHY_DOWN; 397 rc = SAS_PHY_DOWN;
380 goto out_done; 398 goto out_done;
381 } 399 }
400 port = &pm8001_ha->port[sas_find_local_port_id(dev)];
401 if (!port->port_attached) {
402 if (sas_protocol_ata(t->task_proto)) {
403 struct task_status_struct *ts = &t->task_status;
404 ts->resp = SAS_TASK_UNDELIVERED;
405 ts->stat = SAS_PHY_DOWN;
406
407 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
408 spin_unlock_irqrestore(dev->sata_dev.ap->lock,
409 flags_libsas);
410 t->task_done(t);
411 spin_lock_irqsave(dev->sata_dev.ap->lock,
412 flags_libsas);
413 spin_lock_irqsave(&pm8001_ha->lock, flags);
414 if (n > 1)
415 t = list_entry(t->list.next,
416 struct sas_task, list);
417 continue;
418 } else {
419 struct task_status_struct *ts = &t->task_status;
420 ts->resp = SAS_TASK_UNDELIVERED;
421 ts->stat = SAS_PHY_DOWN;
422 t->task_done(t);
423 if (n > 1)
424 t = list_entry(t->list.next,
425 struct sas_task, list);
426 continue;
427 }
428 }
382 rc = pm8001_tag_alloc(pm8001_ha, &tag); 429 rc = pm8001_tag_alloc(pm8001_ha, &tag);
383 if (rc) 430 if (rc)
384 goto err_out; 431 goto err_out;
@@ -569,11 +616,11 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
569 spin_lock_irqsave(&pm8001_ha->lock, flags); 616 spin_lock_irqsave(&pm8001_ha->lock, flags);
570 617
571 pm8001_device = pm8001_alloc_dev(pm8001_ha); 618 pm8001_device = pm8001_alloc_dev(pm8001_ha);
572 pm8001_device->sas_device = dev;
573 if (!pm8001_device) { 619 if (!pm8001_device) {
574 res = -1; 620 res = -1;
575 goto found_out; 621 goto found_out;
576 } 622 }
623 pm8001_device->sas_device = dev;
577 dev->lldd_dev = pm8001_device; 624 dev->lldd_dev = pm8001_device;
578 pm8001_device->dev_type = dev->dev_type; 625 pm8001_device->dev_type = dev->dev_type;
579 pm8001_device->dcompletion = &completion; 626 pm8001_device->dcompletion = &completion;
@@ -609,7 +656,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
609 wait_for_completion(&completion); 656 wait_for_completion(&completion);
610 if (dev->dev_type == SAS_END_DEV) 657 if (dev->dev_type == SAS_END_DEV)
611 msleep(50); 658 msleep(50);
612 pm8001_ha->flags = PM8001F_RUN_TIME ; 659 pm8001_ha->flags |= PM8001F_RUN_TIME ;
613 return 0; 660 return 0;
614found_out: 661found_out:
615 spin_unlock_irqrestore(&pm8001_ha->lock, flags); 662 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
@@ -772,7 +819,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
772 task->task_done = pm8001_task_done; 819 task->task_done = pm8001_task_done;
773 task->timer.data = (unsigned long)task; 820 task->timer.data = (unsigned long)task;
774 task->timer.function = pm8001_tmf_timedout; 821 task->timer.function = pm8001_tmf_timedout;
775 task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ; 822 task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
776 add_timer(&task->timer); 823 add_timer(&task->timer);
777 824
778 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); 825 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
@@ -897,6 +944,8 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
897 944
898 if (dev_is_sata(dev)) { 945 if (dev_is_sata(dev)) {
899 DECLARE_COMPLETION_ONSTACK(completion_setstate); 946 DECLARE_COMPLETION_ONSTACK(completion_setstate);
947 if (scsi_is_sas_phy_local(phy))
948 return 0;
900 rc = sas_phy_reset(phy, 1); 949 rc = sas_phy_reset(phy, 1);
901 msleep(2000); 950 msleep(2000);
902 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , 951 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 30f2ede55a75..8e38ca8cd101 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -59,11 +59,11 @@
59 59
60#define DRV_NAME "pm8001" 60#define DRV_NAME "pm8001"
61#define DRV_VERSION "0.1.36" 61#define DRV_VERSION "0.1.36"
62#define PM8001_FAIL_LOGGING 0x01 /* libsas EH function logging */ 62#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */
63#define PM8001_INIT_LOGGING 0x02 /* driver init logging */ 63#define PM8001_INIT_LOGGING 0x02 /* driver init logging */
64#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */ 64#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */
65#define PM8001_IO_LOGGING 0x08 /* I/O path logging */ 65#define PM8001_IO_LOGGING 0x08 /* I/O path logging */
66#define PM8001_EH_LOGGING 0x10 /* Error message logging */ 66#define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/
67#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */ 67#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */
68#define PM8001_MSG_LOGGING 0x40 /* misc message logging */ 68#define PM8001_MSG_LOGGING 0x40 /* misc message logging */
69#define pm8001_printk(format, arg...) printk(KERN_INFO "%s %d:" format,\ 69#define pm8001_printk(format, arg...) printk(KERN_INFO "%s %d:" format,\
@@ -100,6 +100,7 @@ do { \
100 100
101#define PM8001_USE_TASKLET 101#define PM8001_USE_TASKLET
102#define PM8001_USE_MSIX 102#define PM8001_USE_MSIX
103#define PM8001_READ_VPD
103 104
104 105
105#define DEV_IS_EXPANDER(type) ((type == EDGE_DEV) || (type == FANOUT_DEV)) 106#define DEV_IS_EXPANDER(type) ((type == EDGE_DEV) || (type == FANOUT_DEV))
@@ -111,7 +112,22 @@ extern const struct pm8001_dispatch pm8001_8001_dispatch;
111struct pm8001_hba_info; 112struct pm8001_hba_info;
112struct pm8001_ccb_info; 113struct pm8001_ccb_info;
113struct pm8001_device; 114struct pm8001_device;
114struct pm8001_tmf_task; 115/* define task management IU */
116struct pm8001_tmf_task {
117 u8 tmf;
118 u32 tag_of_task_to_be_managed;
119};
120struct pm8001_ioctl_payload {
121 u32 signature;
122 u16 major_function;
123 u16 minor_function;
124 u16 length;
125 u16 status;
126 u16 offset;
127 u16 id;
128 u8 *func_specific;
129};
130
115struct pm8001_dispatch { 131struct pm8001_dispatch {
116 char *name; 132 char *name;
117 int (*chip_init)(struct pm8001_hba_info *pm8001_ha); 133 int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
@@ -164,6 +180,10 @@ struct pm8001_chip_info {
164 180
165struct pm8001_port { 181struct pm8001_port {
166 struct asd_sas_port sas_port; 182 struct asd_sas_port sas_port;
183 u8 port_attached;
184 u8 wide_port_phymap;
185 u8 port_state;
186 struct list_head list;
167}; 187};
168 188
169struct pm8001_phy { 189struct pm8001_phy {
@@ -386,11 +406,7 @@ struct pm8001_fw_image_header {
386 __be32 startup_entry; 406 __be32 startup_entry;
387} __attribute__((packed, aligned(4))); 407} __attribute__((packed, aligned(4)));
388 408
389/* define task management IU */ 409
390struct pm8001_tmf_task {
391 u8 tmf;
392 u32 tag_of_task_to_be_managed;
393};
394/** 410/**
395 * FW Flash Update status values 411 * FW Flash Update status values
396 */ 412 */
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 34c6b896a91b..e7d2688fbeba 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters 2 * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
3 * 3 *
4 * Written By: PMC Sierra Corporation 4 * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
5 * PMC-Sierra Inc
5 * 6 *
6 * Copyright (C) 2008, 2009 PMC Sierra Inc 7 * Copyright (C) 2008, 2009 PMC Sierra Inc
7 * 8 *
@@ -79,7 +80,7 @@ DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
79/* 80/*
80 * Module parameters 81 * Module parameters
81 */ 82 */
82MODULE_AUTHOR("PMC Sierra Corporation, anil_ravindranath@pmc-sierra.com"); 83MODULE_AUTHOR("Anil Ravindranath<anil_ravindranath@pmc-sierra.com>");
83MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver"); 84MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver");
84MODULE_LICENSE("GPL"); 85MODULE_LICENSE("GPL");
85MODULE_VERSION(PMCRAID_DRIVER_VERSION); 86MODULE_VERSION(PMCRAID_DRIVER_VERSION);
@@ -162,10 +163,10 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
162 spin_lock_irqsave(&pinstance->resource_lock, lock_flags); 163 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
163 list_for_each_entry(temp, &pinstance->used_res_q, queue) { 164 list_for_each_entry(temp, &pinstance->used_res_q, queue) {
164 165
165 /* do not expose VSETs with order-ids >= 240 */ 166 /* do not expose VSETs with order-ids > MAX_VSET_TARGETS */
166 if (RES_IS_VSET(temp->cfg_entry)) { 167 if (RES_IS_VSET(temp->cfg_entry)) {
167 target = temp->cfg_entry.unique_flags1; 168 target = temp->cfg_entry.unique_flags1;
168 if (target >= PMCRAID_MAX_VSET_TARGETS) 169 if (target > PMCRAID_MAX_VSET_TARGETS)
169 continue; 170 continue;
170 bus = PMCRAID_VSET_BUS_ID; 171 bus = PMCRAID_VSET_BUS_ID;
171 lun = 0; 172 lun = 0;
@@ -1210,7 +1211,7 @@ static int pmcraid_expose_resource(struct pmcraid_config_table_entry *cfgte)
1210 int retval = 0; 1211 int retval = 0;
1211 1212
1212 if (cfgte->resource_type == RES_TYPE_VSET) 1213 if (cfgte->resource_type == RES_TYPE_VSET)
1213 retval = ((cfgte->unique_flags1 & 0xFF) < 0xFE); 1214 retval = ((cfgte->unique_flags1 & 0x80) == 0);
1214 else if (cfgte->resource_type == RES_TYPE_GSCSI) 1215 else if (cfgte->resource_type == RES_TYPE_GSCSI)
1215 retval = (RES_BUS(cfgte->resource_address) != 1216 retval = (RES_BUS(cfgte->resource_address) !=
1216 PMCRAID_VIRTUAL_ENCL_BUS_ID); 1217 PMCRAID_VIRTUAL_ENCL_BUS_ID);
@@ -1361,6 +1362,7 @@ static int pmcraid_notify_aen(struct pmcraid_instance *pinstance, u8 type)
1361 * Return value: 1362 * Return value:
1362 * none 1363 * none
1363 */ 1364 */
1365
1364static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance) 1366static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1365{ 1367{
1366 struct pmcraid_config_table_entry *cfg_entry; 1368 struct pmcraid_config_table_entry *cfg_entry;
@@ -1368,9 +1370,10 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1368 struct pmcraid_cmd *cmd; 1370 struct pmcraid_cmd *cmd;
1369 struct pmcraid_cmd *cfgcmd; 1371 struct pmcraid_cmd *cfgcmd;
1370 struct pmcraid_resource_entry *res = NULL; 1372 struct pmcraid_resource_entry *res = NULL;
1371 u32 new_entry = 1;
1372 unsigned long lock_flags; 1373 unsigned long lock_flags;
1373 unsigned long host_lock_flags; 1374 unsigned long host_lock_flags;
1375 u32 new_entry = 1;
1376 u32 hidden_entry = 0;
1374 int rc; 1377 int rc;
1375 1378
1376 ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam; 1379 ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam;
@@ -1406,9 +1409,15 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1406 } 1409 }
1407 1410
1408 /* If this resource is not going to be added to mid-layer, just notify 1411 /* If this resource is not going to be added to mid-layer, just notify
1409 * applications and return 1412 * applications and return. If this notification is about hiding a VSET
1413 * resource, check if it was exposed already.
1410 */ 1414 */
1411 if (!pmcraid_expose_resource(cfg_entry)) 1415 if (pinstance->ccn.hcam->notification_type ==
1416 NOTIFICATION_TYPE_ENTRY_CHANGED &&
1417 cfg_entry->resource_type == RES_TYPE_VSET &&
1418 cfg_entry->unique_flags1 & 0x80) {
1419 hidden_entry = 1;
1420 } else if (!pmcraid_expose_resource(cfg_entry))
1412 goto out_notify_apps; 1421 goto out_notify_apps;
1413 1422
1414 spin_lock_irqsave(&pinstance->resource_lock, lock_flags); 1423 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
@@ -1424,6 +1433,12 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1424 1433
1425 if (new_entry) { 1434 if (new_entry) {
1426 1435
1436 if (hidden_entry) {
1437 spin_unlock_irqrestore(&pinstance->resource_lock,
1438 lock_flags);
1439 goto out_notify_apps;
1440 }
1441
1427 /* If there are more number of resources than what driver can 1442 /* If there are more number of resources than what driver can
1428 * manage, do not notify the applications about the CCN. Just 1443 * manage, do not notify the applications about the CCN. Just
1429 * ignore this notifications and re-register the same HCAM 1444 * ignore this notifications and re-register the same HCAM
@@ -1454,8 +1469,9 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1454 sizeof(struct pmcraid_config_table_entry)); 1469 sizeof(struct pmcraid_config_table_entry));
1455 1470
1456 if (pinstance->ccn.hcam->notification_type == 1471 if (pinstance->ccn.hcam->notification_type ==
1457 NOTIFICATION_TYPE_ENTRY_DELETED) { 1472 NOTIFICATION_TYPE_ENTRY_DELETED || hidden_entry) {
1458 if (res->scsi_dev) { 1473 if (res->scsi_dev) {
1474 res->cfg_entry.unique_flags1 &= 0x7F;
1459 res->change_detected = RES_CHANGE_DEL; 1475 res->change_detected = RES_CHANGE_DEL;
1460 res->cfg_entry.resource_handle = 1476 res->cfg_entry.resource_handle =
1461 PMCRAID_INVALID_RES_HANDLE; 1477 PMCRAID_INVALID_RES_HANDLE;
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index 2752b56cad56..92f89d50850c 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -1,6 +1,9 @@
1/* 1/*
2 * pmcraid.h -- PMC Sierra MaxRAID controller driver header file 2 * pmcraid.h -- PMC Sierra MaxRAID controller driver header file
3 * 3 *
4 * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
5 * PMC-Sierra Inc
6 *
4 * Copyright (C) 2008, 2009 PMC Sierra Inc. 7 * Copyright (C) 2008, 2009 PMC Sierra Inc.
5 * 8 *
6 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
@@ -106,7 +109,7 @@
106#define PMCRAID_VSET_LUN_ID 0x0 109#define PMCRAID_VSET_LUN_ID 0x0
107#define PMCRAID_PHYS_BUS_ID 0x0 110#define PMCRAID_PHYS_BUS_ID 0x0
108#define PMCRAID_VIRTUAL_ENCL_BUS_ID 0x8 111#define PMCRAID_VIRTUAL_ENCL_BUS_ID 0x8
109#define PMCRAID_MAX_VSET_TARGETS 240 112#define PMCRAID_MAX_VSET_TARGETS 0x7F
110#define PMCRAID_MAX_VSET_LUNS_PER_TARGET 8 113#define PMCRAID_MAX_VSET_LUNS_PER_TARGET 8
111 114
112#define PMCRAID_IOA_MAX_SECTORS 32767 115#define PMCRAID_IOA_MAX_SECTORS 32767
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 6b9bf23c7735..384afda7dbe9 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1570,9 +1570,6 @@ typedef struct fc_port {
1570 struct fc_rport *rport, *drport; 1570 struct fc_rport *rport, *drport;
1571 u32 supported_classes; 1571 u32 supported_classes;
1572 1572
1573 unsigned long last_queue_full;
1574 unsigned long last_ramp_up;
1575
1576 uint16_t vp_idx; 1573 uint16_t vp_idx;
1577} fc_port_t; 1574} fc_port_t;
1578 1575
@@ -2265,6 +2262,7 @@ struct qla_hw_data {
2265 uint32_t port0 :1; 2262 uint32_t port0 :1;
2266 uint32_t running_gold_fw :1; 2263 uint32_t running_gold_fw :1;
2267 uint32_t cpu_affinity_enabled :1; 2264 uint32_t cpu_affinity_enabled :1;
2265 uint32_t disable_msix_handshake :1;
2268 } flags; 2266 } flags;
2269 2267
2270 /* This spinlock is used to protect "io transactions", you must 2268 /* This spinlock is used to protect "io transactions", you must
@@ -2387,6 +2385,7 @@ struct qla_hw_data {
2387#define IS_QLA81XX(ha) (IS_QLA8001(ha)) 2385#define IS_QLA81XX(ha) (IS_QLA8001(ha))
2388#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \ 2386#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
2389 IS_QLA25XX(ha) || IS_QLA81XX(ha)) 2387 IS_QLA25XX(ha) || IS_QLA81XX(ha))
2388#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha))
2390#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \ 2389#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
2391 (ha)->flags.msix_enabled) 2390 (ha)->flags.msix_enabled)
2392#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha)) 2391#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha))
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index e21851358509..0b6801fc6389 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -72,8 +72,6 @@ extern int ql2xloginretrycount;
72extern int ql2xfdmienable; 72extern int ql2xfdmienable;
73extern int ql2xallocfwdump; 73extern int ql2xallocfwdump;
74extern int ql2xextended_error_logging; 74extern int ql2xextended_error_logging;
75extern int ql2xqfullrampup;
76extern int ql2xqfulltracking;
77extern int ql2xiidmaenable; 75extern int ql2xiidmaenable;
78extern int ql2xmaxqueues; 76extern int ql2xmaxqueues;
79extern int ql2xmultique_tag; 77extern int ql2xmultique_tag;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b74924b279ef..73a793539d45 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1442,7 +1442,17 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1442 icb->firmware_options_2 |= 1442 icb->firmware_options_2 |=
1443 __constant_cpu_to_le32(BIT_18); 1443 __constant_cpu_to_le32(BIT_18);
1444 1444
1445 icb->firmware_options_2 &= __constant_cpu_to_le32(~BIT_22); 1445 /* Use Disable MSIX Handshake mode for capable adapters */
1446 if (IS_MSIX_NACK_CAPABLE(ha)) {
1447 icb->firmware_options_2 &=
1448 __constant_cpu_to_le32(~BIT_22);
1449 ha->flags.disable_msix_handshake = 1;
1450 qla_printk(KERN_INFO, ha,
1451 "MSIX Handshake Disable Mode turned on\n");
1452 } else {
1453 icb->firmware_options_2 |=
1454 __constant_cpu_to_le32(BIT_22);
1455 }
1446 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23); 1456 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
1447 1457
1448 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0); 1458 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 804987397b77..1692a883f4de 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -811,78 +811,6 @@ skip_rio:
811 qla2x00_alert_all_vps(rsp, mb); 811 qla2x00_alert_all_vps(rsp, mb);
812} 812}
813 813
814static void
815qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
816{
817 fc_port_t *fcport = data;
818 struct scsi_qla_host *vha = fcport->vha;
819 struct qla_hw_data *ha = vha->hw;
820 struct req_que *req = NULL;
821
822 if (!ql2xqfulltracking)
823 return;
824
825 req = vha->req;
826 if (!req)
827 return;
828 if (req->max_q_depth <= sdev->queue_depth)
829 return;
830
831 if (sdev->ordered_tags)
832 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
833 sdev->queue_depth + 1);
834 else
835 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
836 sdev->queue_depth + 1);
837
838 fcport->last_ramp_up = jiffies;
839
840 DEBUG2(qla_printk(KERN_INFO, ha,
841 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
842 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
843 sdev->queue_depth));
844}
845
846static void
847qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
848{
849 fc_port_t *fcport = data;
850
851 if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
852 return;
853
854 DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
855 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
856 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
857 sdev->queue_depth));
858}
859
860static inline void
861qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
862 srb_t *sp)
863{
864 fc_port_t *fcport;
865 struct scsi_device *sdev;
866
867 if (!ql2xqfulltracking)
868 return;
869
870 sdev = sp->cmd->device;
871 if (sdev->queue_depth >= req->max_q_depth)
872 return;
873
874 fcport = sp->fcport;
875 if (time_before(jiffies,
876 fcport->last_ramp_up + ql2xqfullrampup * HZ))
877 return;
878 if (time_before(jiffies,
879 fcport->last_queue_full + ql2xqfullrampup * HZ))
880 return;
881
882 starget_for_each_device(sdev->sdev_target, fcport,
883 qla2x00_adjust_sdev_qdepth_up);
884}
885
886/** 814/**
887 * qla2x00_process_completed_request() - Process a Fast Post response. 815 * qla2x00_process_completed_request() - Process a Fast Post response.
888 * @ha: SCSI driver HA context 816 * @ha: SCSI driver HA context
@@ -913,8 +841,6 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
913 841
914 /* Save ISP completion status */ 842 /* Save ISP completion status */
915 sp->cmd->result = DID_OK << 16; 843 sp->cmd->result = DID_OK << 16;
916
917 qla2x00_ramp_up_queue_depth(vha, req, sp);
918 qla2x00_sp_compl(ha, sp); 844 qla2x00_sp_compl(ha, sp);
919 } else { 845 } else {
920 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion" 846 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
@@ -1435,13 +1361,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1435 "scsi(%ld): QUEUE FULL status detected " 1361 "scsi(%ld): QUEUE FULL status detected "
1436 "0x%x-0x%x.\n", vha->host_no, comp_status, 1362 "0x%x-0x%x.\n", vha->host_no, comp_status,
1437 scsi_status)); 1363 scsi_status));
1438
1439 /* Adjust queue depth for all luns on the port. */
1440 if (!ql2xqfulltracking)
1441 break;
1442 fcport->last_queue_full = jiffies;
1443 starget_for_each_device(cp->device->sdev_target,
1444 fcport, qla2x00_adjust_sdev_qdepth_down);
1445 break; 1364 break;
1446 } 1365 }
1447 if (lscsi_status != SS_CHECK_CONDITION) 1366 if (lscsi_status != SS_CHECK_CONDITION)
@@ -1516,17 +1435,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1516 "scsi(%ld): QUEUE FULL status detected " 1435 "scsi(%ld): QUEUE FULL status detected "
1517 "0x%x-0x%x.\n", vha->host_no, comp_status, 1436 "0x%x-0x%x.\n", vha->host_no, comp_status,
1518 scsi_status)); 1437 scsi_status));
1519
1520 /*
1521 * Adjust queue depth for all luns on the
1522 * port.
1523 */
1524 if (!ql2xqfulltracking)
1525 break;
1526 fcport->last_queue_full = jiffies;
1527 starget_for_each_device(
1528 cp->device->sdev_target, fcport,
1529 qla2x00_adjust_sdev_qdepth_down);
1530 break; 1438 break;
1531 } 1439 }
1532 if (lscsi_status != SS_CHECK_CONDITION) 1440 if (lscsi_status != SS_CHECK_CONDITION)
@@ -2020,7 +1928,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
2020 1928
2021 vha = qla25xx_get_host(rsp); 1929 vha = qla25xx_get_host(rsp);
2022 qla24xx_process_response_queue(vha, rsp); 1930 qla24xx_process_response_queue(vha, rsp);
2023 if (!ha->mqenable) { 1931 if (!ha->flags.disable_msix_handshake) {
2024 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1932 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2025 RD_REG_DWORD_RELAXED(&reg->hccr); 1933 RD_REG_DWORD_RELAXED(&reg->hccr);
2026 } 1934 }
@@ -2034,6 +1942,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
2034{ 1942{
2035 struct qla_hw_data *ha; 1943 struct qla_hw_data *ha;
2036 struct rsp_que *rsp; 1944 struct rsp_que *rsp;
1945 struct device_reg_24xx __iomem *reg;
2037 1946
2038 rsp = (struct rsp_que *) dev_id; 1947 rsp = (struct rsp_que *) dev_id;
2039 if (!rsp) { 1948 if (!rsp) {
@@ -2043,6 +1952,14 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
2043 } 1952 }
2044 ha = rsp->hw; 1953 ha = rsp->hw;
2045 1954
1955 /* Clear the interrupt, if enabled, for this response queue */
1956 if (rsp->options & ~BIT_6) {
1957 reg = &ha->iobase->isp24;
1958 spin_lock_irq(&ha->hardware_lock);
1959 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1960 RD_REG_DWORD_RELAXED(&reg->hccr);
1961 spin_unlock_irq(&ha->hardware_lock);
1962 }
2046 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); 1963 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2047 1964
2048 return IRQ_HANDLED; 1965 return IRQ_HANDLED;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index a47d34308a3a..2a4c7f4e7b69 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -696,6 +696,10 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
696 /* Use alternate PCI devfn */ 696 /* Use alternate PCI devfn */
697 if (LSB(rsp->rid)) 697 if (LSB(rsp->rid))
698 options |= BIT_5; 698 options |= BIT_5;
699 /* Enable MSIX handshake mode on for uncapable adapters */
700 if (!IS_MSIX_NACK_CAPABLE(ha))
701 options |= BIT_6;
702
699 rsp->options = options; 703 rsp->options = options;
700 rsp->id = que_id; 704 rsp->id = que_id;
701 reg = ISP_QUE_REG(ha, que_id); 705 reg = ISP_QUE_REG(ha, que_id);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 41669357b186..2f873d237325 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -78,21 +78,6 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
78MODULE_PARM_DESC(ql2xmaxqdepth, 78MODULE_PARM_DESC(ql2xmaxqdepth,
79 "Maximum queue depth to report for target devices."); 79 "Maximum queue depth to report for target devices.");
80 80
81int ql2xqfulltracking = 1;
82module_param(ql2xqfulltracking, int, S_IRUGO|S_IWUSR);
83MODULE_PARM_DESC(ql2xqfulltracking,
84 "Controls whether the driver tracks queue full status "
85 "returns and dynamically adjusts a scsi device's queue "
86 "depth. Default is 1, perform tracking. Set to 0 to "
87 "disable dynamic tracking and adjustment of queue depth.");
88
89int ql2xqfullrampup = 120;
90module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR);
91MODULE_PARM_DESC(ql2xqfullrampup,
92 "Number of seconds to wait to begin to ramp-up the queue "
93 "depth for a device after a queue-full condition has been "
94 "detected. Default is 120 seconds.");
95
96int ql2xiidmaenable=1; 81int ql2xiidmaenable=1;
97module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR); 82module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
98MODULE_PARM_DESC(ql2xiidmaenable, 83MODULE_PARM_DESC(ql2xiidmaenable,
@@ -1217,13 +1202,61 @@ qla2xxx_slave_destroy(struct scsi_device *sdev)
1217 sdev->hostdata = NULL; 1202 sdev->hostdata = NULL;
1218} 1203}
1219 1204
1205static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
1206{
1207 fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
1208
1209 if (!scsi_track_queue_full(sdev, qdepth))
1210 return;
1211
1212 DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
1213 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
1214 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
1215 sdev->queue_depth));
1216}
1217
1218static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
1219{
1220 fc_port_t *fcport = sdev->hostdata;
1221 struct scsi_qla_host *vha = fcport->vha;
1222 struct qla_hw_data *ha = vha->hw;
1223 struct req_que *req = NULL;
1224
1225 req = vha->req;
1226 if (!req)
1227 return;
1228
1229 if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth)
1230 return;
1231
1232 if (sdev->ordered_tags)
1233 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth);
1234 else
1235 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
1236
1237 DEBUG2(qla_printk(KERN_INFO, ha,
1238 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
1239 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
1240 sdev->queue_depth));
1241}
1242
1220static int 1243static int
1221qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) 1244qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
1222{ 1245{
1223 if (reason != SCSI_QDEPTH_DEFAULT) 1246 switch (reason) {
1224 return -EOPNOTSUPP; 1247 case SCSI_QDEPTH_DEFAULT:
1248 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
1249 break;
1250 case SCSI_QDEPTH_QFULL:
1251 qla2x00_handle_queue_full(sdev, qdepth);
1252 break;
1253 case SCSI_QDEPTH_RAMP_UP:
1254 qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
1255 break;
1256 default:
1257 return EOPNOTSUPP;
1258 }
1225 1259
1226 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
1227 return sdev->queue_depth; 1260 return sdev->queue_depth;
1228} 1261}
1229 1262
@@ -2003,13 +2036,13 @@ skip_dpc:
2003 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 2036 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
2004 base_vha->host_no, ha)); 2037 base_vha->host_no, ha));
2005 2038
2006 base_vha->flags.init_done = 1;
2007 base_vha->flags.online = 1;
2008
2009 ret = scsi_add_host(host, &pdev->dev); 2039 ret = scsi_add_host(host, &pdev->dev);
2010 if (ret) 2040 if (ret)
2011 goto probe_failed; 2041 goto probe_failed;
2012 2042
2043 base_vha->flags.init_done = 1;
2044 base_vha->flags.online = 1;
2045
2013 ha->isp_ops->enable_intrs(ha); 2046 ha->isp_ops->enable_intrs(ha);
2014 2047
2015 scsi_scan_host(host); 2048 scsi_scan_host(host);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 807e0dbc67fa..c482220f7eed 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.01-k7" 10#define QLA2XXX_VERSION "8.03.01-k8"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e495d3813948..d8927681ec88 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -859,6 +859,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
859 case 0x07: /* operation in progress */ 859 case 0x07: /* operation in progress */
860 case 0x08: /* Long write in progress */ 860 case 0x08: /* Long write in progress */
861 case 0x09: /* self test in progress */ 861 case 0x09: /* self test in progress */
862 case 0x14: /* space allocation in progress */
862 action = ACTION_DELAYED_RETRY; 863 action = ACTION_DELAYED_RETRY;
863 break; 864 break;
864 default: 865 default:
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 6531c91501be..ddfcecd5099f 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -649,11 +649,22 @@ static __init int fc_transport_init(void)
649 return error; 649 return error;
650 error = transport_class_register(&fc_vport_class); 650 error = transport_class_register(&fc_vport_class);
651 if (error) 651 if (error)
652 return error; 652 goto unreg_host_class;
653 error = transport_class_register(&fc_rport_class); 653 error = transport_class_register(&fc_rport_class);
654 if (error) 654 if (error)
655 return error; 655 goto unreg_vport_class;
656 return transport_class_register(&fc_transport_class); 656 error = transport_class_register(&fc_transport_class);
657 if (error)
658 goto unreg_rport_class;
659 return 0;
660
661unreg_rport_class:
662 transport_class_unregister(&fc_rport_class);
663unreg_vport_class:
664 transport_class_unregister(&fc_vport_class);
665unreg_host_class:
666 transport_class_unregister(&fc_host_class);
667 return error;
657} 668}
658 669
659static void __exit fc_transport_exit(void) 670static void __exit fc_transport_exit(void)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 9093c7261f33..255da53e5a01 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -264,6 +264,15 @@ sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
264 return snprintf(buf, 20, "%u\n", sdkp->ATO); 264 return snprintf(buf, 20, "%u\n", sdkp->ATO);
265} 265}
266 266
267static ssize_t
268sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr,
269 char *buf)
270{
271 struct scsi_disk *sdkp = to_scsi_disk(dev);
272
273 return snprintf(buf, 20, "%u\n", sdkp->thin_provisioning);
274}
275
267static struct device_attribute sd_disk_attrs[] = { 276static struct device_attribute sd_disk_attrs[] = {
268 __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type, 277 __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
269 sd_store_cache_type), 278 sd_store_cache_type),
@@ -274,6 +283,7 @@ static struct device_attribute sd_disk_attrs[] = {
274 sd_store_manage_start_stop), 283 sd_store_manage_start_stop),
275 __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL), 284 __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
276 __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL), 285 __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
286 __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
277 __ATTR_NULL, 287 __ATTR_NULL,
278}; 288};
279 289
@@ -399,6 +409,57 @@ static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
399} 409}
400 410
401/** 411/**
412 * sd_prepare_discard - unmap blocks on thinly provisioned device
413 * @rq: Request to prepare
414 *
415 * Will issue either UNMAP or WRITE SAME(16) depending on preference
416 * indicated by target device.
417 **/
418static int sd_prepare_discard(struct request *rq)
419{
420 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
421 struct bio *bio = rq->bio;
422 sector_t sector = bio->bi_sector;
423 unsigned int num = bio_sectors(bio);
424
425 if (sdkp->device->sector_size == 4096) {
426 sector >>= 3;
427 num >>= 3;
428 }
429
430 rq->cmd_type = REQ_TYPE_BLOCK_PC;
431 rq->timeout = SD_TIMEOUT;
432
433 memset(rq->cmd, 0, rq->cmd_len);
434
435 if (sdkp->unmap) {
436 char *buf = kmap_atomic(bio_page(bio), KM_USER0);
437
438 rq->cmd[0] = UNMAP;
439 rq->cmd[8] = 24;
440 rq->cmd_len = 10;
441
442 /* Ensure that data length matches payload */
443 rq->__data_len = bio->bi_size = bio->bi_io_vec->bv_len = 24;
444
445 put_unaligned_be16(6 + 16, &buf[0]);
446 put_unaligned_be16(16, &buf[2]);
447 put_unaligned_be64(sector, &buf[8]);
448 put_unaligned_be32(num, &buf[16]);
449
450 kunmap_atomic(buf, KM_USER0);
451 } else {
452 rq->cmd[0] = WRITE_SAME_16;
453 rq->cmd[1] = 0x8; /* UNMAP */
454 put_unaligned_be64(sector, &rq->cmd[2]);
455 put_unaligned_be32(num, &rq->cmd[10]);
456 rq->cmd_len = 16;
457 }
458
459 return BLKPREP_OK;
460}
461
462/**
402 * sd_init_command - build a scsi (read or write) command from 463 * sd_init_command - build a scsi (read or write) command from
403 * information in the request structure. 464 * information in the request structure.
404 * @SCpnt: pointer to mid-level's per scsi command structure that 465 * @SCpnt: pointer to mid-level's per scsi command structure that
@@ -418,6 +479,13 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
418 int ret, host_dif; 479 int ret, host_dif;
419 unsigned char protect; 480 unsigned char protect;
420 481
482 /*
483 * Discard request come in as REQ_TYPE_FS but we turn them into
484 * block PC requests to make life easier.
485 */
486 if (blk_discard_rq(rq))
487 ret = sd_prepare_discard(rq);
488
421 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 489 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
422 ret = scsi_setup_blk_pc_cmnd(sdp, rq); 490 ret = scsi_setup_blk_pc_cmnd(sdp, rq);
423 goto out; 491 goto out;
@@ -1432,6 +1500,19 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
1432 sd_printk(KERN_NOTICE, sdkp, 1500 sd_printk(KERN_NOTICE, sdkp,
1433 "physical block alignment offset: %u\n", alignment); 1501 "physical block alignment offset: %u\n", alignment);
1434 1502
1503 if (buffer[14] & 0x80) { /* TPE */
1504 struct request_queue *q = sdp->request_queue;
1505
1506 sdkp->thin_provisioning = 1;
1507 q->limits.discard_granularity = sdkp->hw_sector_size;
1508 q->limits.max_discard_sectors = 0xffffffff;
1509
1510 if (buffer[14] & 0x40) /* TPRZ */
1511 q->limits.discard_zeroes_data = 1;
1512
1513 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
1514 }
1515
1435 sdkp->capacity = lba + 1; 1516 sdkp->capacity = lba + 1;
1436 return sector_size; 1517 return sector_size;
1437} 1518}
@@ -1863,6 +1944,7 @@ void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
1863 */ 1944 */
1864static void sd_read_block_limits(struct scsi_disk *sdkp) 1945static void sd_read_block_limits(struct scsi_disk *sdkp)
1865{ 1946{
1947 struct request_queue *q = sdkp->disk->queue;
1866 unsigned int sector_sz = sdkp->device->sector_size; 1948 unsigned int sector_sz = sdkp->device->sector_size;
1867 char *buffer; 1949 char *buffer;
1868 1950
@@ -1877,6 +1959,31 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
1877 blk_queue_io_opt(sdkp->disk->queue, 1959 blk_queue_io_opt(sdkp->disk->queue,
1878 get_unaligned_be32(&buffer[12]) * sector_sz); 1960 get_unaligned_be32(&buffer[12]) * sector_sz);
1879 1961
1962 /* Thin provisioning enabled and page length indicates TP support */
1963 if (sdkp->thin_provisioning && buffer[3] == 0x3c) {
1964 unsigned int lba_count, desc_count, granularity;
1965
1966 lba_count = get_unaligned_be32(&buffer[20]);
1967 desc_count = get_unaligned_be32(&buffer[24]);
1968
1969 if (lba_count) {
1970 q->limits.max_discard_sectors =
1971 lba_count * sector_sz >> 9;
1972
1973 if (desc_count)
1974 sdkp->unmap = 1;
1975 }
1976
1977 granularity = get_unaligned_be32(&buffer[28]);
1978
1979 if (granularity)
1980 q->limits.discard_granularity = granularity * sector_sz;
1981
1982 if (buffer[32] & 0x80)
1983 q->limits.discard_alignment =
1984 get_unaligned_be32(&buffer[32]) & ~(1 << 31);
1985 }
1986
1880 kfree(buffer); 1987 kfree(buffer);
1881} 1988}
1882 1989
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index e374804d26fb..43d3caf268ef 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -60,6 +60,8 @@ struct scsi_disk {
60 unsigned RCD : 1; /* state of disk RCD bit, unused */ 60 unsigned RCD : 1; /* state of disk RCD bit, unused */
61 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ 61 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
62 unsigned first_scan : 1; 62 unsigned first_scan : 1;
63 unsigned thin_provisioning : 1;
64 unsigned unmap : 1;
63}; 65};
64#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) 66#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
65 67
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index ad59abb47722..d04ea9a6f673 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -552,13 +552,15 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
552 SRpnt->waiting = waiting; 552 SRpnt->waiting = waiting;
553 553
554 if (STp->buffer->do_dio) { 554 if (STp->buffer->do_dio) {
555 mdata->page_order = 0;
555 mdata->nr_entries = STp->buffer->sg_segs; 556 mdata->nr_entries = STp->buffer->sg_segs;
556 mdata->pages = STp->buffer->mapped_pages; 557 mdata->pages = STp->buffer->mapped_pages;
557 } else { 558 } else {
559 mdata->page_order = STp->buffer->reserved_page_order;
558 mdata->nr_entries = 560 mdata->nr_entries =
559 DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order); 561 DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order);
560 STp->buffer->map_data.pages = STp->buffer->reserved_pages; 562 mdata->pages = STp->buffer->reserved_pages;
561 STp->buffer->map_data.offset = 0; 563 mdata->offset = 0;
562 } 564 }
563 565
564 memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd)); 566 memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
@@ -3719,7 +3721,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
3719 priority |= __GFP_ZERO; 3721 priority |= __GFP_ZERO;
3720 3722
3721 if (STbuffer->frp_segs) { 3723 if (STbuffer->frp_segs) {
3722 order = STbuffer->map_data.page_order; 3724 order = STbuffer->reserved_page_order;
3723 b_size = PAGE_SIZE << order; 3725 b_size = PAGE_SIZE << order;
3724 } else { 3726 } else {
3725 for (b_size = PAGE_SIZE, order = 0; 3727 for (b_size = PAGE_SIZE, order = 0;
@@ -3752,7 +3754,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
3752 segs++; 3754 segs++;
3753 } 3755 }
3754 STbuffer->b_data = page_address(STbuffer->reserved_pages[0]); 3756 STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
3755 STbuffer->map_data.page_order = order; 3757 STbuffer->reserved_page_order = order;
3756 3758
3757 return 1; 3759 return 1;
3758} 3760}
@@ -3765,7 +3767,7 @@ static void clear_buffer(struct st_buffer * st_bp)
3765 3767
3766 for (i=0; i < st_bp->frp_segs; i++) 3768 for (i=0; i < st_bp->frp_segs; i++)
3767 memset(page_address(st_bp->reserved_pages[i]), 0, 3769 memset(page_address(st_bp->reserved_pages[i]), 0,
3768 PAGE_SIZE << st_bp->map_data.page_order); 3770 PAGE_SIZE << st_bp->reserved_page_order);
3769 st_bp->cleared = 1; 3771 st_bp->cleared = 1;
3770} 3772}
3771 3773
@@ -3773,7 +3775,7 @@ static void clear_buffer(struct st_buffer * st_bp)
3773/* Release the extra buffer */ 3775/* Release the extra buffer */
3774static void normalize_buffer(struct st_buffer * STbuffer) 3776static void normalize_buffer(struct st_buffer * STbuffer)
3775{ 3777{
3776 int i, order = STbuffer->map_data.page_order; 3778 int i, order = STbuffer->reserved_page_order;
3777 3779
3778 for (i = 0; i < STbuffer->frp_segs; i++) { 3780 for (i = 0; i < STbuffer->frp_segs; i++) {
3779 __free_pages(STbuffer->reserved_pages[i], order); 3781 __free_pages(STbuffer->reserved_pages[i], order);
@@ -3781,7 +3783,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
3781 } 3783 }
3782 STbuffer->frp_segs = 0; 3784 STbuffer->frp_segs = 0;
3783 STbuffer->sg_segs = 0; 3785 STbuffer->sg_segs = 0;
3784 STbuffer->map_data.page_order = 0; 3786 STbuffer->reserved_page_order = 0;
3785 STbuffer->map_data.offset = 0; 3787 STbuffer->map_data.offset = 0;
3786} 3788}
3787 3789
@@ -3791,7 +3793,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
3791static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count) 3793static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count)
3792{ 3794{
3793 int i, cnt, res, offset; 3795 int i, cnt, res, offset;
3794 int length = PAGE_SIZE << st_bp->map_data.page_order; 3796 int length = PAGE_SIZE << st_bp->reserved_page_order;
3795 3797
3796 for (i = 0, offset = st_bp->buffer_bytes; 3798 for (i = 0, offset = st_bp->buffer_bytes;
3797 i < st_bp->frp_segs && offset >= length; i++) 3799 i < st_bp->frp_segs && offset >= length; i++)
@@ -3823,7 +3825,7 @@ static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, in
3823static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count) 3825static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count)
3824{ 3826{
3825 int i, cnt, res, offset; 3827 int i, cnt, res, offset;
3826 int length = PAGE_SIZE << st_bp->map_data.page_order; 3828 int length = PAGE_SIZE << st_bp->reserved_page_order;
3827 3829
3828 for (i = 0, offset = st_bp->read_pointer; 3830 for (i = 0, offset = st_bp->read_pointer;
3829 i < st_bp->frp_segs && offset >= length; i++) 3831 i < st_bp->frp_segs && offset >= length; i++)
@@ -3856,7 +3858,7 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
3856{ 3858{
3857 int src_seg, dst_seg, src_offset = 0, dst_offset; 3859 int src_seg, dst_seg, src_offset = 0, dst_offset;
3858 int count, total; 3860 int count, total;
3859 int length = PAGE_SIZE << st_bp->map_data.page_order; 3861 int length = PAGE_SIZE << st_bp->reserved_page_order;
3860 3862
3861 if (offset == 0) 3863 if (offset == 0)
3862 return; 3864 return;
@@ -4578,7 +4580,6 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
4578 } 4580 }
4579 4581
4580 mdata->offset = uaddr & ~PAGE_MASK; 4582 mdata->offset = uaddr & ~PAGE_MASK;
4581 mdata->page_order = 0;
4582 STbp->mapped_pages = pages; 4583 STbp->mapped_pages = pages;
4583 4584
4584 return nr_pages; 4585 return nr_pages;
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 544dc6b1f548..f91a67c6d968 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -46,6 +46,7 @@ struct st_buffer {
46 struct st_request *last_SRpnt; 46 struct st_request *last_SRpnt;
47 struct st_cmdstatus cmdstat; 47 struct st_cmdstatus cmdstat;
48 struct page **reserved_pages; 48 struct page **reserved_pages;
49 int reserved_page_order;
49 struct page **mapped_pages; 50 struct page **mapped_pages;
50 struct rq_map_data map_data; 51 struct rq_map_data map_data;
51 unsigned char *b_data; 52 unsigned char *b_data;
diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
index 90d1c2184112..9a33c5f7e126 100644
--- a/include/linux/enclosure.h
+++ b/include/linux/enclosure.h
@@ -42,6 +42,8 @@ enum enclosure_status {
42 ENCLOSURE_STATUS_NOT_INSTALLED, 42 ENCLOSURE_STATUS_NOT_INSTALLED,
43 ENCLOSURE_STATUS_UNKNOWN, 43 ENCLOSURE_STATUS_UNKNOWN,
44 ENCLOSURE_STATUS_UNAVAILABLE, 44 ENCLOSURE_STATUS_UNAVAILABLE,
45 /* last element for counting purposes */
46 ENCLOSURE_STATUS_MAX
45}; 47};
46 48
47/* SFF-8485 activity light settings */ 49/* SFF-8485 activity light settings */
diff --git a/include/scsi/osd_initiator.h b/include/scsi/osd_initiator.h
index 39d6d1097153..a8f370126632 100644
--- a/include/scsi/osd_initiator.h
+++ b/include/scsi/osd_initiator.h
@@ -142,6 +142,7 @@ struct osd_request {
142 struct _osd_io_info { 142 struct _osd_io_info {
143 struct bio *bio; 143 struct bio *bio;
144 u64 total_bytes; 144 u64 total_bytes;
145 u64 residual;
145 struct request *req; 146 struct request *req;
146 struct _osd_req_data_segment *last_seg; 147 struct _osd_req_data_segment *last_seg;
147 u8 *pad_buff; 148 u8 *pad_buff;
@@ -150,12 +151,14 @@ struct osd_request {
150 gfp_t alloc_flags; 151 gfp_t alloc_flags;
151 unsigned timeout; 152 unsigned timeout;
152 unsigned retries; 153 unsigned retries;
154 unsigned sense_len;
153 u8 sense[OSD_MAX_SENSE_LEN]; 155 u8 sense[OSD_MAX_SENSE_LEN];
154 enum osd_attributes_mode attributes_mode; 156 enum osd_attributes_mode attributes_mode;
155 157
156 osd_req_done_fn *async_done; 158 osd_req_done_fn *async_done;
157 void *async_private; 159 void *async_private;
158 int async_error; 160 int async_error;
161 int req_errors;
159}; 162};
160 163
161static inline bool osd_req_is_ver1(struct osd_request *or) 164static inline bool osd_req_is_ver1(struct osd_request *or)
@@ -297,8 +300,6 @@ enum osd_err_priority {
297}; 300};
298 301
299struct osd_sense_info { 302struct osd_sense_info {
300 u64 out_resid; /* Zero on success otherwise out residual */
301 u64 in_resid; /* Zero on success otherwise in residual */
302 enum osd_err_priority osd_err_pri; 303 enum osd_err_priority osd_err_pri;
303 304
304 int key; /* one of enum scsi_sense_keys */ 305 int key; /* one of enum scsi_sense_keys */