aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/scsi')
-rw-r--r--drivers/scsi/Kconfig10
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm_pci.c2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c11
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm_pci.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx.h1
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.h3
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_scb.c14
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_tmf.c308
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c26
-rw-r--r--drivers/scsi/arm/fas216.h2
-rw-r--r--drivers/scsi/gdth.c112
-rw-r--r--drivers/scsi/gdth.h1
-rw-r--r--drivers/scsi/gdth_proc.c6
-rw-r--r--drivers/scsi/ibmvscsi/ibmvstgt.c9
-rw-r--r--drivers/scsi/ips.c2
-rw-r--r--drivers/scsi/libiscsi.c4
-rw-r--r--drivers/scsi/libsas/sas_ata.c39
-rw-r--r--drivers/scsi/libsas/sas_port.c11
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c102
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c18
-rw-r--r--drivers/scsi/megaraid.c10
-rw-r--r--drivers/scsi/mesh.c1
-rw-r--r--drivers/scsi/mvsas.c2969
-rw-r--r--drivers/scsi/ps3rom.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c3
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c39
-rw-r--r--drivers/scsi/qlogicpti.c12
-rw-r--r--drivers/scsi/scsi.c2
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/scsi/scsi_scan.c3
-rw-r--r--drivers/scsi/scsi_tgt_lib.c6
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c80
-rw-r--r--drivers/scsi/sd.c3
-rw-r--r--drivers/scsi/ses.c126
-rw-r--r--drivers/scsi/st.c11
-rw-r--r--drivers/scsi/st.h1
-rw-r--r--drivers/scsi/stex.c44
50 files changed, 3539 insertions, 506 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a7a0813b24cb..c46666a24809 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -992,6 +992,16 @@ config SCSI_IZIP_SLOW_CTR
992 992
993 Generally, saying N is fine. 993 Generally, saying N is fine.
994 994
995config SCSI_MVSAS
996 tristate "Marvell 88SE6440 SAS/SATA support"
997 depends on PCI && SCSI
998 select SCSI_SAS_LIBSAS
999 help
1000 This driver supports Marvell SAS/SATA PCI devices.
1001
1002 To compiler this driver as a module, choose M here: the module
1003 will be called mvsas.
1004
995config SCSI_NCR53C406A 1005config SCSI_NCR53C406A
996 tristate "NCR53c406a SCSI support" 1006 tristate "NCR53c406a SCSI support"
997 depends on ISA && SCSI 1007 depends on ISA && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 925c26b4fff9..23e6ecbd4778 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -119,6 +119,7 @@ obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/
119obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/ 119obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/
120obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o 120obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
121obj-$(CONFIG_SCSI_STEX) += stex.o 121obj-$(CONFIG_SCSI_STEX) += stex.o
122obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
122obj-$(CONFIG_PS3_ROM) += ps3rom.o 123obj-$(CONFIG_PS3_ROM) += ps3rom.o
123 124
124obj-$(CONFIG_ARM) += arm/ 125obj-$(CONFIG_ARM) += arm/
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 4150c8a8fdc2..dfaaae5e73ae 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -89,7 +89,7 @@ ahd_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg)
89 pci_save_state(pdev); 89 pci_save_state(pdev);
90 pci_disable_device(pdev); 90 pci_disable_device(pdev);
91 91
92 if (mesg.event == PM_EVENT_SUSPEND) 92 if (mesg.event & PM_EVENT_SLEEP)
93 pci_set_power_state(pdev, PCI_D3hot); 93 pci_set_power_state(pdev, PCI_D3hot);
94 94
95 return rc; 95 return rc;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 6d2ae641273c..64e62ce59c15 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -695,15 +695,16 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
695 scb_index = ahc_inb(ahc, SCB_TAG); 695 scb_index = ahc_inb(ahc, SCB_TAG);
696 scb = ahc_lookup_scb(ahc, scb_index); 696 scb = ahc_lookup_scb(ahc, scb_index);
697 if (devinfo.role == ROLE_INITIATOR) { 697 if (devinfo.role == ROLE_INITIATOR) {
698 if (scb == NULL) 698 if (bus_phase == P_MESGOUT) {
699 panic("HOST_MSG_LOOP with " 699 if (scb == NULL)
700 "invalid SCB %x\n", scb_index); 700 panic("HOST_MSG_LOOP with "
701 "invalid SCB %x\n",
702 scb_index);
701 703
702 if (bus_phase == P_MESGOUT)
703 ahc_setup_initiator_msgout(ahc, 704 ahc_setup_initiator_msgout(ahc,
704 &devinfo, 705 &devinfo,
705 scb); 706 scb);
706 else { 707 } else {
707 ahc->msg_type = 708 ahc->msg_type =
708 MSG_TYPE_INITIATOR_MSGIN; 709 MSG_TYPE_INITIATOR_MSGIN;
709 ahc->msgin_index = 0; 710 ahc->msgin_index = 0;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index dd6e21d6f1dd..3d3eaef65fb3 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -134,7 +134,7 @@ ahc_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg)
134 pci_save_state(pdev); 134 pci_save_state(pdev);
135 pci_disable_device(pdev); 135 pci_disable_device(pdev);
136 136
137 if (mesg.event == PM_EVENT_SUSPEND) 137 if (mesg.event & PM_EVENT_SLEEP)
138 pci_set_power_state(pdev, PCI_D3hot); 138 pci_set_power_state(pdev, PCI_D3hot);
139 139
140 return rc; 140 return rc;
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
index 32f513b1b78a..eb8efdcefe48 100644
--- a/drivers/scsi/aic94xx/aic94xx.h
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -102,6 +102,7 @@ int asd_abort_task_set(struct domain_device *, u8 *lun);
102int asd_clear_aca(struct domain_device *, u8 *lun); 102int asd_clear_aca(struct domain_device *, u8 *lun);
103int asd_clear_task_set(struct domain_device *, u8 *lun); 103int asd_clear_task_set(struct domain_device *, u8 *lun);
104int asd_lu_reset(struct domain_device *, u8 *lun); 104int asd_lu_reset(struct domain_device *, u8 *lun);
105int asd_I_T_nexus_reset(struct domain_device *dev);
105int asd_query_task(struct sas_task *); 106int asd_query_task(struct sas_task *);
106 107
107/* ---------- Adapter and Port management ---------- */ 108/* ---------- Adapter and Port management ---------- */
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.h b/drivers/scsi/aic94xx/aic94xx_hwi.h
index 150f6706d23f..abc757559c1a 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.h
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.h
@@ -140,7 +140,7 @@ struct asd_ascb {
140 140
141 /* internally generated command */ 141 /* internally generated command */
142 struct timer_list timer; 142 struct timer_list timer;
143 struct completion completion; 143 struct completion *completion;
144 u8 tag_valid:1; 144 u8 tag_valid:1;
145 __be16 tag; /* error recovery only */ 145 __be16 tag; /* error recovery only */
146 146
@@ -294,7 +294,6 @@ static inline void asd_init_ascb(struct asd_ha_struct *asd_ha,
294 ascb->timer.function = NULL; 294 ascb->timer.function = NULL;
295 init_timer(&ascb->timer); 295 init_timer(&ascb->timer);
296 ascb->tc_index = -1; 296 ascb->tc_index = -1;
297 init_completion(&ascb->completion);
298} 297}
299 298
300/* Must be called with the tc_index_lock held! 299/* Must be called with the tc_index_lock held!
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 5d761eb67442..88d1e731b65e 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -1003,7 +1003,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
1003 .lldd_abort_task_set = asd_abort_task_set, 1003 .lldd_abort_task_set = asd_abort_task_set,
1004 .lldd_clear_aca = asd_clear_aca, 1004 .lldd_clear_aca = asd_clear_aca,
1005 .lldd_clear_task_set = asd_clear_task_set, 1005 .lldd_clear_task_set = asd_clear_task_set,
1006 .lldd_I_T_nexus_reset = NULL, 1006 .lldd_I_T_nexus_reset = asd_I_T_nexus_reset,
1007 .lldd_lu_reset = asd_lu_reset, 1007 .lldd_lu_reset = asd_lu_reset,
1008 .lldd_query_task = asd_query_task, 1008 .lldd_query_task = asd_query_task,
1009 1009
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 0febad4dd75f..ab350504ca5a 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -458,13 +458,19 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
458 tc_abort = le16_to_cpu(tc_abort); 458 tc_abort = le16_to_cpu(tc_abort);
459 459
460 list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) { 460 list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) {
461 struct sas_task *task = ascb->uldd_task; 461 struct sas_task *task = a->uldd_task;
462
463 if (a->tc_index != tc_abort)
464 continue;
462 465
463 if (task && a->tc_index == tc_abort) { 466 if (task) {
464 failed_dev = task->dev; 467 failed_dev = task->dev;
465 sas_task_abort(task); 468 sas_task_abort(task);
466 break; 469 } else {
470 ASD_DPRINTK("R_T_A for non TASK scb 0x%x\n",
471 a->scb->header.opcode);
467 } 472 }
473 break;
468 } 474 }
469 475
470 if (!failed_dev) { 476 if (!failed_dev) {
@@ -478,7 +484,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
478 * that the EH will wake up and do something. 484 * that the EH will wake up and do something.
479 */ 485 */
480 list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) { 486 list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) {
481 struct sas_task *task = ascb->uldd_task; 487 struct sas_task *task = a->uldd_task;
482 488
483 if (task && 489 if (task &&
484 task->dev == failed_dev && 490 task->dev == failed_dev &&
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 965d4bb999d9..008df9ab92a5 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -343,11 +343,13 @@ Again:
343 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; 343 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
344 task->task_state_flags |= SAS_TASK_STATE_DONE; 344 task->task_state_flags |= SAS_TASK_STATE_DONE;
345 if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) { 345 if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) {
346 struct completion *completion = ascb->completion;
346 spin_unlock_irqrestore(&task->task_state_lock, flags); 347 spin_unlock_irqrestore(&task->task_state_lock, flags);
347 ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x " 348 ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x "
348 "stat 0x%x but aborted by upper layer!\n", 349 "stat 0x%x but aborted by upper layer!\n",
349 task, opcode, ts->resp, ts->stat); 350 task, opcode, ts->resp, ts->stat);
350 complete(&ascb->completion); 351 if (completion)
352 complete(completion);
351 } else { 353 } else {
352 spin_unlock_irqrestore(&task->task_state_lock, flags); 354 spin_unlock_irqrestore(&task->task_state_lock, flags);
353 task->lldd_task = NULL; 355 task->lldd_task = NULL;
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index b52124f3d3ac..b9ac8f703a1d 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -53,50 +53,64 @@ static int asd_enqueue_internal(struct asd_ascb *ascb,
53 return res; 53 return res;
54} 54}
55 55
56static inline void asd_timedout_common(unsigned long data) 56/* ---------- CLEAR NEXUS ---------- */
57{
58 struct asd_ascb *ascb = (void *) data;
59 struct asd_seq_data *seq = &ascb->ha->seq;
60 unsigned long flags;
61 57
62 spin_lock_irqsave(&seq->pend_q_lock, flags); 58struct tasklet_completion_status {
63 seq->pending--; 59 int dl_opcode;
64 list_del_init(&ascb->list); 60 int tmf_state;
65 spin_unlock_irqrestore(&seq->pend_q_lock, flags); 61 u8 tag_valid:1;
66} 62 __be16 tag;
63};
64
65#define DECLARE_TCS(tcs) \
66 struct tasklet_completion_status tcs = { \
67 .dl_opcode = 0, \
68 .tmf_state = 0, \
69 .tag_valid = 0, \
70 .tag = 0, \
71 }
67 72
68/* ---------- CLEAR NEXUS ---------- */
69 73
70static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb, 74static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
71 struct done_list_struct *dl) 75 struct done_list_struct *dl)
72{ 76{
77 struct tasklet_completion_status *tcs = ascb->uldd_task;
73 ASD_DPRINTK("%s: here\n", __FUNCTION__); 78 ASD_DPRINTK("%s: here\n", __FUNCTION__);
74 if (!del_timer(&ascb->timer)) { 79 if (!del_timer(&ascb->timer)) {
75 ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__); 80 ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__);
76 return; 81 return;
77 } 82 }
78 ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode); 83 ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode);
79 ascb->uldd_task = (void *) (unsigned long) dl->opcode; 84 tcs->dl_opcode = dl->opcode;
80 complete(&ascb->completion); 85 complete(ascb->completion);
86 asd_ascb_free(ascb);
81} 87}
82 88
83static void asd_clear_nexus_timedout(unsigned long data) 89static void asd_clear_nexus_timedout(unsigned long data)
84{ 90{
85 struct asd_ascb *ascb = (void *) data; 91 struct asd_ascb *ascb = (void *)data;
92 struct tasklet_completion_status *tcs = ascb->uldd_task;
86 93
87 ASD_DPRINTK("%s: here\n", __FUNCTION__); 94 ASD_DPRINTK("%s: here\n", __FUNCTION__);
88 asd_timedout_common(data); 95 tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
89 ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED; 96 complete(ascb->completion);
90 complete(&ascb->completion);
91} 97}
92 98
93#define CLEAR_NEXUS_PRE \ 99#define CLEAR_NEXUS_PRE \
100 struct asd_ascb *ascb; \
101 struct scb *scb; \
102 int res; \
103 DECLARE_COMPLETION_ONSTACK(completion); \
104 DECLARE_TCS(tcs); \
105 \
94 ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \ 106 ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \
95 res = 1; \ 107 res = 1; \
96 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \ 108 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
97 if (!ascb) \ 109 if (!ascb) \
98 return -ENOMEM; \ 110 return -ENOMEM; \
99 \ 111 \
112 ascb->completion = &completion; \
113 ascb->uldd_task = &tcs; \
100 scb = ascb->scb; \ 114 scb = ascb->scb; \
101 scb->header.opcode = CLEAR_NEXUS 115 scb->header.opcode = CLEAR_NEXUS
102 116
@@ -107,10 +121,11 @@ static void asd_clear_nexus_timedout(unsigned long data)
107 if (res) \ 121 if (res) \
108 goto out_err; \ 122 goto out_err; \
109 ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \ 123 ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \
110 wait_for_completion(&ascb->completion); \ 124 wait_for_completion(&completion); \
111 res = (int) (unsigned long) ascb->uldd_task; \ 125 res = tcs.dl_opcode; \
112 if (res == TC_NO_ERROR) \ 126 if (res == TC_NO_ERROR) \
113 res = TMF_RESP_FUNC_COMPLETE; \ 127 res = TMF_RESP_FUNC_COMPLETE; \
128 return res; \
114out_err: \ 129out_err: \
115 asd_ascb_free(ascb); \ 130 asd_ascb_free(ascb); \
116 return res 131 return res
@@ -118,9 +133,6 @@ out_err: \
118int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha) 133int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
119{ 134{
120 struct asd_ha_struct *asd_ha = sas_ha->lldd_ha; 135 struct asd_ha_struct *asd_ha = sas_ha->lldd_ha;
121 struct asd_ascb *ascb;
122 struct scb *scb;
123 int res;
124 136
125 CLEAR_NEXUS_PRE; 137 CLEAR_NEXUS_PRE;
126 scb->clear_nexus.nexus = NEXUS_ADAPTER; 138 scb->clear_nexus.nexus = NEXUS_ADAPTER;
@@ -130,9 +142,6 @@ int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
130int asd_clear_nexus_port(struct asd_sas_port *port) 142int asd_clear_nexus_port(struct asd_sas_port *port)
131{ 143{
132 struct asd_ha_struct *asd_ha = port->ha->lldd_ha; 144 struct asd_ha_struct *asd_ha = port->ha->lldd_ha;
133 struct asd_ascb *ascb;
134 struct scb *scb;
135 int res;
136 145
137 CLEAR_NEXUS_PRE; 146 CLEAR_NEXUS_PRE;
138 scb->clear_nexus.nexus = NEXUS_PORT; 147 scb->clear_nexus.nexus = NEXUS_PORT;
@@ -140,37 +149,77 @@ int asd_clear_nexus_port(struct asd_sas_port *port)
140 CLEAR_NEXUS_POST; 149 CLEAR_NEXUS_POST;
141} 150}
142 151
143#if 0 152enum clear_nexus_phase {
144static int asd_clear_nexus_I_T(struct domain_device *dev) 153 NEXUS_PHASE_PRE,
154 NEXUS_PHASE_POST,
155 NEXUS_PHASE_RESUME,
156};
157
158static int asd_clear_nexus_I_T(struct domain_device *dev,
159 enum clear_nexus_phase phase)
145{ 160{
146 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; 161 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
147 struct asd_ascb *ascb;
148 struct scb *scb;
149 int res;
150 162
151 CLEAR_NEXUS_PRE; 163 CLEAR_NEXUS_PRE;
152 scb->clear_nexus.nexus = NEXUS_I_T; 164 scb->clear_nexus.nexus = NEXUS_I_T;
153 scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ; 165 switch (phase) {
154 if (dev->tproto) 166 case NEXUS_PHASE_PRE:
155 scb->clear_nexus.flags |= SUSPEND_TX; 167 scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX;
168 break;
169 case NEXUS_PHASE_POST:
170 scb->clear_nexus.flags = SEND_Q | NOTINQ;
171 break;
172 case NEXUS_PHASE_RESUME:
173 scb->clear_nexus.flags = RESUME_TX;
174 }
156 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) 175 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
157 dev->lldd_dev); 176 dev->lldd_dev);
158 CLEAR_NEXUS_POST; 177 CLEAR_NEXUS_POST;
159} 178}
160#endif 179
180int asd_I_T_nexus_reset(struct domain_device *dev)
181{
182 int res, tmp_res, i;
183 struct sas_phy *phy = sas_find_local_phy(dev);
184 /* Standard mandates link reset for ATA (type 0) and
185 * hard reset for SSP (type 1) */
186 int reset_type = (dev->dev_type == SATA_DEV ||
187 (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
188
189 asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
190 /* send a hard reset */
191 ASD_DPRINTK("sending %s reset to %s\n",
192 reset_type ? "hard" : "soft", phy->dev.bus_id);
193 res = sas_phy_reset(phy, reset_type);
194 if (res == TMF_RESP_FUNC_COMPLETE) {
195 /* wait for the maximum settle time */
196 msleep(500);
197 /* clear all outstanding commands (keep nexus suspended) */
198 asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST);
199 }
200 for (i = 0 ; i < 3; i++) {
201 tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME);
202 if (tmp_res == TC_RESUME)
203 return res;
204 msleep(500);
205 }
206
207 /* This is a bit of a problem: the sequencer is still suspended
208 * and is refusing to resume. Hope it will resume on a bigger hammer
209 * or the disk is lost */
210 dev_printk(KERN_ERR, &phy->dev,
211 "Failed to resume nexus after reset 0x%x\n", tmp_res);
212
213 return TMF_RESP_FUNC_FAILED;
214}
161 215
162static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun) 216static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
163{ 217{
164 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; 218 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
165 struct asd_ascb *ascb;
166 struct scb *scb;
167 int res;
168 219
169 CLEAR_NEXUS_PRE; 220 CLEAR_NEXUS_PRE;
170 scb->clear_nexus.nexus = NEXUS_I_T_L; 221 scb->clear_nexus.nexus = NEXUS_I_T_L;
171 scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ; 222 scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
172 if (dev->tproto)
173 scb->clear_nexus.flags |= SUSPEND_TX;
174 memcpy(scb->clear_nexus.ssp_task.lun, lun, 8); 223 memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
175 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) 224 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
176 dev->lldd_dev); 225 dev->lldd_dev);
@@ -181,9 +230,6 @@ static int asd_clear_nexus_tag(struct sas_task *task)
181{ 230{
182 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; 231 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
183 struct asd_ascb *tascb = task->lldd_task; 232 struct asd_ascb *tascb = task->lldd_task;
184 struct asd_ascb *ascb;
185 struct scb *scb;
186 int res;
187 233
188 CLEAR_NEXUS_PRE; 234 CLEAR_NEXUS_PRE;
189 scb->clear_nexus.nexus = NEXUS_TAG; 235 scb->clear_nexus.nexus = NEXUS_TAG;
@@ -199,9 +245,6 @@ static int asd_clear_nexus_index(struct sas_task *task)
199{ 245{
200 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; 246 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
201 struct asd_ascb *tascb = task->lldd_task; 247 struct asd_ascb *tascb = task->lldd_task;
202 struct asd_ascb *ascb;
203 struct scb *scb;
204 int res;
205 248
206 CLEAR_NEXUS_PRE; 249 CLEAR_NEXUS_PRE;
207 scb->clear_nexus.nexus = NEXUS_TRANS_CX; 250 scb->clear_nexus.nexus = NEXUS_TRANS_CX;
@@ -217,11 +260,11 @@ static int asd_clear_nexus_index(struct sas_task *task)
217static void asd_tmf_timedout(unsigned long data) 260static void asd_tmf_timedout(unsigned long data)
218{ 261{
219 struct asd_ascb *ascb = (void *) data; 262 struct asd_ascb *ascb = (void *) data;
263 struct tasklet_completion_status *tcs = ascb->uldd_task;
220 264
221 ASD_DPRINTK("tmf timed out\n"); 265 ASD_DPRINTK("tmf timed out\n");
222 asd_timedout_common(data); 266 tcs->tmf_state = TMF_RESP_FUNC_FAILED;
223 ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED; 267 complete(ascb->completion);
224 complete(&ascb->completion);
225} 268}
226 269
227static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb, 270static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
@@ -273,18 +316,24 @@ static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
273static void asd_tmf_tasklet_complete(struct asd_ascb *ascb, 316static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
274 struct done_list_struct *dl) 317 struct done_list_struct *dl)
275{ 318{
319 struct tasklet_completion_status *tcs;
320
276 if (!del_timer(&ascb->timer)) 321 if (!del_timer(&ascb->timer))
277 return; 322 return;
278 323
324 tcs = ascb->uldd_task;
279 ASD_DPRINTK("tmf tasklet complete\n"); 325 ASD_DPRINTK("tmf tasklet complete\n");
280 326
281 if (dl->opcode == TC_SSP_RESP) 327 tcs->dl_opcode = dl->opcode;
282 ascb->uldd_task = (void *) (unsigned long) 328
283 asd_get_tmf_resp_tasklet(ascb, dl); 329 if (dl->opcode == TC_SSP_RESP) {
284 else 330 tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl);
285 ascb->uldd_task = (void *) 0xFF00 + (unsigned long) dl->opcode; 331 tcs->tag_valid = ascb->tag_valid;
332 tcs->tag = ascb->tag;
333 }
286 334
287 complete(&ascb->completion); 335 complete(ascb->completion);
336 asd_ascb_free(ascb);
288} 337}
289 338
290static inline int asd_clear_nexus(struct sas_task *task) 339static inline int asd_clear_nexus(struct sas_task *task)
@@ -292,15 +341,19 @@ static inline int asd_clear_nexus(struct sas_task *task)
292 int res = TMF_RESP_FUNC_FAILED; 341 int res = TMF_RESP_FUNC_FAILED;
293 int leftover; 342 int leftover;
294 struct asd_ascb *tascb = task->lldd_task; 343 struct asd_ascb *tascb = task->lldd_task;
344 DECLARE_COMPLETION_ONSTACK(completion);
295 unsigned long flags; 345 unsigned long flags;
296 346
347 tascb->completion = &completion;
348
297 ASD_DPRINTK("task not done, clearing nexus\n"); 349 ASD_DPRINTK("task not done, clearing nexus\n");
298 if (tascb->tag_valid) 350 if (tascb->tag_valid)
299 res = asd_clear_nexus_tag(task); 351 res = asd_clear_nexus_tag(task);
300 else 352 else
301 res = asd_clear_nexus_index(task); 353 res = asd_clear_nexus_index(task);
302 leftover = wait_for_completion_timeout(&tascb->completion, 354 leftover = wait_for_completion_timeout(&completion,
303 AIC94XX_SCB_TIMEOUT); 355 AIC94XX_SCB_TIMEOUT);
356 tascb->completion = NULL;
304 ASD_DPRINTK("came back from clear nexus\n"); 357 ASD_DPRINTK("came back from clear nexus\n");
305 spin_lock_irqsave(&task->task_state_lock, flags); 358 spin_lock_irqsave(&task->task_state_lock, flags);
306 if (leftover < 1) 359 if (leftover < 1)
@@ -354,6 +407,11 @@ int asd_abort_task(struct sas_task *task)
354 struct asd_ascb *ascb = NULL; 407 struct asd_ascb *ascb = NULL;
355 struct scb *scb; 408 struct scb *scb;
356 int leftover; 409 int leftover;
410 DECLARE_TCS(tcs);
411 DECLARE_COMPLETION_ONSTACK(completion);
412 DECLARE_COMPLETION_ONSTACK(tascb_completion);
413
414 tascb->completion = &tascb_completion;
357 415
358 spin_lock_irqsave(&task->task_state_lock, flags); 416 spin_lock_irqsave(&task->task_state_lock, flags);
359 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 417 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
@@ -367,8 +425,10 @@ int asd_abort_task(struct sas_task *task)
367 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); 425 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
368 if (!ascb) 426 if (!ascb)
369 return -ENOMEM; 427 return -ENOMEM;
370 scb = ascb->scb;
371 428
429 ascb->uldd_task = &tcs;
430 ascb->completion = &completion;
431 scb = ascb->scb;
372 scb->header.opcode = SCB_ABORT_TASK; 432 scb->header.opcode = SCB_ABORT_TASK;
373 433
374 switch (task->task_proto) { 434 switch (task->task_proto) {
@@ -410,13 +470,12 @@ int asd_abort_task(struct sas_task *task)
410 res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete, 470 res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
411 asd_tmf_timedout); 471 asd_tmf_timedout);
412 if (res) 472 if (res)
413 goto out; 473 goto out_free;
414 wait_for_completion(&ascb->completion); 474 wait_for_completion(&completion);
415 ASD_DPRINTK("tmf came back\n"); 475 ASD_DPRINTK("tmf came back\n");
416 476
417 res = (int) (unsigned long) ascb->uldd_task; 477 tascb->tag = tcs.tag;
418 tascb->tag = ascb->tag; 478 tascb->tag_valid = tcs.tag_valid;
419 tascb->tag_valid = ascb->tag_valid;
420 479
421 spin_lock_irqsave(&task->task_state_lock, flags); 480 spin_lock_irqsave(&task->task_state_lock, flags);
422 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 481 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
@@ -427,63 +486,68 @@ int asd_abort_task(struct sas_task *task)
427 } 486 }
428 spin_unlock_irqrestore(&task->task_state_lock, flags); 487 spin_unlock_irqrestore(&task->task_state_lock, flags);
429 488
430 switch (res) { 489 if (tcs.dl_opcode == TC_SSP_RESP) {
431 /* The task to be aborted has been sent to the device. 490 /* The task to be aborted has been sent to the device.
432 * We got a Response IU for the ABORT TASK TMF. */ 491 * We got a Response IU for the ABORT TASK TMF. */
433 case TC_NO_ERROR + 0xFF00: 492 if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE)
434 case TMF_RESP_FUNC_COMPLETE: 493 res = asd_clear_nexus(task);
435 case TMF_RESP_FUNC_FAILED: 494 else
436 res = asd_clear_nexus(task); 495 res = tcs.tmf_state;
437 break; 496 } else if (tcs.dl_opcode == TC_NO_ERROR &&
438 case TMF_RESP_INVALID_FRAME: 497 tcs.tmf_state == TMF_RESP_FUNC_FAILED) {
439 case TMF_RESP_OVERLAPPED_TAG: 498 /* timeout */
440 case TMF_RESP_FUNC_ESUPP:
441 case TMF_RESP_NO_LUN:
442 goto out_done; break;
443 }
444 /* In the following we assume that the managing layer
445 * will _never_ make a mistake, when issuing ABORT TASK.
446 */
447 switch (res) {
448 default:
449 res = asd_clear_nexus(task);
450 /* fallthrough */
451 case TC_NO_ERROR + 0xFF00:
452 case TMF_RESP_FUNC_COMPLETE:
453 break;
454 /* The task hasn't been sent to the device xor we never got
455 * a (sane) Response IU for the ABORT TASK TMF.
456 */
457 case TF_NAK_RECV + 0xFF00:
458 res = TMF_RESP_INVALID_FRAME;
459 break;
460 case TF_TMF_TASK_DONE + 0xFF00: /* done but not reported yet */
461 res = TMF_RESP_FUNC_FAILED; 499 res = TMF_RESP_FUNC_FAILED;
462 leftover = wait_for_completion_timeout(&tascb->completion, 500 } else {
463 AIC94XX_SCB_TIMEOUT); 501 /* In the following we assume that the managing layer
464 spin_lock_irqsave(&task->task_state_lock, flags); 502 * will _never_ make a mistake, when issuing ABORT
465 if (leftover < 1) 503 * TASK.
504 */
505 switch (tcs.dl_opcode) {
506 default:
507 res = asd_clear_nexus(task);
508 /* fallthrough */
509 case TC_NO_ERROR:
510 break;
511 /* The task hasn't been sent to the device xor
512 * we never got a (sane) Response IU for the
513 * ABORT TASK TMF.
514 */
515 case TF_NAK_RECV:
516 res = TMF_RESP_INVALID_FRAME;
517 break;
518 case TF_TMF_TASK_DONE: /* done but not reported yet */
466 res = TMF_RESP_FUNC_FAILED; 519 res = TMF_RESP_FUNC_FAILED;
467 if (task->task_state_flags & SAS_TASK_STATE_DONE) 520 leftover =
521 wait_for_completion_timeout(&tascb_completion,
522 AIC94XX_SCB_TIMEOUT);
523 spin_lock_irqsave(&task->task_state_lock, flags);
524 if (leftover < 1)
525 res = TMF_RESP_FUNC_FAILED;
526 if (task->task_state_flags & SAS_TASK_STATE_DONE)
527 res = TMF_RESP_FUNC_COMPLETE;
528 spin_unlock_irqrestore(&task->task_state_lock, flags);
529 break;
530 case TF_TMF_NO_TAG:
531 case TF_TMF_TAG_FREE: /* the tag is in the free list */
532 case TF_TMF_NO_CONN_HANDLE: /* no such device */
468 res = TMF_RESP_FUNC_COMPLETE; 533 res = TMF_RESP_FUNC_COMPLETE;
469 spin_unlock_irqrestore(&task->task_state_lock, flags); 534 break;
470 goto out_done; 535 case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
471 case TF_TMF_NO_TAG + 0xFF00: 536 res = TMF_RESP_FUNC_ESUPP;
472 case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */ 537 break;
473 case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */ 538 }
474 res = TMF_RESP_FUNC_COMPLETE;
475 goto out_done;
476 case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */
477 res = TMF_RESP_FUNC_ESUPP;
478 goto out;
479 } 539 }
480out_done: 540 out_done:
541 tascb->completion = NULL;
481 if (res == TMF_RESP_FUNC_COMPLETE) { 542 if (res == TMF_RESP_FUNC_COMPLETE) {
482 task->lldd_task = NULL; 543 task->lldd_task = NULL;
483 mb(); 544 mb();
484 asd_ascb_free(tascb); 545 asd_ascb_free(tascb);
485 } 546 }
486out: 547 ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
548 return res;
549
550 out_free:
487 asd_ascb_free(ascb); 551 asd_ascb_free(ascb);
488 ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res); 552 ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
489 return res; 553 return res;
@@ -511,6 +575,8 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
511 struct asd_ascb *ascb; 575 struct asd_ascb *ascb;
512 int res = 1; 576 int res = 1;
513 struct scb *scb; 577 struct scb *scb;
578 DECLARE_COMPLETION_ONSTACK(completion);
579 DECLARE_TCS(tcs);
514 580
515 if (!(dev->tproto & SAS_PROTOCOL_SSP)) 581 if (!(dev->tproto & SAS_PROTOCOL_SSP))
516 return TMF_RESP_FUNC_ESUPP; 582 return TMF_RESP_FUNC_ESUPP;
@@ -518,6 +584,9 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
518 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); 584 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
519 if (!ascb) 585 if (!ascb)
520 return -ENOMEM; 586 return -ENOMEM;
587
588 ascb->completion = &completion;
589 ascb->uldd_task = &tcs;
521 scb = ascb->scb; 590 scb = ascb->scb;
522 591
523 if (tmf == TMF_QUERY_TASK) 592 if (tmf == TMF_QUERY_TASK)
@@ -550,31 +619,32 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
550 asd_tmf_timedout); 619 asd_tmf_timedout);
551 if (res) 620 if (res)
552 goto out_err; 621 goto out_err;
553 wait_for_completion(&ascb->completion); 622 wait_for_completion(&completion);
554 res = (int) (unsigned long) ascb->uldd_task;
555 623
556 switch (res) { 624 switch (tcs.dl_opcode) {
557 case TC_NO_ERROR + 0xFF00: 625 case TC_NO_ERROR:
558 res = TMF_RESP_FUNC_COMPLETE; 626 res = TMF_RESP_FUNC_COMPLETE;
559 break; 627 break;
560 case TF_NAK_RECV + 0xFF00: 628 case TF_NAK_RECV:
561 res = TMF_RESP_INVALID_FRAME; 629 res = TMF_RESP_INVALID_FRAME;
562 break; 630 break;
563 case TF_TMF_TASK_DONE + 0xFF00: 631 case TF_TMF_TASK_DONE:
564 res = TMF_RESP_FUNC_FAILED; 632 res = TMF_RESP_FUNC_FAILED;
565 break; 633 break;
566 case TF_TMF_NO_TAG + 0xFF00: 634 case TF_TMF_NO_TAG:
567 case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */ 635 case TF_TMF_TAG_FREE: /* the tag is in the free list */
568 case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */ 636 case TF_TMF_NO_CONN_HANDLE: /* no such device */
569 res = TMF_RESP_FUNC_COMPLETE; 637 res = TMF_RESP_FUNC_COMPLETE;
570 break; 638 break;
571 case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */ 639 case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
572 res = TMF_RESP_FUNC_ESUPP; 640 res = TMF_RESP_FUNC_ESUPP;
573 break; 641 break;
574 default: 642 default:
575 /* Allow TMF response codes to propagate upwards */ 643 /* Allow TMF response codes to propagate upwards */
644 res = tcs.dl_opcode;
576 break; 645 break;
577 } 646 }
647 return res;
578out_err: 648out_err:
579 asd_ascb_free(ascb); 649 asd_ascb_free(ascb);
580 return res; 650 return res;
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 57786502e3ec..0393707bdfce 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -48,7 +48,7 @@ struct class_device_attribute;
48/*The limit of outstanding scsi command that firmware can handle*/ 48/*The limit of outstanding scsi command that firmware can handle*/
49#define ARCMSR_MAX_OUTSTANDING_CMD 256 49#define ARCMSR_MAX_OUTSTANDING_CMD 256
50#define ARCMSR_MAX_FREECCB_NUM 320 50#define ARCMSR_MAX_FREECCB_NUM 320
51#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2007/12/24" 51#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/02/27"
52#define ARCMSR_SCSI_INITIATOR_ID 255 52#define ARCMSR_SCSI_INITIATOR_ID 255
53#define ARCMSR_MAX_XFER_SECTORS 512 53#define ARCMSR_MAX_XFER_SECTORS 512
54#define ARCMSR_MAX_XFER_SECTORS_B 4096 54#define ARCMSR_MAX_XFER_SECTORS_B 4096
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 4f9ff32cfed0..f91f79c8007d 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1387,18 +1387,16 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1387 switch(controlcode) { 1387 switch(controlcode) {
1388 1388
1389 case ARCMSR_MESSAGE_READ_RQBUFFER: { 1389 case ARCMSR_MESSAGE_READ_RQBUFFER: {
1390 unsigned long *ver_addr; 1390 unsigned char *ver_addr;
1391 uint8_t *pQbuffer, *ptmpQbuffer; 1391 uint8_t *pQbuffer, *ptmpQbuffer;
1392 int32_t allxfer_len = 0; 1392 int32_t allxfer_len = 0;
1393 void *tmp;
1394 1393
1395 tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA); 1394 ver_addr = kmalloc(1032, GFP_ATOMIC);
1396 ver_addr = (unsigned long *)tmp; 1395 if (!ver_addr) {
1397 if (!tmp) {
1398 retvalue = ARCMSR_MESSAGE_FAIL; 1396 retvalue = ARCMSR_MESSAGE_FAIL;
1399 goto message_out; 1397 goto message_out;
1400 } 1398 }
1401 ptmpQbuffer = (uint8_t *) ver_addr; 1399 ptmpQbuffer = ver_addr;
1402 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) 1400 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
1403 && (allxfer_len < 1031)) { 1401 && (allxfer_len < 1031)) {
1404 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; 1402 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
@@ -1427,26 +1425,24 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1427 } 1425 }
1428 arcmsr_iop_message_read(acb); 1426 arcmsr_iop_message_read(acb);
1429 } 1427 }
1430 memcpy(pcmdmessagefld->messagedatabuffer, (uint8_t *)ver_addr, allxfer_len); 1428 memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len);
1431 pcmdmessagefld->cmdmessage.Length = allxfer_len; 1429 pcmdmessagefld->cmdmessage.Length = allxfer_len;
1432 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1430 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1433 kfree(tmp); 1431 kfree(ver_addr);
1434 } 1432 }
1435 break; 1433 break;
1436 1434
1437 case ARCMSR_MESSAGE_WRITE_WQBUFFER: { 1435 case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
1438 unsigned long *ver_addr; 1436 unsigned char *ver_addr;
1439 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; 1437 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
1440 uint8_t *pQbuffer, *ptmpuserbuffer; 1438 uint8_t *pQbuffer, *ptmpuserbuffer;
1441 void *tmp;
1442 1439
1443 tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA); 1440 ver_addr = kmalloc(1032, GFP_ATOMIC);
1444 ver_addr = (unsigned long *)tmp; 1441 if (!ver_addr) {
1445 if (!tmp) {
1446 retvalue = ARCMSR_MESSAGE_FAIL; 1442 retvalue = ARCMSR_MESSAGE_FAIL;
1447 goto message_out; 1443 goto message_out;
1448 } 1444 }
1449 ptmpuserbuffer = (uint8_t *)ver_addr; 1445 ptmpuserbuffer = ver_addr;
1450 user_len = pcmdmessagefld->cmdmessage.Length; 1446 user_len = pcmdmessagefld->cmdmessage.Length;
1451 memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); 1447 memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
1452 wqbuf_lastindex = acb->wqbuf_lastindex; 1448 wqbuf_lastindex = acb->wqbuf_lastindex;
@@ -1492,7 +1488,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1492 retvalue = ARCMSR_MESSAGE_FAIL; 1488 retvalue = ARCMSR_MESSAGE_FAIL;
1493 } 1489 }
1494 } 1490 }
1495 kfree(tmp); 1491 kfree(ver_addr);
1496 } 1492 }
1497 break; 1493 break;
1498 1494
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h
index 3e73e264972e..b65f4cf0eec9 100644
--- a/drivers/scsi/arm/fas216.h
+++ b/drivers/scsi/arm/fas216.h
@@ -313,7 +313,7 @@ typedef struct {
313 313
314 /* miscellaneous */ 314 /* miscellaneous */
315 int internal_done; /* flag to indicate request done */ 315 int internal_done; /* flag to indicate request done */
316 struct scsi_eh_save *ses; /* holds request sense restore info */ 316 struct scsi_eh_save ses; /* holds request sense restore info */
317 unsigned long magic_end; 317 unsigned long magic_end;
318} FAS216_Info; 318} FAS216_Info;
319 319
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 6d67f5c0eb8e..27ebd336409b 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -160,7 +160,7 @@ static void gdth_readapp_event(gdth_ha_str *ha, unchar application,
160static void gdth_clear_events(void); 160static void gdth_clear_events(void);
161 161
162static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, 162static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
163 char *buffer, ushort count, int to_buffer); 163 char *buffer, ushort count);
164static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp); 164static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
165static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive); 165static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive);
166 166
@@ -182,7 +182,6 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
182 unsigned int cmd, unsigned long arg); 182 unsigned int cmd, unsigned long arg);
183 183
184static void gdth_flush(gdth_ha_str *ha); 184static void gdth_flush(gdth_ha_str *ha);
185static int gdth_halt(struct notifier_block *nb, ulong event, void *buf);
186static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *)); 185static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *));
187static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp, 186static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
188 struct gdth_cmndinfo *cmndinfo); 187 struct gdth_cmndinfo *cmndinfo);
@@ -417,12 +416,6 @@ static inline void gdth_set_sglist(struct scsi_cmnd *cmd,
417#include "gdth_proc.h" 416#include "gdth_proc.h"
418#include "gdth_proc.c" 417#include "gdth_proc.c"
419 418
420/* notifier block to get a notify on system shutdown/halt/reboot */
421static struct notifier_block gdth_notifier = {
422 gdth_halt, NULL, 0
423};
424static int notifier_disabled = 0;
425
426static gdth_ha_str *gdth_find_ha(int hanum) 419static gdth_ha_str *gdth_find_ha(int hanum)
427{ 420{
428 gdth_ha_str *ha; 421 gdth_ha_str *ha;
@@ -445,8 +438,8 @@ static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha)
445 for (i=0; i<GDTH_MAXCMDS; ++i) { 438 for (i=0; i<GDTH_MAXCMDS; ++i) {
446 if (ha->cmndinfo[i].index == 0) { 439 if (ha->cmndinfo[i].index == 0) {
447 priv = &ha->cmndinfo[i]; 440 priv = &ha->cmndinfo[i];
448 priv->index = i+1;
449 memset(priv, 0, sizeof(*priv)); 441 memset(priv, 0, sizeof(*priv));
442 priv->index = i+1;
450 break; 443 break;
451 } 444 }
452 } 445 }
@@ -493,7 +486,6 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
493 gdth_ha_str *ha = shost_priv(sdev->host); 486 gdth_ha_str *ha = shost_priv(sdev->host);
494 Scsi_Cmnd *scp; 487 Scsi_Cmnd *scp;
495 struct gdth_cmndinfo cmndinfo; 488 struct gdth_cmndinfo cmndinfo;
496 struct scatterlist one_sg;
497 DECLARE_COMPLETION_ONSTACK(wait); 489 DECLARE_COMPLETION_ONSTACK(wait);
498 int rval; 490 int rval;
499 491
@@ -507,13 +499,10 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
507 /* use request field to save the ptr. to completion struct. */ 499 /* use request field to save the ptr. to completion struct. */
508 scp->request = (struct request *)&wait; 500 scp->request = (struct request *)&wait;
509 scp->timeout_per_command = timeout*HZ; 501 scp->timeout_per_command = timeout*HZ;
510 sg_init_one(&one_sg, gdtcmd, sizeof(*gdtcmd));
511 gdth_set_sglist(scp, &one_sg);
512 gdth_set_sg_count(scp, 1);
513 gdth_set_bufflen(scp, sizeof(*gdtcmd));
514 scp->cmd_len = 12; 502 scp->cmd_len = 12;
515 memcpy(scp->cmnd, cmnd, 12); 503 memcpy(scp->cmnd, cmnd, 12);
516 cmndinfo.priority = IOCTL_PRI; 504 cmndinfo.priority = IOCTL_PRI;
505 cmndinfo.internal_cmd_str = gdtcmd;
517 cmndinfo.internal_command = 1; 506 cmndinfo.internal_command = 1;
518 507
519 TRACE(("__gdth_execute() cmd 0x%x\n", scp->cmnd[0])); 508 TRACE(("__gdth_execute() cmd 0x%x\n", scp->cmnd[0]));
@@ -2355,7 +2344,7 @@ static void gdth_next(gdth_ha_str *ha)
2355 * buffers, kmap_atomic() as needed. 2344 * buffers, kmap_atomic() as needed.
2356 */ 2345 */
2357static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, 2346static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
2358 char *buffer, ushort count, int to_buffer) 2347 char *buffer, ushort count)
2359{ 2348{
2360 ushort cpcount,i, max_sg = gdth_sg_count(scp); 2349 ushort cpcount,i, max_sg = gdth_sg_count(scp);
2361 ushort cpsum,cpnow; 2350 ushort cpsum,cpnow;
@@ -2381,10 +2370,7 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
2381 } 2370 }
2382 local_irq_save(flags); 2371 local_irq_save(flags);
2383 address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset; 2372 address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset;
2384 if (to_buffer) 2373 memcpy(address, buffer, cpnow);
2385 memcpy(buffer, address, cpnow);
2386 else
2387 memcpy(address, buffer, cpnow);
2388 flush_dcache_page(sg_page(sl)); 2374 flush_dcache_page(sg_page(sl));
2389 kunmap_atomic(address, KM_BIO_SRC_IRQ); 2375 kunmap_atomic(address, KM_BIO_SRC_IRQ);
2390 local_irq_restore(flags); 2376 local_irq_restore(flags);
@@ -2438,7 +2424,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
2438 strcpy(inq.vendor,ha->oem_name); 2424 strcpy(inq.vendor,ha->oem_name);
2439 sprintf(inq.product,"Host Drive #%02d",t); 2425 sprintf(inq.product,"Host Drive #%02d",t);
2440 strcpy(inq.revision," "); 2426 strcpy(inq.revision," ");
2441 gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data), 0); 2427 gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data));
2442 break; 2428 break;
2443 2429
2444 case REQUEST_SENSE: 2430 case REQUEST_SENSE:
@@ -2448,7 +2434,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
2448 sd.key = NO_SENSE; 2434 sd.key = NO_SENSE;
2449 sd.info = 0; 2435 sd.info = 0;
2450 sd.add_length= 0; 2436 sd.add_length= 0;
2451 gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data), 0); 2437 gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data));
2452 break; 2438 break;
2453 2439
2454 case MODE_SENSE: 2440 case MODE_SENSE:
@@ -2460,7 +2446,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
2460 mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16; 2446 mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16;
2461 mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8; 2447 mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8;
2462 mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff); 2448 mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff);
2463 gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data), 0); 2449 gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data));
2464 break; 2450 break;
2465 2451
2466 case READ_CAPACITY: 2452 case READ_CAPACITY:
@@ -2470,7 +2456,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
2470 else 2456 else
2471 rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1); 2457 rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1);
2472 rdc.block_length = cpu_to_be32(SECTOR_SIZE); 2458 rdc.block_length = cpu_to_be32(SECTOR_SIZE);
2473 gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data), 0); 2459 gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data));
2474 break; 2460 break;
2475 2461
2476 case SERVICE_ACTION_IN: 2462 case SERVICE_ACTION_IN:
@@ -2482,7 +2468,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
2482 rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1); 2468 rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1);
2483 rdc16.block_length = cpu_to_be32(SECTOR_SIZE); 2469 rdc16.block_length = cpu_to_be32(SECTOR_SIZE);
2484 gdth_copy_internal_data(ha, scp, (char*)&rdc16, 2470 gdth_copy_internal_data(ha, scp, (char*)&rdc16,
2485 sizeof(gdth_rdcap16_data), 0); 2471 sizeof(gdth_rdcap16_data));
2486 } else { 2472 } else {
2487 scp->result = DID_ABORT << 16; 2473 scp->result = DID_ABORT << 16;
2488 } 2474 }
@@ -2852,6 +2838,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
2852static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) 2838static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
2853{ 2839{
2854 register gdth_cmd_str *cmdp; 2840 register gdth_cmd_str *cmdp;
2841 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
2855 int cmd_index; 2842 int cmd_index;
2856 2843
2857 cmdp= ha->pccb; 2844 cmdp= ha->pccb;
@@ -2860,7 +2847,7 @@ static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
2860 if (ha->type==GDT_EISA && ha->cmd_cnt>0) 2847 if (ha->type==GDT_EISA && ha->cmd_cnt>0)
2861 return 0; 2848 return 0;
2862 2849
2863 gdth_copy_internal_data(ha, scp, (char *)cmdp, sizeof(gdth_cmd_str), 1); 2850 *cmdp = *cmndinfo->internal_cmd_str;
2864 cmdp->RequestBuffer = scp; 2851 cmdp->RequestBuffer = scp;
2865 2852
2866 /* search free command index */ 2853 /* search free command index */
@@ -3794,6 +3781,8 @@ static void gdth_timeout(ulong data)
3794 gdth_ha_str *ha; 3781 gdth_ha_str *ha;
3795 ulong flags; 3782 ulong flags;
3796 3783
3784 BUG_ON(list_empty(&gdth_instances));
3785
3797 ha = list_first_entry(&gdth_instances, gdth_ha_str, list); 3786 ha = list_first_entry(&gdth_instances, gdth_ha_str, list);
3798 spin_lock_irqsave(&ha->smp_lock, flags); 3787 spin_lock_irqsave(&ha->smp_lock, flags);
3799 3788
@@ -4669,45 +4658,6 @@ static void gdth_flush(gdth_ha_str *ha)
4669 } 4658 }
4670} 4659}
4671 4660
4672/* shutdown routine */
4673static int gdth_halt(struct notifier_block *nb, ulong event, void *buf)
4674{
4675 gdth_ha_str *ha;
4676#ifndef __alpha__
4677 gdth_cmd_str gdtcmd;
4678 char cmnd[MAX_COMMAND_SIZE];
4679#endif
4680
4681 if (notifier_disabled)
4682 return NOTIFY_OK;
4683
4684 TRACE2(("gdth_halt() event %d\n",(int)event));
4685 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
4686 return NOTIFY_DONE;
4687
4688 notifier_disabled = 1;
4689 printk("GDT-HA: Flushing all host drives .. ");
4690 list_for_each_entry(ha, &gdth_instances, list) {
4691 gdth_flush(ha);
4692
4693#ifndef __alpha__
4694 /* controller reset */
4695 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
4696 gdtcmd.BoardNode = LOCALBOARD;
4697 gdtcmd.Service = CACHESERVICE;
4698 gdtcmd.OpCode = GDT_RESET;
4699 TRACE2(("gdth_halt(): reset controller %d\n", ha->hanum));
4700 gdth_execute(ha->shost, &gdtcmd, cmnd, 10, NULL);
4701#endif
4702 }
4703 printk("Done.\n");
4704
4705#ifdef GDTH_STATISTICS
4706 del_timer(&gdth_timer);
4707#endif
4708 return NOTIFY_OK;
4709}
4710
4711/* configure lun */ 4661/* configure lun */
4712static int gdth_slave_configure(struct scsi_device *sdev) 4662static int gdth_slave_configure(struct scsi_device *sdev)
4713{ 4663{
@@ -5142,13 +5092,13 @@ static void gdth_remove_one(gdth_ha_str *ha)
5142 5092
5143 scsi_remove_host(shp); 5093 scsi_remove_host(shp);
5144 5094
5095 gdth_flush(ha);
5096
5145 if (ha->sdev) { 5097 if (ha->sdev) {
5146 scsi_free_host_dev(ha->sdev); 5098 scsi_free_host_dev(ha->sdev);
5147 ha->sdev = NULL; 5099 ha->sdev = NULL;
5148 } 5100 }
5149 5101
5150 gdth_flush(ha);
5151
5152 if (shp->irq) 5102 if (shp->irq)
5153 free_irq(shp->irq,ha); 5103 free_irq(shp->irq,ha);
5154 5104
@@ -5174,6 +5124,24 @@ static void gdth_remove_one(gdth_ha_str *ha)
5174 scsi_host_put(shp); 5124 scsi_host_put(shp);
5175} 5125}
5176 5126
5127static int gdth_halt(struct notifier_block *nb, ulong event, void *buf)
5128{
5129 gdth_ha_str *ha;
5130
5131 TRACE2(("gdth_halt() event %d\n", (int)event));
5132 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
5133 return NOTIFY_DONE;
5134
5135 list_for_each_entry(ha, &gdth_instances, list)
5136 gdth_flush(ha);
5137
5138 return NOTIFY_OK;
5139}
5140
5141static struct notifier_block gdth_notifier = {
5142 gdth_halt, NULL, 0
5143};
5144
5177static int __init gdth_init(void) 5145static int __init gdth_init(void)
5178{ 5146{
5179 if (disable) { 5147 if (disable) {
@@ -5236,7 +5204,6 @@ static int __init gdth_init(void)
5236 add_timer(&gdth_timer); 5204 add_timer(&gdth_timer);
5237#endif 5205#endif
5238 major = register_chrdev(0,"gdth", &gdth_fops); 5206 major = register_chrdev(0,"gdth", &gdth_fops);
5239 notifier_disabled = 0;
5240 register_reboot_notifier(&gdth_notifier); 5207 register_reboot_notifier(&gdth_notifier);
5241 gdth_polling = FALSE; 5208 gdth_polling = FALSE;
5242 return 0; 5209 return 0;
@@ -5246,14 +5213,15 @@ static void __exit gdth_exit(void)
5246{ 5213{
5247 gdth_ha_str *ha; 5214 gdth_ha_str *ha;
5248 5215
5249 list_for_each_entry(ha, &gdth_instances, list) 5216 unregister_chrdev(major, "gdth");
5250 gdth_remove_one(ha); 5217 unregister_reboot_notifier(&gdth_notifier);
5251 5218
5252#ifdef GDTH_STATISTICS 5219#ifdef GDTH_STATISTICS
5253 del_timer(&gdth_timer); 5220 del_timer_sync(&gdth_timer);
5254#endif 5221#endif
5255 unregister_chrdev(major,"gdth"); 5222
5256 unregister_reboot_notifier(&gdth_notifier); 5223 list_for_each_entry(ha, &gdth_instances, list)
5224 gdth_remove_one(ha);
5257} 5225}
5258 5226
5259module_init(gdth_init); 5227module_init(gdth_init);
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index 1434c6b0297c..26e4e92515e0 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -915,6 +915,7 @@ typedef struct {
915 struct gdth_cmndinfo { /* per-command private info */ 915 struct gdth_cmndinfo { /* per-command private info */
916 int index; 916 int index;
917 int internal_command; /* don't call scsi_done */ 917 int internal_command; /* don't call scsi_done */
918 gdth_cmd_str *internal_cmd_str; /* crier for internal messages*/
918 dma_addr_t sense_paddr; /* sense dma-addr */ 919 dma_addr_t sense_paddr; /* sense dma-addr */
919 unchar priority; 920 unchar priority;
920 int timeout; 921 int timeout;
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index de5773443c62..ce0228e26aec 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -694,15 +694,13 @@ static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr)
694{ 694{
695 ulong flags; 695 ulong flags;
696 696
697 spin_lock_irqsave(&ha->smp_lock, flags);
698
699 if (buf == ha->pscratch) { 697 if (buf == ha->pscratch) {
698 spin_lock_irqsave(&ha->smp_lock, flags);
700 ha->scratch_busy = FALSE; 699 ha->scratch_busy = FALSE;
700 spin_unlock_irqrestore(&ha->smp_lock, flags);
701 } else { 701 } else {
702 pci_free_consistent(ha->pdev, size, buf, paddr); 702 pci_free_consistent(ha->pdev, size, buf, paddr);
703 } 703 }
704
705 spin_unlock_irqrestore(&ha->smp_lock, flags);
706} 704}
707 705
708#ifdef GDTH_IOCTL_PROC 706#ifdef GDTH_IOCTL_PROC
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index bd62131b97a1..e5881e92d0fb 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -290,7 +290,7 @@ static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
290 int err = 0; 290 int err = 0;
291 291
292 dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0], 292 dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0],
293 cmd->usg_sg); 293 scsi_sg_count(sc));
294 294
295 if (scsi_sg_count(sc)) 295 if (scsi_sg_count(sc))
296 err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1); 296 err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
@@ -838,9 +838,6 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
838 if (!shost) 838 if (!shost)
839 goto free_vport; 839 goto free_vport;
840 shost->transportt = ibmvstgt_transport_template; 840 shost->transportt = ibmvstgt_transport_template;
841 err = scsi_tgt_alloc_queue(shost);
842 if (err)
843 goto put_host;
844 841
845 target = host_to_srp_target(shost); 842 target = host_to_srp_target(shost);
846 target->shost = shost; 843 target->shost = shost;
@@ -872,6 +869,10 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
872 if (err) 869 if (err)
873 goto destroy_queue; 870 goto destroy_queue;
874 871
872 err = scsi_tgt_alloc_queue(shost);
873 if (err)
874 goto destroy_queue;
875
875 return 0; 876 return 0;
876destroy_queue: 877destroy_queue:
877 crq_queue_destroy(target); 878 crq_queue_destroy(target);
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index bb152fb9fec7..7ed568f180ae 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1576,7 +1576,7 @@ ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
1576 METHOD_TRACE("ips_make_passthru", 1); 1576 METHOD_TRACE("ips_make_passthru", 1);
1577 1577
1578 scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i) 1578 scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i)
1579 length += sg[i].length; 1579 length += sg->length;
1580 1580
1581 if (length < sizeof (ips_passthru_t)) { 1581 if (length < sizeof (ips_passthru_t)) {
1582 /* wrong size */ 1582 /* wrong size */
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 59f8445eab0d..bdd7de7da39a 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1708,8 +1708,8 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
1708 qdepth = ISCSI_DEF_CMD_PER_LUN; 1708 qdepth = ISCSI_DEF_CMD_PER_LUN;
1709 } 1709 }
1710 1710
1711 if (!is_power_of_2(cmds_max) || 1711 if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET ||
1712 cmds_max >= ISCSI_MGMT_ITT_OFFSET) { 1712 cmds_max < 2) {
1713 if (cmds_max != 0) 1713 if (cmds_max != 0)
1714 printk(KERN_ERR "iscsi: invalid can_queue of %d. " 1714 printk(KERN_ERR "iscsi: invalid can_queue of %d. "
1715 "can_queue must be a power of 2 and between " 1715 "can_queue must be a power of 2 and between "
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 7cd05b599a12..b0e5ac372a32 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -236,12 +236,12 @@ static void sas_ata_phy_reset(struct ata_port *ap)
236 struct domain_device *dev = ap->private_data; 236 struct domain_device *dev = ap->private_data;
237 struct sas_internal *i = 237 struct sas_internal *i =
238 to_sas_internal(dev->port->ha->core.shost->transportt); 238 to_sas_internal(dev->port->ha->core.shost->transportt);
239 int res = 0; 239 int res = TMF_RESP_FUNC_FAILED;
240 240
241 if (i->dft->lldd_I_T_nexus_reset) 241 if (i->dft->lldd_I_T_nexus_reset)
242 res = i->dft->lldd_I_T_nexus_reset(dev); 242 res = i->dft->lldd_I_T_nexus_reset(dev);
243 243
244 if (res) 244 if (res != TMF_RESP_FUNC_COMPLETE)
245 SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __FUNCTION__); 245 SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __FUNCTION__);
246 246
247 switch (dev->sata_dev.command_set) { 247 switch (dev->sata_dev.command_set) {
@@ -656,21 +656,6 @@ out:
656 return res; 656 return res;
657} 657}
658 658
659static void sas_sata_propagate_sas_addr(struct domain_device *dev)
660{
661 unsigned long flags;
662 struct asd_sas_port *port = dev->port;
663 struct asd_sas_phy *phy;
664
665 BUG_ON(dev->parent);
666
667 memcpy(port->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
668 spin_lock_irqsave(&port->phy_list_lock, flags);
669 list_for_each_entry(phy, &port->phy_list, port_phy_el)
670 memcpy(phy->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
671 spin_unlock_irqrestore(&port->phy_list_lock, flags);
672}
673
674#define ATA_IDENTIFY_DEV 0xEC 659#define ATA_IDENTIFY_DEV 0xEC
675#define ATA_IDENTIFY_PACKET_DEV 0xA1 660#define ATA_IDENTIFY_PACKET_DEV 0xA1
676#define ATA_SET_FEATURES 0xEF 661#define ATA_SET_FEATURES 0xEF
@@ -728,26 +713,6 @@ static int sas_discover_sata_dev(struct domain_device *dev)
728 goto out_err; 713 goto out_err;
729 } 714 }
730cont1: 715cont1:
731 /* Get WWN */
732 if (dev->port->oob_mode != SATA_OOB_MODE) {
733 memcpy(dev->sas_addr, dev->sata_dev.rps_resp.rps.stp_sas_addr,
734 SAS_ADDR_SIZE);
735 } else if (dev->sata_dev.command_set == ATA_COMMAND_SET &&
736 (le16_to_cpu(dev->sata_dev.identify_device[108]) & 0xF000)
737 == 0x5000) {
738 int i;
739
740 for (i = 0; i < 4; i++) {
741 dev->sas_addr[2*i] =
742 (le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0xFF00) >> 8;
743 dev->sas_addr[2*i+1] =
744 le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0x00FF;
745 }
746 }
747 sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
748 if (!dev->parent)
749 sas_sata_propagate_sas_addr(dev);
750
751 /* XXX Hint: register this SATA device with SATL. 716 /* XXX Hint: register this SATA device with SATL.
752 When this returns, dev->sata_dev->lu is alive and 717 When this returns, dev->sata_dev->lu is alive and
753 present. 718 present.
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index e1e2d085c920..39ae68a3b0ef 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -92,9 +92,6 @@ static void sas_form_port(struct asd_sas_phy *phy)
92 if (!port->phy) 92 if (!port->phy)
93 port->phy = phy->phy; 93 port->phy = phy->phy;
94 94
95 SAS_DPRINTK("phy%d added to port%d, phy_mask:0x%x\n", phy->id,
96 port->id, port->phy_mask);
97
98 if (*(u64 *)port->attached_sas_addr == 0) { 95 if (*(u64 *)port->attached_sas_addr == 0) {
99 port->class = phy->class; 96 port->class = phy->class;
100 memcpy(port->attached_sas_addr, phy->attached_sas_addr, 97 memcpy(port->attached_sas_addr, phy->attached_sas_addr,
@@ -115,6 +112,11 @@ static void sas_form_port(struct asd_sas_phy *phy)
115 } 112 }
116 sas_port_add_phy(port->port, phy->phy); 113 sas_port_add_phy(port->port, phy->phy);
117 114
115 SAS_DPRINTK("%s added to %s, phy_mask:0x%x (%16llx)\n",
116 phy->phy->dev.bus_id,port->port->dev.bus_id,
117 port->phy_mask,
118 SAS_ADDR(port->attached_sas_addr));
119
118 if (port->port_dev) 120 if (port->port_dev)
119 port->port_dev->pathways = port->num_phys; 121 port->port_dev->pathways = port->num_phys;
120 122
@@ -255,12 +257,11 @@ void sas_porte_hard_reset(struct work_struct *work)
255static void sas_init_port(struct asd_sas_port *port, 257static void sas_init_port(struct asd_sas_port *port,
256 struct sas_ha_struct *sas_ha, int i) 258 struct sas_ha_struct *sas_ha, int i)
257{ 259{
260 memset(port, 0, sizeof(*port));
258 port->id = i; 261 port->id = i;
259 INIT_LIST_HEAD(&port->dev_list); 262 INIT_LIST_HEAD(&port->dev_list);
260 spin_lock_init(&port->phy_list_lock); 263 spin_lock_init(&port->phy_list_lock);
261 INIT_LIST_HEAD(&port->phy_list); 264 INIT_LIST_HEAD(&port->phy_list);
262 port->num_phys = 0;
263 port->phy_mask = 0;
264 port->ha = sas_ha; 265 port->ha = sas_ha;
265 266
266 spin_lock_init(&port->dev_list_lock); 267 spin_lock_init(&port->dev_list_lock);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index f869fba86807..1f8241563c6c 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -51,10 +51,14 @@ static void sas_scsi_task_done(struct sas_task *task)
51{ 51{
52 struct task_status_struct *ts = &task->task_status; 52 struct task_status_struct *ts = &task->task_status;
53 struct scsi_cmnd *sc = task->uldd_task; 53 struct scsi_cmnd *sc = task->uldd_task;
54 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(sc->device->host);
55 unsigned ts_flags = task->task_state_flags;
56 int hs = 0, stat = 0; 54 int hs = 0, stat = 0;
57 55
56 if (unlikely(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
57 /* Aborted tasks will be completed by the error handler */
58 SAS_DPRINTK("task done but aborted\n");
59 return;
60 }
61
58 if (unlikely(!sc)) { 62 if (unlikely(!sc)) {
59 SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n"); 63 SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
60 list_del_init(&task->list); 64 list_del_init(&task->list);
@@ -120,11 +124,7 @@ static void sas_scsi_task_done(struct sas_task *task)
120 sc->result = (hs << 16) | stat; 124 sc->result = (hs << 16) | stat;
121 list_del_init(&task->list); 125 list_del_init(&task->list);
122 sas_free_task(task); 126 sas_free_task(task);
123 /* This is very ugly but this is how SCSI Core works. */ 127 sc->scsi_done(sc);
124 if (ts_flags & SAS_TASK_STATE_ABORTED)
125 scsi_eh_finish_cmd(sc, &sas_ha->eh_done_q);
126 else
127 sc->scsi_done(sc);
128} 128}
129 129
130static enum task_attribute sas_scsi_get_task_attr(struct scsi_cmnd *cmd) 130static enum task_attribute sas_scsi_get_task_attr(struct scsi_cmnd *cmd)
@@ -255,13 +255,34 @@ out:
255 return res; 255 return res;
256} 256}
257 257
258static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
259{
260 struct sas_task *task = TO_SAS_TASK(cmd);
261 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
262
263 /* remove the aborted task flag to allow the task to be
264 * completed now. At this point, we only get called following
265 * an actual abort of the task, so we should be guaranteed not
266 * to be racing with any completions from the LLD (hence we
267 * don't need the task state lock to clear the flag) */
268 task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
269 /* Now call task_done. However, task will be free'd after
270 * this */
271 task->task_done(task);
272 /* now finish the command and move it on to the error
273 * handler done list, this also takes it off the
274 * error handler pending list */
275 scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
276}
277
258static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd) 278static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
259{ 279{
260 struct scsi_cmnd *cmd, *n; 280 struct scsi_cmnd *cmd, *n;
261 281
262 list_for_each_entry_safe(cmd, n, error_q, eh_entry) { 282 list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
263 if (cmd == my_cmd) 283 if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
264 list_del_init(&cmd->eh_entry); 284 cmd->device->lun == my_cmd->device->lun)
285 sas_eh_finish_cmd(cmd);
265 } 286 }
266} 287}
267 288
@@ -274,7 +295,7 @@ static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
274 struct domain_device *x = cmd_to_domain_dev(cmd); 295 struct domain_device *x = cmd_to_domain_dev(cmd);
275 296
276 if (x == dev) 297 if (x == dev)
277 list_del_init(&cmd->eh_entry); 298 sas_eh_finish_cmd(cmd);
278 } 299 }
279} 300}
280 301
@@ -288,7 +309,7 @@ static void sas_scsi_clear_queue_port(struct list_head *error_q,
288 struct asd_sas_port *x = dev->port; 309 struct asd_sas_port *x = dev->port;
289 310
290 if (x == port) 311 if (x == port)
291 list_del_init(&cmd->eh_entry); 312 sas_eh_finish_cmd(cmd);
292 } 313 }
293} 314}
294 315
@@ -413,7 +434,7 @@ static int sas_recover_I_T(struct domain_device *dev)
413} 434}
414 435
415/* Find the sas_phy that's attached to this device */ 436/* Find the sas_phy that's attached to this device */
416static struct sas_phy *find_local_sas_phy(struct domain_device *dev) 437struct sas_phy *sas_find_local_phy(struct domain_device *dev)
417{ 438{
418 struct domain_device *pdev = dev->parent; 439 struct domain_device *pdev = dev->parent;
419 struct ex_phy *exphy = NULL; 440 struct ex_phy *exphy = NULL;
@@ -435,6 +456,7 @@ static struct sas_phy *find_local_sas_phy(struct domain_device *dev)
435 BUG_ON(!exphy); 456 BUG_ON(!exphy);
436 return exphy->phy; 457 return exphy->phy;
437} 458}
459EXPORT_SYMBOL_GPL(sas_find_local_phy);
438 460
439/* Attempt to send a LUN reset message to a device */ 461/* Attempt to send a LUN reset message to a device */
440int sas_eh_device_reset_handler(struct scsi_cmnd *cmd) 462int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
@@ -461,7 +483,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
461int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd) 483int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
462{ 484{
463 struct domain_device *dev = cmd_to_domain_dev(cmd); 485 struct domain_device *dev = cmd_to_domain_dev(cmd);
464 struct sas_phy *phy = find_local_sas_phy(dev); 486 struct sas_phy *phy = sas_find_local_phy(dev);
465 int res; 487 int res;
466 488
467 res = sas_phy_reset(phy, 1); 489 res = sas_phy_reset(phy, 1);
@@ -476,10 +498,10 @@ int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
476} 498}
477 499
478/* Try to reset a device */ 500/* Try to reset a device */
479static int try_to_reset_cmd_device(struct Scsi_Host *shost, 501static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
480 struct scsi_cmnd *cmd)
481{ 502{
482 int res; 503 int res;
504 struct Scsi_Host *shost = cmd->device->host;
483 505
484 if (!shost->hostt->eh_device_reset_handler) 506 if (!shost->hostt->eh_device_reset_handler)
485 goto try_bus_reset; 507 goto try_bus_reset;
@@ -519,6 +541,12 @@ Again:
519 need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET; 541 need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
520 spin_unlock_irqrestore(&task->task_state_lock, flags); 542 spin_unlock_irqrestore(&task->task_state_lock, flags);
521 543
544 if (need_reset) {
545 SAS_DPRINTK("%s: task 0x%p requests reset\n",
546 __FUNCTION__, task);
547 goto reset;
548 }
549
522 SAS_DPRINTK("trying to find task 0x%p\n", task); 550 SAS_DPRINTK("trying to find task 0x%p\n", task);
523 res = sas_scsi_find_task(task); 551 res = sas_scsi_find_task(task);
524 552
@@ -528,28 +556,23 @@ Again:
528 case TASK_IS_DONE: 556 case TASK_IS_DONE:
529 SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, 557 SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__,
530 task); 558 task);
531 task->task_done(task); 559 sas_eh_finish_cmd(cmd);
532 if (need_reset)
533 try_to_reset_cmd_device(shost, cmd);
534 continue; 560 continue;
535 case TASK_IS_ABORTED: 561 case TASK_IS_ABORTED:
536 SAS_DPRINTK("%s: task 0x%p is aborted\n", 562 SAS_DPRINTK("%s: task 0x%p is aborted\n",
537 __FUNCTION__, task); 563 __FUNCTION__, task);
538 task->task_done(task); 564 sas_eh_finish_cmd(cmd);
539 if (need_reset)
540 try_to_reset_cmd_device(shost, cmd);
541 continue; 565 continue;
542 case TASK_IS_AT_LU: 566 case TASK_IS_AT_LU:
543 SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); 567 SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
568 reset:
544 tmf_resp = sas_recover_lu(task->dev, cmd); 569 tmf_resp = sas_recover_lu(task->dev, cmd);
545 if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { 570 if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
546 SAS_DPRINTK("dev %016llx LU %x is " 571 SAS_DPRINTK("dev %016llx LU %x is "
547 "recovered\n", 572 "recovered\n",
548 SAS_ADDR(task->dev), 573 SAS_ADDR(task->dev),
549 cmd->device->lun); 574 cmd->device->lun);
550 task->task_done(task); 575 sas_eh_finish_cmd(cmd);
551 if (need_reset)
552 try_to_reset_cmd_device(shost, cmd);
553 sas_scsi_clear_queue_lu(work_q, cmd); 576 sas_scsi_clear_queue_lu(work_q, cmd);
554 goto Again; 577 goto Again;
555 } 578 }
@@ -560,15 +583,15 @@ Again:
560 task); 583 task);
561 tmf_resp = sas_recover_I_T(task->dev); 584 tmf_resp = sas_recover_I_T(task->dev);
562 if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { 585 if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
586 struct domain_device *dev = task->dev;
563 SAS_DPRINTK("I_T %016llx recovered\n", 587 SAS_DPRINTK("I_T %016llx recovered\n",
564 SAS_ADDR(task->dev->sas_addr)); 588 SAS_ADDR(task->dev->sas_addr));
565 task->task_done(task); 589 sas_eh_finish_cmd(cmd);
566 if (need_reset) 590 sas_scsi_clear_queue_I_T(work_q, dev);
567 try_to_reset_cmd_device(shost, cmd);
568 sas_scsi_clear_queue_I_T(work_q, task->dev);
569 goto Again; 591 goto Again;
570 } 592 }
571 /* Hammer time :-) */ 593 /* Hammer time :-) */
594 try_to_reset_cmd_device(cmd);
572 if (i->dft->lldd_clear_nexus_port) { 595 if (i->dft->lldd_clear_nexus_port) {
573 struct asd_sas_port *port = task->dev->port; 596 struct asd_sas_port *port = task->dev->port;
574 SAS_DPRINTK("clearing nexus for port:%d\n", 597 SAS_DPRINTK("clearing nexus for port:%d\n",
@@ -577,9 +600,7 @@ Again:
577 if (res == TMF_RESP_FUNC_COMPLETE) { 600 if (res == TMF_RESP_FUNC_COMPLETE) {
578 SAS_DPRINTK("clear nexus port:%d " 601 SAS_DPRINTK("clear nexus port:%d "
579 "succeeded\n", port->id); 602 "succeeded\n", port->id);
580 task->task_done(task); 603 sas_eh_finish_cmd(cmd);
581 if (need_reset)
582 try_to_reset_cmd_device(shost, cmd);
583 sas_scsi_clear_queue_port(work_q, 604 sas_scsi_clear_queue_port(work_q,
584 port); 605 port);
585 goto Again; 606 goto Again;
@@ -591,10 +612,8 @@ Again:
591 if (res == TMF_RESP_FUNC_COMPLETE) { 612 if (res == TMF_RESP_FUNC_COMPLETE) {
592 SAS_DPRINTK("clear nexus ha " 613 SAS_DPRINTK("clear nexus ha "
593 "succeeded\n"); 614 "succeeded\n");
594 task->task_done(task); 615 sas_eh_finish_cmd(cmd);
595 if (need_reset) 616 goto clear_q;
596 try_to_reset_cmd_device(shost, cmd);
597 goto out;
598 } 617 }
599 } 618 }
600 /* If we are here -- this means that no amount 619 /* If we are here -- this means that no amount
@@ -606,21 +625,16 @@ Again:
606 SAS_ADDR(task->dev->sas_addr), 625 SAS_ADDR(task->dev->sas_addr),
607 cmd->device->lun); 626 cmd->device->lun);
608 627
609 task->task_done(task); 628 sas_eh_finish_cmd(cmd);
610 if (need_reset)
611 try_to_reset_cmd_device(shost, cmd);
612 goto clear_q; 629 goto clear_q;
613 } 630 }
614 } 631 }
615out:
616 return list_empty(work_q); 632 return list_empty(work_q);
617clear_q: 633clear_q:
618 SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__); 634 SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__);
619 list_for_each_entry_safe(cmd, n, work_q, eh_entry) { 635 list_for_each_entry_safe(cmd, n, work_q, eh_entry)
620 struct sas_task *task = TO_SAS_TASK(cmd); 636 sas_eh_finish_cmd(cmd);
621 list_del_init(&cmd->eh_entry); 637
622 task->task_done(task);
623 }
624 return list_empty(work_q); 638 return list_empty(work_q);
625} 639}
626 640
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 848d97744b4d..0819f5f39de5 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -55,7 +55,6 @@ void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
55void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 55void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
56void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *); 56void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
57void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); 57void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
58void lpfc_disable_node(struct lpfc_vport *, struct lpfc_nodelist *);
59struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *, 58struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
60 struct lpfc_nodelist *, int); 59 struct lpfc_nodelist *, int);
61void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int); 60void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index bd572d6b60af..976653440fba 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1694,7 +1694,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1694 NLP_STE_UNUSED_NODE); 1694 NLP_STE_UNUSED_NODE);
1695} 1695}
1696 1696
1697void 1697static void
1698lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 1698lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1699{ 1699{
1700 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) 1700 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f53206411cd8..fc0d9501aba6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -648,28 +648,24 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
648 unsigned long flags; 648 unsigned long flags;
649 struct hbq_dmabuf *hbq_buffer; 649 struct hbq_dmabuf *hbq_buffer;
650 650
651 if (!phba->hbqs[hbqno].hbq_alloc_buffer) { 651 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
652 return 0; 652 return 0;
653 }
654 653
655 start = phba->hbqs[hbqno].buffer_count; 654 start = phba->hbqs[hbqno].buffer_count;
656 end = count + start; 655 end = count + start;
657 if (end > lpfc_hbq_defs[hbqno]->entry_count) { 656 if (end > lpfc_hbq_defs[hbqno]->entry_count)
658 end = lpfc_hbq_defs[hbqno]->entry_count; 657 end = lpfc_hbq_defs[hbqno]->entry_count;
659 }
660 658
661 /* Check whether HBQ is still in use */ 659 /* Check whether HBQ is still in use */
662 spin_lock_irqsave(&phba->hbalock, flags); 660 spin_lock_irqsave(&phba->hbalock, flags);
663 if (!phba->hbq_in_use) { 661 if (!phba->hbq_in_use)
664 spin_unlock_irqrestore(&phba->hbalock, flags); 662 goto out;
665 return 0;
666 }
667 663
668 /* Populate HBQ entries */ 664 /* Populate HBQ entries */
669 for (i = start; i < end; i++) { 665 for (i = start; i < end; i++) {
670 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 666 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
671 if (!hbq_buffer) 667 if (!hbq_buffer)
672 return 1; 668 goto err;
673 hbq_buffer->tag = (i | (hbqno << 16)); 669 hbq_buffer->tag = (i | (hbqno << 16));
674 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 670 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
675 phba->hbqs[hbqno].buffer_count++; 671 phba->hbqs[hbqno].buffer_count++;
@@ -677,8 +673,12 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
677 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 673 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
678 } 674 }
679 675
676 out:
680 spin_unlock_irqrestore(&phba->hbalock, flags); 677 spin_unlock_irqrestore(&phba->hbalock, flags);
681 return 0; 678 return 0;
679 err:
680 spin_unlock_irqrestore(&phba->hbalock, flags);
681 return 1;
682} 682}
683 683
684int 684int
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 4d59ae8491a4..b135a1ed4b2c 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -151,19 +151,19 @@ mega_setup_mailbox(adapter_t *adapter)
151 */ 151 */
152 if( adapter->flag & BOARD_IOMAP ) { 152 if( adapter->flag & BOARD_IOMAP ) {
153 153
154 outb_p(adapter->mbox_dma & 0xFF, 154 outb(adapter->mbox_dma & 0xFF,
155 adapter->host->io_port + MBOX_PORT0); 155 adapter->host->io_port + MBOX_PORT0);
156 156
157 outb_p((adapter->mbox_dma >> 8) & 0xFF, 157 outb((adapter->mbox_dma >> 8) & 0xFF,
158 adapter->host->io_port + MBOX_PORT1); 158 adapter->host->io_port + MBOX_PORT1);
159 159
160 outb_p((adapter->mbox_dma >> 16) & 0xFF, 160 outb((adapter->mbox_dma >> 16) & 0xFF,
161 adapter->host->io_port + MBOX_PORT2); 161 adapter->host->io_port + MBOX_PORT2);
162 162
163 outb_p((adapter->mbox_dma >> 24) & 0xFF, 163 outb((adapter->mbox_dma >> 24) & 0xFF,
164 adapter->host->io_port + MBOX_PORT3); 164 adapter->host->io_port + MBOX_PORT3);
165 165
166 outb_p(ENABLE_MBOX_BYTE, 166 outb(ENABLE_MBOX_BYTE,
167 adapter->host->io_port + ENABLE_MBOX_REGION); 167 adapter->host->io_port + ENABLE_MBOX_REGION);
168 168
169 irq_ack(adapter); 169 irq_ack(adapter);
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 651d09b08f2a..fd63b06d9ef1 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1759,6 +1759,7 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
1759 1759
1760 switch (mesg.event) { 1760 switch (mesg.event) {
1761 case PM_EVENT_SUSPEND: 1761 case PM_EVENT_SUSPEND:
1762 case PM_EVENT_HIBERNATE:
1762 case PM_EVENT_FREEZE: 1763 case PM_EVENT_FREEZE:
1763 break; 1764 break;
1764 default: 1765 default:
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
new file mode 100644
index 000000000000..5ec0665b3a3d
--- /dev/null
+++ b/drivers/scsi/mvsas.c
@@ -0,0 +1,2969 @@
1/*
2 mvsas.c - Marvell 88SE6440 SAS/SATA support
3
4 Copyright 2007 Red Hat, Inc.
5 Copyright 2008 Marvell. <kewei@marvell.com>
6
7 This program is free software; you can redistribute it and/or
8 modify it under the terms of the GNU General Public License as
9 published by the Free Software Foundation; either version 2,
10 or (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty
14 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 See the GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public
18 License along with this program; see the file COPYING. If not,
19 write to the Free Software Foundation, 675 Mass Ave, Cambridge,
20 MA 02139, USA.
21
22 ---------------------------------------------------------------
23
24 Random notes:
25 * hardware supports controlling the endian-ness of data
26 structures. this permits elimination of all the le32_to_cpu()
27 and cpu_to_le32() conversions.
28
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/pci.h>
34#include <linux/interrupt.h>
35#include <linux/spinlock.h>
36#include <linux/delay.h>
37#include <linux/dma-mapping.h>
38#include <linux/ctype.h>
39#include <scsi/libsas.h>
40#include <asm/io.h>
41
42#define DRV_NAME "mvsas"
43#define DRV_VERSION "0.5.1"
44#define _MV_DUMP 0
45#define MVS_DISABLE_NVRAM
46#define MVS_DISABLE_MSI
47
48#define mr32(reg) readl(regs + MVS_##reg)
49#define mw32(reg,val) writel((val), regs + MVS_##reg)
50#define mw32_f(reg,val) do { \
51 writel((val), regs + MVS_##reg); \
52 readl(regs + MVS_##reg); \
53 } while (0)
54
55#define MVS_ID_NOT_MAPPED 0xff
56#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
57
58/* offset for D2H FIS in the Received FIS List Structure */
59#define SATA_RECEIVED_D2H_FIS(reg_set) \
60 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40)
61#define SATA_RECEIVED_PIO_FIS(reg_set) \
62 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20)
63#define UNASSOC_D2H_FIS(id) \
64 ((void *) mvi->rx_fis + 0x100 * id)
65
66#define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \
67 for ((__mc) = (__lseq_mask), (__lseq) = 0; \
68 (__mc) != 0 && __rest; \
69 (++__lseq), (__mc) >>= 1)
70
71/* driver compile-time configuration */
72enum driver_configuration {
73 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
74 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
75 /* software requires power-of-2
76 ring size */
77
78 MVS_SLOTS = 512, /* command slots */
79 MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */
80 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
81 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
82 MVS_OAF_SZ = 64, /* Open address frame buffer size */
83
84 MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */
85
86 MVS_QUEUE_SIZE = 30, /* Support Queue depth */
87};
88
89/* unchangeable hardware details */
90enum hardware_details {
91 MVS_MAX_PHYS = 8, /* max. possible phys */
92 MVS_MAX_PORTS = 8, /* max. possible ports */
93 MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100),
94};
95
96/* peripheral registers (BAR2) */
97enum peripheral_registers {
98 SPI_CTL = 0x10, /* EEPROM control */
99 SPI_CMD = 0x14, /* EEPROM command */
100 SPI_DATA = 0x18, /* EEPROM data */
101};
102
103enum peripheral_register_bits {
104 TWSI_RDY = (1U << 7), /* EEPROM interface ready */
105 TWSI_RD = (1U << 4), /* EEPROM read access */
106
107 SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */
108};
109
110/* enhanced mode registers (BAR4) */
111enum hw_registers {
112 MVS_GBL_CTL = 0x04, /* global control */
113 MVS_GBL_INT_STAT = 0x08, /* global irq status */
114 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
115 MVS_GBL_PORT_TYPE = 0xa0, /* port type */
116
117 MVS_CTL = 0x100, /* SAS/SATA port configuration */
118 MVS_PCS = 0x104, /* SAS/SATA port control/status */
119 MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
120 MVS_CMD_LIST_HI = 0x10C,
121 MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
122 MVS_RX_FIS_HI = 0x114,
123
124 MVS_TX_CFG = 0x120, /* TX configuration */
125 MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
126 MVS_TX_HI = 0x128,
127
128 MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
129 MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
130 MVS_RX_CFG = 0x134, /* RX configuration */
131 MVS_RX_LO = 0x138, /* RX (completion) ring addr */
132 MVS_RX_HI = 0x13C,
133 MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
134
135 MVS_INT_COAL = 0x148, /* Int coalescing config */
136 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
137 MVS_INT_STAT = 0x150, /* Central int status */
138 MVS_INT_MASK = 0x154, /* Central int enable */
139 MVS_INT_STAT_SRS = 0x158, /* SATA register set status */
140 MVS_INT_MASK_SRS = 0x15C,
141
142 /* ports 1-3 follow after this */
143 MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */
144 MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */
145 MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */
146 MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */
147
148 /* ports 1-3 follow after this */
149 MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */
150 MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */
151
152 MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */
153 MVS_CMD_DATA = 0x1BC, /* Command register port (data) */
154
155 /* ports 1-3 follow after this */
156 MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */
157 MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */
158 MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */
159 MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */
160
161 /* ports 1-3 follow after this */
162 MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */
163 MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */
164 MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */
165 MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */
166};
167
168enum hw_register_bits {
169 /* MVS_GBL_CTL */
170 INT_EN = (1U << 1), /* Global int enable */
171 HBA_RST = (1U << 0), /* HBA reset */
172
173 /* MVS_GBL_INT_STAT */
174 INT_XOR = (1U << 4), /* XOR engine event */
175 INT_SAS_SATA = (1U << 0), /* SAS/SATA event */
176
177 /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */
178 SATA_TARGET = (1U << 16), /* port0 SATA target enable */
179 MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */
180 MODE_AUTO_DET_PORT6 = (1U << 14),
181 MODE_AUTO_DET_PORT5 = (1U << 13),
182 MODE_AUTO_DET_PORT4 = (1U << 12),
183 MODE_AUTO_DET_PORT3 = (1U << 11),
184 MODE_AUTO_DET_PORT2 = (1U << 10),
185 MODE_AUTO_DET_PORT1 = (1U << 9),
186 MODE_AUTO_DET_PORT0 = (1U << 8),
187 MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
188 MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
189 MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
190 MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
191 MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */
192 MODE_SAS_PORT6_MASK = (1U << 6),
193 MODE_SAS_PORT5_MASK = (1U << 5),
194 MODE_SAS_PORT4_MASK = (1U << 4),
195 MODE_SAS_PORT3_MASK = (1U << 3),
196 MODE_SAS_PORT2_MASK = (1U << 2),
197 MODE_SAS_PORT1_MASK = (1U << 1),
198 MODE_SAS_PORT0_MASK = (1U << 0),
199 MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
200 MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
201 MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
202 MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
203
204 /* SAS_MODE value may be
205 * dictated (in hw) by values
206 * of SATA_TARGET & AUTO_DET
207 */
208
209 /* MVS_TX_CFG */
210 TX_EN = (1U << 16), /* Enable TX */
211 TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */
212
213 /* MVS_RX_CFG */
214 RX_EN = (1U << 16), /* Enable RX */
215 RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */
216
217 /* MVS_INT_COAL */
218 COAL_EN = (1U << 16), /* Enable int coalescing */
219
220 /* MVS_INT_STAT, MVS_INT_MASK */
221 CINT_I2C = (1U << 31), /* I2C event */
222 CINT_SW0 = (1U << 30), /* software event 0 */
223 CINT_SW1 = (1U << 29), /* software event 1 */
224 CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */
225 CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
226 CINT_MEM = (1U << 26), /* int mem parity err */
227 CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
228 CINT_SRS = (1U << 3), /* SRS event */
229 CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
230 CINT_DONE = (1U << 0), /* cmd completion */
231
232 /* shl for ports 1-3 */
233 CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */
234 CINT_PORT = (1U << 8), /* port0 event */
235 CINT_PORT_MASK_OFFSET = 8,
236 CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET),
237
238 /* TX (delivery) ring bits */
239 TXQ_CMD_SHIFT = 29,
240 TXQ_CMD_SSP = 1, /* SSP protocol */
241 TXQ_CMD_SMP = 2, /* SMP protocol */
242 TXQ_CMD_STP = 3, /* STP/SATA protocol */
243 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */
244 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
245 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
246 TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */
247 TXQ_SRS_SHIFT = 20, /* SATA register set */
248 TXQ_SRS_MASK = 0x7f,
249 TXQ_PHY_SHIFT = 12, /* PHY bitmap */
250 TXQ_PHY_MASK = 0xff,
251 TXQ_SLOT_MASK = 0xfff, /* slot number */
252
253 /* RX (completion) ring bits */
254 RXQ_GOOD = (1U << 23), /* Response good */
255 RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */
256 RXQ_CMD_RX = (1U << 20), /* target cmd received */
257 RXQ_ATTN = (1U << 19), /* attention */
258 RXQ_RSP = (1U << 18), /* response frame xfer'd */
259 RXQ_ERR = (1U << 17), /* err info rec xfer'd */
260 RXQ_DONE = (1U << 16), /* cmd complete */
261 RXQ_SLOT_MASK = 0xfff, /* slot number */
262
263 /* mvs_cmd_hdr bits */
264 MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */
265 MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */
266
267 /* SSP initiator only */
268 MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */
269
270 /* SSP initiator or target */
271 MCH_SSP_FR_TASK = 0x1, /* TASK frame */
272
273 /* SSP target only */
274 MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */
275 MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */
276 MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */
277 MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */
278
279 MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */
280 MCH_FBURST = (1U << 11), /* first burst (SSP) */
281 MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */
282 MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */
283 MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */
284 MCH_RESET = (1U << 7), /* Reset (STP/SATA) */
285 MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */
286 MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */
287 MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */
288 MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/
289
290 CCTL_RST = (1U << 5), /* port logic reset */
291
292 /* 0(LSB first), 1(MSB first) */
293 CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */
294 CCTL_ENDIAN_RSP = (1U << 2), /* response frame */
295 CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */
296 CCTL_ENDIAN_CMD = (1U << 0), /* command table */
297
298 /* MVS_Px_SER_CTLSTAT (per-phy control) */
299 PHY_SSP_RST = (1U << 3), /* reset SSP link layer */
300 PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */
301 PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */
302 PHY_RST = (1U << 0), /* phy reset */
303 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
304 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
305 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
306 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
307 (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
308 PHY_READY_MASK = (1U << 20),
309
310 /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
311 PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
312 PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
313 PHYEV_AN = (1U << 18), /* SATA async notification */
314 PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */
315 PHYEV_SIG_FIS = (1U << 16), /* signature FIS */
316 PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */
317 PHYEV_IU_BIG = (1U << 11), /* IU too long err */
318 PHYEV_IU_SMALL = (1U << 10), /* IU too short err */
319 PHYEV_UNK_TAG = (1U << 9), /* unknown tag */
320 PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */
321 PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */
322 PHYEV_PORT_SEL = (1U << 6), /* port selector present */
323 PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */
324 PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */
325 PHYEV_ID_FAIL = (1U << 3), /* identify failed */
326 PHYEV_ID_DONE = (1U << 2), /* identify done */
327 PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */
328 PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */
329
330 /* MVS_PCS */
331 PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */
332 PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */
333 PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */
334 PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */
335 PCS_RSP_RX_EN = (1U << 7), /* raw response rx */
336 PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */
337 PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */
338 PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */
339 PCS_CMD_RST = (1U << 1), /* reset cmd issue */
340 PCS_CMD_EN = (1U << 0), /* enable cmd issue */
341
342 /* Port n Attached Device Info */
343 PORT_DEV_SSP_TRGT = (1U << 19),
344 PORT_DEV_SMP_TRGT = (1U << 18),
345 PORT_DEV_STP_TRGT = (1U << 17),
346 PORT_DEV_SSP_INIT = (1U << 11),
347 PORT_DEV_SMP_INIT = (1U << 10),
348 PORT_DEV_STP_INIT = (1U << 9),
349 PORT_PHY_ID_MASK = (0xFFU << 24),
350 PORT_DEV_TRGT_MASK = (0x7U << 17),
351 PORT_DEV_INIT_MASK = (0x7U << 9),
352 PORT_DEV_TYPE_MASK = (0x7U << 0),
353
354 /* Port n PHY Status */
355 PHY_RDY = (1U << 2),
356 PHY_DW_SYNC = (1U << 1),
357 PHY_OOB_DTCTD = (1U << 0),
358
359 /* VSR */
360 /* PHYMODE 6 (CDB) */
361 PHY_MODE6_DTL_SPEED = (1U << 27),
362};
363
364enum mvs_info_flags {
365 MVF_MSI = (1U << 0), /* MSI is enabled */
366 MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
367};
368
369enum sas_cmd_port_registers {
370 CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */
371 CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */
372 CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */
373 CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */
374 CMD_OOB_SPACE = 0x110, /* OOB space control register */
375 CMD_OOB_BURST = 0x114, /* OOB burst control register */
376 CMD_PHY_TIMER = 0x118, /* PHY timer control register */
377 CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */
378 CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */
379 CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */
380 CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */
381 CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */
382 CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */
383 CMD_ID_TEST = 0x134, /* ID test register */
384 CMD_PL_TIMER = 0x138, /* PL timer register */
385 CMD_WD_TIMER = 0x13c, /* WD timer register */
386 CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */
387 CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */
388 CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */
389 CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */
390 CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */
391 CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */
392 CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
393 CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
394 CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
395 CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memroy BIST Status */
396 CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
397 CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
398 CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
399 CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
400 CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
401 CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */
402 CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */
403 CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */
404 CMD_RESET_COUNT = 0x188, /* Reset Count */
405 CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */
406 CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */
407 CMD_PHY_CTL = 0x194, /* PHY Control and Status */
408 CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */
409 CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */
410 CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */
411 CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */
412 CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */
413 CMD_HOST_CTL = 0x1AC, /* Host Control Status */
414 CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */
415 CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */
416 CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */
417 CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */
418 CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */
419 CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
420};
421
422/* SAS/SATA configuration port registers, aka phy registers */
423enum sas_sata_config_port_regs {
424 PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */
425 PHYR_ADDR_LO = 0x04, /* my SAS address (low) */
426 PHYR_ADDR_HI = 0x08, /* my SAS address (high) */
427 PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */
428 PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */
429 PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */
430 PHYR_SATA_CTL = 0x18, /* SATA control */
431 PHYR_PHY_STAT = 0x1C, /* PHY status */
432 PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */
433 PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */
434 PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */
435 PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */
436 PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */
437 PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */
438 PHYR_WIDE_PORT = 0x38, /* wide port participating */
439 PHYR_CURRENT0 = 0x80, /* current connection info 0 */
440 PHYR_CURRENT1 = 0x84, /* current connection info 1 */
441 PHYR_CURRENT2 = 0x88, /* current connection info 2 */
442};
443
/* SAS/SATA Vendor Specific Port Registers */
enum sas_sata_vsp_regs {
	VSR_PHY_STAT		= 0x00,	/* Phy Status */
	VSR_PHY_MODE1		= 0x01,	/* phy tx */
	VSR_PHY_MODE2		= 0x02,	/* tx scc */
	VSR_PHY_MODE3		= 0x03,	/* pll */
	VSR_PHY_MODE4		= 0x04,	/* VCO */
	VSR_PHY_MODE5		= 0x05,	/* Rx */
	VSR_PHY_MODE6		= 0x06,	/* CDR */
	VSR_PHY_MODE7		= 0x07,	/* Impedance */
	VSR_PHY_MODE8		= 0x08,	/* Voltage */
	VSR_PHY_MODE9		= 0x09,	/* Test */
	VSR_PHY_MODE10		= 0x0A,	/* Power */
	VSR_PHY_MODE11		= 0x0B,	/* Phy Mode */
	VSR_PHY_VS0		= 0x0C,	/* Vendor Specific 0 */
	VSR_PHY_VS1		= 0x0D,	/* Vendor Specific 1 */
};
461
/* PCI configuration space register offsets used by this driver */
enum pci_cfg_registers {
	PCR_PHY_CTL	= 0x40,
	PCR_PHY_CTL2	= 0x90,
	PCR_DEV_CTRL	= 0xE8,
};
467
/* bit fields within the PCI config registers above */
enum pci_cfg_register_bits {
	PCTL_PWR_ON	= (0xFU << 24),
	PCTL_OFF	= (0xFU << 12),
	PRD_REQ_SIZE	= (0x4000),
	PRD_REQ_MASK	= (0x00007000),
};
474
/* byte offsets of fields within the NVRAM (SPI EEPROM) layout */
enum nvram_layout_offsets {
	NVR_SIG		= 0x00,	/* 0xAA, 0x55 */
	NVR_SAS_ADDR	= 0x02,	/* 8-byte SAS address */
};
479
/* supported controller variants; index into mvs_chips[] */
enum chip_flavors {
	chip_6320,
	chip_6440,
	chip_6480,
};
485
/* phy/port attachment type bits (see mvs_phy.phy_type) */
enum port_type {
	PORT_TYPE_SAS	= (1L << 1),
	PORT_TYPE_SATA	= (1L << 0),
};
490
/* Command Table Format: per-protocol offsets/limits within the
 * command table region of a slot's DMA buffer */
enum ct_format {
	/* SSP */
	SSP_F_H		= 0x00,
	SSP_F_IU	= 0x18,
	SSP_F_MAX	= 0x4D,
	/* STP */
	STP_CMD_FIS	= 0x00,
	STP_ATAPI_CMD	= 0x40,	/* ATAPI CDB offset (see mvs_task_prep_ata) */
	STP_F_MAX	= 0x10,
	/* SMP */
	SMP_F_T		= 0x00,
	SMP_F_DEP	= 0x01,
	SMP_F_MAX	= 0x101,
};
506
/* layout of the per-slot status buffer */
enum status_buffer {
	SB_EIR_OFF	= 0x00,		/* Error Information Record */
	SB_RFB_OFF	= 0x08,		/* Response Frame Buffer */
	SB_RFB_MAX	= 0x400,	/* RFB size */
};
512
/* bits in the first dword of the error information record */
enum error_info_rec {
	CMD_ISS_STPD	= (1U << 31),	/* Cmd Issue Stopped */
};
516
/* static per-chip-flavor parameters (see mvs_chips[] table) */
struct mvs_chip_info {
	u32		n_phy;		/* number of phys */
	u32		srs_sz;		/* number of SATA register sets */
	u32		slot_width;	/* presumably log2 of slot count — TODO confirm */
};
522
/* error information record, written by hardware in little-endian */
struct mvs_err_info {
	__le32			flags;
	__le32			flags2;
};
527
/* physical region descriptor: one scatter/gather entry for hardware */
struct mvs_prd {
	__le64			addr;		/* 64-bit buffer address */
	__le32			reserved;
	__le32			len;		/* 16-bit length */
};
533
/* command header: one per slot in the command list (mvs_info.slot),
 * consumed by hardware; all fields little-endian */
struct mvs_cmd_hdr {
	__le32			flags;		/* PRD tbl len; SAS, SATA ctl */
	__le32			lens;		/* cmd, max resp frame len */
	__le32			tags;		/* targ port xfer tag; tag */
	__le32			data_len;	/* data xfer len */
	__le64			cmd_tbl;	/* command table address */
	__le64			open_frame;	/* open addr frame address */
	__le64			status_buf;	/* status buffer address */
	__le64			prd_tbl;	/* PRD tbl address */
	__le32			reserved[4];
};
545
/* per-slot bookkeeping for one in-flight command (MVS_SLOTS total) */
struct mvs_slot_info {
	struct sas_task		*task;		/* owning libsas task */
	u32			n_elem;		/* mapped s/g entries */
	u32			tx;		/* TX ring index this slot used */

	/* DMA buffer for storing cmd tbl, open addr frame, status buffer,
	 * and PRD table
	 */
	void			*buf;
	dma_addr_t		buf_dma;
#if _MV_DUMP
	u32			cmd_size;	/* command table size, for dumps */
#endif

	void			*response;	/* status buffer, inside *buf */
};
562
/* driver-private state layered on top of a libsas port */
struct mvs_port {
	struct asd_sas_port	sas_port;
	u8			port_attached;	/* nonzero once a device is attached */
	u8			taskfileset;	/* SATA register set, or MVS_ID_NOT_MAPPED */
	u8			wide_port_phymap;
};
569
/* driver-private per-phy state */
struct mvs_phy {
	struct mvs_port		*port;		/* port this phy belongs to */
	struct asd_sas_phy	sas_phy;
	struct sas_identify	identify;
	struct scsi_device	*sdev;
	u64			dev_sas_addr;
	u64			att_dev_sas_addr;	/* attached device address */
	u32			att_dev_info;
	u32			dev_info;
	u32			phy_type;	/* PORT_TYPE_SAS / PORT_TYPE_SATA */
	u32			phy_status;	/* last mvs_is_phy_ready() result */
	u32			irq_status;	/* last port interrupt status read */
	u32			frame_rcvd_size;
	u8			frame_rcvd[32];	/* IDENTIFY frame / signature FIS */
	u8			phy_attached;
};
586
/* per-HBA driver state */
struct mvs_info {
	unsigned long		flags;

	spinlock_t		lock;		/* host-wide lock */
	struct pci_dev		*pdev;		/* our device */
	void __iomem		*regs;		/* enhanced mode registers */
	void __iomem		*peri_regs;	/* peripheral registers */

	u8			sas_addr[SAS_ADDR_SIZE];
	struct sas_ha_struct	sas;		/* SCSI/SAS glue */
	struct Scsi_Host	*shost;

	__le32			*tx;		/* TX (delivery) DMA ring */
	dma_addr_t		tx_dma;
	u32			tx_prod;	/* cached next-producer idx */

	__le32			*rx;		/* RX (completion) DMA ring */
	dma_addr_t		rx_dma;
	u32			rx_cons;	/* RX consumer idx */

	__le32			*rx_fis;	/* RX'd FIS area */
	dma_addr_t		rx_fis_dma;

	struct mvs_cmd_hdr	*slot;		/* DMA command header slots */
	dma_addr_t		slot_dma;

	const struct mvs_chip_info *chip;

	/* free-tag ring: tags[] holds free tags between tag_out and tag_in */
	unsigned long		tags[MVS_SLOTS];
	struct mvs_slot_info	slot_info[MVS_SLOTS];
	/* further per-slot information */
	struct mvs_phy		phy[MVS_MAX_PHYS];
	struct mvs_port		port[MVS_MAX_PHYS];

	u32			can_queue;	/* per adapter */
	u32			tag_out;	/* next tag to hand out (Get) */
	u32			tag_in;		/* last tag given back (Give) */
};
625
/* list node wrapping an upper-layer task.
 * NOTE(review): not referenced anywhere in this part of the file. */
struct mvs_queue_task {
	struct list_head list;

	void   *uldd_task;
};
631
/* forward declarations for routines defined later in this file */
static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
			   void *funcdata);
static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port);
static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val);
static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port);
static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val);
static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val);
static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port);

static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
static void mvs_detect_porttype(struct mvs_info *mvi, int i);
static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);

static int mvs_scan_finished(struct Scsi_Host *, unsigned long);
static void mvs_scan_start(struct Scsi_Host *);
static int mvs_sas_slave_alloc(struct scsi_device *scsi_dev);

/* SAS transport template (registered elsewhere in this file) */
static struct scsi_transport_template *mvs_stt;
650
/* per-flavor parameters: { n_phy, srs_sz, slot_width } */
static const struct mvs_chip_info mvs_chips[] = {
	[chip_6320] =		{ 2, 16, 9  },
	[chip_6440] =		{ 4, 16, 9  },
	[chip_6480] =		{ 8, 32, 10 },
};
656
/* SCSI midlayer host template; most hooks delegate straight to libsas */
static struct scsi_host_template mvs_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= sas_slave_configure,
	.slave_destroy		= sas_slave_destroy,
	.scan_finished		= mvs_scan_finished,
	.scan_start		= mvs_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.change_queue_type	= sas_change_queue_type,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.cmd_per_lun		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler	= sas_eh_device_reset_handler,
	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
	.slave_alloc		= mvs_sas_slave_alloc,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
};
681
/*
 * Dump @size bytes at @data to the console, 16 per line, as hex plus
 * printable ASCII.  @baseaddr only labels each line with an address.
 * NOTE(review): raw printk() without a KERN_ level; debug helper only.
 */
static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
{
	u32 i;
	u32 run;
	u32 offset;

	offset = 0;
	while (size) {
		printk("%08X : ", baseaddr + offset);
		if (size >= 16)
			run = 16;
		else
			run = size;
		size -= run;
		/* hex column, padded out to 16 entries on the last line */
		for (i = 0; i < 16; i++) {
			if (i < run)
				printk("%02X ", (u32)data[i]);
			else
				printk(" ");
		}
		printk(": ");
		/* ASCII column: non-alphanumerics shown as '.' */
		for (i = 0; i < run; i++)
			printk("%c", isalnum(data[i]) ? data[i] : '.');
		printk("\n");
		data = &data[16];
		offset += run;
	}
	printk("\n");
}
711
/* dump the status buffer of slot @tag (compiled out unless _MV_DUMP) */
static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
			   enum sas_protocol proto)
{
#if _MV_DUMP
	u32 offset;
	struct pci_dev *pdev = mvi->pdev;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	/* status buffer follows cmd table, OAF and PRD table in slot->buf */
	offset = slot->cmd_size + MVS_OAF_SZ +
	    sizeof(struct mvs_prd) * slot->n_elem;
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n",
			tag);
	mvs_hexdump(32, (u8 *) slot->response,
		    (u32) slot->buf_dma + offset);
#endif
}
728
/*
 * Dump the delivery queue entry, command header and the whole slot DMA
 * buffer for slot @tag (compiled out unless _MV_DUMP).
 * NOTE(review): mr32(...) << 16 << 16 shifts the u32 to zero *before*
 * the u64 widening, so the high half of every address printed here is
 * always 0 — cast to u64 first to fix.  Debug-only, left as-is.
 */
static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
				enum sas_protocol proto)
{
#if _MV_DUMP
	u32 sz, w_ptr, r_ptr;
	u64 addr;
	void __iomem *regs = mvi->regs;
	struct pci_dev *pdev = mvi->pdev;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	/*Delivery Queue */
	sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
	w_ptr = mr32(TX_PROD_IDX) & TX_RING_SZ_MASK;
	r_ptr = mr32(TX_CONS_IDX) & TX_RING_SZ_MASK;
	addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Delivery Queue Size=%04d , WRT_PTR=%04X , RD_PTR=%04X\n",
		sz, w_ptr, r_ptr);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Delivery Queue Base Address=0x%llX (PA)"
		"(tx_dma=0x%llX), Entry=%04d\n",
		addr, mvi->tx_dma, w_ptr);
	mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
			(u32) mvi->tx_dma + sizeof(u32) * w_ptr);
	/*Command List */
	addr = mr32(CMD_LIST_HI) << 16 << 16 | mr32(CMD_LIST_LO);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Command List Base Address=0x%llX (PA)"
		"(slot_dma=0x%llX), Header=%03d\n",
		addr, mvi->slot_dma, tag);
	dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
	/*mvs_cmd_hdr */
	mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
		(u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
	/*1.command table area */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n");
	mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
	/*2.open address frame area */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n");
	mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
				(u32) slot->buf_dma + slot->cmd_size);
	/*3.status buffer */
	mvs_hba_sb_dump(mvi, tag, proto);
	/*4.PRD table */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n");
	mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem,
		(u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
		(u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
#endif
}
779
/*
 * Dump the next completion-queue entry (compiled out unless _MV_DUMP).
 * rx[0] mirrors the hardware producer index; the ring proper starts at
 * rx[1], hence the +1 when indexing.
 * NOTE(review): same u32-shift truncation as mvs_hba_memory_dump() —
 * the printed base address always has a zero high half.
 */
static void mvs_hba_cq_dump(struct mvs_info *mvi)
{
#if _MV_DUMP
	u64 addr;
	void __iomem *regs = mvi->regs;
	struct pci_dev *pdev = mvi->pdev;
	u32 entry = mvi->rx_cons + 1;
	u32 rx_desc = le32_to_cpu(mvi->rx[entry]);

	/*Completion Queue */
	addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
	dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%08X\n",
		   (u32) mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Completion List Base Address=0x%llX (PA), "
		"CQ_Entry=%04d, CQ_WP=0x%08X\n",
		addr, entry - 1, mvi->rx[0]);
	mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
			mvi->rx_dma + sizeof(u32) * entry);
#endif
}
801
802static void mvs_hba_interrupt_enable(struct mvs_info *mvi)
803{
804 void __iomem *regs = mvi->regs;
805 u32 tmp;
806
807 tmp = mr32(GBL_CTL);
808
809 mw32(GBL_CTL, tmp | INT_EN);
810}
811
812static void mvs_hba_interrupt_disable(struct mvs_info *mvi)
813{
814 void __iomem *regs = mvi->regs;
815 u32 tmp;
816
817 tmp = mr32(GBL_CTL);
818
819 mw32(GBL_CTL, tmp & ~INT_EN);
820}
821
/* forward declaration: completion-ring service routine (defined below) */
static int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
823
824/* move to PCI layer or libata core? */
825static int pci_go_64(struct pci_dev *pdev)
826{
827 int rc;
828
829 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
830 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
831 if (rc) {
832 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
833 if (rc) {
834 dev_printk(KERN_ERR, &pdev->dev,
835 "64-bit DMA enable failed\n");
836 return rc;
837 }
838 }
839 } else {
840 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
841 if (rc) {
842 dev_printk(KERN_ERR, &pdev->dev,
843 "32-bit DMA enable failed\n");
844 return rc;
845 }
846 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
847 if (rc) {
848 dev_printk(KERN_ERR, &pdev->dev,
849 "32-bit consistent DMA enable failed\n");
850 return rc;
851 }
852 }
853
854 return rc;
855}
856
857static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
858{
859 mvi->tag_in = (mvi->tag_in + 1) & (MVS_SLOTS - 1);
860 mvi->tags[mvi->tag_in] = tag;
861}
862
863static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
864{
865 mvi->tag_out = (mvi->tag_out - 1) & (MVS_SLOTS - 1);
866}
867
868static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
869{
870 if (mvi->tag_out != mvi->tag_in) {
871 *tag_out = mvi->tags[mvi->tag_out];
872 mvi->tag_out = (mvi->tag_out + 1) & (MVS_SLOTS - 1);
873 return 0;
874 }
875 return -EBUSY;
876}
877
878static void mvs_tag_init(struct mvs_info *mvi)
879{
880 int i;
881 for (i = 0; i < MVS_SLOTS; ++i)
882 mvi->tags[i] = i;
883 mvi->tag_out = 0;
884 mvi->tag_in = MVS_SLOTS - 1;
885}
886
887#ifndef MVS_DISABLE_NVRAM
888static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data)
889{
890 int timeout = 1000;
891
892 if (addr & ~SPI_ADDR_MASK)
893 return -EINVAL;
894
895 writel(addr, regs + SPI_CMD);
896 writel(TWSI_RD, regs + SPI_CTL);
897
898 while (timeout-- > 0) {
899 if (readl(regs + SPI_CTL) & TWSI_RDY) {
900 *data = readl(regs + SPI_DATA);
901 return 0;
902 }
903
904 udelay(10);
905 }
906
907 return -EBUSY;
908}
909
910static int mvs_eep_read_buf(void __iomem *regs, u32 addr,
911 void *buf, u32 buflen)
912{
913 u32 addr_end, tmp_addr, i, j;
914 u32 tmp = 0;
915 int rc;
916 u8 *tmp8, *buf8 = buf;
917
918 addr_end = addr + buflen;
919 tmp_addr = ALIGN(addr, 4);
920 if (addr > 0xff)
921 return -EINVAL;
922
923 j = addr & 0x3;
924 if (j) {
925 rc = mvs_eep_read(regs, tmp_addr, &tmp);
926 if (rc)
927 return rc;
928
929 tmp8 = (u8 *)&tmp;
930 for (i = j; i < 4; i++)
931 *buf8++ = tmp8[i];
932
933 tmp_addr += 4;
934 }
935
936 for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) {
937 rc = mvs_eep_read(regs, tmp_addr, &tmp);
938 if (rc)
939 return rc;
940
941 memcpy(buf8, &tmp, 4);
942 buf8 += 4;
943 }
944
945 if (tmp_addr < addr_end) {
946 rc = mvs_eep_read(regs, tmp_addr, &tmp);
947 if (rc)
948 return rc;
949
950 tmp8 = (u8 *)&tmp;
951 j = addr_end - tmp_addr;
952 for (i = 0; i < j; i++)
953 *buf8++ = tmp8[i];
954
955 tmp_addr += 4;
956 }
957
958 return 0;
959}
960#endif
961
962static int mvs_nvram_read(struct mvs_info *mvi, u32 addr,
963 void *buf, u32 buflen)
964{
965#ifndef MVS_DISABLE_NVRAM
966 void __iomem *regs = mvi->regs;
967 int rc, i;
968 u32 sum;
969 u8 hdr[2], *tmp;
970 const char *msg;
971
972 rc = mvs_eep_read_buf(regs, addr, &hdr, 2);
973 if (rc) {
974 msg = "nvram hdr read failed";
975 goto err_out;
976 }
977 rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen);
978 if (rc) {
979 msg = "nvram read failed";
980 goto err_out;
981 }
982
983 if (hdr[0] != 0x5A) {
984 /* entry id */
985 msg = "invalid nvram entry id";
986 rc = -ENOENT;
987 goto err_out;
988 }
989
990 tmp = buf;
991 sum = ((u32)hdr[0]) + ((u32)hdr[1]);
992 for (i = 0; i < buflen; i++)
993 sum += ((u32)tmp[i]);
994
995 if (sum) {
996 msg = "nvram checksum failure";
997 rc = -EILSEQ;
998 goto err_out;
999 }
1000
1001 return 0;
1002
1003err_out:
1004 dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg);
1005 return rc;
1006#else
1007 /* FIXME , For SAS target mode */
1008 memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8);
1009 return 0;
1010#endif
1011}
1012
1013static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
1014{
1015 struct mvs_phy *phy = &mvi->phy[i];
1016
1017 if (!phy->phy_attached)
1018 return;
1019
1020 if (phy->phy_type & PORT_TYPE_SAS) {
1021 struct sas_identify_frame *id;
1022
1023 id = (struct sas_identify_frame *)phy->frame_rcvd;
1024 id->dev_type = phy->identify.device_type;
1025 id->initiator_bits = SAS_PROTOCOL_ALL;
1026 id->target_bits = phy->identify.target_port_protocols;
1027 } else if (phy->phy_type & PORT_TYPE_SATA) {
1028 /* TODO */
1029 }
1030 mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size;
1031 mvi->sas.notify_port_event(mvi->sas.sas_phy[i],
1032 PORTE_BYTES_DMAED);
1033}
1034
1035static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
1036{
1037 /* give the phy enabling interrupt event time to come in (1s
1038 * is empirically about all it takes) */
1039 if (time < HZ)
1040 return 0;
1041 /* Wait for discovery to finish */
1042 scsi_flush_work(shost);
1043 return 1;
1044}
1045
1046static void mvs_scan_start(struct Scsi_Host *shost)
1047{
1048 int i;
1049 struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha;
1050
1051 for (i = 0; i < mvi->chip->n_phy; ++i) {
1052 mvs_bytes_dmaed(mvi, i);
1053 }
1054}
1055
/* slave_alloc hook: defer entirely to libsas */
static int mvs_sas_slave_alloc(struct scsi_device *scsi_dev)
{
	return sas_slave_alloc(scsi_dev);
}
1064
/*
 * Service a per-phy event interrupt: phy drop-out (POOF), OOB/identify
 * completion, SATA signature-FIS reception, and broadcast-change
 * notifications.  Reads the port interrupt status, dispatches libsas
 * events, and finally acks every bit it saw.
 */
static void mvs_int_port(struct mvs_info *mvi, int port_no, u32 events)
{
	struct pci_dev *pdev = mvi->pdev;
	struct sas_ha_struct *sas_ha = &mvi->sas;
	struct mvs_phy *phy = &mvi->phy[port_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->irq_status = mvs_read_port_irq_stat(mvi, port_no);
	/*
	* events is port event now ,
	* we need check the interrupt status which belongs to per port.
	*/
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Port %d Event = %X\n",
		port_no, phy->irq_status);

	if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
		/* phy dropped out: report loss of signal if it really is
		 * gone, otherwise kick a link reset to recover */
		if (!mvs_is_phy_ready(mvi, port_no)) {
			sas_phy_disconnected(sas_phy);
			sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		} else
			mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
	}
	if (!(phy->irq_status & PHYEV_DEC_ERR)) {
		if (phy->irq_status & PHYEV_COMWAKE) {
			/* COMWAKE seen: unmask signature-FIS events so we
			 * catch the device's identification */
			u32 tmp = mvs_read_port_irq_mask(mvi, port_no);
			mvs_write_port_irq_mask(mvi, port_no,
					tmp | PHYEV_SIG_FIS);
		}
		if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
			phy->phy_status = mvs_is_phy_ready(mvi, port_no);
			if (phy->phy_status) {
				mvs_detect_porttype(mvi, port_no);

				/* SATA phy is up: no further signature-FIS
				 * events needed, mask them again */
				if (phy->phy_type & PORT_TYPE_SATA) {
					u32 tmp = mvs_read_port_irq_mask(mvi,
								port_no);
					tmp &= ~PHYEV_SIG_FIS;
					mvs_write_port_irq_mask(mvi,
								port_no, tmp);
				}

				mvs_update_phyinfo(mvi, port_no, 0);
				sas_ha->notify_phy_event(sas_phy,
							PHYE_OOB_DONE);
				mvs_bytes_dmaed(mvi, port_no);
			} else {
				dev_printk(KERN_DEBUG, &pdev->dev,
					"plugin interrupt but phy is gone\n");
				mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
						NULL);
			}
		} else if (phy->irq_status & PHYEV_BROAD_CH)
			sas_ha->notify_port_event(sas_phy,
						PORTE_BROADCAST_RCVD);
	}
	/* ack everything we observed above */
	mvs_write_port_irq_stat(mvi, port_no, phy->irq_status);
}
1123
/* SATA (SRS) interrupt handler — not yet implemented */
static void mvs_int_sata(struct mvs_info *mvi)
{
	/* FIXME */
}
1128
1129static void mvs_slot_free(struct mvs_info *mvi, struct sas_task *task,
1130 struct mvs_slot_info *slot, u32 slot_idx)
1131{
1132 if (!sas_protocol_ata(task->task_proto))
1133 if (slot->n_elem)
1134 pci_unmap_sg(mvi->pdev, task->scatter,
1135 slot->n_elem, task->data_dir);
1136
1137 switch (task->task_proto) {
1138 case SAS_PROTOCOL_SMP:
1139 pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1,
1140 PCI_DMA_FROMDEVICE);
1141 pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1,
1142 PCI_DMA_TODEVICE);
1143 break;
1144
1145 case SAS_PROTOCOL_SATA:
1146 case SAS_PROTOCOL_STP:
1147 case SAS_PROTOCOL_SSP:
1148 default:
1149 /* do nothing */
1150 break;
1151 }
1152
1153 slot->task = NULL;
1154 mvs_tag_clear(mvi, slot_idx);
1155}
1156
1157static void mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1158 u32 slot_idx)
1159{
1160 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1161 u64 err_dw0 = *(u32 *) slot->response;
1162 void __iomem *regs = mvi->regs;
1163 u32 tmp;
1164
1165 if (err_dw0 & CMD_ISS_STPD)
1166 if (sas_protocol_ata(task->task_proto)) {
1167 tmp = mr32(INT_STAT_SRS);
1168 mw32(INT_STAT_SRS, tmp & 0xFFFF);
1169 }
1170
1171 mvs_hba_sb_dump(mvi, slot_idx, task->task_proto);
1172}
1173
/*
 * Complete the command referenced by RX descriptor @rx_desc: translate
 * the hardware completion into libsas task_status, free the slot, and
 * invoke the task's completion callback.  Returns the SAM status that
 * was set, or -1 if the task had already been aborted.
 */
static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	struct sas_task *task = slot->task;
	struct task_status_struct *tstat = &task->task_status;
	struct mvs_port *port = &mvi->port[task->dev->port->id];
	bool aborted;
	void *to;

	/* mark the task done unless an abort raced ahead of us */
	spin_lock(&task->task_state_lock);
	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
	if (!aborted) {
		task->task_state_flags &=
		    ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
	}
	spin_unlock(&task->task_state_lock);

	if (aborted)
		return -1;

	memset(tstat, 0, sizeof(*tstat));
	tstat->resp = SAS_TASK_COMPLETE;


	if (unlikely(!port->port_attached)) {
		tstat->stat = SAS_PHY_DOWN;
		goto out;
	}

	/* error info record present */
	if ((rx_desc & RXQ_ERR) && (*(u64 *) slot->response)) {
		tstat->stat = SAM_CHECK_COND;
		mvs_slot_err(mvi, task, slot_idx);
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		/* hw says status == 0, datapres == 0 */
		if (rx_desc & RXQ_GOOD) {
			tstat->stat = SAM_GOOD;
			tstat->resp = SAS_TASK_COMPLETE;
		}
		/* response frame present */
		else if (rx_desc & RXQ_RSP) {
			struct ssp_response_iu *iu =
			    slot->response + sizeof(struct mvs_err_info);
			sas_ssp_task_response(&mvi->pdev->dev, task, iu);
		}

		/* should never happen? */
		else
			tstat->stat = SAM_CHECK_COND;
		break;

	case SAS_PROTOCOL_SMP: {
			struct scatterlist *sg_resp = &task->smp_task.smp_resp;
			tstat->stat = SAM_GOOD;
			/* copy the SMP response out of the slot's status
			 * buffer into the caller's response scatterlist */
			to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
			memcpy(to + sg_resp->offset,
			       slot->response + sizeof(struct mvs_err_info),
			       sg_dma_len(sg_resp));
			kunmap_atomic(to, KM_IRQ0);
			break;
		}

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
			struct ata_task_resp *resp =
			    (struct ata_task_resp *)tstat->buf;

			if ((rx_desc & (RXQ_DONE | RXQ_ERR | RXQ_ATTN)) ==
			    RXQ_DONE)
				tstat->stat = SAM_GOOD;
			else
				tstat->stat = SAM_CHECK_COND;

			/* hand the received D2H FIS back up the stack */
			resp->frame_len = sizeof(struct dev_to_host_fis);
			memcpy(&resp->ending_fis[0],
			       SATA_RECEIVED_D2H_FIS(port->taskfileset),
			       sizeof(struct dev_to_host_fis));
			if (resp->ending_fis[2] & ATA_ERR)
				mvs_hexdump(16, resp->ending_fis, 0);
			break;
		}

	default:
		tstat->stat = SAM_CHECK_COND;
		break;
	}

out:
	mvs_slot_free(mvi, task, slot, slot_idx);
	task->task_done(task);
	/* NOTE(review): tstat points into *task and is read after
	 * task_done(); assumes the completion callback does not free
	 * the task — confirm */
	return tstat->stat;
}
1273
1274static void mvs_int_full(struct mvs_info *mvi)
1275{
1276 void __iomem *regs = mvi->regs;
1277 u32 tmp, stat;
1278 int i;
1279
1280 stat = mr32(INT_STAT);
1281
1282 mvs_int_rx(mvi, false);
1283
1284 for (i = 0; i < MVS_MAX_PORTS; i++) {
1285 tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
1286 if (tmp)
1287 mvs_int_port(mvi, i, tmp);
1288 }
1289
1290 if (stat & CINT_SRS)
1291 mvs_int_sata(mvi);
1292
1293 mw32(INT_STAT, stat);
1294}
1295
/*
 * Drain the RX (completion) ring, completing each referenced slot.
 * @self_clear: when set (MSI path), run the full interrupt handler if
 * an attention descriptor was seen.  Always returns 0.
 */
static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
{
	void __iomem *regs = mvi->regs;
	u32 rx_prod_idx, rx_desc;
	bool attn = false;
	struct pci_dev *pdev = mvi->pdev;

	/* the first dword in the RX ring is special: it contains
	 * a mirror of the hardware's RX producer index, so that
	 * we don't have to stall the CPU reading that register.
	 * The actual RX ring is offset by one dword, due to this.
	 */
	rx_prod_idx = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;
	if (rx_prod_idx == 0xfff) {	/* h/w hasn't touched RX ring yet */
		mvi->rx_cons = 0xfff;
		return 0;
	}

	/* The CMPL_Q may come late, read from register and try again
	 * note: if coalescing is enabled,
	 * it will need to read from register every time for sure
	 */
	if (mvi->rx_cons == rx_prod_idx)
		return 0;

	/* first real entry after the reset sentinel */
	if (mvi->rx_cons == 0xfff)
		mvi->rx_cons = MVS_RX_RING_SZ - 1;

	while (mvi->rx_cons != rx_prod_idx) {

		/* increment our internal RX consumer pointer */
		mvi->rx_cons = (mvi->rx_cons + 1) & (MVS_RX_RING_SZ - 1);

		/* +1 skips the producer-index mirror dword */
		rx_desc = le32_to_cpu(mvi->rx[mvi->rx_cons + 1]);

		mvs_hba_cq_dump(mvi);

		if (likely(rx_desc & RXQ_DONE))
			mvs_slot_complete(mvi, rx_desc);
		if (rx_desc & RXQ_ATTN) {
			attn = true;
			dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
				rx_desc);
		} else if (rx_desc & RXQ_ERR) {
			dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
				rx_desc);
		}
	}

	if (attn && self_clear)
		mvs_int_full(mvi);

	return 0;
}
1350
1351static irqreturn_t mvs_interrupt(int irq, void *opaque)
1352{
1353 struct mvs_info *mvi = opaque;
1354 void __iomem *regs = mvi->regs;
1355 u32 stat;
1356
1357 stat = mr32(GBL_INT_STAT);
1358
1359 /* clear CMD_CMPLT ASAP */
1360 mw32_f(INT_STAT, CINT_DONE);
1361
1362 if (stat == 0 || stat == 0xffffffff)
1363 return IRQ_NONE;
1364
1365 spin_lock(&mvi->lock);
1366
1367 mvs_int_full(mvi);
1368
1369 spin_unlock(&mvi->lock);
1370
1371 return IRQ_HANDLED;
1372}
1373
1374#ifndef MVS_DISABLE_MSI
1375static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
1376{
1377 struct mvs_info *mvi = opaque;
1378
1379 spin_lock(&mvi->lock);
1380
1381 mvs_int_rx(mvi, true);
1382
1383 spin_unlock(&mvi->lock);
1384
1385 return IRQ_HANDLED;
1386}
1387#endif
1388
/* scratch context passed between task-prep helpers for one command */
struct mvs_task_exec_info {
	struct sas_task	*task;		/* task being prepared */
	struct mvs_cmd_hdr *hdr;	/* its command header slot */
	struct mvs_port	*port;
	u32		tag;		/* allocated slot tag */
	int		n_elem;		/* mapped s/g entries */
};
1396
/*
 * Build the delivery-queue entry and command slot for an SMP request.
 * Lays out the slot's DMA buffer as [cmd table][open address frame]
 * [PRD table][status buffer], then fills in the TX ring entry, command
 * header, and open address frame.  Returns 0 on success, -ENOMEM on a
 * DMA mapping failure, or -EINVAL when the request/response lengths
 * are not dword multiples.
 */
static int mvs_task_prep_smp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	int elem, rc, i;
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len, tag = tei->tag;
	void *buf_tmp;
	u8 *buf_oaf;
	dma_addr_t buf_tmp_dma;
	struct mvs_prd *buf_prd;
	struct scatterlist *sg;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];
	struct asd_sas_port *sas_port = task->dev->port;
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
#if _MV_DUMP
	u8 *buf_cmd;
	void *from;
#endif
	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = sg_dma_len(sg_resp);

	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

#if _MV_DUMP
	/* debug builds copy the request into the slot buffer so it can
	 * be dumped later; otherwise DMA directly from the request s/g */
	buf_cmd = buf_tmp;
	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
	buf_tmp += req_len;
	buf_tmp_dma += req_len;
	slot->cmd_size = req_len;
#else
	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = sizeof(struct mvs_prd) * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);

	/*
	 * Fill in TX ring and command slot header
	 */
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
					TXQ_MODE_I | tag |
					(sas_port->phy_mask << TXQ_PHY_SHIFT));

	hdr->flags |= flags;
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = 0;

	/* generate open address frame hdr (first 12 bytes) */
	buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */
	buf_oaf[1] = task->dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	for_each_sg(task->scatter, sg, tei->n_elem, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}

#if _MV_DUMP
	/* copy cmd table */
	from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
	memcpy(buf_cmd, from + sg_req->offset, req_len);
	kunmap_atomic(from, KM_IRQ0);
#endif
	return 0;

err_out_2:
	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1,
		     PCI_DMA_FROMDEVICE);
err_out:
	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1,
		     PCI_DMA_TODEVICE);
	return rc;
}
1522
/*
 * Release the SATA register set assigned to @port: clear its enable
 * bit (sets 0-15 live in PCS, 16+ in CTL), clear any stale SRS
 * interrupt for it, and mark the port unmapped.  No-op when the port
 * has no set assigned.
 */
static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port)
{
	void __iomem *regs = mvi->regs;
	u32 tmp, offs;
	u8 *tfs = &port->taskfileset;

	if (*tfs == MVS_ID_NOT_MAPPED)
		return;

	offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
	if (*tfs < 16) {
		tmp = mr32(PCS);
		mw32(PCS, tmp & ~offs);
	} else {
		tmp = mr32(CTL);
		mw32(CTL, tmp & ~offs);
	}

	/* clear any pending SRS interrupt for this set */
	tmp = mr32(INT_STAT_SRS) & (1U << *tfs);
	if (tmp)
		mw32(INT_STAT_SRS, tmp);

	*tfs = MVS_ID_NOT_MAPPED;
}
1547
/*
 * Assign a free SATA register set to @port.  Returns 0 on success (or
 * if the port already has one), MVS_ID_NOT_MAPPED when all
 * chip->srs_sz sets are busy.  Enable bits for sets 0-15 live in PCS,
 * 16+ in CTL.
 */
static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port)
{
	int i;
	u32 tmp, offs;
	void __iomem *regs = mvi->regs;

	if (port->taskfileset != MVS_ID_NOT_MAPPED)
		return 0;

	tmp = mr32(PCS);

	for (i = 0; i < mvi->chip->srs_sz; i++) {
		/* switch to the CTL bank for sets 16 and up */
		if (i == 16)
			tmp = mr32(CTL);
		offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
		if (!(tmp & offs)) {
			port->taskfileset = i;

			if (i < 16)
				mw32(PCS, tmp | offs);
			else
				mw32(CTL, tmp | offs);
			/* clear any stale SRS interrupt for this set */
			tmp = mr32(INT_STAT_SRS) & (1U << i);
			if (tmp)
				mw32(INT_STAT_SRS, tmp);
			return 0;
		}
	}
	return MVS_ID_NOT_MAPPED;
}
1578
1579static u32 mvs_get_ncq_tag(struct sas_task *task)
1580{
1581 u32 tag = 0;
1582 struct ata_queued_cmd *qc = task->uldd_task;
1583
1584 if (qc)
1585 tag = qc->tag;
1586
1587 return tag;
1588}
1589
1590static int mvs_task_prep_ata(struct mvs_info *mvi,
1591 struct mvs_task_exec_info *tei)
1592{
1593 struct sas_task *task = tei->task;
1594 struct domain_device *dev = task->dev;
1595 struct mvs_cmd_hdr *hdr = tei->hdr;
1596 struct asd_sas_port *sas_port = dev->port;
1597 struct mvs_slot_info *slot;
1598 struct scatterlist *sg;
1599 struct mvs_prd *buf_prd;
1600 struct mvs_port *port = tei->port;
1601 u32 tag = tei->tag;
1602 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
1603 void *buf_tmp;
1604 u8 *buf_cmd, *buf_oaf;
1605 dma_addr_t buf_tmp_dma;
1606 u32 i, req_len, resp_len;
1607 const u32 max_resp_len = SB_RFB_MAX;
1608
1609 if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED)
1610 return -EBUSY;
1611
1612 slot = &mvi->slot_info[tag];
1613 slot->tx = mvi->tx_prod;
1614 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
1615 (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
1616 (sas_port->phy_mask << TXQ_PHY_SHIFT) |
1617 (port->taskfileset << TXQ_SRS_SHIFT));
1618
1619 if (task->ata_task.use_ncq)
1620 flags |= MCH_FPDMA;
1621 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
1622 if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
1623 flags |= MCH_ATAPI;
1624 }
1625
1626 /* FIXME: fill in port multiplier number */
1627
1628 hdr->flags = cpu_to_le32(flags);
1629
1630 /* FIXME: the low order order 5 bits for the TAG if enable NCQ */
1631 if (task->ata_task.use_ncq) {
1632 hdr->tags = cpu_to_le32(mvs_get_ncq_tag(task));
1633 /*Fill in task file */
1634 task->ata_task.fis.sector_count = hdr->tags << 3;
1635 } else
1636 hdr->tags = cpu_to_le32(tag);
1637 hdr->data_len = cpu_to_le32(task->total_xfer_len);
1638
1639 /*
1640 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1641 */
1642
1643 /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
1644 buf_cmd = buf_tmp = slot->buf;
1645 buf_tmp_dma = slot->buf_dma;
1646
1647 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1648
1649 buf_tmp += MVS_ATA_CMD_SZ;
1650 buf_tmp_dma += MVS_ATA_CMD_SZ;
1651#if _MV_DUMP
1652 slot->cmd_size = MVS_ATA_CMD_SZ;
1653#endif
1654
1655 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1656 /* used for STP. unused for SATA? */
1657 buf_oaf = buf_tmp;
1658 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1659
1660 buf_tmp += MVS_OAF_SZ;
1661 buf_tmp_dma += MVS_OAF_SZ;
1662
1663 /* region 3: PRD table ********************************************* */
1664 buf_prd = buf_tmp;
1665 if (tei->n_elem)
1666 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1667 else
1668 hdr->prd_tbl = 0;
1669
1670 i = sizeof(struct mvs_prd) * tei->n_elem;
1671 buf_tmp += i;
1672 buf_tmp_dma += i;
1673
1674 /* region 4: status buffer (larger the PRD, smaller this buf) ****** */
1675 /* FIXME: probably unused, for SATA. kept here just in case
1676 * we get a STP/SATA error information record
1677 */
1678 slot->response = buf_tmp;
1679 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1680
1681 req_len = sizeof(struct host_to_dev_fis);
1682 resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
1683 sizeof(struct mvs_err_info) - i;
1684
1685 /* request, response lengths */
1686 resp_len = min(resp_len, max_resp_len);
1687 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
1688
1689 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
1690 /* fill in command FIS and ATAPI CDB */
1691 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
1692 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
1693 memcpy(buf_cmd + STP_ATAPI_CMD,
1694 task->ata_task.atapi_packet, 16);
1695
1696 /* generate open address frame hdr (first 12 bytes) */
1697 buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */
1698 buf_oaf[1] = task->dev->linkrate & 0xf;
1699 *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
1700 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
1701
1702 /* fill in PRD (scatter/gather) table, if any */
1703 for_each_sg(task->scatter, sg, tei->n_elem, i) {
1704 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
1705 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
1706 buf_prd++;
1707 }
1708
1709 return 0;
1710}
1711
/*
 * mvs_task_prep_ssp - build an SSP command slot for a sas_task.
 * @mvi: host instance
 * @tei: per-command execution info (task, slot tag, command header, port)
 *
 * Queues the slot in the TX (delivery) ring and lays out the slot's DMA
 * buffer as the hardware expects: SSP command table (frame header +
 * command IU), open address frame, PRD table, and status buffer.  The
 * region order and sizes are part of the hardware contract.
 *
 * Always returns 0.
 */
static int mvs_task_prep_ssp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct mvs_port *port = tei->port;
	struct mvs_slot_info *slot;
	struct scatterlist *sg;
	struct mvs_prd *buf_prd;
	struct ssp_frame_hdr *ssp_hdr;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf, fburst = 0;
	dma_addr_t buf_tmp_dma;
	u32 flags;
	u32 resp_len, req_len, i, tag = tei->tag;
	const u32 max_resp_len = SB_RFB_MAX;

	slot = &mvi->slot_info[tag];

	/* queue the slot in the delivery ring; wide-port phymap selects
	 * which phys the frame may go out on */
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
				(port->wide_port_phymap << TXQ_PHY_SHIFT));

	flags = MCH_RETRY;
	if (task->ssp_task.enable_first_burst) {
		flags |= MCH_FBURST;
		fburst = (1 << 7);	/* first-burst bit in the command IU */
	}
	hdr->flags = cpu_to_le32(flags |
				 (tei->n_elem << MCH_PRD_LEN_SHIFT) |
				 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));

	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_SSP_CMD_SZ;
	buf_tmp_dma += MVS_SSP_CMD_SZ;
#if _MV_DUMP
	slot->cmd_size = MVS_SSP_CMD_SZ;
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = sizeof(struct mvs_prd) * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);

	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
	    sizeof(struct mvs_err_info) - i;
	resp_len = min(resp_len, max_resp_len);

	req_len = sizeof(struct ssp_frame_hdr) + 28;

	/* request, response lengths (in 32-bit dwords, packed in one reg) */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	/* generate open address frame hdr (first 12 bytes) */
	buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1;	/* initiator, SSP, ftype 1h */
	buf_oaf[1] = task->dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in SSP frame header (Command Table.SSP frame header) */
	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
	ssp_hdr->frame_type = SSP_COMMAND;
	memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr,
	       HASHED_SAS_ADDR_SIZE);
	memcpy(ssp_hdr->hashed_src_addr,
	       task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	ssp_hdr->tag = cpu_to_be16(tag);

	/* fill in command frame IU */
	buf_cmd += sizeof(*ssp_hdr);
	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
	/* byte 9 of the command IU: first burst, task attribute, priority */
	buf_cmd[9] = fburst | task->ssp_task.task_attr |
			(task->ssp_task.task_prio << 3);
	memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);

	/* fill in PRD (scatter/gather) table, if any */
	for_each_sg(task->scatter, sg, tei->n_elem, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}

	return 0;
}
1826
/*
 * mvs_task_exec - libsas execute-task entry point.
 * @task:      first task of a list of @num tasks to queue
 * @num:       number of tasks in the list
 * @gfp_flags: allocation context hint (unused here)
 *
 * For each task: maps the scatterlist (non-ATA only; ATA tasks arrive
 * pre-mapped by libata), allocates a slot tag, builds the protocol-
 * specific command slot, and finally rings the TX producer doorbell.
 * The whole batch is prepared under mvi->lock.
 *
 * Returns 0 on success or a negative errno; on error, slots already
 * prepared in this call (pass > 0) are still made visible to the HW
 * via the doorbell write in exec_exit.
 */
static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
{
	struct domain_device *dev = task->dev;
	struct mvs_info *mvi = dev->port->ha->lldd_ha;
	struct pci_dev *pdev = mvi->pdev;
	void __iomem *regs = mvi->regs;
	struct mvs_task_exec_info tei;
	struct sas_task *t = task;
	u32 tag = 0xdeadbeef, rc, n_elem = 0;
	unsigned long flags;
	u32 n = num, pass = 0;

	spin_lock_irqsave(&mvi->lock, flags);

	do {
		tei.port = &mvi->port[dev->port->id];

		/* port gone: complete the task immediately as PHY_DOWN.
		 * NOTE(review): t->task_done() is invoked while holding
		 * mvi->lock here -- confirm the completion path never
		 * re-enters this driver or the same lock. */
		if (!tei.port->port_attached) {
			struct task_status_struct *ts = &t->task_status;
			ts->stat = SAS_PHY_DOWN;
			t->task_done(t);
			rc = 0;
			goto exec_exit;
		}
		if (!sas_protocol_ata(t->task_proto)) {
			if (t->num_scatter) {
				n_elem = pci_map_sg(mvi->pdev, t->scatter,
						    t->num_scatter,
						    t->data_dir);
				if (!n_elem) {
					rc = -ENOMEM;
					goto err_out;
				}
			}
		} else {
			/* ATA tasks are already DMA-mapped by libata */
			n_elem = t->num_scatter;
		}

		rc = mvs_tag_alloc(mvi, &tag);
		if (rc)
			goto err_out;

		mvi->slot_info[tag].task = t;
		mvi->slot_info[tag].n_elem = n_elem;
		memset(mvi->slot_info[tag].buf, 0, MVS_SLOT_BUF_SZ);
		tei.task = t;
		tei.hdr = &mvi->slot[tag];
		tei.tag = tag;
		tei.n_elem = n_elem;

		switch (t->task_proto) {
		case SAS_PROTOCOL_SMP:
			rc = mvs_task_prep_smp(mvi, &tei);
			break;
		case SAS_PROTOCOL_SSP:
			rc = mvs_task_prep_ssp(mvi, &tei);
			break;
		case SAS_PROTOCOL_SATA:
		case SAS_PROTOCOL_STP:
		case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
			rc = mvs_task_prep_ata(mvi, &tei);
			break;
		default:
			dev_printk(KERN_ERR, &pdev->dev,
				"unknown sas_task proto: 0x%x\n",
				t->task_proto);
			rc = -EINVAL;
			break;
		}

		if (rc)
			goto err_out_tag;

		/* TODO: select normal or high priority */

		spin_lock(&t->task_state_lock);
		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
		spin_unlock(&t->task_state_lock);

		/* last task of the batch: drop the lock and ring the
		 * doorbell before advancing tx_prod */
		if (n == 1) {
			spin_unlock_irqrestore(&mvi->lock, flags);
			mw32(TX_PROD_IDX, mvi->tx_prod);
		}
		mvs_hba_memory_dump(mvi, tag, t->task_proto);

		++pass;
		mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);

		if (n == 1)
			break;

		t = list_entry(t->list.next, struct sas_task, list);
	} while (--n);

	return 0;

err_out_tag:
	mvs_tag_free(mvi, tag);
err_out:
	dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc);
	if (!sas_protocol_ata(t->task_proto))
		if (n_elem)
			pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
				     t->data_dir);
exec_exit:
	/* make any already-prepared slots visible to the hardware */
	if (pass)
		mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
	spin_unlock_irqrestore(&mvi->lock, flags);
	return rc;
}
1937
1938static int mvs_task_abort(struct sas_task *task)
1939{
1940 int rc = 1;
1941 unsigned long flags;
1942 struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
1943 struct pci_dev *pdev = mvi->pdev;
1944
1945 spin_lock_irqsave(&task->task_state_lock, flags);
1946 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1947 rc = TMF_RESP_FUNC_COMPLETE;
1948 goto out_done;
1949 }
1950 spin_unlock_irqrestore(&task->task_state_lock, flags);
1951
1952 /*FIXME*/
1953 rc = TMF_RESP_FUNC_COMPLETE;
1954
1955 switch (task->task_proto) {
1956 case SAS_PROTOCOL_SMP:
1957 dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! ");
1958 break;
1959 case SAS_PROTOCOL_SSP:
1960 dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! ");
1961 break;
1962 case SAS_PROTOCOL_SATA:
1963 case SAS_PROTOCOL_STP:
1964 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{
1965 dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! "
1966 "Dump D2H FIS: \n");
1967 mvs_hexdump(sizeof(struct host_to_dev_fis),
1968 (void *)&task->ata_task.fis, 0);
1969 dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n");
1970 mvs_hexdump(16, task->ata_task.atapi_packet, 0);
1971 break;
1972 }
1973 default:
1974 break;
1975 }
1976out_done:
1977 return rc;
1978}
1979
/*
 * mvs_free - release everything owned by a (possibly partially
 * constructed) mvs_info.
 *
 * Safe to call from mvs_alloc() error paths: every member is checked
 * before being freed.  Tears down per-slot DMA buffers, the TX/RX
 * rings, the RX FIS area, the command-header array, register mappings,
 * the SCSI host reference, and the libsas phy/port arrays, then the
 * mvs_info itself.
 */
static void mvs_free(struct mvs_info *mvi)
{
	int i;

	if (!mvi)
		return;

	for (i = 0; i < MVS_SLOTS; i++) {
		struct mvs_slot_info *slot = &mvi->slot_info[i];

		if (slot->buf)
			dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ,
					  slot->buf, slot->buf_dma);
	}

	if (mvi->tx)
		dma_free_coherent(&mvi->pdev->dev,
				  sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
				  mvi->tx, mvi->tx_dma);
	if (mvi->rx_fis)
		dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ,
				  mvi->rx_fis, mvi->rx_fis_dma);
	if (mvi->rx)
		dma_free_coherent(&mvi->pdev->dev,
				  sizeof(*mvi->rx) * MVS_RX_RING_SZ,
				  mvi->rx, mvi->rx_dma);
	if (mvi->slot)
		dma_free_coherent(&mvi->pdev->dev,
				  sizeof(*mvi->slot) * MVS_SLOTS,
				  mvi->slot, mvi->slot_dma);
#ifdef MVS_ENABLE_PERI
	if (mvi->peri_regs)
		iounmap(mvi->peri_regs);
#endif
	if (mvi->regs)
		iounmap(mvi->regs);
	if (mvi->shost)
		scsi_host_put(mvi->shost);
	/* kfree(NULL) is a no-op, so these need no guard */
	kfree(mvi->sas.sas_port);
	kfree(mvi->sas.sas_phy);
	kfree(mvi);
}
2022
2023/* FIXME: locking? */
2024static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
2025 void *funcdata)
2026{
2027 struct mvs_info *mvi = sas_phy->ha->lldd_ha;
2028 int rc = 0, phy_id = sas_phy->id;
2029 u32 tmp;
2030
2031 tmp = mvs_read_phy_ctl(mvi, phy_id);
2032
2033 switch (func) {
2034 case PHY_FUNC_SET_LINK_RATE:{
2035 struct sas_phy_linkrates *rates = funcdata;
2036 u32 lrmin = 0, lrmax = 0;
2037
2038 lrmin = (rates->minimum_linkrate << 8);
2039 lrmax = (rates->maximum_linkrate << 12);
2040
2041 if (lrmin) {
2042 tmp &= ~(0xf << 8);
2043 tmp |= lrmin;
2044 }
2045 if (lrmax) {
2046 tmp &= ~(0xf << 12);
2047 tmp |= lrmax;
2048 }
2049 mvs_write_phy_ctl(mvi, phy_id, tmp);
2050 break;
2051 }
2052
2053 case PHY_FUNC_HARD_RESET:
2054 if (tmp & PHY_RST_HARD)
2055 break;
2056 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD);
2057 break;
2058
2059 case PHY_FUNC_LINK_RESET:
2060 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST);
2061 break;
2062
2063 case PHY_FUNC_DISABLE:
2064 case PHY_FUNC_RELEASE_SPINUP_HOLD:
2065 default:
2066 rc = -EOPNOTSUPP;
2067 }
2068
2069 return rc;
2070}
2071
2072static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
2073{
2074 struct mvs_phy *phy = &mvi->phy[phy_id];
2075 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2076
2077 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
2078 sas_phy->class = SAS;
2079 sas_phy->iproto = SAS_PROTOCOL_ALL;
2080 sas_phy->tproto = 0;
2081 sas_phy->type = PHY_TYPE_PHYSICAL;
2082 sas_phy->role = PHY_ROLE_INITIATOR;
2083 sas_phy->oob_mode = OOB_NOT_CONNECTED;
2084 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
2085
2086 sas_phy->id = phy_id;
2087 sas_phy->sas_addr = &mvi->sas_addr[0];
2088 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
2089 sas_phy->ha = &mvi->sas;
2090 sas_phy->lldd_phy = phy;
2091}
2092
2093static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
2094 const struct pci_device_id *ent)
2095{
2096 struct mvs_info *mvi;
2097 unsigned long res_start, res_len, res_flag;
2098 struct asd_sas_phy **arr_phy;
2099 struct asd_sas_port **arr_port;
2100 const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data];
2101 int i;
2102
2103 /*
2104 * alloc and init our per-HBA mvs_info struct
2105 */
2106
2107 mvi = kzalloc(sizeof(*mvi), GFP_KERNEL);
2108 if (!mvi)
2109 return NULL;
2110
2111 spin_lock_init(&mvi->lock);
2112 mvi->pdev = pdev;
2113 mvi->chip = chip;
2114
2115 if (pdev->device == 0x6440 && pdev->revision == 0)
2116 mvi->flags |= MVF_PHY_PWR_FIX;
2117
2118 /*
2119 * alloc and init SCSI, SAS glue
2120 */
2121
2122 mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
2123 if (!mvi->shost)
2124 goto err_out;
2125
2126 arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
2127 arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
2128 if (!arr_phy || !arr_port)
2129 goto err_out;
2130
2131 for (i = 0; i < MVS_MAX_PHYS; i++) {
2132 mvs_phy_init(mvi, i);
2133 arr_phy[i] = &mvi->phy[i].sas_phy;
2134 arr_port[i] = &mvi->port[i].sas_port;
2135 }
2136
2137 SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
2138 mvi->shost->transportt = mvs_stt;
2139 mvi->shost->max_id = 21;
2140 mvi->shost->max_lun = ~0;
2141 mvi->shost->max_channel = 0;
2142 mvi->shost->max_cmd_len = 16;
2143
2144 mvi->sas.sas_ha_name = DRV_NAME;
2145 mvi->sas.dev = &pdev->dev;
2146 mvi->sas.lldd_module = THIS_MODULE;
2147 mvi->sas.sas_addr = &mvi->sas_addr[0];
2148 mvi->sas.sas_phy = arr_phy;
2149 mvi->sas.sas_port = arr_port;
2150 mvi->sas.num_phys = chip->n_phy;
2151 mvi->sas.lldd_max_execute_num = MVS_CHIP_SLOT_SZ - 1;
2152 mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
2153 mvi->can_queue = (MVS_CHIP_SLOT_SZ >> 1) - 1;
2154 mvi->sas.lldd_ha = mvi;
2155 mvi->sas.core.shost = mvi->shost;
2156
2157 mvs_tag_init(mvi);
2158
2159 /*
2160 * ioremap main and peripheral registers
2161 */
2162
2163#ifdef MVS_ENABLE_PERI
2164 res_start = pci_resource_start(pdev, 2);
2165 res_len = pci_resource_len(pdev, 2);
2166 if (!res_start || !res_len)
2167 goto err_out;
2168
2169 mvi->peri_regs = ioremap_nocache(res_start, res_len);
2170 if (!mvi->peri_regs)
2171 goto err_out;
2172#endif
2173
2174 res_start = pci_resource_start(pdev, 4);
2175 res_len = pci_resource_len(pdev, 4);
2176 if (!res_start || !res_len)
2177 goto err_out;
2178
2179 res_flag = pci_resource_flags(pdev, 4);
2180 if (res_flag & IORESOURCE_CACHEABLE)
2181 mvi->regs = ioremap(res_start, res_len);
2182 else
2183 mvi->regs = ioremap_nocache(res_start, res_len);
2184
2185 if (!mvi->regs)
2186 goto err_out;
2187
2188 /*
2189 * alloc and init our DMA areas
2190 */
2191
2192 mvi->tx = dma_alloc_coherent(&pdev->dev,
2193 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
2194 &mvi->tx_dma, GFP_KERNEL);
2195 if (!mvi->tx)
2196 goto err_out;
2197 memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
2198
2199 mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ,
2200 &mvi->rx_fis_dma, GFP_KERNEL);
2201 if (!mvi->rx_fis)
2202 goto err_out;
2203 memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
2204
2205 mvi->rx = dma_alloc_coherent(&pdev->dev,
2206 sizeof(*mvi->rx) * MVS_RX_RING_SZ,
2207 &mvi->rx_dma, GFP_KERNEL);
2208 if (!mvi->rx)
2209 goto err_out;
2210 memset(mvi->rx, 0, sizeof(*mvi->rx) * MVS_RX_RING_SZ);
2211
2212 mvi->rx[0] = cpu_to_le32(0xfff);
2213 mvi->rx_cons = 0xfff;
2214
2215 mvi->slot = dma_alloc_coherent(&pdev->dev,
2216 sizeof(*mvi->slot) * MVS_SLOTS,
2217 &mvi->slot_dma, GFP_KERNEL);
2218 if (!mvi->slot)
2219 goto err_out;
2220 memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS);
2221
2222 for (i = 0; i < MVS_SLOTS; i++) {
2223 struct mvs_slot_info *slot = &mvi->slot_info[i];
2224
2225 slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ,
2226 &slot->buf_dma, GFP_KERNEL);
2227 if (!slot->buf)
2228 goto err_out;
2229 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
2230 }
2231
2232 /* finally, read NVRAM to get our SAS address */
2233 if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8))
2234 goto err_out;
2235 return mvi;
2236
2237err_out:
2238 mvs_free(mvi);
2239 return NULL;
2240}
2241
/* Indirect register read: select @addr via CMD_ADDR, then read CMD_DATA.
 * The write/read order is the hardware's indirect-access protocol. */
static u32 mvs_cr32(void __iomem *regs, u32 addr)
{
	mw32(CMD_ADDR, addr);
	return mr32(CMD_DATA);
}
2247
/* Indirect register write: select @addr via CMD_ADDR, then write @val
 * to CMD_DATA.  Statement order is the hardware protocol. */
static void mvs_cw32(void __iomem *regs, u32 addr, u32 val)
{
	mw32(CMD_ADDR, addr);
	mw32(CMD_DATA, val);
}
2253
2254static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
2255{
2256 void __iomem *regs = mvi->regs;
2257 return (port < 4)?mr32(P0_SER_CTLSTAT + port * 4):
2258 mr32(P4_SER_CTLSTAT + (port - 4) * 4);
2259}
2260
2261static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
2262{
2263 void __iomem *regs = mvi->regs;
2264 if (port < 4)
2265 mw32(P0_SER_CTLSTAT + port * 4, val);
2266 else
2267 mw32(P4_SER_CTLSTAT + (port - 4) * 4, val);
2268}
2269
2270static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port)
2271{
2272 void __iomem *regs = mvi->regs + off;
2273 void __iomem *regs2 = mvi->regs + off2;
2274 return (port < 4)?readl(regs + port * 8):
2275 readl(regs2 + (port - 4) * 8);
2276}
2277
2278static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
2279 u32 port, u32 val)
2280{
2281 void __iomem *regs = mvi->regs + off;
2282 void __iomem *regs2 = mvi->regs + off2;
2283 if (port < 4)
2284 writel(val, regs + port * 8);
2285 else
2286 writel(val, regs2 + (port - 4) * 8);
2287}
2288
/* Read the per-phy CONFIG register selected by mvs_write_port_cfg_addr(). */
static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port);
}
2293
/* Write the per-phy CONFIG register selected by mvs_write_port_cfg_addr(). */
static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val);
}
2298
/* Select which per-phy CONFIG register the cfg_data accessors touch. */
static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr)
{
	mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr);
}
2303
/* Read the vendor-specific register selected by mvs_write_port_vsr_addr(). */
static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port);
}
2308
/* Write the vendor-specific register selected by mvs_write_port_vsr_addr(). */
static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val);
}
2313
/* Select which vendor-specific register the vsr_data accessors touch. */
static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr)
{
	mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr);
}
2318
/* Read the per-phy interrupt status register. */
static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port);
}
2323
/* Write (acknowledge/clear) the per-phy interrupt status register. */
static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val);
}
2328
/* Read the per-phy interrupt mask register. */
static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port);
}
2333
/* Write the per-phy interrupt mask register. */
static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val);
}
2338
/*
 * mvs_phy_hacks - one-time PHY/link-layer register workarounds.
 *
 * Applied once during hw init: chip-errata fixes and tuning writes for
 * SATA R-ERR glitch filtering, retry counts, open-frame and watchdog
 * timeouts, wide-port link-change behavior, Seagate OOB ordering, and
 * per-phy voltage-level adjustments.  The magic values come from the
 * vendor; do not "clean them up".
 */
static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	/* workaround for SATA R-ERR, to ignore phy glitch */
	tmp = mvs_cr32(regs, CMD_PHY_TIMER);
	tmp &= ~(1 << 9);
	tmp |= (1 << 10);
	mvs_cw32(regs, CMD_PHY_TIMER, tmp);

	/* enable retry 127 times */
	mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f);

	/* extend open frame timeout to max */
	tmp = mvs_cr32(regs, CMD_SAS_CTL0);
	tmp &= ~0xffff;
	tmp |= 0x3fff;
	mvs_cw32(regs, CMD_SAS_CTL0, tmp);

	/* workaround for WDTIMEOUT , set to 550 ms */
	mvs_cw32(regs, CMD_WD_TIMER, 0xffffff);

	/* not to halt for different port op during wideport link change */
	mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d);

	/* workaround for Seagate disk not-found OOB sequence, recv
	 * COMINIT before sending out COMWAKE */
	tmp = mvs_cr32(regs, CMD_PHY_MODE_21);
	tmp &= 0x0000ffff;
	tmp |= 0x00fa0000;
	mvs_cw32(regs, CMD_PHY_MODE_21, tmp);

	tmp = mvs_cr32(regs, CMD_PHY_TIMER);
	tmp &= 0x1fffffff;
	tmp |= (2U << 29);	/* 8 ms retry */
	mvs_cw32(regs, CMD_PHY_TIMER, tmp);

	/* TEST - for phy decoding error, adjust voltage levels */
	/* the same VSR write is replayed at the four per-phy strides */
	mw32(P0_VSR_ADDR + 0, 0x8);
	mw32(P0_VSR_DATA + 0, 0x2F0);

	mw32(P0_VSR_ADDR + 8, 0x8);
	mw32(P0_VSR_DATA + 8, 0x2F0);

	mw32(P0_VSR_ADDR + 16, 0x8);
	mw32(P0_VSR_DATA + 16, 0x2F0);

	mw32(P0_VSR_ADDR + 24, 0x8);
	mw32(P0_VSR_DATA + 24, 0x2F0);

}
2391
2392static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId)
2393{
2394 void __iomem *regs = mvi->regs;
2395 u32 tmp;
2396
2397 tmp = mr32(PCS);
2398 if (mvi->chip->n_phy <= 4)
2399 tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT);
2400 else
2401 tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2);
2402 mw32(PCS, tmp);
2403}
2404
2405static void mvs_detect_porttype(struct mvs_info *mvi, int i)
2406{
2407 void __iomem *regs = mvi->regs;
2408 u32 reg;
2409 struct mvs_phy *phy = &mvi->phy[i];
2410
2411 /* TODO check & save device type */
2412 reg = mr32(GBL_PORT_TYPE);
2413
2414 if (reg & MODE_SAS_SATA & (1 << i))
2415 phy->phy_type |= PORT_TYPE_SAS;
2416 else
2417 phy->phy_type |= PORT_TYPE_SATA;
2418}
2419
2420static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
2421{
2422 u32 *s = (u32 *) buf;
2423
2424 if (!s)
2425 return NULL;
2426
2427 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
2428 s[3] = mvs_read_port_cfg_data(mvi, i);
2429
2430 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
2431 s[2] = mvs_read_port_cfg_data(mvi, i);
2432
2433 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
2434 s[1] = mvs_read_port_cfg_data(mvi, i);
2435
2436 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
2437 s[0] = mvs_read_port_cfg_data(mvi, i);
2438
2439 return (void *)s;
2440}
2441
/* Non-zero if the phy's irq status carries a SATA signature-FIS event. */
static u32 mvs_is_sig_fis_received(u32 irq_status)
{
	return irq_status & PHYEV_SIG_FIS;
}
2446
/*
 * mvs_update_wideport - push the port's wide-port phy bitmap into each
 * member phy's PHYR_WIDE_PORT config register (and clear it on
 * non-member phys).
 *
 * NOTE(review): the for_each_phy() macro is defined elsewhere in this
 * file; as written, "no" appears to be both the bit being tested
 * (no & 1) and the phy index passed to the cfg accessors -- confirm
 * against the macro definition that this iterates phy ids correctly.
 */
static void mvs_update_wideport(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;
	int j, no;

	for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy)
		if (no & 1) {
			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
			mvs_write_port_cfg_data(mvi, no,
						port->wide_port_phymap);
		} else {
			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
			mvs_write_port_cfg_data(mvi, no, 0);
		}
}
2463
/*
 * mvs_is_phy_ready - test phy @i's link state and tear down stale
 * port bindings.
 *
 * Returns the phy control register value (non-zero) when the link is
 * up and no loss-of-signal (POOF) event is pending; in that case a
 * port-less phy is flagged attached.  Otherwise detaches the phy from
 * its port -- shrinking a SAS wide port's phymap or marking a SATA
 * port unattached -- frees the port's SATA register set, clears the
 * phy's type/attachment state, and returns 0.
 */
static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port;

	tmp = mvs_read_phy_ctl(mvi, i);

	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
		if (!phy->port)
			phy->phy_attached = 1;
		return tmp;
	}

	/* link is down (or POOF pending): unbind the phy from its port */
	port = phy->port;
	if (port) {
		if (phy->phy_type & PORT_TYPE_SAS) {
			port->wide_port_phymap &= ~(1U << i);
			if (!port->wide_port_phymap)
				port->port_attached = 0;
			mvs_update_wideport(mvi, i);
		} else if (phy->phy_type & PORT_TYPE_SATA)
			port->port_attached = 0;
		mvs_free_reg_set(mvi, phy->port);
		phy->port = NULL;
		phy->phy_attached = 0;
		phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	}
	return 0;
}
2494
/*
 * mvs_update_phyinfo - refresh driver and libsas state for phy @i.
 * @mvi:    host instance
 * @i:      phy index
 * @get_st: when non-zero, also sample and (at the end) acknowledge the
 *          phy's interrupt status, and re-evaluate link readiness
 *
 * Reads the phy's identify/address registers; if the link is up, also
 * reads the negotiated rate and attached address, fills in the libsas
 * identify fields per port type (SAS end device/expander vs. SATA with
 * its D2H signature FIS), and applies a 1.5G decoding-error workaround
 * on the vendor-specific PHY_MODE6 register.
 */
static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
					int get_st)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct pci_dev *pdev = mvi->pdev;
	u32 tmp, j;
	u64 tmp64;

	mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
	phy->dev_info = mvs_read_port_cfg_data(mvi, i);

	/* our own SAS address, high then low dword */
	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
	phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;

	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
	phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);

	if (get_st) {
		phy->irq_status = mvs_read_port_irq_stat(mvi, i);
		phy->phy_status = mvs_is_phy_ready(mvi, i);
	}

	if (phy->phy_status) {
		u32 phy_st;
		struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];

		mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
		phy_st = mvs_read_port_cfg_data(mvi, i);

		sas_phy->linkrate =
		    (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
			    PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;

		/* Updated attached_sas_addr */
		mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
		phy->att_dev_sas_addr =
		    (u64) mvs_read_port_cfg_data(mvi, i) << 32;

		mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
		phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);

		dev_printk(KERN_DEBUG, &pdev->dev,
			"phy[%d] Get Attached Address 0x%llX ,"
			" SAS Address 0x%llX\n",
			i, phy->att_dev_sas_addr, phy->dev_sas_addr);
		dev_printk(KERN_DEBUG, &pdev->dev,
			"Rate = %x , type = %d\n",
			sas_phy->linkrate, phy->phy_type);

#if 1
		/*
		 * If the device is capable of supporting a wide port
		 * on its phys, it may configure the phys as a wide port.
		 */
		/* heuristic: an attached address one below another phy's
		 * attached address is treated as the same wide port */
		if (phy->phy_type & PORT_TYPE_SAS)
			for (j = 0; j < mvi->chip->n_phy && j != i; ++j) {
				if ((mvi->phy[j].phy_attached) &&
					(mvi->phy[j].phy_type & PORT_TYPE_SAS))
					if (phy->att_dev_sas_addr ==
					mvi->phy[j].att_dev_sas_addr - 1) {
						phy->att_dev_sas_addr =
						mvi->phy[j].att_dev_sas_addr;
						break;
					}
			}

#endif

		tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
		memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);

		if (phy->phy_type & PORT_TYPE_SAS) {
			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
			phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
			phy->identify.device_type =
			    phy->att_dev_info & PORT_DEV_TYPE_MASK;

			if (phy->identify.device_type == SAS_END_DEV)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SSP;
			else if (phy->identify.device_type != NO_DEVICE)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SMP;
			if (phy_st & PHY_OOB_DTCTD)
				sas_phy->oob_mode = SAS_OOB_MODE;
			phy->frame_rcvd_size =
			    sizeof(struct sas_identify_frame);
		} else if (phy->phy_type & PORT_TYPE_SATA) {
			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
			if (mvs_is_sig_fis_received(phy->irq_status)) {
				if (phy_st & PHY_OOB_DTCTD)
					sas_phy->oob_mode = SATA_OOB_MODE;
				phy->frame_rcvd_size =
				    sizeof(struct dev_to_host_fis);
				mvs_get_d2h_reg(mvi, i,
						(void *)sas_phy->frame_rcvd);
			} else {
				dev_printk(KERN_DEBUG, &pdev->dev,
					"No sig fis\n");
			}
		}
		/* workaround for HW phy decoding error on 1.5g disk drive */
		mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
		tmp = mvs_read_port_vsr_data(mvi, i);
		if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
		     PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
			SAS_LINK_RATE_1_5_GBPS)
			tmp &= ~PHY_MODE6_DTL_SPEED;
		else
			tmp |= PHY_MODE6_DTL_SPEED;
		mvs_write_port_vsr_data(mvi, i, tmp);

	}
	/* acknowledge the interrupt status we sampled above */
	if (get_st)
		mvs_write_port_irq_stat(mvi, i, phy->irq_status);
}
2611
2612static void mvs_port_formed(struct asd_sas_phy *sas_phy)
2613{
2614 struct sas_ha_struct *sas_ha = sas_phy->ha;
2615 struct mvs_info *mvi = sas_ha->lldd_ha;
2616 struct asd_sas_port *sas_port = sas_phy->port;
2617 struct mvs_phy *phy = sas_phy->lldd_phy;
2618 struct mvs_port *port = &mvi->port[sas_port->id];
2619 unsigned long flags;
2620
2621 spin_lock_irqsave(&mvi->lock, flags);
2622 port->port_attached = 1;
2623 phy->port = port;
2624 port->taskfileset = MVS_ID_NOT_MAPPED;
2625 if (phy->phy_type & PORT_TYPE_SAS) {
2626 port->wide_port_phymap = sas_port->phy_mask;
2627 mvs_update_wideport(mvi, sas_phy->id);
2628 }
2629 spin_unlock_irqrestore(&mvi->lock, flags);
2630}
2631
2632static int __devinit mvs_hw_init(struct mvs_info *mvi)
2633{
2634 void __iomem *regs = mvi->regs;
2635 int i;
2636 u32 tmp, cctl;
2637
2638 /* make sure interrupts are masked immediately (paranoia) */
2639 mw32(GBL_CTL, 0);
2640 tmp = mr32(GBL_CTL);
2641
2642 /* Reset Controller */
2643 if (!(tmp & HBA_RST)) {
2644 if (mvi->flags & MVF_PHY_PWR_FIX) {
2645 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
2646 tmp &= ~PCTL_PWR_ON;
2647 tmp |= PCTL_OFF;
2648 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
2649
2650 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
2651 tmp &= ~PCTL_PWR_ON;
2652 tmp |= PCTL_OFF;
2653 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
2654 }
2655
2656 /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
2657 mw32_f(GBL_CTL, HBA_RST);
2658 }
2659
2660 /* wait for reset to finish; timeout is just a guess */
2661 i = 1000;
2662 while (i-- > 0) {
2663 msleep(10);
2664
2665 if (!(mr32(GBL_CTL) & HBA_RST))
2666 break;
2667 }
2668 if (mr32(GBL_CTL) & HBA_RST) {
2669 dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n");
2670 return -EBUSY;
2671 }
2672
2673 /* Init Chip */
2674 /* make sure RST is set; HBA_RST /should/ have done that for us */
2675 cctl = mr32(CTL);
2676 if (cctl & CCTL_RST)
2677 cctl &= ~CCTL_RST;
2678 else
2679 mw32_f(CTL, cctl | CCTL_RST);
2680
2681 /* write to device control _AND_ device status register? - A.C. */
2682 pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
2683 tmp &= ~PRD_REQ_MASK;
2684 tmp |= PRD_REQ_SIZE;
2685 pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
2686
2687 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
2688 tmp |= PCTL_PWR_ON;
2689 tmp &= ~PCTL_OFF;
2690 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
2691
2692 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
2693 tmp |= PCTL_PWR_ON;
2694 tmp &= ~PCTL_OFF;
2695 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
2696
2697 mw32_f(CTL, cctl);
2698
2699 /* reset control */
2700 mw32(PCS, 0); /*MVS_PCS */
2701
2702 mvs_phy_hacks(mvi);
2703
2704 mw32(CMD_LIST_LO, mvi->slot_dma);
2705 mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
2706
2707 mw32(RX_FIS_LO, mvi->rx_fis_dma);
2708 mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
2709
2710 mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
2711 mw32(TX_LO, mvi->tx_dma);
2712 mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);
2713
2714 mw32(RX_CFG, MVS_RX_RING_SZ);
2715 mw32(RX_LO, mvi->rx_dma);
2716 mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);
2717
2718 /* enable auto port detection */
2719 mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
2720 msleep(100);
2721 /* init and reset phys */
2722 for (i = 0; i < mvi->chip->n_phy; i++) {
2723 u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
2724 u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]);
2725
2726 mvs_detect_porttype(mvi, i);
2727
2728 /* set phy local SAS address */
2729 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
2730 mvs_write_port_cfg_data(mvi, i, lo);
2731 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
2732 mvs_write_port_cfg_data(mvi, i, hi);
2733
2734 /* reset phy */
2735 tmp = mvs_read_phy_ctl(mvi, i);
2736 tmp |= PHY_RST;
2737 mvs_write_phy_ctl(mvi, i, tmp);
2738 }
2739
2740 msleep(100);
2741
2742 for (i = 0; i < mvi->chip->n_phy; i++) {
2743 /* clear phy int status */
2744 tmp = mvs_read_port_irq_stat(mvi, i);
2745 tmp &= ~PHYEV_SIG_FIS;
2746 mvs_write_port_irq_stat(mvi, i, tmp);
2747
2748 /* set phy int mask */
2749 tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
2750 PHYEV_ID_DONE | PHYEV_DEC_ERR;
2751 mvs_write_port_irq_mask(mvi, i, tmp);
2752
2753 msleep(100);
2754 mvs_update_phyinfo(mvi, i, 1);
2755 mvs_enable_xmt(mvi, i);
2756 }
2757
2758 /* FIXME: update wide port bitmaps */
2759
2760 /* little endian for open address and command table, etc. */
2761 /* A.C.
2762 * it seems that ( from the spec ) turning on big-endian won't
2763 * do us any good on big-endian machines, need further confirmation
2764 */
2765 cctl = mr32(CTL);
2766 cctl |= CCTL_ENDIAN_CMD;
2767 cctl |= CCTL_ENDIAN_DATA;
2768 cctl &= ~CCTL_ENDIAN_OPEN;
2769 cctl |= CCTL_ENDIAN_RSP;
2770 mw32_f(CTL, cctl);
2771
2772 /* reset CMD queue */
2773 tmp = mr32(PCS);
2774 tmp |= PCS_CMD_RST;
2775 mw32(PCS, tmp);
2776 /* interrupt coalescing may cause missing HW interrput in some case,
2777 * and the max count is 0x1ff, while our max slot is 0x200,
2778 * it will make count 0.
2779 */
2780 tmp = 0;
2781 mw32(INT_COAL, tmp);
2782
2783 tmp = 0x100;
2784 mw32(INT_COAL_TMOUT, tmp);
2785
2786 /* ladies and gentlemen, start your engines */
2787 mw32(TX_CFG, 0);
2788 mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
2789 mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
2790 /* enable CMD/CMPL_Q/RESP mode */
2791 mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);
2792
2793 /* re-enable interrupts globally */
2794 mvs_hba_interrupt_enable(mvi);
2795
2796 /* enable completion queue interrupt */
2797 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM);
2798 mw32(INT_MASK, tmp);
2799
2800 return 0;
2801}
2802
2803static void __devinit mvs_print_info(struct mvs_info *mvi)
2804{
2805 struct pci_dev *pdev = mvi->pdev;
2806 static int printed_version;
2807
2808 if (!printed_version++)
2809 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2810
2811 dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
2812 mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
2813}
2814
2815static int __devinit mvs_pci_init(struct pci_dev *pdev,
2816 const struct pci_device_id *ent)
2817{
2818 int rc;
2819 struct mvs_info *mvi;
2820 irq_handler_t irq_handler = mvs_interrupt;
2821
2822 rc = pci_enable_device(pdev);
2823 if (rc)
2824 return rc;
2825
2826 pci_set_master(pdev);
2827
2828 rc = pci_request_regions(pdev, DRV_NAME);
2829 if (rc)
2830 goto err_out_disable;
2831
2832 rc = pci_go_64(pdev);
2833 if (rc)
2834 goto err_out_regions;
2835
2836 mvi = mvs_alloc(pdev, ent);
2837 if (!mvi) {
2838 rc = -ENOMEM;
2839 goto err_out_regions;
2840 }
2841
2842 rc = mvs_hw_init(mvi);
2843 if (rc)
2844 goto err_out_mvi;
2845
2846#ifndef MVS_DISABLE_MSI
2847 if (!pci_enable_msi(pdev)) {
2848 u32 tmp;
2849 void __iomem *regs = mvi->regs;
2850 mvi->flags |= MVF_MSI;
2851 irq_handler = mvs_msi_interrupt;
2852 tmp = mr32(PCS);
2853 mw32(PCS, tmp | PCS_SELF_CLEAR);
2854 }
2855#endif
2856
2857 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
2858 if (rc)
2859 goto err_out_msi;
2860
2861 rc = scsi_add_host(mvi->shost, &pdev->dev);
2862 if (rc)
2863 goto err_out_irq;
2864
2865 rc = sas_register_ha(&mvi->sas);
2866 if (rc)
2867 goto err_out_shost;
2868
2869 pci_set_drvdata(pdev, mvi);
2870
2871 mvs_print_info(mvi);
2872
2873 scsi_scan_host(mvi->shost);
2874
2875 return 0;
2876
2877err_out_shost:
2878 scsi_remove_host(mvi->shost);
2879err_out_irq:
2880 free_irq(pdev->irq, mvi);
2881err_out_msi:
2882 if (mvi->flags |= MVF_MSI)
2883 pci_disable_msi(pdev);
2884err_out_mvi:
2885 mvs_free(mvi);
2886err_out_regions:
2887 pci_release_regions(pdev);
2888err_out_disable:
2889 pci_disable_device(pdev);
2890 return rc;
2891}
2892
/*
 * mvs_pci_remove - PCI hot-unplug / driver-unload teardown.
 * @pdev: PCI device being removed
 *
 * Undoes mvs_pci_init() in reverse order: detach from libsas, mask
 * controller interrupts, tear down the SCSI host, release the IRQ and
 * (if used) MSI, then free driver memory and the PCI resources.
 */
static void __devexit mvs_pci_remove(struct pci_dev *pdev)
{
	struct mvs_info *mvi = pci_get_drvdata(pdev);

	/* clear drvdata first so nothing can look us up mid-teardown */
	pci_set_drvdata(pdev, NULL);

	if (mvi) {
		/* stop libsas activity before silencing the hardware */
		sas_unregister_ha(&mvi->sas);
		mvs_hba_interrupt_disable(mvi);
		sas_remove_host(mvi->shost);
		scsi_remove_host(mvi->shost);

		free_irq(pdev->irq, mvi);
		if (mvi->flags & MVF_MSI)
			pci_disable_msi(pdev);
		mvs_free(mvi);
		pci_release_regions(pdev);
	}
	pci_disable_device(pdev);
}
2913
/* libsas domain callbacks implemented by this driver */
static struct sas_domain_function_template mvs_transport_ops = {
	.lldd_execute_task	= mvs_task_exec,
	.lldd_control_phy	= mvs_phy_control,
	.lldd_abort_task	= mvs_task_abort,
	.lldd_port_formed	= mvs_port_formed
};
2920
/*
 * Supported Marvell controllers.
 * NOTE(review): 0x6340 is deliberately mapped to the chip_6440 data,
 * not chip_6320 -- looks intentional, but confirm against the 6340
 * datasheet.
 */
static struct pci_device_id __devinitdata mvs_pci_table[] = {
	{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
	{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
	{ PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
	{ PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },

	{ }	/* terminate list */
};
2929
/* PCI driver registration glue */
static struct pci_driver mvs_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= mvs_pci_table,
	.probe		= mvs_pci_init,
	.remove		= __devexit_p(mvs_pci_remove),
};
2936
2937static int __init mvs_init(void)
2938{
2939 int rc;
2940
2941 mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
2942 if (!mvs_stt)
2943 return -ENOMEM;
2944
2945 rc = pci_register_driver(&mvs_pci_driver);
2946 if (rc)
2947 goto err_out;
2948
2949 return 0;
2950
2951err_out:
2952 sas_release_transport(mvs_stt);
2953 return rc;
2954}
2955
/*
 * mvs_exit - module unload: unregister the PCI driver first so no new
 * probes can start, then release the libsas transport template.
 */
static void __exit mvs_exit(void)
{
	pci_unregister_driver(&mvs_pci_driver);
	sas_release_transport(mvs_stt);
}
2961
module_init(mvs_init);
module_exit(mvs_exit);

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
/* export the PCI ID table so userspace can autoload this module */
MODULE_DEVICE_TABLE(pci, mvs_pci_table);
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index 0cd614a0fa73..fad6cb5cba28 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -124,7 +124,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd *cmd, const void *buf)
124 } 124 }
125 req_len += sgpnt->length; 125 req_len += sgpnt->length;
126 } 126 }
127 scsi_set_resid(cmd, req_len - act_len); 127 scsi_set_resid(cmd, buflen - act_len);
128 return 0; 128 return 0;
129} 129}
130 130
@@ -427,7 +427,7 @@ static struct scsi_host_template ps3rom_host_template = {
427 .cmd_per_lun = 1, 427 .cmd_per_lun = 1,
428 .emulated = 1, /* only sg driver uses this */ 428 .emulated = 1, /* only sg driver uses this */
429 .max_sectors = PS3ROM_MAX_SECTORS, 429 .max_sectors = PS3ROM_MAX_SECTORS,
430 .use_clustering = ENABLE_CLUSTERING, 430 .use_clustering = DISABLE_CLUSTERING,
431 .module = THIS_MODULE, 431 .module = THIS_MODULE,
432}; 432};
433 433
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 1479c60441c8..2cd899bfe84b 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -23,7 +23,7 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
23 mutex_lock(&ha->fce_mutex); 23 mutex_lock(&ha->fce_mutex);
24 24
25 seq_printf(s, "FCE Trace Buffer\n"); 25 seq_printf(s, "FCE Trace Buffer\n");
26 seq_printf(s, "In Pointer = %llx\n\n", ha->fce_wr); 26 seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
27 seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma); 27 seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma);
28 seq_printf(s, "FCE Enable Registers\n"); 28 seq_printf(s, "FCE Enable Registers\n");
29 seq_printf(s, "%08x %08x %08x %08x %08x %08x\n", 29 seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 6226d88479f5..c1808763d40e 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -39,7 +39,7 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
39 ms_pkt->entry_count = 1; 39 ms_pkt->entry_count = 1;
40 SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER); 40 SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
41 ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG); 41 ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
42 ms_pkt->timeout = __constant_cpu_to_le16(25); 42 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
43 ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); 43 ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
44 ms_pkt->total_dsd_count = __constant_cpu_to_le16(2); 44 ms_pkt->total_dsd_count = __constant_cpu_to_le16(2);
45 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); 45 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
@@ -75,7 +75,7 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
75 ct_pkt->entry_type = CT_IOCB_TYPE; 75 ct_pkt->entry_type = CT_IOCB_TYPE;
76 ct_pkt->entry_count = 1; 76 ct_pkt->entry_count = 1;
77 ct_pkt->nport_handle = __constant_cpu_to_le16(NPH_SNS); 77 ct_pkt->nport_handle = __constant_cpu_to_le16(NPH_SNS);
78 ct_pkt->timeout = __constant_cpu_to_le16(25); 78 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
79 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); 79 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
80 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); 80 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
81 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); 81 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
@@ -1144,7 +1144,7 @@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
1144 ms_pkt->entry_count = 1; 1144 ms_pkt->entry_count = 1;
1145 SET_TARGET_ID(ha, ms_pkt->loop_id, ha->mgmt_svr_loop_id); 1145 SET_TARGET_ID(ha, ms_pkt->loop_id, ha->mgmt_svr_loop_id);
1146 ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG); 1146 ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
1147 ms_pkt->timeout = __constant_cpu_to_le16(59); 1147 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1148 ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); 1148 ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
1149 ms_pkt->total_dsd_count = __constant_cpu_to_le16(2); 1149 ms_pkt->total_dsd_count = __constant_cpu_to_le16(2);
1150 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); 1150 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
@@ -1181,7 +1181,7 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
1181 ct_pkt->entry_type = CT_IOCB_TYPE; 1181 ct_pkt->entry_type = CT_IOCB_TYPE;
1182 ct_pkt->entry_count = 1; 1182 ct_pkt->entry_count = 1;
1183 ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id); 1183 ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id);
1184 ct_pkt->timeout = __constant_cpu_to_le16(59); 1184 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1185 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); 1185 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
1186 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); 1186 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
1187 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); 1187 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
@@ -1761,7 +1761,7 @@ qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *ha, uint32_t req_size,
1761 ct_pkt->entry_type = CT_IOCB_TYPE; 1761 ct_pkt->entry_type = CT_IOCB_TYPE;
1762 ct_pkt->entry_count = 1; 1762 ct_pkt->entry_count = 1;
1763 ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id); 1763 ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id);
1764 ct_pkt->timeout = __constant_cpu_to_le16(59); 1764 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1765 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); 1765 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
1766 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); 1766 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
1767 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); 1767 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index d5c7853e7eba..364be7d06875 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1733,8 +1733,8 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1733 ha->login_timeout = nv->login_timeout; 1733 ha->login_timeout = nv->login_timeout;
1734 icb->login_timeout = nv->login_timeout; 1734 icb->login_timeout = nv->login_timeout;
1735 1735
1736 /* Set minimum RATOV to 200 tenths of a second. */ 1736 /* Set minimum RATOV to 100 tenths of a second. */
1737 ha->r_a_tov = 200; 1737 ha->r_a_tov = 100;
1738 1738
1739 ha->loop_reset_delay = nv->reset_delay; 1739 ha->loop_reset_delay = nv->reset_delay;
1740 1740
@@ -3645,8 +3645,8 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3645 ha->login_timeout = le16_to_cpu(nv->login_timeout); 3645 ha->login_timeout = le16_to_cpu(nv->login_timeout);
3646 icb->login_timeout = cpu_to_le16(nv->login_timeout); 3646 icb->login_timeout = cpu_to_le16(nv->login_timeout);
3647 3647
3648 /* Set minimum RATOV to 200 tenths of a second. */ 3648 /* Set minimum RATOV to 100 tenths of a second. */
3649 ha->r_a_tov = 200; 3649 ha->r_a_tov = 100;
3650 3650
3651 ha->loop_reset_delay = nv->reset_delay; 3651 ha->loop_reset_delay = nv->reset_delay;
3652 3652
@@ -4022,7 +4022,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha)
4022 return; 4022 return;
4023 4023
4024 ret = qla2x00_stop_firmware(ha); 4024 ret = qla2x00_stop_firmware(ha);
4025 for (retries = 5; ret != QLA_SUCCESS && retries ; retries--) { 4025 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
4026 retries ; retries--) {
4026 qla2x00_reset_chip(ha); 4027 qla2x00_reset_chip(ha);
4027 if (qla2x00_chip_diag(ha) != QLA_SUCCESS) 4028 if (qla2x00_chip_diag(ha) != QLA_SUCCESS)
4028 continue; 4029 continue;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 14e6f22944b7..f0337036c7bb 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -958,6 +958,11 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
958 } 958 }
959 } 959 }
960 960
961 /* Check for overrun. */
962 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
963 scsi_status & SS_RESIDUAL_OVER)
964 comp_status = CS_DATA_OVERRUN;
965
961 /* 966 /*
962 * Based on Host and scsi status generate status code for Linux 967 * Based on Host and scsi status generate status code for Linux
963 */ 968 */
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 99d29fff836d..bb103580e1ba 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2206,7 +2206,7 @@ qla24xx_abort_target(fc_port_t *fcport)
2206 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; 2206 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
2207 tsk->p.tsk.entry_count = 1; 2207 tsk->p.tsk.entry_count = 1;
2208 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); 2208 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
2209 tsk->p.tsk.timeout = __constant_cpu_to_le16(25); 2209 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2210 tsk->p.tsk.control_flags = __constant_cpu_to_le32(TCF_TARGET_RESET); 2210 tsk->p.tsk.control_flags = __constant_cpu_to_le32(TCF_TARGET_RESET);
2211 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; 2211 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
2212 tsk->p.tsk.port_id[1] = fcport->d_id.b.area; 2212 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index c5742cc15abb..ea08a129fee9 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.02.00-k8" 10#define QLA2XXX_VERSION "8.02.00-k9"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 2 13#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 10b3b9a620f3..109c5f5985ec 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1299,9 +1299,9 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
1299 ddb_entry->fw_ddb_device_state = state; 1299 ddb_entry->fw_ddb_device_state = state;
1300 /* Device is back online. */ 1300 /* Device is back online. */
1301 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { 1301 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
1302 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
1302 atomic_set(&ddb_entry->port_down_timer, 1303 atomic_set(&ddb_entry->port_down_timer,
1303 ha->port_down_retry_count); 1304 ha->port_down_retry_count);
1304 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
1305 atomic_set(&ddb_entry->relogin_retry_count, 0); 1305 atomic_set(&ddb_entry->relogin_retry_count, 0);
1306 atomic_set(&ddb_entry->relogin_timer, 0); 1306 atomic_set(&ddb_entry->relogin_timer, 0);
1307 clear_bit(DF_RELOGIN, &ddb_entry->flags); 1307 clear_bit(DF_RELOGIN, &ddb_entry->flags);
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 0f029d0d7315..fc84db4069f4 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -100,8 +100,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
100 100
101 if (sts_entry->iscsiFlags &ISCSI_FLAG_RESIDUAL_UNDER) { 101 if (sts_entry->iscsiFlags &ISCSI_FLAG_RESIDUAL_UNDER) {
102 scsi_set_resid(cmd, residual); 102 scsi_set_resid(cmd, residual);
103 if (!scsi_status && ((scsi_bufflen(cmd) - residual) < 103 if ((scsi_bufflen(cmd) - residual) < cmd->underflow) {
104 cmd->underflow)) {
105 104
106 cmd->result = DID_ERROR << 16; 105 cmd->result = DID_ERROR << 16;
107 106
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index c3c59d763037..8b92f348f02c 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -75,6 +75,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
75static int qla4xxx_slave_alloc(struct scsi_device *device); 75static int qla4xxx_slave_alloc(struct scsi_device *device);
76static int qla4xxx_slave_configure(struct scsi_device *device); 76static int qla4xxx_slave_configure(struct scsi_device *device);
77static void qla4xxx_slave_destroy(struct scsi_device *sdev); 77static void qla4xxx_slave_destroy(struct scsi_device *sdev);
78static void qla4xxx_scan_start(struct Scsi_Host *shost);
78 79
79static struct scsi_host_template qla4xxx_driver_template = { 80static struct scsi_host_template qla4xxx_driver_template = {
80 .module = THIS_MODULE, 81 .module = THIS_MODULE,
@@ -90,6 +91,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
90 .slave_destroy = qla4xxx_slave_destroy, 91 .slave_destroy = qla4xxx_slave_destroy,
91 92
92 .scan_finished = iscsi_scan_finished, 93 .scan_finished = iscsi_scan_finished,
94 .scan_start = qla4xxx_scan_start,
93 95
94 .this_id = -1, 96 .this_id = -1,
95 .cmd_per_lun = 3, 97 .cmd_per_lun = 3,
@@ -299,6 +301,18 @@ struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha)
299 return ddb_entry; 301 return ddb_entry;
300} 302}
301 303
304static void qla4xxx_scan_start(struct Scsi_Host *shost)
305{
306 struct scsi_qla_host *ha = shost_priv(shost);
307 struct ddb_entry *ddb_entry, *ddbtemp;
308
309 /* finish setup of sessions that were already setup in firmware */
310 list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) {
311 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE)
312 qla4xxx_add_sess(ddb_entry);
313 }
314}
315
302/* 316/*
303 * Timer routines 317 * Timer routines
304 */ 318 */
@@ -864,8 +878,9 @@ static void qla4xxx_flush_active_srbs(struct scsi_qla_host *ha)
864 * qla4xxx_recover_adapter - recovers adapter after a fatal error 878 * qla4xxx_recover_adapter - recovers adapter after a fatal error
865 * @ha: Pointer to host adapter structure. 879 * @ha: Pointer to host adapter structure.
866 * @renew_ddb_list: Indicates what to do with the adapter's ddb list 880 * @renew_ddb_list: Indicates what to do with the adapter's ddb list
867 * after adapter recovery has completed. 881 *
868 * 0=preserve ddb list, 1=destroy and rebuild ddb list 882 * renew_ddb_list value can be 0=preserve ddb list, 1=destroy and rebuild
883 * ddb list.
869 **/ 884 **/
870static int qla4xxx_recover_adapter(struct scsi_qla_host *ha, 885static int qla4xxx_recover_adapter(struct scsi_qla_host *ha,
871 uint8_t renew_ddb_list) 886 uint8_t renew_ddb_list)
@@ -874,6 +889,7 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha,
874 889
875 /* Stall incoming I/O until we are done */ 890 /* Stall incoming I/O until we are done */
876 clear_bit(AF_ONLINE, &ha->flags); 891 clear_bit(AF_ONLINE, &ha->flags);
892
877 DEBUG2(printk("scsi%ld: %s calling qla4xxx_cmd_wait\n", ha->host_no, 893 DEBUG2(printk("scsi%ld: %s calling qla4xxx_cmd_wait\n", ha->host_no,
878 __func__)); 894 __func__));
879 895
@@ -1176,7 +1192,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1176 int ret = -ENODEV, status; 1192 int ret = -ENODEV, status;
1177 struct Scsi_Host *host; 1193 struct Scsi_Host *host;
1178 struct scsi_qla_host *ha; 1194 struct scsi_qla_host *ha;
1179 struct ddb_entry *ddb_entry, *ddbtemp;
1180 uint8_t init_retry_count = 0; 1195 uint8_t init_retry_count = 0;
1181 char buf[34]; 1196 char buf[34];
1182 1197
@@ -1295,13 +1310,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1295 if (ret) 1310 if (ret)
1296 goto probe_failed; 1311 goto probe_failed;
1297 1312
1298 /* Update transport device information for all devices. */
1299 list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) {
1300 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE)
1301 if (qla4xxx_add_sess(ddb_entry))
1302 goto remove_host;
1303 }
1304
1305 printk(KERN_INFO 1313 printk(KERN_INFO
1306 " QLogic iSCSI HBA Driver version: %s\n" 1314 " QLogic iSCSI HBA Driver version: %s\n"
1307 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", 1315 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
@@ -1311,10 +1319,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1311 scsi_scan_host(host); 1319 scsi_scan_host(host);
1312 return 0; 1320 return 0;
1313 1321
1314remove_host:
1315 qla4xxx_free_ddb_list(ha);
1316 scsi_remove_host(host);
1317
1318probe_failed: 1322probe_failed:
1319 qla4xxx_free_adapter(ha); 1323 qla4xxx_free_adapter(ha);
1320 scsi_host_put(ha->host); 1324 scsi_host_put(ha->host);
@@ -1600,9 +1604,12 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
1600 return FAILED; 1604 return FAILED;
1601 } 1605 }
1602 1606
1603 if (qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST) == QLA_SUCCESS) { 1607 /* make sure the dpc thread is stopped while we reset the hba */
1608 clear_bit(AF_ONLINE, &ha->flags);
1609 flush_workqueue(ha->dpc_thread);
1610
1611 if (qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST) == QLA_SUCCESS)
1604 return_status = SUCCESS; 1612 return_status = SUCCESS;
1605 }
1606 1613
1607 dev_info(&ha->pdev->dev, "HOST RESET %s.\n", 1614 dev_info(&ha->pdev->dev, "HOST RESET %s.\n",
1608 return_status == FAILED ? "FAILED" : "SUCCEDED"); 1615 return_status == FAILED ? "FAILED" : "SUCCEDED");
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 65455ab1f3b9..4a1cf6377f6c 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -651,7 +651,7 @@ static int qlogicpti_verify_tmon(struct qlogicpti *qpti)
651 651
652static irqreturn_t qpti_intr(int irq, void *dev_id); 652static irqreturn_t qpti_intr(int irq, void *dev_id);
653 653
654static void __init qpti_chain_add(struct qlogicpti *qpti) 654static void __devinit qpti_chain_add(struct qlogicpti *qpti)
655{ 655{
656 spin_lock_irq(&qptichain_lock); 656 spin_lock_irq(&qptichain_lock);
657 if (qptichain != NULL) { 657 if (qptichain != NULL) {
@@ -667,7 +667,7 @@ static void __init qpti_chain_add(struct qlogicpti *qpti)
667 spin_unlock_irq(&qptichain_lock); 667 spin_unlock_irq(&qptichain_lock);
668} 668}
669 669
670static void __init qpti_chain_del(struct qlogicpti *qpti) 670static void __devexit qpti_chain_del(struct qlogicpti *qpti)
671{ 671{
672 spin_lock_irq(&qptichain_lock); 672 spin_lock_irq(&qptichain_lock);
673 if (qptichain == qpti) { 673 if (qptichain == qpti) {
@@ -682,7 +682,7 @@ static void __init qpti_chain_del(struct qlogicpti *qpti)
682 spin_unlock_irq(&qptichain_lock); 682 spin_unlock_irq(&qptichain_lock);
683} 683}
684 684
685static int __init qpti_map_regs(struct qlogicpti *qpti) 685static int __devinit qpti_map_regs(struct qlogicpti *qpti)
686{ 686{
687 struct sbus_dev *sdev = qpti->sdev; 687 struct sbus_dev *sdev = qpti->sdev;
688 688
@@ -705,7 +705,7 @@ static int __init qpti_map_regs(struct qlogicpti *qpti)
705 return 0; 705 return 0;
706} 706}
707 707
708static int __init qpti_register_irq(struct qlogicpti *qpti) 708static int __devinit qpti_register_irq(struct qlogicpti *qpti)
709{ 709{
710 struct sbus_dev *sdev = qpti->sdev; 710 struct sbus_dev *sdev = qpti->sdev;
711 711
@@ -730,7 +730,7 @@ fail:
730 return -1; 730 return -1;
731} 731}
732 732
733static void __init qpti_get_scsi_id(struct qlogicpti *qpti) 733static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti)
734{ 734{
735 qpti->scsi_id = prom_getintdefault(qpti->prom_node, 735 qpti->scsi_id = prom_getintdefault(qpti->prom_node,
736 "initiator-id", 736 "initiator-id",
@@ -783,7 +783,7 @@ static void qpti_get_clock(struct qlogicpti *qpti)
783/* The request and response queues must each be aligned 783/* The request and response queues must each be aligned
784 * on a page boundary. 784 * on a page boundary.
785 */ 785 */
786static int __init qpti_map_queues(struct qlogicpti *qpti) 786static int __devinit qpti_map_queues(struct qlogicpti *qpti)
787{ 787{
788 struct sbus_dev *sdev = qpti->sdev; 788 struct sbus_dev *sdev = qpti->sdev;
789 789
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index fecba05b4e77..e5c6f6af8765 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -757,7 +757,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
757 "Notifying upper driver of completion " 757 "Notifying upper driver of completion "
758 "(result %x)\n", cmd->result)); 758 "(result %x)\n", cmd->result));
759 759
760 good_bytes = scsi_bufflen(cmd); 760 good_bytes = scsi_bufflen(cmd) + cmd->request->extra_len;
761 if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { 761 if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
762 drv = scsi_cmd_to_driver(cmd); 762 drv = scsi_cmd_to_driver(cmd);
763 if (drv->done) 763 if (drv->done)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 1541c174937a..d1777a9a9625 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -222,7 +222,7 @@ static struct scsi_host_template sdebug_driver_template = {
222 .cmd_per_lun = 16, 222 .cmd_per_lun = 16,
223 .max_sectors = 0xffff, 223 .max_sectors = 0xffff,
224 .unchecked_isa_dma = 0, 224 .unchecked_isa_dma = 0,
225 .use_clustering = ENABLE_CLUSTERING, 225 .use_clustering = DISABLE_CLUSTERING,
226 .module = THIS_MODULE, 226 .module = THIS_MODULE,
227}; 227};
228 228
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 1dc165ad17fb..e67c14e31bab 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1577,8 +1577,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
1577} 1577}
1578 1578
1579/** 1579/**
1580 * scsi_scan_target - scan a target id, possibly including all LUNs on the 1580 * scsi_scan_target - scan a target id, possibly including all LUNs on the target.
1581 * target.
1582 * @parent: host to scan 1581 * @parent: host to scan
1583 * @channel: channel to scan 1582 * @channel: channel to scan
1584 * @id: target id to scan 1583 * @id: target id to scan
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 3677fbb30b72..a0f308bd145b 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -103,7 +103,6 @@ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
103 if (!cmd) 103 if (!cmd)
104 goto release_rq; 104 goto release_rq;
105 105
106 memset(cmd, 0, sizeof(*cmd));
107 cmd->sc_data_direction = data_dir; 106 cmd->sc_data_direction = data_dir;
108 cmd->jiffies_at_alloc = jiffies; 107 cmd->jiffies_at_alloc = jiffies;
109 cmd->request = rq; 108 cmd->request = rq;
@@ -382,6 +381,11 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
382 scsi_release_buffers(cmd); 381 scsi_release_buffers(cmd);
383 goto unmap_rq; 382 goto unmap_rq;
384 } 383 }
384 /*
385 * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the
386 * length for us.
387 */
388 cmd->sdb.length = rq->data_len;
385 389
386 return 0; 390 return 0;
387 391
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index fac7534f3ec4..ca7bb6f63bde 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -33,7 +33,7 @@
33#define ISCSI_SESSION_ATTRS 19 33#define ISCSI_SESSION_ATTRS 19
34#define ISCSI_CONN_ATTRS 13 34#define ISCSI_CONN_ATTRS 13
35#define ISCSI_HOST_ATTRS 4 35#define ISCSI_HOST_ATTRS 4
36#define ISCSI_TRANSPORT_VERSION "2.0-868" 36#define ISCSI_TRANSPORT_VERSION "2.0-869"
37 37
38struct iscsi_internal { 38struct iscsi_internal {
39 int daemon_pid; 39 int daemon_pid;
@@ -231,7 +231,7 @@ static struct {
231 { ISCSI_SESSION_FREE, "FREE" }, 231 { ISCSI_SESSION_FREE, "FREE" },
232}; 232};
233 233
234const char *iscsi_session_state_name(int state) 234static const char *iscsi_session_state_name(int state)
235{ 235{
236 int i; 236 int i;
237 char *name = NULL; 237 char *name = NULL;
@@ -373,24 +373,25 @@ static void session_recovery_timedout(struct work_struct *work)
373 scsi_target_unblock(&session->dev); 373 scsi_target_unblock(&session->dev);
374} 374}
375 375
376void __iscsi_unblock_session(struct iscsi_cls_session *session) 376static void __iscsi_unblock_session(struct work_struct *work)
377{
378 if (!cancel_delayed_work(&session->recovery_work))
379 flush_workqueue(iscsi_eh_timer_workq);
380 scsi_target_unblock(&session->dev);
381}
382
383void iscsi_unblock_session(struct iscsi_cls_session *session)
384{ 377{
378 struct iscsi_cls_session *session =
379 container_of(work, struct iscsi_cls_session,
380 unblock_work);
385 struct Scsi_Host *shost = iscsi_session_to_shost(session); 381 struct Scsi_Host *shost = iscsi_session_to_shost(session);
386 struct iscsi_host *ihost = shost->shost_data; 382 struct iscsi_host *ihost = shost->shost_data;
387 unsigned long flags; 383 unsigned long flags;
388 384
385 /*
386 * The recovery and unblock work get run from the same workqueue,
387 * so try to cancel it if it was going to run after this unblock.
388 */
389 cancel_delayed_work(&session->recovery_work);
389 spin_lock_irqsave(&session->lock, flags); 390 spin_lock_irqsave(&session->lock, flags);
390 session->state = ISCSI_SESSION_LOGGED_IN; 391 session->state = ISCSI_SESSION_LOGGED_IN;
391 spin_unlock_irqrestore(&session->lock, flags); 392 spin_unlock_irqrestore(&session->lock, flags);
392 393 /* start IO */
393 __iscsi_unblock_session(session); 394 scsi_target_unblock(&session->dev);
394 /* 395 /*
395 * Only do kernel scanning if the driver is properly hooked into 396 * Only do kernel scanning if the driver is properly hooked into
396 * the async scanning code (drivers like iscsi_tcp do login and 397 * the async scanning code (drivers like iscsi_tcp do login and
@@ -401,20 +402,43 @@ void iscsi_unblock_session(struct iscsi_cls_session *session)
401 atomic_inc(&ihost->nr_scans); 402 atomic_inc(&ihost->nr_scans);
402 } 403 }
403} 404}
405
406/**
407 * iscsi_unblock_session - set a session as logged in and start IO.
408 * @session: iscsi session
409 *
410 * Mark a session as ready to accept IO.
411 */
412void iscsi_unblock_session(struct iscsi_cls_session *session)
413{
414 queue_work(iscsi_eh_timer_workq, &session->unblock_work);
415 /*
416 * make sure all the events have completed before tell the driver
417 * it is safe
418 */
419 flush_workqueue(iscsi_eh_timer_workq);
420}
404EXPORT_SYMBOL_GPL(iscsi_unblock_session); 421EXPORT_SYMBOL_GPL(iscsi_unblock_session);
405 422
406void iscsi_block_session(struct iscsi_cls_session *session) 423static void __iscsi_block_session(struct work_struct *work)
407{ 424{
425 struct iscsi_cls_session *session =
426 container_of(work, struct iscsi_cls_session,
427 block_work);
408 unsigned long flags; 428 unsigned long flags;
409 429
410 spin_lock_irqsave(&session->lock, flags); 430 spin_lock_irqsave(&session->lock, flags);
411 session->state = ISCSI_SESSION_FAILED; 431 session->state = ISCSI_SESSION_FAILED;
412 spin_unlock_irqrestore(&session->lock, flags); 432 spin_unlock_irqrestore(&session->lock, flags);
413
414 scsi_target_block(&session->dev); 433 scsi_target_block(&session->dev);
415 queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work, 434 queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work,
416 session->recovery_tmo * HZ); 435 session->recovery_tmo * HZ);
417} 436}
437
438void iscsi_block_session(struct iscsi_cls_session *session)
439{
440 queue_work(iscsi_eh_timer_workq, &session->block_work);
441}
418EXPORT_SYMBOL_GPL(iscsi_block_session); 442EXPORT_SYMBOL_GPL(iscsi_block_session);
419 443
420static void __iscsi_unbind_session(struct work_struct *work) 444static void __iscsi_unbind_session(struct work_struct *work)
@@ -463,6 +487,8 @@ iscsi_alloc_session(struct Scsi_Host *shost,
463 INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); 487 INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
464 INIT_LIST_HEAD(&session->host_list); 488 INIT_LIST_HEAD(&session->host_list);
465 INIT_LIST_HEAD(&session->sess_list); 489 INIT_LIST_HEAD(&session->sess_list);
490 INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
491 INIT_WORK(&session->block_work, __iscsi_block_session);
466 INIT_WORK(&session->unbind_work, __iscsi_unbind_session); 492 INIT_WORK(&session->unbind_work, __iscsi_unbind_session);
467 INIT_WORK(&session->scan_work, iscsi_scan_session); 493 INIT_WORK(&session->scan_work, iscsi_scan_session);
468 spin_lock_init(&session->lock); 494 spin_lock_init(&session->lock);
@@ -575,24 +601,25 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
575 list_del(&session->sess_list); 601 list_del(&session->sess_list);
576 spin_unlock_irqrestore(&sesslock, flags); 602 spin_unlock_irqrestore(&sesslock, flags);
577 603
604 /* make sure there are no blocks/unblocks queued */
605 flush_workqueue(iscsi_eh_timer_workq);
606 /* make sure the timedout callout is not running */
607 if (!cancel_delayed_work(&session->recovery_work))
608 flush_workqueue(iscsi_eh_timer_workq);
578 /* 609 /*
579 * If we are blocked let commands flow again. The lld or iscsi 610 * If we are blocked let commands flow again. The lld or iscsi
580 * layer should set up the queuecommand to fail commands. 611 * layer should set up the queuecommand to fail commands.
612 * We assume that LLD will not be calling block/unblock while
613 * removing the session.
581 */ 614 */
582 spin_lock_irqsave(&session->lock, flags); 615 spin_lock_irqsave(&session->lock, flags);
583 session->state = ISCSI_SESSION_FREE; 616 session->state = ISCSI_SESSION_FREE;
584 spin_unlock_irqrestore(&session->lock, flags); 617 spin_unlock_irqrestore(&session->lock, flags);
585 __iscsi_unblock_session(session);
586 __iscsi_unbind_session(&session->unbind_work);
587 618
588 /* flush running scans */ 619 scsi_target_unblock(&session->dev);
620 /* flush running scans then delete devices */
589 flush_workqueue(ihost->scan_workq); 621 flush_workqueue(ihost->scan_workq);
590 /* 622 __iscsi_unbind_session(&session->unbind_work);
591 * If the session dropped while removing devices then we need to make
592 * sure it is not blocked
593 */
594 if (!cancel_delayed_work(&session->recovery_work))
595 flush_workqueue(iscsi_eh_timer_workq);
596 623
597 /* hw iscsi may not have removed all connections from session */ 624 /* hw iscsi may not have removed all connections from session */
598 err = device_for_each_child(&session->dev, NULL, 625 err = device_for_each_child(&session->dev, NULL,
@@ -802,23 +829,16 @@ EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
802 829
803void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) 830void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
804{ 831{
805 struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
806 struct nlmsghdr *nlh; 832 struct nlmsghdr *nlh;
807 struct sk_buff *skb; 833 struct sk_buff *skb;
808 struct iscsi_uevent *ev; 834 struct iscsi_uevent *ev;
809 struct iscsi_internal *priv; 835 struct iscsi_internal *priv;
810 int len = NLMSG_SPACE(sizeof(*ev)); 836 int len = NLMSG_SPACE(sizeof(*ev));
811 unsigned long flags;
812 837
813 priv = iscsi_if_transport_lookup(conn->transport); 838 priv = iscsi_if_transport_lookup(conn->transport);
814 if (!priv) 839 if (!priv)
815 return; 840 return;
816 841
817 spin_lock_irqsave(&session->lock, flags);
818 if (session->state == ISCSI_SESSION_LOGGED_IN)
819 session->state = ISCSI_SESSION_FAILED;
820 spin_unlock_irqrestore(&session->lock, flags);
821
822 skb = alloc_skb(len, GFP_ATOMIC); 842 skb = alloc_skb(len, GFP_ATOMIC);
823 if (!skb) { 843 if (!skb) {
824 iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored " 844 iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored "
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 37df8bbe7f46..7aee64dbfbeb 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1835,8 +1835,7 @@ static int sd_suspend(struct device *dev, pm_message_t mesg)
1835 goto done; 1835 goto done;
1836 } 1836 }
1837 1837
1838 if (mesg.event == PM_EVENT_SUSPEND && 1838 if ((mesg.event & PM_EVENT_SLEEP) && sdkp->device->manage_start_stop) {
1839 sdkp->device->manage_start_stop) {
1840 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); 1839 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
1841 ret = sd_start_stop_device(sdkp, 0); 1840 ret = sd_start_stop_device(sdkp, 0);
1842 } 1841 }
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index a57fed47b39d..a6d96694d0a5 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -33,9 +33,9 @@
33#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
34 34
35struct ses_device { 35struct ses_device {
36 char *page1; 36 unsigned char *page1;
37 char *page2; 37 unsigned char *page2;
38 char *page10; 38 unsigned char *page10;
39 short page1_len; 39 short page1_len;
40 short page2_len; 40 short page2_len;
41 short page10_len; 41 short page10_len;
@@ -67,7 +67,7 @@ static int ses_probe(struct device *dev)
67static int ses_recv_diag(struct scsi_device *sdev, int page_code, 67static int ses_recv_diag(struct scsi_device *sdev, int page_code,
68 void *buf, int bufflen) 68 void *buf, int bufflen)
69{ 69{
70 char cmd[] = { 70 unsigned char cmd[] = {
71 RECEIVE_DIAGNOSTIC, 71 RECEIVE_DIAGNOSTIC,
72 1, /* Set PCV bit */ 72 1, /* Set PCV bit */
73 page_code, 73 page_code,
@@ -85,7 +85,7 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code,
85{ 85{
86 u32 result; 86 u32 result;
87 87
88 char cmd[] = { 88 unsigned char cmd[] = {
89 SEND_DIAGNOSTIC, 89 SEND_DIAGNOSTIC,
90 0x10, /* Set PF bit */ 90 0x10, /* Set PF bit */
91 0, 91 0,
@@ -104,13 +104,13 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code,
104 104
105static int ses_set_page2_descriptor(struct enclosure_device *edev, 105static int ses_set_page2_descriptor(struct enclosure_device *edev,
106 struct enclosure_component *ecomp, 106 struct enclosure_component *ecomp,
107 char *desc) 107 unsigned char *desc)
108{ 108{
109 int i, j, count = 0, descriptor = ecomp->number; 109 int i, j, count = 0, descriptor = ecomp->number;
110 struct scsi_device *sdev = to_scsi_device(edev->cdev.dev); 110 struct scsi_device *sdev = to_scsi_device(edev->cdev.dev);
111 struct ses_device *ses_dev = edev->scratch; 111 struct ses_device *ses_dev = edev->scratch;
112 char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; 112 unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
113 char *desc_ptr = ses_dev->page2 + 8; 113 unsigned char *desc_ptr = ses_dev->page2 + 8;
114 114
115 /* Clear everything */ 115 /* Clear everything */
116 memset(desc_ptr, 0, ses_dev->page2_len - 8); 116 memset(desc_ptr, 0, ses_dev->page2_len - 8);
@@ -133,14 +133,14 @@ static int ses_set_page2_descriptor(struct enclosure_device *edev,
133 return ses_send_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); 133 return ses_send_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
134} 134}
135 135
136static char *ses_get_page2_descriptor(struct enclosure_device *edev, 136static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev,
137 struct enclosure_component *ecomp) 137 struct enclosure_component *ecomp)
138{ 138{
139 int i, j, count = 0, descriptor = ecomp->number; 139 int i, j, count = 0, descriptor = ecomp->number;
140 struct scsi_device *sdev = to_scsi_device(edev->cdev.dev); 140 struct scsi_device *sdev = to_scsi_device(edev->cdev.dev);
141 struct ses_device *ses_dev = edev->scratch; 141 struct ses_device *ses_dev = edev->scratch;
142 char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; 142 unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
143 char *desc_ptr = ses_dev->page2 + 8; 143 unsigned char *desc_ptr = ses_dev->page2 + 8;
144 144
145 ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); 145 ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
146 146
@@ -160,17 +160,18 @@ static char *ses_get_page2_descriptor(struct enclosure_device *edev,
160static void ses_get_fault(struct enclosure_device *edev, 160static void ses_get_fault(struct enclosure_device *edev,
161 struct enclosure_component *ecomp) 161 struct enclosure_component *ecomp)
162{ 162{
163 char *desc; 163 unsigned char *desc;
164 164
165 desc = ses_get_page2_descriptor(edev, ecomp); 165 desc = ses_get_page2_descriptor(edev, ecomp);
166 ecomp->fault = (desc[3] & 0x60) >> 4; 166 if (desc)
167 ecomp->fault = (desc[3] & 0x60) >> 4;
167} 168}
168 169
169static int ses_set_fault(struct enclosure_device *edev, 170static int ses_set_fault(struct enclosure_device *edev,
170 struct enclosure_component *ecomp, 171 struct enclosure_component *ecomp,
171 enum enclosure_component_setting val) 172 enum enclosure_component_setting val)
172{ 173{
173 char desc[4] = {0 }; 174 unsigned char desc[4] = {0 };
174 175
175 switch (val) { 176 switch (val) {
176 case ENCLOSURE_SETTING_DISABLED: 177 case ENCLOSURE_SETTING_DISABLED:
@@ -190,26 +191,28 @@ static int ses_set_fault(struct enclosure_device *edev,
190static void ses_get_status(struct enclosure_device *edev, 191static void ses_get_status(struct enclosure_device *edev,
191 struct enclosure_component *ecomp) 192 struct enclosure_component *ecomp)
192{ 193{
193 char *desc; 194 unsigned char *desc;
194 195
195 desc = ses_get_page2_descriptor(edev, ecomp); 196 desc = ses_get_page2_descriptor(edev, ecomp);
196 ecomp->status = (desc[0] & 0x0f); 197 if (desc)
198 ecomp->status = (desc[0] & 0x0f);
197} 199}
198 200
199static void ses_get_locate(struct enclosure_device *edev, 201static void ses_get_locate(struct enclosure_device *edev,
200 struct enclosure_component *ecomp) 202 struct enclosure_component *ecomp)
201{ 203{
202 char *desc; 204 unsigned char *desc;
203 205
204 desc = ses_get_page2_descriptor(edev, ecomp); 206 desc = ses_get_page2_descriptor(edev, ecomp);
205 ecomp->locate = (desc[2] & 0x02) ? 1 : 0; 207 if (desc)
208 ecomp->locate = (desc[2] & 0x02) ? 1 : 0;
206} 209}
207 210
208static int ses_set_locate(struct enclosure_device *edev, 211static int ses_set_locate(struct enclosure_device *edev,
209 struct enclosure_component *ecomp, 212 struct enclosure_component *ecomp,
210 enum enclosure_component_setting val) 213 enum enclosure_component_setting val)
211{ 214{
212 char desc[4] = {0 }; 215 unsigned char desc[4] = {0 };
213 216
214 switch (val) { 217 switch (val) {
215 case ENCLOSURE_SETTING_DISABLED: 218 case ENCLOSURE_SETTING_DISABLED:
@@ -229,7 +232,7 @@ static int ses_set_active(struct enclosure_device *edev,
229 struct enclosure_component *ecomp, 232 struct enclosure_component *ecomp,
230 enum enclosure_component_setting val) 233 enum enclosure_component_setting val)
231{ 234{
232 char desc[4] = {0 }; 235 unsigned char desc[4] = {0 };
233 236
234 switch (val) { 237 switch (val) {
235 case ENCLOSURE_SETTING_DISABLED: 238 case ENCLOSURE_SETTING_DISABLED:
@@ -409,11 +412,11 @@ static int ses_intf_add(struct class_device *cdev,
409{ 412{
410 struct scsi_device *sdev = to_scsi_device(cdev->dev); 413 struct scsi_device *sdev = to_scsi_device(cdev->dev);
411 struct scsi_device *tmp_sdev; 414 struct scsi_device *tmp_sdev;
412 unsigned char *buf = NULL, *hdr_buf, *type_ptr, *desc_ptr, 415 unsigned char *buf = NULL, *hdr_buf, *type_ptr, *desc_ptr = NULL,
413 *addl_desc_ptr; 416 *addl_desc_ptr = NULL;
414 struct ses_device *ses_dev; 417 struct ses_device *ses_dev;
415 u32 result; 418 u32 result;
416 int i, j, types, len, components = 0; 419 int i, j, types, len, page7_len = 0, components = 0;
417 int err = -ENOMEM; 420 int err = -ENOMEM;
418 struct enclosure_device *edev; 421 struct enclosure_device *edev;
419 struct ses_component *scomp = NULL; 422 struct ses_component *scomp = NULL;
@@ -447,7 +450,7 @@ static int ses_intf_add(struct class_device *cdev,
447 * traversal routines more complex */ 450 * traversal routines more complex */
448 sdev_printk(KERN_ERR, sdev, 451 sdev_printk(KERN_ERR, sdev,
449 "FIXME driver has no support for subenclosures (%d)\n", 452 "FIXME driver has no support for subenclosures (%d)\n",
450 buf[1]); 453 hdr_buf[1]);
451 goto err_free; 454 goto err_free;
452 } 455 }
453 456
@@ -461,9 +464,8 @@ static int ses_intf_add(struct class_device *cdev,
461 goto recv_failed; 464 goto recv_failed;
462 465
463 types = buf[10]; 466 types = buf[10];
464 len = buf[11];
465 467
466 type_ptr = buf + 12 + len; 468 type_ptr = buf + 12 + buf[11];
467 469
468 for (i = 0; i < types; i++, type_ptr += 4) { 470 for (i = 0; i < types; i++, type_ptr += 4) {
469 if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || 471 if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
@@ -494,22 +496,21 @@ static int ses_intf_add(struct class_device *cdev,
494 /* The additional information page --- allows us 496 /* The additional information page --- allows us
495 * to match up the devices */ 497 * to match up the devices */
496 result = ses_recv_diag(sdev, 10, hdr_buf, INIT_ALLOC_SIZE); 498 result = ses_recv_diag(sdev, 10, hdr_buf, INIT_ALLOC_SIZE);
497 if (result) 499 if (!result) {
498 goto no_page10; 500
499 501 len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
500 len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; 502 buf = kzalloc(len, GFP_KERNEL);
501 buf = kzalloc(len, GFP_KERNEL); 503 if (!buf)
502 if (!buf) 504 goto err_free;
503 goto err_free; 505
504 506 result = ses_recv_diag(sdev, 10, buf, len);
505 result = ses_recv_diag(sdev, 10, buf, len); 507 if (result)
506 if (result) 508 goto recv_failed;
507 goto recv_failed; 509 ses_dev->page10 = buf;
508 ses_dev->page10 = buf; 510 ses_dev->page10_len = len;
509 ses_dev->page10_len = len; 511 buf = NULL;
510 buf = NULL; 512 }
511 513
512 no_page10:
513 scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL); 514 scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
514 if (!scomp) 515 if (!scomp)
515 goto err_free; 516 goto err_free;
@@ -530,7 +531,7 @@ static int ses_intf_add(struct class_device *cdev,
530 if (result) 531 if (result)
531 goto simple_populate; 532 goto simple_populate;
532 533
533 len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; 534 page7_len = len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
534 /* add 1 for trailing '\0' we'll use */ 535 /* add 1 for trailing '\0' we'll use */
535 buf = kzalloc(len + 1, GFP_KERNEL); 536 buf = kzalloc(len + 1, GFP_KERNEL);
536 if (!buf) 537 if (!buf)
@@ -547,7 +548,8 @@ static int ses_intf_add(struct class_device *cdev,
547 len = (desc_ptr[2] << 8) + desc_ptr[3]; 548 len = (desc_ptr[2] << 8) + desc_ptr[3];
548 /* skip past overall descriptor */ 549 /* skip past overall descriptor */
549 desc_ptr += len + 4; 550 desc_ptr += len + 4;
550 addl_desc_ptr = ses_dev->page10 + 8; 551 if (ses_dev->page10)
552 addl_desc_ptr = ses_dev->page10 + 8;
551 } 553 }
552 type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; 554 type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
553 components = 0; 555 components = 0;
@@ -557,29 +559,35 @@ static int ses_intf_add(struct class_device *cdev,
557 struct enclosure_component *ecomp; 559 struct enclosure_component *ecomp;
558 560
559 if (desc_ptr) { 561 if (desc_ptr) {
560 len = (desc_ptr[2] << 8) + desc_ptr[3]; 562 if (desc_ptr >= buf + page7_len) {
561 desc_ptr += 4; 563 desc_ptr = NULL;
562 /* Add trailing zero - pushes into 564 } else {
563 * reserved space */ 565 len = (desc_ptr[2] << 8) + desc_ptr[3];
564 desc_ptr[len] = '\0'; 566 desc_ptr += 4;
565 name = desc_ptr; 567 /* Add trailing zero - pushes into
568 * reserved space */
569 desc_ptr[len] = '\0';
570 name = desc_ptr;
571 }
566 } 572 }
567 if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && 573 if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
568 type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE) 574 type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) {
569 continue; 575
570 ecomp = enclosure_component_register(edev, 576 ecomp = enclosure_component_register(edev,
571 components++, 577 components++,
572 type_ptr[0], 578 type_ptr[0],
573 name); 579 name);
574 if (desc_ptr) { 580
575 desc_ptr += len; 581 if (!IS_ERR(ecomp) && addl_desc_ptr)
576 if (!IS_ERR(ecomp))
577 ses_process_descriptor(ecomp, 582 ses_process_descriptor(ecomp,
578 addl_desc_ptr); 583 addl_desc_ptr);
579
580 if (addl_desc_ptr)
581 addl_desc_ptr += addl_desc_ptr[1] + 2;
582 } 584 }
585 if (desc_ptr)
586 desc_ptr += len;
587
588 if (addl_desc_ptr)
589 addl_desc_ptr += addl_desc_ptr[1] + 2;
590
583 } 591 }
584 } 592 }
585 kfree(buf); 593 kfree(buf);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 71952703125a..0a52d9d2da2c 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -17,7 +17,7 @@
17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support 17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
18 */ 18 */
19 19
20static const char *verstr = "20080117"; 20static const char *verstr = "20080221";
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23 23
@@ -1172,7 +1172,7 @@ static int st_open(struct inode *inode, struct file *filp)
1172 STp->try_dio_now = STp->try_dio; 1172 STp->try_dio_now = STp->try_dio;
1173 STp->recover_count = 0; 1173 STp->recover_count = 0;
1174 DEB( STp->nbr_waits = STp->nbr_finished = 0; 1174 DEB( STp->nbr_waits = STp->nbr_finished = 0;
1175 STp->nbr_requests = STp->nbr_dio = STp->nbr_pages = STp->nbr_combinable = 0; ) 1175 STp->nbr_requests = STp->nbr_dio = STp->nbr_pages = 0; )
1176 1176
1177 retval = check_tape(STp, filp); 1177 retval = check_tape(STp, filp);
1178 if (retval < 0) 1178 if (retval < 0)
@@ -1226,8 +1226,8 @@ static int st_flush(struct file *filp, fl_owner_t id)
1226 } 1226 }
1227 1227
1228 DEBC( if (STp->nbr_requests) 1228 DEBC( if (STp->nbr_requests)
1229 printk(KERN_DEBUG "%s: Number of r/w requests %d, dio used in %d, pages %d (%d).\n", 1229 printk(KERN_DEBUG "%s: Number of r/w requests %d, dio used in %d, pages %d.\n",
1230 name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages, STp->nbr_combinable)); 1230 name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages));
1231 1231
1232 if (STps->rw == ST_WRITING && !STp->pos_unknown) { 1232 if (STps->rw == ST_WRITING && !STp->pos_unknown) {
1233 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; 1233 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
@@ -1422,9 +1422,6 @@ static int setup_buffering(struct scsi_tape *STp, const char __user *buf,
1422 if (STbp->do_dio) { 1422 if (STbp->do_dio) {
1423 STp->nbr_dio++; 1423 STp->nbr_dio++;
1424 STp->nbr_pages += STbp->do_dio; 1424 STp->nbr_pages += STbp->do_dio;
1425 for (i=1; i < STbp->do_dio; i++)
1426 if (page_to_pfn(STbp->sg[i].page) == page_to_pfn(STbp->sg[i-1].page) + 1)
1427 STp->nbr_combinable++;
1428 } 1425 }
1429 ) 1426 )
1430 } else 1427 } else
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 6c8075712974..5931726fcf93 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -164,7 +164,6 @@ struct scsi_tape {
164 int nbr_requests; 164 int nbr_requests;
165 int nbr_dio; 165 int nbr_dio;
166 int nbr_pages; 166 int nbr_pages;
167 int nbr_combinable;
168 unsigned char last_cmnd[6]; 167 unsigned char last_cmnd[6];
169 unsigned char last_sense[16]; 168 unsigned char last_sense[16];
170#endif 169#endif
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 72f6d8015358..654430edf74d 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -461,30 +461,14 @@ static void stex_internal_copy(struct scsi_cmnd *cmd,
461 } 461 }
462} 462}
463 463
464static int stex_direct_copy(struct scsi_cmnd *cmd,
465 const void *src, size_t count)
466{
467 size_t cp_len = count;
468 int n_elem = 0;
469
470 n_elem = scsi_dma_map(cmd);
471 if (n_elem < 0)
472 return 0;
473
474 stex_internal_copy(cmd, src, &cp_len, n_elem, ST_TO_CMD);
475
476 scsi_dma_unmap(cmd);
477
478 return cp_len == count;
479}
480
481static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb) 464static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
482{ 465{
483 struct st_frame *p; 466 struct st_frame *p;
484 size_t count = sizeof(struct st_frame); 467 size_t count = sizeof(struct st_frame);
485 468
486 p = hba->copy_buffer; 469 p = hba->copy_buffer;
487 stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count, ST_FROM_CMD); 470 stex_internal_copy(ccb->cmd, p, &count, scsi_sg_count(ccb->cmd),
471 ST_FROM_CMD);
488 memset(p->base, 0, sizeof(u32)*6); 472 memset(p->base, 0, sizeof(u32)*6);
489 *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0); 473 *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
490 p->rom_addr = 0; 474 p->rom_addr = 0;
@@ -502,7 +486,8 @@ static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
502 p->subid = 486 p->subid =
503 hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device; 487 hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;
504 488
505 stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count, ST_TO_CMD); 489 stex_internal_copy(ccb->cmd, p, &count, scsi_sg_count(ccb->cmd),
490 ST_TO_CMD);
506} 491}
507 492
508static void 493static void
@@ -569,8 +554,10 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
569 unsigned char page; 554 unsigned char page;
570 page = cmd->cmnd[2] & 0x3f; 555 page = cmd->cmnd[2] & 0x3f;
571 if (page == 0x8 || page == 0x3f) { 556 if (page == 0x8 || page == 0x3f) {
572 stex_direct_copy(cmd, ms10_caching_page, 557 size_t cp_len = sizeof(ms10_caching_page);
573 sizeof(ms10_caching_page)); 558 stex_internal_copy(cmd, ms10_caching_page,
559 &cp_len, scsi_sg_count(cmd),
560 ST_TO_CMD);
574 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 561 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
575 done(cmd); 562 done(cmd);
576 } else 563 } else
@@ -599,8 +586,10 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
599 if (id != host->max_id - 1) 586 if (id != host->max_id - 1)
600 break; 587 break;
601 if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) { 588 if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
602 stex_direct_copy(cmd, console_inq_page, 589 size_t cp_len = sizeof(console_inq_page);
603 sizeof(console_inq_page)); 590 stex_internal_copy(cmd, console_inq_page,
591 &cp_len, scsi_sg_count(cmd),
592 ST_TO_CMD);
604 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 593 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
605 done(cmd); 594 done(cmd);
606 } else 595 } else
@@ -609,6 +598,7 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
609 case PASSTHRU_CMD: 598 case PASSTHRU_CMD:
610 if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) { 599 if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
611 struct st_drvver ver; 600 struct st_drvver ver;
601 size_t cp_len = sizeof(ver);
612 ver.major = ST_VER_MAJOR; 602 ver.major = ST_VER_MAJOR;
613 ver.minor = ST_VER_MINOR; 603 ver.minor = ST_VER_MINOR;
614 ver.oem = ST_OEM; 604 ver.oem = ST_OEM;
@@ -616,7 +606,9 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
616 ver.signature[0] = PASSTHRU_SIGNATURE; 606 ver.signature[0] = PASSTHRU_SIGNATURE;
617 ver.console_id = host->max_id - 1; 607 ver.console_id = host->max_id - 1;
618 ver.host_no = hba->host->host_no; 608 ver.host_no = hba->host->host_no;
619 cmd->result = stex_direct_copy(cmd, &ver, sizeof(ver)) ? 609 stex_internal_copy(cmd, &ver, &cp_len,
610 scsi_sg_count(cmd), ST_TO_CMD);
611 cmd->result = sizeof(ver) == cp_len ?
620 DID_OK << 16 | COMMAND_COMPLETE << 8 : 612 DID_OK << 16 | COMMAND_COMPLETE << 8 :
621 DID_ERROR << 16 | COMMAND_COMPLETE << 8; 613 DID_ERROR << 16 | COMMAND_COMPLETE << 8;
622 done(cmd); 614 done(cmd);
@@ -709,7 +701,7 @@ static void stex_copy_data(struct st_ccb *ccb,
709 if (ccb->cmd == NULL) 701 if (ccb->cmd == NULL)
710 return; 702 return;
711 stex_internal_copy(ccb->cmd, 703 stex_internal_copy(ccb->cmd,
712 resp->variable, &count, ccb->sg_count, ST_TO_CMD); 704 resp->variable, &count, scsi_sg_count(ccb->cmd), ST_TO_CMD);
713} 705}
714 706
715static void stex_ys_commands(struct st_hba *hba, 707static void stex_ys_commands(struct st_hba *hba,
@@ -734,7 +726,7 @@ static void stex_ys_commands(struct st_hba *hba,
734 726
735 count = STEX_EXTRA_SIZE; 727 count = STEX_EXTRA_SIZE;
736 stex_internal_copy(ccb->cmd, hba->copy_buffer, 728 stex_internal_copy(ccb->cmd, hba->copy_buffer,
737 &count, ccb->sg_count, ST_FROM_CMD); 729 &count, scsi_sg_count(ccb->cmd), ST_FROM_CMD);
738 inq_data = (ST_INQ *)hba->copy_buffer; 730 inq_data = (ST_INQ *)hba->copy_buffer;
739 if (inq_data->DeviceTypeQualifier != 0) 731 if (inq_data->DeviceTypeQualifier != 0)
740 ccb->srb_status = SRB_STATUS_SELECTION_TIMEOUT; 732 ccb->srb_status = SRB_STATUS_SELECTION_TIMEOUT;