author    Linus Torvalds <torvalds@linux-foundation.org>  2012-01-18 15:35:17 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-01-18 15:35:17 -0500
commit    d71f5be276bf79eda14c40f3cafcf827326f10cb (patch)
tree      581dadb90b06b950882a8eb75deee23d3370a4ab /drivers
parent    92b5abbb44e05cdbc4483219f30a435dd871a8ea (diff)
parent    76ffe8a3f766358a0ade543153625b3e4e66159d (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
SCSI updates on 20120118

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (49 commits)
  [SCSI] libfc: remove redundant timer init for fcp
  [SCSI] fcoe: Move fcoe_debug_logging from fcoe.h to fcoe.c
  [SCSI] libfc: Declare local functions static
  [SCSI] fcoe: fix regression on offload em matching function for initiator/target
  [SCSI] qla4xxx: Update driver version to 5.02.00-k12
  [SCSI] qla4xxx: Cleanup modinfo display
  [SCSI] qla4xxx: Update license
  [SCSI] qla4xxx: Clear the RISC interrupt bit during FW init
  [SCSI] qla4xxx: Added error logging for firmware abort
  [SCSI] qla4xxx: Disable generating pause frames in case of FW hung
  [SCSI] qla4xxx: Temperature monitoring for ISP82XX core.
  [SCSI] megaraid: fix sparse warnings
  [SCSI] sg: convert to kstrtoul_from_user()
  [SCSI] don't change sdev starvation list order without request dispatched
  [SCSI] isci: fix, prevent port from getting stuck in the 'configuring' state
  [SCSI] isci: fix start OOB
  [SCSI] isci: fix io failures while wide port links are coming up
  [SCSI] isci: allow more time for wide port targets
  [SCSI] isci: enable wide port targets
  [SCSI] isci: Fix IO fails when pull cable from phy in x4 wideport in MPC mode.
  ...
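One of the commits pulled in above ("[SCSI] sg: convert to kstrtoul_from_user()") replaces open-coded user-buffer copying and parsing with the kstrtoul_from_user() helper. As a rough illustration only (the handler and variable names below are hypothetical and not taken from sg.c), such a conversion typically ends up looking like this:

#include <linux/kernel.h>
#include <linux/fs.h>

static unsigned long example_param;	/* hypothetical tunable */

/* Sketch of a procfs-style write handler after the conversion. */
static ssize_t example_param_write(struct file *filp, const char __user *buffer,
				   size_t count, loff_t *off)
{
	unsigned long val;
	int err;

	/* copy from user space and strict-parse in a single helper call */
	err = kstrtoul_from_user(buffer, count, 10, &val);
	if (err)
		return err;

	example_param = val;
	return count;
}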
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/scsi/Kconfig | 5
-rw-r--r--  drivers/scsi/bfa/bfa_defs_svc.h | 7
-rw-r--r--  drivers/scsi/bfa/bfa_fc.h | 155
-rw-r--r--  drivers/scsi/bfa/bfa_fcpim.c | 416
-rw-r--r--  drivers/scsi/bfa/bfa_fcpim.h | 7
-rw-r--r--  drivers/scsi/bfa/bfa_svc.h | 5
-rw-r--r--  drivers/scsi/bfa/bfad.c | 2
-rw-r--r--  drivers/scsi/bfa/bfad_attr.c | 2
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.c | 27
-rw-r--r--  drivers/scsi/bfa/bfad_drv.h | 2
-rw-r--r--  drivers/scsi/bfa/bfad_im.c | 56
-rw-r--r--  drivers/scsi/bfa/bfad_im.h | 27
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 5
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 5
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 2
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 46
-rw-r--r--  drivers/scsi/fcoe/fcoe.h | 4
-rw-r--r--  drivers/scsi/hpsa.c | 2
-rw-r--r--  drivers/scsi/isci/firmware/Makefile | 19
-rw-r--r--  drivers/scsi/isci/firmware/README | 36
-rw-r--r--  drivers/scsi/isci/firmware/create_fw.c | 99
-rw-r--r--  drivers/scsi/isci/firmware/create_fw.h | 77
-rw-r--r--  drivers/scsi/isci/host.c | 340
-rw-r--r--  drivers/scsi/isci/host.h | 27
-rw-r--r--  drivers/scsi/isci/init.c | 25
-rw-r--r--  drivers/scsi/isci/isci.h | 1
-rw-r--r--  drivers/scsi/isci/phy.c | 172
-rw-r--r--  drivers/scsi/isci/port.c | 104
-rw-r--r--  drivers/scsi/isci/port.h | 10
-rw-r--r--  drivers/scsi/isci/port_config.c | 35
-rw-r--r--  drivers/scsi/isci/probe_roms.c | 2
-rw-r--r--  drivers/scsi/isci/probe_roms.h | 89
-rw-r--r--  drivers/scsi/isci/remote_device.c | 10
-rw-r--r--  drivers/scsi/isci/task.c | 2
-rw-r--r--  drivers/scsi/isci/task.h | 7
-rw-r--r--  drivers/scsi/libfc/fc_disc.c | 6
-rw-r--r--  drivers/scsi/libfc/fc_elsct.c | 1
-rw-r--r--  drivers/scsi/libfc/fc_exch.c | 2
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 4
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 5
-rw-r--r--  drivers/scsi/libfc/fc_rport.c | 10
-rw-r--r--  drivers/scsi/megaraid.c | 13
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 8
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 145
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fp.c | 4
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 3
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 3
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 7
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 5
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.h | 22
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 511
-rw-r--r--  drivers/scsi/qla4xxx/ql4_version.h | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 7
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 3
-rw-r--r--  drivers/scsi/sg.c | 25
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 4
56 files changed, 1179 insertions, 1441 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 06ea3bcfdd2a..16570aa84aac 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -830,16 +830,11 @@ config SCSI_ISCI
 	tristate "Intel(R) C600 Series Chipset SAS Controller"
 	depends on PCI && SCSI
 	depends on X86
-	# (temporary): known alpha quality driver
-	depends on EXPERIMENTAL
 	select SCSI_SAS_LIBSAS
-	select SCSI_SAS_HOST_SMP
 	---help---
 	  This driver supports the 6Gb/s SAS capabilities of the storage
 	  control unit found in the Intel(R) C600 series chipset.
 
-	  The experimental tag will be removed after the driver exits alpha
-
 config SCSI_GENERIC_NCR5380
 	tristate "Generic NCR5380/53c400 SCSI PIO support"
 	depends on ISA && SCSI
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 78963be2c4fb..cb07c628b2f1 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -673,12 +673,7 @@ struct bfa_itnim_iostats_s {
 	u32	tm_iocdowns;		/* TM cleaned-up due to IOC down */
 	u32	tm_cleanups;		/* TM cleanup requests */
 	u32	tm_cleanup_comps;	/* TM cleanup completions */
-	u32	lm_lun_across_sg;	/* LM lun is across sg data buf */
-	u32	lm_lun_not_sup;		/* LM lun not supported */
-	u32	lm_rpl_data_changed;	/* LM report-lun data changed */
-	u32	lm_wire_residue_changed; /* LM report-lun rsp residue changed */
-	u32	lm_small_buf_addresidue; /* LM buf smaller than reported cnt */
-	u32	lm_lun_not_rdy;		/* LM lun not ready */
+	u32	rsvd[6];
 };
 
 /* Modify char* port_stt[] in bfal_port.c if a new state was added */
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index 50b6a1c86195..8d0b88f67a38 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -56,161 +56,6 @@ struct scsi_cdb_s {
56 56
57#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length */ 57#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length */
58 58
59#define SCSI_SENSE_CUR_ERR 0x70
60#define SCSI_SENSE_DEF_ERR 0x71
61
62/*
63 * SCSI additional sense codes
64 */
65#define SCSI_ASC_LUN_NOT_READY 0x04
66#define SCSI_ASC_LUN_NOT_SUPPORTED 0x25
67#define SCSI_ASC_TOCC 0x3F
68
69/*
70 * SCSI additional sense code qualifiers
71 */
72#define SCSI_ASCQ_MAN_INTR_REQ 0x03 /* manual intervention req */
73#define SCSI_ASCQ_RL_DATA_CHANGED 0x0E /* report luns data changed */
74
75/*
76 * Methods of reporting informational exceptions
77 */
78#define SCSI_MP_IEC_UNIT_ATTN 0x2 /* generate unit attention */
79
80struct scsi_report_luns_data_s {
81 u32 lun_list_length; /* length of LUN list length */
82 u32 reserved;
83 struct scsi_lun lun[1]; /* first LUN in lun list */
84};
85
86struct scsi_inquiry_vendor_s {
87 u8 vendor_id[8];
88};
89
90struct scsi_inquiry_prodid_s {
91 u8 product_id[16];
92};
93
94struct scsi_inquiry_prodrev_s {
95 u8 product_rev[4];
96};
97
98struct scsi_inquiry_data_s {
99#ifdef __BIG_ENDIAN
100 u8 peripheral_qual:3; /* peripheral qualifier */
101 u8 device_type:5; /* peripheral device type */
102 u8 rmb:1; /* removable medium bit */
103 u8 device_type_mod:7; /* device type modifier */
104 u8 version;
105 u8 aenc:1; /* async evt notification capability */
106 u8 trm_iop:1; /* terminate I/O process */
107 u8 norm_aca:1; /* normal ACA supported */
108 u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
109 u8 rsp_data_format:4;
110 u8 additional_len;
111 u8 sccs:1;
112 u8 reserved1:7;
113 u8 reserved2:1;
114 u8 enc_serv:1; /* enclosure service component */
115 u8 reserved3:1;
116 u8 multi_port:1; /* multi-port device */
117 u8 m_chngr:1; /* device in medium transport element */
118 u8 ack_req_q:1; /* SIP specific bit */
119 u8 addr32:1; /* SIP specific bit */
120 u8 addr16:1; /* SIP specific bit */
121 u8 rel_adr:1; /* relative address */
122 u8 w_bus32:1;
123 u8 w_bus16:1;
124 u8 synchronous:1;
125 u8 linked_commands:1;
126 u8 trans_dis:1;
127 u8 cmd_queue:1; /* command queueing supported */
128 u8 soft_reset:1; /* soft reset alternative (VS) */
129#else
130 u8 device_type:5; /* peripheral device type */
131 u8 peripheral_qual:3; /* peripheral qualifier */
132 u8 device_type_mod:7; /* device type modifier */
133 u8 rmb:1; /* removable medium bit */
134 u8 version;
135 u8 rsp_data_format:4;
136 u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
137 u8 norm_aca:1; /* normal ACA supported */
138 u8 terminate_iop:1;/* terminate I/O process */
139 u8 aenc:1; /* async evt notification capability */
140 u8 additional_len;
141 u8 reserved1:7;
142 u8 sccs:1;
143 u8 addr16:1; /* SIP specific bit */
144 u8 addr32:1; /* SIP specific bit */
145 u8 ack_req_q:1; /* SIP specific bit */
146 u8 m_chngr:1; /* device in medium transport element */
147 u8 multi_port:1; /* multi-port device */
148 u8 reserved3:1; /* TBD - Vendor Specific */
149 u8 enc_serv:1; /* enclosure service component */
150 u8 reserved2:1;
151 u8 soft_seset:1; /* soft reset alternative (VS) */
152 u8 cmd_queue:1; /* command queueing supported */
153 u8 trans_dis:1;
154 u8 linked_commands:1;
155 u8 synchronous:1;
156 u8 w_bus16:1;
157 u8 w_bus32:1;
158 u8 rel_adr:1; /* relative address */
159#endif
160 struct scsi_inquiry_vendor_s vendor_id;
161 struct scsi_inquiry_prodid_s product_id;
162 struct scsi_inquiry_prodrev_s product_rev;
163 u8 vendor_specific[20];
164 u8 reserved4[40];
165};
166
167/*
168 * SCSI sense data format
169 */
170struct scsi_sense_s {
171#ifdef __BIG_ENDIAN
172 u8 valid:1;
173 u8 rsp_code:7;
174#else
175 u8 rsp_code:7;
176 u8 valid:1;
177#endif
178 u8 seg_num;
179#ifdef __BIG_ENDIAN
180 u8 file_mark:1;
181 u8 eom:1; /* end of media */
182 u8 ili:1; /* incorrect length indicator */
183 u8 reserved:1;
184 u8 sense_key:4;
185#else
186 u8 sense_key:4;
187 u8 reserved:1;
188 u8 ili:1; /* incorrect length indicator */
189 u8 eom:1; /* end of media */
190 u8 file_mark:1;
191#endif
192 u8 information[4]; /* device-type or cmd specific info */
193 u8 add_sense_length; /* additional sense length */
194 u8 command_info[4];/* command specific information */
195 u8 asc; /* additional sense code */
196 u8 ascq; /* additional sense code qualifier */
197 u8 fru_code; /* field replaceable unit code */
198#ifdef __BIG_ENDIAN
199 u8 sksv:1; /* sense key specific valid */
200 u8 c_d:1; /* command/data bit */
201 u8 res1:2;
202 u8 bpv:1; /* bit pointer valid */
203 u8 bpointer:3; /* bit pointer */
204#else
205 u8 bpointer:3; /* bit pointer */
206 u8 bpv:1; /* bit pointer valid */
207 u8 res1:2;
208 u8 c_d:1; /* command/data bit */
209 u8 sksv:1; /* sense key specific valid */
210#endif
211 u8 fpointer[2]; /* field pointer */
212};
213
214/* 59/*
215 * Fibre Channel Header Structure (FCHS) definition 60 * Fibre Channel Header Structure (FCHS) definition
216 */ 61 */
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index e07bd4745d8b..f0f80e282e39 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -24,8 +24,6 @@ BFA_TRC_FILE(HAL, FCPIM);
24 * BFA ITNIM Related definitions 24 * BFA ITNIM Related definitions
25 */ 25 */
26static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); 26static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
27static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
28static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
29static void bfa_ioim_lm_init(struct bfa_s *bfa); 27static void bfa_ioim_lm_init(struct bfa_s *bfa);
30 28
31#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \ 29#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
@@ -60,14 +58,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa);
60 } \ 58 } \
61} while (0) 59} while (0)
62 60
63#define bfa_ioim_rp_wwn(__ioim) \
64 (((struct bfa_fcs_rport_s *) \
65 (__ioim)->itnim->rport->rport_drv)->pwwn)
66
67#define bfa_ioim_lp_wwn(__ioim) \
68 ((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa), \
69 (__ioim)->itnim->rport->rport_info.lp_tag))->pwwn) \
70
71#define bfa_itnim_sler_cb(__itnim) do { \ 61#define bfa_itnim_sler_cb(__itnim) do { \
72 if ((__itnim)->bfa->fcs) \ 62 if ((__itnim)->bfa->fcs) \
73 bfa_cb_itnim_sler((__itnim)->ditn); \ 63 bfa_cb_itnim_sler((__itnim)->ditn); \
@@ -77,13 +67,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa);
77 } \ 67 } \
78} while (0) 68} while (0)
79 69
80enum bfa_ioim_lm_status {
81 BFA_IOIM_LM_PRESENT = 1,
82 BFA_IOIM_LM_LUN_NOT_SUP = 2,
83 BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
84 BFA_IOIM_LM_LUN_NOT_RDY = 4,
85};
86
87enum bfa_ioim_lm_ua_status { 70enum bfa_ioim_lm_ua_status {
88 BFA_IOIM_LM_UA_RESET = 0, 71 BFA_IOIM_LM_UA_RESET = 0,
89 BFA_IOIM_LM_UA_SET = 1, 72 BFA_IOIM_LM_UA_SET = 1,
@@ -145,9 +128,6 @@ enum bfa_ioim_event {
145 BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */ 128 BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
146 BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */ 129 BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
147 BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */ 130 BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
148 BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/* lunmask lun not supported */
149 BFA_IOIM_SM_LM_RPL_DC = 20, /* lunmask report-lun data changed */
150 BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/* lunmask lun not ready */
151}; 131};
152 132
153 133
@@ -245,9 +225,6 @@ static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
245static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete); 225static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
246static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete); 226static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
247static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim); 227static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
248static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
249static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
250static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);
251 228
252/* 229/*
253 * forward declaration of BFA IO state machine 230 * forward declaration of BFA IO state machine
@@ -445,12 +422,6 @@ bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
445 bfa_fcpim_add_iostats(lstats, rstats, output_reqs); 422 bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
446 bfa_fcpim_add_iostats(lstats, rstats, rd_throughput); 423 bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
447 bfa_fcpim_add_iostats(lstats, rstats, wr_throughput); 424 bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
448 bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
449 bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
450 bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
451 bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
452 bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
453 bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
454} 425}
455 426
456bfa_status_t 427bfa_status_t
@@ -1580,27 +1551,6 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1580 __bfa_cb_ioim_abort, ioim); 1551 __bfa_cb_ioim_abort, ioim);
1581 break; 1552 break;
1582 1553
1583 case BFA_IOIM_SM_LM_LUN_NOT_SUP:
1584 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1585 bfa_ioim_move_to_comp_q(ioim);
1586 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1587 __bfa_cb_ioim_lm_lun_not_sup, ioim);
1588 break;
1589
1590 case BFA_IOIM_SM_LM_RPL_DC:
1591 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1592 bfa_ioim_move_to_comp_q(ioim);
1593 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1594 __bfa_cb_ioim_lm_rpl_dc, ioim);
1595 break;
1596
1597 case BFA_IOIM_SM_LM_LUN_NOT_RDY:
1598 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1599 bfa_ioim_move_to_comp_q(ioim);
1600 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1601 __bfa_cb_ioim_lm_lun_not_rdy, ioim);
1602 break;
1603
1604 default: 1554 default:
1605 bfa_sm_fault(ioim->bfa, event); 1555 bfa_sm_fault(ioim->bfa, event);
1606 } 1556 }
@@ -2160,243 +2110,6 @@ bfa_ioim_lm_init(struct bfa_s *bfa)
2160 } 2110 }
2161} 2111}
2162 2112
2163/*
2164 * Validate LUN for LUN masking
2165 */
2166static enum bfa_ioim_lm_status
2167bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
2168 struct bfa_rport_s *rp, struct scsi_lun lun)
2169{
2170 u8 i;
2171 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
2172 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
2173 struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;
2174
2175 if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
2176 (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
2177 ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
2178 return BFA_IOIM_LM_PRESENT;
2179 }
2180
2181 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2182
2183 if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2184 continue;
2185
2186 if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
2187 scsilun_to_int((struct scsi_lun *)&lun))
2188 && (rp->rport_tag == lun_list[i].rp_tag)
2189 && ((u8)ioim->itnim->rport->rport_info.lp_tag ==
2190 lun_list[i].lp_tag)) {
2191 bfa_trc(ioim->bfa, lun_list[i].rp_tag);
2192 bfa_trc(ioim->bfa, lun_list[i].lp_tag);
2193 bfa_trc(ioim->bfa, scsilun_to_int(
2194 (struct scsi_lun *)&lun_list[i].lun));
2195
2196 if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
2197 ((cdb->scsi_cdb[0] != INQUIRY) ||
2198 (cdb->scsi_cdb[0] != REPORT_LUNS))) {
2199 lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
2200 return BFA_IOIM_LM_RPL_DATA_CHANGED;
2201 }
2202
2203 if (cdb->scsi_cdb[0] == REPORT_LUNS)
2204 ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
2205
2206 return BFA_IOIM_LM_PRESENT;
2207 }
2208 }
2209
2210 if ((cdb->scsi_cdb[0] == INQUIRY) &&
2211 (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
2212 ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
2213 return BFA_IOIM_LM_PRESENT;
2214 }
2215
2216 if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
2217 return BFA_IOIM_LM_LUN_NOT_RDY;
2218
2219 return BFA_IOIM_LM_LUN_NOT_SUP;
2220}
2221
2222static bfa_boolean_t
2223bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
2224{
2225 return BFA_TRUE;
2226}
2227
2228static void
2229bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
2230 int buf_lun_cnt)
2231{
2232 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
2233 struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
2234 struct scsi_lun lun;
2235 int i, j;
2236
2237 bfa_trc(ioim->bfa, buf_lun_cnt);
2238 for (j = 0; j < buf_lun_cnt; j++) {
2239 lun = *((struct scsi_lun *)(lun_data + j));
2240 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2241 if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2242 continue;
2243 if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
2244 (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
2245 (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
2246 == scsilun_to_int((struct scsi_lun *)&lun))) {
2247 lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
2248 break;
2249 }
2250 } /* next lun in mask DB */
2251 } /* next lun in buf */
2252}
2253
2254static int
2255bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
2256 struct scsi_report_luns_data_s *rl)
2257{
2258 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
2259 struct scatterlist *sg = scsi_sglist(cmnd);
2260 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
2261 struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
2262 int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
2263 int lun_across_sg_bytes, bytes_from_next_buf;
2264 u64 last_lun, temp_last_lun;
2265
2266 /* fetch luns from the first sg element */
2267 bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
2268 (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);
2269
2270 /* fetch luns from multiple sg elements */
2271 scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
2272 if (sgeid == 0) {
2273 prev_sg_len = sg_dma_len(sg);
2274 prev_rl_data = (struct scsi_lun *)
2275 phys_to_virt(sg_dma_address(sg));
2276 continue;
2277 }
2278
2279 /* if the buf is having more data */
2280 lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
2281 if (lun_across_sg_bytes) {
2282 bfa_trc(ioim->bfa, lun_across_sg_bytes);
2283 bfa_stats(ioim->itnim, lm_lun_across_sg);
2284 bytes_from_next_buf = sizeof(struct scsi_lun) -
2285 lun_across_sg_bytes;
2286
2287 /* from next buf take higher bytes */
2288 temp_last_lun = *((u64 *)
2289 phys_to_virt(sg_dma_address(sg)));
2290 last_lun |= temp_last_lun >>
2291 (lun_across_sg_bytes * BITS_PER_BYTE);
2292
2293 /* from prev buf take higher bytes */
2294 temp_last_lun = *((u64 *)(prev_rl_data +
2295 (prev_sg_len - lun_across_sg_bytes)));
2296 temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
2297 last_lun = last_lun | (temp_last_lun <<
2298 (bytes_from_next_buf * BITS_PER_BYTE));
2299
2300 bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
2301 } else
2302 bytes_from_next_buf = 0;
2303
2304 *pgdlen += sg_dma_len(sg);
2305 prev_sg_len = sg_dma_len(sg);
2306 prev_rl_data = (struct scsi_lun *)
2307 phys_to_virt(sg_dma_address(sg));
2308 bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
2309 bytes_from_next_buf,
2310 sg_dma_len(sg) / sizeof(struct scsi_lun));
2311 }
2312
2313 /* update the report luns data - based on fetched luns */
2314 sg = scsi_sglist(cmnd);
2315 base_rl_data = (struct scsi_lun *)rl->lun;
2316 base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
2317 for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
2318 if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
2319 base_rl_data[j] = lun_list[i].lun;
2320 lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
2321 j++;
2322 lun_fetched_cnt++;
2323 }
2324
2325 if (j > base_count) {
2326 j = 0;
2327 sg = sg_next(sg);
2328 base_rl_data = (struct scsi_lun *)
2329 phys_to_virt(sg_dma_address(sg));
2330 base_count = sg_dma_len(sg) / sizeof(struct scsi_lun);
2331 }
2332 }
2333
2334 bfa_trc(ioim->bfa, lun_fetched_cnt);
2335 return lun_fetched_cnt;
2336}
2337
2338static bfa_boolean_t
2339bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
2340{
2341 struct scsi_inquiry_data_s *inq;
2342 struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);
2343
2344 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2345 inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));
2346
2347 bfa_trc(ioim->bfa, inq->device_type);
2348 inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
2349 return 0;
2350}
2351
2352static bfa_boolean_t
2353bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
2354{
2355 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
2356 struct scatterlist *sg = scsi_sglist(cmnd);
2357 struct bfi_ioim_rsp_s *m;
2358 struct scsi_report_luns_data_s *rl = NULL;
2359 int lun_count = 0, lun_fetched_cnt = 0;
2360 u32 residue, pgdlen = 0;
2361
2362 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2363 if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
2364 return BFA_TRUE;
2365
2366 m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2367 if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
2368 return BFA_TRUE;
2369
2370 pgdlen = sg_dma_len(sg);
2371 bfa_trc(ioim->bfa, pgdlen);
2372 rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
2373 lun_count = cpu_to_be32(rl->lun_list_length) / sizeof(struct scsi_lun);
2374 lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);
2375
2376 if (lun_count == lun_fetched_cnt)
2377 return BFA_TRUE;
2378
2379 bfa_trc(ioim->bfa, lun_count);
2380 bfa_trc(ioim->bfa, lun_fetched_cnt);
2381 bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
2382
2383 if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
2384 rl->lun_list_length = be32_to_cpu(lun_fetched_cnt) *
2385 sizeof(struct scsi_lun);
2386 else
2387 bfa_stats(ioim->itnim, lm_small_buf_addresidue);
2388
2389 bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
2390 bfa_trc(ioim->bfa, be32_to_cpu(m->residue));
2391
2392 residue = be32_to_cpu(m->residue);
2393 residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
2394 bfa_stats(ioim->itnim, lm_wire_residue_changed);
2395 m->residue = be32_to_cpu(residue);
2396 bfa_trc(ioim->bfa, ioim->nsges);
2397 return BFA_FALSE;
2398}
2399
2400static void 2113static void
2401__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete) 2114__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
2402{ 2115{
@@ -2454,83 +2167,6 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2454 m->scsi_status, sns_len, snsinfo, residue); 2167 m->scsi_status, sns_len, snsinfo, residue);
2455} 2168}
2456 2169
2457static void
2458__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
2459{
2460 struct bfa_ioim_s *ioim = cbarg;
2461 int sns_len = 0xD;
2462 u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
2463 struct scsi_sense_s *snsinfo;
2464
2465 if (!complete) {
2466 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2467 return;
2468 }
2469
2470 snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
2471 ioim->fcpim->fcp, ioim->iotag);
2472 snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
2473 snsinfo->add_sense_length = 0xa;
2474 snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
2475 snsinfo->sense_key = ILLEGAL_REQUEST;
2476 bfa_trc(ioim->bfa, residue);
2477 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
2478 SCSI_STATUS_CHECK_CONDITION, sns_len,
2479 (u8 *)snsinfo, residue);
2480}
2481
2482static void
2483__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
2484{
2485 struct bfa_ioim_s *ioim = cbarg;
2486 int sns_len = 0xD;
2487 u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
2488 struct scsi_sense_s *snsinfo;
2489
2490 if (!complete) {
2491 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2492 return;
2493 }
2494
2495 snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
2496 ioim->iotag);
2497 snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
2498 snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN;
2499 snsinfo->asc = SCSI_ASC_TOCC;
2500 snsinfo->add_sense_length = 0x6;
2501 snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
2502 bfa_trc(ioim->bfa, residue);
2503 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
2504 SCSI_STATUS_CHECK_CONDITION, sns_len,
2505 (u8 *)snsinfo, residue);
2506}
2507
2508static void
2509__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
2510{
2511 struct bfa_ioim_s *ioim = cbarg;
2512 int sns_len = 0xD;
2513 u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
2514 struct scsi_sense_s *snsinfo;
2515
2516 if (!complete) {
2517 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2518 return;
2519 }
2520
2521 snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
2522 ioim->fcpim->fcp, ioim->iotag);
2523 snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
2524 snsinfo->add_sense_length = 0xa;
2525 snsinfo->sense_key = NOT_READY;
2526 snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
2527 snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
2528 bfa_trc(ioim->bfa, residue);
2529 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
2530 SCSI_STATUS_CHECK_CONDITION, sns_len,
2531 (u8 *)snsinfo, residue);
2532}
2533
2534void 2170void
2535bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn, 2171bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
2536 u16 rp_tag, u8 lp_tag) 2172 u16 rp_tag, u8 lp_tag)
@@ -2647,7 +2283,8 @@ bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
 	if (port) {
 		*pwwn = port->port_cfg.pwwn;
 		rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
-		rp = rp_fcs->bfa_rport;
+		if (rp_fcs)
+			rp = rp_fcs->bfa_rport;
 	}
 
 	lunm_list = bfa_get_lun_mask_list(bfa);
@@ -2715,7 +2352,8 @@ bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
 		if (port) {
 			*pwwn = port->port_cfg.pwwn;
 			rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
-			rp = rp_fcs->bfa_rport;
+			if (rp_fcs)
+				rp = rp_fcs->bfa_rport;
 		}
 	}
 
@@ -2757,7 +2395,6 @@ __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2757 return; 2395 return;
2758 } 2396 }
2759 2397
2760 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2761 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED, 2398 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2762 0, 0, NULL, 0); 2399 0, 0, NULL, 0);
2763} 2400}
@@ -2773,7 +2410,6 @@ __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2773 return; 2410 return;
2774 } 2411 }
2775 2412
2776 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2777 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV, 2413 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2778 0, 0, NULL, 0); 2414 0, 0, NULL, 0);
2779} 2415}
@@ -2788,7 +2424,6 @@ __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2788 return; 2424 return;
2789 } 2425 }
2790 2426
2791 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2792 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio); 2427 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2793} 2428}
2794 2429
@@ -3132,7 +2767,6 @@ bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
3132 ioim->bfa = fcpim->bfa; 2767 ioim->bfa = fcpim->bfa;
3133 ioim->fcpim = fcpim; 2768 ioim->fcpim = fcpim;
3134 ioim->iosp = iosp; 2769 ioim->iosp = iosp;
3135 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
3136 INIT_LIST_HEAD(&ioim->sgpg_q); 2770 INIT_LIST_HEAD(&ioim->sgpg_q);
3137 bfa_reqq_winit(&ioim->iosp->reqq_wait, 2771 bfa_reqq_winit(&ioim->iosp->reqq_wait,
3138 bfa_ioim_qresume, ioim); 2772 bfa_ioim_qresume, ioim);
@@ -3170,7 +2804,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3170 evt = BFA_IOIM_SM_DONE; 2804 evt = BFA_IOIM_SM_DONE;
3171 else 2805 else
3172 evt = BFA_IOIM_SM_COMP; 2806 evt = BFA_IOIM_SM_COMP;
3173 ioim->proc_rsp_data(ioim);
3174 break; 2807 break;
3175 2808
3176 case BFI_IOIM_STS_TIMEDOUT: 2809 case BFI_IOIM_STS_TIMEDOUT:
@@ -3206,7 +2839,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3206 if (rsp->abort_tag != ioim->abort_tag) { 2839 if (rsp->abort_tag != ioim->abort_tag) {
3207 bfa_trc(ioim->bfa, rsp->abort_tag); 2840 bfa_trc(ioim->bfa, rsp->abort_tag);
3208 bfa_trc(ioim->bfa, ioim->abort_tag); 2841 bfa_trc(ioim->bfa, ioim->abort_tag);
3209 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
3210 return; 2842 return;
3211 } 2843 }
3212 2844
@@ -3225,7 +2857,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3225 WARN_ON(1); 2857 WARN_ON(1);
3226 } 2858 }
3227 2859
3228 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
3229 bfa_sm_send_event(ioim, evt); 2860 bfa_sm_send_event(ioim, evt);
3230} 2861}
3231 2862
@@ -3244,15 +2875,7 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3244 2875
3245 bfa_ioim_cb_profile_comp(fcpim, ioim); 2876 bfa_ioim_cb_profile_comp(fcpim, ioim);
3246 2877
3247 if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED) { 2878 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
3248 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
3249 return;
3250 }
3251
3252 if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
3253 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
3254 else
3255 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
3256} 2879}
3257 2880
3258/* 2881/*
@@ -3364,35 +2987,6 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
 void
 bfa_ioim_start(struct bfa_ioim_s *ioim)
 {
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
-	struct bfa_lps_s	*lps;
-	enum bfa_ioim_lm_status status;
-	struct scsi_lun scsilun;
-
-	if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
-		lps = BFA_IOIM_TO_LPS(ioim);
-		int_to_scsilun(cmnd->device->lun, &scsilun);
-		status = bfa_ioim_lm_check(ioim, lps,
-				ioim->itnim->rport, scsilun);
-		if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
-			bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
-			bfa_stats(ioim->itnim, lm_lun_not_rdy);
-			return;
-		}
-
-		if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
-			bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
-			bfa_stats(ioim->itnim, lm_lun_not_sup);
-			return;
-		}
-
-		if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
-			bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
-			bfa_stats(ioim->itnim, lm_rpl_data_changed);
-			return;
-		}
-	}
-
 	bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
 
 	/*
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 1080bcb81cb7..36f26da80f76 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -110,7 +110,6 @@ struct bfad_ioim_s;
 struct bfad_tskim_s;
 
 typedef void	(*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
-typedef bfa_boolean_t (*bfa_ioim_lm_proc_rsp_data_t) (struct bfa_ioim_s *ioim);
 
 struct bfa_fcpim_s {
 	struct bfa_s		*bfa;
@@ -124,7 +123,6 @@ struct bfa_fcpim_s {
 	u32			path_tov;
 	u16			q_depth;
 	u8			reqq;		/* Request queue to be used */
-	u8			lun_masking_pending;
 	struct list_head	itnim_q;	/* queue of active itnim */
 	struct list_head	ioim_resfree_q; /* IOs waiting for f/w */
 	struct list_head	ioim_comp_q;	/* IO global comp Q */
@@ -181,7 +179,6 @@ struct bfa_ioim_s {
 	u8			reqq;		/* Request queue for I/O */
 	u8			mode;		/* IO is passthrough or not */
 	u64			start_time;	/* IO's Profile start val */
-	bfa_ioim_lm_proc_rsp_data_t proc_rsp_data; /* RSP data adjust */
 };
 
 struct bfa_ioim_sp_s {
@@ -261,10 +258,6 @@ struct bfa_itnim_s {
 	(__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET;	\
 } while (0)
 
-#define BFA_IOIM_TO_LPS(__ioim) \
-	BFA_LPS_FROM_TAG(BFA_LPS_MOD(__ioim->bfa), \
-		__ioim->itnim->rport->rport_info.lp_tag)
-
 static inline bfa_boolean_t
 bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
 {
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index 95adb86d3769..b52cbb6bcd5a 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -582,11 +582,6 @@ void bfa_cb_rport_qos_scn_prio(void *rport,
 #define BFA_LP_TAG_INVALID	0xff
 void	bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
 void	bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
-bfa_boolean_t bfa_rport_lunmask_active(struct bfa_rport_s *rp);
-wwn_t	bfa_rport_get_pwwn(struct bfa_s *bfa, struct bfa_rport_s *rp);
-struct bfa_rport_s *bfa_rport_get_by_wwn(struct bfa_s *bfa, u16 vf_id,
-		wwn_t *lpwwn, wwn_t rpwwn);
-void *bfa_cb_get_rp_by_wwn(void *arg, u16 vf_id, wwn_t *lpwwn, wwn_t rpwwn);
 
 /*
  * bfa fcxp API functions
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 66fb72531b34..404fd10ddb21 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -674,6 +674,7 @@ bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	bfa_fcs_vport_start(&vport->fcs_vport);
+	list_add_tail(&vport->list_entry, &bfad->vport_list);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
 	return BFA_STATUS_OK;
@@ -1404,6 +1405,7 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
 	bfad->ref_count = 0;
 	bfad->pport.bfad = bfad;
 	INIT_LIST_HEAD(&bfad->pbc_vport_list);
+	INIT_LIST_HEAD(&bfad->vport_list);
 
 	/* Setup the debugfs node for this bfad */
 	if (bfa_debugfs_enable)
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 9d95844ab463..1938fe0473e9 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -491,7 +491,7 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
 
 free_scsi_host:
 	bfad_scsi_host_free(bfad, im_port);
-
+	list_del(&vport->list_entry);
 	kfree(vport);
 
 	return 0;
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 06fc00caeb41..530de2b1200a 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -2394,6 +2394,21 @@ out:
 	return 0;
 }
 
+/* Function to reset the LUN SCAN mode */
+static void
+bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
+{
+	struct bfad_im_port_s *pport_im = bfad->pport.im_port;
+	struct bfad_vport_s *vport = NULL;
+
+	/* Set the scsi device LUN SCAN flags for base port */
+	bfad_reset_sdev_bflags(pport_im, lunmask_cfg);
+
+	/* Set the scsi device LUN SCAN flags for the vports */
+	list_for_each_entry(vport, &bfad->vport_list, list_entry)
+		bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
+}
+
 int
 bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
 {
@@ -2401,11 +2416,17 @@ bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
 	unsigned long flags;
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE)
+	if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
 		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
-	else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE)
+		/* Set the LUN Scanning mode to be Sequential scan */
+		if (iocmd->status == BFA_STATUS_OK)
+			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
+	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
 		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
-	else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
+		/* Set the LUN Scanning mode to default REPORT_LUNS scan */
+		if (iocmd->status == BFA_STATUS_OK)
+			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
+	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
 		iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	return 0;
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 5e19a5f820ec..dc5b9d99c450 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -43,6 +43,7 @@
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_bsg_fc.h>
+#include <scsi/scsi_devinfo.h>
 
 #include "bfa_modules.h"
 #include "bfa_fcs.h"
@@ -227,6 +228,7 @@ struct bfad_s {
 	struct list_head	active_aen_q;
 	struct bfa_aen_entry_s	aen_list[BFA_AEN_MAX_ENTRY];
 	spinlock_t		bfad_aen_spinlock;
+	struct list_head	vport_list;
 };
 
 /* BFAD state machine events */
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index e5db649e8eb7..3153923f5b60 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -918,16 +918,70 @@ bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
918} 918}
919 919
920/* 920/*
921 * Function is invoked from the SCSI Host Template slave_alloc() entry point.
922 * Has the logic to query the LUN Mask database to check if this LUN needs to
923 * be made visible to the SCSI mid-layer or not.
924 *
925 * Returns BFA_STATUS_OK if this LUN needs to be added to the OS stack.
926 * Returns -ENXIO to notify SCSI mid-layer to not add this LUN to the OS stack.
927 */
928static int
929bfad_im_check_if_make_lun_visible(struct scsi_device *sdev,
930 struct fc_rport *rport)
931{
932 struct bfad_itnim_data_s *itnim_data =
933 (struct bfad_itnim_data_s *) rport->dd_data;
934 struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
935 struct bfa_rport_s *bfa_rport = itnim_data->itnim->bfa_itnim->rport;
936 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(bfa);
937 int i = 0, ret = -ENXIO;
938
939 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
940 if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE &&
941 scsilun_to_int(&lun_list[i].lun) == sdev->lun &&
942 lun_list[i].rp_tag == bfa_rport->rport_tag &&
943 lun_list[i].lp_tag == (u8)bfa_rport->rport_info.lp_tag) {
944 ret = BFA_STATUS_OK;
945 break;
946 }
947 }
948 return ret;
949}
950
951/*
921 * Scsi_Host template entry slave_alloc 952 * Scsi_Host template entry slave_alloc
922 */ 953 */
923static int 954static int
924bfad_im_slave_alloc(struct scsi_device *sdev) 955bfad_im_slave_alloc(struct scsi_device *sdev)
925{ 956{
926 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 957 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
958 struct bfad_itnim_data_s *itnim_data =
959 (struct bfad_itnim_data_s *) rport->dd_data;
960 struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
927 961
928 if (!rport || fc_remote_port_chkready(rport)) 962 if (!rport || fc_remote_port_chkready(rport))
929 return -ENXIO; 963 return -ENXIO;
930 964
965 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) {
966 /*
967 * We should not mask LUN 0 - since this will translate
968 * to no LUN / TARGET for SCSI ml resulting no scan.
969 */
970 if (sdev->lun == 0) {
971 sdev->sdev_bflags |= BLIST_NOREPORTLUN |
972 BLIST_SPARSELUN;
973 goto done;
974 }
975
976 /*
977 * Query LUN Mask configuration - to expose this LUN
978 * to the SCSI mid-layer or to mask it.
979 */
980 if (bfad_im_check_if_make_lun_visible(sdev, rport) !=
981 BFA_STATUS_OK)
982 return -ENXIO;
983 }
984done:
931 sdev->hostdata = rport->dd_data; 985 sdev->hostdata = rport->dd_data;
932 986
933 return 0; 987 return 0;
@@ -1037,6 +1091,8 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
1037 && (fc_rport->scsi_target_id < MAX_FCP_TARGET)) 1091 && (fc_rport->scsi_target_id < MAX_FCP_TARGET))
1038 itnim->scsi_tgt_id = fc_rport->scsi_target_id; 1092 itnim->scsi_tgt_id = fc_rport->scsi_target_id;
1039 1093
1094 itnim->channel = fc_rport->channel;
1095
1040 return; 1096 return;
1041} 1097}
1042 1098
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 004b6cf848d9..0814367ef101 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -91,6 +91,7 @@ struct bfad_itnim_s {
 	struct fc_rport *fc_rport;
 	struct bfa_itnim_s *bfa_itnim;
 	u16        scsi_tgt_id;
+	u16        channel;
 	u16        queue_work;
 	unsigned long last_ramp_up_time;
 	unsigned long last_queue_full_time;
@@ -166,4 +167,30 @@ irqreturn_t bfad_intx(int irq, void *dev_id);
 int bfad_im_bsg_request(struct fc_bsg_job *job);
 int bfad_im_bsg_timeout(struct fc_bsg_job *job);
 
+/*
+ * Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the
+ * SCSI mid-layer to choose LUN Scanning mode REPORT_LUNS vs. Sequential Scan
+ *
+ * Internally iterate's over all the ITNIM's part of the im_port & set's the
+ * sdev_bflags for the scsi_device associated with LUN #0.
+ */
+#define bfad_reset_sdev_bflags(__im_port, __lunmask_cfg) do {		\
+	struct scsi_device *__sdev = NULL;				\
+	struct bfad_itnim_s *__itnim = NULL;				\
+	u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN;		\
+	list_for_each_entry(__itnim, &((__im_port)->itnim_mapped_list),	\
+			    list_entry) {				\
+		__sdev = scsi_device_lookup((__im_port)->shost,		\
+					    __itnim->channel,		\
+					    __itnim->scsi_tgt_id, 0);	\
+		if (__sdev) {						\
+			if ((__lunmask_cfg) == BFA_TRUE)		\
+				__sdev->sdev_bflags |= scan_flags;	\
+			else						\
+				__sdev->sdev_bflags &= ~scan_flags;	\
+			scsi_device_put(__sdev);			\
+		}							\
+	}								\
+} while (0)
+
 #endif
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index c5360ffb4bed..d3ff9cd40234 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1868,8 +1868,9 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
 
 	tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
 	if (!tdata->skb) {
-		pr_warn("alloc skb %u+%u, opcode 0x%x failed.\n",
-			cdev->skb_tx_rsvd, headroom, opcode);
+		struct cxgbi_sock *csk = cconn->cep->csk;
+		struct net_device *ndev = cdev->ports[csk->port_id];
+		ndev->stats.tx_dropped++;
 		return -ENOMEM;
 	}
 
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 4ef021291a4d..04c5cea47a22 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -466,6 +466,11 @@ static int alua_check_sense(struct scsi_device *sdev,
 			 * Power On, Reset, or Bus Device Reset, just retry.
 			 */
 			return ADD_TO_MLQUEUE;
+		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
+			/*
+			 * Mode Parameters Changed
+			 */
+			return ADD_TO_MLQUEUE;
 		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06)
 			/*
 			 * ALUA state changed
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 841ebf4a6788..53a31c753cb1 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -953,6 +953,8 @@ static int __init rdac_init(void)
 	if (!kmpath_rdacd) {
 		scsi_unregister_device_handler(&rdac_dh);
 		printk(KERN_ERR "kmpath_rdacd creation failed.\n");
+
+		r = -EINVAL;
 	}
 done:
 	return r;
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 8d67467dd9ce..e9599600aa23 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -58,7 +58,11 @@ module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for "	\
 		 "Direct Data Placement (DDP).");
 
-DEFINE_MUTEX(fcoe_config_mutex);
+unsigned int fcoe_debug_logging;
+module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+static DEFINE_MUTEX(fcoe_config_mutex);
 
 static struct workqueue_struct *fcoe_wq;
 
@@ -67,8 +71,8 @@ static DECLARE_COMPLETION(fcoe_flush_completion);
67 71
68/* fcoe host list */ 72/* fcoe host list */
69/* must only by accessed under the RTNL mutex */ 73/* must only by accessed under the RTNL mutex */
70LIST_HEAD(fcoe_hostlist); 74static LIST_HEAD(fcoe_hostlist);
71DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu); 75static DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
72 76
73/* Function Prototypes */ 77/* Function Prototypes */
74static int fcoe_reset(struct Scsi_Host *); 78static int fcoe_reset(struct Scsi_Host *);
@@ -157,7 +161,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
157 .lport_set_port_id = fcoe_set_port_id, 161 .lport_set_port_id = fcoe_set_port_id,
158}; 162};
159 163
160struct fc_function_template fcoe_nport_fc_functions = { 164static struct fc_function_template fcoe_nport_fc_functions = {
161 .show_host_node_name = 1, 165 .show_host_node_name = 1,
162 .show_host_port_name = 1, 166 .show_host_port_name = 1,
163 .show_host_supported_classes = 1, 167 .show_host_supported_classes = 1,
@@ -197,7 +201,7 @@ struct fc_function_template fcoe_nport_fc_functions = {
197 .bsg_request = fc_lport_bsg_request, 201 .bsg_request = fc_lport_bsg_request,
198}; 202};
199 203
200struct fc_function_template fcoe_vport_fc_functions = { 204static struct fc_function_template fcoe_vport_fc_functions = {
201 .show_host_node_name = 1, 205 .show_host_node_name = 1,
202 .show_host_port_name = 1, 206 .show_host_port_name = 1,
203 .show_host_supported_classes = 1, 207 .show_host_supported_classes = 1,
@@ -433,7 +437,7 @@ static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
433 * 437 *
434 * Caller must be holding the RTNL mutex 438 * Caller must be holding the RTNL mutex
435 */ 439 */
436void fcoe_interface_cleanup(struct fcoe_interface *fcoe) 440static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
437{ 441{
438 struct net_device *netdev = fcoe->netdev; 442 struct net_device *netdev = fcoe->netdev;
439 struct fcoe_ctlr *fip = &fcoe->ctlr; 443 struct fcoe_ctlr *fip = &fcoe->ctlr;
@@ -748,7 +752,7 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
748 * 752 *
749 * Returns: True for read types I/O, otherwise returns false. 753 * Returns: True for read types I/O, otherwise returns false.
750 */ 754 */
751bool fcoe_oem_match(struct fc_frame *fp) 755static bool fcoe_oem_match(struct fc_frame *fp)
752{ 756{
753 struct fc_frame_header *fh = fc_frame_header_get(fp); 757 struct fc_frame_header *fh = fc_frame_header_get(fp);
754 struct fcp_cmnd *fcp; 758 struct fcp_cmnd *fcp;
@@ -756,11 +760,12 @@ bool fcoe_oem_match(struct fc_frame *fp)
 	if (fc_fcp_is_read(fr_fsp(fp)) &&
 	    (fr_fsp(fp)->data_len > fcoe_ddp_min))
 		return true;
-	else if (!(ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)) {
+	else if ((fr_fsp(fp) == NULL) &&
+		 (fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) &&
+		 (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
 		fcp = fc_frame_payload_get(fp, sizeof(*fcp));
-		if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN &&
-		    fcp && (ntohl(fcp->fc_dl) > fcoe_ddp_min) &&
-		    (fcp->fc_flags & FCP_CFL_WRDATA))
+		if ((fcp->fc_flags & FCP_CFL_WRDATA) &&
+		    (ntohl(fcp->fc_dl) > fcoe_ddp_min))
 			return true;
 	}
 	return false;
@@ -1106,7 +1111,7 @@ static int __init fcoe_if_init(void)
1106 * 1111 *
1107 * Returns: 0 on success 1112 * Returns: 0 on success
1108 */ 1113 */
1109int __exit fcoe_if_exit(void) 1114static int __exit fcoe_if_exit(void)
1110{ 1115{
1111 fc_release_transport(fcoe_nport_scsi_transport); 1116 fc_release_transport(fcoe_nport_scsi_transport);
1112 fc_release_transport(fcoe_vport_scsi_transport); 1117 fc_release_transport(fcoe_vport_scsi_transport);
@@ -1295,7 +1300,7 @@ static inline unsigned int fcoe_select_cpu(void)
1295 * 1300 *
1296 * Returns: 0 for success 1301 * Returns: 0 for success
1297 */ 1302 */
1298int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, 1303static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1299 struct packet_type *ptype, struct net_device *olddev) 1304 struct packet_type *ptype, struct net_device *olddev)
1300{ 1305{
1301 struct fc_lport *lport; 1306 struct fc_lport *lport;
@@ -1451,7 +1456,7 @@ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
1451 * 1456 *
1452 * Return: 0 for success 1457 * Return: 0 for success
1453 */ 1458 */
1454int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) 1459static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1455{ 1460{
1456 int wlen; 1461 int wlen;
1457 u32 crc; 1462 u32 crc;
@@ -1671,8 +1676,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
 			skb->dev ? skb->dev->name : "<NULL>");
 
 	port = lport_priv(lport);
-	if (skb_is_nonlinear(skb))
-		skb_linearize(skb); /* not ideal */
+	skb_linearize(skb); /* check for skb_is_nonlinear is within skb_linearize */
 
 	/*
 	 * Frame length checks and setting up the header pointers
@@ -1728,7 +1732,7 @@ drop:
1728 * 1732 *
1729 * Return: 0 for success 1733 * Return: 0 for success
1730 */ 1734 */
1731int fcoe_percpu_receive_thread(void *arg) 1735static int fcoe_percpu_receive_thread(void *arg)
1732{ 1736{
1733 struct fcoe_percpu_s *p = arg; 1737 struct fcoe_percpu_s *p = arg;
1734 struct sk_buff *skb; 1738 struct sk_buff *skb;
@@ -2146,7 +2150,7 @@ out_nortnl:
2146 * Returns: 0 if the ethtool query was successful 2150 * Returns: 0 if the ethtool query was successful
2147 * -1 if the ethtool query failed 2151 * -1 if the ethtool query failed
2148 */ 2152 */
2149int fcoe_link_speed_update(struct fc_lport *lport) 2153static int fcoe_link_speed_update(struct fc_lport *lport)
2150{ 2154{
2151 struct net_device *netdev = fcoe_netdev(lport); 2155 struct net_device *netdev = fcoe_netdev(lport);
2152 struct ethtool_cmd ecmd; 2156 struct ethtool_cmd ecmd;
@@ -2180,7 +2184,7 @@ int fcoe_link_speed_update(struct fc_lport *lport)
2180 * Returns: 0 if link is UP and OK, -1 if not 2184 * Returns: 0 if link is UP and OK, -1 if not
2181 * 2185 *
2182 */ 2186 */
2183int fcoe_link_ok(struct fc_lport *lport) 2187static int fcoe_link_ok(struct fc_lport *lport)
2184{ 2188{
2185 struct net_device *netdev = fcoe_netdev(lport); 2189 struct net_device *netdev = fcoe_netdev(lport);
2186 2190
@@ -2200,7 +2204,7 @@ int fcoe_link_ok(struct fc_lport *lport)
2200 * there no packets that will be handled by the lport, but also that any 2204 * there no packets that will be handled by the lport, but also that any
2201 * threads already handling packet have returned. 2205 * threads already handling packet have returned.
2202 */ 2206 */
2203void fcoe_percpu_clean(struct fc_lport *lport) 2207static void fcoe_percpu_clean(struct fc_lport *lport)
2204{ 2208{
2205 struct fcoe_percpu_s *pp; 2209 struct fcoe_percpu_s *pp;
2206 struct fcoe_rcv_info *fr; 2210 struct fcoe_rcv_info *fr;
@@ -2251,7 +2255,7 @@ void fcoe_percpu_clean(struct fc_lport *lport)
2251 * 2255 *
2252 * Returns: Always 0 (return value required by FC transport template) 2256 * Returns: Always 0 (return value required by FC transport template)
2253 */ 2257 */
2254int fcoe_reset(struct Scsi_Host *shost) 2258static int fcoe_reset(struct Scsi_Host *shost)
2255{ 2259{
2256 struct fc_lport *lport = shost_priv(shost); 2260 struct fc_lport *lport = shost_priv(shost);
2257 struct fcoe_port *port = lport_priv(lport); 2261 struct fcoe_port *port = lport_priv(lport);
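
The skb_linearize() hunk above works because the helper already bails out for linear buffers; a sketch of that inline, written from memory of include/linux/skbuff.h rather than taken from this tree:

/* skb_linearize() only does work for nonlinear skbs, so the explicit
 * skb_is_nonlinear() test in fcoe_recv_frame() was redundant.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}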
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 6c6884bcf840..bcc89e639495 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -40,9 +40,7 @@
40#define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */ 40#define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */
41#define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */ 41#define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */
42 42
43unsigned int fcoe_debug_logging; 43extern unsigned int fcoe_debug_logging;
44module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
45MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
46 44
47#define FCOE_LOGGING 0x01 /* General logging, not categorized */ 45#define FCOE_LOGGING 0x01 /* General logging, not categorized */
48#define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */ 46#define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */
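
Defining fcoe_debug_logging and its module_param in fcoe.h gave every includer its own copy of the variable; the hunk leaves only a declaration in the header and moves the definition into fcoe.c. A generic sketch of the pattern, with hypothetical names:

/* some_module.h: headers declare, never define */
extern unsigned int some_debug_mask;

/* some_module.c: exactly one file owns the storage and the parameter */
unsigned int some_debug_mask;
module_param_named(debug_mask, some_debug_mask, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_mask, "a bit mask of logging levels");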
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 5140f5d0fd6b..b96962c39449 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -4271,7 +4271,9 @@ static void stop_controller_lockup_detector(struct ctlr_info *h)
4271 remove_ctlr_from_lockup_detector_list(h); 4271 remove_ctlr_from_lockup_detector_list(h);
4272 /* If the list of ctlr's to monitor is empty, stop the thread */ 4272 /* If the list of ctlr's to monitor is empty, stop the thread */
4273 if (list_empty(&hpsa_ctlr_list)) { 4273 if (list_empty(&hpsa_ctlr_list)) {
4274 spin_unlock_irqrestore(&lockup_detector_lock, flags);
4274 kthread_stop(hpsa_lockup_detector); 4275 kthread_stop(hpsa_lockup_detector);
4276 spin_lock_irqsave(&lockup_detector_lock, flags);
4275 hpsa_lockup_detector = NULL; 4277 hpsa_lockup_detector = NULL;
4276 } 4278 }
4277 spin_unlock_irqrestore(&lockup_detector_lock, flags); 4279 spin_unlock_irqrestore(&lockup_detector_lock, flags);
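
The hpsa hunk drops lockup_detector_lock around kthread_stop() because kthread_stop() sleeps until the thread exits and therefore must not run under a spinlock with interrupts disabled. A minimal sketch of the general pattern, with hypothetical names:

spin_lock_irqsave(&detector_lock, flags);
if (list_empty(&monitored_ctlrs)) {
	/* kthread_stop() may sleep; never call it with a spinlock held */
	spin_unlock_irqrestore(&detector_lock, flags);
	kthread_stop(detector_task);
	spin_lock_irqsave(&detector_lock, flags);
	detector_task = NULL;
}
spin_unlock_irqrestore(&detector_lock, flags);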
diff --git a/drivers/scsi/isci/firmware/Makefile b/drivers/scsi/isci/firmware/Makefile
deleted file mode 100644
index 5f54461cabc5..000000000000
--- a/drivers/scsi/isci/firmware/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
1# Makefile for create_fw
2#
3CC=gcc
4CFLAGS=-c -Wall -O2 -g
5LDFLAGS=
6SOURCES=create_fw.c
7OBJECTS=$(SOURCES:.cpp=.o)
8EXECUTABLE=create_fw
9
10all: $(SOURCES) $(EXECUTABLE)
11
12$(EXECUTABLE): $(OBJECTS)
13 $(CC) $(LDFLAGS) $(OBJECTS) -o $@
14
15.c.o:
16 $(CC) $(CFLAGS) $< -O $@
17
18clean:
19 rm -f *.o $(EXECUTABLE)
diff --git a/drivers/scsi/isci/firmware/README b/drivers/scsi/isci/firmware/README
deleted file mode 100644
index 8056d2bd233b..000000000000
--- a/drivers/scsi/isci/firmware/README
+++ /dev/null
@@ -1,36 +0,0 @@
1This defines the temporary binary blob we are to pass to the SCU
2driver to emulate the binary firmware that we will eventually be
3able to access via NVRAM on the SCU controller.
4
5The current size of the binary blob is expected to be 149 bytes or larger
6
7Header Types:
80x1: Phy Masks
90x2: Phy Gens
100x3: SAS Addrs
110xff: End of Data
12
13ID string - u8[12]: "#SCU MAGIC#\0"
14Version - u8: 1
15SubVersion - u8: 0
16
17Header Type - u8: 0x1
18Size - u8: 8
19Phy Mask - u32[8]
20
21Header Type - u8: 0x2
22Size - u8: 8
23Phy Gen - u32[8]
24
25Header Type - u8: 0x3
26Size - u8: 8
27Sas Addr - u64[8]
28
29Header Type - u8: 0xf
30
31
32==============================================================================
33
34Place isci_firmware.bin in /lib/firmware
35Be sure to recreate the initramfs image to include the firmware.
36
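
The removed README describes a simple tag/size blob. A hypothetical C view of that legacy layout (struct and field names invented here, not from the driver) shows it adds up to the 149 bytes the text mentions:

#include <stdint.h>

/* Legacy isci firmware blob as described above:
 * 12 + 1 + 1 + (2 + 32) + (2 + 32) + (2 + 64) + 1 = 149 bytes.
 */
struct legacy_isci_fw_blob {
	uint8_t  id[12];          /* "#SCU MAGIC#\0" */
	uint8_t  version;         /* 1 */
	uint8_t  sub_version;     /* 0 */
	uint8_t  hdr_phy_mask;    /* 0x1 */
	uint8_t  phy_mask_size;   /* 8 */
	uint32_t phy_mask[8];
	uint8_t  hdr_phy_gen;     /* 0x2 */
	uint8_t  phy_gen_size;    /* 8 */
	uint32_t phy_gen[8];
	uint8_t  hdr_sas_addr;    /* 0x3 */
	uint8_t  sas_addr_size;   /* 8 */
	uint64_t sas_addr[8];
	uint8_t  hdr_end;         /* end-of-data marker, 0xff per the list above */
} __attribute__((packed));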
diff --git a/drivers/scsi/isci/firmware/create_fw.c b/drivers/scsi/isci/firmware/create_fw.c
deleted file mode 100644
index c7a2887a7e95..000000000000
--- a/drivers/scsi/isci/firmware/create_fw.c
+++ /dev/null
@@ -1,99 +0,0 @@
1#include <stdio.h>
2#include <stdlib.h>
3#include <unistd.h>
4#include <sys/types.h>
5#include <sys/stat.h>
6#include <fcntl.h>
7#include <string.h>
8#include <errno.h>
9#include <asm/types.h>
10#include <strings.h>
11#include <stdint.h>
12
13#include "create_fw.h"
14#include "../probe_roms.h"
15
16int write_blob(struct isci_orom *isci_orom)
17{
18 FILE *fd;
19 int err;
20 size_t count;
21
22 fd = fopen(blob_name, "w+");
23 if (!fd) {
24 perror("Open file for write failed");
25 fclose(fd);
26 return -EIO;
27 }
28
29 count = fwrite(isci_orom, sizeof(struct isci_orom), 1, fd);
30 if (count != 1) {
31 perror("Write data failed");
32 fclose(fd);
33 return -EIO;
34 }
35
36 fclose(fd);
37
38 return 0;
39}
40
41void set_binary_values(struct isci_orom *isci_orom)
42{
43 int ctrl_idx, phy_idx, port_idx;
44
45 /* setting OROM signature */
46 strncpy(isci_orom->hdr.signature, sig, strlen(sig));
47 isci_orom->hdr.version = version;
48 isci_orom->hdr.total_block_length = sizeof(struct isci_orom);
49 isci_orom->hdr.hdr_length = sizeof(struct sci_bios_oem_param_block_hdr);
50 isci_orom->hdr.num_elements = num_elements;
51
52 for (ctrl_idx = 0; ctrl_idx < 2; ctrl_idx++) {
53 isci_orom->ctrl[ctrl_idx].controller.mode_type = mode_type;
54 isci_orom->ctrl[ctrl_idx].controller.max_concurrent_dev_spin_up =
55 max_num_concurrent_dev_spin_up;
56 isci_orom->ctrl[ctrl_idx].controller.do_enable_ssc =
57 enable_ssc;
58
59 for (port_idx = 0; port_idx < 4; port_idx++)
60 isci_orom->ctrl[ctrl_idx].ports[port_idx].phy_mask =
61 phy_mask[ctrl_idx][port_idx];
62
63 for (phy_idx = 0; phy_idx < 4; phy_idx++) {
64 isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.high =
65 (__u32)(sas_addr[ctrl_idx][phy_idx] >> 32);
66 isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.low =
67 (__u32)(sas_addr[ctrl_idx][phy_idx]);
68
69 isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control0 =
70 afe_tx_amp_control0;
71 isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control1 =
72 afe_tx_amp_control1;
73 isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control2 =
74 afe_tx_amp_control2;
75 isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control3 =
76 afe_tx_amp_control3;
77 }
78 }
79}
80
81int main(void)
82{
83 int err;
84 struct isci_orom *isci_orom;
85
86 isci_orom = malloc(sizeof(struct isci_orom));
87 memset(isci_orom, 0, sizeof(struct isci_orom));
88
89 set_binary_values(isci_orom);
90
91 err = write_blob(isci_orom);
92 if (err < 0) {
93 free(isci_orom);
94 return err;
95 }
96
97 free(isci_orom);
98 return 0;
99}
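
On the consuming side the blob produced by create_fw.c is just a struct isci_orom image picked up through the firmware loader; a hedged sketch of that load (the file name comes from the old README, and the real code in probe_roms.c may differ):

#include <linux/firmware.h>
#include <linux/slab.h>

/* Sketch only: fetch isci_firmware.bin and copy it into a struct isci_orom. */
static struct isci_orom *load_orom_blob(struct device *dev)
{
	const struct firmware *fw;
	struct isci_orom *orom = NULL;

	if (request_firmware(&fw, "isci_firmware.bin", dev))
		return NULL;

	if (fw->size >= sizeof(*orom))
		orom = kmemdup(fw->data, sizeof(*orom), GFP_KERNEL);

	release_firmware(fw);
	return orom;
}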
diff --git a/drivers/scsi/isci/firmware/create_fw.h b/drivers/scsi/isci/firmware/create_fw.h
deleted file mode 100644
index 5f298828d22e..000000000000
--- a/drivers/scsi/isci/firmware/create_fw.h
+++ /dev/null
@@ -1,77 +0,0 @@
1#ifndef _CREATE_FW_H_
2#define _CREATE_FW_H_
3#include "../probe_roms.h"
4
5
6/* we are configuring for 2 SCUs */
7static const int num_elements = 2;
8
9/*
10 * For all defined arrays:
11 * elements 0-3 are for SCU0, ports 0-3
12 * elements 4-7 are for SCU1, ports 0-3
13 *
14 * valid configurations for one SCU are:
15 * P0 P1 P2 P3
16 * ----------------
17 * 0xF,0x0,0x0,0x0 # 1 x4 port
18 * 0x3,0x0,0x4,0x8 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are each x1
19 * # ports
20 * 0x1,0x2,0xC,0x0 # Phys 0 and 1 are each x1 ports, phy 2 and phy 3 are a x2
21 * # port
22 * 0x3,0x0,0xC,0x0 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are a x2 port
23 * 0x1,0x2,0x4,0x8 # Each phy is a x1 port (this is the default configuration)
24 *
25 * if there is a port/phy on which you do not wish to override the default
26 * values, use the value assigned to UNINIT_PARAM (255).
27 */
28
29/* discovery mode type (port auto config mode by default ) */
30
31/*
32 * if there is a port/phy on which you do not wish to override the default
33 * values, use the value "0000000000000000". SAS address of zero's is
34 * considered invalid and will not be used.
35 */
36#ifdef MPC
37static const int mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
38static const __u8 phy_mask[2][4] = { {1, 2, 4, 8},
39 {1, 2, 4, 8} };
40static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFFF0000001ULL,
41 0x5FCFFFFFF0000002ULL,
42 0x5FCFFFFFF0000003ULL,
43 0x5FCFFFFFF0000004ULL },
44 { 0x5FCFFFFFF0000005ULL,
45 0x5FCFFFFFF0000006ULL,
46 0x5FCFFFFFF0000007ULL,
47 0x5FCFFFFFF0000008ULL } };
48#else /* APC (default) */
49static const int mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
50static const __u8 phy_mask[2][4];
51static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFF00000001ULL,
52 0x5FCFFFFF00000001ULL,
53 0x5FCFFFFF00000001ULL,
54 0x5FCFFFFF00000001ULL },
55 { 0x5FCFFFFF00000002ULL,
56 0x5FCFFFFF00000002ULL,
57 0x5FCFFFFF00000002ULL,
58 0x5FCFFFFF00000002ULL } };
59#endif
60
61/* Maximum number of concurrent device spin up */
62static const int max_num_concurrent_dev_spin_up = 1;
63
64/* enable of ssc operation */
65static const int enable_ssc;
66
67/* AFE_TX_AMP_CONTROL */
68static const unsigned int afe_tx_amp_control0 = 0x000bdd08;
69static const unsigned int afe_tx_amp_control1 = 0x000ffc00;
70static const unsigned int afe_tx_amp_control2 = 0x000b7c09;
71static const unsigned int afe_tx_amp_control3 = 0x000afc6e;
72
73static const char blob_name[] = "isci_firmware.bin";
74static const char sig[] = "ISCUOEMB";
75static const unsigned char version = 0x10;
76
77#endif
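
Each entry in the MPC phy_mask table above carries one bit per phy, so a port's width is simply the population count of its mask. A small userspace helper (hypothetical, in the same spirit as create_fw.c) that prints the widths:

#include <stdio.h>
#include <asm/types.h>

/* E.g. {0x3, 0x0, 0xC, 0x0} prints widths 2, 0, 2, 0: two x2 ports. */
static void print_port_widths(const __u8 port_phy_mask[4])
{
	int port;

	for (port = 0; port < 4; port++)
		printf("port %d: mask 0x%x, %d phy(s)\n", port,
		       port_phy_mask[port],
		       __builtin_popcount(port_phy_mask[port]));
}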
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index e7fe9c4c85b8..1a65d6514237 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -899,7 +899,8 @@ static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
899 */ 899 */
900 if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) || 900 if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
901 (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) || 901 (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
902 (iphy->is_in_link_training == true && is_phy_starting(iphy))) { 902 (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
903 (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) {
903 is_controller_start_complete = false; 904 is_controller_start_complete = false;
904 break; 905 break;
905 } 906 }
@@ -1666,6 +1667,9 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
1666 /* Default to no SSC operation. */ 1667 /* Default to no SSC operation. */
1667 ihost->oem_parameters.controller.do_enable_ssc = false; 1668 ihost->oem_parameters.controller.do_enable_ssc = false;
1668 1669
1670 /* Default to short cables on all phys. */
1671 ihost->oem_parameters.controller.cable_selection_mask = 0;
1672
1669 /* Initialize all of the port parameter information to narrow ports. */ 1673 /* Initialize all of the port parameter information to narrow ports. */
1670 for (index = 0; index < SCI_MAX_PORTS; index++) { 1674 for (index = 0; index < SCI_MAX_PORTS; index++) {
1671 ihost->oem_parameters.ports[index].phy_mask = 0; 1675 ihost->oem_parameters.ports[index].phy_mask = 0;
@@ -1673,8 +1677,9 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
1673 1677
1674 /* Initialize all of the phy parameter information. */ 1678 /* Initialize all of the phy parameter information. */
1675 for (index = 0; index < SCI_MAX_PHYS; index++) { 1679 for (index = 0; index < SCI_MAX_PHYS; index++) {
1676 /* Default to 6G (i.e. Gen 3) for now. */ 1680 /* Default to 3G (i.e. Gen 2). */
1677 ihost->user_parameters.phys[index].max_speed_generation = 3; 1681 ihost->user_parameters.phys[index].max_speed_generation =
1682 SCIC_SDS_PARM_GEN2_SPEED;
1678 1683
1679 /* the frequencies cannot be 0 */ 1684 /* the frequencies cannot be 0 */
1680 ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f; 1685 ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
@@ -1694,7 +1699,7 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
1694 ihost->user_parameters.ssp_inactivity_timeout = 5; 1699 ihost->user_parameters.ssp_inactivity_timeout = 5;
1695 ihost->user_parameters.stp_max_occupancy_timeout = 5; 1700 ihost->user_parameters.stp_max_occupancy_timeout = 5;
1696 ihost->user_parameters.ssp_max_occupancy_timeout = 20; 1701 ihost->user_parameters.ssp_max_occupancy_timeout = 20;
1697 ihost->user_parameters.no_outbound_task_timeout = 20; 1702 ihost->user_parameters.no_outbound_task_timeout = 2;
1698} 1703}
1699 1704
1700static void controller_timeout(unsigned long data) 1705static void controller_timeout(unsigned long data)
@@ -1759,7 +1764,7 @@ static enum sci_status sci_controller_construct(struct isci_host *ihost,
1759 return sci_controller_reset(ihost); 1764 return sci_controller_reset(ihost);
1760} 1765}
1761 1766
1762int sci_oem_parameters_validate(struct sci_oem_params *oem) 1767int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
1763{ 1768{
1764 int i; 1769 int i;
1765 1770
@@ -1791,18 +1796,61 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem)
1791 oem->controller.max_concurr_spin_up < 1) 1796 oem->controller.max_concurr_spin_up < 1)
1792 return -EINVAL; 1797 return -EINVAL;
1793 1798
1799 if (oem->controller.do_enable_ssc) {
1800 if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1)
1801 return -EINVAL;
1802
1803 if (version >= ISCI_ROM_VER_1_1) {
1804 u8 test = oem->controller.ssc_sata_tx_spread_level;
1805
1806 switch (test) {
1807 case 0:
1808 case 2:
1809 case 3:
1810 case 6:
1811 case 7:
1812 break;
1813 default:
1814 return -EINVAL;
1815 }
1816
1817 test = oem->controller.ssc_sas_tx_spread_level;
1818 if (oem->controller.ssc_sas_tx_type == 0) {
1819 switch (test) {
1820 case 0:
1821 case 2:
1822 case 3:
1823 break;
1824 default:
1825 return -EINVAL;
1826 }
1827 } else if (oem->controller.ssc_sas_tx_type == 1) {
1828 switch (test) {
1829 case 0:
1830 case 3:
1831 case 6:
1832 break;
1833 default:
1834 return -EINVAL;
1835 }
1836 }
1837 }
1838 }
1839
1794 return 0; 1840 return 0;
1795} 1841}
1796 1842
1797static enum sci_status sci_oem_parameters_set(struct isci_host *ihost) 1843static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
1798{ 1844{
1799 u32 state = ihost->sm.current_state_id; 1845 u32 state = ihost->sm.current_state_id;
1846 struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
1800 1847
1801 if (state == SCIC_RESET || 1848 if (state == SCIC_RESET ||
1802 state == SCIC_INITIALIZING || 1849 state == SCIC_INITIALIZING ||
1803 state == SCIC_INITIALIZED) { 1850 state == SCIC_INITIALIZED) {
1804 1851
1805 if (sci_oem_parameters_validate(&ihost->oem_parameters)) 1852 if (sci_oem_parameters_validate(&ihost->oem_parameters,
1853 pci_info->orom->hdr.version))
1806 return SCI_FAILURE_INVALID_PARAMETER_VALUE; 1854 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1807 1855
1808 return SCI_SUCCESS; 1856 return SCI_SUCCESS;
@@ -1857,6 +1905,31 @@ static void power_control_timeout(unsigned long data)
1857 ihost->power_control.phys_waiting--; 1905 ihost->power_control.phys_waiting--;
1858 ihost->power_control.phys_granted_power++; 1906 ihost->power_control.phys_granted_power++;
1859 sci_phy_consume_power_handler(iphy); 1907 sci_phy_consume_power_handler(iphy);
1908
1909 if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
1910 u8 j;
1911
1912 for (j = 0; j < SCI_MAX_PHYS; j++) {
1913 struct isci_phy *requester = ihost->power_control.requesters[j];
1914
1915 /*
1916 * Search the power_control queue to see if there are other phys
1917 * attached to the same remote device. If found, take all of
1918 * them out of await_sas_power state.
1919 */
1920 if (requester != NULL && requester != iphy) {
1921 u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
1922 iphy->frame_rcvd.iaf.sas_addr,
1923 sizeof(requester->frame_rcvd.iaf.sas_addr));
1924
1925 if (other == 0) {
1926 ihost->power_control.requesters[j] = NULL;
1927 ihost->power_control.phys_waiting--;
1928 sci_phy_consume_power_handler(requester);
1929 }
1930 }
1931 }
1932 }
1860 } 1933 }
1861 1934
1862 /* 1935 /*
@@ -1891,9 +1964,34 @@ void sci_controller_power_control_queue_insert(struct isci_host *ihost,
1891 ihost->power_control.timer_started = true; 1964 ihost->power_control.timer_started = true;
1892 1965
1893 } else { 1966 } else {
1894 /* Add the phy in the waiting list */ 1967 /*
 1895 ihost->power_control.requesters[iphy->phy_index] = iphy; 1968 * If another phy attached to the same SAS address as this phy is
 1896 ihost->power_control.phys_waiting++; 1969 * already in the READY state, this phy does not need to wait.
1970 */
1971 u8 i;
1972 struct isci_phy *current_phy;
1973
1974 for (i = 0; i < SCI_MAX_PHYS; i++) {
1975 u8 other;
1976 current_phy = &ihost->phys[i];
1977
1978 other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
1979 iphy->frame_rcvd.iaf.sas_addr,
1980 sizeof(current_phy->frame_rcvd.iaf.sas_addr));
1981
1982 if (current_phy->sm.current_state_id == SCI_PHY_READY &&
1983 current_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS &&
1984 other == 0) {
1985 sci_phy_consume_power_handler(iphy);
1986 break;
1987 }
1988 }
1989
1990 if (i == SCI_MAX_PHYS) {
1991 /* Add the phy in the waiting list */
1992 ihost->power_control.requesters[iphy->phy_index] = iphy;
1993 ihost->power_control.phys_waiting++;
1994 }
1897 } 1995 }
1898} 1996}
1899 1997
@@ -1908,162 +2006,250 @@ void sci_controller_power_control_queue_remove(struct isci_host *ihost,
1908 ihost->power_control.requesters[iphy->phy_index] = NULL; 2006 ihost->power_control.requesters[iphy->phy_index] = NULL;
1909} 2007}
1910 2008
2009static int is_long_cable(int phy, unsigned char selection_byte)
2010{
2011 return !!(selection_byte & (1 << phy));
2012}
2013
2014static int is_medium_cable(int phy, unsigned char selection_byte)
2015{
2016 return !!(selection_byte & (1 << (phy + 4)));
2017}
2018
2019static enum cable_selections decode_selection_byte(
2020 int phy,
2021 unsigned char selection_byte)
2022{
2023 return ((selection_byte & (1 << phy)) ? 1 : 0)
2024 + (selection_byte & (1 << (phy + 4)) ? 2 : 0);
2025}
2026
2027static unsigned char *to_cable_select(struct isci_host *ihost)
2028{
2029 if (is_cable_select_overridden())
2030 return ((unsigned char *)&cable_selection_override)
2031 + ihost->id;
2032 else
2033 return &ihost->oem_parameters.controller.cable_selection_mask;
2034}
2035
2036enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
2037{
2038 return decode_selection_byte(phy, *to_cable_select(ihost));
2039}
2040
2041char *lookup_cable_names(enum cable_selections selection)
2042{
2043 static char *cable_names[] = {
2044 [short_cable] = "short",
2045 [long_cable] = "long",
2046 [medium_cable] = "medium",
2047 [undefined_cable] = "<undefined, assumed long>" /* bit 0==1 */
2048 };
2049 return (selection <= undefined_cable) ? cable_names[selection]
2050 : cable_names[undefined_cable];
2051}
2052
1911#define AFE_REGISTER_WRITE_DELAY 10 2053#define AFE_REGISTER_WRITE_DELAY 10
1912 2054
1913/* Initialize the AFE for this phy index. We need to read the AFE setup from
1914 * the OEM parameters
1915 */
1916static void sci_controller_afe_initialization(struct isci_host *ihost) 2055static void sci_controller_afe_initialization(struct isci_host *ihost)
1917{ 2056{
2057 struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
1918 const struct sci_oem_params *oem = &ihost->oem_parameters; 2058 const struct sci_oem_params *oem = &ihost->oem_parameters;
1919 struct pci_dev *pdev = ihost->pdev; 2059 struct pci_dev *pdev = ihost->pdev;
1920 u32 afe_status; 2060 u32 afe_status;
1921 u32 phy_id; 2061 u32 phy_id;
2062 unsigned char cable_selection_mask = *to_cable_select(ihost);
1922 2063
1923 /* Clear DFX Status registers */ 2064 /* Clear DFX Status registers */
1924 writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0); 2065 writel(0x0081000f, &afe->afe_dfx_master_control0);
1925 udelay(AFE_REGISTER_WRITE_DELAY); 2066 udelay(AFE_REGISTER_WRITE_DELAY);
1926 2067
1927 if (is_b0(pdev)) { 2068 if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) {
1928 /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement 2069 /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
1929 * Timer, PM Stagger Timer */ 2070 * Timer, PM Stagger Timer
1930 writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2); 2071 */
2072 writel(0x0007FFFF, &afe->afe_pmsn_master_control2);
1931 udelay(AFE_REGISTER_WRITE_DELAY); 2073 udelay(AFE_REGISTER_WRITE_DELAY);
1932 } 2074 }
1933 2075
1934 /* Configure bias currents to normal */ 2076 /* Configure bias currents to normal */
1935 if (is_a2(pdev)) 2077 if (is_a2(pdev))
1936 writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control); 2078 writel(0x00005A00, &afe->afe_bias_control);
1937 else if (is_b0(pdev) || is_c0(pdev)) 2079 else if (is_b0(pdev) || is_c0(pdev))
1938 writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control); 2080 writel(0x00005F00, &afe->afe_bias_control);
2081 else if (is_c1(pdev))
2082 writel(0x00005500, &afe->afe_bias_control);
1939 2083
1940 udelay(AFE_REGISTER_WRITE_DELAY); 2084 udelay(AFE_REGISTER_WRITE_DELAY);
1941 2085
1942 /* Enable PLL */ 2086 /* Enable PLL */
1943 if (is_b0(pdev) || is_c0(pdev)) 2087 if (is_a2(pdev))
1944 writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0); 2088 writel(0x80040908, &afe->afe_pll_control0);
1945 else 2089 else if (is_b0(pdev) || is_c0(pdev))
1946 writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0); 2090 writel(0x80040A08, &afe->afe_pll_control0);
2091 else if (is_c1(pdev)) {
2092 writel(0x80000B08, &afe->afe_pll_control0);
2093 udelay(AFE_REGISTER_WRITE_DELAY);
2094 writel(0x00000B08, &afe->afe_pll_control0);
2095 udelay(AFE_REGISTER_WRITE_DELAY);
2096 writel(0x80000B08, &afe->afe_pll_control0);
2097 }
1947 2098
1948 udelay(AFE_REGISTER_WRITE_DELAY); 2099 udelay(AFE_REGISTER_WRITE_DELAY);
1949 2100
1950 /* Wait for the PLL to lock */ 2101 /* Wait for the PLL to lock */
1951 do { 2102 do {
1952 afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status); 2103 afe_status = readl(&afe->afe_common_block_status);
1953 udelay(AFE_REGISTER_WRITE_DELAY); 2104 udelay(AFE_REGISTER_WRITE_DELAY);
1954 } while ((afe_status & 0x00001000) == 0); 2105 } while ((afe_status & 0x00001000) == 0);
1955 2106
1956 if (is_a2(pdev)) { 2107 if (is_a2(pdev)) {
1957 /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */ 2108 /* Shorten SAS SNW lock time (RxLock timer value from 76
1958 writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0); 2109 * us to 50 us)
2110 */
2111 writel(0x7bcc96ad, &afe->afe_pmsn_master_control0);
1959 udelay(AFE_REGISTER_WRITE_DELAY); 2112 udelay(AFE_REGISTER_WRITE_DELAY);
1960 } 2113 }
1961 2114
1962 for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) { 2115 for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
2116 struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_id];
1963 const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id]; 2117 const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
2118 int cable_length_long =
2119 is_long_cable(phy_id, cable_selection_mask);
2120 int cable_length_medium =
2121 is_medium_cable(phy_id, cable_selection_mask);
1964 2122
1965 if (is_b0(pdev)) { 2123 if (is_a2(pdev)) {
1966 /* Configure transmitter SSC parameters */ 2124 /* All defaults, except the Receive Word
1967 writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); 2125 * Alignament/Comma Detect Enable....(0xe800)
2126 */
2127 writel(0x00004512, &xcvr->afe_xcvr_control0);
2128 udelay(AFE_REGISTER_WRITE_DELAY);
2129
2130 writel(0x0050100F, &xcvr->afe_xcvr_control1);
2131 udelay(AFE_REGISTER_WRITE_DELAY);
2132 } else if (is_b0(pdev)) {
2133 /* Configure transmitter SSC parameters */
2134 writel(0x00030000, &xcvr->afe_tx_ssc_control);
1968 udelay(AFE_REGISTER_WRITE_DELAY); 2135 udelay(AFE_REGISTER_WRITE_DELAY);
1969 } else if (is_c0(pdev)) { 2136 } else if (is_c0(pdev)) {
1970 /* Configure transmitter SSC parameters */ 2137 /* Configure transmitter SSC parameters */
1971 writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); 2138 writel(0x00010202, &xcvr->afe_tx_ssc_control);
1972 udelay(AFE_REGISTER_WRITE_DELAY); 2139 udelay(AFE_REGISTER_WRITE_DELAY);
1973 2140
1974 /* 2141 /* All defaults, except the Receive Word
 1975 * All defaults, except the Receive Word Alignment/Comma Detect 2142 * Alignment/Comma Detect Enable....(0xe800)
1976 * Enable....(0xe800) */ 2143 */
1977 writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); 2144 writel(0x00014500, &xcvr->afe_xcvr_control0);
1978 udelay(AFE_REGISTER_WRITE_DELAY); 2145 udelay(AFE_REGISTER_WRITE_DELAY);
1979 } else { 2146 } else if (is_c1(pdev)) {
1980 /* 2147 /* Configure transmitter SSC parameters */
 1981 * All defaults, except the Receive Word Alignment/Comma Detect 2148 writel(0x00010202, &xcvr->afe_tx_ssc_control);
1982 * Enable....(0xe800) */
1983 writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
1984 udelay(AFE_REGISTER_WRITE_DELAY); 2149 udelay(AFE_REGISTER_WRITE_DELAY);
1985 2150
1986 writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1); 2151 /* All defaults, except the Receive Word
 2152 * Alignment/Comma Detect Enable....(0xe800)
2153 */
2154 writel(0x0001C500, &xcvr->afe_xcvr_control0);
1987 udelay(AFE_REGISTER_WRITE_DELAY); 2155 udelay(AFE_REGISTER_WRITE_DELAY);
1988 } 2156 }
1989 2157
1990 /* 2158 /* Power up TX and RX out from power down (PWRDNTX and
1991 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) 2159 * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c)
1992 * & increase TX int & ext bias 20%....(0xe85c) */ 2160 */
1993 if (is_a2(pdev)) 2161 if (is_a2(pdev))
1994 writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); 2162 writel(0x000003F0, &xcvr->afe_channel_control);
1995 else if (is_b0(pdev)) { 2163 else if (is_b0(pdev)) {
1996 /* Power down TX and RX (PWRDNTX and PWRDNRX) */ 2164 writel(0x000003D7, &xcvr->afe_channel_control);
1997 writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
1998 udelay(AFE_REGISTER_WRITE_DELAY); 2165 udelay(AFE_REGISTER_WRITE_DELAY);
1999 2166
2000 /* 2167 writel(0x000003D4, &xcvr->afe_channel_control);
2001 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) 2168 } else if (is_c0(pdev)) {
2002 * & increase TX int & ext bias 20%....(0xe85c) */ 2169 writel(0x000001E7, &xcvr->afe_channel_control);
2003 writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
2004 } else {
2005 writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
2006 udelay(AFE_REGISTER_WRITE_DELAY); 2170 udelay(AFE_REGISTER_WRITE_DELAY);
2007 2171
2008 /* 2172 writel(0x000001E4, &xcvr->afe_channel_control);
2009 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) 2173 } else if (is_c1(pdev)) {
2010 * & increase TX int & ext bias 20%....(0xe85c) */ 2174 writel(cable_length_long ? 0x000002F7 : 0x000001F7,
2011 writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); 2175 &xcvr->afe_channel_control);
2176 udelay(AFE_REGISTER_WRITE_DELAY);
2177
2178 writel(cable_length_long ? 0x000002F4 : 0x000001F4,
2179 &xcvr->afe_channel_control);
2012 } 2180 }
2013 udelay(AFE_REGISTER_WRITE_DELAY); 2181 udelay(AFE_REGISTER_WRITE_DELAY);
2014 2182
2015 if (is_a2(pdev)) { 2183 if (is_a2(pdev)) {
2016 /* Enable TX equalization (0xe824) */ 2184 /* Enable TX equalization (0xe824) */
2017 writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); 2185 writel(0x00040000, &xcvr->afe_tx_control);
2018 udelay(AFE_REGISTER_WRITE_DELAY); 2186 udelay(AFE_REGISTER_WRITE_DELAY);
2019 } 2187 }
2020 2188
2021 /* 2189 if (is_a2(pdev) || is_b0(pdev))
2022 * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On), 2190 /* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0,
2023 * RDD=0x0(RX Detect Enabled) ....(0xe800) */ 2191 * TPD=0x0(TX Power On), RDD=0x0(RX Detect
2024 writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); 2192 * Enabled) ....(0xe800)
2193 */
2194 writel(0x00004100, &xcvr->afe_xcvr_control0);
2195 else if (is_c0(pdev))
2196 writel(0x00014100, &xcvr->afe_xcvr_control0);
2197 else if (is_c1(pdev))
2198 writel(0x0001C100, &xcvr->afe_xcvr_control0);
2025 udelay(AFE_REGISTER_WRITE_DELAY); 2199 udelay(AFE_REGISTER_WRITE_DELAY);
2026 2200
2027 /* Leave DFE/FFE on */ 2201 /* Leave DFE/FFE on */
2028 if (is_a2(pdev)) 2202 if (is_a2(pdev))
2029 writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); 2203 writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
2030 else if (is_b0(pdev)) { 2204 else if (is_b0(pdev)) {
2031 writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); 2205 writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
2032 udelay(AFE_REGISTER_WRITE_DELAY); 2206 udelay(AFE_REGISTER_WRITE_DELAY);
2033 /* Enable TX equalization (0xe824) */ 2207 /* Enable TX equalization (0xe824) */
2034 writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); 2208 writel(0x00040000, &xcvr->afe_tx_control);
2035 } else { 2209 } else if (is_c0(pdev)) {
2036 writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1); 2210 writel(0x01400C0F, &xcvr->afe_rx_ssc_control1);
2211 udelay(AFE_REGISTER_WRITE_DELAY);
2212
2213 writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0);
2214 udelay(AFE_REGISTER_WRITE_DELAY);
2215
2216 /* Enable TX equalization (0xe824) */
2217 writel(0x00040000, &xcvr->afe_tx_control);
2218 } else if (is_c1(pdev)) {
2219 writel(cable_length_long ? 0x01500C0C :
2220 cable_length_medium ? 0x01400C0D : 0x02400C0D,
2221 &xcvr->afe_xcvr_control1);
2222 udelay(AFE_REGISTER_WRITE_DELAY);
2223
2224 writel(0x000003E0, &xcvr->afe_dfx_rx_control1);
2037 udelay(AFE_REGISTER_WRITE_DELAY); 2225 udelay(AFE_REGISTER_WRITE_DELAY);
2038 2226
2039 writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); 2227 writel(cable_length_long ? 0x33091C1F :
2228 cable_length_medium ? 0x3315181F : 0x2B17161F,
2229 &xcvr->afe_rx_ssc_control0);
2040 udelay(AFE_REGISTER_WRITE_DELAY); 2230 udelay(AFE_REGISTER_WRITE_DELAY);
2041 2231
2042 /* Enable TX equalization (0xe824) */ 2232 /* Enable TX equalization (0xe824) */
2043 writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); 2233 writel(0x00040000, &xcvr->afe_tx_control);
2044 } 2234 }
2045 2235
2046 udelay(AFE_REGISTER_WRITE_DELAY); 2236 udelay(AFE_REGISTER_WRITE_DELAY);
2047 2237
2048 writel(oem_phy->afe_tx_amp_control0, 2238 writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0);
2049 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
2050 udelay(AFE_REGISTER_WRITE_DELAY); 2239 udelay(AFE_REGISTER_WRITE_DELAY);
2051 2240
2052 writel(oem_phy->afe_tx_amp_control1, 2241 writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1);
2053 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
2054 udelay(AFE_REGISTER_WRITE_DELAY); 2242 udelay(AFE_REGISTER_WRITE_DELAY);
2055 2243
2056 writel(oem_phy->afe_tx_amp_control2, 2244 writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2);
2057 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
2058 udelay(AFE_REGISTER_WRITE_DELAY); 2245 udelay(AFE_REGISTER_WRITE_DELAY);
2059 2246
2060 writel(oem_phy->afe_tx_amp_control3, 2247 writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3);
2061 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
2062 udelay(AFE_REGISTER_WRITE_DELAY); 2248 udelay(AFE_REGISTER_WRITE_DELAY);
2063 } 2249 }
2064 2250
2065 /* Transfer control to the PEs */ 2251 /* Transfer control to the PEs */
2066 writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0); 2252 writel(0x00010f00, &afe->afe_dfx_master_control0);
2067 udelay(AFE_REGISTER_WRITE_DELAY); 2253 udelay(AFE_REGISTER_WRITE_DELAY);
2068} 2254}
2069 2255
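
decode_selection_byte() in the hunk above packs two flags per phy into one byte: bit <phy> marks a long cable, bit <phy + 4> a medium one, and the sum indexes enum cable_selections. A standalone worked example with the same arithmetic:

/* selection byte 0x21: phy 0 -> bit 0 set, bit 4 clear  => 1 (long_cable)
 *                      phy 1 -> bit 1 clear, bit 5 set  => 2 (medium_cable)
 *                      phy 2, phy 3 -> both bits clear  => 0 (short_cable)
 * Both bits set decodes to 3 (undefined_cable), which lookup_cable_names()
 * reports as "<undefined, assumed long>".
 */
static int decode(int phy, unsigned char sel)
{
	return ((sel >> phy) & 1) + 2 * ((sel >> (phy + 4)) & 1);
}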
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 646051afd3cb..5477f0fa8233 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -435,11 +435,36 @@ static inline bool is_b0(struct pci_dev *pdev)
435 435
436static inline bool is_c0(struct pci_dev *pdev) 436static inline bool is_c0(struct pci_dev *pdev)
437{ 437{
438 if (pdev->revision >= 5) 438 if (pdev->revision == 5)
439 return true; 439 return true;
440 return false; 440 return false;
441} 441}
442 442
443static inline bool is_c1(struct pci_dev *pdev)
444{
445 if (pdev->revision >= 6)
446 return true;
447 return false;
448}
449
450enum cable_selections {
451 short_cable = 0,
452 long_cable = 1,
453 medium_cable = 2,
454 undefined_cable = 3
455};
456
457#define CABLE_OVERRIDE_DISABLED (0x10000)
458
459static inline int is_cable_select_overridden(void)
460{
461 return cable_selection_override < CABLE_OVERRIDE_DISABLED;
462}
463
464enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy);
465void validate_cable_selections(struct isci_host *ihost);
466char *lookup_cable_names(enum cable_selections);
467
443/* set hw control for 'activity', even though active enclosures seem to drive 468/* set hw control for 'activity', even though active enclosures seem to drive
444 * the activity led on their own. Skip setting FSENG control on 'status' due 469 * the activity led on their own. Skip setting FSENG control on 'status' due
445 * to unexpected operation and 'error' due to not being a supported automatic 470 * to unexpected operation and 'error' due to not being a supported automatic
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index a97edabcb85a..17c4c2c89c2e 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -65,7 +65,7 @@
65#include "probe_roms.h" 65#include "probe_roms.h"
66 66
67#define MAJ 1 67#define MAJ 1
68#define MIN 0 68#define MIN 1
69#define BUILD 0 69#define BUILD 0
70#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ 70#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
71 __stringify(BUILD) 71 __stringify(BUILD)
@@ -94,7 +94,7 @@ MODULE_DEVICE_TABLE(pci, isci_id_table);
94 94
95/* linux isci specific settings */ 95/* linux isci specific settings */
96 96
97unsigned char no_outbound_task_to = 20; 97unsigned char no_outbound_task_to = 2;
98module_param(no_outbound_task_to, byte, 0); 98module_param(no_outbound_task_to, byte, 0);
99MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)"); 99MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
100 100
@@ -114,7 +114,7 @@ u16 stp_inactive_to = 5;
114module_param(stp_inactive_to, ushort, 0); 114module_param(stp_inactive_to, ushort, 0);
115MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)"); 115MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
116 116
117unsigned char phy_gen = 3; 117unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
118module_param(phy_gen, byte, 0); 118module_param(phy_gen, byte, 0);
119MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)"); 119MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
120 120
@@ -122,6 +122,14 @@ unsigned char max_concurr_spinup;
122module_param(max_concurr_spinup, byte, 0); 122module_param(max_concurr_spinup, byte, 0);
123MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup"); 123MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
124 124
125uint cable_selection_override = CABLE_OVERRIDE_DISABLED;
126module_param(cable_selection_override, uint, 0);
127
128MODULE_PARM_DESC(cable_selection_override,
129 "This field indicates length of the SAS/SATA cable between "
 130 "host and device. If any bit above bit 15 is set (the default), this "
131 "indicates \"use platform defaults\"");
132
125static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) 133static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
126{ 134{
127 struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); 135 struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
@@ -412,6 +420,14 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
412 return NULL; 420 return NULL;
413 isci_host->shost = shost; 421 isci_host->shost = shost;
414 422
423 dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: "
424 "{%s, %s, %s, %s}\n",
425 (is_cable_select_overridden() ? "* " : ""), isci_host->id,
426 lookup_cable_names(decode_cable_selection(isci_host, 3)),
427 lookup_cable_names(decode_cable_selection(isci_host, 2)),
428 lookup_cable_names(decode_cable_selection(isci_host, 1)),
429 lookup_cable_names(decode_cable_selection(isci_host, 0)));
430
415 err = isci_host_init(isci_host); 431 err = isci_host_init(isci_host);
416 if (err) 432 if (err)
417 goto err_shost; 433 goto err_shost;
@@ -466,7 +482,8 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
466 orom = isci_request_oprom(pdev); 482 orom = isci_request_oprom(pdev);
467 483
468 for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) { 484 for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
469 if (sci_oem_parameters_validate(&orom->ctrl[i])) { 485 if (sci_oem_parameters_validate(&orom->ctrl[i],
486 orom->hdr.version)) {
470 dev_warn(&pdev->dev, 487 dev_warn(&pdev->dev,
471 "[%d]: invalid oem parameters detected, falling back to firmware\n", i); 488 "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
472 devm_kfree(&pdev->dev, orom); 489 devm_kfree(&pdev->dev, orom);
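
The cable_selection_override parameter holds one selection byte per controller in its low 16 bits, and any value at or above CABLE_OVERRIDE_DISABLED (0x10000) keeps the OEM cable_selection_mask in force. A sketch of the per-controller split, equivalent on little-endian hosts to the pointer arithmetic in to_cable_select():

/* Example: cable_selection_override=0x0f21
 *   controller 0 gets byte 0 (0x21): phy 0 long, phy 1 medium, phys 2-3 short
 *   controller 1 gets byte 1 (0x0f): all four phys long
 * The default 0x10000 fails is_cable_select_overridden(), so the OEM byte wins.
 */
static unsigned char override_byte(unsigned int override, int controller_id)
{
	return (override >> (8 * controller_id)) & 0xff;
}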
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
index 8efeb6b08321..234ab46fce33 100644
--- a/drivers/scsi/isci/isci.h
+++ b/drivers/scsi/isci/isci.h
@@ -480,6 +480,7 @@ extern u16 ssp_inactive_to;
480extern u16 stp_inactive_to; 480extern u16 stp_inactive_to;
481extern unsigned char phy_gen; 481extern unsigned char phy_gen;
482extern unsigned char max_concurr_spinup; 482extern unsigned char max_concurr_spinup;
483extern uint cable_selection_override;
483 484
484irqreturn_t isci_msix_isr(int vec, void *data); 485irqreturn_t isci_msix_isr(int vec, void *data);
485irqreturn_t isci_intx_isr(int vec, void *data); 486irqreturn_t isci_intx_isr(int vec, void *data);
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 35f50c2183e1..fe18acfd6eb3 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -91,22 +91,23 @@ sci_phy_transport_layer_initialization(struct isci_phy *iphy,
91 91
92static enum sci_status 92static enum sci_status
93sci_phy_link_layer_initialization(struct isci_phy *iphy, 93sci_phy_link_layer_initialization(struct isci_phy *iphy,
94 struct scu_link_layer_registers __iomem *reg) 94 struct scu_link_layer_registers __iomem *llr)
95{ 95{
96 struct isci_host *ihost = iphy->owning_port->owning_controller; 96 struct isci_host *ihost = iphy->owning_port->owning_controller;
97 struct sci_phy_user_params *phy_user;
98 struct sci_phy_oem_params *phy_oem;
97 int phy_idx = iphy->phy_index; 99 int phy_idx = iphy->phy_index;
98 struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx];
99 struct sci_phy_oem_params *phy_oem =
100 &ihost->oem_parameters.phys[phy_idx];
101 u32 phy_configuration;
102 struct sci_phy_cap phy_cap; 100 struct sci_phy_cap phy_cap;
101 u32 phy_configuration;
103 u32 parity_check = 0; 102 u32 parity_check = 0;
104 u32 parity_count = 0; 103 u32 parity_count = 0;
105 u32 llctl, link_rate; 104 u32 llctl, link_rate;
106 u32 clksm_value = 0; 105 u32 clksm_value = 0;
107 u32 sp_timeouts = 0; 106 u32 sp_timeouts = 0;
108 107
109 iphy->link_layer_registers = reg; 108 phy_user = &ihost->user_parameters.phys[phy_idx];
109 phy_oem = &ihost->oem_parameters.phys[phy_idx];
110 iphy->link_layer_registers = llr;
110 111
111 /* Set our IDENTIFY frame data */ 112 /* Set our IDENTIFY frame data */
112 #define SCI_END_DEVICE 0x01 113 #define SCI_END_DEVICE 0x01
@@ -116,32 +117,26 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
116 SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) | 117 SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
117 SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) | 118 SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) |
118 SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE), 119 SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE),
119 &iphy->link_layer_registers->transmit_identification); 120 &llr->transmit_identification);
120 121
121 /* Write the device SAS Address */ 122 /* Write the device SAS Address */
122 writel(0xFEDCBA98, 123 writel(0xFEDCBA98, &llr->sas_device_name_high);
123 &iphy->link_layer_registers->sas_device_name_high); 124 writel(phy_idx, &llr->sas_device_name_low);
124 writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low);
125 125
126 /* Write the source SAS Address */ 126 /* Write the source SAS Address */
127 writel(phy_oem->sas_address.high, 127 writel(phy_oem->sas_address.high, &llr->source_sas_address_high);
128 &iphy->link_layer_registers->source_sas_address_high); 128 writel(phy_oem->sas_address.low, &llr->source_sas_address_low);
129 writel(phy_oem->sas_address.low,
130 &iphy->link_layer_registers->source_sas_address_low);
131 129
132 /* Clear and Set the PHY Identifier */ 130 /* Clear and Set the PHY Identifier */
133 writel(0, &iphy->link_layer_registers->identify_frame_phy_id); 131 writel(0, &llr->identify_frame_phy_id);
134 writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), 132 writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), &llr->identify_frame_phy_id);
135 &iphy->link_layer_registers->identify_frame_phy_id);
136 133
137 /* Change the initial state of the phy configuration register */ 134 /* Change the initial state of the phy configuration register */
138 phy_configuration = 135 phy_configuration = readl(&llr->phy_configuration);
139 readl(&iphy->link_layer_registers->phy_configuration);
140 136
141 /* Hold OOB state machine in reset */ 137 /* Hold OOB state machine in reset */
142 phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET); 138 phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
143 writel(phy_configuration, 139 writel(phy_configuration, &llr->phy_configuration);
144 &iphy->link_layer_registers->phy_configuration);
145 140
146 /* Configure the SNW capabilities */ 141 /* Configure the SNW capabilities */
147 phy_cap.all = 0; 142 phy_cap.all = 0;
@@ -149,15 +144,64 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
149 phy_cap.gen3_no_ssc = 1; 144 phy_cap.gen3_no_ssc = 1;
150 phy_cap.gen2_no_ssc = 1; 145 phy_cap.gen2_no_ssc = 1;
151 phy_cap.gen1_no_ssc = 1; 146 phy_cap.gen1_no_ssc = 1;
152 if (ihost->oem_parameters.controller.do_enable_ssc == true) { 147 if (ihost->oem_parameters.controller.do_enable_ssc) {
148 struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
149 struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_idx];
150 struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
151 bool en_sas = false;
152 bool en_sata = false;
153 u32 sas_type = 0;
154 u32 sata_spread = 0x2;
155 u32 sas_spread = 0x2;
156
153 phy_cap.gen3_ssc = 1; 157 phy_cap.gen3_ssc = 1;
154 phy_cap.gen2_ssc = 1; 158 phy_cap.gen2_ssc = 1;
155 phy_cap.gen1_ssc = 1; 159 phy_cap.gen1_ssc = 1;
160
161 if (pci_info->orom->hdr.version < ISCI_ROM_VER_1_1)
162 en_sas = en_sata = true;
163 else {
164 sata_spread = ihost->oem_parameters.controller.ssc_sata_tx_spread_level;
165 sas_spread = ihost->oem_parameters.controller.ssc_sas_tx_spread_level;
166
167 if (sata_spread)
168 en_sata = true;
169
170 if (sas_spread) {
171 en_sas = true;
172 sas_type = ihost->oem_parameters.controller.ssc_sas_tx_type;
173 }
174
175 }
176
177 if (en_sas) {
178 u32 reg;
179
180 reg = readl(&xcvr->afe_xcvr_control0);
181 reg |= (0x00100000 | (sas_type << 19));
182 writel(reg, &xcvr->afe_xcvr_control0);
183
184 reg = readl(&xcvr->afe_tx_ssc_control);
185 reg |= sas_spread << 8;
186 writel(reg, &xcvr->afe_tx_ssc_control);
187 }
188
189 if (en_sata) {
190 u32 reg;
191
192 reg = readl(&xcvr->afe_tx_ssc_control);
193 reg |= sata_spread;
194 writel(reg, &xcvr->afe_tx_ssc_control);
195
196 reg = readl(&llr->stp_control);
197 reg |= 1 << 12;
198 writel(reg, &llr->stp_control);
199 }
156 } 200 }
157 201
158 /* 202 /* The SAS specification indicates that the phy_capabilities that
159 * The SAS specification indicates that the phy_capabilities that 203 * are transmitted shall have an even parity. Calculate the parity.
160 * are transmitted shall have an even parity. Calculate the parity. */ 204 */
161 parity_check = phy_cap.all; 205 parity_check = phy_cap.all;
162 while (parity_check != 0) { 206 while (parity_check != 0) {
163 if (parity_check & 0x1) 207 if (parity_check & 0x1)
@@ -165,20 +209,20 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
165 parity_check >>= 1; 209 parity_check >>= 1;
166 } 210 }
167 211
168 /* 212 /* If parity indicates there are an odd number of bits set, then
169 * If parity indicates there are an odd number of bits set, then 213 * set the parity bit to 1 in the phy capabilities.
170 * set the parity bit to 1 in the phy capabilities. */ 214 */
171 if ((parity_count % 2) != 0) 215 if ((parity_count % 2) != 0)
172 phy_cap.parity = 1; 216 phy_cap.parity = 1;
173 217
174 writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities); 218 writel(phy_cap.all, &llr->phy_capabilities);
175 219
176 /* Set the enable spinup period but disable the ability to send 220 /* Set the enable spinup period but disable the ability to send
177 * notify enable spinup 221 * notify enable spinup
178 */ 222 */
179 writel(SCU_ENSPINUP_GEN_VAL(COUNT, 223 writel(SCU_ENSPINUP_GEN_VAL(COUNT,
180 phy_user->notify_enable_spin_up_insertion_frequency), 224 phy_user->notify_enable_spin_up_insertion_frequency),
181 &iphy->link_layer_registers->notify_enable_spinup_control); 225 &llr->notify_enable_spinup_control);
182 226
 183 /* Write the ALIGN Insertion Frequency for connected phy and 227 /* Write the ALIGN Insertion Frequency for connected phy and
 184 * independent of connected state 228 * independent of connected state
@@ -189,11 +233,13 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
189 clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL, 233 clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL,
190 phy_user->align_insertion_frequency); 234 phy_user->align_insertion_frequency);
191 235
192 writel(clksm_value, &iphy->link_layer_registers->clock_skew_management); 236 writel(clksm_value, &llr->clock_skew_management);
193 237
194 /* @todo Provide a way to write this register correctly */ 238 if (is_c0(ihost->pdev) || is_c1(ihost->pdev)) {
195 writel(0x02108421, 239 writel(0x04210400, &llr->afe_lookup_table_control);
196 &iphy->link_layer_registers->afe_lookup_table_control); 240 writel(0x020A7C05, &llr->sas_primitive_timeout);
241 } else
242 writel(0x02108421, &llr->afe_lookup_table_control);
197 243
198 llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT, 244 llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
199 (u8)ihost->user_parameters.no_outbound_task_timeout); 245 (u8)ihost->user_parameters.no_outbound_task_timeout);
@@ -210,9 +256,9 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
210 break; 256 break;
211 } 257 }
212 llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate); 258 llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
213 writel(llctl, &iphy->link_layer_registers->link_layer_control); 259 writel(llctl, &llr->link_layer_control);
214 260
215 sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts); 261 sp_timeouts = readl(&llr->sas_phy_timeouts);
216 262
217 /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */ 263 /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */
218 sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF); 264 sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);
@@ -222,20 +268,23 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
222 */ 268 */
223 sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B); 269 sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B);
224 270
225 writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts); 271 writel(sp_timeouts, &llr->sas_phy_timeouts);
226 272
227 if (is_a2(ihost->pdev)) { 273 if (is_a2(ihost->pdev)) {
228 /* Program the max ARB time for the PHY to 700us so we inter-operate with 274 /* Program the max ARB time for the PHY to 700us so we
229 * the PMC expander which shuts down PHYs if the expander PHY generates too 275 * inter-operate with the PMC expander which shuts down
230 * many breaks. This time value will guarantee that the initiator PHY will 276 * PHYs if the expander PHY generates too many breaks.
231 * generate the break. 277 * This time value will guarantee that the initiator PHY
278 * will generate the break.
232 */ 279 */
233 writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME, 280 writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME,
234 &iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout); 281 &llr->maximum_arbitration_wait_timer_timeout);
235 } 282 }
236 283
237 /* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */ 284 /* Disable link layer hang detection, rely on the OS timeout for
238 writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout); 285 * I/O timeouts.
286 */
287 writel(0, &llr->link_layer_hang_detection_timeout);
239 288
240 /* We can exit the initial state to the stopped state */ 289 /* We can exit the initial state to the stopped state */
241 sci_change_state(&iphy->sm, SCI_PHY_STOPPED); 290 sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
@@ -1049,24 +1098,25 @@ static void scu_link_layer_stop_protocol_engine(
1049 writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control); 1098 writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control);
1050} 1099}
1051 1100
1052/** 1101static void scu_link_layer_start_oob(struct isci_phy *iphy)
1053 *
1054 *
1055 * This method will start the OOB/SN state machine for this struct isci_phy object.
1056 */
1057static void scu_link_layer_start_oob(
1058 struct isci_phy *iphy)
1059{ 1102{
1060 u32 scu_sas_pcfg_value; 1103 struct scu_link_layer_registers __iomem *ll = iphy->link_layer_registers;
1061 1104 u32 val;
1062 scu_sas_pcfg_value = 1105
1063 readl(&iphy->link_layer_registers->phy_configuration); 1106 /** Reset OOB sequence - start */
1064 scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE); 1107 val = readl(&ll->phy_configuration);
1065 scu_sas_pcfg_value &= 1108 val &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
1066 ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) | 1109 SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
1067 SCU_SAS_PCFG_GEN_BIT(HARD_RESET)); 1110 writel(val, &ll->phy_configuration);
1068 writel(scu_sas_pcfg_value, 1111 readl(&ll->phy_configuration); /* flush */
1069 &iphy->link_layer_registers->phy_configuration); 1112 /** Reset OOB sequence - end */
1113
1114 /** Start OOB sequence - start */
1115 val = readl(&ll->phy_configuration);
1116 val |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
1117 writel(val, &ll->phy_configuration);
1118 readl(&ll->phy_configuration); /* flush */
1119 /** Start OOB sequence - end */
1070} 1120}
1071 1121
1072/** 1122/**
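
The retained parity loop in sci_phy_link_layer_initialization() just counts the set bits of phy_cap.all; an equivalent check using the kernel's hweight32() (a sketch, not what the driver does):

#include <linux/bitops.h>
#include <linux/types.h>

/* SAS wants the transmitted capabilities word to have even parity overall,
 * so phy_cap.parity must be set whenever the other bits sum to an odd count.
 */
static bool phy_cap_needs_parity_bit(u32 cap_without_parity)
{
	return hweight32(cap_without_parity) & 1;
}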
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index ac7f27749f97..7c6ac58a5c4c 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -114,7 +114,7 @@ static u32 sci_port_get_phys(struct isci_port *iport)
114 * value is returned if the specified port is not valid. When this value is 114 * value is returned if the specified port is not valid. When this value is
115 * returned, no data is copied to the properties output parameter. 115 * returned, no data is copied to the properties output parameter.
116 */ 116 */
117static enum sci_status sci_port_get_properties(struct isci_port *iport, 117enum sci_status sci_port_get_properties(struct isci_port *iport,
118 struct sci_port_properties *prop) 118 struct sci_port_properties *prop)
119{ 119{
120 if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT) 120 if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
@@ -647,19 +647,26 @@ void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
647 } 647 }
648} 648}
649 649
650static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy, 650static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy)
651 bool do_notify_user) 651{
652 sci_phy_resume(iphy);
653 iport->enabled_phy_mask |= 1 << iphy->phy_index;
654}
655
656static void sci_port_activate_phy(struct isci_port *iport,
657 struct isci_phy *iphy,
658 u8 flags)
652{ 659{
653 struct isci_host *ihost = iport->owning_controller; 660 struct isci_host *ihost = iport->owning_controller;
654 661
655 if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) 662 if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA && (flags & PF_RESUME))
656 sci_phy_resume(iphy); 663 sci_phy_resume(iphy);
657 664
658 iport->active_phy_mask |= 1 << iphy->phy_index; 665 iport->active_phy_mask |= 1 << iphy->phy_index;
659 666
660 sci_controller_clear_invalid_phy(ihost, iphy); 667 sci_controller_clear_invalid_phy(ihost, iphy);
661 668
662 if (do_notify_user == true) 669 if (flags & PF_NOTIFY)
663 isci_port_link_up(ihost, iport, iphy); 670 isci_port_link_up(ihost, iport, iphy);
664} 671}
665 672
@@ -669,14 +676,19 @@ void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
669 struct isci_host *ihost = iport->owning_controller; 676 struct isci_host *ihost = iport->owning_controller;
670 677
671 iport->active_phy_mask &= ~(1 << iphy->phy_index); 678 iport->active_phy_mask &= ~(1 << iphy->phy_index);
679 iport->enabled_phy_mask &= ~(1 << iphy->phy_index);
672 if (!iport->active_phy_mask) 680 if (!iport->active_phy_mask)
673 iport->last_active_phy = iphy->phy_index; 681 iport->last_active_phy = iphy->phy_index;
674 682
675 iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN; 683 iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
676 684
677 /* Re-assign the phy back to the LP as if it were a narrow port */ 685 /* Re-assign the phy back to the LP as if it were a narrow port for APC
678 writel(iphy->phy_index, 686 * mode. For MPC mode, the phy will remain in the port.
679 &iport->port_pe_configuration_register[iphy->phy_index]); 687 */
688 if (iport->owning_controller->oem_parameters.controller.mode_type ==
689 SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE)
690 writel(iphy->phy_index,
691 &iport->port_pe_configuration_register[iphy->phy_index]);
680 692
681 if (do_notify_user == true) 693 if (do_notify_user == true)
682 isci_port_link_down(ihost, iphy, iport); 694 isci_port_link_down(ihost, iphy, iport);
@@ -701,18 +713,16 @@ static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *i
701 * sci_port_general_link_up_handler - phy can be assigned to port? 713 * sci_port_general_link_up_handler - phy can be assigned to port?
702 * @sci_port: sci_port object for which has a phy that has gone link up. 714 * @sci_port: sci_port object for which has a phy that has gone link up.
703 * @sci_phy: This is the struct isci_phy object that has gone link up. 715 * @sci_phy: This is the struct isci_phy object that has gone link up.
704 * @do_notify_user: This parameter specifies whether to inform the user (via 716 * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy
705 * sci_port_link_up()) as to the fact that a new phy as become ready.
706 * 717 *
707 * Determine if this phy can be assigned to this 718 * Determine if this phy can be assigned to this port . If the phy is
708 * port . If the phy is not a valid PHY for 719 * not a valid PHY for this port then the function will notify the user.
 709 * this port then the function will notify the user. A PHY can only be 720 * A PHY can only be part of a port if its attached SAS ADDRESS is the
 710 * part of a port if its attached SAS ADDRESS is the same as all other PHYs in 721 * same as all other PHYs in the same port.
711 * the same port. none
712 */ 722 */
713static void sci_port_general_link_up_handler(struct isci_port *iport, 723static void sci_port_general_link_up_handler(struct isci_port *iport,
714 struct isci_phy *iphy, 724 struct isci_phy *iphy,
715 bool do_notify_user) 725 u8 flags)
716{ 726{
717 struct sci_sas_address port_sas_address; 727 struct sci_sas_address port_sas_address;
718 struct sci_sas_address phy_sas_address; 728 struct sci_sas_address phy_sas_address;
@@ -730,7 +740,7 @@ static void sci_port_general_link_up_handler(struct isci_port *iport,
730 iport->active_phy_mask == 0) { 740 iport->active_phy_mask == 0) {
731 struct sci_base_state_machine *sm = &iport->sm; 741 struct sci_base_state_machine *sm = &iport->sm;
732 742
733 sci_port_activate_phy(iport, iphy, do_notify_user); 743 sci_port_activate_phy(iport, iphy, flags);
734 if (sm->current_state_id == SCI_PORT_RESETTING) 744 if (sm->current_state_id == SCI_PORT_RESETTING)
735 port_state_machine_change(iport, SCI_PORT_READY); 745 port_state_machine_change(iport, SCI_PORT_READY);
736 } else 746 } else
@@ -781,11 +791,16 @@ bool sci_port_link_detected(
781 struct isci_phy *iphy) 791 struct isci_phy *iphy)
782{ 792{
783 if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) && 793 if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
784 (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) && 794 (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA)) {
785 sci_port_is_wide(iport)) { 795 if (sci_port_is_wide(iport)) {
786 sci_port_invalid_link_up(iport, iphy); 796 sci_port_invalid_link_up(iport, iphy);
787 797 return false;
788 return false; 798 } else {
799 struct isci_host *ihost = iport->owning_controller;
800 struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]);
801 writel(iphy->phy_index,
802 &dst_port->port_pe_configuration_register[iphy->phy_index]);
803 }
789 } 804 }
790 805
791 return true; 806 return true;
@@ -975,6 +990,13 @@ static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine
975 } 990 }
976} 991}
977 992
993static void scic_sds_port_ready_substate_waiting_exit(
994 struct sci_base_state_machine *sm)
995{
996 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
997 sci_port_resume_port_task_scheduler(iport);
998}
999
978static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm) 1000static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
979{ 1001{
980 u32 index; 1002 u32 index;
@@ -988,13 +1010,13 @@ static void sci_port_ready_substate_operational_enter(struct sci_base_state_mach
988 writel(iport->physical_port_index, 1010 writel(iport->physical_port_index,
989 &iport->port_pe_configuration_register[ 1011 &iport->port_pe_configuration_register[
990 iport->phy_table[index]->phy_index]); 1012 iport->phy_table[index]->phy_index]);
1013 if (((iport->active_phy_mask^iport->enabled_phy_mask) & (1 << index)) != 0)
1014 sci_port_resume_phy(iport, iport->phy_table[index]);
991 } 1015 }
992 } 1016 }
993 1017
994 sci_port_update_viit_entry(iport); 1018 sci_port_update_viit_entry(iport);
995 1019
996 sci_port_resume_port_task_scheduler(iport);
997
998 /* 1020 /*
999 * Post the dummy task for the port so the hardware can schedule 1021 * Post the dummy task for the port so the hardware can schedule
1000 * io correctly 1022 * io correctly
@@ -1061,20 +1083,9 @@ static void sci_port_ready_substate_configuring_enter(struct sci_base_state_mach
1061 if (iport->active_phy_mask == 0) { 1083 if (iport->active_phy_mask == 0) {
1062 isci_port_not_ready(ihost, iport); 1084 isci_port_not_ready(ihost, iport);
1063 1085
1064 port_state_machine_change(iport, 1086 port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
1065 SCI_PORT_SUB_WAITING); 1087 } else
1066 } else if (iport->started_request_count == 0) 1088 port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
1067 port_state_machine_change(iport,
1068 SCI_PORT_SUB_OPERATIONAL);
1069}
1070
1071static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
1072{
1073 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1074
1075 sci_port_suspend_port_task_scheduler(iport);
1076 if (iport->ready_exit)
1077 sci_port_invalidate_dummy_remote_node(iport);
1078} 1089}
1079 1090
1080enum sci_status sci_port_start(struct isci_port *iport) 1091enum sci_status sci_port_start(struct isci_port *iport)
@@ -1252,7 +1263,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
1252 if (status != SCI_SUCCESS) 1263 if (status != SCI_SUCCESS)
1253 return status; 1264 return status;
1254 1265
1255 sci_port_general_link_up_handler(iport, iphy, true); 1266 sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
1256 iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; 1267 iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
1257 port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING); 1268 port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
1258 1269
@@ -1262,7 +1273,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
1262 1273
1263 if (status != SCI_SUCCESS) 1274 if (status != SCI_SUCCESS)
1264 return status; 1275 return status;
1265 sci_port_general_link_up_handler(iport, iphy, true); 1276 sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY);
1266 1277
1267 /* Re-enter the configuring state since this may be the last phy in 1278 /* Re-enter the configuring state since this may be the last phy in
1268 * the port. 1279 * the port.
@@ -1338,13 +1349,13 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
1338 /* Since this is the first phy going link up for the port we 1349 /* Since this is the first phy going link up for the port we
1339 * can just enable it and continue 1350 * can just enable it and continue
1340 */ 1351 */
1341 sci_port_activate_phy(iport, iphy, true); 1352 sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME);
1342 1353
1343 port_state_machine_change(iport, 1354 port_state_machine_change(iport,
1344 SCI_PORT_SUB_OPERATIONAL); 1355 SCI_PORT_SUB_OPERATIONAL);
1345 return SCI_SUCCESS; 1356 return SCI_SUCCESS;
1346 case SCI_PORT_SUB_OPERATIONAL: 1357 case SCI_PORT_SUB_OPERATIONAL:
1347 sci_port_general_link_up_handler(iport, iphy, true); 1358 sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
1348 return SCI_SUCCESS; 1359 return SCI_SUCCESS;
1349 case SCI_PORT_RESETTING: 1360 case SCI_PORT_RESETTING:
1350 /* TODO We should make sure that the phy that has gone 1361 /* TODO We should make sure that the phy that has gone
@@ -1361,7 +1372,7 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
1361 /* In the resetting state we don't notify the user regarding 1372 /* In the resetting state we don't notify the user regarding
1362 * link up and link down notifications. 1373 * link up and link down notifications.
1363 */ 1374 */
1364 sci_port_general_link_up_handler(iport, iphy, false); 1375 sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
1365 return SCI_SUCCESS; 1376 return SCI_SUCCESS;
1366 default: 1377 default:
1367 dev_warn(sciport_to_dev(iport), 1378 dev_warn(sciport_to_dev(iport),
@@ -1584,14 +1595,14 @@ static const struct sci_base_state sci_port_state_table[] = {
1584 }, 1595 },
1585 [SCI_PORT_SUB_WAITING] = { 1596 [SCI_PORT_SUB_WAITING] = {
1586 .enter_state = sci_port_ready_substate_waiting_enter, 1597 .enter_state = sci_port_ready_substate_waiting_enter,
1598 .exit_state = scic_sds_port_ready_substate_waiting_exit,
1587 }, 1599 },
1588 [SCI_PORT_SUB_OPERATIONAL] = { 1600 [SCI_PORT_SUB_OPERATIONAL] = {
1589 .enter_state = sci_port_ready_substate_operational_enter, 1601 .enter_state = sci_port_ready_substate_operational_enter,
1590 .exit_state = sci_port_ready_substate_operational_exit 1602 .exit_state = sci_port_ready_substate_operational_exit
1591 }, 1603 },
1592 [SCI_PORT_SUB_CONFIGURING] = { 1604 [SCI_PORT_SUB_CONFIGURING] = {
1593 .enter_state = sci_port_ready_substate_configuring_enter, 1605 .enter_state = sci_port_ready_substate_configuring_enter
1594 .exit_state = sci_port_ready_substate_configuring_exit
1595 }, 1606 },
1596 [SCI_PORT_RESETTING] = { 1607 [SCI_PORT_RESETTING] = {
1597 .exit_state = sci_port_resetting_state_exit 1608 .exit_state = sci_port_resetting_state_exit
@@ -1609,6 +1620,7 @@ void sci_port_construct(struct isci_port *iport, u8 index,
1609 iport->logical_port_index = SCIC_SDS_DUMMY_PORT; 1620 iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
1610 iport->physical_port_index = index; 1621 iport->physical_port_index = index;
1611 iport->active_phy_mask = 0; 1622 iport->active_phy_mask = 0;
1623 iport->enabled_phy_mask = 0;
1612 iport->last_active_phy = 0; 1624 iport->last_active_phy = 0;
1613 iport->ready_exit = false; 1625 iport->ready_exit = false;
1614 1626
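
The port.c hunks above replace the single do_notify_user boolean with a small flag word (PF_NOTIFY, PF_RESUME, defined in port.h below), so a caller can request user notification and phy resume independently. A minimal userspace sketch of that pattern, assuming nothing beyond the two flag bits shown in the patch (the helper names here are stand-ins, not the driver's API):

#include <stdint.h>
#include <stdio.h>

#define PF_NOTIFY (1 << 0)
#define PF_RESUME (1 << 1)

/* Illustrative stand-ins for the driver calls; not the real isci functions. */
static void notify_link_up(int phy)  { printf("notify link up: phy %d\n", phy); }
static void resume_phy(int phy)      { printf("resume: phy %d\n", phy); }

static void activate_phy(int phy, uint8_t flags)
{
	if (flags & PF_RESUME)
		resume_phy(phy);     /* only when the caller asked for it */
	if (flags & PF_NOTIFY)
		notify_link_up(phy); /* replaces the old do_notify_user bool */
}

int main(void)
{
	activate_phy(0, PF_NOTIFY | PF_RESUME); /* e.g. first phy up on a port */
	activate_phy(1, PF_RESUME);             /* resetting: resume, no notify */
	return 0;
}
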
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
index cb5ffbc38603..08116090eb70 100644
--- a/drivers/scsi/isci/port.h
+++ b/drivers/scsi/isci/port.h
@@ -63,6 +63,9 @@
63 63
64#define SCIC_SDS_DUMMY_PORT 0xFF 64#define SCIC_SDS_DUMMY_PORT 0xFF
65 65
66#define PF_NOTIFY (1 << 0)
67#define PF_RESUME (1 << 1)
68
66struct isci_phy; 69struct isci_phy;
67struct isci_host; 70struct isci_host;
68 71
@@ -83,6 +86,8 @@ enum isci_status {
83 * @logical_port_index: software port index 86 * @logical_port_index: software port index
84 * @physical_port_index: hardware port index 87 * @physical_port_index: hardware port index
85 * @active_phy_mask: identifies phy members 88 * @active_phy_mask: identifies phy members
89 * @enabled_phy_mask: mask of phys that
90 * are already part of the port
86 * @reserved_tag: 91 * @reserved_tag:
87 * @reserved_rni: reserver for port task scheduler workaround 92 * @reserved_rni: reserver for port task scheduler workaround
88 * @started_request_count: reference count for outstanding commands 93 * @started_request_count: reference count for outstanding commands
@@ -104,6 +109,7 @@ struct isci_port {
104 u8 logical_port_index; 109 u8 logical_port_index;
105 u8 physical_port_index; 110 u8 physical_port_index;
106 u8 active_phy_mask; 111 u8 active_phy_mask;
112 u8 enabled_phy_mask;
107 u8 last_active_phy; 113 u8 last_active_phy;
108 u16 reserved_rni; 114 u16 reserved_rni;
109 u16 reserved_tag; 115 u16 reserved_tag;
@@ -250,6 +256,10 @@ bool sci_port_link_detected(
250 struct isci_port *iport, 256 struct isci_port *iport,
251 struct isci_phy *iphy); 257 struct isci_phy *iphy);
252 258
259enum sci_status sci_port_get_properties(
260 struct isci_port *iport,
261 struct sci_port_properties *prop);
262
253enum sci_status sci_port_link_up(struct isci_port *iport, 263enum sci_status sci_port_link_up(struct isci_port *iport,
254 struct isci_phy *iphy); 264 struct isci_phy *iphy);
255enum sci_status sci_port_link_down(struct isci_port *iport, 265enum sci_status sci_port_link_down(struct isci_port *iport,
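
port.h adds enabled_phy_mask alongside active_phy_mask; in the operational-state entry hunk above, the XOR of the two masks picks out phys whose bits disagree (link up but not yet enabled) so only those get resumed. A small standalone illustration of that bit test, assuming one bit per phy index as in the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t active_phy_mask  = 0x03; /* phys 0 and 1 are link-up */
	uint8_t enabled_phy_mask = 0x01; /* only phy 0 has been enabled so far */

	for (int index = 0; index < 4; index++) {
		/* Same test as the operational-enter hunk: a set bit means the
		 * two masks disagree for this phy, so it still needs a resume. */
		if (((active_phy_mask ^ enabled_phy_mask) & (1 << index)) != 0)
			printf("phy %d needs resume\n", index);
	}
	return 0;
}
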
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 38a99d281141..6d1e9544cbe5 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -57,7 +57,7 @@
57 57
58#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10) 58#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10)
59#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10) 59#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10)
60#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (100) 60#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (250)
61 61
62enum SCIC_SDS_APC_ACTIVITY { 62enum SCIC_SDS_APC_ACTIVITY {
63 SCIC_SDS_APC_SKIP_PHY, 63 SCIC_SDS_APC_SKIP_PHY,
@@ -466,6 +466,23 @@ sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
466 return sci_port_configuration_agent_validate_ports(ihost, port_agent); 466 return sci_port_configuration_agent_validate_ports(ihost, port_agent);
467} 467}
468 468
469/*
470 * This routine will restart the automatic port configuration timeout
471 * timer for the next time period. This could be caused by either a link
472 * down event or a link up event where we cannot yet tell to which port a
473 * phy belongs.
474 */
475static void sci_apc_agent_start_timer(
476 struct sci_port_configuration_agent *port_agent,
477 u32 timeout)
478{
479 if (port_agent->timer_pending)
480 sci_del_timer(&port_agent->timer);
481
482 port_agent->timer_pending = true;
483 sci_mod_timer(&port_agent->timer, timeout);
484}
485
469static void sci_apc_agent_configure_ports(struct isci_host *ihost, 486static void sci_apc_agent_configure_ports(struct isci_host *ihost,
470 struct sci_port_configuration_agent *port_agent, 487 struct sci_port_configuration_agent *port_agent,
471 struct isci_phy *iphy, 488 struct isci_phy *iphy,
@@ -565,17 +582,8 @@ static void sci_apc_agent_configure_ports(struct isci_host *ihost,
565 break; 582 break;
566 583
567 case SCIC_SDS_APC_START_TIMER: 584 case SCIC_SDS_APC_START_TIMER:
568 /* 585 sci_apc_agent_start_timer(port_agent,
569 * This can occur for either a link down event, or a link 586 SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
570 * up event where we cannot yet tell the port to which a
571 * phy belongs.
572 */
573 if (port_agent->timer_pending)
574 sci_del_timer(&port_agent->timer);
575
576 port_agent->timer_pending = true;
577 sci_mod_timer(&port_agent->timer,
578 SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
579 break; 587 break;
580 588
581 case SCIC_SDS_APC_SKIP_PHY: 589 case SCIC_SDS_APC_SKIP_PHY:
@@ -607,7 +615,8 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
607 if (!iport) { 615 if (!iport) {
608 /* the phy is not part of this port */ 616 /* the phy is not part of this port */
609 port_agent->phy_ready_mask |= 1 << phy_index; 617 port_agent->phy_ready_mask |= 1 << phy_index;
610 sci_apc_agent_configure_ports(ihost, port_agent, iphy, true); 618 sci_apc_agent_start_timer(port_agent,
619 SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
611 } else { 620 } else {
612 /* the phy is already part of the port */ 621 /* the phy is already part of the port */
613 u32 port_state = iport->sm.current_state_id; 622 u32 port_state = iport->sm.current_state_id;
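
sci_apc_agent_start_timer() above folds the repeated "cancel if pending, then re-arm" sequence into one helper, which the link-up path now also uses for phys that are not yet in a port. A rough standalone mock of that restart pattern; the timer type and del/mod calls are stand-ins for the driver's sci_* timer API:

#include <stdbool.h>
#include <stdio.h>

struct mock_timer { unsigned long expires; };

static void mock_del_timer(struct mock_timer *t) { t->expires = 0; }
static void mock_mod_timer(struct mock_timer *t, unsigned long timeout)
{
	t->expires = timeout; /* in the driver this schedules the callback */
}

struct port_agent {
	bool timer_pending;
	struct mock_timer timer;
};

/* Restart the APC timeout: cancel any pending instance, then re-arm. */
static void apc_agent_start_timer(struct port_agent *pa, unsigned long timeout)
{
	if (pa->timer_pending)
		mock_del_timer(&pa->timer);

	pa->timer_pending = true;
	mock_mod_timer(&pa->timer, timeout);
}

int main(void)
{
	struct port_agent pa = { .timer_pending = false };

	apc_agent_start_timer(&pa, 250); /* matches the new 250 ms wait above */
	apc_agent_start_timer(&pa, 250); /* a second call cancels and re-arms */
	printf("timer armed for %lu\n", pa.timer.expires);
	return 0;
}
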
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
index b5f4341de243..9b8117b9d756 100644
--- a/drivers/scsi/isci/probe_roms.c
+++ b/drivers/scsi/isci/probe_roms.c
@@ -147,7 +147,7 @@ struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmw
147 147
148 memcpy(orom, fw->data, fw->size); 148 memcpy(orom, fw->data, fw->size);
149 149
150 if (is_c0(pdev)) 150 if (is_c0(pdev) || is_c1(pdev))
151 goto out; 151 goto out;
152 152
153 /* 153 /*
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
index 2c75248ca326..bb0e9d4d97c9 100644
--- a/drivers/scsi/isci/probe_roms.h
+++ b/drivers/scsi/isci/probe_roms.h
@@ -152,7 +152,7 @@ struct sci_user_parameters {
152#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4 152#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
153 153
154struct sci_oem_params; 154struct sci_oem_params;
155int sci_oem_parameters_validate(struct sci_oem_params *oem); 155int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version);
156 156
157struct isci_orom; 157struct isci_orom;
158struct isci_orom *isci_request_oprom(struct pci_dev *pdev); 158struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
@@ -191,6 +191,11 @@ struct isci_oem_hdr {
191 0x1a, 0x04, 0xc6) 191 0x1a, 0x04, 0xc6)
192#define ISCI_EFI_VAR_NAME "RstScuO" 192#define ISCI_EFI_VAR_NAME "RstScuO"
193 193
194#define ISCI_ROM_VER_1_0 0x10
195#define ISCI_ROM_VER_1_1 0x11
196#define ISCI_ROM_VER_1_3 0x13
197#define ISCI_ROM_VER_LATEST ISCI_ROM_VER_1_3
198
194/* Allowed PORT configuration modes APC Automatic PORT configuration mode is 199/* Allowed PORT configuration modes APC Automatic PORT configuration mode is
195 * defined by the OEM configuration parameters providing no PHY_MASK parameters 200 * defined by the OEM configuration parameters providing no PHY_MASK parameters
196 * for any PORT. i.e. There are no phys assigned to any of the ports at start. 201 * for any PORT. i.e. There are no phys assigned to any of the ports at start.
@@ -220,8 +225,86 @@ struct sci_oem_params {
220 struct { 225 struct {
221 uint8_t mode_type; 226 uint8_t mode_type;
222 uint8_t max_concurr_spin_up; 227 uint8_t max_concurr_spin_up;
223 uint8_t do_enable_ssc; 228 /*
224 uint8_t reserved; 229 * This bitfield indicates the OEM's desired default Tx
230 * Spread Spectrum Clocking (SSC) settings for SATA and SAS.
231 * NOTE: Default SSC Modulation Frequency is 31.5KHz.
232 */
233 union {
234 struct {
235 /*
236 * NOTE: Max spread for SATA is +0 / -5000 PPM.
237 * Down-spreading SSC (only method allowed for SATA):
238 * SATA SSC Tx Disabled = 0x0
239 * SATA SSC Tx at +0 / -1419 PPM Spread = 0x2
240 * SATA SSC Tx at +0 / -2129 PPM Spread = 0x3
241 * SATA SSC Tx at +0 / -4257 PPM Spread = 0x6
242 * SATA SSC Tx at +0 / -4967 PPM Spread = 0x7
243 */
244 uint8_t ssc_sata_tx_spread_level:4;
245 /*
246 * SAS SSC Tx Disabled = 0x0
247 *
248 * NOTE: Max spread for SAS down-spreading +0 /
249 * -2300 PPM
250 * Down-spreading SSC:
251 * SAS SSC Tx at +0 / -1419 PPM Spread = 0x2
252 * SAS SSC Tx at +0 / -2129 PPM Spread = 0x3
253 *
254 * NOTE: Max spread for SAS center-spreading +2300 /
255 * -2300 PPM
256 * Center-spreading SSC:
257 * SAS SSC Tx at +1064 / -1064 PPM Spread = 0x3
258 * SAS SSC Tx at +2129 / -2129 PPM Spread = 0x6
259 */
260 uint8_t ssc_sas_tx_spread_level:3;
261 /*
262 * NOTE: Refer to the SSC section of the SAS 2.x
263 * Specification for proper setting of this field.
264 * For standard SAS Initiator SAS PHY operation it
265 * should be 0 for Down-spreading.
266 * SAS SSC Tx spread type:
267 * Down-spreading SSC = 0
268 * Center-spreading SSC = 1
269 */
270 uint8_t ssc_sas_tx_type:1;
271 };
272 uint8_t do_enable_ssc;
273 };
274 /*
275 * This field indicates the length of the SAS/SATA cable between
276 * host and device.
277 * This field is used to relate the analog parameters of the phy
278 * in the silicon to the length of the cable.
279 * Supported cable attenuation levels:
280 * "short"- up to 3m, "medium"-3m to 6m, and "long"- more than
281 * 6m.
282 *
283 * This is bit mask field:
284 *
285 * BIT: (MSB) 7 6 5 4
286 * ASSIGNMENT: <phy3><phy2><phy1><phy0> - Medium cable
287 * length assignment
288 * BIT: 3 2 1 0 (LSB)
289 * ASSIGNMENT: <phy3><phy2><phy1><phy0> - Long cable length
290 * assignment
291 *
292 * BITS 7-4 are set when the cable length is assigned to medium
293 * BITS 3-0 are set when the cable length is assigned to long
294 *
295 * The BIT positions are clear when the cable length is
296 * assigned to short.
297 *
298 * Setting the bits for both long and medium cable length is
299 * undefined.
300 *
301 * A value of 0x84 would assign
302 * phy3 - medium
303 * phy2 - long
304 * phy1 - short
305 * phy0 - short
306 */
307 uint8_t cable_selection_mask;
225 } controller; 308 } controller;
226 309
227 struct { 310 struct {
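
The cable_selection_mask comment above packs two bits per phy: bit (4 + n) marks phy n as "medium" and bit n marks it as "long", with both bits clear meaning "short". A small decoder for the 0x84 example given in the comment (the helper name and strings are illustrative, not from the driver):

#include <stdint.h>
#include <stdio.h>

static const char *cable_len(uint8_t mask, int phy)
{
	int medium = (mask >> (4 + phy)) & 1; /* bits 7..4: medium */
	int longc  = (mask >> phy) & 1;       /* bits 3..0: long   */

	if (medium && longc)
		return "undefined";           /* setting both is undefined */
	if (medium)
		return "medium (3m-6m)";
	if (longc)
		return "long (>6m)";
	return "short (<=3m)";
}

int main(void)
{
	uint8_t mask = 0x84; /* the worked example from the comment */

	for (int phy = 3; phy >= 0; phy--)
		printf("phy%d: %s\n", phy, cable_len(mask, phy));
	/* Expected: phy3 medium, phy2 long, phy1 short, phy0 short */
	return 0;
}
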
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index b207cd3b15a0..dd74b6ceeb82 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -53,6 +53,7 @@
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */ 54 */
55#include <scsi/sas.h> 55#include <scsi/sas.h>
56#include <linux/bitops.h>
56#include "isci.h" 57#include "isci.h"
57#include "port.h" 58#include "port.h"
58#include "remote_device.h" 59#include "remote_device.h"
@@ -1101,6 +1102,7 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
1101 struct isci_remote_device *idev) 1102 struct isci_remote_device *idev)
1102{ 1103{
1103 enum sci_status status; 1104 enum sci_status status;
1105 struct sci_port_properties properties;
1104 struct domain_device *dev = idev->domain_dev; 1106 struct domain_device *dev = idev->domain_dev;
1105 1107
1106 sci_remote_device_construct(iport, idev); 1108 sci_remote_device_construct(iport, idev);
@@ -1110,6 +1112,11 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
1110 * entries will be needed to store the remote node. 1112 * entries will be needed to store the remote node.
1111 */ 1113 */
1112 idev->is_direct_attached = true; 1114 idev->is_direct_attached = true;
1115
1116 sci_port_get_properties(iport, &properties);
1117 /* Get accurate port width from port's phy mask for a DA device. */
1118 idev->device_port_width = hweight32(properties.phy_mask);
1119
1113 status = sci_controller_allocate_remote_node_context(iport->owning_controller, 1120 status = sci_controller_allocate_remote_node_context(iport->owning_controller,
1114 idev, 1121 idev,
1115 &idev->rnc.remote_node_index); 1122 &idev->rnc.remote_node_index);
@@ -1125,9 +1132,6 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
1125 1132
1126 idev->connection_rate = sci_port_get_max_allowed_speed(iport); 1133 idev->connection_rate = sci_port_get_max_allowed_speed(iport);
1127 1134
1128 /* / @todo Should I assign the port width by reading all of the phys on the port? */
1129 idev->device_port_width = 1;
1130
1131 return SCI_SUCCESS; 1135 return SCI_SUCCESS;
1132} 1136}
1133 1137
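
The remote_device.c change derives a direct-attached device's port width from the port's phy mask instead of hardcoding 1; hweight32() is simply a population count. A standalone equivalent, using the GCC/Clang builtin as a stand-in for the kernel helper:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t phy_mask = 0x0f;                  /* e.g. a x4 wide port: phys 0-3 */
	int width = __builtin_popcount(phy_mask);  /* kernel: hweight32(phy_mask) */

	printf("device_port_width = %d\n", width); /* prints 4 */
	return 0;
}
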
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 66ad3dc89498..f5a3f7d2bdab 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -496,7 +496,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
496 } 496 }
497 } 497 }
498 498
499 isci_print_tmf(tmf); 499 isci_print_tmf(ihost, tmf);
500 500
501 if (tmf->status == SCI_SUCCESS) 501 if (tmf->status == SCI_SUCCESS)
502 ret = TMF_RESP_FUNC_COMPLETE; 502 ret = TMF_RESP_FUNC_COMPLETE;
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index bc78c0a41d5c..1b27b3797c6c 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -106,7 +106,6 @@ struct isci_tmf {
106 } resp; 106 } resp;
107 unsigned char lun[8]; 107 unsigned char lun[8];
108 u16 io_tag; 108 u16 io_tag;
109 struct isci_remote_device *device;
110 enum isci_tmf_function_codes tmf_code; 109 enum isci_tmf_function_codes tmf_code;
111 int status; 110 int status;
112 111
@@ -120,10 +119,10 @@ struct isci_tmf {
120 119
121}; 120};
122 121
123static inline void isci_print_tmf(struct isci_tmf *tmf) 122static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
124{ 123{
125 if (SAS_PROTOCOL_SATA == tmf->proto) 124 if (SAS_PROTOCOL_SATA == tmf->proto)
126 dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev, 125 dev_dbg(&ihost->pdev->dev,
127 "%s: status = %x\n" 126 "%s: status = %x\n"
128 "tmf->resp.d2h_fis.status = %x\n" 127 "tmf->resp.d2h_fis.status = %x\n"
129 "tmf->resp.d2h_fis.error = %x\n", 128 "tmf->resp.d2h_fis.error = %x\n",
@@ -132,7 +131,7 @@ static inline void isci_print_tmf(struct isci_tmf *tmf)
132 tmf->resp.d2h_fis.status, 131 tmf->resp.d2h_fis.status,
133 tmf->resp.d2h_fis.error); 132 tmf->resp.d2h_fis.error);
134 else 133 else
135 dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev, 134 dev_dbg(&ihost->pdev->dev,
136 "%s: status = %x\n" 135 "%s: status = %x\n"
137 "tmf->resp.resp_iu.data_present = %x\n" 136 "tmf->resp.resp_iu.data_present = %x\n"
138 "tmf->resp.resp_iu.status = %x\n" 137 "tmf->resp.resp_iu.status = %x\n"
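
The task.h change drops the isci_tmf back-pointer and has isci_print_tmf() take the host explicitly, so the debug print no longer has to chase tmf->device->isci_port->isci_host. A minimal sketch of the same "pass the context, do not store a back-pointer" shape; the types here are stand-ins, not the driver's:

#include <stdio.h>

struct host { const char *name; };
struct tmf  { int status; };  /* no back-pointer to the host or device */

/* The caller already knows its host, so it just passes it in. */
static void print_tmf(const struct host *ihost, const struct tmf *tmf)
{
	printf("[%s] tmf status = %d\n", ihost->name, tmf->status);
}

int main(void)
{
	struct host ihost = { .name = "isci0" };
	struct tmf tmf = { .status = 0 };

	print_tmf(&ihost, &tmf);
	return 0;
}
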
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 7269e928824a..1d1b0c9da29b 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -61,7 +61,7 @@ static void fc_disc_restart(struct fc_disc *);
61 * Locking Note: This function expects that the lport mutex is locked before 61 * Locking Note: This function expects that the lport mutex is locked before
62 * calling it. 62 * calling it.
63 */ 63 */
64void fc_disc_stop_rports(struct fc_disc *disc) 64static void fc_disc_stop_rports(struct fc_disc *disc)
65{ 65{
66 struct fc_lport *lport; 66 struct fc_lport *lport;
67 struct fc_rport_priv *rdata; 67 struct fc_rport_priv *rdata;
@@ -682,7 +682,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
682 * fc_disc_stop() - Stop discovery for a given lport 682 * fc_disc_stop() - Stop discovery for a given lport
683 * @lport: The local port that discovery should stop on 683 * @lport: The local port that discovery should stop on
684 */ 684 */
685void fc_disc_stop(struct fc_lport *lport) 685static void fc_disc_stop(struct fc_lport *lport)
686{ 686{
687 struct fc_disc *disc = &lport->disc; 687 struct fc_disc *disc = &lport->disc;
688 688
@@ -698,7 +698,7 @@ void fc_disc_stop(struct fc_lport *lport)
698 * This function will block until discovery has been 698 * This function will block until discovery has been
699 * completely stopped and all rports have been deleted. 699 * completely stopped and all rports have been deleted.
700 */ 700 */
701void fc_disc_stop_final(struct fc_lport *lport) 701static void fc_disc_stop_final(struct fc_lport *lport)
702{ 702{
703 fc_disc_stop(lport); 703 fc_disc_stop(lport);
704 lport->tt.rport_flush_queue(); 704 lport->tt.rport_flush_queue();
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index fb9161dc4ca6..e17a28d324d0 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -28,6 +28,7 @@
28#include <scsi/fc/fc_els.h> 28#include <scsi/fc/fc_els.h>
29#include <scsi/libfc.h> 29#include <scsi/libfc.h>
30#include <scsi/fc_encode.h> 30#include <scsi/fc_encode.h>
31#include "fc_libfc.h"
31 32
32/** 33/**
33 * fc_elsct_send() - Send an ELS or CT frame 34 * fc_elsct_send() - Send an ELS or CT frame
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 9de9db27e874..4d70d96fa5dc 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -91,7 +91,7 @@ struct fc_exch_pool {
91 * It manages the allocation of exchange IDs. 91 * It manages the allocation of exchange IDs.
92 */ 92 */
93struct fc_exch_mgr { 93struct fc_exch_mgr {
94 struct fc_exch_pool *pool; 94 struct fc_exch_pool __percpu *pool;
95 mempool_t *ep_pool; 95 mempool_t *ep_pool;
96 enum fc_class class; 96 enum fc_class class;
97 struct kref kref; 97 struct kref kref;
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 221875ec3d7c..f607314810ac 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -155,6 +155,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
155 fsp->xfer_ddp = FC_XID_UNKNOWN; 155 fsp->xfer_ddp = FC_XID_UNKNOWN;
156 atomic_set(&fsp->ref_cnt, 1); 156 atomic_set(&fsp->ref_cnt, 1);
157 init_timer(&fsp->timer); 157 init_timer(&fsp->timer);
158 fsp->timer.data = (unsigned long)fsp;
158 INIT_LIST_HEAD(&fsp->list); 159 INIT_LIST_HEAD(&fsp->list);
159 spin_lock_init(&fsp->scsi_pkt_lock); 160 spin_lock_init(&fsp->scsi_pkt_lock);
160 } 161 }
@@ -1850,9 +1851,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
1850 } 1851 }
1851 put_cpu(); 1852 put_cpu();
1852 1853
1853 init_timer(&fsp->timer);
1854 fsp->timer.data = (unsigned long)fsp;
1855
1856 /* 1854 /*
1857 * send it to the lower layer 1855 * send it to the lower layer
1858 * if we get -1 return then put the request in the pending 1856 * if we get -1 return then put the request in the pending
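
The fc_fcp.c change attaches the timer's data pointer once in fc_fcp_pkt_alloc(), right after init_timer(), so fc_queuecommand no longer repeats the init_timer()/data setup per command. A rough mock of that old-style (pre-timer_setup) pattern, with a minimal struct standing in for the kernel's struct timer_list of that era:

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-in for the 2012-era struct timer_list. */
struct mock_timer {
	unsigned long data;
	void (*function)(unsigned long);
};

struct fcp_pkt {
	int xfer_len;
	struct mock_timer timer;
};

static void fcp_timeout(unsigned long data)
{
	struct fcp_pkt *fsp = (struct fcp_pkt *)data; /* recover the packet */
	printf("timeout for pkt with xfer_len=%d\n", fsp->xfer_len);
}

static struct fcp_pkt *fcp_pkt_alloc(void)
{
	struct fcp_pkt *fsp = calloc(1, sizeof(*fsp));
	if (fsp)
		fsp->timer.data = (unsigned long)fsp; /* set once, at alloc time */
	return fsp;
}

int main(void)
{
	struct fcp_pkt *fsp = fcp_pkt_alloc();
	if (!fsp)
		return 1;
	fsp->xfer_len = 512;
	fsp->timer.function = fcp_timeout;
	fsp->timer.function(fsp->timer.data); /* simulate the timer expiring */
	free(fsp);
	return 0;
}
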
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index e77094a587ed..83750ebb527f 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -677,7 +677,8 @@ EXPORT_SYMBOL(fc_set_mfs);
677 * @lport: The local port receiving the event 677 * @lport: The local port receiving the event
678 * @event: The discovery event 678 * @event: The discovery event
679 */ 679 */
680void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) 680static void fc_lport_disc_callback(struct fc_lport *lport,
681 enum fc_disc_event event)
681{ 682{
682 switch (event) { 683 switch (event) {
683 case DISC_EV_SUCCESS: 684 case DISC_EV_SUCCESS:
@@ -1568,7 +1569,7 @@ EXPORT_SYMBOL(fc_lport_flogi_resp);
1568 * Locking Note: The lport lock is expected to be held before calling 1569 * Locking Note: The lport lock is expected to be held before calling
1569 * this routine. 1570 * this routine.
1570 */ 1571 */
1571void fc_lport_enter_flogi(struct fc_lport *lport) 1572static void fc_lport_enter_flogi(struct fc_lport *lport)
1572{ 1573{
1573 struct fc_frame *fp; 1574 struct fc_frame *fp;
1574 1575
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index b9e434844a69..83aa1efec875 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -391,7 +391,7 @@ static void fc_rport_work(struct work_struct *work)
391 * If it appears we are already logged in, ADISC is used to verify 391 * If it appears we are already logged in, ADISC is used to verify
392 * the setup. 392 * the setup.
393 */ 393 */
394int fc_rport_login(struct fc_rport_priv *rdata) 394static int fc_rport_login(struct fc_rport_priv *rdata)
395{ 395{
396 mutex_lock(&rdata->rp_mutex); 396 mutex_lock(&rdata->rp_mutex);
397 397
@@ -451,7 +451,7 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
451 * function will hold the rport lock, call an _enter_* 451 * function will hold the rport lock, call an _enter_*
452 * function and then unlock the rport. 452 * function and then unlock the rport.
453 */ 453 */
454int fc_rport_logoff(struct fc_rport_priv *rdata) 454static int fc_rport_logoff(struct fc_rport_priv *rdata)
455{ 455{
456 mutex_lock(&rdata->rp_mutex); 456 mutex_lock(&rdata->rp_mutex);
457 457
@@ -653,8 +653,8 @@ static int fc_rport_login_complete(struct fc_rport_priv *rdata,
653 * @fp: The FLOGI response frame 653 * @fp: The FLOGI response frame
654 * @rp_arg: The remote port that received the FLOGI response 654 * @rp_arg: The remote port that received the FLOGI response
655 */ 655 */
656void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, 656static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
657 void *rp_arg) 657 void *rp_arg)
658{ 658{
659 struct fc_rport_priv *rdata = rp_arg; 659 struct fc_rport_priv *rdata = rp_arg;
660 struct fc_lport *lport = rdata->local_port; 660 struct fc_lport *lport = rdata->local_port;
@@ -1520,7 +1520,7 @@ reject:
1520 * 1520 *
1521 * Locking Note: Called with the lport lock held. 1521 * Locking Note: Called with the lport lock held.
1522 */ 1522 */
1523void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp) 1523static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
1524{ 1524{
1525 struct fc_seq_els_data els_data; 1525 struct fc_seq_els_data els_data;
1526 1526
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 5c1776406c96..15eefa1d61fd 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -306,19 +306,22 @@ mega_query_adapter(adapter_t *adapter)
306 adapter->host->sg_tablesize = adapter->sglen; 306 adapter->host->sg_tablesize = adapter->sglen;
307 307
308 308
309 /* use HP firmware and bios version encoding */ 309 /* use HP firmware and bios version encoding
310 Note: fw_version[0|1] and bios_version[0|1] were originally shifted
311 right 8 bits making them zero. This 0 value was hardcoded to fix
312 sparse warnings. */
310 if (adapter->product_info.subsysvid == HP_SUBSYS_VID) { 313 if (adapter->product_info.subsysvid == HP_SUBSYS_VID) {
311 sprintf (adapter->fw_version, "%c%d%d.%d%d", 314 sprintf (adapter->fw_version, "%c%d%d.%d%d",
312 adapter->product_info.fw_version[2], 315 adapter->product_info.fw_version[2],
313 adapter->product_info.fw_version[1] >> 8, 316 0,
314 adapter->product_info.fw_version[1] & 0x0f, 317 adapter->product_info.fw_version[1] & 0x0f,
315 adapter->product_info.fw_version[0] >> 8, 318 0,
316 adapter->product_info.fw_version[0] & 0x0f); 319 adapter->product_info.fw_version[0] & 0x0f);
317 sprintf (adapter->bios_version, "%c%d%d.%d%d", 320 sprintf (adapter->bios_version, "%c%d%d.%d%d",
318 adapter->product_info.bios_version[2], 321 adapter->product_info.bios_version[2],
319 adapter->product_info.bios_version[1] >> 8, 322 0,
320 adapter->product_info.bios_version[1] & 0x0f, 323 adapter->product_info.bios_version[1] & 0x0f,
321 adapter->product_info.bios_version[0] >> 8, 324 0,
322 adapter->product_info.bios_version[0] & 0x0f); 325 adapter->product_info.bios_version[0] & 0x0f);
323 } else { 326 } else {
324 memcpy(adapter->fw_version, 327 memcpy(adapter->fw_version,
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index dd94c7d574fb..e5f416f8042d 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
33/* 33/*
34 * MegaRAID SAS Driver meta data 34 * MegaRAID SAS Driver meta data
35 */ 35 */
36#define MEGASAS_VERSION "00.00.06.12-rc1" 36#define MEGASAS_VERSION "00.00.06.14-rc1"
37#define MEGASAS_RELDATE "Oct. 5, 2011" 37#define MEGASAS_RELDATE "Jan. 6, 2012"
38#define MEGASAS_EXT_VERSION "Wed. Oct. 5 17:00:00 PDT 2011" 38#define MEGASAS_EXT_VERSION "Fri. Jan. 6 17:00:00 PDT 2012"
39 39
40/* 40/*
41 * Device IDs 41 * Device IDs
@@ -773,7 +773,6 @@ struct megasas_ctrl_info {
773 773
774#define MFI_OB_INTR_STATUS_MASK 0x00000002 774#define MFI_OB_INTR_STATUS_MASK 0x00000002
775#define MFI_POLL_TIMEOUT_SECS 60 775#define MFI_POLL_TIMEOUT_SECS 60
776#define MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10)
777 776
778#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000 777#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
779#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001 778#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001
@@ -1353,7 +1352,6 @@ struct megasas_instance {
1353 u32 mfiStatus; 1352 u32 mfiStatus;
1354 u32 last_seq_num; 1353 u32 last_seq_num;
1355 1354
1356 struct timer_list io_completion_timer;
1357 struct list_head internal_reset_pending_q; 1355 struct list_head internal_reset_pending_q;
1358 1356
1359 /* Ptr to hba specific information */ 1357 /* Ptr to hba specific information */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 29a994f9c4f1..8b300be44284 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_base.c 20 * FILE: megaraid_sas_base.c
21 * Version : v00.00.06.12-rc1 21 * Version : v00.00.06.14-rc1
22 * 22 *
23 * Authors: LSI Corporation 23 * Authors: LSI Corporation
24 * Sreenivas Bagalkote 24 * Sreenivas Bagalkote
@@ -59,14 +59,6 @@
59#include "megaraid_sas.h" 59#include "megaraid_sas.h"
60 60
61/* 61/*
62 * poll_mode_io:1- schedule complete completion from q cmd
63 */
64static unsigned int poll_mode_io;
65module_param_named(poll_mode_io, poll_mode_io, int, 0);
66MODULE_PARM_DESC(poll_mode_io,
67 "Complete cmds from IO path, (default=0)");
68
69/*
70 * Number of sectors per IO command 62 * Number of sectors per IO command
71 * Will be set in megasas_init_mfi if user does not provide 63 * Will be set in megasas_init_mfi if user does not provide
72 */ 64 */
@@ -1439,11 +1431,6 @@ megasas_build_and_issue_cmd(struct megasas_instance *instance,
1439 1431
1440 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, 1432 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1441 cmd->frame_count-1, instance->reg_set); 1433 cmd->frame_count-1, instance->reg_set);
1442 /*
1443 * Check if we have pend cmds to be completed
1444 */
1445 if (poll_mode_io && atomic_read(&instance->fw_outstanding))
1446 tasklet_schedule(&instance->isr_tasklet);
1447 1434
1448 return 0; 1435 return 0;
1449out_return_cmd: 1436out_return_cmd:
@@ -3370,47 +3357,6 @@ fail_fw_init:
3370 return -EINVAL; 3357 return -EINVAL;
3371} 3358}
3372 3359
3373/**
3374 * megasas_start_timer - Initializes a timer object
3375 * @instance: Adapter soft state
3376 * @timer: timer object to be initialized
3377 * @fn: timer function
3378 * @interval: time interval between timer function call
3379 */
3380static inline void
3381megasas_start_timer(struct megasas_instance *instance,
3382 struct timer_list *timer,
3383 void *fn, unsigned long interval)
3384{
3385 init_timer(timer);
3386 timer->expires = jiffies + interval;
3387 timer->data = (unsigned long)instance;
3388 timer->function = fn;
3389 add_timer(timer);
3390}
3391
3392/**
3393 * megasas_io_completion_timer - Timer fn
3394 * @instance_addr: Address of adapter soft state
3395 *
3396 * Schedules tasklet for cmd completion
3397 * if poll_mode_io is set
3398 */
3399static void
3400megasas_io_completion_timer(unsigned long instance_addr)
3401{
3402 struct megasas_instance *instance =
3403 (struct megasas_instance *)instance_addr;
3404
3405 if (atomic_read(&instance->fw_outstanding))
3406 tasklet_schedule(&instance->isr_tasklet);
3407
3408 /* Restart timer */
3409 if (poll_mode_io)
3410 mod_timer(&instance->io_completion_timer,
3411 jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL);
3412}
3413
3414static u32 3360static u32
3415megasas_init_adapter_mfi(struct megasas_instance *instance) 3361megasas_init_adapter_mfi(struct megasas_instance *instance)
3416{ 3362{
@@ -3638,11 +3584,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
3638 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 3584 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
3639 (unsigned long)instance); 3585 (unsigned long)instance);
3640 3586
3641 /* Initialize the cmd completion timer */
3642 if (poll_mode_io)
3643 megasas_start_timer(instance, &instance->io_completion_timer,
3644 megasas_io_completion_timer,
3645 MEGASAS_COMPLETION_TIMER_INTERVAL);
3646 return 0; 3587 return 0;
3647 3588
3648fail_init_adapter: 3589fail_init_adapter:
@@ -4369,9 +4310,6 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
4369 host = instance->host; 4310 host = instance->host;
4370 instance->unload = 1; 4311 instance->unload = 1;
4371 4312
4372 if (poll_mode_io)
4373 del_timer_sync(&instance->io_completion_timer);
4374
4375 megasas_flush_cache(instance); 4313 megasas_flush_cache(instance);
4376 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); 4314 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
4377 4315
@@ -4511,12 +4449,6 @@ megasas_resume(struct pci_dev *pdev)
4511 } 4449 }
4512 4450
4513 instance->instancet->enable_intr(instance->reg_set); 4451 instance->instancet->enable_intr(instance->reg_set);
4514
4515 /* Initialize the cmd completion timer */
4516 if (poll_mode_io)
4517 megasas_start_timer(instance, &instance->io_completion_timer,
4518 megasas_io_completion_timer,
4519 MEGASAS_COMPLETION_TIMER_INTERVAL);
4520 instance->unload = 0; 4452 instance->unload = 0;
4521 4453
4522 /* 4454 /*
@@ -4570,9 +4502,6 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
4570 host = instance->host; 4502 host = instance->host;
4571 fusion = instance->ctrl_context; 4503 fusion = instance->ctrl_context;
4572 4504
4573 if (poll_mode_io)
4574 del_timer_sync(&instance->io_completion_timer);
4575
4576 scsi_remove_host(instance->host); 4505 scsi_remove_host(instance->host);
4577 megasas_flush_cache(instance); 4506 megasas_flush_cache(instance);
4578 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 4507 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
@@ -4773,6 +4702,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
4773 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); 4702 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
4774 cmd->frame->hdr.context = cmd->index; 4703 cmd->frame->hdr.context = cmd->index;
4775 cmd->frame->hdr.pad_0 = 0; 4704 cmd->frame->hdr.pad_0 = 0;
4705 cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
4706 MFI_FRAME_SENSE64);
4776 4707
4777 /* 4708 /*
4778 * The management interface between applications and the fw uses 4709 * The management interface between applications and the fw uses
@@ -5219,60 +5150,6 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun
5219static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl, 5150static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
5220 megasas_sysfs_set_dbg_lvl); 5151 megasas_sysfs_set_dbg_lvl);
5221 5152
5222static ssize_t
5223megasas_sysfs_show_poll_mode_io(struct device_driver *dd, char *buf)
5224{
5225 return sprintf(buf, "%u\n", poll_mode_io);
5226}
5227
5228static ssize_t
5229megasas_sysfs_set_poll_mode_io(struct device_driver *dd,
5230 const char *buf, size_t count)
5231{
5232 int retval = count;
5233 int tmp = poll_mode_io;
5234 int i;
5235 struct megasas_instance *instance;
5236
5237 if (sscanf(buf, "%u", &poll_mode_io) < 1) {
5238 printk(KERN_ERR "megasas: could not set poll_mode_io\n");
5239 retval = -EINVAL;
5240 }
5241
5242 /*
5243 * Check if poll_mode_io is already set or is same as previous value
5244 */
5245 if ((tmp && poll_mode_io) || (tmp == poll_mode_io))
5246 goto out;
5247
5248 if (poll_mode_io) {
5249 /*
5250 * Start timers for all adapters
5251 */
5252 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
5253 instance = megasas_mgmt_info.instance[i];
5254 if (instance) {
5255 megasas_start_timer(instance,
5256 &instance->io_completion_timer,
5257 megasas_io_completion_timer,
5258 MEGASAS_COMPLETION_TIMER_INTERVAL);
5259 }
5260 }
5261 } else {
5262 /*
5263 * Delete timers for all adapters
5264 */
5265 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
5266 instance = megasas_mgmt_info.instance[i];
5267 if (instance)
5268 del_timer_sync(&instance->io_completion_timer);
5269 }
5270 }
5271
5272out:
5273 return retval;
5274}
5275
5276static void 5153static void
5277megasas_aen_polling(struct work_struct *work) 5154megasas_aen_polling(struct work_struct *work)
5278{ 5155{
@@ -5502,11 +5379,6 @@ megasas_aen_polling(struct work_struct *work)
5502 kfree(ev); 5379 kfree(ev);
5503} 5380}
5504 5381
5505
5506static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR,
5507 megasas_sysfs_show_poll_mode_io,
5508 megasas_sysfs_set_poll_mode_io);
5509
5510/** 5382/**
5511 * megasas_init - Driver load entry point 5383 * megasas_init - Driver load entry point
5512 */ 5384 */
@@ -5566,11 +5438,6 @@ static int __init megasas_init(void)
5566 if (rval) 5438 if (rval)
5567 goto err_dcf_dbg_lvl; 5439 goto err_dcf_dbg_lvl;
5568 rval = driver_create_file(&megasas_pci_driver.driver, 5440 rval = driver_create_file(&megasas_pci_driver.driver,
5569 &driver_attr_poll_mode_io);
5570 if (rval)
5571 goto err_dcf_poll_mode_io;
5572
5573 rval = driver_create_file(&megasas_pci_driver.driver,
5574 &driver_attr_support_device_change); 5441 &driver_attr_support_device_change);
5575 if (rval) 5442 if (rval)
5576 goto err_dcf_support_device_change; 5443 goto err_dcf_support_device_change;
@@ -5579,10 +5446,6 @@ static int __init megasas_init(void)
5579 5446
5580err_dcf_support_device_change: 5447err_dcf_support_device_change:
5581 driver_remove_file(&megasas_pci_driver.driver, 5448 driver_remove_file(&megasas_pci_driver.driver,
5582 &driver_attr_poll_mode_io);
5583
5584err_dcf_poll_mode_io:
5585 driver_remove_file(&megasas_pci_driver.driver,
5586 &driver_attr_dbg_lvl); 5449 &driver_attr_dbg_lvl);
5587err_dcf_dbg_lvl: 5450err_dcf_dbg_lvl:
5588 driver_remove_file(&megasas_pci_driver.driver, 5451 driver_remove_file(&megasas_pci_driver.driver,
@@ -5607,8 +5470,6 @@ err_pcidrv:
5607static void __exit megasas_exit(void) 5470static void __exit megasas_exit(void)
5608{ 5471{
5609 driver_remove_file(&megasas_pci_driver.driver, 5472 driver_remove_file(&megasas_pci_driver.driver,
5610 &driver_attr_poll_mode_io);
5611 driver_remove_file(&megasas_pci_driver.driver,
5612 &driver_attr_dbg_lvl); 5473 &driver_attr_dbg_lvl);
5613 driver_remove_file(&megasas_pci_driver.driver, 5474 driver_remove_file(&megasas_pci_driver.driver,
5614 &driver_attr_support_poll_for_event); 5475 &driver_attr_support_poll_for_event);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 5255dd688aca..294abb0defa6 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -282,7 +282,9 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
282 else { 282 else {
283 *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */ 283 *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
284 if ((raid->level >= 5) && 284 if ((raid->level >= 5) &&
285 (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER)) 285 ((instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) ||
286 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER &&
287 raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
286 pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; 288 pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
287 else if (raid->level == 1) { 289 else if (raid->level == 1) {
288 /* Get alternate Pd. */ 290 /* Get alternate Pd. */
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 22a3ff02e48a..bfe68545203f 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -150,6 +150,8 @@
150#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */ 150#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */
151 /* recovery timeout */ 151 /* recovery timeout */
152 152
153#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
154#define LSW(x) ((uint16_t)(x))
153#define LSDW(x) ((u32)((u64)(x))) 155#define LSDW(x) ((u32)((u64)(x)))
154#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16)) 156#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
155 157
@@ -671,6 +673,7 @@ struct scsi_qla_host {
671 uint16_t pri_ddb_idx; 673 uint16_t pri_ddb_idx;
672 uint16_t sec_ddb_idx; 674 uint16_t sec_ddb_idx;
673 int is_reset; 675 int is_reset;
676 uint16_t temperature;
674}; 677};
675 678
676struct ql4_task_data { 679struct ql4_task_data {
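
ql4_def.h gains MSB()/LSW() helpers; later in the ql4_os.c watchdog they are combined as LSW(MSB(halt_status)) == 0x67 to spot the firmware-abort code 0x00006700. A quick standalone check of how those macros pick out that byte (the halt_status value is just an example):

#include <stdint.h>
#include <stdio.h>

#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
#define LSW(x) ((uint16_t)(x))

int main(void)
{
	uint32_t halt_status = 0x00006700; /* example firmware abort code */

	/* MSB() truncates to 16 bits and keeps the upper byte (bits 15..8);
	 * the outer LSW() just widens that byte back to 16 bits. */
	if (LSW(MSB(halt_status)) == 0x67)
		printf("firmware aborted with error code 0x00006700\n");
	return 0;
}
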
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 1bdfa8120ac8..90614f38b55d 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -697,6 +697,9 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
697 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), 697 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
698 &ha->reg->ctrl_status); 698 &ha->reg->ctrl_status);
699 readl(&ha->reg->ctrl_status); 699 readl(&ha->reg->ctrl_status);
700 writel(set_rmask(CSR_SCSI_COMPLETION_INTR),
701 &ha->reg->ctrl_status);
702 readl(&ha->reg->ctrl_status);
700 spin_unlock_irqrestore(&ha->hardware_lock, flags); 703 spin_unlock_irqrestore(&ha->hardware_lock, flags);
701 if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) { 704 if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) {
702 DEBUG2(printk("scsi%ld: %s: Get firmware " 705 DEBUG2(printk("scsi%ld: %s: Get firmware "
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index c2593782fbbe..e1e66a45e4d0 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -219,6 +219,13 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
219 ha->mailbox_timeout_count++; 219 ha->mailbox_timeout_count++;
220 mbx_sts[0] = (-1); 220 mbx_sts[0] = (-1);
221 set_bit(DPC_RESET_HA, &ha->dpc_flags); 221 set_bit(DPC_RESET_HA, &ha->dpc_flags);
222 if (is_qla8022(ha)) {
223 ql4_printk(KERN_INFO, ha,
224 "disabling pause transmit on port 0 & 1.\n");
225 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
226 CRB_NIU_XG_PAUSE_CTL_P0 |
227 CRB_NIU_XG_PAUSE_CTL_P1);
228 }
222 goto mbox_exit; 229 goto mbox_exit;
223 } 230 }
224 231
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 8d6bc1b2ff17..78f1111158d7 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1875,6 +1875,11 @@ exit:
1875int qla4_8xxx_load_risc(struct scsi_qla_host *ha) 1875int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
1876{ 1876{
1877 int retval; 1877 int retval;
1878
1879 /* clear the interrupt */
1880 writel(0, &ha->qla4_8xxx_reg->host_int);
1881 readl(&ha->qla4_8xxx_reg->host_int);
1882
1878 retval = qla4_8xxx_device_state_handler(ha); 1883 retval = qla4_8xxx_device_state_handler(ha);
1879 1884
1880 if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags)) 1885 if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags))
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index 35376a1c3f1b..dc45ac923691 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -19,12 +19,28 @@
19#define PHAN_PEG_RCV_INITIALIZED 0xff01 19#define PHAN_PEG_RCV_INITIALIZED 0xff01
20 20
21/*CRB_RELATED*/ 21/*CRB_RELATED*/
22#define QLA82XX_CRB_BASE QLA82XX_CAM_RAM(0x200) 22#define QLA82XX_CRB_BASE (QLA82XX_CAM_RAM(0x200))
23#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X)) 23#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X))
24
25#define CRB_CMDPEG_STATE QLA82XX_REG(0x50) 24#define CRB_CMDPEG_STATE QLA82XX_REG(0x50)
26#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c) 25#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
27#define CRB_DMA_SHIFT QLA82XX_REG(0xcc) 26#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
27#define CRB_TEMP_STATE QLA82XX_REG(0x1b4)
28
29#define qla82xx_get_temp_val(x) ((x) >> 16)
30#define qla82xx_get_temp_state(x) ((x) & 0xffff)
31#define qla82xx_encode_temp(val, state) (((val) << 16) | (state))
32
33/*
34 * Temperature control.
35 */
36enum {
37 QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */
38 QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */
39 QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */
40};
41
42#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
43#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
28 44
29#define QLA82XX_HW_H0_CH_HUB_ADR 0x05 45#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
30#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E 46#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E
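
The new CRB_TEMP_STATE register packs the temperature reading into the upper 16 bits and the state (normal/warn/panic) into the lower 16; the macros above split the two apart. A small round trip using the same macro definitions (the sample register value is made up):

#include <stdint.h>
#include <stdio.h>

#define qla82xx_get_temp_val(x)         ((x) >> 16)
#define qla82xx_get_temp_state(x)       ((x) & 0xffff)
#define qla82xx_encode_temp(val, state) (((val) << 16) | (state))

enum { TEMP_NORMAL = 0x1, TEMP_WARN, TEMP_PANIC };

int main(void)
{
	/* Pretend CRB_TEMP_STATE reads back 45 degrees C, warning state. */
	uint32_t temp = qla82xx_encode_temp(45, TEMP_WARN);

	printf("value = %u C, state = %u\n",
	       qla82xx_get_temp_val(temp), qla82xx_get_temp_state(temp));
	/* -> value = 45 C, state = 2 */
	return 0;
}
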
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index ec393a00c038..ce6d3b7f0c61 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -35,43 +35,44 @@ static struct kmem_cache *srb_cachep;
35int ql4xdisablesysfsboot = 1; 35int ql4xdisablesysfsboot = 1;
36module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR); 36module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
37MODULE_PARM_DESC(ql4xdisablesysfsboot, 37MODULE_PARM_DESC(ql4xdisablesysfsboot,
38 "Set to disable exporting boot targets to sysfs\n" 38 " Set to disable exporting boot targets to sysfs.\n"
39 " 0 - Export boot targets\n" 39 "\t\t 0 - Export boot targets\n"
40 " 1 - Do not export boot targets (Default)"); 40 "\t\t 1 - Do not export boot targets (Default)");
41 41
42int ql4xdontresethba = 0; 42int ql4xdontresethba = 0;
43module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR); 43module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
44MODULE_PARM_DESC(ql4xdontresethba, 44MODULE_PARM_DESC(ql4xdontresethba,
45 "Don't reset the HBA for driver recovery \n" 45 " Don't reset the HBA for driver recovery.\n"
46 " 0 - It will reset HBA (Default)\n" 46 "\t\t 0 - It will reset HBA (Default)\n"
47 " 1 - It will NOT reset HBA"); 47 "\t\t 1 - It will NOT reset HBA");
48 48
49int ql4xextended_error_logging = 0; /* 0 = off, 1 = log errors */ 49int ql4xextended_error_logging;
50module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR); 50module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
51MODULE_PARM_DESC(ql4xextended_error_logging, 51MODULE_PARM_DESC(ql4xextended_error_logging,
52 "Option to enable extended error logging, " 52 " Option to enable extended error logging.\n"
53 "Default is 0 - no logging, 1 - debug logging"); 53 "\t\t 0 - no logging (Default)\n"
54 "\t\t 2 - debug logging");
54 55
55int ql4xenablemsix = 1; 56int ql4xenablemsix = 1;
56module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR); 57module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
57MODULE_PARM_DESC(ql4xenablemsix, 58MODULE_PARM_DESC(ql4xenablemsix,
58 "Set to enable MSI or MSI-X interrupt mechanism.\n" 59 " Set to enable MSI or MSI-X interrupt mechanism.\n"
59 " 0 = enable INTx interrupt mechanism.\n" 60 "\t\t 0 = enable INTx interrupt mechanism.\n"
60 " 1 = enable MSI-X interrupt mechanism (Default).\n" 61 "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
61 " 2 = enable MSI interrupt mechanism."); 62 "\t\t 2 = enable MSI interrupt mechanism.");
62 63
63#define QL4_DEF_QDEPTH 32 64#define QL4_DEF_QDEPTH 32
64static int ql4xmaxqdepth = QL4_DEF_QDEPTH; 65static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
65module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR); 66module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
66MODULE_PARM_DESC(ql4xmaxqdepth, 67MODULE_PARM_DESC(ql4xmaxqdepth,
67 "Maximum queue depth to report for target devices.\n" 68 " Maximum queue depth to report for target devices.\n"
68 " Default: 32."); 69 "\t\t Default: 32.");
69 70
70static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; 71static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
71module_param(ql4xsess_recovery_tmo, int, S_IRUGO); 72module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
72MODULE_PARM_DESC(ql4xsess_recovery_tmo, 73MODULE_PARM_DESC(ql4xsess_recovery_tmo,
73 "Target Session Recovery Timeout.\n" 74 "Target Session Recovery Timeout.\n"
74 " Default: 120 sec."); 75 "\t\t Default: 120 sec.");
75 76
76static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha); 77static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
77/* 78/*
@@ -1630,7 +1631,9 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
1630 1631
1631 /* Update timers after login */ 1632 /* Update timers after login */
1632 ddb_entry->default_relogin_timeout = 1633 ddb_entry->default_relogin_timeout =
1633 le16_to_cpu(fw_ddb_entry->def_timeout); 1634 (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
1635 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
1636 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
1634 ddb_entry->default_time2wait = 1637 ddb_entry->default_time2wait =
1635 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 1638 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
1636 1639
@@ -1970,6 +1973,42 @@ mem_alloc_error_exit:
1970} 1973}
1971 1974
1972/** 1975/**
1976 * qla4_8xxx_check_temp - Check the ISP82XX temperature.
1977 * @ha: adapter block pointer.
1978 *
1979 * Note: The caller should not hold the idc lock.
1980 **/
1981static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
1982{
1983 uint32_t temp, temp_state, temp_val;
1984 int status = QLA_SUCCESS;
1985
1986 temp = qla4_8xxx_rd_32(ha, CRB_TEMP_STATE);
1987
1988 temp_state = qla82xx_get_temp_state(temp);
1989 temp_val = qla82xx_get_temp_val(temp);
1990
1991 if (temp_state == QLA82XX_TEMP_PANIC) {
1992 ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
1993 " exceeds maximum allowed. Hardware has been shut"
1994 " down.\n", temp_val);
1995 status = QLA_ERROR;
1996 } else if (temp_state == QLA82XX_TEMP_WARN) {
1997 if (ha->temperature == QLA82XX_TEMP_NORMAL)
1998 ql4_printk(KERN_WARNING, ha, "Device temperature %d"
1999 " degrees C exceeds operating range."
2000 " Immediate action needed.\n", temp_val);
2001 } else {
2002 if (ha->temperature == QLA82XX_TEMP_WARN)
2003 ql4_printk(KERN_INFO, ha, "Device temperature is"
2004 " now %d degrees C in normal range.\n",
2005 temp_val);
2006 }
2007 ha->temperature = temp_state;
2008 return status;
2009}
2010
2011/**
1973 * qla4_8xxx_check_fw_alive - Check firmware health 2012 * qla4_8xxx_check_fw_alive - Check firmware health
1974 * @ha: Pointer to host adapter structure. 2013 * @ha: Pointer to host adapter structure.
1975 * 2014 *
@@ -2040,7 +2079,16 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2040 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 2079 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2041 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) { 2080 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
2042 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2081 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
2043 if (dev_state == QLA82XX_DEV_NEED_RESET && 2082
2083 if (qla4_8xxx_check_temp(ha)) {
2084 ql4_printk(KERN_INFO, ha, "disabling pause"
2085 " transmit on port 0 & 1.\n");
2086 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2087 CRB_NIU_XG_PAUSE_CTL_P0 |
2088 CRB_NIU_XG_PAUSE_CTL_P1);
2089 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
2090 qla4xxx_wake_dpc(ha);
2091 } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
2044 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 2092 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
2045 if (!ql4xdontresethba) { 2093 if (!ql4xdontresethba) {
2046 ql4_printk(KERN_INFO, ha, "%s: HW State: " 2094 ql4_printk(KERN_INFO, ha, "%s: HW State: "
@@ -2057,9 +2105,21 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2057 } else { 2105 } else {
2058 /* Check firmware health */ 2106 /* Check firmware health */
2059 if (qla4_8xxx_check_fw_alive(ha)) { 2107 if (qla4_8xxx_check_fw_alive(ha)) {
2108 ql4_printk(KERN_INFO, ha, "disabling pause"
2109 " transmit on port 0 & 1.\n");
2110 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2111 CRB_NIU_XG_PAUSE_CTL_P0 |
2112 CRB_NIU_XG_PAUSE_CTL_P1);
2060 halt_status = qla4_8xxx_rd_32(ha, 2113 halt_status = qla4_8xxx_rd_32(ha,
2061 QLA82XX_PEG_HALT_STATUS1); 2114 QLA82XX_PEG_HALT_STATUS1);
2062 2115
2116 if (LSW(MSB(halt_status)) == 0x67)
2117 ql4_printk(KERN_ERR, ha, "%s:"
2118 " Firmware aborted with"
2119 " error code 0x00006700."
2120 " Device is being reset\n",
2121 __func__);
2122
2063 /* Since we cannot change dev_state in interrupt 2123 /* Since we cannot change dev_state in interrupt
2064 * context, set appropriate DPC flag then wakeup 2124 * context, set appropriate DPC flag then wakeup
2065 * DPC */ 2125 * DPC */
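
The 0x67 test above extracts a single byte from PEG_HALT_STATUS1. Assuming qla-style helper macros (LSW() takes the low 16 bits, MSB() the high byte of a 16-bit value; the real definitions live in the driver headers, not in this hunk), LSW(MSB(halt_status)) == 0x67 simply checks that bits 8-15 of the halt status equal 0x67, matching the "error code 0x00006700" in the log message. A hedged sketch:

#include <stdio.h>
#include <stdint.h>

/* Assumed qla-style helpers; the driver's own definitions are authoritative. */
#define LSW(x)	((uint16_t)(x))
#define MSB(x)	((uint8_t)(((uint16_t)(x)) >> 8))

int main(void)
{
	uint32_t halt_status = 0x00006700;	/* example firmware-abort status */

	/* Equivalent to testing bits 8-15 of the halt status for 0x67. */
	if (LSW(MSB(halt_status)) == 0x67)
		printf("firmware aborted, halt status 0x%08x\n",
		       (unsigned int)halt_status);

	return 0;
}
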
@@ -2078,7 +2138,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2078 } 2138 }
2079} 2139}
2080 2140
2081void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) 2141static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
2082{ 2142{
2083 struct iscsi_session *sess; 2143 struct iscsi_session *sess;
2084 struct ddb_entry *ddb_entry; 2144 struct ddb_entry *ddb_entry;
@@ -3826,16 +3886,14 @@ exit_check:
3826 return ret; 3886 return ret;
3827} 3887}
3828 3888
3829static void qla4xxx_free_nt_list(struct list_head *list_nt) 3889static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
3830{ 3890{
3831 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; 3891 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
3832 3892
3833 /* Free up the normaltargets list */ 3893 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
3834 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 3894 list_del_init(&ddb_idx->list);
3835 list_del_init(&nt_ddb_idx->list); 3895 vfree(ddb_idx);
3836 vfree(nt_ddb_idx);
3837 } 3896 }
3838
3839} 3897}
3840 3898
3841static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha, 3899static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
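
The renamed qla4xxx_free_ddb_list() relies on the _safe list iterator, which keeps a look-ahead pointer so the current entry can be unlinked and vfree()d without breaking the walk. A generic sketch of the same pattern, with illustrative names:

#include <linux/list.h>
#include <linux/vmalloc.h>

struct idx_entry {
	struct list_head list;
	/* payload ... */
};

/* Free every entry on the list; _safe allows deletion during iteration. */
static void free_idx_list(struct list_head *head)
{
	struct idx_entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, head, list) {
		list_del_init(&e->list);	/* unlink before freeing */
		vfree(e);			/* entries were vmalloc'ed */
	}
}
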
@@ -3884,6 +3942,8 @@ static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
3884static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha, 3942static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
3885 struct ddb_entry *ddb_entry) 3943 struct ddb_entry *ddb_entry)
3886{ 3944{
3945 uint16_t def_timeout;
3946
3887 ddb_entry->ddb_type = FLASH_DDB; 3947 ddb_entry->ddb_type = FLASH_DDB;
3888 ddb_entry->fw_ddb_index = INVALID_ENTRY; 3948 ddb_entry->fw_ddb_index = INVALID_ENTRY;
3889 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; 3949 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
@@ -3894,9 +3954,10 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
3894 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); 3954 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
3895 atomic_set(&ddb_entry->relogin_timer, 0); 3955 atomic_set(&ddb_entry->relogin_timer, 0);
3896 atomic_set(&ddb_entry->relogin_retry_count, 0); 3956 atomic_set(&ddb_entry->relogin_retry_count, 0);
3897 3957 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
3898 ddb_entry->default_relogin_timeout = 3958 ddb_entry->default_relogin_timeout =
3899 le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout); 3959 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
3960 def_timeout : LOGIN_TOV;
3900 ddb_entry->default_time2wait = 3961 ddb_entry->default_time2wait =
3901 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait); 3962 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
3902} 3963}
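
qla4xxx_setup_flash_ddb_entry() now sanity-checks the flash-provided def_timeout instead of using it verbatim: only values strictly between LOGIN_TOV and LOGIN_TOV * 10 are accepted, anything else falls back to LOGIN_TOV. A small sketch of that clamp; the LOGIN_TOV value below is an assumption (the "min 12 sec wait" comment later in the patch suggests 12), the real constant lives in the driver headers:

#include <stdio.h>
#include <stdint.h>

#define LOGIN_TOV 12	/* assumed value; defined by the driver, not by this sketch */

/* Accept a flash-provided relogin timeout only if it is within a sane window. */
static uint16_t sane_relogin_timeout(uint16_t def_timeout)
{
	return (def_timeout > LOGIN_TOV && def_timeout < LOGIN_TOV * 10) ?
		def_timeout : LOGIN_TOV;
}

int main(void)
{
	printf("%d\n", sane_relogin_timeout(0));	/* 12 - too small, clamped  */
	printf("%d\n", sane_relogin_timeout(60));	/* 60 - in range, accepted  */
	printf("%d\n", sane_relogin_timeout(65535));	/* 12 - too large, clamped  */
	return 0;
}
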
@@ -3934,7 +3995,6 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
3934 ip_state == IP_ADDRSTATE_DEPRICATED || 3995 ip_state == IP_ADDRSTATE_DEPRICATED ||
3935 ip_state == IP_ADDRSTATE_DISABLING) 3996 ip_state == IP_ADDRSTATE_DISABLING)
3936 ip_idx[idx] = -1; 3997 ip_idx[idx] = -1;
3937
3938 } 3998 }
3939 3999
3940 /* Break if all IP states checked */ 4000 /* Break if all IP states checked */
@@ -3947,58 +4007,37 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
3947 } while (time_after(wtime, jiffies)); 4007 } while (time_after(wtime, jiffies));
3948} 4008}
3949 4009
3950void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset) 4010static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
4011 struct list_head *list_st)
3951{ 4012{
4013 struct qla_ddb_index *st_ddb_idx;
3952 int max_ddbs; 4014 int max_ddbs;
4015 int fw_idx_size;
4016 struct dev_db_entry *fw_ddb_entry;
4017 dma_addr_t fw_ddb_dma;
3953 int ret; 4018 int ret;
3954 uint32_t idx = 0, next_idx = 0; 4019 uint32_t idx = 0, next_idx = 0;
3955 uint32_t state = 0, conn_err = 0; 4020 uint32_t state = 0, conn_err = 0;
3956 uint16_t conn_id; 4021 uint16_t conn_id = 0;
3957 struct dev_db_entry *fw_ddb_entry;
3958 struct ddb_entry *ddb_entry = NULL;
3959 dma_addr_t fw_ddb_dma;
3960 struct iscsi_cls_session *cls_sess;
3961 struct iscsi_session *sess;
3962 struct iscsi_cls_conn *cls_conn;
3963 struct iscsi_endpoint *ep;
3964 uint16_t cmds_max = 32, tmo = 0;
3965 uint32_t initial_cmdsn = 0;
3966 struct list_head list_st, list_nt; /* List of sendtargets */
3967 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
3968 int fw_idx_size;
3969 unsigned long wtime;
3970 struct qla_ddb_index *nt_ddb_idx;
3971
3972 if (!test_bit(AF_LINK_UP, &ha->flags)) {
3973 set_bit(AF_BUILD_DDB_LIST, &ha->flags);
3974 ha->is_reset = is_reset;
3975 return;
3976 }
3977 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
3978 MAX_DEV_DB_ENTRIES;
3979 4022
3980 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 4023 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
3981 &fw_ddb_dma); 4024 &fw_ddb_dma);
3982 if (fw_ddb_entry == NULL) { 4025 if (fw_ddb_entry == NULL) {
3983 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 4026 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
3984 goto exit_ddb_list; 4027 goto exit_st_list;
3985 } 4028 }
3986 4029
3987 INIT_LIST_HEAD(&list_st); 4030 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
3988 INIT_LIST_HEAD(&list_nt); 4031 MAX_DEV_DB_ENTRIES;
3989 fw_idx_size = sizeof(struct qla_ddb_index); 4032 fw_idx_size = sizeof(struct qla_ddb_index);
3990 4033
3991 for (idx = 0; idx < max_ddbs; idx = next_idx) { 4034 for (idx = 0; idx < max_ddbs; idx = next_idx) {
3992 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, 4035 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
3993 fw_ddb_dma, NULL, 4036 NULL, &next_idx, &state,
3994 &next_idx, &state, &conn_err, 4037 &conn_err, NULL, &conn_id);
3995 NULL, &conn_id);
3996 if (ret == QLA_ERROR) 4038 if (ret == QLA_ERROR)
3997 break; 4039 break;
3998 4040
3999 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
4000 goto continue_next_st;
4001
4002 /* Check if ST, add to the list_st */ 4041 /* Check if ST, add to the list_st */
4003 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0) 4042 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
4004 goto continue_next_st; 4043 goto continue_next_st;
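
The new qla4xxx_build_st_list() (and qla4xxx_build_nt_list() further down) borrows a dev_db_entry-sized buffer from the adapter's DMA pool for the duration of the firmware DDB walk and returns it on exit. A generic sketch of that dma_pool alloc/free pairing; the pool name, block size and device here are illustrative only:

#include <linux/dmapool.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Illustrative only: borrow a coherent buffer from a pool, use it, return it. */
static int use_dma_pool(struct device *dev)
{
	struct dma_pool *pool;
	void *buf;
	dma_addr_t buf_dma;

	/* name, device, block size, alignment, boundary */
	pool = dma_pool_create("example_pool", dev, 512, 8, 0);
	if (!pool)
		return -ENOMEM;

	buf = dma_pool_alloc(pool, GFP_KERNEL, &buf_dma);
	if (!buf) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand buf_dma to the hardware, read results through buf ... */

	dma_pool_free(pool, buf, buf_dma);	/* must pair with dma_pool_alloc */
	dma_pool_destroy(pool);
	return 0;
}
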
@@ -4009,59 +4048,155 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
4009 4048
4010 st_ddb_idx->fw_ddb_idx = idx; 4049 st_ddb_idx->fw_ddb_idx = idx;
4011 4050
4012 list_add_tail(&st_ddb_idx->list, &list_st); 4051 list_add_tail(&st_ddb_idx->list, list_st);
4013continue_next_st: 4052continue_next_st:
4014 if (next_idx == 0) 4053 if (next_idx == 0)
4015 break; 4054 break;
4016 } 4055 }
4017 4056
4018 /* Before issuing conn open mbox, ensure all IPs states are configured 4057exit_st_list:
4019 * Note, conn open fails if IPs are not configured 4058 if (fw_ddb_entry)
4059 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
4060}
4061
4062/**
4063 * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
4064 * @ha: pointer to adapter structure
4065 * @list_ddb: List from which failed ddb to be removed
4066 *
4067 * Iterate over the list of DDBs and find and remove DDBs that are either in
4068 * no connection active state or failed state
4069 **/
4070static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
4071 struct list_head *list_ddb)
4072{
4073 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
4074 uint32_t next_idx = 0;
4075 uint32_t state = 0, conn_err = 0;
4076 int ret;
4077
4078 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
4079 ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
4080 NULL, 0, NULL, &next_idx, &state,
4081 &conn_err, NULL, NULL);
4082 if (ret == QLA_ERROR)
4083 continue;
4084
4085 if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
4086 state == DDB_DS_SESSION_FAILED) {
4087 list_del_init(&ddb_idx->list);
4088 vfree(ddb_idx);
4089 }
4090 }
4091}
4092
4093static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
4094 struct dev_db_entry *fw_ddb_entry,
4095 int is_reset)
4096{
4097 struct iscsi_cls_session *cls_sess;
4098 struct iscsi_session *sess;
4099 struct iscsi_cls_conn *cls_conn;
4100 struct iscsi_endpoint *ep;
4101 uint16_t cmds_max = 32;
4102 uint16_t conn_id = 0;
4103 uint32_t initial_cmdsn = 0;
4104 int ret = QLA_SUCCESS;
4105
4106 struct ddb_entry *ddb_entry = NULL;
4107
4108 /* Create session object, with INVALID_ENTRY,
 4109              * the target_id would get set when we issue the login
4020 */ 4110 */
4021 qla4xxx_wait_for_ip_configuration(ha); 4111 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
4112 cmds_max, sizeof(struct ddb_entry),
4113 sizeof(struct ql4_task_data),
4114 initial_cmdsn, INVALID_ENTRY);
4115 if (!cls_sess) {
4116 ret = QLA_ERROR;
4117 goto exit_setup;
4118 }
4022 4119
4023 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */ 4120 /*
4024 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) { 4121 * so calling module_put function to decrement the
4025 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx); 4122 * reference count.
4123 **/
4124 module_put(qla4xxx_iscsi_transport.owner);
4125 sess = cls_sess->dd_data;
4126 ddb_entry = sess->dd_data;
4127 ddb_entry->sess = cls_sess;
4128
4129 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
4130 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
4131 sizeof(struct dev_db_entry));
4132
4133 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
4134
4135 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
4136
4137 if (!cls_conn) {
4138 ret = QLA_ERROR;
4139 goto exit_setup;
4026 } 4140 }
4027 4141
4028 /* Wait to ensure all sendtargets are done for min 12 sec wait */ 4142 ddb_entry->conn = cls_conn;
4029 tmo = ((ha->def_timeout < LOGIN_TOV) ? LOGIN_TOV : ha->def_timeout);
4030 DEBUG2(ql4_printk(KERN_INFO, ha,
4031 "Default time to wait for build ddb %d\n", tmo));
4032 4143
4033 wtime = jiffies + (HZ * tmo); 4144 /* Setup ep, for displaying attributes in sysfs */
4034 do { 4145 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
4035 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, 4146 if (ep) {
4036 list) { 4147 ep->conn = cls_conn;
4037 ret = qla4xxx_get_fwddb_entry(ha, 4148 cls_conn->ep = ep;
4038 st_ddb_idx->fw_ddb_idx, 4149 } else {
4039 NULL, 0, NULL, &next_idx, 4150 DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
4040 &state, &conn_err, NULL, 4151 ret = QLA_ERROR;
4041 NULL); 4152 goto exit_setup;
4042 if (ret == QLA_ERROR) 4153 }
4043 continue;
4044 4154
4045 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 4155 /* Update sess/conn params */
4046 state == DDB_DS_SESSION_FAILED) { 4156 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
4047 list_del_init(&st_ddb_idx->list);
4048 vfree(st_ddb_idx);
4049 }
4050 }
4051 schedule_timeout_uninterruptible(HZ / 10);
4052 } while (time_after(wtime, jiffies));
4053 4157
4054 /* Free up the sendtargets list */ 4158 if (is_reset == RESET_ADAPTER) {
4055 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) { 4159 iscsi_block_session(cls_sess);
4056 list_del_init(&st_ddb_idx->list); 4160 /* Use the relogin path to discover new devices
4057 vfree(st_ddb_idx); 4161 * by short-circuting the logic of setting
4162 * timer to relogin - instead set the flags
4163 * to initiate login right away.
4164 */
4165 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
4166 set_bit(DF_RELOGIN, &ddb_entry->flags);
4058 } 4167 }
4059 4168
4169exit_setup:
4170 return ret;
4171}
4172
4173static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
4174 struct list_head *list_nt, int is_reset)
4175{
4176 struct dev_db_entry *fw_ddb_entry;
4177 dma_addr_t fw_ddb_dma;
4178 int max_ddbs;
4179 int fw_idx_size;
4180 int ret;
4181 uint32_t idx = 0, next_idx = 0;
4182 uint32_t state = 0, conn_err = 0;
4183 uint16_t conn_id = 0;
4184 struct qla_ddb_index *nt_ddb_idx;
4185
4186 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
4187 &fw_ddb_dma);
4188 if (fw_ddb_entry == NULL) {
4189 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
4190 goto exit_nt_list;
4191 }
4192 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
4193 MAX_DEV_DB_ENTRIES;
4194 fw_idx_size = sizeof(struct qla_ddb_index);
4195
4060 for (idx = 0; idx < max_ddbs; idx = next_idx) { 4196 for (idx = 0; idx < max_ddbs; idx = next_idx) {
4061 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, 4197 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
4062 fw_ddb_dma, NULL, 4198 NULL, &next_idx, &state,
4063 &next_idx, &state, &conn_err, 4199 &conn_err, NULL, &conn_id);
4064 NULL, &conn_id);
4065 if (ret == QLA_ERROR) 4200 if (ret == QLA_ERROR)
4066 break; 4201 break;
4067 4202
@@ -4072,107 +4207,113 @@ continue_next_st:
4072 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0) 4207 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
4073 goto continue_next_nt; 4208 goto continue_next_nt;
4074 4209
4075 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 4210 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
4076 state == DDB_DS_SESSION_FAILED) { 4211 state == DDB_DS_SESSION_FAILED))
4077 DEBUG2(ql4_printk(KERN_INFO, ha, 4212 goto continue_next_nt;
4078 "Adding DDB to session = 0x%x\n",
4079 idx));
4080 if (is_reset == INIT_ADAPTER) {
4081 nt_ddb_idx = vmalloc(fw_idx_size);
4082 if (!nt_ddb_idx)
4083 break;
4084
4085 nt_ddb_idx->fw_ddb_idx = idx;
4086
4087 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
4088 sizeof(struct dev_db_entry));
4089
4090 if (qla4xxx_is_flash_ddb_exists(ha, &list_nt,
4091 fw_ddb_entry) == QLA_SUCCESS) {
4092 vfree(nt_ddb_idx);
4093 goto continue_next_nt;
4094 }
4095 list_add_tail(&nt_ddb_idx->list, &list_nt);
4096 } else if (is_reset == RESET_ADAPTER) {
4097 if (qla4xxx_is_session_exists(ha,
4098 fw_ddb_entry) == QLA_SUCCESS)
4099 goto continue_next_nt;
4100 }
4101 4213
4102 /* Create session object, with INVALID_ENTRY, 4214 DEBUG2(ql4_printk(KERN_INFO, ha,
4103 * the targer_id would get set when we issue the login 4215 "Adding DDB to session = 0x%x\n", idx));
4104 */ 4216 if (is_reset == INIT_ADAPTER) {
4105 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, 4217 nt_ddb_idx = vmalloc(fw_idx_size);
4106 ha->host, cmds_max, 4218 if (!nt_ddb_idx)
4107 sizeof(struct ddb_entry), 4219 break;
4108 sizeof(struct ql4_task_data),
4109 initial_cmdsn, INVALID_ENTRY);
4110 if (!cls_sess)
4111 goto exit_ddb_list;
4112 4220
4113 /* 4221 nt_ddb_idx->fw_ddb_idx = idx;
4114 * iscsi_session_setup increments the driver reference
4115 * count which wouldn't let the driver to be unloaded.
4116 * so calling module_put function to decrement the
4117 * reference count.
4118 **/
4119 module_put(qla4xxx_iscsi_transport.owner);
4120 sess = cls_sess->dd_data;
4121 ddb_entry = sess->dd_data;
4122 ddb_entry->sess = cls_sess;
4123 4222
4124 cls_sess->recovery_tmo = ql4xsess_recovery_tmo; 4223 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
4125 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
4126 sizeof(struct dev_db_entry)); 4224 sizeof(struct dev_db_entry));
4127 4225
4128 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry); 4226 if (qla4xxx_is_flash_ddb_exists(ha, list_nt,
4129 4227 fw_ddb_entry) == QLA_SUCCESS) {
4130 cls_conn = iscsi_conn_setup(cls_sess, 4228 vfree(nt_ddb_idx);
4131 sizeof(struct qla_conn), 4229 goto continue_next_nt;
4132 conn_id);
4133 if (!cls_conn)
4134 goto exit_ddb_list;
4135
4136 ddb_entry->conn = cls_conn;
4137
4138 /* Setup ep, for displaying attributes in sysfs */
4139 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
4140 if (ep) {
4141 ep->conn = cls_conn;
4142 cls_conn->ep = ep;
4143 } else {
4144 DEBUG2(ql4_printk(KERN_ERR, ha,
4145 "Unable to get ep\n"));
4146 }
4147
4148 /* Update sess/conn params */
4149 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess,
4150 cls_conn);
4151
4152 if (is_reset == RESET_ADAPTER) {
4153 iscsi_block_session(cls_sess);
4154 /* Use the relogin path to discover new devices
4155 * by short-circuting the logic of setting
4156 * timer to relogin - instead set the flags
4157 * to initiate login right away.
4158 */
4159 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
4160 set_bit(DF_RELOGIN, &ddb_entry->flags);
4161 } 4230 }
4231 list_add_tail(&nt_ddb_idx->list, list_nt);
4232 } else if (is_reset == RESET_ADAPTER) {
4233 if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
4234 QLA_SUCCESS)
4235 goto continue_next_nt;
4162 } 4236 }
4237
4238 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
4239 if (ret == QLA_ERROR)
4240 goto exit_nt_list;
4241
4163continue_next_nt: 4242continue_next_nt:
4164 if (next_idx == 0) 4243 if (next_idx == 0)
4165 break; 4244 break;
4166 } 4245 }
4167exit_ddb_list: 4246
4168 qla4xxx_free_nt_list(&list_nt); 4247exit_nt_list:
4169 if (fw_ddb_entry) 4248 if (fw_ddb_entry)
4170 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 4249 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
4250}
4251
4252/**
4253 * qla4xxx_build_ddb_list - Build ddb list and setup sessions
4254 * @ha: pointer to adapter structure
4255 * @is_reset: Is this init path or reset path
4256 *
4257 * Create a list of sendtargets (st) from firmware DDBs, issue send targets
4258 * using connection open, then create the list of normal targets (nt)
4259 * from firmware DDBs. Based on the list of nt setup session and connection
4260 * objects.
4261 **/
4262void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
4263{
4264 uint16_t tmo = 0;
4265 struct list_head list_st, list_nt;
4266 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
4267 unsigned long wtime;
4268
4269 if (!test_bit(AF_LINK_UP, &ha->flags)) {
4270 set_bit(AF_BUILD_DDB_LIST, &ha->flags);
4271 ha->is_reset = is_reset;
4272 return;
4273 }
4274
4275 INIT_LIST_HEAD(&list_st);
4276 INIT_LIST_HEAD(&list_nt);
4277
4278 qla4xxx_build_st_list(ha, &list_st);
4279
4280 /* Before issuing conn open mbox, ensure all IPs states are configured
4281 * Note, conn open fails if IPs are not configured
4282 */
4283 qla4xxx_wait_for_ip_configuration(ha);
4284
4285 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
4286 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
4287 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
4288 }
4289
4290 /* Wait to ensure all sendtargets are done for min 12 sec wait */
4291 tmo = ((ha->def_timeout > LOGIN_TOV) &&
4292 (ha->def_timeout < LOGIN_TOV * 10) ?
4293 ha->def_timeout : LOGIN_TOV);
4294
4295 DEBUG2(ql4_printk(KERN_INFO, ha,
4296 "Default time to wait for build ddb %d\n", tmo));
4297
4298 wtime = jiffies + (HZ * tmo);
4299 do {
4300 if (list_empty(&list_st))
4301 break;
4302
4303 qla4xxx_remove_failed_ddb(ha, &list_st);
4304 schedule_timeout_uninterruptible(HZ / 10);
4305 } while (time_after(wtime, jiffies));
4306
4307 /* Free up the sendtargets list */
4308 qla4xxx_free_ddb_list(&list_st);
4309
4310 qla4xxx_build_nt_list(ha, &list_nt, is_reset);
4311
4312 qla4xxx_free_ddb_list(&list_nt);
4171 4313
4172 qla4xxx_free_ddb_index(ha); 4314 qla4xxx_free_ddb_index(ha);
4173} 4315}
4174 4316
4175
4176/** 4317/**
4177 * qla4xxx_probe_adapter - callback function to probe HBA 4318 * qla4xxx_probe_adapter - callback function to probe HBA
4178 * @pdev: pointer to pci_dev structure 4319 * @pdev: pointer to pci_dev structure
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 26a3fa34a33c..133989b3a9f4 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.02.00-k10" 8#define QLA4XXX_DRIVER_VERSION "5.02.00-k12"
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f85cfa6c47b5..b2c95dbe9d65 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1316,15 +1316,10 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1316 } 1316 }
1317 1317
1318 if (scsi_target_is_busy(starget)) { 1318 if (scsi_target_is_busy(starget)) {
1319 if (list_empty(&sdev->starved_entry)) 1319 list_move_tail(&sdev->starved_entry, &shost->starved_list);
1320 list_add_tail(&sdev->starved_entry,
1321 &shost->starved_list);
1322 return 0; 1320 return 0;
1323 } 1321 }
1324 1322
1325 /* We're OK to process the command, so we can't be starved */
1326 if (!list_empty(&sdev->starved_entry))
1327 list_del_init(&sdev->starved_entry);
1328 return 1; 1323 return 1;
1329} 1324}
1330 1325
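
The scsi_lib.c hunk collapses the conditional add into a single list_move_tail(), which is safe whether or not the entry is currently on the starved list: list_move_tail() unlinks the entry from wherever it is (a list_del_init()ed entry points at itself, so the unlink is a no-op) and appends it at the tail. A small kernel-style sketch of that idiom, with illustrative names:

#include <linux/list.h>

/* Illustrative stand-in for the scsi_device member used above. */
struct example_dev {
	struct list_head starved_entry;	/* INIT_LIST_HEAD()ed at allocation */
};

static void mark_starved(struct example_dev *dev, struct list_head *starved_list)
{
	/*
	 * list_move_tail() first unlinks the entry from its current position.
	 * For a detached (list_del_init()ed) entry the pointers refer to the
	 * entry itself, so the unlink is harmless; either way the entry ends
	 * up at the tail of starved_list.
	 */
	list_move_tail(&dev->starved_entry, starved_list);
}
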
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 1b214910b714..f59d4a05ecd7 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3048,7 +3048,8 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
3048 3048
3049 spin_lock_irqsave(shost->host_lock, flags); 3049 spin_lock_irqsave(shost->host_lock, flags);
3050 rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT | 3050 rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
3051 FC_RPORT_DEVLOSS_PENDING); 3051 FC_RPORT_DEVLOSS_PENDING |
3052 FC_RPORT_DEVLOSS_CALLBK_DONE);
3052 spin_unlock_irqrestore(shost->host_lock, flags); 3053 spin_unlock_irqrestore(shost->host_lock, flags);
3053 3054
3054 /* ensure any stgt delete functions are done */ 3055 /* ensure any stgt delete functions are done */
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 02d99982a74d..eacd46bb36b9 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2368,16 +2368,15 @@ static ssize_t
2368sg_proc_write_adio(struct file *filp, const char __user *buffer, 2368sg_proc_write_adio(struct file *filp, const char __user *buffer,
2369 size_t count, loff_t *off) 2369 size_t count, loff_t *off)
2370{ 2370{
2371 int num; 2371 int err;
2372 char buff[11]; 2372 unsigned long num;
2373 2373
2374 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 2374 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2375 return -EACCES; 2375 return -EACCES;
2376 num = (count < 10) ? count : 10; 2376 err = kstrtoul_from_user(buffer, count, 0, &num);
2377 if (copy_from_user(buff, buffer, num)) 2377 if (err)
2378 return -EFAULT; 2378 return err;
2379 buff[num] = '\0'; 2379 sg_allow_dio = num ? 1 : 0;
2380 sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
2381 return count; 2380 return count;
2382} 2381}
2383 2382
@@ -2390,17 +2389,15 @@ static ssize_t
2390sg_proc_write_dressz(struct file *filp, const char __user *buffer, 2389sg_proc_write_dressz(struct file *filp, const char __user *buffer,
2391 size_t count, loff_t *off) 2390 size_t count, loff_t *off)
2392{ 2391{
2393 int num; 2392 int err;
2394 unsigned long k = ULONG_MAX; 2393 unsigned long k = ULONG_MAX;
2395 char buff[11];
2396 2394
2397 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 2395 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2398 return -EACCES; 2396 return -EACCES;
2399 num = (count < 10) ? count : 10; 2397
2400 if (copy_from_user(buff, buffer, num)) 2398 err = kstrtoul_from_user(buffer, count, 0, &k);
2401 return -EFAULT; 2399 if (err)
2402 buff[num] = '\0'; 2400 return err;
2403 k = simple_strtoul(buff, NULL, 10);
2404 if (k <= 1048576) { /* limit "big buff" to 1 MB */ 2401 if (k <= 1048576) { /* limit "big buff" to 1 MB */
2405 sg_big_buff = k; 2402 sg_big_buff = k;
2406 return count; 2403 return count;
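
Both sg proc write handlers now parse the value with kstrtoul_from_user(), which copies from user space and converts in one step and, unlike the old simple_strtoul() path, rejects malformed input with an errno instead of silently parsing a truncated buffer. A hedged sketch of the pattern (hypothetical handler, not the sg code itself):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

/* Hypothetical proc-style write handler using kstrtoul_from_user(). */
static ssize_t example_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *off)
{
	unsigned long val;
	int err;

	/* Copy from user space and parse (base 0: decimal, hex or octal). */
	err = kstrtoul_from_user(buffer, count, 0, &val);
	if (err)
		return err;	/* e.g. -EINVAL for non-numeric input */

	/* ... apply val to the setting being controlled ... */

	return count;
}
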
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index b4543f575f46..36d1ed7817eb 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -839,6 +839,10 @@ static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
839 struct sym_lcb *lp = sym_lp(tp, sdev->lun); 839 struct sym_lcb *lp = sym_lp(tp, sdev->lun);
840 unsigned long flags; 840 unsigned long flags;
841 841
842 /* if slave_alloc returned before allocating a sym_lcb, return */
843 if (!lp)
844 return;
845
842 spin_lock_irqsave(np->s.host->host_lock, flags); 846 spin_lock_irqsave(np->s.host->host_lock, flags);
843 847
844 if (lp->busy_itlq || lp->busy_itl) { 848 if (lp->busy_itlq || lp->busy_itl) {